[llvm-commits] [gcc-plugin] r79561 - in /gcc-plugin/trunk: Makefile get_arch_dir i386/llvm-i386-target.h i386/llvm-i386.cpp i386/llvm-target.cpp i386/llvm-target.h llvm-target.cpp llvm-target.h

Duncan Sands baldrick at free.fr
Thu Aug 20 12:17:52 PDT 2009


Author: baldrick
Date: Thu Aug 20 14:17:52 2009
New Revision: 79561

URL: http://llvm.org/viewvc/llvm-project?rev=79561&view=rev
Log:
Use some horrible hacks to automagically determine
the architecture directory.  For example, if the
triple is x86_64-unknown-linux-gnu, it works out
that the arch directory is i386.  This should all
be done in a configure script, only I couldn't
write one of those to save my life.
Also, give the x86-specific files
architecture-neutral names.
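
For reference, the "horrible hacks" boil down to sourcing gcc's
config.gcc fragment, which sets $cpu_type from $target; that value
names the per-architecture directory.  A rough sketch of the
derivation done by hand, with an example triple and a placeholder
source path (neither is hard-coded anywhere in the plugin):

    export target=x86_64-unknown-linux-gnu   # example triple
    . /path/to/gcc-source/gcc/config.gcc     # shell fragment; sets $cpu_type
    echo $cpu_type                           # prints: i386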

Added:
    gcc-plugin/trunk/get_arch_dir
    gcc-plugin/trunk/i386/llvm-target.cpp
      - copied unchanged from r79452, gcc-plugin/trunk/i386/llvm-i386.cpp
    gcc-plugin/trunk/i386/llvm-target.h
      - copied unchanged from r79452, gcc-plugin/trunk/i386/llvm-i386-target.h
Removed:
    gcc-plugin/trunk/i386/llvm-i386-target.h
    gcc-plugin/trunk/i386/llvm-i386.cpp
    gcc-plugin/trunk/llvm-target.cpp
    gcc-plugin/trunk/llvm-target.h
Modified:
    gcc-plugin/trunk/Makefile

Modified: gcc-plugin/trunk/Makefile
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/Makefile?rev=79561&r1=79560&r2=79561&view=diff

==============================================================================
--- gcc-plugin/trunk/Makefile (original)
+++ gcc-plugin/trunk/Makefile Thu Aug 20 14:17:52 2009
@@ -2,20 +2,20 @@
 GCCOBJECT_DIR=/home/duncan/tmp/gcc.fsf.master-objects
 #GCCPLUGIN_DIR:=$(shell $(GCC) -print-file-name=plugin)
 
+TARGET_TRIPLE:=$(shell $(GCCOBJECT_DIR)/gcc/xgcc -v 2>&1 | grep "^Target:" | sed -e "s/^Target: *//")
+ARCH_DIR:=$(shell TARGET_TRIPLE=$(TARGET_TRIPLE) GCCSOURCE_DIR=$(GCCSOURCE_DIR) $(SHELL) ./get_arch_dir)
+
 C_SOURCE_FILES=llvm-cache.c
-CPP_SOURCE_FILES=llvm-convert.cpp llvm-backend.cpp llvm-debug.cpp llvm-target.cpp llvm-types.cpp bits_and_bobs.cpp
+CPP_SOURCE_FILES=llvm-convert.cpp llvm-backend.cpp llvm-debug.cpp \
+		 $(ARCH_DIR)/llvm-target.cpp llvm-types.cpp bits_and_bobs.cpp
 PLUGIN_OBJECT_FILES=$(C_SOURCE_FILES:.c=.o) $(CPP_SOURCE_FILES:.cpp=.o)
 
-#CFLAGS+=-I$(GCCPLUGINS_DIR)/include -fPIC -O2
-
-TARGET_TRIPLE:=$(shell $(GCCOBJECT_DIR)/gcc/xgcc -v 2>&1 | grep "^Target:" | sed -e "s/^Target: *//")
-
 CFLAGS+=-Werror -fPIC -g -O2
-CFLAGS+=-DIN_GCC -DTARGET_NAME=\"$(TARGET_TRIPLE)\"
+CFLAGS+=-DIN_GCC -DTARGET_NAME=\"$(TARGET_TRIPLE)\" -I$(ARCH_DIR)
 CFLAGS+=-I${GCCOBJECT_DIR}/gcc -I${GCCOBJECT_DIR}/gcc/include \
 	-I${GCCSOURCE_DIR}/gcc -I${GCCSOURCE_DIR}/include \
 	-I${GCCSOURCE_DIR}/libcpp/include -I${GCCSOURCE_DIR}/libdecnumber \
-	-I${GCCOBJECT_DIR}/libdecnumber
+	-I${GCCOBJECT_DIR}/libdecnumber -I.
 CXXFLAGS+=$(CFLAGS) $(shell llvm-config --cppflags)
 
 LDFLAGS+=$(shell llvm-config --ldflags) $(shell llvm-config --libs analysis core target x86)
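
For context, the TARGET_TRIPLE line added above simply scrapes the
"Target:" line from the newly built compiler's verbose output and
strips the prefix.  A hedged illustration of what the pipeline yields
(the object-directory path is a placeholder):

    $ /path/to/gcc-objects/gcc/xgcc -v 2>&1 | grep "^Target:"
    Target: x86_64-unknown-linux-gnu
    $ # the sed step then removes "Target: ", leaving just the triple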

Added: gcc-plugin/trunk/get_arch_dir
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/get_arch_dir?rev=79561&view=auto

==============================================================================
--- gcc-plugin/trunk/get_arch_dir (added)
+++ gcc-plugin/trunk/get_arch_dir Thu Aug 20 14:17:52 2009
@@ -0,0 +1,3 @@
+export target=$TARGET_TRIPLE
+. $GCCSOURCE_DIR/gcc/config.gcc
+echo $cpu_type
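
A hedged example of running the new script by hand, mirroring the way
the Makefile's ARCH_DIR assignment drives it (the source-directory
path is a placeholder):

    TARGET_TRIPLE=x86_64-unknown-linux-gnu \
    GCCSOURCE_DIR=/path/to/gcc-source \
        sh ./get_arch_dir        # prints the arch directory, e.g. i386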

Removed: gcc-plugin/trunk/i386/llvm-i386-target.h
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/i386/llvm-i386-target.h?rev=79560&view=auto

==============================================================================
--- gcc-plugin/trunk/i386/llvm-i386-target.h (original)
+++ gcc-plugin/trunk/i386/llvm-i386-target.h (removed)
@@ -1,898 +0,0 @@
-/* Some target-specific hooks for gcc->llvm conversion
-Copyright (C) 2007 Free Software Foundation, Inc.
-Contributed by Anton Korobeynikov (asl at math.spbu.ru)
-
-This file is part of GCC.
-
-GCC is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
-version.
-
-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-for more details.
-
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING.  If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA.  */
-
-#ifndef LLVM_I386_TARGET_H
-#define LLVM_I386_TARGET_H
-
-/* LLVM specific stuff for supporting calling convention output */
-#define TARGET_ADJUST_LLVM_CC(CC, type)                         \
-  {                                                             \
-    tree type_attributes = TYPE_ATTRIBUTES (type);              \
-    if (lookup_attribute ("stdcall", type_attributes)) {        \
-      CC = CallingConv::X86_StdCall;                            \
-    } else if (lookup_attribute("fastcall", type_attributes)) { \
-      CC = CallingConv::X86_FastCall;                           \
-    }                                                           \
-  }
-
-#define TARGET_ADJUST_LLVM_RETATTR(Rattributes, type)           \
-  {                                                             \
-    tree type_attributes = TYPE_ATTRIBUTES (type);              \
-    if (!TARGET_64BIT && (TARGET_SSEREGPARM ||                  \
-               lookup_attribute("sseregparm", type_attributes)))\
-      RAttributes |= Attribute::InReg;                          \
-  }
-
-/* LLVM specific stuff for converting gcc's `regparm` attribute to LLVM's
-   `inreg` parameter attribute */
-#define LLVM_TARGET_ENABLE_REGPARM
-
-extern "C" int ix86_regparm;
-
-#define LLVM_TARGET_INIT_REGPARM(local_regparm, local_fp_regparm, type) \
-  {                                                             \
-    tree attr;                                                  \
-    local_regparm = ix86_regparm;                               \
-    local_fp_regparm = TARGET_SSEREGPARM ? 3 : 0;               \
-    attr = lookup_attribute ("regparm",                         \
-                              TYPE_ATTRIBUTES (type));          \
-    if (attr) {                                                 \
-      local_regparm = TREE_INT_CST_LOW (TREE_VALUE              \
-                                        (TREE_VALUE (attr)));   \
-    }                                                           \
-    attr = lookup_attribute("sseregparm",                       \
-                              TYPE_ATTRIBUTES (type));          \
-    if (attr)                                                   \
-      local_fp_regparm = 3;                                     \
-  }
-
-#define LLVM_ADJUST_REGPARM_ATTRIBUTE(PAttribute, Type, Size,   \
-                                      local_regparm,            \
-                                      local_fp_regparm)         \
-  {                                                             \
-    if (!TARGET_64BIT) {                                        \
-      if (TREE_CODE(Type) == REAL_TYPE &&                       \
-          (TYPE_PRECISION(Type)==32 ||                          \
-           TYPE_PRECISION(Type)==64)) {                         \
-          local_fp_regparm -= 1;                                \
-          if (local_fp_regparm >= 0)                            \
-            PAttribute |= Attribute::InReg;                     \
-          else                                                  \
-            local_fp_regparm = 0;                               \
-      } else if (INTEGRAL_TYPE_P(Type) ||                       \
-                 POINTER_TYPE_P(Type)) {                        \
-          int words =                                           \
-                  (Size + BITS_PER_WORD - 1) / BITS_PER_WORD;   \
-          local_regparm -= words;                               \
-          if (local_regparm>=0)                                 \
-            PAttribute |= Attribute::InReg;                     \
-          else                                                  \
-            local_regparm = 0;                                  \
-      }                                                         \
-    }                                                           \
-  }
-
-#define LLVM_SET_RED_ZONE_FLAG(disable_red_zone)                \
-  if (TARGET_64BIT && TARGET_NO_RED_ZONE)                       \
-    disable_red_zone = 1;
-
-#ifdef LLVM_ABI_H
-
-/* On x86-32 objects containing SSE vectors are 16 byte aligned, everything
-   else 4.  On x86-64 vectors are 8-byte aligned, everything else can
-   be figured out by the back end. */
-extern "C" bool contains_aligned_value_p(tree);
-#define LLVM_BYVAL_ALIGNMENT(T) \
-  (TARGET_64BIT ? (TREE_CODE(T)==VECTOR_TYPE ? 8 : 0) : \
-   TARGET_SSE && contains_aligned_value_p(T) ? 16 : 4)
-
-extern tree llvm_x86_should_return_selt_struct_as_scalar(tree);
-
-/* Structs containing a single data field plus zero-length fields are
-   considered as if they were the type of the data field.  On x86-64,
-   if the element type is an MMX vector, return it as double (which will
-   get it into XMM0). */
-
-#define LLVM_SHOULD_RETURN_SELT_STRUCT_AS_SCALAR(X) \
-  llvm_x86_should_return_selt_struct_as_scalar((X))
-
-extern bool llvm_x86_should_pass_aggregate_in_integer_regs(tree, 
-                                                          unsigned*, bool*);
-
-/* LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS - Return true if this aggregate
-   value should be passed in integer registers.  This differs from the usual
-   handling in that x86-64 passes 128-bit structs and unions which only
-   contain data in the first 64 bits, as 64-bit objects.  (These can be
-   created by abusing __attribute__((aligned)).  */
-#define LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS(X, Y, Z)             \
-  llvm_x86_should_pass_aggregate_in_integer_regs((X), (Y), (Z))
-
-extern const Type *llvm_x86_scalar_type_for_struct_return(tree type, 
-                                                          unsigned *Offset);
-
-/* LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be 
-   returned as a scalar, otherwise return NULL. */
-#define LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(X, Y) \
-  llvm_x86_scalar_type_for_struct_return((X), (Y))
-
-extern const Type *llvm_x86_aggr_type_for_struct_return(tree type);
-
-/* LLVM_AGGR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be 
-   returned as an aggregate, otherwise return NULL. */
-#define LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(X) \
-  llvm_x86_aggr_type_for_struct_return(X)
-
-extern void llvm_x86_extract_multiple_return_value(Value *Src, Value *Dest,
-                                                   bool isVolatile,
-                                                   LLVMBuilder &B);
-
-/* LLVM_EXTRACT_MULTIPLE_RETURN_VALUE - Extract multiple return value from
-   SRC and assign it to DEST. */
-#define LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Src,Dest,V,B)       \
-  llvm_x86_extract_multiple_return_value((Src),(Dest),(V),(B))
-
-extern bool llvm_x86_should_pass_vector_using_byval_attr(tree);
-
-/* On x86-64, vectors which are not MMX nor SSE should be passed byval. */
-#define LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR(X)      \
-  llvm_x86_should_pass_vector_using_byval_attr((X))
-
-extern bool llvm_x86_should_pass_vector_in_integer_regs(tree);
-
-/* On x86-32, vectors which are not MMX nor SSE should be passed as integers. */
-#define LLVM_SHOULD_PASS_VECTOR_IN_INTEGER_REGS(X)      \
-  llvm_x86_should_pass_vector_in_integer_regs((X))
-
-extern tree llvm_x86_should_return_vector_as_scalar(tree, bool);
-
-/* The MMX vector v1i64 is returned in EAX and EDX on Darwin.  Communicate
-    this by returning i64 here.  Likewise, (generic) vectors such as v2i16
-    are returned in EAX.  
-    On Darwin x86-64, MMX vectors are returned in XMM0.  Communicate this by
-    returning f64.  */
-#define LLVM_SHOULD_RETURN_VECTOR_AS_SCALAR(X,isBuiltin)\
-  llvm_x86_should_return_vector_as_scalar((X), (isBuiltin))
-
-extern bool llvm_x86_should_return_vector_as_shadow(tree, bool);
-
-/* MMX vectors v2i32, v4i16, v8i8, v2f32 are returned using sret on Darwin
-   32-bit.  Vectors bigger than 128 are returned using sret.  */
-#define LLVM_SHOULD_RETURN_VECTOR_AS_SHADOW(X,isBuiltin)\
-  llvm_x86_should_return_vector_as_shadow((X),(isBuiltin))
-
-extern bool
-llvm_x86_should_not_return_complex_in_memory(tree type);
-
-/* LLVM_SHOULD_NOT_RETURN_COMPLEX_IN_MEMORY - A hook to allow
-   special _Complex handling. Return true if X should be returned using
-   multiple value return instruction.  */
-#define LLVM_SHOULD_NOT_RETURN_COMPLEX_IN_MEMORY(X) \
-  llvm_x86_should_not_return_complex_in_memory((X))
-
-extern bool
-llvm_x86_should_pass_aggregate_as_fca(tree type, const Type *);
-
-/* LLVM_SHOULD_PASS_AGGREGATE_AS_FCA - Return true if an aggregate of the
-   specified type should be passed as a first-class aggregate. */
-#ifndef LLVM_SHOULD_PASS_AGGREGATE_AS_FCA
-#define LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(X, TY) \
-  llvm_x86_should_pass_aggregate_as_fca(X, TY)
-#endif
-
-extern bool llvm_x86_should_pass_aggregate_in_memory(tree, const Type *);
-
-#define LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(X, TY)      \
-  llvm_x86_should_pass_aggregate_in_memory(X, TY)
-
-
-extern bool
-llvm_x86_64_should_pass_aggregate_in_mixed_regs(tree, const Type *Ty,
-                                                std::vector<const Type*>&);
-extern bool
-llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree, const Type *Ty,
-                                                std::vector<const Type*>&);
-
-#define LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(T, TY, CC, E)       \
-  (TARGET_64BIT ?                                                    \
-   llvm_x86_64_should_pass_aggregate_in_mixed_regs((T), (TY), (E)) : \
-   llvm_x86_32_should_pass_aggregate_in_mixed_regs((T), (TY), (E)))
-
-extern
-bool llvm_x86_64_aggregate_partially_passed_in_regs(std::vector<const Type*>&,
-                                                    std::vector<const Type*>&,
-                                                    bool);
-
-#define LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(E, SE, ISR, CC)       \
-  (TARGET_64BIT ?                                                     \
-   llvm_x86_64_aggregate_partially_passed_in_regs((E), (SE), (ISR)) : \
-   false)
-
-#endif /* LLVM_ABI_H */
-
-/* Register class used for passing given 64bit part of the argument.
-   These represent classes as documented by the PS ABI, with the exception
-   of SSESF, SSEDF classes, that are basically SSE class, just gcc will
-   use SF or DFmode move instead of DImode to avoid reformatting penalties.
-
-   Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
-   whenever possible (upper half does contain padding).
- */
-enum x86_64_reg_class
-  {
-    X86_64_NO_CLASS,
-    X86_64_INTEGER_CLASS,
-    X86_64_INTEGERSI_CLASS,
-    X86_64_SSE_CLASS,
-    X86_64_SSESF_CLASS,
-    X86_64_SSEDF_CLASS,
-    X86_64_SSEUP_CLASS,
-    X86_64_X87_CLASS,
-    X86_64_X87UP_CLASS,
-    X86_64_COMPLEX_X87_CLASS,
-    X86_64_MEMORY_CLASS
-  };
-
-/* Codes for all the SSE/MMX builtins.  */
-enum ix86_builtins
-{
-  IX86_BUILTIN_ADDPS,
-  IX86_BUILTIN_ADDSS,
-  IX86_BUILTIN_DIVPS,
-  IX86_BUILTIN_DIVSS,
-  IX86_BUILTIN_MULPS,
-  IX86_BUILTIN_MULSS,
-  IX86_BUILTIN_SUBPS,
-  IX86_BUILTIN_SUBSS,
-
-  IX86_BUILTIN_CMPEQPS,
-  IX86_BUILTIN_CMPLTPS,
-  IX86_BUILTIN_CMPLEPS,
-  IX86_BUILTIN_CMPGTPS,
-  IX86_BUILTIN_CMPGEPS,
-  IX86_BUILTIN_CMPNEQPS,
-  IX86_BUILTIN_CMPNLTPS,
-  IX86_BUILTIN_CMPNLEPS,
-  IX86_BUILTIN_CMPNGTPS,
-  IX86_BUILTIN_CMPNGEPS,
-  IX86_BUILTIN_CMPORDPS,
-  IX86_BUILTIN_CMPUNORDPS,
-  IX86_BUILTIN_CMPNEPS,
-  IX86_BUILTIN_CMPEQSS,
-  IX86_BUILTIN_CMPLTSS,
-  IX86_BUILTIN_CMPLESS,
-  IX86_BUILTIN_CMPNEQSS,
-  IX86_BUILTIN_CMPNLTSS,
-  IX86_BUILTIN_CMPNLESS,
-  IX86_BUILTIN_CMPNGTSS,
-  IX86_BUILTIN_CMPNGESS,
-  IX86_BUILTIN_CMPORDSS,
-  IX86_BUILTIN_CMPUNORDSS,
-  IX86_BUILTIN_CMPNESS,
-
-  IX86_BUILTIN_COMIEQSS,
-  IX86_BUILTIN_COMILTSS,
-  IX86_BUILTIN_COMILESS,
-  IX86_BUILTIN_COMIGTSS,
-  IX86_BUILTIN_COMIGESS,
-  IX86_BUILTIN_COMINEQSS,
-  IX86_BUILTIN_UCOMIEQSS,
-  IX86_BUILTIN_UCOMILTSS,
-  IX86_BUILTIN_UCOMILESS,
-  IX86_BUILTIN_UCOMIGTSS,
-  IX86_BUILTIN_UCOMIGESS,
-  IX86_BUILTIN_UCOMINEQSS,
-
-  IX86_BUILTIN_CVTPI2PS,
-  IX86_BUILTIN_CVTPS2PI,
-  IX86_BUILTIN_CVTSI2SS,
-  IX86_BUILTIN_CVTSI642SS,
-  IX86_BUILTIN_CVTSS2SI,
-  IX86_BUILTIN_CVTSS2SI64,
-  IX86_BUILTIN_CVTTPS2PI,
-  IX86_BUILTIN_CVTTSS2SI,
-  IX86_BUILTIN_CVTTSS2SI64,
-
-  IX86_BUILTIN_MAXPS,
-  IX86_BUILTIN_MAXSS,
-  IX86_BUILTIN_MINPS,
-  IX86_BUILTIN_MINSS,
-
-  IX86_BUILTIN_LOADUPS,
-  IX86_BUILTIN_STOREUPS,
-  IX86_BUILTIN_MOVSS,
-
-  IX86_BUILTIN_MOVHLPS,
-  IX86_BUILTIN_MOVLHPS,
-  IX86_BUILTIN_LOADHPS,
-  IX86_BUILTIN_LOADLPS,
-  IX86_BUILTIN_STOREHPS,
-  IX86_BUILTIN_STORELPS,
-
-  IX86_BUILTIN_MASKMOVQ,
-  IX86_BUILTIN_MOVMSKPS,
-  IX86_BUILTIN_PMOVMSKB,
-
-  IX86_BUILTIN_MOVNTPS,
-  IX86_BUILTIN_MOVNTQ,
-
-  IX86_BUILTIN_LOADDQU,
-  IX86_BUILTIN_STOREDQU,
-
-  IX86_BUILTIN_PACKSSWB,
-  IX86_BUILTIN_PACKSSDW,
-  IX86_BUILTIN_PACKUSWB,
-
-  IX86_BUILTIN_PADDB,
-  IX86_BUILTIN_PADDW,
-  IX86_BUILTIN_PADDD,
-  IX86_BUILTIN_PADDQ,
-  IX86_BUILTIN_PADDSB,
-  IX86_BUILTIN_PADDSW,
-  IX86_BUILTIN_PADDUSB,
-  IX86_BUILTIN_PADDUSW,
-  IX86_BUILTIN_PSUBB,
-  IX86_BUILTIN_PSUBW,
-  IX86_BUILTIN_PSUBD,
-  IX86_BUILTIN_PSUBQ,
-  IX86_BUILTIN_PSUBSB,
-  IX86_BUILTIN_PSUBSW,
-  IX86_BUILTIN_PSUBUSB,
-  IX86_BUILTIN_PSUBUSW,
-
-  IX86_BUILTIN_PAND,
-  IX86_BUILTIN_PANDN,
-  IX86_BUILTIN_POR,
-  IX86_BUILTIN_PXOR,
-
-  IX86_BUILTIN_PAVGB,
-  IX86_BUILTIN_PAVGW,
-
-  IX86_BUILTIN_PCMPEQB,
-  IX86_BUILTIN_PCMPEQW,
-  IX86_BUILTIN_PCMPEQD,
-  IX86_BUILTIN_PCMPGTB,
-  IX86_BUILTIN_PCMPGTW,
-  IX86_BUILTIN_PCMPGTD,
-
-  IX86_BUILTIN_PMADDWD,
-
-  IX86_BUILTIN_PMAXSW,
-  IX86_BUILTIN_PMAXUB,
-  IX86_BUILTIN_PMINSW,
-  IX86_BUILTIN_PMINUB,
-
-  IX86_BUILTIN_PMULHUW,
-  IX86_BUILTIN_PMULHW,
-  IX86_BUILTIN_PMULLW,
-
-  IX86_BUILTIN_PSADBW,
-  IX86_BUILTIN_PSHUFW,
-
-  IX86_BUILTIN_PSLLW,
-  IX86_BUILTIN_PSLLD,
-  IX86_BUILTIN_PSLLQ,
-  IX86_BUILTIN_PSRAW,
-  IX86_BUILTIN_PSRAD,
-  IX86_BUILTIN_PSRLW,
-  IX86_BUILTIN_PSRLD,
-  IX86_BUILTIN_PSRLQ,
-  IX86_BUILTIN_PSLLWI,
-  IX86_BUILTIN_PSLLDI,
-  IX86_BUILTIN_PSLLQI,
-  IX86_BUILTIN_PSRAWI,
-  IX86_BUILTIN_PSRADI,
-  IX86_BUILTIN_PSRLWI,
-  IX86_BUILTIN_PSRLDI,
-  IX86_BUILTIN_PSRLQI,
-
-  IX86_BUILTIN_PUNPCKHBW,
-  IX86_BUILTIN_PUNPCKHWD,
-  IX86_BUILTIN_PUNPCKHDQ,
-  IX86_BUILTIN_PUNPCKLBW,
-  IX86_BUILTIN_PUNPCKLWD,
-  IX86_BUILTIN_PUNPCKLDQ,
-
-  IX86_BUILTIN_SHUFPS,
-
-  IX86_BUILTIN_RCPPS,
-  IX86_BUILTIN_RCPSS,
-  IX86_BUILTIN_RSQRTPS,
-  IX86_BUILTIN_RSQRTSS,
-  IX86_BUILTIN_SQRTPS,
-  IX86_BUILTIN_SQRTSS,
-
-  IX86_BUILTIN_UNPCKHPS,
-  IX86_BUILTIN_UNPCKLPS,
-
-  IX86_BUILTIN_ANDPS,
-  IX86_BUILTIN_ANDNPS,
-  IX86_BUILTIN_ORPS,
-  IX86_BUILTIN_XORPS,
-
-  IX86_BUILTIN_EMMS,
-  IX86_BUILTIN_LDMXCSR,
-  IX86_BUILTIN_STMXCSR,
-  IX86_BUILTIN_SFENCE,
-
-  /* 3DNow! Original */
-  IX86_BUILTIN_FEMMS,
-  IX86_BUILTIN_PAVGUSB,
-  IX86_BUILTIN_PF2ID,
-  IX86_BUILTIN_PFACC,
-  IX86_BUILTIN_PFADD,
-  IX86_BUILTIN_PFCMPEQ,
-  IX86_BUILTIN_PFCMPGE,
-  IX86_BUILTIN_PFCMPGT,
-  IX86_BUILTIN_PFMAX,
-  IX86_BUILTIN_PFMIN,
-  IX86_BUILTIN_PFMUL,
-  IX86_BUILTIN_PFRCP,
-  IX86_BUILTIN_PFRCPIT1,
-  IX86_BUILTIN_PFRCPIT2,
-  IX86_BUILTIN_PFRSQIT1,
-  IX86_BUILTIN_PFRSQRT,
-  IX86_BUILTIN_PFSUB,
-  IX86_BUILTIN_PFSUBR,
-  IX86_BUILTIN_PI2FD,
-  IX86_BUILTIN_PMULHRW,
-
-  /* 3DNow! Athlon Extensions */
-  IX86_BUILTIN_PF2IW,
-  IX86_BUILTIN_PFNACC,
-  IX86_BUILTIN_PFPNACC,
-  IX86_BUILTIN_PI2FW,
-  IX86_BUILTIN_PSWAPDSI,
-  IX86_BUILTIN_PSWAPDSF,
-
-  /* SSE2 */
-  IX86_BUILTIN_ADDPD,
-  IX86_BUILTIN_ADDSD,
-  IX86_BUILTIN_DIVPD,
-  IX86_BUILTIN_DIVSD,
-  IX86_BUILTIN_MULPD,
-  IX86_BUILTIN_MULSD,
-  IX86_BUILTIN_SUBPD,
-  IX86_BUILTIN_SUBSD,
-
-  IX86_BUILTIN_CMPEQPD,
-  IX86_BUILTIN_CMPLTPD,
-  IX86_BUILTIN_CMPLEPD,
-  IX86_BUILTIN_CMPGTPD,
-  IX86_BUILTIN_CMPGEPD,
-  IX86_BUILTIN_CMPNEQPD,
-  IX86_BUILTIN_CMPNLTPD,
-  IX86_BUILTIN_CMPNLEPD,
-  IX86_BUILTIN_CMPNGTPD,
-  IX86_BUILTIN_CMPNGEPD,
-  IX86_BUILTIN_CMPORDPD,
-  IX86_BUILTIN_CMPUNORDPD,
-  IX86_BUILTIN_CMPNEPD,
-  IX86_BUILTIN_CMPEQSD,
-  IX86_BUILTIN_CMPLTSD,
-  IX86_BUILTIN_CMPLESD,
-  IX86_BUILTIN_CMPNEQSD,
-  IX86_BUILTIN_CMPNLTSD,
-  IX86_BUILTIN_CMPNLESD,
-  IX86_BUILTIN_CMPORDSD,
-  IX86_BUILTIN_CMPUNORDSD,
-  IX86_BUILTIN_CMPNESD,
-
-  IX86_BUILTIN_COMIEQSD,
-  IX86_BUILTIN_COMILTSD,
-  IX86_BUILTIN_COMILESD,
-  IX86_BUILTIN_COMIGTSD,
-  IX86_BUILTIN_COMIGESD,
-  IX86_BUILTIN_COMINEQSD,
-  IX86_BUILTIN_UCOMIEQSD,
-  IX86_BUILTIN_UCOMILTSD,
-  IX86_BUILTIN_UCOMILESD,
-  IX86_BUILTIN_UCOMIGTSD,
-  IX86_BUILTIN_UCOMIGESD,
-  IX86_BUILTIN_UCOMINEQSD,
-
-  IX86_BUILTIN_MAXPD,
-  IX86_BUILTIN_MAXSD,
-  IX86_BUILTIN_MINPD,
-  IX86_BUILTIN_MINSD,
-
-  IX86_BUILTIN_ANDPD,
-  IX86_BUILTIN_ANDNPD,
-  IX86_BUILTIN_ORPD,
-  IX86_BUILTIN_XORPD,
-
-  IX86_BUILTIN_SQRTPD,
-  IX86_BUILTIN_SQRTSD,
-
-  IX86_BUILTIN_UNPCKHPD,
-  IX86_BUILTIN_UNPCKLPD,
-
-  IX86_BUILTIN_SHUFPD,
-
-  IX86_BUILTIN_LOADUPD,
-  IX86_BUILTIN_STOREUPD,
-  IX86_BUILTIN_MOVSD,
-
-  IX86_BUILTIN_LOADHPD,
-  IX86_BUILTIN_LOADLPD,
-
-  IX86_BUILTIN_CVTDQ2PD,
-  IX86_BUILTIN_CVTDQ2PS,
-
-  IX86_BUILTIN_CVTPD2DQ,
-  IX86_BUILTIN_CVTPD2PI,
-  IX86_BUILTIN_CVTPD2PS,
-  IX86_BUILTIN_CVTTPD2DQ,
-  IX86_BUILTIN_CVTTPD2PI,
-
-  IX86_BUILTIN_CVTPI2PD,
-  IX86_BUILTIN_CVTSI2SD,
-  IX86_BUILTIN_CVTSI642SD,
-
-  IX86_BUILTIN_CVTSD2SI,
-  IX86_BUILTIN_CVTSD2SI64,
-  IX86_BUILTIN_CVTSD2SS,
-  IX86_BUILTIN_CVTSS2SD,
-  IX86_BUILTIN_CVTTSD2SI,
-  IX86_BUILTIN_CVTTSD2SI64,
-
-  IX86_BUILTIN_CVTPS2DQ,
-  IX86_BUILTIN_CVTPS2PD,
-  IX86_BUILTIN_CVTTPS2DQ,
-
-  IX86_BUILTIN_MOVNTI,
-  IX86_BUILTIN_MOVNTPD,
-  IX86_BUILTIN_MOVNTDQ,
-
-  /* SSE2 MMX */
-  IX86_BUILTIN_MASKMOVDQU,
-  IX86_BUILTIN_MOVMSKPD,
-  IX86_BUILTIN_PMOVMSKB128,
-
-  /* APPLE LOCAL begin 4099020 */
-  IX86_BUILTIN_MOVQ,
-  IX86_BUILTIN_LOADQ,
-  IX86_BUILTIN_STOREQ,
-  /* APPLE LOCAL end 4099020 */
-
-  IX86_BUILTIN_PACKSSWB128,
-  IX86_BUILTIN_PACKSSDW128,
-  IX86_BUILTIN_PACKUSWB128,
-
-  IX86_BUILTIN_PADDB128,
-  IX86_BUILTIN_PADDW128,
-  IX86_BUILTIN_PADDD128,
-  IX86_BUILTIN_PADDQ128,
-  IX86_BUILTIN_PADDSB128,
-  IX86_BUILTIN_PADDSW128,
-  IX86_BUILTIN_PADDUSB128,
-  IX86_BUILTIN_PADDUSW128,
-  IX86_BUILTIN_PSUBB128,
-  IX86_BUILTIN_PSUBW128,
-  IX86_BUILTIN_PSUBD128,
-  IX86_BUILTIN_PSUBQ128,
-  IX86_BUILTIN_PSUBSB128,
-  IX86_BUILTIN_PSUBSW128,
-  IX86_BUILTIN_PSUBUSB128,
-  IX86_BUILTIN_PSUBUSW128,
-
-  IX86_BUILTIN_PAND128,
-  IX86_BUILTIN_PANDN128,
-  IX86_BUILTIN_POR128,
-  IX86_BUILTIN_PXOR128,
-
-  IX86_BUILTIN_PAVGB128,
-  IX86_BUILTIN_PAVGW128,
-
-  IX86_BUILTIN_PCMPEQB128,
-  IX86_BUILTIN_PCMPEQW128,
-  IX86_BUILTIN_PCMPEQD128,
-  IX86_BUILTIN_PCMPGTB128,
-  IX86_BUILTIN_PCMPGTW128,
-  IX86_BUILTIN_PCMPGTD128,
-
-  IX86_BUILTIN_PMADDWD128,
-
-  IX86_BUILTIN_PMAXSW128,
-  IX86_BUILTIN_PMAXUB128,
-  IX86_BUILTIN_PMINSW128,
-  IX86_BUILTIN_PMINUB128,
-
-  IX86_BUILTIN_PMULUDQ,
-  IX86_BUILTIN_PMULUDQ128,
-  IX86_BUILTIN_PMULHUW128,
-  IX86_BUILTIN_PMULHW128,
-  IX86_BUILTIN_PMULLW128,
-
-  IX86_BUILTIN_PSADBW128,
-  IX86_BUILTIN_PSHUFHW,
-  IX86_BUILTIN_PSHUFLW,
-  IX86_BUILTIN_PSHUFD,
-
-  IX86_BUILTIN_PSLLW128,
-  IX86_BUILTIN_PSLLD128,
-  IX86_BUILTIN_PSLLQ128,
-  IX86_BUILTIN_PSRAW128,
-  IX86_BUILTIN_PSRAD128,
-  IX86_BUILTIN_PSRLW128,
-  IX86_BUILTIN_PSRLD128,
-  IX86_BUILTIN_PSRLQ128,
-  IX86_BUILTIN_PSLLDQI128,
-  /* APPLE LOCAL 591583 */
-  IX86_BUILTIN_PSLLDQI128_BYTESHIFT,
-  IX86_BUILTIN_PSLLWI128,
-  IX86_BUILTIN_PSLLDI128,
-  IX86_BUILTIN_PSLLQI128,
-  IX86_BUILTIN_PSRAWI128,
-  IX86_BUILTIN_PSRADI128,
-  IX86_BUILTIN_PSRLDQI128,
-  /* APPLE LOCAL 591583 */
-  IX86_BUILTIN_PSRLDQI128_BYTESHIFT,
-  IX86_BUILTIN_PSRLWI128,
-  IX86_BUILTIN_PSRLDI128,
-  IX86_BUILTIN_PSRLQI128,
-
-  IX86_BUILTIN_PUNPCKHBW128,
-  IX86_BUILTIN_PUNPCKHWD128,
-  IX86_BUILTIN_PUNPCKHDQ128,
-  IX86_BUILTIN_PUNPCKHQDQ128,
-  IX86_BUILTIN_PUNPCKLBW128,
-  IX86_BUILTIN_PUNPCKLWD128,
-  IX86_BUILTIN_PUNPCKLDQ128,
-  IX86_BUILTIN_PUNPCKLQDQ128,
-
-  IX86_BUILTIN_CLFLUSH,
-  IX86_BUILTIN_MFENCE,
-  IX86_BUILTIN_LFENCE,
-
-  /* Prescott New Instructions.  */
-  IX86_BUILTIN_ADDSUBPS,
-  IX86_BUILTIN_HADDPS,
-  IX86_BUILTIN_HSUBPS,
-  IX86_BUILTIN_MOVSHDUP,
-  IX86_BUILTIN_MOVSLDUP,
-  IX86_BUILTIN_ADDSUBPD,
-  IX86_BUILTIN_HADDPD,
-  IX86_BUILTIN_HSUBPD,
-  IX86_BUILTIN_LDDQU,
-
-  IX86_BUILTIN_MONITOR,
-  IX86_BUILTIN_MWAIT,
-
-  /* Merom New Instructions.  */
-  IX86_BUILTIN_PHADDW,
-  IX86_BUILTIN_PHADDD,
-  IX86_BUILTIN_PHADDSW,
-  IX86_BUILTIN_PHSUBW,
-  IX86_BUILTIN_PHSUBD,
-  IX86_BUILTIN_PHSUBSW,
-  IX86_BUILTIN_PMADDUBSW,
-  IX86_BUILTIN_PMULHRSW,
-  IX86_BUILTIN_PSHUFB,
-  IX86_BUILTIN_PSIGNB,
-  IX86_BUILTIN_PSIGNW,
-  IX86_BUILTIN_PSIGND,
-  IX86_BUILTIN_PALIGNR,
-  IX86_BUILTIN_PABSB,
-  IX86_BUILTIN_PABSW,
-  IX86_BUILTIN_PABSD,
-
-  IX86_BUILTIN_PHADDW128,
-  IX86_BUILTIN_PHADDD128,
-  IX86_BUILTIN_PHADDSW128,
-  IX86_BUILTIN_PHSUBW128,
-  IX86_BUILTIN_PHSUBD128,
-  IX86_BUILTIN_PHSUBSW128,
-  IX86_BUILTIN_PMADDUBSW128,
-  IX86_BUILTIN_PMULHRSW128,
-  IX86_BUILTIN_PSHUFB128,
-  IX86_BUILTIN_PSIGNB128,
-  IX86_BUILTIN_PSIGNW128,
-  IX86_BUILTIN_PSIGND128,
-  IX86_BUILTIN_PALIGNR128,
-  IX86_BUILTIN_PABSB128,
-  IX86_BUILTIN_PABSW128,
-  IX86_BUILTIN_PABSD128,
-  /* APPLE LOCAL begin 5612787 mainline sse4 */
-  /* AMDFAM10 - SSE4A New Instructions.  */
-  IX86_BUILTIN_MOVNTSD,
-  IX86_BUILTIN_MOVNTSS,
-  IX86_BUILTIN_EXTRQI,
-  IX86_BUILTIN_EXTRQ,
-  IX86_BUILTIN_INSERTQI,
-  IX86_BUILTIN_INSERTQ,
-
-  /* SSE4.1.  */
-  IX86_BUILTIN_BLENDPD,
-  IX86_BUILTIN_BLENDPS,
-  IX86_BUILTIN_BLENDVPD,
-  IX86_BUILTIN_BLENDVPS,
-  IX86_BUILTIN_PBLENDVB128,
-  IX86_BUILTIN_PBLENDW128,
-
-  IX86_BUILTIN_DPPD,
-  IX86_BUILTIN_DPPS,
-
-  IX86_BUILTIN_INSERTPS128,
-
-  IX86_BUILTIN_MOVNTDQA,
-  IX86_BUILTIN_MPSADBW128,
-  IX86_BUILTIN_PACKUSDW128,
-  IX86_BUILTIN_PCMPEQQ,
-  IX86_BUILTIN_PHMINPOSUW128,
-
-  IX86_BUILTIN_PMAXSB128,
-  IX86_BUILTIN_PMAXSD128,
-  IX86_BUILTIN_PMAXUD128,
-  IX86_BUILTIN_PMAXUW128,
-
-  IX86_BUILTIN_PMINSB128,
-  IX86_BUILTIN_PMINSD128,
-  IX86_BUILTIN_PMINUD128,
-  IX86_BUILTIN_PMINUW128,
-
-  IX86_BUILTIN_PMOVSXBW128,
-  IX86_BUILTIN_PMOVSXBD128,
-  IX86_BUILTIN_PMOVSXBQ128,
-  IX86_BUILTIN_PMOVSXWD128,
-  IX86_BUILTIN_PMOVSXWQ128,
-  IX86_BUILTIN_PMOVSXDQ128,
-
-  IX86_BUILTIN_PMOVZXBW128,
-  IX86_BUILTIN_PMOVZXBD128,
-  IX86_BUILTIN_PMOVZXBQ128,
-  IX86_BUILTIN_PMOVZXWD128,
-  IX86_BUILTIN_PMOVZXWQ128,
-  IX86_BUILTIN_PMOVZXDQ128,
-
-  IX86_BUILTIN_PMULDQ128,
-  IX86_BUILTIN_PMULLD128,
-
-  IX86_BUILTIN_ROUNDPD,
-  IX86_BUILTIN_ROUNDPS,
-  IX86_BUILTIN_ROUNDSD,
-  IX86_BUILTIN_ROUNDSS,
-
-  IX86_BUILTIN_PTESTZ,
-  IX86_BUILTIN_PTESTC,
-  IX86_BUILTIN_PTESTNZC,
-  /* APPLE LOCAL end 5612787 mainline sse4 */
-  /* APPLE LOCAL end mainline */
-  IX86_BUILTIN_VEC_INIT_V2SI,
-  IX86_BUILTIN_VEC_INIT_V4HI,
-  IX86_BUILTIN_VEC_INIT_V8QI,
-  IX86_BUILTIN_VEC_EXT_V2DF,
-  IX86_BUILTIN_VEC_EXT_V2DI,
-  IX86_BUILTIN_VEC_EXT_V4SF,
-  IX86_BUILTIN_VEC_EXT_V4SI,
-  IX86_BUILTIN_VEC_EXT_V8HI,
-  /* APPLE LOCAL begin 5612787 mainline sse4 */
-  /* deletion */
-  /* APPLE LOCAL end 5612787 mainline sse4 */
-  IX86_BUILTIN_VEC_EXT_V2SI,
-  IX86_BUILTIN_VEC_EXT_V4HI,
-  /* APPLE LOCAL begin 5612787 mainline sse4 */
-  IX86_BUILTIN_VEC_EXT_V16QI,
-  IX86_BUILTIN_VEC_SET_V2DI,
-  IX86_BUILTIN_VEC_SET_V4SF,
-  IX86_BUILTIN_VEC_SET_V4SI,
-  /* APPLE LOCAL end 5612787 mainline sse4 */
-  IX86_BUILTIN_VEC_SET_V8HI,
-  IX86_BUILTIN_VEC_SET_V4HI,
-  /* APPLE LOCAL begin 5612787 mainline sse4 */
-  IX86_BUILTIN_VEC_SET_V16QI,
-
-  IX86_BUILTIN_VEC_PACK_SFIX,
-
-  /* SSE4.2.  */
-  IX86_BUILTIN_CRC32QI,
-  IX86_BUILTIN_CRC32HI,
-  IX86_BUILTIN_CRC32SI,
-  IX86_BUILTIN_CRC32DI,
-
-  IX86_BUILTIN_PCMPESTRI128,
-  IX86_BUILTIN_PCMPESTRM128,
-  IX86_BUILTIN_PCMPESTRA128,
-  IX86_BUILTIN_PCMPESTRC128,
-  IX86_BUILTIN_PCMPESTRO128,
-  IX86_BUILTIN_PCMPESTRS128,
-  IX86_BUILTIN_PCMPESTRZ128,
-  IX86_BUILTIN_PCMPISTRI128,
-  IX86_BUILTIN_PCMPISTRM128,
-  IX86_BUILTIN_PCMPISTRA128,
-  IX86_BUILTIN_PCMPISTRC128,
-  IX86_BUILTIN_PCMPISTRO128,
-  IX86_BUILTIN_PCMPISTRS128,
-  IX86_BUILTIN_PCMPISTRZ128,
-
-  IX86_BUILTIN_PCMPGTQ,
-
-  /* TFmode support builtins.  */
-  IX86_BUILTIN_INFQ,
-  IX86_BUILTIN_FABSQ,
-  IX86_BUILTIN_COPYSIGNQ,
-  /* APPLE LOCAL end 5612787 mainline sse4 */
-
-  IX86_BUILTIN_MAX
-};
-
-/* LLVM_TARGET_INTRINSIC_PREFIX - Specify what prefix this target uses for its
- * intrinsics.
- */
-#define LLVM_TARGET_INTRINSIC_PREFIX "x86"
-
-/* LLVM_TARGET_NAME - This specifies the name of the target, which correlates to
- * the llvm::InitializeXXXTarget() function.
- */
-#define LLVM_TARGET_NAME X86
-
-/* Turn -march=xx into a CPU type.
- */
-#define LLVM_SET_SUBTARGET_FEATURES(F) \
-  { if (TARGET_MACHO && ! strcmp (ix86_arch_string, "apple")) \
-      F.setCPU(TARGET_64BIT ? "core2" : "yonah");             \
-    else                                                      \
-      F.setCPU(ix86_arch_string);                             \
-    if (TARGET_64BIT)   F.AddFeature("64bit");                \
-    if (TARGET_MMX)     F.AddFeature("mmx");                  \
-    else if (target_flags_explicit & MASK_MMX) F.AddFeature("mmx", false); \
-    if (TARGET_SSE)     F.AddFeature("sse");                  \
-    else if (target_flags_explicit & MASK_SSE) F.AddFeature("sse", false); \
-    if (TARGET_SSE2)    F.AddFeature("sse2");                 \
-    else if (target_flags_explicit & MASK_SSE2) F.AddFeature("sse2", false); \
-    if (TARGET_SSE3)    F.AddFeature("sse3");                 \
-    else if (target_flags_explicit & MASK_SSE3) F.AddFeature("sse3", false); \
-    if (TARGET_SSSE3)   F.AddFeature("ssse3");                \
-    if (TARGET_SSE4_1)  F.AddFeature("sse41");                \
-    if (TARGET_SSE4_2)  F.AddFeature("sse42");                \
-    if (TARGET_3DNOW)   F.AddFeature("3dnow");                \
-    if (TARGET_3DNOW_A) F.AddFeature("3dnowa");               \
-  }
-
-#define LLVM_SET_IMPLICIT_FLOAT(flag_no_implicit_float)       \
-  if (!TARGET_80387)                                          \
-    flag_no_implicit_float = 1;                               \
-  else                                                        \
-    flag_no_implicit_float = 0;                               
-    
-/* LLVM ABI definition macros. */
-
-/* When -m64 is specified, set the architecture to x86_64-os-blah even if the
- * compiler was configured for i[3456]86-os-blah.
- */
-#define LLVM_OVERRIDE_TARGET_ARCH() \
-  (TARGET_64BIT ? "x86_64" : "i386")
-
-/* LLVM_TARGET_INTRINSIC_LOWER - To handle builtins, we want to expand the
- * invocation into normal LLVM code.  If the target can handle the builtin, this
- * macro should call the target TreeToLLVM::TargetIntrinsicLower method and
- *  return true.This macro is invoked from a method in the TreeToLLVM class.
- */
-#define LLVM_TARGET_INTRINSIC_LOWER(EXP, BUILTIN_CODE, DESTLOC, RESULT,       \
-                                    DESTTY, OPS)                              \
-        TargetIntrinsicLower(EXP, BUILTIN_CODE, DESTLOC, RESULT, DESTTY, OPS);
-
-/* When extracting a register name for a constraint, use the string extracted
-   from the magic symbol built for that register, rather than reg_names.
-   The latter maps both AH and AL to the same thing, which means we can't
-   distinguish them. */
-#define LLVM_DO_NOT_USE_REG_NAMES
-
-#endif /* LLVM_I386_TARGET_H */

Removed: gcc-plugin/trunk/i386/llvm-i386.cpp
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/i386/llvm-i386.cpp?rev=79560&view=auto

==============================================================================
--- gcc-plugin/trunk/i386/llvm-i386.cpp (original)
+++ gcc-plugin/trunk/i386/llvm-i386.cpp (removed)
@@ -1,1515 +0,0 @@
-/* High-level LLVM backend interface 
-Copyright (C) 2005 Free Software Foundation, Inc.
-Contributed by Evan Cheng (evan.cheng at apple.com)
-
-This file is part of GCC.
-
-GCC is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
-version.
-
-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-for more details.
-
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING.  If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA.  */
-
-//===----------------------------------------------------------------------===//
-// This is a C++ source file that implements specific llvm IA-32 ABI.
-//===----------------------------------------------------------------------===//
-
-// LLVM headers
-#include "llvm/DerivedTypes.h"
-#include "llvm/Instructions.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-
-// System headers
-#include <gmp.h>
-
-// GCC headers
-#undef VISIBILITY_HIDDEN
-
-extern "C" {
-#include "config.h"
-#include "system.h"
-#include "coretypes.h"
-#include "target.h"
-#include "toplev.h"
-#include "tree.h"
-}
-
-// Plugin headers
-#include "llvm-abi.h"
-#include "llvm-internal.h"
-#include "llvm-target.h"
-
-static LLVMContext &Context = getGlobalContext();
-
-/* TargetIntrinsicLower - For builtins that we want to expand to normal LLVM
- * code, emit the code now.  If we can handle the code, this macro should emit
- * the code, return true.
- */
-bool TreeToLLVM::TargetIntrinsicLower(tree exp,
-                                      unsigned FnCode,
-                                      const MemRef *DestLoc,
-                                      Value *&Result,
-                                      const Type *ResultType,
-                                      std::vector<Value*> &Ops) {
-  switch (FnCode) {
-  default: break;
-  case IX86_BUILTIN_ADDPS:
-  case IX86_BUILTIN_ADDPD:
-  case IX86_BUILTIN_PADDB:
-  case IX86_BUILTIN_PADDW:
-  case IX86_BUILTIN_PADDD:
-  case IX86_BUILTIN_PADDQ:
-  case IX86_BUILTIN_PADDB128:
-  case IX86_BUILTIN_PADDW128:
-  case IX86_BUILTIN_PADDD128:
-  case IX86_BUILTIN_PADDQ128:
-    Result = Builder.CreateAdd(Ops[0], Ops[1], "tmp");
-    return true;
-  case IX86_BUILTIN_SUBPS:
-  case IX86_BUILTIN_SUBPD:
-  case IX86_BUILTIN_PSUBB:
-  case IX86_BUILTIN_PSUBW:
-  case IX86_BUILTIN_PSUBD:
-  case IX86_BUILTIN_PSUBQ:
-  case IX86_BUILTIN_PSUBB128:
-  case IX86_BUILTIN_PSUBW128:
-  case IX86_BUILTIN_PSUBD128:
-  case IX86_BUILTIN_PSUBQ128:
-    Result = Builder.CreateSub(Ops[0], Ops[1], "tmp");
-    return true;
-  case IX86_BUILTIN_MULPS:
-  case IX86_BUILTIN_MULPD:
-  case IX86_BUILTIN_PMULLW:
-  case IX86_BUILTIN_PMULLW128:
-    Result = Builder.CreateMul(Ops[0], Ops[1], "tmp");
-    return true;
-  case IX86_BUILTIN_DIVPS:
-  case IX86_BUILTIN_DIVPD:
-    Result = Builder.CreateFDiv(Ops[0], Ops[1], "tmp");
-    return true;
-  case IX86_BUILTIN_PAND:
-  case IX86_BUILTIN_PAND128:
-    Result = Builder.CreateAnd(Ops[0], Ops[1], "tmp");
-    return true;
-  case IX86_BUILTIN_PANDN:
-  case IX86_BUILTIN_PANDN128:
-    Ops[0] = Builder.CreateNot(Ops[0], "tmp");
-    Result = Builder.CreateAnd(Ops[0], Ops[1], "tmp");
-    return true;
-  case IX86_BUILTIN_POR:
-  case IX86_BUILTIN_POR128:
-    Result = Builder.CreateOr(Ops[0], Ops[1], "tmp");
-    return true;
-  case IX86_BUILTIN_PXOR:
-  case IX86_BUILTIN_PXOR128:
-    Result = Builder.CreateXor(Ops[0], Ops[1], "tmp");
-    return true;
-  case IX86_BUILTIN_ANDPS:
-  case IX86_BUILTIN_ORPS:
-  case IX86_BUILTIN_XORPS:
-  case IX86_BUILTIN_ANDNPS:
-  case IX86_BUILTIN_ANDPD:
-  case IX86_BUILTIN_ORPD:
-  case IX86_BUILTIN_XORPD:
-  case IX86_BUILTIN_ANDNPD:
-    if (cast<VectorType>(ResultType)->getNumElements() == 4)  // v4f32
-      Ops[0] = Builder.CreateBitCast(Ops[0], 
-                                  VectorType::get(Type::getInt32Ty(Context), 4),
-                                     "tmp");
-    else                                                      // v2f64
-      Ops[0] = Builder.CreateBitCast(Ops[0], 
-                                 VectorType::get(Type::getInt64Ty(Context), 2),
-                                     "tmp");
-    
-    Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "tmp");
-    switch (FnCode) {
-      case IX86_BUILTIN_ANDPS:
-      case IX86_BUILTIN_ANDPD:
-        Result = Builder.CreateAnd(Ops[0], Ops[1], "tmp");
-        break;
-      case IX86_BUILTIN_ORPS:
-      case IX86_BUILTIN_ORPD:
-        Result = Builder.CreateOr (Ops[0], Ops[1], "tmp");
-         break;
-      case IX86_BUILTIN_XORPS:
-      case IX86_BUILTIN_XORPD:
-        Result = Builder.CreateXor(Ops[0], Ops[1], "tmp");
-        break;
-      case IX86_BUILTIN_ANDNPS:
-      case IX86_BUILTIN_ANDNPD:
-        Ops[0] = Builder.CreateNot(Ops[0], "tmp");
-        Result = Builder.CreateAnd(Ops[0], Ops[1], "tmp");
-        break;
-    }
-    Result = Builder.CreateBitCast(Result, ResultType, "tmp");
-    return true;
-  case IX86_BUILTIN_SHUFPS:
-    if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[2])) {
-      int EV = Elt->getZExtValue();
-      Result = BuildVectorShuffle(Ops[0], Ops[1],
-                                  ((EV & 0x03) >> 0),   ((EV & 0x0c) >> 2),
-                                  ((EV & 0x30) >> 4)+4, ((EV & 0xc0) >> 6)+4);
-    } else {
-      error_at(EXPR_LOCATION(exp), "mask must be an immediate");
-      Result = Ops[0];
-    }
-    return true;
-  case IX86_BUILTIN_SHUFPD:
-    if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[2])) {
-      int EV = Elt->getZExtValue();
-      Result = BuildVectorShuffle(Ops[0], Ops[1],
-                                  ((EV & 0x01) >> 0),   ((EV & 0x02) >> 1)+2);
-    } else {
-      error_at(EXPR_LOCATION(exp), "mask must be an immediate");
-      Result = Ops[0];
-    }
-    return true;
-  case IX86_BUILTIN_PSHUFW:
-  case IX86_BUILTIN_PSHUFD:
-    if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[1])) {
-      int EV = Elt->getZExtValue();
-      Result = BuildVectorShuffle(Ops[0], Ops[0],
-                                  ((EV & 0x03) >> 0),   ((EV & 0x0c) >> 2),
-                                  ((EV & 0x30) >> 4),   ((EV & 0xc0) >> 6));
-    } else {
-      error_at(EXPR_LOCATION(exp), "mask must be an immediate");
-      Result = Ops[0];
-    }
-    return true;
-  case IX86_BUILTIN_PSHUFHW:
-    if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[1])) {
-      int EV = Elt->getZExtValue();
-      Result = BuildVectorShuffle(Ops[0], Ops[0],
-                                  0, 1, 2, 3,
-                                  ((EV & 0x03) >> 0)+4, ((EV & 0x0c) >> 2)+4,
-                                  ((EV & 0x30) >> 4)+4, ((EV & 0xc0) >> 6)+4);
-      return true;
-    }
-    return false;
-  case IX86_BUILTIN_PSHUFLW:
-    if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[1])) {
-      int EV = Elt->getZExtValue();
-      Result = BuildVectorShuffle(Ops[0], Ops[0],
-                                  ((EV & 0x03) >> 0),   ((EV & 0x0c) >> 2),
-                                  ((EV & 0x30) >> 4),   ((EV & 0xc0) >> 6),
-                                  4, 5, 6, 7);
-    } else {
-      error_at(EXPR_LOCATION(exp), "mask must be an immediate");
-      Result = Ops[0];
-    }
-    
-    return true;
-  case IX86_BUILTIN_PUNPCKHBW:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 12, 5, 13,
-                                                6, 14, 7, 15);
-    return true;
-  case IX86_BUILTIN_PUNPCKHWD:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 6, 3, 7);
-    return true;
-  case IX86_BUILTIN_PUNPCKHDQ:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 1, 3);
-    return true;
-  case IX86_BUILTIN_PUNPCKLBW:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 0,  8, 1,  9,
-                                                2, 10, 3, 11);
-    return true;
-  case IX86_BUILTIN_PUNPCKLWD:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 4, 1, 5);
-    return true;
-  case IX86_BUILTIN_PUNPCKLDQ:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
-    return true;
-  case IX86_BUILTIN_PUNPCKHBW128:
-    Result = BuildVectorShuffle(Ops[0], Ops[1],  8, 24,  9, 25,
-                                                10, 26, 11, 27,
-                                                12, 28, 13, 29,
-                                                14, 30, 15, 31);
-    return true;
-  case IX86_BUILTIN_PUNPCKHWD128:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 12, 5, 13, 6, 14, 7, 15);
-    return true;
-  case IX86_BUILTIN_PUNPCKHDQ128:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 6, 3, 7);
-    return true;
-  case IX86_BUILTIN_PUNPCKHQDQ128:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 1, 3);
-    return true;
-  case IX86_BUILTIN_PUNPCKLBW128:
-    Result = BuildVectorShuffle(Ops[0], Ops[1],  0, 16,  1, 17,
-                                                 2, 18,  3, 19,
-                                                 4, 20,  5, 21,
-                                                 6, 22,  7, 23);
-    return true;
-  case IX86_BUILTIN_PUNPCKLWD128:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 8, 1, 9, 2, 10, 3, 11);
-    return true;
-  case IX86_BUILTIN_PUNPCKLDQ128:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 4, 1, 5);
-    return true;
-  case IX86_BUILTIN_PUNPCKLQDQ128:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
-    return true;
-  case IX86_BUILTIN_UNPCKHPS:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 6, 3, 7);
-    return true;
-  case IX86_BUILTIN_UNPCKHPD:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 1, 3);
-    return true;
-  case IX86_BUILTIN_UNPCKLPS:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 4, 1, 5);
-    return true;
-  case IX86_BUILTIN_UNPCKLPD:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
-    return true;
-  case IX86_BUILTIN_MOVHLPS:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 6, 7, 2, 3);
-    return true;
-  case IX86_BUILTIN_MOVLHPS:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 1, 4, 5);
-    return true;
-  case IX86_BUILTIN_MOVSS:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 1, 2, 3);
-    return true;
-  case IX86_BUILTIN_MOVSD:
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 1);
-    return true;
-  case IX86_BUILTIN_MOVQ: {
-    Value *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
-    Result = BuildVector(Zero, Zero, Zero, Zero, NULL);
-    Result = BuildVectorShuffle(Result, Ops[0], 4, 5, 2, 3);
-    return true;
-  }
-  case IX86_BUILTIN_LOADQ: {
-    PointerType *i64Ptr = PointerType::getUnqual(Type::getInt64Ty(Context));
-    Ops[0] = Builder.CreateBitCast(Ops[0], i64Ptr, "tmp");
-    Ops[0] = Builder.CreateLoad(Ops[0], "tmp");
-    Value *Zero = ConstantInt::get(Type::getInt64Ty(Context), 0);
-    Result = BuildVector(Zero, Zero, NULL);
-    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 0);
-    Result = Builder.CreateInsertElement(Result, Ops[0], Idx, "tmp");
-    Result = Builder.CreateBitCast(Result, ResultType, "tmp");
-    return true;
-  }
-  case IX86_BUILTIN_LOADUPS: {
-    VectorType *v4f32 = VectorType::get(Type::getFloatTy(Context), 4);
-    PointerType *v4f32Ptr = PointerType::getUnqual(v4f32);
-    Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr, "tmp");
-    LoadInst *LI = Builder.CreateLoad(BC, "tmp");
-    LI->setAlignment(1);
-    Result = LI;
-    return true;
-  }
-  case IX86_BUILTIN_LOADUPD: {
-    VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
-    PointerType *v2f64Ptr = PointerType::getUnqual(v2f64);
-    Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr, "tmp");
-    LoadInst *LI = Builder.CreateLoad(BC, "tmp");
-    LI->setAlignment(1);
-    Result = LI;
-    return true;
-  }
-  case IX86_BUILTIN_LOADDQU: {
-    VectorType *v16i8 = VectorType::get(Type::getInt8Ty(Context), 16);
-    PointerType *v16i8Ptr = PointerType::getUnqual(v16i8);
-    Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr, "tmp");
-    LoadInst *LI = Builder.CreateLoad(BC, "tmp");
-    LI->setAlignment(1);
-    Result = LI;
-    return true;
-  }
-  case IX86_BUILTIN_STOREUPS: {
-    VectorType *v4f32 = VectorType::get(Type::getFloatTy(Context), 4);
-    PointerType *v4f32Ptr = PointerType::getUnqual(v4f32);
-    Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr, "tmp");
-    StoreInst *SI = Builder.CreateStore(Ops[1], BC);
-    SI->setAlignment(1);
-    Result = SI;
-    return true;
-  }
-  case IX86_BUILTIN_STOREUPD: {
-    VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
-    PointerType *v2f64Ptr = PointerType::getUnqual(v2f64);
-    Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr, "tmp");
-    StoreInst *SI = Builder.CreateStore(Ops[1], BC);
-    SI->setAlignment(1);
-    Result = SI;
-    return true;
-  }
-  case IX86_BUILTIN_STOREDQU: {
-    VectorType *v16i8 = VectorType::get(Type::getInt8Ty(Context), 16);
-    PointerType *v16i8Ptr = PointerType::getUnqual(v16i8);
-    Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr, "tmp");
-    StoreInst *SI = Builder.CreateStore(Ops[1], BC);
-    SI->setAlignment(1);
-    Result = SI;
-    return true;
-  }
-  case IX86_BUILTIN_LOADHPS: {
-    PointerType *f64Ptr = PointerType::getUnqual(Type::getDoubleTy(Context));
-    Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr, "tmp");
-    Value *Load = Builder.CreateLoad(Ops[1], "tmp");
-    Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
-    Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 1, 4, 5);
-    Result = Builder.CreateBitCast(Result, ResultType, "tmp");
-    return true;
-  }
-  case IX86_BUILTIN_LOADLPS: {
-    PointerType *f64Ptr = PointerType::getUnqual(Type::getDoubleTy(Context));
-    Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr, "tmp");
-    Value *Load = Builder.CreateLoad(Ops[1], "tmp");
-    Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
-    Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 5, 2, 3);
-    Result = Builder.CreateBitCast(Result, ResultType, "tmp");
-    return true;
-  }
-  case IX86_BUILTIN_LOADHPD: {
-    Value *Load = Builder.CreateLoad(Ops[1], "tmp");
-    Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
-    Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
-    Result = Builder.CreateBitCast(Result, ResultType, "tmp");
-    return true;
-  }
-  case IX86_BUILTIN_LOADLPD: {
-    Value *Load = Builder.CreateLoad(Ops[1], "tmp");
-    Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
-    Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
-    Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 1);
-    Result = Builder.CreateBitCast(Result, ResultType, "tmp");
-    return true;
-  }
-  case IX86_BUILTIN_STOREHPS: {
-    VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
-    PointerType *f64Ptr = PointerType::getUnqual(Type::getDoubleTy(Context));
-    Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr, "tmp");
-    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 1);
-    Ops[1] = Builder.CreateBitCast(Ops[1], v2f64, "tmp");
-    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "tmp");
-    Result = Builder.CreateStore(Ops[1], Ops[0]);
-    return true;
-  }
-  case IX86_BUILTIN_STORELPS: {
-    VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
-    PointerType *f64Ptr = PointerType::getUnqual(Type::getDoubleTy(Context));
-    Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr, "tmp");
-    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 0);
-    Ops[1] = Builder.CreateBitCast(Ops[1], v2f64, "tmp");
-    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "tmp");
-    Result = Builder.CreateStore(Ops[1], Ops[0]);
-    return true;
-  }
-  case IX86_BUILTIN_MOVSHDUP:
-    Result = BuildVectorShuffle(Ops[0], Ops[0], 1, 1, 3, 3);
-    return true;
-  case IX86_BUILTIN_MOVSLDUP:
-    Result = BuildVectorShuffle(Ops[0], Ops[0], 0, 0, 2, 2);
-    return true;
-  case IX86_BUILTIN_VEC_INIT_V2SI:
-    Result = BuildVector(Ops[0], Ops[1], NULL);
-    return true;
-  case IX86_BUILTIN_VEC_INIT_V4HI:
-    // Sometimes G++ promotes arguments to int.
-    for (unsigned i = 0; i != 4; ++i)
-      Ops[i] = Builder.CreateIntCast(Ops[i], Type::getInt16Ty(Context), false, "tmp");
-    Result = BuildVector(Ops[0], Ops[1], Ops[2], Ops[3], NULL);
-    return true;
-  case IX86_BUILTIN_VEC_INIT_V8QI:
-    // Sometimes G++ promotes arguments to int.
-    for (unsigned i = 0; i != 8; ++i)
-      Ops[i] = Builder.CreateIntCast(Ops[i], Type::getInt8Ty(Context), false, "tmp");
-    Result = BuildVector(Ops[0], Ops[1], Ops[2], Ops[3],
-                         Ops[4], Ops[5], Ops[6], Ops[7], NULL);
-    return true;
-  case IX86_BUILTIN_VEC_EXT_V2SI:
-  case IX86_BUILTIN_VEC_EXT_V4HI:
-  case IX86_BUILTIN_VEC_EXT_V2DF:
-  case IX86_BUILTIN_VEC_EXT_V2DI:
-  case IX86_BUILTIN_VEC_EXT_V4SI:
-  case IX86_BUILTIN_VEC_EXT_V4SF:
-  case IX86_BUILTIN_VEC_EXT_V8HI:
-  case IX86_BUILTIN_VEC_EXT_V16QI:
-    Result = Builder.CreateExtractElement(Ops[0], Ops[1], "tmp");
-    return true;
-  case IX86_BUILTIN_VEC_SET_V16QI:
-    // Sometimes G++ promotes arguments to int.
-    Ops[1] = Builder.CreateIntCast(Ops[1], Type::getInt8Ty(Context), false, "tmp");
-    Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "tmp");
-    return true;
-  case IX86_BUILTIN_VEC_SET_V4HI:
-  case IX86_BUILTIN_VEC_SET_V8HI:
-    // GCC sometimes doesn't produce the right element type.
-    Ops[1] = Builder.CreateIntCast(Ops[1], Type::getInt16Ty(Context), false, "tmp");
-    Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "tmp");
-    return true;
-  case IX86_BUILTIN_VEC_SET_V4SI:
-    Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "tmp");
-    return true;
-  case IX86_BUILTIN_VEC_SET_V2DI:
-    Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "tmp");
-    return true;
-  case IX86_BUILTIN_CMPEQPS:
-  case IX86_BUILTIN_CMPLTPS:
-  case IX86_BUILTIN_CMPLEPS:
-  case IX86_BUILTIN_CMPGTPS:
-  case IX86_BUILTIN_CMPGEPS:
-  case IX86_BUILTIN_CMPNEQPS:
-  case IX86_BUILTIN_CMPNLTPS:
-  case IX86_BUILTIN_CMPNLEPS:
-  case IX86_BUILTIN_CMPNGTPS:
-  case IX86_BUILTIN_CMPNGEPS:
-  case IX86_BUILTIN_CMPORDPS:
-  case IX86_BUILTIN_CMPUNORDPS: {
-    Function *cmpps =
-      Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_cmp_ps);
-    bool flip = false;
-    unsigned PredCode;
-    switch (FnCode) {
-    default: assert(0 && "Unknown fncode!");
-    case IX86_BUILTIN_CMPEQPS: PredCode = 0; break;
-    case IX86_BUILTIN_CMPLTPS: PredCode = 1; break;
-    case IX86_BUILTIN_CMPGTPS: PredCode = 1; flip = true; break;
-    case IX86_BUILTIN_CMPLEPS: PredCode = 2; break;
-    case IX86_BUILTIN_CMPGEPS: PredCode = 2; flip = true; break;
-    case IX86_BUILTIN_CMPUNORDPS: PredCode = 3; break;
-    case IX86_BUILTIN_CMPNEQPS: PredCode = 4; break;
-    case IX86_BUILTIN_CMPNLTPS: PredCode = 5; break;
-    case IX86_BUILTIN_CMPNGTPS: PredCode = 5; flip = true; break;
-    case IX86_BUILTIN_CMPNLEPS: PredCode = 6; break;
-    case IX86_BUILTIN_CMPNGEPS: PredCode = 6; flip = true; break;
-    case IX86_BUILTIN_CMPORDPS: PredCode = 7; break;
-    }
-    Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
-    Value *Arg0 = Ops[0];
-    Value *Arg1 = Ops[1];
-    if (flip) std::swap(Arg0, Arg1);
-    Value *CallOps[3] = { Arg0, Arg1, Pred };
-    Result = Builder.CreateCall(cmpps, CallOps, CallOps+3, "tmp");
-    Result = Builder.CreateBitCast(Result, ResultType, "tmp");
-    return true;
-  }
-  case IX86_BUILTIN_CMPEQSS:
-  case IX86_BUILTIN_CMPLTSS:
-  case IX86_BUILTIN_CMPLESS:
-  case IX86_BUILTIN_CMPNEQSS:
-  case IX86_BUILTIN_CMPNLTSS:
-  case IX86_BUILTIN_CMPNLESS:
-  case IX86_BUILTIN_CMPNGTSS:
-  case IX86_BUILTIN_CMPNGESS:
-  case IX86_BUILTIN_CMPORDSS:
-  case IX86_BUILTIN_CMPUNORDSS: {
-    Function *cmpss =
-      Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_cmp_ss);
-    unsigned PredCode;
-    switch (FnCode) {
-    default: assert(0 && "Unknown fncode");
-    case IX86_BUILTIN_CMPEQSS:    PredCode = 0; break;
-    case IX86_BUILTIN_CMPLTSS:    PredCode = 1; break;
-    case IX86_BUILTIN_CMPLESS:    PredCode = 2; break;
-    case IX86_BUILTIN_CMPUNORDSS: PredCode = 3; break;
-    case IX86_BUILTIN_CMPNEQSS:   PredCode = 4; break;
-    case IX86_BUILTIN_CMPNLTSS:   PredCode = 5; break;
-    case IX86_BUILTIN_CMPNLESS:   PredCode = 6; break;
-    case IX86_BUILTIN_CMPORDSS:   PredCode = 7; break;
-    }
-    Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
-    Value *CallOps[3] = { Ops[0], Ops[1], Pred };
-    Result = Builder.CreateCall(cmpss, CallOps, CallOps+3, "tmp");
-    Result = Builder.CreateBitCast(Result, ResultType, "tmp");
-    return true;
-  }
-  case IX86_BUILTIN_CMPEQPD:
-  case IX86_BUILTIN_CMPLTPD:
-  case IX86_BUILTIN_CMPLEPD:
-  case IX86_BUILTIN_CMPGTPD:
-  case IX86_BUILTIN_CMPGEPD:
-  case IX86_BUILTIN_CMPNEQPD:
-  case IX86_BUILTIN_CMPNLTPD:
-  case IX86_BUILTIN_CMPNLEPD:
-  case IX86_BUILTIN_CMPNGTPD:
-  case IX86_BUILTIN_CMPNGEPD:
-  case IX86_BUILTIN_CMPORDPD:
-  case IX86_BUILTIN_CMPUNORDPD: {
-    Function *cmppd =
-      Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse2_cmp_pd);
-    bool flip = false;
-    unsigned PredCode;
-    switch (FnCode) {
-    default: assert(0 && "Unknown fncode!");
-    case IX86_BUILTIN_CMPEQPD:    PredCode = 0; break;
-    case IX86_BUILTIN_CMPLTPD:    PredCode = 1; break;
-    case IX86_BUILTIN_CMPGTPD:    PredCode = 1; flip = true; break;
-    case IX86_BUILTIN_CMPLEPD:    PredCode = 2; break;
-    case IX86_BUILTIN_CMPGEPD:    PredCode = 2; flip = true; break;
-    case IX86_BUILTIN_CMPUNORDPD: PredCode = 3; break;
-    case IX86_BUILTIN_CMPNEQPD:   PredCode = 4; break;
-    case IX86_BUILTIN_CMPNLTPD:   PredCode = 5; break;
-    case IX86_BUILTIN_CMPNGTPD:   PredCode = 5; flip = true; break;
-    case IX86_BUILTIN_CMPNLEPD:   PredCode = 6; break;
-    case IX86_BUILTIN_CMPNGEPD:   PredCode = 6; flip = true; break;
-    case IX86_BUILTIN_CMPORDPD:   PredCode = 7; break;
-    }
-    Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
-    Value *Arg0 = Ops[0];
-    Value *Arg1 = Ops[1];
-    if (flip) std::swap(Arg0, Arg1);
-
-    Value *CallOps[3] = { Arg0, Arg1, Pred };
-    Result = Builder.CreateCall(cmppd, CallOps, CallOps+3, "tmp");
-    Result = Builder.CreateBitCast(Result, ResultType, "tmp");
-    return true;
-  }
-  case IX86_BUILTIN_CMPEQSD:
-  case IX86_BUILTIN_CMPLTSD:
-  case IX86_BUILTIN_CMPLESD:
-  case IX86_BUILTIN_CMPNEQSD:
-  case IX86_BUILTIN_CMPNLTSD:
-  case IX86_BUILTIN_CMPNLESD:
-  case IX86_BUILTIN_CMPORDSD:
-  case IX86_BUILTIN_CMPUNORDSD: {
-    Function *cmpsd =
-      Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse2_cmp_sd);
-    unsigned PredCode;
-    switch (FnCode) {
-      default: assert(0 && "Unknown fncode");
-    case IX86_BUILTIN_CMPEQSD:    PredCode = 0; break;
-    case IX86_BUILTIN_CMPLTSD:    PredCode = 1; break;
-    case IX86_BUILTIN_CMPLESD:    PredCode = 2; break;
-    case IX86_BUILTIN_CMPUNORDSD: PredCode = 3; break;
-    case IX86_BUILTIN_CMPNEQSD:   PredCode = 4; break;
-    case IX86_BUILTIN_CMPNLTSD:   PredCode = 5; break;
-    case IX86_BUILTIN_CMPNLESD:   PredCode = 6; break;
-    case IX86_BUILTIN_CMPORDSD:   PredCode = 7; break;
-    }
-    Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
-    Value *CallOps[3] = { Ops[0], Ops[1], Pred };
-    Result = Builder.CreateCall(cmpsd, CallOps, CallOps+3, "tmp");
-    Result = Builder.CreateBitCast(Result, ResultType, "tmp");
-    return true;
-  }
-  case IX86_BUILTIN_LDMXCSR: {
-    Function *ldmxcsr =
-      Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_ldmxcsr);
-    Value *Ptr = CreateTemporary(Type::getInt32Ty(Context));
-    Builder.CreateStore(Ops[0], Ptr);
-    Ptr = Builder.CreateBitCast(Ptr,
-                             PointerType::getUnqual(Type::getInt8Ty(Context)), "tmp");
-    Result = Builder.CreateCall(ldmxcsr, Ptr);
-    return true;
-  }
-  case IX86_BUILTIN_STMXCSR: {
-    Function *stmxcsr =
-      Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_stmxcsr);
-    Value *Ptr  = CreateTemporary(Type::getInt32Ty(Context));
-    Value *BPtr = Builder.CreateBitCast(Ptr,
-                             PointerType::getUnqual(Type::getInt8Ty(Context)), "tmp");
-    Builder.CreateCall(stmxcsr, BPtr);
-    
-    Result = Builder.CreateLoad(Ptr, "tmp");
-    return true;
-  }
-  }
-
-  return false;
-}
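
A note on the lowering above: the cmppd/cmpsd instructions behind the
llvm.x86.sse2.cmp.* intrinsics have no greater-than predicate, so the
CMPGT*/CMPGE* builtins are handled by swapping the two operands and using the
less-than/less-equal predicates.  A minimal standalone sketch of the same
identity, written with the usual <emmintrin.h> intrinsics rather than the
plugin's builder calls (hypothetical helper, for illustration only):

  #include <emmintrin.h>

  // Element-wise a > b for two <2 x double> vectors, using only the
  // "less than" predicate -- exactly the flip the lowering above performs.
  static inline __m128d cmpgt_via_flip(__m128d a, __m128d b) {
    return _mm_cmplt_pd(b, a);   // (b < a) is the same mask as (a > b)
  }
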
-
-/* These are defined in i386.c */
-#define MAX_CLASSES 4
-extern "C" enum machine_mode type_natural_mode(tree, CUMULATIVE_ARGS *);
-extern "C" int examine_argument(enum machine_mode, const_tree, int, int*, int*);
-extern "C" int classify_argument(enum machine_mode, const_tree,
-                               enum x86_64_reg_class classes[MAX_CLASSES], int);
-
-/* Target hook for llvm-abi.h. It returns true if an aggregate of the
-   specified type should be passed in memory. This is only called for
-   x86-64. */
-static bool llvm_x86_64_should_pass_aggregate_in_memory(tree TreeType,
-                                                        enum machine_mode Mode){
-  int IntRegs, SSERegs;
-  /* If examine_argument returns 0, then it's passed byval in memory. */
-  int ret = examine_argument(Mode, TreeType, 0, &IntRegs, &SSERegs);
-  if (ret==0)
-    return true;
-  if (ret==1 && IntRegs==0 && SSERegs==0)   // zero-sized struct
-    return true;
-  return false;
-}
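
For concreteness, two aggregates and how the check above treats them on
x86-64 (the type definitions are illustrative assumptions, not plugin code):

  // 24 bytes -> would need three eightbytes, so examine_argument() returns 0
  // and the struct is passed byval on the stack.
  struct ThreeWords { long a, b, c; };

  // 16 bytes -> two INTEGER eightbytes, passed in two GPRs, not in memory.
  struct TwoWords { long a, b; };
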
-
-/* Returns true if all elements of the type are integer types. */
-static bool llvm_x86_is_all_integer_types(const Type *Ty) {
-  for (Type::subtype_iterator I = Ty->subtype_begin(), E = Ty->subtype_end();
-       I != E; ++I) {
-    const Type *STy = I->get();
-    if (!STy->isIntOrIntVector() && !isa<PointerType>(STy))
-      return false;
-  }
-  return true;
-}
-
-/* Target hook for llvm-abi.h. It returns true if an aggregate of the
-   specified type should be passed in a number of registers of mixed types.
-   It also returns a vector of types that correspond to the registers used
-   for parameter passing. This is only called for x86-32. */
-bool
-llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree TreeType, const Type *Ty,
-                                                std::vector<const Type*> &Elts){
-  // If this is a small fixed size type, investigate it.
-  HOST_WIDE_INT SrcSize = int_size_in_bytes(TreeType);
-  if (SrcSize <= 0 || SrcSize > 16)
-    return false;
-
-  // X86-32 passes aggregates on the stack.  If this is an extremely simple
-  // aggregate whose elements would be passed the same if passed as scalars,
-  // pass them that way in order to promote SROA on the caller and callee side.
-  // Note that we can't support passing all structs this way.  For example,
-  // {i16, i16} should be passed as one 32-bit unit, which is not how two i16s
-  // would be passed as stand-alone arguments.
-  const StructType *STy = dyn_cast<StructType>(Ty);
-  if (!STy || STy->isPacked()) return false;
-
-  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
-    const Type *EltTy = STy->getElementType(i);
-    // 32- and 64-bit integers are fine, as are float and double.  Long double
-    // (which can be picked as the type for a union of 16 bytes) is not fine,
-    // as loads and stores of it only touch 10 of its bytes.
-    if (EltTy == Type::getInt32Ty(Context) ||
-        EltTy == Type::getInt64Ty(Context) || 
-        EltTy == Type::getFloatTy(Context) ||
-        EltTy == Type::getDoubleTy(Context) ||
-        isa<PointerType>(EltTy)) {
-      Elts.push_back(EltTy);
-      continue;
-    }
-    
-    // TODO: Vectors are also ok to pass if they don't require extra alignment.
-    // TODO: We can also pass structs like {i8, i32}.
-    
-    Elts.clear();
-    return false;
-  }
-  
-  return true;
-}  
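
Some illustrative examples of what does and does not qualify under the rule
above on x86-32 (assumed C types, not taken from the plugin's tests):

  // Qualifies: every element is i32/i64/float/double/pointer, so the struct
  // is decomposed into { i32, double } and passed as two scalar arguments.
  struct Ok { int i; double d; };

  // Rejected: long double elements are not allowed, since loads and stores
  // of x86_fp80 only touch 10 bytes.
  struct NotOk1 { long double ld; };

  // Rejected: i16 elements are not in the allowed list ({i16, i16} must stay
  // one 32-bit unit), so this falls back to the default byval path.
  struct NotOk2 { short a, b; };
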
-
-/* It returns true if an aggregate of the specified type should be passed as a
-   first class aggregate. */
-bool llvm_x86_should_pass_aggregate_as_fca(tree type, const Type *Ty) {
-  if (TREE_CODE(type) != COMPLEX_TYPE)
-    return false;
-  const StructType *STy = dyn_cast<StructType>(Ty);
-  if (!STy || STy->isPacked()) return false;
-
-  // FIXME: Currently codegen isn't lowering most _Complex types in a way that
-  // makes them ABI compatible for x86-64. The same goes for _Complex char and
-  // _Complex short in 32-bit mode.
-  const Type *EltTy = STy->getElementType(0);
-  return !((TARGET_64BIT && (EltTy->isInteger() ||
-                             EltTy == Type::getFloatTy(Context) ||
-                             EltTy == Type::getDoubleTy(Context))) ||
-           EltTy == Type::getInt16Ty(Context) ||
-           EltTy == Type::getInt8Ty(Context));
-}
-
-/* Target hook for llvm-abi.h. It returns true if an aggregate of the
-   specified type should be passed in memory. */
-bool llvm_x86_should_pass_aggregate_in_memory(tree TreeType, const Type *Ty) {
-  if (llvm_x86_should_pass_aggregate_as_fca(TreeType, Ty))
-    return false;
-
-  enum machine_mode Mode = type_natural_mode(TreeType, NULL);
-  HOST_WIDE_INT Bytes =
-    (Mode == BLKmode) ? int_size_in_bytes(TreeType) : (int) GET_MODE_SIZE(Mode);
-
-  // A zero-sized array, struct, or class is not passed in memory.
-  if (Bytes == 0)
-    return false;
-
-  if (!TARGET_64BIT) {
-    std::vector<const Type*> Elts;
-    return !llvm_x86_32_should_pass_aggregate_in_mixed_regs(TreeType, Ty, Elts);
-  }
-  return llvm_x86_64_should_pass_aggregate_in_memory(TreeType, Mode);
-}
-
-/* count_num_registers_uses - Return the number of GPR and XMM parameter
-   registers used so far.  The caller is responsible for initializing the
-   outputs. */
-static void count_num_registers_uses(std::vector<const Type*> &ScalarElts,
-                                     unsigned &NumGPRs, unsigned &NumXMMs) {
-  for (unsigned i = 0, e = ScalarElts.size(); i != e; ++i) {
-    const Type *Ty = ScalarElts[i];
-    if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
-      if (!TARGET_MACHO)
-        continue;
-      if (VTy->getNumElements() == 1)
-        // v1i64 is passed in GPRs on Darwin.
-        ++NumGPRs;
-      else
-        // All other vector scalar values are passed in XMM registers.
-        ++NumXMMs;
-    } else if (Ty->isInteger() || isa<PointerType>(Ty)) {
-      ++NumGPRs;
-    } else if (Ty==Type::getVoidTy(Context)) {
-      // Padding bytes that are not passed anywhere
-      ;
-    } else {
-      // Floating point scalar argument.
-      assert(Ty->isFloatingPoint() && Ty->isPrimitiveType() &&
-             "Expecting a floating point primitive type!");
-      if (Ty->getTypeID() == Type::FloatTyID
-          || Ty->getTypeID() == Type::DoubleTyID)
-        ++NumXMMs;
-    }
-  }
-}
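
A worked example of the counting (assumed element list, Darwin target):

  // ScalarElts = { i8*, i64, double, <4 x float>, void }
  //   i8*          -> 1 GPR   (pointer)
  //   i64          -> 1 GPR   (integer)
  //   double       -> 1 XMM   (floating point scalar)
  //   <4 x float>  -> 1 XMM   (vector; only counted when TARGET_MACHO)
  //   void         -> nothing (padding placeholder)
  // Result: NumGPRs = 2, NumXMMs = 2.
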
-
-/* Target hook for llvm-abi.h. This is called when an aggregate is being passed
-   in registers. If there are only enough available parameter registers to pass
-   part of the aggregate, return true. That means the aggregate should instead
-   be passed in memory. */
-bool
-llvm_x86_64_aggregate_partially_passed_in_regs(std::vector<const Type*> &Elts,
-                                         std::vector<const Type*> &ScalarElts,
-                                         bool isShadowReturn) {
-  // Count the number of GPRs and XMMs used so far. According to the AMD64 ABI
-  // document: "If there are no registers available for any eightbyte of an
-  // argument, the whole argument is passed on the stack." X86-64 uses 6
-  // integer registers and 8 XMM registers for parameter passing.
-  // For example, if two GPRs are required but only one is available, then
-  // both parts will be in memory.
-  // FIXME: This is a temporary solution. To be removed when llvm has first
-  // class aggregate values.
-  unsigned NumGPRs = isShadowReturn ? 1 : 0;
-  unsigned NumXMMs = 0;
-  count_num_registers_uses(ScalarElts, NumGPRs, NumXMMs);
-
-  unsigned NumGPRsNeeded = 0;
-  unsigned NumXMMsNeeded = 0;
-  count_num_registers_uses(Elts, NumGPRsNeeded, NumXMMsNeeded);
-
-  bool GPRsSatisfied = true;
-  if (NumGPRsNeeded) {
-    if (NumGPRs < 6) {
-      if ((NumGPRs + NumGPRsNeeded) > 6)
-        // Only partially satisfied.
-        return true;
-    } else
-      GPRsSatisfied = false;
-  }
-
-  bool XMMsSatisfied = true;
-  if (NumXMMsNeeded) {
-    if (NumXMMs < 8) {
-      if ((NumXMMs + NumXMMsNeeded) > 8)
-        // Only partially satisfied.
-        return true;
-    } else
-      XMMsSatisfied = false;
-  }
-
-  return !GPRsSatisfied || !XMMsSatisfied;
-}
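
The same decision, condensed into a standalone sketch.  This is a
hypothetical helper for illustration, assuming the AMD64 convention used
above of 6 integer and 8 XMM argument registers:

  // True when the aggregate would only partially fit in the remaining
  // registers (or none are left), i.e. it must go to the stack instead.
  static bool aggregate_partially_passed(unsigned UsedGPRs, unsigned UsedXMMs,
                                         unsigned NeededGPRs,
                                         unsigned NeededXMMs) {
    if (NeededGPRs && (UsedGPRs >= 6 || UsedGPRs + NeededGPRs > 6))
      return true;
    if (NeededXMMs && (UsedXMMs >= 8 || UsedXMMs + NeededXMMs > 8))
      return true;
    return false;
  }

  // e.g. aggregate_partially_passed(5, 0, 2, 0) == true: only one GPR is
  // free but two are needed, so the whole aggregate goes to the stack.
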
-
-/* Target hook for llvm-abi.h. It returns true if an aggregate of the
-   specified type should be passed in a number of registers of mixed types.
-   It also returns a vector of types that correspond to the registers used
-   for parameter passing. This is only called for x86-64. */
-bool
-llvm_x86_64_should_pass_aggregate_in_mixed_regs(tree TreeType, const Type *Ty,
-                                                std::vector<const Type*> &Elts){
-  if (llvm_x86_should_pass_aggregate_as_fca(TreeType, Ty))
-    return false;
-
-  enum x86_64_reg_class Class[MAX_CLASSES];
-  enum machine_mode Mode = type_natural_mode(TreeType, NULL);
-  bool totallyEmpty = true;
-  HOST_WIDE_INT Bytes =
-    (Mode == BLKmode) ? int_size_in_bytes(TreeType) : (int) GET_MODE_SIZE(Mode);
-  int NumClasses = classify_argument(Mode, TreeType, Class, 0);
-  if (!NumClasses)
-    return false;
-
-  if (NumClasses == 1 && Class[0] == X86_64_INTEGERSI_CLASS)
-    // This will fit in one i32 register.
-    return false;
-
-  for (int i = 0; i < NumClasses; ++i) {
-    switch (Class[i]) {
-    case X86_64_INTEGER_CLASS:
-    case X86_64_INTEGERSI_CLASS:
-      Elts.push_back(Type::getInt64Ty(Context));
-      totallyEmpty = false;
-      Bytes -= 8;
-      break;
-    case X86_64_SSE_CLASS:
-      totallyEmpty = false;
-      // If it's an SSE class argument, then one of the following is possible:
-      // 1. 1 x SSE, size is 8: 1 x Double.
-      // 2. 1 x SSE, size is 4: 1 x Float.
-      // 3. 1 x SSE + 1 x SSEUP, size is 16: 1 x <4 x i32>, <4 x f32>,
-      //                                         <2 x i64>, or <2 x f64>.
-      // 4. 1 x SSE + 1 x SSESF, size is 12: 1 x Double, 1 x Float.
-      // 5. 2 x SSE, size is 16: 2 x Double.
-      if ((NumClasses-i) == 1) {
-        if (Bytes == 8) {
-          Elts.push_back(Type::getDoubleTy(Context));
-          Bytes -= 8;
-        } else if (Bytes == 4) {
-          Elts.push_back (Type::getFloatTy(Context));
-          Bytes -= 4;
-        } else
-          assert(0 && "Not yet handled!");
-      } else if ((NumClasses-i) == 2) {
-        if (Class[i+1] == X86_64_SSEUP_CLASS) {
-          const Type *Ty = ConvertType(TreeType);
-          if (const StructType *STy = dyn_cast<StructType>(Ty))
-            // Look past the struct wrapper.
-            if (STy->getNumElements() == 1)
-              Ty = STy->getElementType(0);
-          if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
-            if (VTy->getNumElements() == 2) {
-              if (VTy->getElementType()->isInteger()) {
-                Elts.push_back(VectorType::get(Type::getInt64Ty(Context), 2));
-              } else {
-                Elts.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
-              }
-              Bytes -= 8;
-            } else {
-              assert(VTy->getNumElements() == 4);
-              if (VTy->getElementType()->isInteger()) {
-                Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
-              } else {
-                Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
-              }
-              Bytes -= 4;
-            }
-          } else if (llvm_x86_is_all_integer_types(Ty)) {
-            Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
-            Bytes -= 4;
-          } else {
-            Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
-            Bytes -= 4;
-          }
-        } else if (Class[i+1] == X86_64_SSESF_CLASS) {
-          assert(Bytes == 12 && "Not yet handled!");
-          Elts.push_back(Type::getDoubleTy(Context));
-          Elts.push_back(Type::getFloatTy(Context));
-          Bytes -= 12;
-        } else if (Class[i+1] == X86_64_SSE_CLASS) {
-          Elts.push_back(Type::getDoubleTy(Context));
-          Elts.push_back(Type::getDoubleTy(Context));
-          Bytes -= 16;
-        } else if (Class[i+1] == X86_64_SSEDF_CLASS && Bytes == 16) {
-          Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
-          Elts.push_back(Type::getDoubleTy(Context));
-        } else if (Class[i+1] == X86_64_INTEGER_CLASS) {
-          Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
-          Elts.push_back(Type::getInt64Ty(Context));
-        } else if (Class[i+1] == X86_64_NO_CLASS) {
-          // padding bytes, don't pass
-          Elts.push_back(Type::getDoubleTy(Context));
-          Elts.push_back(Type::getVoidTy(Context));
-          Bytes -= 16;
-        } else
-          assert(0 && "Not yet handled!");
-        ++i; // Already handled the next one.
-      } else
-        assert(0 && "Not yet handled!");
-      break;
-    case X86_64_SSESF_CLASS:
-      totallyEmpty = false;
-      Elts.push_back(Type::getFloatTy(Context));
-      Bytes -= 4;
-      break;
-    case X86_64_SSEDF_CLASS:
-      totallyEmpty = false;
-      Elts.push_back(Type::getDoubleTy(Context));
-      Bytes -= 8;
-      break;
-    case X86_64_X87_CLASS:
-    case X86_64_X87UP_CLASS:
-    case X86_64_COMPLEX_X87_CLASS:
-      return false;
-    case X86_64_NO_CLASS:
-      // Padding bytes that are not passed (unless the entire object consists
-      // of padding)
-      Elts.push_back(Type::getVoidTy(Context));
-      Bytes -= 8;
-      break;
-    default: assert(0 && "Unexpected register class!");
-    }
-  }
-
-  return !totallyEmpty;
-}
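
A few worked examples of the decomposition above, following the cases listed
in the SSE comment (illustrative C types; the classes are the usual SysV
x86-64 ones produced by classify_argument):

  // struct { double x, y; }        -> SSE, SSE      -> { double, double }
  // struct { float a, b, c; }      -> SSE, SSESF    -> { double, float }
  // struct { long l; double d; }   -> INTEGER, SSE  -> { i64, double }
  // struct { __m128 v; }           -> SSE, SSEUP    -> { <4 x float> }
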
-
-/* On Darwin x86-32, vectors which are neither MMX nor SSE should be passed as
-   integers.  On Darwin x86-64, such vectors bigger than 128 bits should be
-   passed in memory (byval). */
-bool llvm_x86_should_pass_vector_in_integer_regs(tree type) {
-  if (!TARGET_MACHO)
-    return false;
-  if (TREE_CODE(type) == VECTOR_TYPE &&
-      TYPE_SIZE(type) &&
-      TREE_CODE(TYPE_SIZE(type))==INTEGER_CST) {
-    if (TREE_INT_CST_LOW(TYPE_SIZE(type))==64 && TARGET_MMX)
-      return false;
-    if (TREE_INT_CST_LOW(TYPE_SIZE(type))==128 && TARGET_SSE)
-      return false;
-    if (TARGET_64BIT && TREE_INT_CST_LOW(TYPE_SIZE(type)) > 128)
-      return false;
-  }
-  return true;
-}
-
-/* On Darwin x86-64, vectors which are bigger than 128 bits should be passed
-   byval (in memory).  */
-bool llvm_x86_should_pass_vector_using_byval_attr(tree type) {
-  if (!TARGET_MACHO)
-    return false;
-  if (!TARGET_64BIT)
-    return false;
-  if (TREE_CODE(type) == VECTOR_TYPE &&
-      TYPE_SIZE(type) &&
-      TREE_CODE(TYPE_SIZE(type))==INTEGER_CST) {
-    if (TREE_INT_CST_LOW(TYPE_SIZE(type))<=128)
-      return false;
-  }
-  return true;
-}
-
-/* The MMX vector v1i64 is returned in EAX and EDX on Darwin.  Communicate
-    this by returning i64 here.  Likewise, (generic) vectors such as v2i16
-    are returned in EAX.  
-   On Darwin x86-64, v1i64 is returned in RAX and other MMX vectors are 
-    returned in XMM0.  Judging from comments, this would not be right for
-    Win64.  Don't know about Linux.  */
-tree llvm_x86_should_return_vector_as_scalar(tree type, bool isBuiltin) {
-  if (TARGET_MACHO &&
-      !isBuiltin &&
-      TREE_CODE(type) == VECTOR_TYPE &&
-      TYPE_SIZE(type) &&
-      TREE_CODE(TYPE_SIZE(type))==INTEGER_CST) {
-    if (TREE_INT_CST_LOW(TYPE_SIZE(type))==64 &&
-        TYPE_VECTOR_SUBPARTS(type)==1)
-      return uint64_type_node;
-    if (TARGET_64BIT && TREE_INT_CST_LOW(TYPE_SIZE(type))==64)
-      return double_type_node;
-    if (TREE_INT_CST_LOW(TYPE_SIZE(type))==32)
-      return uint32_type_node;
-  }
-  return 0;
-}
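
Expressed with GCC's vector extension (illustrative typedefs, not plugin
code), the Darwin cases above look like this:

  typedef long long v1i64 __attribute__((vector_size(8)));  // 64 bits, 1 elt
  typedef short     v4i16 __attribute__((vector_size(8)));  // 64 bits, 4 elts
  typedef short     v2i16 __attribute__((vector_size(4)));  // 32 bits, 2 elts

  // v1i64 -> returned as a 64-bit integer (EAX:EDX on 32-bit Darwin,
  //          RAX on 64-bit Darwin).
  // v4i16 -> on 64-bit Darwin, returned as double (i.e. in XMM0).
  // v2i16 -> returned as a 32-bit integer in EAX.
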
-
-/* MMX vectors are returned in XMM0 on x86-64 Darwin.  The easiest way to
-   communicate this is to pretend they're doubles.
-   Judging from comments, this would not be right for Win64.  Don't know
-   about Linux.  */
-tree llvm_x86_should_return_selt_struct_as_scalar(tree type) {
-  tree retType = isSingleElementStructOrArray(type, true, false);
-  if (!retType || !TARGET_64BIT || !TARGET_MACHO)
-    return retType;
-  if (TREE_CODE(retType) == VECTOR_TYPE &&
-      TYPE_SIZE(retType) &&
-      TREE_CODE(TYPE_SIZE(retType))==INTEGER_CST &&
-      TREE_INT_CST_LOW(TYPE_SIZE(retType))==64)
-    return double_type_node;
-  return retType;
-}
-
-/* MMX vectors v2i32, v4i16, v8i8, v2f32 are returned using sret on Darwin
-   32-bit.  Vectors bigger than 128 bits are returned using sret.  */
-bool llvm_x86_should_return_vector_as_shadow(tree type, bool isBuiltin) {
-  if (TARGET_MACHO &&
-    !isBuiltin &&
-    !TARGET_64BIT &&
-    TREE_CODE(type) == VECTOR_TYPE &&
-    TYPE_SIZE(type) &&
-    TREE_CODE(TYPE_SIZE(type))==INTEGER_CST) {
-    if (TREE_INT_CST_LOW(TYPE_SIZE(type))==64 &&
-       TYPE_VECTOR_SUBPARTS(type)>1)
-      return true;
-  }
-  if (TREE_INT_CST_LOW(TYPE_SIZE(type))>128)
-    return true;
-  return false;
-}
-
-// llvm_x86_should_not_return_complex_in_memory - Return true if TYPE
-// should be returned using a multiple value return instruction.
-bool llvm_x86_should_not_return_complex_in_memory(tree type) {
-
-  if (!TARGET_64BIT)
-    return false;
-
-  if (TREE_CODE(type) == COMPLEX_TYPE &&
-      TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type)) == 32)
-    return true;
-
-  return false;
-}
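
In practice the _Complex type with TYPE_SIZE_UNIT equal to 32 on x86-64 is
_Complex long double (assuming the usual ABI where long double occupies 16
bytes); it is then returned as the { x86_fp80, x86_fp80 } multiple-value
struct built further below rather than through an sret pointer.  For example:

  _Complex long double f(void);   // 32 bytes -> multiple value return
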
-
-// llvm_suitable_multiple_ret_value_type - Return TRUE if a return value
-// of type TY should be returned using a multiple value return instruction.
-static bool llvm_suitable_multiple_ret_value_type(const Type *Ty,
-                                                  tree TreeType) {
-
-  if (!TARGET_64BIT)
-    return false;
-
-  const StructType *STy = dyn_cast<StructType>(Ty);
-  if (!STy)
-    return false;
-
-  if (llvm_x86_should_not_return_complex_in_memory(TreeType))
-    return true;
-
-  // Let gcc specific routine answer the question.
-  enum x86_64_reg_class Class[MAX_CLASSES];
-  enum machine_mode Mode = type_natural_mode(TreeType, NULL);
-  int NumClasses = classify_argument(Mode, TreeType, Class, 0);
-  if (NumClasses == 0)
-    return false;
-
-  if (NumClasses == 1 && 
-      (Class[0] == X86_64_INTEGERSI_CLASS || Class[0] == X86_64_INTEGER_CLASS))
-    // This will fit in one i64 register.
-    return false;
-
-  if (NumClasses == 2 &&
-      (Class[0] == X86_64_NO_CLASS || Class[1] == X86_64_NO_CLASS))
-    // One word is padding which is not passed at all; treat this as returning
-    // the scalar type of the other word.
-    return false;
-
-  // Otherwise, use of multiple value return is OK.
-  return true;
-}
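
Two illustrative cases for the classification above (assumed C types,
standard SysV x86-64 classes):

  // struct { long l; double d; }  -> INTEGER, SSE -> multiple value return
  //                                  (i64 in RAX, double in XMM0)
  // struct { int a, b; }          -> one INTEGER eightbyte -> returned as a
  //                                  single scalar, no multiple value return
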
-
-// llvm_x86_scalar_type_for_struct_return - Return LLVM type if TYPE
-// can be returned as a scalar, otherwise return NULL.
-const Type *llvm_x86_scalar_type_for_struct_return(tree type, unsigned *Offset) {
-  *Offset = 0;
-  const Type *Ty = ConvertType(type);
-  unsigned Size = getTargetData().getTypeAllocSize(Ty);
-  if (Size == 0)
-    return Type::getVoidTy(Context);
-  else if (Size == 1)
-    return Type::getInt8Ty(Context);
-  else if (Size == 2)
-    return Type::getInt16Ty(Context);
-  else if (Size <= 4)
-    return Type::getInt32Ty(Context);
-
-  // Check if Ty should be returned using multiple value return instruction.
-  if (llvm_suitable_multiple_ret_value_type(Ty, type))
-    return NULL;
-
-  if (TARGET_64BIT) {
-    // This logic relies on llvm_suitable_multiple_ret_value_type to have
-    // removed anything not expected here.
-    enum x86_64_reg_class Class[MAX_CLASSES];
-    enum machine_mode Mode = type_natural_mode(type, NULL);
-    int NumClasses = classify_argument(Mode, type, Class, 0);
-    if (NumClasses == 0)
-      return Type::getInt64Ty(Context);
-
-    if (NumClasses == 1) {
-      if (Class[0] == X86_64_INTEGERSI_CLASS ||
-          Class[0] == X86_64_INTEGER_CLASS) {
-        // one int register
-        HOST_WIDE_INT Bytes =
-          (Mode == BLKmode) ? int_size_in_bytes(type) : 
-                              (int) GET_MODE_SIZE(Mode);
-        if (Bytes>4)
-          return Type::getInt64Ty(Context);
-        else if (Bytes>2)
-          return Type::getInt32Ty(Context);
-        else if (Bytes>1)
-          return Type::getInt16Ty(Context);
-        else
-          return Type::getInt8Ty(Context);
-      }
-      assert(0 && "Unexpected type!"); 
-    }
-    if (NumClasses == 2) {
-      if (Class[1] == X86_64_NO_CLASS) {
-        if (Class[0] == X86_64_INTEGER_CLASS || 
-            Class[0] == X86_64_NO_CLASS ||
-            Class[0] == X86_64_INTEGERSI_CLASS)
-          return Type::getInt64Ty(Context);
-        else if (Class[0] == X86_64_SSE_CLASS || Class[0] == X86_64_SSEDF_CLASS)
-          return Type::getDoubleTy(Context);
-        else if (Class[0] == X86_64_SSESF_CLASS)
-          return Type::getFloatTy(Context);
-        assert(0 && "Unexpected type!");
-      }
-      if (Class[0] == X86_64_NO_CLASS) {
-        *Offset = 8;
-        if (Class[1] == X86_64_INTEGERSI_CLASS ||
-            Class[1] == X86_64_INTEGER_CLASS)
-          return Type::getInt64Ty(Context);
-        else if (Class[1] == X86_64_SSE_CLASS || Class[1] == X86_64_SSEDF_CLASS)
-          return Type::getDoubleTy(Context);
-        else if (Class[1] == X86_64_SSESF_CLASS)
-          return Type::getFloatTy(Context);
-        assert(0 && "Unexpected type!"); 
-      }
-      assert(0 && "Unexpected type!");
-    }
-    assert(0 && "Unexpected type!");
-  } else {
-    if (Size <= 8)
-      return Type::getInt64Ty(Context);
-    else if (Size <= 16)
-      return IntegerType::get(Context, 128);
-    else if (Size <= 32)
-      return IntegerType::get(Context, 256);
-  }
-  return NULL;
-}
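
A quick summary of the size-to-scalar mapping implemented above (worked from
the code, sizes in bytes):

  //  0 -> void      1 -> i8      2 -> i16      3..4 -> i32
  //  x86-64, one INTEGER/INTEGERSI class, 5..8 bytes -> i64
  //  x86-64, data in only one of two eightbytes -> that eightbyte's scalar
  //          (i64/double/float), with *Offset = 8 when it is the second one
  //  x86-32: 5..8 -> i64      9..16 -> i128      17..32 -> i256
  //  anything needing several return registers -> NULL (use the MRV path)
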
-
-/// llvm_x86_64_get_multiple_return_reg_classes - Find register classes used
-/// to return Ty. It is expected that Ty requires multiple return values.
-/// This routine uses GCC implementation to find required register classes.
-/// The original implementation of this routine is based on the
-/// llvm_x86_64_should_pass_aggregate_in_mixed_regs code above.
-void
-llvm_x86_64_get_multiple_return_reg_classes(tree TreeType, const Type *Ty,
-                                            std::vector<const Type*> &Elts){
-  enum x86_64_reg_class Class[MAX_CLASSES];
-  enum machine_mode Mode = type_natural_mode(TreeType, NULL);
-  HOST_WIDE_INT Bytes =
-    (Mode == BLKmode) ? int_size_in_bytes(TreeType) : (int) GET_MODE_SIZE(Mode);
-  int NumClasses = classify_argument(Mode, TreeType, Class, 0);
-  if (!NumClasses)
-     assert(0 && "This type does not need multiple return registers!");
-
-  if (NumClasses == 1 && Class[0] == X86_64_INTEGERSI_CLASS)
-    // This will fit in one i32 register.
-     assert(0 && "This type does not need multiple return registers!");
-
-  if (NumClasses == 1 && Class[0] == X86_64_INTEGER_CLASS)
-     assert(0 && "This type does not need multiple return registers!");
-
-  // classify_argument uses a single X86_64_NO_CLASS as a special case for
-  // empty structs. Recognize it and don't add any return values in that
-  // case.
-  if (NumClasses == 1 && Class[0] == X86_64_NO_CLASS)
-     return;
-
-  for (int i = 0; i < NumClasses; ++i) {
-    switch (Class[i]) {
-    case X86_64_INTEGER_CLASS:
-    case X86_64_INTEGERSI_CLASS:
-      Elts.push_back(Type::getInt64Ty(Context));
-      Bytes -= 8;
-      break;
-    case X86_64_SSE_CLASS:
-      // If it's an SSE class argument, then one of the following is possible:
-      // 1. 1 x SSE, size is 8: 1 x Double.
-      // 2. 1 x SSE, size is 4: 1 x Float.
-      // 3. 1 x SSE + 1 x SSEUP, size is 16: 1 x <4 x i32>, <4 x f32>,
-      //                                         <2 x i64>, or <2 x f64>.
-      // 4. 1 x SSE + 1 x SSESF, size is 12: 1 x Double, 1 x Float.
-      // 5. 2 x SSE, size is 16: 2 x Double.
-      // 6. 1 x SSE, 1 x NO:  Second is padding, pass as double.
-      if ((NumClasses-i) == 1) {
-        if (Bytes == 8) {
-          Elts.push_back(Type::getDoubleTy(Context));
-          Bytes -= 8;
-        } else if (Bytes == 4) {
-          Elts.push_back(Type::getFloatTy(Context));
-          Bytes -= 4;
-        } else
-          assert(0 && "Not yet handled!");
-      } else if ((NumClasses-i) == 2) {
-        if (Class[i+1] == X86_64_SSEUP_CLASS) {
-          const Type *Ty = ConvertType(TreeType);
-          if (const StructType *STy = dyn_cast<StructType>(Ty))
-            // Look past the struct wrapper.
-            if (STy->getNumElements() == 1)
-              Ty = STy->getElementType(0);
-          if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
-            if (VTy->getNumElements() == 2) {
-              if (VTy->getElementType()->isInteger())
-                Elts.push_back(VectorType::get(Type::getInt64Ty(Context), 2));
-              else
-                Elts.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
-              Bytes -= 8;
-            } else {
-              assert(VTy->getNumElements() == 4);
-              if (VTy->getElementType()->isInteger())
-                Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
-              else
-                Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
-              Bytes -= 4;
-            }
-          } else if (llvm_x86_is_all_integer_types(Ty)) {
-            Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
-            Bytes -= 4;
-          } else {
-            Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
-            Bytes -= 4;
-          }
-        } else if (Class[i+1] == X86_64_SSESF_CLASS) {
-          assert(Bytes == 12 && "Not yet handled!");
-          Elts.push_back(Type::getDoubleTy(Context));
-          Elts.push_back(Type::getFloatTy(Context));
-          Bytes -= 12;
-        } else if (Class[i+1] == X86_64_SSE_CLASS) {
-          Elts.push_back(Type::getDoubleTy(Context));
-          Elts.push_back(Type::getDoubleTy(Context));
-          Bytes -= 16;
-        } else if (Class[i+1] == X86_64_SSEDF_CLASS && Bytes == 16) {
-          Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
-          Elts.push_back(Type::getDoubleTy(Context));
-        } else if (Class[i+1] == X86_64_INTEGER_CLASS) {
-          Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
-          Elts.push_back(Type::getInt64Ty(Context));
-        } else if (Class[i+1] == X86_64_NO_CLASS) {
-          Elts.push_back(Type::getDoubleTy(Context));
-          Bytes -= 16;
-        } else {
-          assert(0 && "Not yet handled!");
-        }
-        ++i; // Already handled the next one.
-      } else
-        assert(0 && "Not yet handled!");
-      break;
-    case X86_64_SSESF_CLASS:
-      Elts.push_back(Type::getFloatTy(Context));
-      Bytes -= 4;
-      break;
-    case X86_64_SSEDF_CLASS:
-      Elts.push_back(Type::getDoubleTy(Context));
-      Bytes -= 8;
-      break;
-    case X86_64_X87_CLASS:
-    case X86_64_X87UP_CLASS:
-    case X86_64_COMPLEX_X87_CLASS:
-      Elts.push_back(Type::getX86_FP80Ty(Context));
-      break;
-    case X86_64_NO_CLASS:
-      // padding bytes.
-      Elts.push_back(Type::getInt64Ty(Context));
-      break;
-    default: assert(0 && "Unexpected register class!");
-    }
-  }
-}
-
-// Return LLVM Type if TYPE can be returned as an aggregate, 
-// otherwise return NULL.
-const Type *llvm_x86_aggr_type_for_struct_return(tree type) {
-  const Type *Ty = ConvertType(type);
-  if (!llvm_suitable_multiple_ret_value_type(Ty, type))
-    return NULL;
-
-  const StructType *STy = cast<StructType>(Ty);
-  unsigned NumElements = STy->getNumElements();
-  std::vector<const Type *> ElementTypes;
-
-  // Special handling for _Complex.
-  if (llvm_x86_should_not_return_complex_in_memory(type)) {
-    ElementTypes.push_back(Type::getX86_FP80Ty(Context));
-    ElementTypes.push_back(Type::getX86_FP80Ty(Context));
-    return StructType::get(Context, ElementTypes, STy->isPacked());
-  } 
-
-  std::vector<const Type*> GCCElts;
-  llvm_x86_64_get_multiple_return_reg_classes(type, Ty, GCCElts);
-  return StructType::get(Context, GCCElts, false);
-}
-
-// llvm_x86_extract_mrv_array_element - Helper function that helps extract
-// an array element from a multiple return value.
-//
-// Here, SRC is returning multiple values. DEST's DESTFIELDNO field is an
-// array.  Extract element SRCELEMNO of SRC's SRCFIELDNO value and store it
-// in element DESTELEMNO of DEST's DESTFIELDNO field.
-//
-static void llvm_x86_extract_mrv_array_element(Value *Src, Value *Dest,
-                                               unsigned SrcFieldNo, 
-                                               unsigned SrcElemNo,
-                                               unsigned DestFieldNo, 
-                                               unsigned DestElemNo,
-                                               LLVMBuilder &Builder,
-                                               bool isVolatile) {
-  Value *EVI = Builder.CreateExtractValue(Src, SrcFieldNo, "mrv_gr");
-  const StructType *STy = cast<StructType>(Src->getType());
-  llvm::Value *Idxs[3];
-  Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
-  Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DestFieldNo);
-  Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DestElemNo);
-  Value *GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
-  if (isa<VectorType>(STy->getElementType(SrcFieldNo))) {
-    Value *ElemIndex = ConstantInt::get(Type::getInt32Ty(Context), SrcElemNo);
-    Value *EVIElem = Builder.CreateExtractElement(EVI, ElemIndex, "mrv");
-    Builder.CreateStore(EVIElem, GEP, isVolatile);
-  } else {
-    Builder.CreateStore(EVI, GEP, isVolatile);
-  }
-}
-
-// llvm_x86_extract_multiple_return_value - Extract multiple values returned
-// by SRC and store them in DEST. It is expected that the SRC and
-// DEST types are StructTypes, but they may not match.
-void llvm_x86_extract_multiple_return_value(Value *Src, Value *Dest,
-                                            bool isVolatile,
-                                            LLVMBuilder &Builder) {
-  
-  const StructType *STy = cast<StructType>(Src->getType());
-  unsigned NumElements = STy->getNumElements();
-
-  const PointerType *PTy = cast<PointerType>(Dest->getType());
-  const StructType *DestTy = cast<StructType>(PTy->getElementType());
-
-  unsigned SNO = 0;
-  unsigned DNO = 0;
-
-  if (DestTy->getNumElements() == 3
-      && DestTy->getElementType(0)->getTypeID() == Type::FloatTyID
-      && DestTy->getElementType(1)->getTypeID() == Type::FloatTyID
-      && DestTy->getElementType(2)->getTypeID() == Type::FloatTyID) {
-    // DestTy is { float, float, float }
-    // STy is { <4 x float>, float }
-
-    Value *EVI = Builder.CreateExtractValue(Src, 0, "mrv_gr");
-
-    Value *E0Index = ConstantInt::get(Type::getInt32Ty(Context), 0);
-    Value *EVI0 = Builder.CreateExtractElement(EVI, E0Index, "mrv.v");
-    Value *GEP0 = Builder.CreateStructGEP(Dest, 0, "mrv_gep");
-    Builder.CreateStore(EVI0, GEP0, isVolatile);
-
-    Value *E1Index = ConstantInt::get(Type::getInt32Ty(Context), 1);
-    Value *EVI1 = Builder.CreateExtractElement(EVI, E1Index, "mrv.v");
-    Value *GEP1 = Builder.CreateStructGEP(Dest, 1, "mrv_gep");
-    Builder.CreateStore(EVI1, GEP1, isVolatile);
-
-    Value *GEP2 = Builder.CreateStructGEP(Dest, 2, "mrv_gep");
-    Value *EVI2 = Builder.CreateExtractValue(Src, 1, "mrv_gr");
-    Builder.CreateStore(EVI2, GEP2, isVolatile);
-    return;
-  }
-
-  while (SNO < NumElements) {
-
-    const Type *DestElemType = DestTy->getElementType(DNO);
-
-    // Directly access first class values using getresult.
-    if (DestElemType->isSingleValueType()) {
-      Value *GEP = Builder.CreateStructGEP(Dest, DNO, "mrv_gep");
-      Value *EVI = Builder.CreateExtractValue(Src, SNO, "mrv_gr");
-      Builder.CreateStore(EVI, GEP, isVolatile);
-      ++DNO; ++SNO;
-      continue;
-    } 
-
-    // Special treatment for _Complex.
-    if (const StructType *ComplexType = dyn_cast<StructType>(DestElemType)) {
-      llvm::Value *Idxs[3];
-      Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
-      Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DNO);
-
-      Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
-      Value *GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
-      Value *EVI = Builder.CreateExtractValue(Src, 0, "mrv_gr");
-      Builder.CreateStore(EVI, GEP, isVolatile);
-      ++SNO;
-
-      Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 1);
-      GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
-      EVI = Builder.CreateExtractValue(Src, 1, "mrv_gr");
-      Builder.CreateStore(EVI, GEP, isVolatile);
-      ++DNO; ++SNO;
-      continue;
-    }
-    
-    // Access array elements individually. Note that the Src and Dest types
-    // may not match. For example, { <2 x float>, float } and { [3 x float] }.
-    const ArrayType *ATy = cast<ArrayType>(DestElemType);
-    unsigned ArraySize = ATy->getNumElements();
-    unsigned DElemNo = 0; // DestTy's DNO field's element number
-    while (DElemNo < ArraySize) {
-      unsigned i = 0;
-      unsigned Size = 1;
-      
-      if (const VectorType *SElemTy = 
-          dyn_cast<VectorType>(STy->getElementType(SNO))) {
-        Size = SElemTy->getNumElements();
-        if (SElemTy->getElementType()->getTypeID() == Type::FloatTyID
-            && Size == 4)
-          // Ignore last two <4 x float> elements.
-          Size = 2;
-      }
-      while (i < Size) {
-        llvm_x86_extract_mrv_array_element(Src, Dest, SNO, i++, 
-                                           DNO, DElemNo++, 
-                                           Builder, isVolatile);
-      }
-      // Consumed this src field. Try next one.
-      ++SNO;
-    }
-    // Finished building current dest field. 
-    ++DNO;
-  }
-}
-
-/// llvm_x86_should_pass_aggregate_in_integer_regs - x86-32 is the same as the
-/// default.  x86-64 detects the case where a type is 16 bytes long but
-/// only 8 of them are passed, the rest being padding (*size is set to 8
-/// to identify this case).  It also pads out the size to that of a full
-/// register.  This means we'll be loading bytes off the end of the object
-/// in some cases.  That's what gcc does, so it must be OK, right?  Right?
-bool llvm_x86_should_pass_aggregate_in_integer_regs(tree type, unsigned *size,
-                                                    bool *DontCheckAlignment) {
-  *size = 0;
-  if (TARGET_64BIT) {
-    enum x86_64_reg_class Class[MAX_CLASSES];
-    enum machine_mode Mode = type_natural_mode(type, NULL);
-    int NumClasses = classify_argument(Mode, type, Class, 0);
-    *DontCheckAlignment = true;
-    if (NumClasses == 1 && (Class[0] == X86_64_INTEGER_CLASS ||
-                            Class[0] == X86_64_INTEGERSI_CLASS)) {
-      // one int register
-      HOST_WIDE_INT Bytes =
-        (Mode == BLKmode) ? int_size_in_bytes(type) : (int) GET_MODE_SIZE(Mode);
-      if (Bytes>4)
-        *size = 8;
-      else if (Bytes>2)
-        *size = 4;
-      else
-        *size = Bytes;
-      return true;
-    }
-    if (NumClasses == 2 && (Class[0] == X86_64_INTEGERSI_CLASS ||
-                            Class[0] == X86_64_INTEGER_CLASS)) {
-      if (Class[1] == X86_64_INTEGER_CLASS) {
-        // 16 byte object, 2 int registers
-        *size = 16;
-        return true;
-      }
-      // IntegerSI can occur only as element 0.
-      if (Class[1] == X86_64_NO_CLASS) {
-        // 16 byte object, only 1st register has information
-        *size = 8;
-        return true;
-      }
-    }
-    return false;    
-  }
-  else 
-    return !isSingleElementStructOrArray(type, false, true);
-}
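
One plausible instance of the 16-bytes-with-8-of-padding case the comment
above warns about (the over-aligned type here is an assumption used purely
for illustration):

  // Only the first eightbyte carries data; the second is alignment padding,
  // so the classes come back as INTEGER, NO_CLASS and *size is set to 8.
  struct __attribute__((aligned(16))) Padded { long x; };

  // Both eightbytes carry data: INTEGER, INTEGER, and *size is set to 16.
  struct Pair { long x, y; };
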

Removed: gcc-plugin/trunk/llvm-target.cpp
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/llvm-target.cpp?rev=79560&view=auto

==============================================================================
--- gcc-plugin/trunk/llvm-target.cpp (original)
+++ gcc-plugin/trunk/llvm-target.cpp (removed)
@@ -1 +0,0 @@
-link i386/llvm-i386.cpp
\ No newline at end of file

Removed: gcc-plugin/trunk/llvm-target.h
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/llvm-target.h?rev=79560&view=auto

==============================================================================
--- gcc-plugin/trunk/llvm-target.h (original)
+++ gcc-plugin/trunk/llvm-target.h (removed)
@@ -1 +0,0 @@
-link i386/llvm-i386-target.h
\ No newline at end of file





More information about the llvm-commits mailing list