r209579 - AArch64/ARM64: rename ARM64 components to AArch64

Tim Northover tnorthover at apple.com
Sat May 24 05:52:08 PDT 2014


Author: tnorthover
Date: Sat May 24 07:52:07 2014
New Revision: 209579

URL: http://llvm.org/viewvc/llvm-project?rev=209579&view=rev
Log:
AArch64/ARM64: rename ARM64 components to AArch64

This keeps Clang consistent with backend naming conventions, following the
parallel rename of LLVM's ARM64 backend to AArch64.
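
At source level the rename is mechanical: user-facing intrinsics such as
vqaddq_s32 keep their names, while Clang-internal identifiers and the LLVM
intrinsics they lower to switch prefix. A minimal sketch of the
codegen-visible effect (CGM and Ty assumed from the usual CodeGenFunction
context; not part of the diff below):

  // Before: Intrinsic::arm64_neon_sqadd, printed as "llvm.arm64.neon.sqadd".
  // After:  the same lookup under the aarch64 prefix.
  llvm::Function *F =
      CGM.getIntrinsic(llvm::Intrinsic::aarch64_neon_sqadd, Ty);
  // ...which emits declarations such as:
  //   declare <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32>, <4 x i32>)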

Added:
    cfe/trunk/include/clang/Basic/BuiltinsAArch64.def
      - copied, changed from r209578, cfe/trunk/include/clang/Basic/BuiltinsARM64.def
Removed:
    cfe/trunk/include/clang/Basic/BuiltinsARM64.def
Modified:
    cfe/trunk/docs/LanguageExtensions.rst
    cfe/trunk/include/clang/Basic/TargetBuiltins.h
    cfe/trunk/include/clang/Sema/Sema.h
    cfe/trunk/lib/Basic/Targets.cpp
    cfe/trunk/lib/CodeGen/CGBuiltin.cpp
    cfe/trunk/lib/CodeGen/CGObjCMac.cpp
    cfe/trunk/lib/CodeGen/CodeGenFunction.h
    cfe/trunk/lib/CodeGen/TargetInfo.cpp
    cfe/trunk/lib/Driver/ToolChains.cpp
    cfe/trunk/lib/Driver/Tools.cpp
    cfe/trunk/lib/Frontend/InitHeaderSearch.cpp
    cfe/trunk/lib/Sema/SemaChecking.cpp
    cfe/trunk/test/CodeGen/arm64-crc32.c
    cfe/trunk/test/CodeGen/arm64-vrnd.c
    cfe/trunk/test/CodeGen/arm64-vrsqrt.c
    cfe/trunk/test/CodeGen/arm64_vCMP.c
    cfe/trunk/test/CodeGen/arm64_vLdStNum_lane.c
    cfe/trunk/test/CodeGen/arm64_vMaxMin.c
    cfe/trunk/test/CodeGen/arm64_vadd.c
    cfe/trunk/test/CodeGen/arm64_vca.c
    cfe/trunk/test/CodeGen/arm64_vcreate.c
    cfe/trunk/test/CodeGen/arm64_vcvtfp.c
    cfe/trunk/test/CodeGen/arm64_vneg.c
    cfe/trunk/test/CodeGen/arm64_vset_lane.c
    cfe/trunk/test/CodeGen/arm64_vshift.c
    cfe/trunk/test/CodeGen/arm64_vsli.c
    cfe/trunk/test/CodeGen/arm64_vsri.c
    cfe/trunk/test/CodeGen/builtins-arm-exclusive.c
    cfe/trunk/test/CodeGen/neon-crypto.c
    cfe/trunk/test/Driver/arm-alignment.c

Modified: cfe/trunk/docs/LanguageExtensions.rst
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/docs/LanguageExtensions.rst?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/docs/LanguageExtensions.rst (original)
+++ cfe/trunk/docs/LanguageExtensions.rst Sat May 24 07:52:07 2014
@@ -1566,7 +1566,7 @@ instructions for implementing atomic ope
   void __builtin_arm_clrex(void);
 
 The types ``T`` currently supported are:
-* Integer types with width at most 64 bits (or 128 bits on ARM64).
+* Integer types with width at most 64 bits (or 128 bits on AArch64).
 * Floating-point types
 * Pointer types.
 

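For reference, the exclusive builtins documented in this hunk follow the
usual load-exclusive/store-exclusive retry pattern; a minimal sketch of an
atomic increment built on them (assumes an AArch64 or ARM target; not part
of this commit):

  static inline void atomic_inc(volatile int *P) {
    int Old;
    do {
      Old = __builtin_arm_ldrex(P);            // load-exclusive
    } while (__builtin_arm_strex(Old + 1, P)); // returns 0 on success
  }
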
Copied: cfe/trunk/include/clang/Basic/BuiltinsAArch64.def (from r209578, cfe/trunk/include/clang/Basic/BuiltinsARM64.def)
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/BuiltinsAArch64.def?p2=cfe/trunk/include/clang/Basic/BuiltinsAArch64.def&p1=cfe/trunk/include/clang/Basic/BuiltinsARM64.def&r1=209578&r2=209579&rev=209579&view=diff
==============================================================================
--- cfe/trunk/include/clang/Basic/BuiltinsARM64.def (original)
+++ cfe/trunk/include/clang/Basic/BuiltinsAArch64.def Sat May 24 07:52:07 2014
@@ -1,4 +1,4 @@
-//===--- BuiltinsARM64.def - ARM64 Builtin function database ----*- C++ -*-===//
+//==- BuiltinsAArch64.def - AArch64 Builtin function database ----*- C++ -*-==//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -7,7 +7,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file defines the ARM64-specific builtin function database.  Users of
+// This file defines the AArch64-specific builtin function database.  Users of
 // this file must define the BUILTIN macro to make use of this information.
 //
 //===----------------------------------------------------------------------===//

Removed: cfe/trunk/include/clang/Basic/BuiltinsARM64.def
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/BuiltinsARM64.def?rev=209578&view=auto
==============================================================================
--- cfe/trunk/include/clang/Basic/BuiltinsARM64.def (original)
+++ cfe/trunk/include/clang/Basic/BuiltinsARM64.def (removed)
@@ -1,34 +0,0 @@
-//===--- BuiltinsARM64.def - ARM64 Builtin function database ----*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the ARM64-specific builtin function database.  Users of
-// this file must define the BUILTIN macro to make use of this information.
-//
-//===----------------------------------------------------------------------===//
-
-// The format of this database matches clang/Basic/Builtins.def.
-
-// In libgcc
-BUILTIN(__clear_cache, "vv*v*", "i")
-
-BUILTIN(__builtin_arm_ldrex, "v.", "t")
-BUILTIN(__builtin_arm_strex, "i.", "t")
-BUILTIN(__builtin_arm_clrex, "v", "")
-
-// CRC32
-BUILTIN(__builtin_arm_crc32b, "UiUiUc", "nc")
-BUILTIN(__builtin_arm_crc32cb, "UiUiUc", "nc")
-BUILTIN(__builtin_arm_crc32h, "UiUiUs", "nc")
-BUILTIN(__builtin_arm_crc32ch, "UiUiUs", "nc")
-BUILTIN(__builtin_arm_crc32w, "UiUiUi", "nc")
-BUILTIN(__builtin_arm_crc32cw, "UiUiUi", "nc")
-BUILTIN(__builtin_arm_crc32d, "UiUiLUi", "nc")
-BUILTIN(__builtin_arm_crc32cd, "UiUiLUi", "nc")
-
-#undef BUILTIN

Modified: cfe/trunk/include/clang/Basic/TargetBuiltins.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/TargetBuiltins.h?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/include/clang/Basic/TargetBuiltins.h (original)
+++ cfe/trunk/include/clang/Basic/TargetBuiltins.h Sat May 24 07:52:07 2014
@@ -41,13 +41,13 @@ namespace clang {
     };
   }
 
-  /// \brief ARM64 builtins
-  namespace ARM64 {
+  /// \brief AArch64 builtins
+  namespace AArch64 {
   enum {
     LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1,
     LastNEONBuiltin = NEON::FirstTSBuiltin - 1,
   #define BUILTIN(ID, TYPE, ATTRS) BI##ID,
-  #include "clang/Basic/BuiltinsARM64.def"
+  #include "clang/Basic/BuiltinsAArch64.def"
     LastTSBuiltin
   };
   }

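The .def file is a classic X-macro database: each includer defines BUILTIN
to the expansion it needs, as the enum above does with BI##ID. A minimal
sketch of another consumer (hypothetical name; assumes Clang's include
paths):

  // Expand each BUILTIN(ID, TYPE, ATTRS) record into a string literal.
  // The .def file ends with "#undef BUILTIN", so no cleanup is needed.
  #define BUILTIN(ID, TYPE, ATTRS) #ID,
  static const char *const AArch64BuiltinNames[] = {
  #include "clang/Basic/BuiltinsAArch64.def"
  };
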
Modified: cfe/trunk/include/clang/Sema/Sema.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Sema/Sema.h?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/include/clang/Sema/Sema.h (original)
+++ cfe/trunk/include/clang/Sema/Sema.h Sat May 24 07:52:07 2014
@@ -8125,7 +8125,7 @@ private:
   bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
   bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
 
-  bool CheckARM64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+  bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
   bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
   bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
 

Modified: cfe/trunk/lib/Basic/Targets.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Basic/Targets.cpp?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/lib/Basic/Targets.cpp (original)
+++ cfe/trunk/lib/Basic/Targets.cpp Sat May 24 07:52:07 2014
@@ -4233,7 +4233,7 @@ public:
 
 
 namespace {
-class ARM64TargetInfo : public TargetInfo {
+class AArch64TargetInfo : public TargetInfo {
   virtual void setDescriptionString() = 0;
   static const TargetInfo::GCCRegAlias GCCRegAliases[];
   static const char *const GCCRegNames[];
@@ -4252,7 +4252,7 @@ class ARM64TargetInfo : public TargetInf
   std::string ABI;
 
 public:
-  ARM64TargetInfo(const llvm::Triple &Triple)
+  AArch64TargetInfo(const llvm::Triple &Triple)
       : TargetInfo(Triple), ABI("aapcs") {
 
     if (getTriple().getOS() == llvm::Triple::NetBSD) {
@@ -4283,7 +4283,7 @@ public:
     // specifiers.
     NoAsmVariants = true;
 
-    // ARM64 targets default to using the ARM C++ ABI.
+    // AArch64 targets default to using the ARM C++ ABI.
     TheCXXABI.set(TargetCXXABI::GenericAArch64);
   }
 
@@ -4364,7 +4364,7 @@ public:
   virtual void getTargetBuiltins(const Builtin::Info *&Records,
                                  unsigned &NumRecords) const {
     Records = BuiltinInfo;
-    NumRecords = clang::ARM64::LastTSBuiltin - Builtin::FirstTSBuiltin;
+    NumRecords = clang::AArch64::LastTSBuiltin - Builtin::FirstTSBuiltin;
   }
 
   virtual bool hasFeature(StringRef Feature) const {
@@ -4453,7 +4453,7 @@ public:
   }
 };
 
-const char *const ARM64TargetInfo::GCCRegNames[] = {
+const char *const AArch64TargetInfo::GCCRegNames[] = {
   // 32-bit Integer registers
   "w0",  "w1",  "w2",  "w3",  "w4",  "w5",  "w6",  "w7",  "w8",  "w9",  "w10",
   "w11", "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21",
@@ -4480,13 +4480,13 @@ const char *const ARM64TargetInfo::GCCRe
   "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
 };
 
-void ARM64TargetInfo::getGCCRegNames(const char *const *&Names,
+void AArch64TargetInfo::getGCCRegNames(const char *const *&Names,
                                      unsigned &NumNames) const {
   Names = GCCRegNames;
   NumNames = llvm::array_lengthof(GCCRegNames);
 }
 
-const TargetInfo::GCCRegAlias ARM64TargetInfo::GCCRegAliases[] = {
+const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
   { { "w31" }, "wsp" },
   { { "x29" }, "fp" },
   { { "x30" }, "lr" },
@@ -4495,23 +4495,23 @@ const TargetInfo::GCCRegAlias ARM64Targe
   // don't want to substitute one of these for a different-sized one.
 };
 
-void ARM64TargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
+void AArch64TargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
                                        unsigned &NumAliases) const {
   Aliases = GCCRegAliases;
   NumAliases = llvm::array_lengthof(GCCRegAliases);
 }
 
-const Builtin::Info ARM64TargetInfo::BuiltinInfo[] = {
+const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
 #define BUILTIN(ID, TYPE, ATTRS)                                               \
   { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
 #include "clang/Basic/BuiltinsNEON.def"
 
 #define BUILTIN(ID, TYPE, ATTRS)                                               \
   { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
-#include "clang/Basic/BuiltinsARM64.def"
+#include "clang/Basic/BuiltinsAArch64.def"
 };
 
-class ARM64leTargetInfo : public ARM64TargetInfo {
+class AArch64leTargetInfo : public AArch64TargetInfo {
   void setDescriptionString() override {
     if (getTriple().isOSBinFormatMachO())
       DescriptionString = "e-m:o-i64:64-i128:128-n32:64-S128";
@@ -4520,38 +4520,38 @@ class ARM64leTargetInfo : public ARM64Ta
   }
 
 public:
-  ARM64leTargetInfo(const llvm::Triple &Triple)
-    : ARM64TargetInfo(Triple) {
+  AArch64leTargetInfo(const llvm::Triple &Triple)
+    : AArch64TargetInfo(Triple) {
     BigEndian = false;
     }
   void getTargetDefines(const LangOptions &Opts,
                         MacroBuilder &Builder) const override {
     Builder.defineMacro("__AARCH64EL__");
-    ARM64TargetInfo::getTargetDefines(Opts, Builder);
+    AArch64TargetInfo::getTargetDefines(Opts, Builder);
   }
 };
 
-class ARM64beTargetInfo : public ARM64TargetInfo {
+class AArch64beTargetInfo : public AArch64TargetInfo {
   void setDescriptionString() override {
     assert(!getTriple().isOSBinFormatMachO());
     DescriptionString = "E-m:e-i64:64-i128:128-n32:64-S128";
   }
 
 public:
-  ARM64beTargetInfo(const llvm::Triple &Triple)
-    : ARM64TargetInfo(Triple) { }
+  AArch64beTargetInfo(const llvm::Triple &Triple)
+    : AArch64TargetInfo(Triple) { }
   void getTargetDefines(const LangOptions &Opts,
                         MacroBuilder &Builder) const override {
     Builder.defineMacro("__AARCH64EB__");
     Builder.defineMacro("__AARCH_BIG_ENDIAN");
     Builder.defineMacro("__ARM_BIG_ENDIAN");
-    ARM64TargetInfo::getTargetDefines(Opts, Builder);
+    AArch64TargetInfo::getTargetDefines(Opts, Builder);
   }
 };
 } // end anonymous namespace.
 
 namespace {
-class DarwinARM64TargetInfo : public DarwinTargetInfo<ARM64leTargetInfo> {
+class DarwinAArch64TargetInfo : public DarwinTargetInfo<AArch64leTargetInfo> {
 protected:
   void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
                     MacroBuilder &Builder) const override {
@@ -4567,8 +4567,8 @@ protected:
   }
 
 public:
-  DarwinARM64TargetInfo(const llvm::Triple &Triple)
-      : DarwinTargetInfo<ARM64leTargetInfo>(Triple) {
+  DarwinAArch64TargetInfo(const llvm::Triple &Triple)
+      : DarwinTargetInfo<AArch64leTargetInfo>(Triple) {
     Int64Type = SignedLongLong;
     WCharType = SignedInt;
     UseSignedCharForObjCBool = false;
@@ -5917,25 +5917,25 @@ static TargetInfo *AllocateTarget(const
 
   case llvm::Triple::arm64:
     if (Triple.isOSDarwin())
-      return new DarwinARM64TargetInfo(Triple);
+      return new DarwinAArch64TargetInfo(Triple);
 
     switch (os) {
     case llvm::Triple::Linux:
-      return new LinuxTargetInfo<ARM64leTargetInfo>(Triple);
+      return new LinuxTargetInfo<AArch64leTargetInfo>(Triple);
     case llvm::Triple::NetBSD:
-      return new NetBSDTargetInfo<ARM64leTargetInfo>(Triple);
+      return new NetBSDTargetInfo<AArch64leTargetInfo>(Triple);
     default:
-      return new ARM64leTargetInfo(Triple);
+      return new AArch64leTargetInfo(Triple);
     }
 
   case llvm::Triple::arm64_be:
     switch (os) {
     case llvm::Triple::Linux:
-      return new LinuxTargetInfo<ARM64beTargetInfo>(Triple);
+      return new LinuxTargetInfo<AArch64beTargetInfo>(Triple);
     case llvm::Triple::NetBSD:
-      return new NetBSDTargetInfo<ARM64beTargetInfo>(Triple);
+      return new NetBSDTargetInfo<AArch64beTargetInfo>(Triple);
     default:
-      return new ARM64beTargetInfo(Triple);
+      return new AArch64beTargetInfo(Triple);
     }
 
   case llvm::Triple::xcore:
@@ -5947,21 +5947,21 @@ static TargetInfo *AllocateTarget(const
   case llvm::Triple::aarch64:
     switch (os) {
     case llvm::Triple::Linux:
-      return new LinuxTargetInfo<ARM64leTargetInfo>(Triple);
+      return new LinuxTargetInfo<AArch64leTargetInfo>(Triple);
     case llvm::Triple::NetBSD:
-      return new NetBSDTargetInfo<ARM64leTargetInfo>(Triple);
+      return new NetBSDTargetInfo<AArch64leTargetInfo>(Triple);
     default:
-      return new ARM64leTargetInfo(Triple);
+      return new AArch64leTargetInfo(Triple);
     }
 
   case llvm::Triple::aarch64_be:
     switch (os) {
     case llvm::Triple::Linux:
-      return new LinuxTargetInfo<ARM64beTargetInfo>(Triple);
+      return new LinuxTargetInfo<AArch64beTargetInfo>(Triple);
     case llvm::Triple::NetBSD:
-      return new NetBSDTargetInfo<ARM64beTargetInfo>(Triple);
+      return new NetBSDTargetInfo<AArch64beTargetInfo>(Triple);
     default:
-      return new ARM64beTargetInfo(Triple);
+      return new AArch64beTargetInfo(Triple);
     }
 
   case llvm::Triple::arm:

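Only the class names move in this file; the triple-based dispatch in
AllocateTarget is unchanged, so both spellings of the 64-bit ARM triple
keep reaching the same targets. A small sketch of the equivalence
(simplified from the switch above):

  llvm::Triple A("arm64-apple-darwin"); // Triple::arm64   -> DarwinAArch64TargetInfo
  llvm::Triple B("aarch64-linux-gnu");  // Triple::aarch64 -> LinuxTargetInfo<AArch64leTargetInfo>
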
Modified: cfe/trunk/lib/CodeGen/CGBuiltin.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGBuiltin.cpp?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGBuiltin.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGBuiltin.cpp Sat May 24 07:52:07 2014
@@ -1646,7 +1646,7 @@ Value *CodeGenFunction::EmitTargetBuilti
   case llvm::Triple::aarch64_be:
   case llvm::Triple::arm64:
   case llvm::Triple::arm64_be:
-    return EmitARM64BuiltinExpr(BuiltinID, E);
+    return EmitAArch64BuiltinExpr(BuiltinID, E);
   case llvm::Triple::x86:
   case llvm::Triple::x86_64:
     return EmitX86BuiltinExpr(BuiltinID, E);
@@ -2079,109 +2079,109 @@ static NeonIntrinsicInfo ARMSIMDIntrinsi
   NEONMAP0(vzipq_v)
 };
 
-static NeonIntrinsicInfo ARM64SIMDIntrinsicMap[] = {
-  NEONMAP1(vabs_v, arm64_neon_abs, 0),
-  NEONMAP1(vabsq_v, arm64_neon_abs, 0),
+static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
+  NEONMAP1(vabs_v, aarch64_neon_abs, 0),
+  NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
   NEONMAP0(vaddhn_v),
-  NEONMAP1(vaesdq_v, arm64_crypto_aesd, 0),
-  NEONMAP1(vaeseq_v, arm64_crypto_aese, 0),
-  NEONMAP1(vaesimcq_v, arm64_crypto_aesimc, 0),
-  NEONMAP1(vaesmcq_v, arm64_crypto_aesmc, 0),
-  NEONMAP1(vcage_v, arm64_neon_facge, 0),
-  NEONMAP1(vcageq_v, arm64_neon_facge, 0),
-  NEONMAP1(vcagt_v, arm64_neon_facgt, 0),
-  NEONMAP1(vcagtq_v, arm64_neon_facgt, 0),
-  NEONMAP1(vcale_v, arm64_neon_facge, 0),
-  NEONMAP1(vcaleq_v, arm64_neon_facge, 0),
-  NEONMAP1(vcalt_v, arm64_neon_facgt, 0),
-  NEONMAP1(vcaltq_v, arm64_neon_facgt, 0),
-  NEONMAP1(vcls_v, arm64_neon_cls, Add1ArgType),
-  NEONMAP1(vclsq_v, arm64_neon_cls, Add1ArgType),
+  NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
+  NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
+  NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
+  NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
+  NEONMAP1(vcage_v, aarch64_neon_facge, 0),
+  NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
+  NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
+  NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
+  NEONMAP1(vcale_v, aarch64_neon_facge, 0),
+  NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
+  NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
+  NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
+  NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
+  NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
   NEONMAP1(vclz_v, ctlz, Add1ArgType),
   NEONMAP1(vclzq_v, ctlz, Add1ArgType),
   NEONMAP1(vcnt_v, ctpop, Add1ArgType),
   NEONMAP1(vcntq_v, ctpop, Add1ArgType),
-  NEONMAP1(vcvt_f16_v, arm64_neon_vcvtfp2hf, 0),
-  NEONMAP1(vcvt_f32_f16, arm64_neon_vcvthf2fp, 0),
+  NEONMAP1(vcvt_f16_v, aarch64_neon_vcvtfp2hf, 0),
+  NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
   NEONMAP0(vcvt_f32_v),
-  NEONMAP2(vcvt_n_f32_v, arm64_neon_vcvtfxu2fp, arm64_neon_vcvtfxs2fp, 0),
-  NEONMAP2(vcvt_n_f64_v, arm64_neon_vcvtfxu2fp, arm64_neon_vcvtfxs2fp, 0),
-  NEONMAP1(vcvt_n_s32_v, arm64_neon_vcvtfp2fxs, 0),
-  NEONMAP1(vcvt_n_s64_v, arm64_neon_vcvtfp2fxs, 0),
-  NEONMAP1(vcvt_n_u32_v, arm64_neon_vcvtfp2fxu, 0),
-  NEONMAP1(vcvt_n_u64_v, arm64_neon_vcvtfp2fxu, 0),
+  NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+  NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+  NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
+  NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
+  NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
+  NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
   NEONMAP0(vcvtq_f32_v),
-  NEONMAP2(vcvtq_n_f32_v, arm64_neon_vcvtfxu2fp, arm64_neon_vcvtfxs2fp, 0),
-  NEONMAP2(vcvtq_n_f64_v, arm64_neon_vcvtfxu2fp, arm64_neon_vcvtfxs2fp, 0),
-  NEONMAP1(vcvtq_n_s32_v, arm64_neon_vcvtfp2fxs, 0),
-  NEONMAP1(vcvtq_n_s64_v, arm64_neon_vcvtfp2fxs, 0),
-  NEONMAP1(vcvtq_n_u32_v, arm64_neon_vcvtfp2fxu, 0),
-  NEONMAP1(vcvtq_n_u64_v, arm64_neon_vcvtfp2fxu, 0),
-  NEONMAP1(vcvtx_f32_v, arm64_neon_fcvtxn, AddRetType | Add1ArgType),
+  NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+  NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+  NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
+  NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
+  NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
+  NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
+  NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
   NEONMAP0(vext_v),
   NEONMAP0(vextq_v),
   NEONMAP0(vfma_v),
   NEONMAP0(vfmaq_v),
-  NEONMAP2(vhadd_v, arm64_neon_uhadd, arm64_neon_shadd, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vhaddq_v, arm64_neon_uhadd, arm64_neon_shadd, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vhsub_v, arm64_neon_uhsub, arm64_neon_shsub, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vhsubq_v, arm64_neon_uhsub, arm64_neon_shsub, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
   NEONMAP0(vmovl_v),
   NEONMAP0(vmovn_v),
-  NEONMAP1(vmul_v, arm64_neon_pmul, Add1ArgType),
-  NEONMAP1(vmulq_v, arm64_neon_pmul, Add1ArgType),
-  NEONMAP1(vpadd_v, arm64_neon_addp, Add1ArgType),
-  NEONMAP2(vpaddl_v, arm64_neon_uaddlp, arm64_neon_saddlp, UnsignedAlts),
-  NEONMAP2(vpaddlq_v, arm64_neon_uaddlp, arm64_neon_saddlp, UnsignedAlts),
-  NEONMAP1(vpaddq_v, arm64_neon_addp, Add1ArgType),
-  NEONMAP1(vqabs_v, arm64_neon_sqabs, Add1ArgType),
-  NEONMAP1(vqabsq_v, arm64_neon_sqabs, Add1ArgType),
-  NEONMAP2(vqadd_v, arm64_neon_uqadd, arm64_neon_sqadd, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vqaddq_v, arm64_neon_uqadd, arm64_neon_sqadd, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vqdmlal_v, arm64_neon_sqdmull, arm64_neon_sqadd, 0),
-  NEONMAP2(vqdmlsl_v, arm64_neon_sqdmull, arm64_neon_sqsub, 0),
-  NEONMAP1(vqdmulh_v, arm64_neon_sqdmulh, Add1ArgType),
-  NEONMAP1(vqdmulhq_v, arm64_neon_sqdmulh, Add1ArgType),
-  NEONMAP1(vqdmull_v, arm64_neon_sqdmull, Add1ArgType),
-  NEONMAP2(vqmovn_v, arm64_neon_uqxtn, arm64_neon_sqxtn, Add1ArgType | UnsignedAlts),
-  NEONMAP1(vqmovun_v, arm64_neon_sqxtun, Add1ArgType),
-  NEONMAP1(vqneg_v, arm64_neon_sqneg, Add1ArgType),
-  NEONMAP1(vqnegq_v, arm64_neon_sqneg, Add1ArgType),
-  NEONMAP1(vqrdmulh_v, arm64_neon_sqrdmulh, Add1ArgType),
-  NEONMAP1(vqrdmulhq_v, arm64_neon_sqrdmulh, Add1ArgType),
-  NEONMAP2(vqrshl_v, arm64_neon_uqrshl, arm64_neon_sqrshl, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vqrshlq_v, arm64_neon_uqrshl, arm64_neon_sqrshl, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vqshl_n_v, arm64_neon_uqshl, arm64_neon_sqshl, UnsignedAlts),
-  NEONMAP2(vqshl_v, arm64_neon_uqshl, arm64_neon_sqshl, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vqshlq_n_v, arm64_neon_uqshl, arm64_neon_sqshl,UnsignedAlts),
-  NEONMAP2(vqshlq_v, arm64_neon_uqshl, arm64_neon_sqshl, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vqsub_v, arm64_neon_uqsub, arm64_neon_sqsub, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vqsubq_v, arm64_neon_uqsub, arm64_neon_sqsub, Add1ArgType | UnsignedAlts),
-  NEONMAP1(vraddhn_v, arm64_neon_raddhn, Add1ArgType),
-  NEONMAP2(vrecpe_v, arm64_neon_frecpe, arm64_neon_urecpe, 0),
-  NEONMAP2(vrecpeq_v, arm64_neon_frecpe, arm64_neon_urecpe, 0),
-  NEONMAP1(vrecps_v, arm64_neon_frecps, Add1ArgType),
-  NEONMAP1(vrecpsq_v, arm64_neon_frecps, Add1ArgType),
-  NEONMAP2(vrhadd_v, arm64_neon_urhadd, arm64_neon_srhadd, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vrhaddq_v, arm64_neon_urhadd, arm64_neon_srhadd, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vrshl_v, arm64_neon_urshl, arm64_neon_srshl, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vrshlq_v, arm64_neon_urshl, arm64_neon_srshl, Add1ArgType | UnsignedAlts),
-  NEONMAP2(vrsqrte_v, arm64_neon_frsqrte, arm64_neon_ursqrte, 0),
-  NEONMAP2(vrsqrteq_v, arm64_neon_frsqrte, arm64_neon_ursqrte, 0),
-  NEONMAP1(vrsqrts_v, arm64_neon_frsqrts, Add1ArgType),
-  NEONMAP1(vrsqrtsq_v, arm64_neon_frsqrts, Add1ArgType),
-  NEONMAP1(vrsubhn_v, arm64_neon_rsubhn, Add1ArgType),
-  NEONMAP1(vsha1su0q_v, arm64_crypto_sha1su0, 0),
-  NEONMAP1(vsha1su1q_v, arm64_crypto_sha1su1, 0),
-  NEONMAP1(vsha256h2q_v, arm64_crypto_sha256h2, 0),
-  NEONMAP1(vsha256hq_v, arm64_crypto_sha256h, 0),
-  NEONMAP1(vsha256su0q_v, arm64_crypto_sha256su0, 0),
-  NEONMAP1(vsha256su1q_v, arm64_crypto_sha256su1, 0),
+  NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
+  NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
+  NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
+  NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
+  NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
+  NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
+  NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
+  NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
+  NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
+  NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
+  NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
+  NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
+  NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
+  NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
+  NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
+  NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
+  NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
+  NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
+  NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
+  NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
+  NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl,UnsignedAlts),
+  NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
+  NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
+  NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
+  NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
+  NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
+  NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
+  NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
+  NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
+  NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
+  NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
+  NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
+  NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
+  NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
+  NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
+  NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
+  NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
+  NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
   NEONMAP0(vshl_n_v),
-  NEONMAP2(vshl_v, arm64_neon_ushl, arm64_neon_sshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
   NEONMAP0(vshll_n_v),
   NEONMAP0(vshlq_n_v),
-  NEONMAP2(vshlq_v, arm64_neon_ushl, arm64_neon_sshl, Add1ArgType | UnsignedAlts),
+  NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
   NEONMAP0(vshr_n_v),
   NEONMAP0(vshrn_n_v),
   NEONMAP0(vshrq_n_v),
@@ -2190,199 +2190,199 @@ static NeonIntrinsicInfo ARM64SIMDIntrin
   NEONMAP0(vtstq_v),
 };
 
-static NeonIntrinsicInfo ARM64SISDIntrinsicMap[] = {
-  NEONMAP1(vabdd_f64, arm64_sisd_fabd, Add1ArgType),
-  NEONMAP1(vabds_f32, arm64_sisd_fabd, Add1ArgType),
-  NEONMAP1(vabsd_s64, arm64_neon_abs, Add1ArgType),
-  NEONMAP1(vaddlv_s32, arm64_neon_saddlv, AddRetType | Add1ArgType),
-  NEONMAP1(vaddlv_u32, arm64_neon_uaddlv, AddRetType | Add1ArgType),
-  NEONMAP1(vaddlvq_s32, arm64_neon_saddlv, AddRetType | Add1ArgType),
-  NEONMAP1(vaddlvq_u32, arm64_neon_uaddlv, AddRetType | Add1ArgType),
-  NEONMAP1(vaddv_f32, arm64_neon_faddv, AddRetType | Add1ArgType),
-  NEONMAP1(vaddv_s32, arm64_neon_saddv, AddRetType | Add1ArgType),
-  NEONMAP1(vaddv_u32, arm64_neon_uaddv, AddRetType | Add1ArgType),
-  NEONMAP1(vaddvq_f32, arm64_neon_faddv, AddRetType | Add1ArgType),
-  NEONMAP1(vaddvq_f64, arm64_neon_faddv, AddRetType | Add1ArgType),
-  NEONMAP1(vaddvq_s32, arm64_neon_saddv, AddRetType | Add1ArgType),
-  NEONMAP1(vaddvq_s64, arm64_neon_saddv, AddRetType | Add1ArgType),
-  NEONMAP1(vaddvq_u32, arm64_neon_uaddv, AddRetType | Add1ArgType),
-  NEONMAP1(vaddvq_u64, arm64_neon_uaddv, AddRetType | Add1ArgType),
-  NEONMAP1(vcaged_f64, arm64_neon_facge, AddRetType | Add1ArgType),
-  NEONMAP1(vcages_f32, arm64_neon_facge, AddRetType | Add1ArgType),
-  NEONMAP1(vcagtd_f64, arm64_neon_facgt, AddRetType | Add1ArgType),
-  NEONMAP1(vcagts_f32, arm64_neon_facgt, AddRetType | Add1ArgType),
-  NEONMAP1(vcaled_f64, arm64_neon_facge, AddRetType | Add1ArgType),
-  NEONMAP1(vcales_f32, arm64_neon_facge, AddRetType | Add1ArgType),
-  NEONMAP1(vcaltd_f64, arm64_neon_facgt, AddRetType | Add1ArgType),
-  NEONMAP1(vcalts_f32, arm64_neon_facgt, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtad_s64_f64, arm64_neon_fcvtas, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtad_u64_f64, arm64_neon_fcvtau, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtas_s32_f32, arm64_neon_fcvtas, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtas_u32_f32, arm64_neon_fcvtau, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtd_n_f64_s64, arm64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtd_n_f64_u64, arm64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtd_n_s64_f64, arm64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtd_n_u64_f64, arm64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtmd_s64_f64, arm64_neon_fcvtms, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtmd_u64_f64, arm64_neon_fcvtmu, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtms_s32_f32, arm64_neon_fcvtms, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtms_u32_f32, arm64_neon_fcvtmu, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtnd_s64_f64, arm64_neon_fcvtns, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtnd_u64_f64, arm64_neon_fcvtnu, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtns_s32_f32, arm64_neon_fcvtns, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtns_u32_f32, arm64_neon_fcvtnu, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtpd_s64_f64, arm64_neon_fcvtps, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtpd_u64_f64, arm64_neon_fcvtpu, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtps_s32_f32, arm64_neon_fcvtps, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtps_u32_f32, arm64_neon_fcvtpu, AddRetType | Add1ArgType),
-  NEONMAP1(vcvts_n_f32_s32, arm64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
-  NEONMAP1(vcvts_n_f32_u32, arm64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
-  NEONMAP1(vcvts_n_s32_f32, arm64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
-  NEONMAP1(vcvts_n_u32_f32, arm64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
-  NEONMAP1(vcvtxd_f32_f64, arm64_sisd_fcvtxn, 0),
-  NEONMAP1(vmaxnmv_f32, arm64_neon_fmaxnmv, AddRetType | Add1ArgType),
-  NEONMAP1(vmaxnmvq_f32, arm64_neon_fmaxnmv, AddRetType | Add1ArgType),
-  NEONMAP1(vmaxnmvq_f64, arm64_neon_fmaxnmv, AddRetType | Add1ArgType),
-  NEONMAP1(vmaxv_f32, arm64_neon_fmaxv, AddRetType | Add1ArgType),
-  NEONMAP1(vmaxv_s32, arm64_neon_smaxv, AddRetType | Add1ArgType),
-  NEONMAP1(vmaxv_u32, arm64_neon_umaxv, AddRetType | Add1ArgType),
-  NEONMAP1(vmaxvq_f32, arm64_neon_fmaxv, AddRetType | Add1ArgType),
-  NEONMAP1(vmaxvq_f64, arm64_neon_fmaxv, AddRetType | Add1ArgType),
-  NEONMAP1(vmaxvq_s32, arm64_neon_smaxv, AddRetType | Add1ArgType),
-  NEONMAP1(vmaxvq_u32, arm64_neon_umaxv, AddRetType | Add1ArgType),
-  NEONMAP1(vminnmv_f32, arm64_neon_fminnmv, AddRetType | Add1ArgType),
-  NEONMAP1(vminnmvq_f32, arm64_neon_fminnmv, AddRetType | Add1ArgType),
-  NEONMAP1(vminnmvq_f64, arm64_neon_fminnmv, AddRetType | Add1ArgType),
-  NEONMAP1(vminv_f32, arm64_neon_fminv, AddRetType | Add1ArgType),
-  NEONMAP1(vminv_s32, arm64_neon_sminv, AddRetType | Add1ArgType),
-  NEONMAP1(vminv_u32, arm64_neon_uminv, AddRetType | Add1ArgType),
-  NEONMAP1(vminvq_f32, arm64_neon_fminv, AddRetType | Add1ArgType),
-  NEONMAP1(vminvq_f64, arm64_neon_fminv, AddRetType | Add1ArgType),
-  NEONMAP1(vminvq_s32, arm64_neon_sminv, AddRetType | Add1ArgType),
-  NEONMAP1(vminvq_u32, arm64_neon_uminv, AddRetType | Add1ArgType),
-  NEONMAP1(vmull_p64, arm64_neon_pmull64, 0),
-  NEONMAP1(vmulxd_f64, arm64_neon_fmulx, Add1ArgType),
-  NEONMAP1(vmulxs_f32, arm64_neon_fmulx, Add1ArgType),
-  NEONMAP1(vpaddd_s64, arm64_neon_uaddv, AddRetType | Add1ArgType),
-  NEONMAP1(vpaddd_u64, arm64_neon_uaddv, AddRetType | Add1ArgType),
-  NEONMAP1(vpmaxnmqd_f64, arm64_neon_fmaxnmv, AddRetType | Add1ArgType),
-  NEONMAP1(vpmaxnms_f32, arm64_neon_fmaxnmv, AddRetType | Add1ArgType),
-  NEONMAP1(vpmaxqd_f64, arm64_neon_fmaxv, AddRetType | Add1ArgType),
-  NEONMAP1(vpmaxs_f32, arm64_neon_fmaxv, AddRetType | Add1ArgType),
-  NEONMAP1(vpminnmqd_f64, arm64_neon_fminnmv, AddRetType | Add1ArgType),
-  NEONMAP1(vpminnms_f32, arm64_neon_fminnmv, AddRetType | Add1ArgType),
-  NEONMAP1(vpminqd_f64, arm64_neon_fminv, AddRetType | Add1ArgType),
-  NEONMAP1(vpmins_f32, arm64_neon_fminv, AddRetType | Add1ArgType),
-  NEONMAP1(vqabsb_s8, arm64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqabsd_s64, arm64_neon_sqabs, Add1ArgType),
-  NEONMAP1(vqabsh_s16, arm64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqabss_s32, arm64_neon_sqabs, Add1ArgType),
-  NEONMAP1(vqaddb_s8, arm64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqaddb_u8, arm64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqaddd_s64, arm64_neon_sqadd, Add1ArgType),
-  NEONMAP1(vqaddd_u64, arm64_neon_uqadd, Add1ArgType),
-  NEONMAP1(vqaddh_s16, arm64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqaddh_u16, arm64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqadds_s32, arm64_neon_sqadd, Add1ArgType),
-  NEONMAP1(vqadds_u32, arm64_neon_uqadd, Add1ArgType),
-  NEONMAP1(vqdmulhh_s16, arm64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqdmulhs_s32, arm64_neon_sqdmulh, Add1ArgType),
-  NEONMAP1(vqdmullh_s16, arm64_neon_sqdmull, VectorRet | Use128BitVectors),
-  NEONMAP1(vqdmulls_s32, arm64_neon_sqdmulls_scalar, 0),
-  NEONMAP1(vqmovnd_s64, arm64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
-  NEONMAP1(vqmovnd_u64, arm64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
-  NEONMAP1(vqmovnh_s16, arm64_neon_sqxtn, VectorRet | Use64BitVectors),
-  NEONMAP1(vqmovnh_u16, arm64_neon_uqxtn, VectorRet | Use64BitVectors),
-  NEONMAP1(vqmovns_s32, arm64_neon_sqxtn, VectorRet | Use64BitVectors),
-  NEONMAP1(vqmovns_u32, arm64_neon_uqxtn, VectorRet | Use64BitVectors),
-  NEONMAP1(vqmovund_s64, arm64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
-  NEONMAP1(vqmovunh_s16, arm64_neon_sqxtun, VectorRet | Use64BitVectors),
-  NEONMAP1(vqmovuns_s32, arm64_neon_sqxtun, VectorRet | Use64BitVectors),
-  NEONMAP1(vqnegb_s8, arm64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqnegd_s64, arm64_neon_sqneg, Add1ArgType),
-  NEONMAP1(vqnegh_s16, arm64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqnegs_s32, arm64_neon_sqneg, Add1ArgType),
-  NEONMAP1(vqrdmulhh_s16, arm64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqrdmulhs_s32, arm64_neon_sqrdmulh, Add1ArgType),
-  NEONMAP1(vqrshlb_s8, arm64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqrshlb_u8, arm64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqrshld_s64, arm64_neon_sqrshl, Add1ArgType),
-  NEONMAP1(vqrshld_u64, arm64_neon_uqrshl, Add1ArgType),
-  NEONMAP1(vqrshlh_s16, arm64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqrshlh_u16, arm64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqrshls_s32, arm64_neon_sqrshl, Add1ArgType),
-  NEONMAP1(vqrshls_u32, arm64_neon_uqrshl, Add1ArgType),
-  NEONMAP1(vqrshrnd_n_s64, arm64_neon_sqrshrn, AddRetType),
-  NEONMAP1(vqrshrnd_n_u64, arm64_neon_uqrshrn, AddRetType),
-  NEONMAP1(vqrshrnh_n_s16, arm64_neon_sqrshrn, VectorRet | Use64BitVectors),
-  NEONMAP1(vqrshrnh_n_u16, arm64_neon_uqrshrn, VectorRet | Use64BitVectors),
-  NEONMAP1(vqrshrns_n_s32, arm64_neon_sqrshrn, VectorRet | Use64BitVectors),
-  NEONMAP1(vqrshrns_n_u32, arm64_neon_uqrshrn, VectorRet | Use64BitVectors),
-  NEONMAP1(vqrshrund_n_s64, arm64_neon_sqrshrun, AddRetType),
-  NEONMAP1(vqrshrunh_n_s16, arm64_neon_sqrshrun, VectorRet | Use64BitVectors),
-  NEONMAP1(vqrshruns_n_s32, arm64_neon_sqrshrun, VectorRet | Use64BitVectors),
-  NEONMAP1(vqshlb_n_s8, arm64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqshlb_n_u8, arm64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqshlb_s8, arm64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqshlb_u8, arm64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqshld_s64, arm64_neon_sqshl, Add1ArgType),
-  NEONMAP1(vqshld_u64, arm64_neon_uqshl, Add1ArgType),
-  NEONMAP1(vqshlh_n_s16, arm64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqshlh_n_u16, arm64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqshlh_s16, arm64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqshlh_u16, arm64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqshls_n_s32, arm64_neon_sqshl, Add1ArgType),
-  NEONMAP1(vqshls_n_u32, arm64_neon_uqshl, Add1ArgType),
-  NEONMAP1(vqshls_s32, arm64_neon_sqshl, Add1ArgType),
-  NEONMAP1(vqshls_u32, arm64_neon_uqshl, Add1ArgType),
-  NEONMAP1(vqshlub_n_s8, arm64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqshluh_n_s16, arm64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqshlus_n_s32, arm64_neon_sqshlu, Add1ArgType),
-  NEONMAP1(vqshrnd_n_s64, arm64_neon_sqshrn, AddRetType),
-  NEONMAP1(vqshrnd_n_u64, arm64_neon_uqshrn, AddRetType),
-  NEONMAP1(vqshrnh_n_s16, arm64_neon_sqshrn, VectorRet | Use64BitVectors),
-  NEONMAP1(vqshrnh_n_u16, arm64_neon_uqshrn, VectorRet | Use64BitVectors),
-  NEONMAP1(vqshrns_n_s32, arm64_neon_sqshrn, VectorRet | Use64BitVectors),
-  NEONMAP1(vqshrns_n_u32, arm64_neon_uqshrn, VectorRet | Use64BitVectors),
-  NEONMAP1(vqshrund_n_s64, arm64_neon_sqshrun, AddRetType),
-  NEONMAP1(vqshrunh_n_s16, arm64_neon_sqshrun, VectorRet | Use64BitVectors),
-  NEONMAP1(vqshruns_n_s32, arm64_neon_sqshrun, VectorRet | Use64BitVectors),
-  NEONMAP1(vqsubb_s8, arm64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqsubb_u8, arm64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqsubd_s64, arm64_neon_sqsub, Add1ArgType),
-  NEONMAP1(vqsubd_u64, arm64_neon_uqsub, Add1ArgType),
-  NEONMAP1(vqsubh_s16, arm64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqsubh_u16, arm64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vqsubs_s32, arm64_neon_sqsub, Add1ArgType),
-  NEONMAP1(vqsubs_u32, arm64_neon_uqsub, Add1ArgType),
-  NEONMAP1(vrecped_f64, arm64_neon_frecpe, Add1ArgType),
-  NEONMAP1(vrecpes_f32, arm64_neon_frecpe, Add1ArgType),
-  NEONMAP1(vrecpxd_f64, arm64_neon_frecpx, Add1ArgType),
-  NEONMAP1(vrecpxs_f32, arm64_neon_frecpx, Add1ArgType),
-  NEONMAP1(vrshld_s64, arm64_neon_srshl, Add1ArgType),
-  NEONMAP1(vrshld_u64, arm64_neon_urshl, Add1ArgType),
-  NEONMAP1(vrsqrted_f64, arm64_neon_frsqrte, Add1ArgType),
-  NEONMAP1(vrsqrtes_f32, arm64_neon_frsqrte, Add1ArgType),
-  NEONMAP1(vrsqrtsd_f64, arm64_neon_frsqrts, Add1ArgType),
-  NEONMAP1(vrsqrtss_f32, arm64_neon_frsqrts, Add1ArgType),
-  NEONMAP1(vsha1cq_u32, arm64_crypto_sha1c, 0),
-  NEONMAP1(vsha1h_u32, arm64_crypto_sha1h, 0),
-  NEONMAP1(vsha1mq_u32, arm64_crypto_sha1m, 0),
-  NEONMAP1(vsha1pq_u32, arm64_crypto_sha1p, 0),
-  NEONMAP1(vshld_s64, arm64_neon_sshl, Add1ArgType),
-  NEONMAP1(vshld_u64, arm64_neon_ushl, Add1ArgType),
-  NEONMAP1(vslid_n_s64, arm64_neon_vsli, Vectorize1ArgType),
-  NEONMAP1(vslid_n_u64, arm64_neon_vsli, Vectorize1ArgType),
-  NEONMAP1(vsqaddb_u8, arm64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vsqaddd_u64, arm64_neon_usqadd, Add1ArgType),
-  NEONMAP1(vsqaddh_u16, arm64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vsqadds_u32, arm64_neon_usqadd, Add1ArgType),
-  NEONMAP1(vsrid_n_s64, arm64_neon_vsri, Vectorize1ArgType),
-  NEONMAP1(vsrid_n_u64, arm64_neon_vsri, Vectorize1ArgType),
-  NEONMAP1(vuqaddb_s8, arm64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vuqaddd_s64, arm64_neon_suqadd, Add1ArgType),
-  NEONMAP1(vuqaddh_s16, arm64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
-  NEONMAP1(vuqadds_s32, arm64_neon_suqadd, Add1ArgType),
+static NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
+  NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
+  NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
+  NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
+  NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+  NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+  NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
+  NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
+  NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
+  NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
+  NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
+  NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
+  NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
+  NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
+  NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
+  NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
+  NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
+  NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
+  NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
+  NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
+  NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
+  NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
+  NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
+  NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
+  NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
+  NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
+  NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
+  NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+  NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+  NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+  NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+  NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
+  NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
+  NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
+  NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
+  NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
+  NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
+  NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
+  NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
+  NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
+  NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
+  NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
+  NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
+  NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
+  NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
+  NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
+  NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
+  NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
+  NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
+  NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
+  NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
+  NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
+  NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
+  NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
+  NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
+  NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
+  NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
+  NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
+  NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
+  NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
+  NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
+  NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
+  NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
+  NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
+  NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
+  NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
+  NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
+  NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
+  NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
+  NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
+  NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
+  NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
+  NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
+  NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
+  NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
+  NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
+  NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
+  NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
+  NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
+  NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
+  NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
+  NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
+  NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
+  NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
+  NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
+  NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
+  NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
+  NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
+  NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
+  NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
+  NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
+  NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
+  NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
+  NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
+  NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
+  NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
+  NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
+  NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
+  NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
+  NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
+  NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
 };
 
 #undef NEONMAP0
@@ -2391,8 +2391,8 @@ static NeonIntrinsicInfo ARM64SISDIntrin
 
 static bool NEONSIMDIntrinsicsProvenSorted = false;
 
-static bool ARM64SIMDIntrinsicsProvenSorted = false;
-static bool ARM64SISDIntrinsicsProvenSorted = false;
+static bool AArch64SIMDIntrinsicsProvenSorted = false;
+static bool AArch64SISDIntrinsicsProvenSorted = false;
 
 
 static const NeonIntrinsicInfo *
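
The *ProvenSorted flags above exist because these tables are binary-searched
by builtin ID, with the sort order checked once in asserts builds. A hedged
sketch of the lookup helper whose signature opens here (hypothetical body;
assumes NeonIntrinsicInfo provides the needed comparison operators and a
BuiltinID member):

  static const NeonIntrinsicInfo *
  findIntrinsicInMap(llvm::ArrayRef<NeonIntrinsicInfo> Map, unsigned BuiltinID,
                     bool &MapProvenSorted) {
  #ifndef NDEBUG
    if (!MapProvenSorted) {
      assert(std::is_sorted(Map.begin(), Map.end())); // sorted by BuiltinID
      MapProvenSorted = true;
    }
  #endif
    // <algorithm>'s lower_bound, comparing a table entry against an ID.
    const NeonIntrinsicInfo *I =
        std::lower_bound(Map.begin(), Map.end(), BuiltinID);
    return (I != Map.end() && I->BuiltinID == BuiltinID) ? I : nullptr;
  }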
@@ -3534,7 +3534,7 @@ Value *CodeGenFunction::EmitARMBuiltinEx
   }
 }
 
-static Value *EmitARM64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
+static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
                                       const CallExpr *E,
                                       SmallVectorImpl<Value *> &Ops) {
   unsigned int Int = 0;
@@ -3597,20 +3597,20 @@ static Value *EmitARM64TblBuiltinExpr(Co
   case NEON::BI__builtin_neon_vtbl1_v: {
     TblOps.push_back(Ops[0]);
     return packTBLDVectorList(CGF, TblOps, nullptr, Ops[1], Ty,
-                              Intrinsic::arm64_neon_tbl1, "vtbl1");
+                              Intrinsic::aarch64_neon_tbl1, "vtbl1");
   }
   case NEON::BI__builtin_neon_vtbl2_v: {
     TblOps.push_back(Ops[0]);
     TblOps.push_back(Ops[1]);
     return packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
-                              Intrinsic::arm64_neon_tbl1, "vtbl1");
+                              Intrinsic::aarch64_neon_tbl1, "vtbl1");
   }
   case NEON::BI__builtin_neon_vtbl3_v: {
     TblOps.push_back(Ops[0]);
     TblOps.push_back(Ops[1]);
     TblOps.push_back(Ops[2]);
     return packTBLDVectorList(CGF, TblOps, nullptr, Ops[3], Ty,
-                              Intrinsic::arm64_neon_tbl2, "vtbl2");
+                              Intrinsic::aarch64_neon_tbl2, "vtbl2");
   }
   case NEON::BI__builtin_neon_vtbl4_v: {
     TblOps.push_back(Ops[0]);
@@ -3618,12 +3618,12 @@ static Value *EmitARM64TblBuiltinExpr(Co
     TblOps.push_back(Ops[2]);
     TblOps.push_back(Ops[3]);
     return packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
-                              Intrinsic::arm64_neon_tbl2, "vtbl2");
+                              Intrinsic::aarch64_neon_tbl2, "vtbl2");
   }
   case NEON::BI__builtin_neon_vtbx1_v: {
     TblOps.push_back(Ops[1]);
     Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
-                                       Intrinsic::arm64_neon_tbl1, "vtbl1");
+                                       Intrinsic::aarch64_neon_tbl1, "vtbl1");
 
     llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8);
     Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight);
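
A note on the vtbx1 case above: TBL returns zero for an out-of-range index, but TBX must leave the destination lane untouched, which is why the code splats the constant 8 (one 8-byte table) and, in the lines elided here, compares the indices against it and selects between the TBL result and the original operand. The vtbx3 case further down plays the same trick with 24 (three 8-byte tables). A per-lane scalar model, with illustrative names that are not from the patch:

  #include <stdint.h>

  // One lane of vtbx1 in plain C++: "orig" is the pass-through operand,
  // "table" the single 8-byte table, "idx" the index byte.
  static uint8_t vtbx1_lane(uint8_t orig, const uint8_t table[8], uint8_t idx) {
    uint8_t tbl = (idx < 8) ? table[idx] : 0;  // what TBL1 alone produces
    return (idx < 8) ? tbl : orig;             // the compare-and-select step
  }
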
@@ -3638,14 +3638,14 @@ static Value *EmitARM64TblBuiltinExpr(Co
     TblOps.push_back(Ops[1]);
     TblOps.push_back(Ops[2]);
     return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[3], Ty,
-                              Intrinsic::arm64_neon_tbx1, "vtbx1");
+                              Intrinsic::aarch64_neon_tbx1, "vtbx1");
   }
   case NEON::BI__builtin_neon_vtbx3_v: {
     TblOps.push_back(Ops[1]);
     TblOps.push_back(Ops[2]);
     TblOps.push_back(Ops[3]);
     Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
-                                       Intrinsic::arm64_neon_tbl2, "vtbl2");
+                                       Intrinsic::aarch64_neon_tbl2, "vtbl2");
 
     llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24);
     Value* TwentyFourV = llvm::ConstantVector::getSplat(nElts, TwentyFour);
@@ -3663,32 +3663,32 @@ static Value *EmitARM64TblBuiltinExpr(Co
     TblOps.push_back(Ops[3]);
     TblOps.push_back(Ops[4]);
     return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[5], Ty,
-                              Intrinsic::arm64_neon_tbx2, "vtbx2");
+                              Intrinsic::aarch64_neon_tbx2, "vtbx2");
   }
   case NEON::BI__builtin_neon_vqtbl1_v:
   case NEON::BI__builtin_neon_vqtbl1q_v:
-    Int = Intrinsic::arm64_neon_tbl1; s = "vtbl1"; break;
+    Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
   case NEON::BI__builtin_neon_vqtbl2_v:
   case NEON::BI__builtin_neon_vqtbl2q_v: {
-    Int = Intrinsic::arm64_neon_tbl2; s = "vtbl2"; break;
+    Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
   case NEON::BI__builtin_neon_vqtbl3_v:
   case NEON::BI__builtin_neon_vqtbl3q_v:
-    Int = Intrinsic::arm64_neon_tbl3; s = "vtbl3"; break;
+    Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
   case NEON::BI__builtin_neon_vqtbl4_v:
   case NEON::BI__builtin_neon_vqtbl4q_v:
-    Int = Intrinsic::arm64_neon_tbl4; s = "vtbl4"; break;
+    Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
   case NEON::BI__builtin_neon_vqtbx1_v:
   case NEON::BI__builtin_neon_vqtbx1q_v:
-    Int = Intrinsic::arm64_neon_tbx1; s = "vtbx1"; break;
+    Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
   case NEON::BI__builtin_neon_vqtbx2_v:
   case NEON::BI__builtin_neon_vqtbx2q_v:
-    Int = Intrinsic::arm64_neon_tbx2; s = "vtbx2"; break;
+    Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
   case NEON::BI__builtin_neon_vqtbx3_v:
   case NEON::BI__builtin_neon_vqtbx3q_v:
-    Int = Intrinsic::arm64_neon_tbx3; s = "vtbx3"; break;
+    Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
   case NEON::BI__builtin_neon_vqtbx4_v:
   case NEON::BI__builtin_neon_vqtbx4q_v:
-    Int = Intrinsic::arm64_neon_tbx4; s = "vtbx4"; break;
+    Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
   }
   }
 
@@ -3720,7 +3720,7 @@ Value *CodeGenFunction::vectorWrapScalar
 Value *CodeGenFunction::
 emitVectorWrappedScalar8Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops,
                                   const char *Name) {
-  // i8 is not a legal types for ARM64, so we can't just use
+  // i8 is not a legal type for AArch64, so we can't just use
   // a normal overloaded intrinsic call for these scalar types. Instead
   // we'll build 64-bit vectors w/ lane zero being our input values and
   // perform the operation on that. The back end can pattern match directly
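
Since the comment above is cut off by the hunk boundary: the pattern is to insert the scalar into lane zero of a 64-bit vector, run the vector form of the intrinsic, and extract lane zero again (the "lane0" extract visible in the next hunk). A minimal IRBuilder sketch of that shape, with assumed names rather than the function's actual body:

  #include "llvm/IR/IRBuilder.h"

  // Legalize an i8 scalar operation by running it in lane 0 of a v8i8.
  static llvm::Value *wrapScalar8(llvm::IRBuilder<> &B,
                                  llvm::Function *VectorIntrin,
                                  llvm::Value *Scalar) {
    llvm::Type *I8 = B.getInt8Ty();
    llvm::Value *V = llvm::UndefValue::get(llvm::VectorType::get(I8, 8));
    V = B.CreateInsertElement(V, Scalar, B.getInt64(0)); // scalar -> lane 0
    llvm::Value *R = B.CreateCall(VectorIntrin, V);      // v8i8 operation
    return B.CreateExtractElement(R, B.getInt64(0), "lane0");
  }
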
@@ -3736,7 +3736,7 @@ emitVectorWrappedScalar8Intrinsic(unsign
 Value *CodeGenFunction::
 emitVectorWrappedScalar16Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops,
                                    const char *Name) {
-  // i16 is not a legal types for ARM64, so we can't just use
+  // i16 is not a legal type for AArch64, so we can't just use
   // a normal overloaded intrinsic call for these scalar types. Instead
   // we'll build 64-bit vectors w/ lane zero being our input values and
   // perform the operation on that. The back end can pattern match directly
@@ -3749,9 +3749,9 @@ emitVectorWrappedScalar16Intrinsic(unsig
   return Builder.CreateExtractElement(V, CI, "lane0");
 }
 
-Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID,
-                                             const CallExpr *E) {
-  if (BuiltinID == ARM64::BI__clear_cache) {
+Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
+                                               const CallExpr *E) {
+  if (BuiltinID == AArch64::BI__clear_cache) {
     assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
     const FunctionDecl *FD = E->getDirectCallee();
     SmallVector<Value*, 2> Ops;
@@ -3763,9 +3763,9 @@ Value *CodeGenFunction::EmitARM64Builtin
     return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
   }
 
-  if (BuiltinID == ARM64::BI__builtin_arm_ldrex &&
+  if (BuiltinID == AArch64::BI__builtin_arm_ldrex &&
       getContext().getTypeSize(E->getType()) == 128) {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_ldxp);
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ldxp);
 
     Value *LdPtr = EmitScalarExpr(E->getArg(0));
     Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
@@ -3781,7 +3781,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
     Val = Builder.CreateOr(Val, Val1);
     return Builder.CreateBitCast(Val, ConvertType(E->getType()));
-  } else if (BuiltinID == ARM64::BI__builtin_arm_ldrex) {
+  } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex) {
     Value *LoadAddr = EmitScalarExpr(E->getArg(0));
 
     QualType Ty = E->getType();
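
For context on the 128-bit path at the top of this hunk: ldxp returns two 64-bit halves, and the shl-by-64 plus or recombines them into the i128 result. The plain-C++ equivalent of that recombination (__uint128_t is a compiler extension; which extracted value is the high half is not visible here):

  #include <stdint.h>

  // Rebuild a 128-bit value from the two halves an ldxp-style load yields.
  static __uint128_t combine_halves(uint64_t lo, uint64_t hi) {
    return ((__uint128_t)hi << 64) | lo;
  }
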
@@ -3790,7 +3790,7 @@ Value *CodeGenFunction::EmitARM64Builtin
                                                   getContext().getTypeSize(Ty));
     LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
 
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_ldxr, LoadAddr->getType());
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ldxr, LoadAddr->getType());
     Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
 
     if (RealResTy->isPointerTy())
@@ -3800,9 +3800,9 @@ Value *CodeGenFunction::EmitARM64Builtin
     return Builder.CreateBitCast(Val, RealResTy);
   }
 
-  if (BuiltinID == ARM64::BI__builtin_arm_strex &&
+  if (BuiltinID == AArch64::BI__builtin_arm_strex &&
       getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_stxp);
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_stxp);
     llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, NULL);
 
     Value *One = llvm::ConstantInt::get(Int32Ty, 1);
@@ -3819,7 +3819,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
                                          Int8PtrTy);
     return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "stxp");
-  } else if (BuiltinID == ARM64::BI__builtin_arm_strex) {
+  } else if (BuiltinID == AArch64::BI__builtin_arm_strex) {
     Value *StoreVal = EmitScalarExpr(E->getArg(0));
     Value *StoreAddr = EmitScalarExpr(E->getArg(1));
 
@@ -3835,34 +3835,34 @@ Value *CodeGenFunction::EmitARM64Builtin
       StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
     }
 
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_stxr, StoreAddr->getType());
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_stxr, StoreAddr->getType());
     return Builder.CreateCall2(F, StoreVal, StoreAddr, "stxr");
   }
 
-  if (BuiltinID == ARM64::BI__builtin_arm_clrex) {
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_clrex);
+  if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
     return Builder.CreateCall(F);
   }
 
   // CRC32
   Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
   switch (BuiltinID) {
-  case ARM64::BI__builtin_arm_crc32b:
-    CRCIntrinsicID = Intrinsic::arm64_crc32b; break;
-  case ARM64::BI__builtin_arm_crc32cb:
-    CRCIntrinsicID = Intrinsic::arm64_crc32cb; break;
-  case ARM64::BI__builtin_arm_crc32h:
-    CRCIntrinsicID = Intrinsic::arm64_crc32h; break;
-  case ARM64::BI__builtin_arm_crc32ch:
-    CRCIntrinsicID = Intrinsic::arm64_crc32ch; break;
-  case ARM64::BI__builtin_arm_crc32w:
-    CRCIntrinsicID = Intrinsic::arm64_crc32w; break;
-  case ARM64::BI__builtin_arm_crc32cw:
-    CRCIntrinsicID = Intrinsic::arm64_crc32cw; break;
-  case ARM64::BI__builtin_arm_crc32d:
-    CRCIntrinsicID = Intrinsic::arm64_crc32x; break;
-  case ARM64::BI__builtin_arm_crc32cd:
-    CRCIntrinsicID = Intrinsic::arm64_crc32cx; break;
+  case AArch64::BI__builtin_arm_crc32b:
+    CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
+  case AArch64::BI__builtin_arm_crc32cb:
+    CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
+  case AArch64::BI__builtin_arm_crc32h:
+    CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
+  case AArch64::BI__builtin_arm_crc32ch:
+    CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
+  case AArch64::BI__builtin_arm_crc32w:
+    CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
+  case AArch64::BI__builtin_arm_crc32cw:
+    CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
+  case AArch64::BI__builtin_arm_crc32d:
+    CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
+  case AArch64::BI__builtin_arm_crc32cd:
+    CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
   }
 
   if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
@@ -3880,9 +3880,9 @@ Value *CodeGenFunction::EmitARM64Builtin
   for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
     Ops.push_back(EmitScalarExpr(E->getArg(i)));
 
-  llvm::ArrayRef<NeonIntrinsicInfo> SISDMap(ARM64SISDIntrinsicMap);
+  llvm::ArrayRef<NeonIntrinsicInfo> SISDMap(AArch64SISDIntrinsicMap);
   const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
-      SISDMap, BuiltinID, ARM64SISDIntrinsicsProvenSorted);
+      SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
 
   if (Builtin) {
     Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
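
One user-visible point about the CRC32 block in this hunk: the __builtin_arm_crc32* names are unchanged, only the intrinsics they lower to are renamed, and the 64-bit d/cd variants still map to the x-suffixed intrinsics. A usage sketch (the required target and feature flags are an assumption, e.g. some CRC-capable -march setting):

  #include <stdint.h>

  // After this patch the call below lowers to llvm.aarch64.crc32x rather
  // than llvm.arm64.crc32x; the source-level builtin is untouched.
  uint32_t crc64_step(uint32_t acc, uint64_t data) {
    return __builtin_arm_crc32d(acc, data);
  }
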
@@ -4226,27 +4226,27 @@ Value *CodeGenFunction::EmitARM64Builtin
     ProductOps.push_back(vectorWrapScalar16(Ops[1]));
     ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
     llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
-    Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqdmull, VTy),
+    Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
                           ProductOps, "vqdmlXl");
     Constant *CI = ConstantInt::get(Int32Ty, 0);
     Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
 
     unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
-                                        ? Intrinsic::arm64_neon_sqadd
-                                        : Intrinsic::arm64_neon_sqsub;
+                                        ? Intrinsic::aarch64_neon_sqadd
+                                        : Intrinsic::aarch64_neon_sqsub;
     return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
   }
   case NEON::BI__builtin_neon_vqshlud_n_s64: {
     Ops.push_back(EmitScalarExpr(E->getArg(1)));
     Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqshlu, Int64Ty),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
                         Ops, "vqshlu_n");
   }
   case NEON::BI__builtin_neon_vqshld_n_u64:
   case NEON::BI__builtin_neon_vqshld_n_s64: {
     unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
-                                   ? Intrinsic::arm64_neon_uqshl
-                                   : Intrinsic::arm64_neon_sqshl;
+                                   ? Intrinsic::aarch64_neon_uqshl
+                                   : Intrinsic::aarch64_neon_sqshl;
     Ops.push_back(EmitScalarExpr(E->getArg(1)));
     Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
     return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
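
The vqdmlalh/vqdmlslh case at the top of this hunk is a three-step emulation: wrap the i16 operands into v4i16 vectors, emit sqdmull and pull out lane 0, then run a scalar sqadd or sqsub on the i32 result. A scalar model of the accumulate case, with the saturation written out by hand (assumed intrinsic semantics, not something the patch states):

  #include <cstdint>
  #include <limits>

  static int32_t sat32(int64_t v) {
    if (v > std::numeric_limits<int32_t>::max())
      return std::numeric_limits<int32_t>::max();
    if (v < std::numeric_limits<int32_t>::min())
      return std::numeric_limits<int32_t>::min();
    return (int32_t)v;
  }

  // vqdmlalh_s16: both the doubling multiply (sqdmull) and the accumulate
  // (sqadd) saturate to 32 bits.
  static int32_t vqdmlalh_model(int32_t acc, int16_t a, int16_t b) {
    int32_t prod = sat32(2 * (int64_t)a * (int64_t)b);
    return sat32((int64_t)acc + prod);
  }
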
@@ -4254,8 +4254,8 @@ Value *CodeGenFunction::EmitARM64Builtin
   case NEON::BI__builtin_neon_vrshrd_n_u64:
   case NEON::BI__builtin_neon_vrshrd_n_s64: {
     unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
-                                   ? Intrinsic::arm64_neon_urshl
-                                   : Intrinsic::arm64_neon_srshl;
+                                   ? Intrinsic::aarch64_neon_urshl
+                                   : Intrinsic::aarch64_neon_srshl;
     Ops.push_back(EmitScalarExpr(E->getArg(1)));
     int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
     Ops[1] = ConstantInt::get(Int64Ty, -SV);
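
The vrshrd_n case above preserves a NEON idiom worth noting: there is no rounding shift-right intrinsic, so the shift amount is negated (the ConstantInt::get(Int64Ty, -SV)) and passed to the rounding shift-left (urshl/srshl), which shifts right for negative amounts. A scalar model of the signed case; the 128-bit intermediate stands in for the hardware's wider internal arithmetic (an assumption of the sketch):

  #include <cstdint>

  // srshr #n as srshl by -n: add the rounding constant 2^(n-1), then
  // arithmetic-shift right, in a wide intermediate to avoid overflow.
  static int64_t srshr_model(int64_t v, unsigned n) {  // 1 <= n <= 63 here
    __int128 t = (__int128)v + ((__int128)1 << (n - 1));
    return (int64_t)(t >> n);
  }
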
@@ -4264,8 +4264,8 @@ Value *CodeGenFunction::EmitARM64Builtin
   case NEON::BI__builtin_neon_vrsrad_n_u64:
   case NEON::BI__builtin_neon_vrsrad_n_s64: {
     unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
-                                   ? Intrinsic::arm64_neon_urshl
-                                   : Intrinsic::arm64_neon_srshl;
+                                   ? Intrinsic::aarch64_neon_urshl
+                                   : Intrinsic::aarch64_neon_srshl;
     Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
     Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
     Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Int64Ty), Ops[1],
@@ -4323,7 +4323,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     ProductOps.push_back(vectorWrapScalar16(Ops[1]));
     ProductOps.push_back(vectorWrapScalar16(Ops[2]));
     llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
-    Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqdmull, VTy),
+    Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
                           ProductOps, "vqdmlXl");
     Constant *CI = ConstantInt::get(Int32Ty, 0);
     Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
@@ -4331,8 +4331,8 @@ Value *CodeGenFunction::EmitARM64Builtin
 
     unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
                        BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
-                          ? Intrinsic::arm64_neon_sqadd
-                          : Intrinsic::arm64_neon_sqsub;
+                          ? Intrinsic::aarch64_neon_sqadd
+                          : Intrinsic::aarch64_neon_sqsub;
     return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
   }
   case NEON::BI__builtin_neon_vqdmlals_s32:
@@ -4341,12 +4341,12 @@ Value *CodeGenFunction::EmitARM64Builtin
     ProductOps.push_back(Ops[1]);
     ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
     Ops[1] =
-        EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqdmulls_scalar),
+        EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
                      ProductOps, "vqdmlXl");
 
     unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
-                                        ? Intrinsic::arm64_neon_sqadd
-                                        : Intrinsic::arm64_neon_sqsub;
+                                        ? Intrinsic::aarch64_neon_sqadd
+                                        : Intrinsic::aarch64_neon_sqsub;
     return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
   }
   case NEON::BI__builtin_neon_vqdmlals_lane_s32:
@@ -4359,14 +4359,14 @@ Value *CodeGenFunction::EmitARM64Builtin
     ProductOps.push_back(Ops[1]);
     ProductOps.push_back(Ops[2]);
     Ops[1] =
-        EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_sqdmulls_scalar),
+        EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
                      ProductOps, "vqdmlXl");
     Ops.pop_back();
 
     unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
                        BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
-                          ? Intrinsic::arm64_neon_sqadd
-                          : Intrinsic::arm64_neon_sqsub;
+                          ? Intrinsic::aarch64_neon_sqadd
+                          : Intrinsic::aarch64_neon_sqsub;
     return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
   }
   }
@@ -4376,17 +4376,17 @@ Value *CodeGenFunction::EmitARM64Builtin
   if (!Ty)
     return nullptr;
 
-  // Not all intrinsics handled by the common case work for ARM64 yet, so only
+  // Not all intrinsics handled by the common case work for AArch64 yet, so only
   // defer to common code if it's been added to our special map.
-  Builtin = findNeonIntrinsicInMap(ARM64SIMDIntrinsicMap, BuiltinID,
-                                   ARM64SIMDIntrinsicsProvenSorted);
+  Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
+                                   AArch64SIMDIntrinsicsProvenSorted);
 
   if (Builtin)
     return EmitCommonNeonBuiltinExpr(
         Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
         Builtin->NameHint, Builtin->TypeModifier, E, Ops, nullptr);
 
-  if (Value *V = EmitARM64TblBuiltinExpr(*this, BuiltinID, E, Ops))
+  if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops))
     return V;
 
   unsigned Int;
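
Both lookups, the SISD one earlier and the SIMD one here, funnel through findNeonIntrinsicInMap, which requires the tables to be sorted by builtin ID; the renamed ...ProvenSorted flags just cache a one-time sortedness check. The lookup is in essence a std::lower_bound, sketched here with assumed field names rather than the real NeonIntrinsicInfo layout:

  #include <algorithm>

  struct Info { unsigned BuiltinID; unsigned LLVMIntrinsic; };

  // Binary search over a table sorted by BuiltinID; nullptr on a miss.
  static const Info *findInMap(const Info *Begin, const Info *End,
                               unsigned ID) {
    const Info *It = std::lower_bound(
        Begin, End, ID,
        [](const Info &I, unsigned Key) { return I.BuiltinID < Key; });
    return (It != End && It->BuiltinID == ID) ? It : nullptr;
  }
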
@@ -4492,26 +4492,26 @@ Value *CodeGenFunction::EmitARM64Builtin
   }
   case NEON::BI__builtin_neon_vmull_v:
     // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
-    Int = usgn ? Intrinsic::arm64_neon_umull : Intrinsic::arm64_neon_smull;
-    if (Type.isPoly()) Int = Intrinsic::arm64_neon_pmull;
+    Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
+    if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
   case NEON::BI__builtin_neon_vmax_v:
   case NEON::BI__builtin_neon_vmaxq_v:
     // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
-    Int = usgn ? Intrinsic::arm64_neon_umax : Intrinsic::arm64_neon_smax;
-    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fmax;
+    Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
+    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
   case NEON::BI__builtin_neon_vmin_v:
   case NEON::BI__builtin_neon_vminq_v:
     // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
-    Int = usgn ? Intrinsic::arm64_neon_umin : Intrinsic::arm64_neon_smin;
-    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fmin;
+    Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
+    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
   case NEON::BI__builtin_neon_vabd_v:
   case NEON::BI__builtin_neon_vabdq_v:
     // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
-    Int = usgn ? Intrinsic::arm64_neon_uabd : Intrinsic::arm64_neon_sabd;
-    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fabd;
+    Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
+    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
   case NEON::BI__builtin_neon_vpadal_v:
   case NEON::BI__builtin_neon_vpadalq_v: {
@@ -4521,7 +4521,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     llvm::Type *ArgTy = llvm::VectorType::get(
         llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
     llvm::Type* Tys[2] = { VTy, ArgTy };
-    Int = usgn ? Intrinsic::arm64_neon_uaddlp : Intrinsic::arm64_neon_saddlp;
+    Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
     SmallVector<llvm::Value*, 1> TmpOps;
     TmpOps.push_back(Ops[1]);
     Function *F = CGM.getIntrinsic(Int, Tys);
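
The vpadal case above is a two-step emulation: there is no single "pairwise add and accumulate" intrinsic, so the code emits the pairwise widening add (uaddlp/saddlp) on the source and then adds that into the accumulator (the final add sits past this hunk's boundary). A scalar model of the u8-to-u16 case:

  #include <cstdint>

  // vpadal_u8: each u16 accumulator lane gains the widened sum of one
  // adjacent pair of u8 source lanes; the add wraps, it does not saturate.
  static void vpadal_u8_model(uint16_t acc[4], const uint8_t src[8]) {
    for (int i = 0; i < 4; ++i)
      acc[i] += (uint16_t)src[2 * i] + (uint16_t)src[2 * i + 1];
  }
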
@@ -4532,33 +4532,33 @@ Value *CodeGenFunction::EmitARM64Builtin
   case NEON::BI__builtin_neon_vpmin_v:
   case NEON::BI__builtin_neon_vpminq_v:
     // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
-    Int = usgn ? Intrinsic::arm64_neon_uminp : Intrinsic::arm64_neon_sminp;
-    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fminp;
+    Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
+    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
   case NEON::BI__builtin_neon_vpmax_v:
   case NEON::BI__builtin_neon_vpmaxq_v:
     // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
-    Int = usgn ? Intrinsic::arm64_neon_umaxp : Intrinsic::arm64_neon_smaxp;
-    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::arm64_neon_fmaxp;
+    Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
+    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
   case NEON::BI__builtin_neon_vminnm_v:
   case NEON::BI__builtin_neon_vminnmq_v:
-    Int = Intrinsic::arm64_neon_fminnm;
+    Int = Intrinsic::aarch64_neon_fminnm;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
   case NEON::BI__builtin_neon_vmaxnm_v:
   case NEON::BI__builtin_neon_vmaxnmq_v:
-    Int = Intrinsic::arm64_neon_fmaxnm;
+    Int = Intrinsic::aarch64_neon_fmaxnm;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
   case NEON::BI__builtin_neon_vrecpss_f32: {
     llvm::Type *f32Type = llvm::Type::getFloatTy(getLLVMContext());
     Ops.push_back(EmitScalarExpr(E->getArg(1)));
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_frecps, f32Type),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f32Type),
                         Ops, "vrecps");
   }
   case NEON::BI__builtin_neon_vrecpsd_f64: {
     llvm::Type *f64Type = llvm::Type::getDoubleTy(getLLVMContext());
     Ops.push_back(EmitScalarExpr(E->getArg(1)));
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_frecps, f64Type),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f64Type),
                         Ops, "vrecps");
   }
   case NEON::BI__builtin_neon_vrshr_n_v:
@@ -4566,34 +4566,34 @@ Value *CodeGenFunction::EmitARM64Builtin
     // FIXME: this can be shared with 32-bit ARM, but not AArch64 at the
     // moment. After the final merge it should be added to
     // EmitCommonNeonBuiltinExpr.
-    Int = usgn ? Intrinsic::arm64_neon_urshl : Intrinsic::arm64_neon_srshl;
+    Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
   case NEON::BI__builtin_neon_vqshlu_n_v:
   case NEON::BI__builtin_neon_vqshluq_n_v:
     // FIXME: AArch64 and ARM use different intrinsics for this, but are
     // essentially compatible. It should be in EmitCommonNeonBuiltinExpr after
     // the final merge.
-    Int = Intrinsic::arm64_neon_sqshlu;
+    Int = Intrinsic::aarch64_neon_sqshlu;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", 1, false);
   case NEON::BI__builtin_neon_vqshrun_n_v:
     // FIXME: as above
-    Int = Intrinsic::arm64_neon_sqshrun;
+    Int = Intrinsic::aarch64_neon_sqshrun;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
   case NEON::BI__builtin_neon_vqrshrun_n_v:
     // FIXME: and again.
-    Int = Intrinsic::arm64_neon_sqrshrun;
+    Int = Intrinsic::aarch64_neon_sqrshrun;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
   case NEON::BI__builtin_neon_vqshrn_n_v:
     // FIXME: guess
-    Int = usgn ? Intrinsic::arm64_neon_uqshrn : Intrinsic::arm64_neon_sqshrn;
+    Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
   case NEON::BI__builtin_neon_vrshrn_n_v:
     // FIXME: there might be a pattern here.
-    Int = Intrinsic::arm64_neon_rshrn;
+    Int = Intrinsic::aarch64_neon_rshrn;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
   case NEON::BI__builtin_neon_vqrshrn_n_v:
     // FIXME: another one
-    Int = usgn ? Intrinsic::arm64_neon_uqrshrn : Intrinsic::arm64_neon_sqrshrn;
+    Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
   case NEON::BI__builtin_neon_vrnda_v:
   case NEON::BI__builtin_neon_vrndaq_v: {
@@ -4612,7 +4612,7 @@ Value *CodeGenFunction::EmitARM64Builtin
   }
   case NEON::BI__builtin_neon_vrndn_v:
   case NEON::BI__builtin_neon_vrndnq_v: {
-    Int = Intrinsic::arm64_neon_frintn;
+    Int = Intrinsic::aarch64_neon_frintn;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
   }
   case NEON::BI__builtin_neon_vrndp_v:
@@ -4699,7 +4699,7 @@ Value *CodeGenFunction::EmitARM64Builtin
   case NEON::BI__builtin_neon_vcvtaq_s64_v:
   case NEON::BI__builtin_neon_vcvta_u64_v:
   case NEON::BI__builtin_neon_vcvtaq_u64_v: {
-    Int = usgn ? Intrinsic::arm64_neon_fcvtau : Intrinsic::arm64_neon_fcvtas;
+    Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
     bool Double =
       (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
     llvm::Type *InTy =
@@ -4717,7 +4717,7 @@ Value *CodeGenFunction::EmitARM64Builtin
   case NEON::BI__builtin_neon_vcvtmq_s64_v:
   case NEON::BI__builtin_neon_vcvtm_u64_v:
   case NEON::BI__builtin_neon_vcvtmq_u64_v: {
-    Int = usgn ? Intrinsic::arm64_neon_fcvtmu : Intrinsic::arm64_neon_fcvtms;
+    Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
     bool Double =
       (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
     llvm::Type *InTy =
@@ -4735,7 +4735,7 @@ Value *CodeGenFunction::EmitARM64Builtin
   case NEON::BI__builtin_neon_vcvtnq_s64_v:
   case NEON::BI__builtin_neon_vcvtn_u64_v:
   case NEON::BI__builtin_neon_vcvtnq_u64_v: {
-    Int = usgn ? Intrinsic::arm64_neon_fcvtnu : Intrinsic::arm64_neon_fcvtns;
+    Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
     bool Double =
       (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
     llvm::Type *InTy =
@@ -4753,7 +4753,7 @@ Value *CodeGenFunction::EmitARM64Builtin
   case NEON::BI__builtin_neon_vcvtpq_s64_v:
   case NEON::BI__builtin_neon_vcvtp_u64_v:
   case NEON::BI__builtin_neon_vcvtpq_u64_v: {
-    Int = usgn ? Intrinsic::arm64_neon_fcvtpu : Intrinsic::arm64_neon_fcvtps;
+    Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
     bool Double =
       (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
     llvm::Type *InTy =
@@ -4765,7 +4765,7 @@ Value *CodeGenFunction::EmitARM64Builtin
   }
   case NEON::BI__builtin_neon_vmulx_v:
   case NEON::BI__builtin_neon_vmulxq_v: {
-    Int = Intrinsic::arm64_neon_fmulx;
+    Int = Intrinsic::aarch64_neon_fmulx;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
   }
   case NEON::BI__builtin_neon_vmul_lane_v:
@@ -4786,12 +4786,12 @@ Value *CodeGenFunction::EmitARM64Builtin
     return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
   case NEON::BI__builtin_neon_vpmaxnm_v:
   case NEON::BI__builtin_neon_vpmaxnmq_v: {
-    Int = Intrinsic::arm64_neon_fmaxnmp;
+    Int = Intrinsic::aarch64_neon_fmaxnmp;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
   }
   case NEON::BI__builtin_neon_vpminnm_v:
   case NEON::BI__builtin_neon_vpminnmq_v: {
-    Int = Intrinsic::arm64_neon_fminnmp;
+    Int = Intrinsic::aarch64_neon_fminnmp;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
   }
   case NEON::BI__builtin_neon_vsqrt_v:
@@ -4802,7 +4802,7 @@ Value *CodeGenFunction::EmitARM64Builtin
   }
   case NEON::BI__builtin_neon_vrbit_v:
   case NEON::BI__builtin_neon_vrbitq_v: {
-    Int = Intrinsic::arm64_neon_rbit;
+    Int = Intrinsic::aarch64_neon_rbit;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
   }
   case NEON::BI__builtin_neon_vaddv_u8:
@@ -4810,7 +4810,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     usgn = true;
     // FALLTHROUGH
   case NEON::BI__builtin_neon_vaddv_s8: {
-    Int = usgn ? Intrinsic::arm64_neon_uaddv : Intrinsic::arm64_neon_saddv;
+    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
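
All of the vaddv/vmaxv/vminv cases in this stretch share one shape: the across-vector intrinsic is emitted at i32 whatever the element type, and the result is truncated back to the element width (the dangling llvm::IntegerType::get(...) context lines are the tails of those truncs). For the first case, a scalar model:

  #include <cstdint>

  // vaddv_u8: llvm.aarch64.neon.uaddv returns i32 even for byte elements;
  // the sum is then truncated back to u8, wrapping like the IR trunc.
  static uint8_t vaddv_u8_model(const uint8_t v[8]) {
    uint32_t sum = 0;
    for (int i = 0; i < 8; ++i)
      sum += v[i];
    return (uint8_t)sum;
  }
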
@@ -4824,7 +4824,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     usgn = true;
     // FALLTHROUGH
   case NEON::BI__builtin_neon_vaddv_s16: {
-    Int = usgn ? Intrinsic::arm64_neon_uaddv : Intrinsic::arm64_neon_saddv;
+    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
@@ -4838,7 +4838,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     usgn = true;
     // FALLTHROUGH
   case NEON::BI__builtin_neon_vaddvq_s8: {
-    Int = usgn ? Intrinsic::arm64_neon_uaddv : Intrinsic::arm64_neon_saddv;
+    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
@@ -4852,7 +4852,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     usgn = true;
     // FALLTHROUGH
   case NEON::BI__builtin_neon_vaddvq_s16: {
-    Int = usgn ? Intrinsic::arm64_neon_uaddv : Intrinsic::arm64_neon_saddv;
+    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
@@ -4863,7 +4863,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 16));
   }
   case NEON::BI__builtin_neon_vmaxv_u8: {
-    Int = Intrinsic::arm64_neon_umaxv;
+    Int = Intrinsic::aarch64_neon_umaxv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
@@ -4874,7 +4874,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 8));
   }
   case NEON::BI__builtin_neon_vmaxv_u16: {
-    Int = Intrinsic::arm64_neon_umaxv;
+    Int = Intrinsic::aarch64_neon_umaxv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
@@ -4885,7 +4885,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 16));
   }
   case NEON::BI__builtin_neon_vmaxvq_u8: {
-    Int = Intrinsic::arm64_neon_umaxv;
+    Int = Intrinsic::aarch64_neon_umaxv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
@@ -4896,7 +4896,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 8));
   }
   case NEON::BI__builtin_neon_vmaxvq_u16: {
-    Int = Intrinsic::arm64_neon_umaxv;
+    Int = Intrinsic::aarch64_neon_umaxv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
@@ -4907,7 +4907,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 16));
   }
   case NEON::BI__builtin_neon_vmaxv_s8: {
-    Int = Intrinsic::arm64_neon_smaxv;
+    Int = Intrinsic::aarch64_neon_smaxv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
@@ -4918,7 +4918,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 8));
   }
   case NEON::BI__builtin_neon_vmaxv_s16: {
-    Int = Intrinsic::arm64_neon_smaxv;
+    Int = Intrinsic::aarch64_neon_smaxv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
@@ -4929,7 +4929,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 16));
   }
   case NEON::BI__builtin_neon_vmaxvq_s8: {
-    Int = Intrinsic::arm64_neon_smaxv;
+    Int = Intrinsic::aarch64_neon_smaxv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
@@ -4940,7 +4940,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 8));
   }
   case NEON::BI__builtin_neon_vmaxvq_s16: {
-    Int = Intrinsic::arm64_neon_smaxv;
+    Int = Intrinsic::aarch64_neon_smaxv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
@@ -4951,7 +4951,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 16));
   }
   case NEON::BI__builtin_neon_vminv_u8: {
-    Int = Intrinsic::arm64_neon_uminv;
+    Int = Intrinsic::aarch64_neon_uminv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
@@ -4962,7 +4962,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 8));
   }
   case NEON::BI__builtin_neon_vminv_u16: {
-    Int = Intrinsic::arm64_neon_uminv;
+    Int = Intrinsic::aarch64_neon_uminv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
@@ -4973,7 +4973,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 16));
   }
   case NEON::BI__builtin_neon_vminvq_u8: {
-    Int = Intrinsic::arm64_neon_uminv;
+    Int = Intrinsic::aarch64_neon_uminv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
@@ -4984,7 +4984,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 8));
   }
   case NEON::BI__builtin_neon_vminvq_u16: {
-    Int = Intrinsic::arm64_neon_uminv;
+    Int = Intrinsic::aarch64_neon_uminv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
@@ -4995,7 +4995,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 16));
   }
   case NEON::BI__builtin_neon_vminv_s8: {
-    Int = Intrinsic::arm64_neon_sminv;
+    Int = Intrinsic::aarch64_neon_sminv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
@@ -5006,7 +5006,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 8));
   }
   case NEON::BI__builtin_neon_vminv_s16: {
-    Int = Intrinsic::arm64_neon_sminv;
+    Int = Intrinsic::aarch64_neon_sminv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
@@ -5017,7 +5017,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 16));
   }
   case NEON::BI__builtin_neon_vminvq_s8: {
-    Int = Intrinsic::arm64_neon_sminv;
+    Int = Intrinsic::aarch64_neon_sminv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
@@ -5028,7 +5028,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 8));
   }
   case NEON::BI__builtin_neon_vminvq_s16: {
-    Int = Intrinsic::arm64_neon_sminv;
+    Int = Intrinsic::aarch64_neon_sminv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
@@ -5044,7 +5044,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     return Builder.CreateFMul(Ops[0], RHS);
   }
   case NEON::BI__builtin_neon_vaddlv_u8: {
-    Int = Intrinsic::arm64_neon_uaddlv;
+    Int = Intrinsic::aarch64_neon_uaddlv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
@@ -5055,7 +5055,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 16));
   }
   case NEON::BI__builtin_neon_vaddlv_u16: {
-    Int = Intrinsic::arm64_neon_uaddlv;
+    Int = Intrinsic::aarch64_neon_uaddlv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
@@ -5064,7 +5064,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
   }
   case NEON::BI__builtin_neon_vaddlvq_u8: {
-    Int = Intrinsic::arm64_neon_uaddlv;
+    Int = Intrinsic::aarch64_neon_uaddlv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
@@ -5075,7 +5075,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 16));
   }
   case NEON::BI__builtin_neon_vaddlvq_u16: {
-    Int = Intrinsic::arm64_neon_uaddlv;
+    Int = Intrinsic::aarch64_neon_uaddlv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
@@ -5084,7 +5084,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
   }
   case NEON::BI__builtin_neon_vaddlv_s8: {
-    Int = Intrinsic::arm64_neon_saddlv;
+    Int = Intrinsic::aarch64_neon_saddlv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
@@ -5095,7 +5095,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 16));
   }
   case NEON::BI__builtin_neon_vaddlv_s16: {
-    Int = Intrinsic::arm64_neon_saddlv;
+    Int = Intrinsic::aarch64_neon_saddlv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
@@ -5104,7 +5104,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
   }
   case NEON::BI__builtin_neon_vaddlvq_s8: {
-    Int = Intrinsic::arm64_neon_saddlv;
+    Int = Intrinsic::aarch64_neon_saddlv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
@@ -5115,7 +5115,7 @@ Value *CodeGenFunction::EmitARM64Builtin
              llvm::IntegerType::get(getLLVMContext(), 16));
   }
   case NEON::BI__builtin_neon_vaddlvq_s16: {
-    Int = Intrinsic::arm64_neon_saddlv;
+    Int = Intrinsic::aarch64_neon_saddlv;
     Ty = llvm::IntegerType::get(getLLVMContext(), 32);
     VTy =
       llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
@@ -5125,13 +5125,13 @@ Value *CodeGenFunction::EmitARM64Builtin
   }
   case NEON::BI__builtin_neon_vsri_n_v:
   case NEON::BI__builtin_neon_vsriq_n_v: {
-    Int = Intrinsic::arm64_neon_vsri;
+    Int = Intrinsic::aarch64_neon_vsri;
     llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
     return EmitNeonCall(Intrin, Ops, "vsri_n");
   }
   case NEON::BI__builtin_neon_vsli_n_v:
   case NEON::BI__builtin_neon_vsliq_n_v: {
-    Int = Intrinsic::arm64_neon_vsli;
+    Int = Intrinsic::aarch64_neon_vsli;
     llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
     return EmitNeonCall(Intrin, Ops, "vsli_n");
   }
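
vsri_n and vsli_n keep dedicated intrinsics (merely renamed here) because they are insert-shifts: only the bit positions vacated by the shift are written from the source, the rest keep the destination's bits. A per-lane model of the shift-right-insert, valid for shift amounts 1 to 63 in this sketch (the instruction itself also allows 64):

  #include <cstdint>

  // SRI #n on a 64-bit lane: low 64-n bits from src >> n, top n bits
  // preserved from dst.
  static uint64_t vsri_lane_model(uint64_t dst, uint64_t src, unsigned n) {
    uint64_t written = ~0ULL >> n;  // bit positions the shift fills
    return (dst & ~written) | ((src >> n) & written);
  }
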
@@ -5142,7 +5142,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     return Builder.CreateAdd(Ops[0], Ops[1]);
   case NEON::BI__builtin_neon_vrsra_n_v:
   case NEON::BI__builtin_neon_vrsraq_n_v: {
-    Int = usgn ? Intrinsic::arm64_neon_urshl : Intrinsic::arm64_neon_srshl;
+    Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
     SmallVector<llvm::Value*,2> TmpOps;
     TmpOps.push_back(Ops[1]);
     TmpOps.push_back(Ops[2]);
@@ -5166,15 +5166,15 @@ Value *CodeGenFunction::EmitARM64Builtin
     switch (BuiltinID) {
     case NEON::BI__builtin_neon_vld1_x2_v:
     case NEON::BI__builtin_neon_vld1q_x2_v:
-      Int = Intrinsic::arm64_neon_ld1x2;
+      Int = Intrinsic::aarch64_neon_ld1x2;
       break;
     case NEON::BI__builtin_neon_vld1_x3_v:
     case NEON::BI__builtin_neon_vld1q_x3_v:
-      Int = Intrinsic::arm64_neon_ld1x3;
+      Int = Intrinsic::aarch64_neon_ld1x3;
       break;
     case NEON::BI__builtin_neon_vld1_x4_v:
     case NEON::BI__builtin_neon_vld1q_x4_v:
-      Int = Intrinsic::arm64_neon_ld1x4;
+      Int = Intrinsic::aarch64_neon_ld1x4;
       break;
     }
     Function *F = CGM.getIntrinsic(Int, Tys);
@@ -5195,15 +5195,15 @@ Value *CodeGenFunction::EmitARM64Builtin
     switch (BuiltinID) {
     case NEON::BI__builtin_neon_vst1_x2_v:
     case NEON::BI__builtin_neon_vst1q_x2_v:
-      Int = Intrinsic::arm64_neon_st1x2;
+      Int = Intrinsic::aarch64_neon_st1x2;
       break;
     case NEON::BI__builtin_neon_vst1_x3_v:
     case NEON::BI__builtin_neon_vst1q_x3_v:
-      Int = Intrinsic::arm64_neon_st1x3;
+      Int = Intrinsic::aarch64_neon_st1x3;
       break;
     case NEON::BI__builtin_neon_vst1_x4_v:
     case NEON::BI__builtin_neon_vst1q_x4_v:
-      Int = Intrinsic::arm64_neon_st1x4;
+      Int = Intrinsic::aarch64_neon_st1x4;
       break;
     }
     SmallVector<Value *, 4> IntOps(Ops.begin()+1, Ops.end());
@@ -5247,7 +5247,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
     llvm::Type *Tys[2] = { VTy, PTy };
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld2, Tys);
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
     Ops[0] = Builder.CreateBitCast(Ops[0],
                 llvm::PointerType::getUnqual(Ops[1]->getType()));
@@ -5258,7 +5258,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
     llvm::Type *Tys[2] = { VTy, PTy };
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld3, Tys);
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
     Ops[0] = Builder.CreateBitCast(Ops[0],
                 llvm::PointerType::getUnqual(Ops[1]->getType()));
@@ -5269,7 +5269,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
     llvm::Type *Tys[2] = { VTy, PTy };
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld4, Tys);
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
     Ops[0] = Builder.CreateBitCast(Ops[0],
                 llvm::PointerType::getUnqual(Ops[1]->getType()));
@@ -5281,7 +5281,7 @@ Value *CodeGenFunction::EmitARM64Builtin
       llvm::PointerType::getUnqual(VTy->getElementType());
     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
     llvm::Type *Tys[2] = { VTy, PTy };
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld2r, Tys);
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
     Ops[0] = Builder.CreateBitCast(Ops[0],
                 llvm::PointerType::getUnqual(Ops[1]->getType()));
@@ -5293,7 +5293,7 @@ Value *CodeGenFunction::EmitARM64Builtin
       llvm::PointerType::getUnqual(VTy->getElementType());
     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
     llvm::Type *Tys[2] = { VTy, PTy };
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld3r, Tys);
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
     Ops[0] = Builder.CreateBitCast(Ops[0],
                 llvm::PointerType::getUnqual(Ops[1]->getType()));
@@ -5305,7 +5305,7 @@ Value *CodeGenFunction::EmitARM64Builtin
       llvm::PointerType::getUnqual(VTy->getElementType());
     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
     llvm::Type *Tys[2] = { VTy, PTy };
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld4r, Tys);
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
     Ops[0] = Builder.CreateBitCast(Ops[0],
                 llvm::PointerType::getUnqual(Ops[1]->getType()));
@@ -5314,7 +5314,7 @@ Value *CodeGenFunction::EmitARM64Builtin
   case NEON::BI__builtin_neon_vld2_lane_v:
   case NEON::BI__builtin_neon_vld2q_lane_v: {
     llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld2lane, Tys);
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
     Ops.push_back(Ops[1]);
     Ops.erase(Ops.begin()+1);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -5330,7 +5330,7 @@ Value *CodeGenFunction::EmitARM64Builtin
   case NEON::BI__builtin_neon_vld3_lane_v:
   case NEON::BI__builtin_neon_vld3q_lane_v: {
     llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld3lane, Tys);
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
     Ops.push_back(Ops[1]);
     Ops.erase(Ops.begin()+1);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -5347,7 +5347,7 @@ Value *CodeGenFunction::EmitARM64Builtin
   case NEON::BI__builtin_neon_vld4_lane_v:
   case NEON::BI__builtin_neon_vld4q_lane_v: {
     llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
-    Function *F = CGM.getIntrinsic(Intrinsic::arm64_neon_ld4lane, Tys);
+    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
     Ops.push_back(Ops[1]);
     Ops.erase(Ops.begin()+1);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -5367,7 +5367,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     Ops.push_back(Ops[0]);
     Ops.erase(Ops.begin());
     llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st2, Tys),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
                         Ops, "");
   }
   case NEON::BI__builtin_neon_vst2_lane_v:
@@ -5377,7 +5377,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     Ops[2] = Builder.CreateZExt(Ops[2],
                 llvm::IntegerType::get(getLLVMContext(), 64));
     llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st2lane, Tys),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
                         Ops, "");
   }
   case NEON::BI__builtin_neon_vst3_v:
@@ -5385,7 +5385,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     Ops.push_back(Ops[0]);
     Ops.erase(Ops.begin());
     llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st3, Tys),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
                         Ops, "");
   }
   case NEON::BI__builtin_neon_vst3_lane_v:
@@ -5395,7 +5395,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     Ops[3] = Builder.CreateZExt(Ops[3],
                 llvm::IntegerType::get(getLLVMContext(), 64));
     llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st3lane, Tys),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
                         Ops, "");
   }
   case NEON::BI__builtin_neon_vst4_v:
@@ -5403,7 +5403,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     Ops.push_back(Ops[0]);
     Ops.erase(Ops.begin());
     llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st4, Tys),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
                         Ops, "");
   }
   case NEON::BI__builtin_neon_vst4_lane_v:
@@ -5413,7 +5413,7 @@ Value *CodeGenFunction::EmitARM64Builtin
     Ops[4] = Builder.CreateZExt(Ops[4],
                 llvm::IntegerType::get(getLLVMContext(), 64));
     llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_st4lane, Tys),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
                         Ops, "");
   }
   case NEON::BI__builtin_neon_vtrn_v:
@@ -5476,45 +5476,45 @@ Value *CodeGenFunction::EmitARM64Builtin
     return SV;
   }
   case NEON::BI__builtin_neon_vqtbl1q_v: {
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbl1, Ty),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
                         Ops, "vtbl1");
   }
   case NEON::BI__builtin_neon_vqtbl2q_v: {
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbl2, Ty),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
                         Ops, "vtbl2");
   }
   case NEON::BI__builtin_neon_vqtbl3q_v: {
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbl3, Ty),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
                         Ops, "vtbl3");
   }
   case NEON::BI__builtin_neon_vqtbl4q_v: {
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbl4, Ty),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
                         Ops, "vtbl4");
   }
   case NEON::BI__builtin_neon_vqtbx1q_v: {
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbx1, Ty),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
                         Ops, "vtbx1");
   }
   case NEON::BI__builtin_neon_vqtbx2q_v: {
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbx2, Ty),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
                         Ops, "vtbx2");
   }
   case NEON::BI__builtin_neon_vqtbx3q_v: {
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbx3, Ty),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
                         Ops, "vtbx3");
   }
   case NEON::BI__builtin_neon_vqtbx4q_v: {
-    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm64_neon_tbx4, Ty),
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
                         Ops, "vtbx4");
   }
   case NEON::BI__builtin_neon_vsqadd_v:
   case NEON::BI__builtin_neon_vsqaddq_v: {
-    Int = Intrinsic::arm64_neon_usqadd;
+    Int = Intrinsic::aarch64_neon_usqadd;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
   }
   case NEON::BI__builtin_neon_vuqadd_v:
   case NEON::BI__builtin_neon_vuqaddq_v: {
-    Int = Intrinsic::arm64_neon_suqadd;
+    Int = Intrinsic::aarch64_neon_suqadd;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
   }
   }

Modified: cfe/trunk/lib/CodeGen/CGObjCMac.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGObjCMac.cpp?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGObjCMac.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGObjCMac.cpp Sat May 24 07:52:07 2014
@@ -5034,7 +5034,8 @@ ObjCCommonTypesHelper::ObjCCommonTypesHe
 
   // arm64 targets use "int" ivar offset variables. All others,
   // including OS X x86_64 and Windows x86_64, use "long" ivar offsets.
-  if (CGM.getTarget().getTriple().getArch() == llvm::Triple::arm64)
+  if (CGM.getTarget().getTriple().getArch() == llvm::Triple::arm64 ||
+      CGM.getTarget().getTriple().getArch() == llvm::Triple::aarch64)
     IvarOffsetVarTy = IntTy;
   else
     IvarOffsetVarTy = LongTy;

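This CGObjCMac hunk is one of the few spots where the commit widens a check rather than renaming one: while both spellings of the 64-bit ARM target exist, arm64 and aarch64 must behave identically when choosing the ivar-offset type. Reduced to a predicate (illustrative, not code from the patch):

  #include "llvm/ADT/Triple.h"

  // Both 64-bit ARM arch enums get "int" ivar offsets; all others "long".
  static bool useIntIvarOffsets(const llvm::Triple &T) {
    return T.getArch() == llvm::Triple::arm64 ||
           T.getArch() == llvm::Triple::aarch64;
  }
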
Modified: cfe/trunk/lib/CodeGen/CodeGenFunction.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CodeGenFunction.h?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CodeGenFunction.h (original)
+++ cfe/trunk/lib/CodeGen/CodeGenFunction.h Sat May 24 07:52:07 2014
@@ -2226,14 +2226,14 @@ public:
   llvm::Value *EmitConcatVectors(llvm::Value *Lo, llvm::Value *Hi,
                                  llvm::Type *ArgTy);
   llvm::Value *EmitExtractHigh(llvm::Value *In, llvm::Type *ResTy);
-  // Helper functions for EmitARM64BuiltinExpr.
+  // Helper functions for EmitAArch64BuiltinExpr.
   llvm::Value *vectorWrapScalar8(llvm::Value *Op);
   llvm::Value *vectorWrapScalar16(llvm::Value *Op);
   llvm::Value *emitVectorWrappedScalar8Intrinsic(
       unsigned Int, SmallVectorImpl<llvm::Value *> &Ops, const char *Name);
   llvm::Value *emitVectorWrappedScalar16Intrinsic(
       unsigned Int, SmallVectorImpl<llvm::Value *> &Ops, const char *Name);
-  llvm::Value *EmitARM64BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+  llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
   llvm::Value *EmitNeon64Call(llvm::Function *F,
                               llvm::SmallVectorImpl<llvm::Value *> &O,
                               const char *name);

Modified: cfe/trunk/lib/CodeGen/TargetInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/TargetInfo.cpp?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/TargetInfo.cpp (original)
+++ cfe/trunk/lib/CodeGen/TargetInfo.cpp Sat May 24 07:52:07 2014
@@ -3104,12 +3104,12 @@ PPC64TargetCodeGenInfo::initDwarfEHRegSi
 }
 
 //===----------------------------------------------------------------------===//
-// ARM64 ABI Implementation
+// AArch64 ABI Implementation
 //===----------------------------------------------------------------------===//
 
 namespace {
 
-class ARM64ABIInfo : public ABIInfo {
+class AArch64ABIInfo : public ABIInfo {
 public:
   enum ABIKind {
     AAPCS = 0,
@@ -3120,7 +3120,7 @@ private:
   ABIKind Kind;
 
 public:
-  ARM64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
+  AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
 
 private:
   ABIKind getABIKind() const { return Kind; }
@@ -3212,10 +3212,10 @@ private:
   }
 };
 
-class ARM64TargetCodeGenInfo : public TargetCodeGenInfo {
+class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
 public:
-  ARM64TargetCodeGenInfo(CodeGenTypes &CGT, ARM64ABIInfo::ABIKind Kind)
-      : TargetCodeGenInfo(new ARM64ABIInfo(CGT, Kind)) {}
+  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
+      : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
 
   StringRef getARCRetainAutoreleasedReturnValueMarker() const {
     return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
@@ -3231,12 +3231,12 @@ static bool isHomogeneousAggregate(QualT
                                    ASTContext &Context,
                                    uint64_t *HAMembers = nullptr);
 
-ABIArgInfo ARM64ABIInfo::classifyArgumentType(QualType Ty,
-                                              unsigned &AllocatedVFP,
-                                              bool &IsHA,
-                                              unsigned &AllocatedGPR,
-                                              bool &IsSmallAggr,
-                                              bool IsNamedArg) const {
+ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty,
+                                                unsigned &AllocatedVFP,
+                                                bool &IsHA,
+                                                unsigned &AllocatedGPR,
+                                                bool &IsSmallAggr,
+                                                bool IsNamedArg) const {
   // Handle illegal vector types here.
   if (isIllegalVectorType(Ty)) {
     uint64_t Size = getContext().getTypeSize(Ty);
@@ -3346,7 +3346,7 @@ ABIArgInfo ARM64ABIInfo::classifyArgumen
   return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
 }
 
-ABIArgInfo ARM64ABIInfo::classifyReturnType(QualType RetTy) const {
+ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
   if (RetTy->isVoidType())
     return ABIArgInfo::getIgnore();
 
@@ -3382,8 +3382,8 @@ ABIArgInfo ARM64ABIInfo::classifyReturnT
   return ABIArgInfo::getIndirect(0);
 }
 
-/// isIllegalVectorType - check whether the vector type is legal for ARM64.
-bool ARM64ABIInfo::isIllegalVectorType(QualType Ty) const {
+/// isIllegalVectorType - check whether the vector type is legal for AArch64.
+bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
   if (const VectorType *VT = Ty->getAs<VectorType>()) {
     // Check whether VT is legal.
     unsigned NumElements = VT->getNumElements();
@@ -3624,7 +3624,7 @@ static llvm::Value *EmitAArch64VAArg(llv
   return ResAddr;
 }
 
-llvm::Value *ARM64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
+llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
                                             CodeGenFunction &CGF) const {
 
   unsigned AllocatedGPR = 0, AllocatedVFP = 0;
@@ -3636,7 +3636,7 @@ llvm::Value *ARM64ABIInfo::EmitAAPCSVAAr
                           AI.isIndirect(), CGF);
 }
 
-llvm::Value *ARM64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
+llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
                                              CodeGenFunction &CGF) const {
   // We do not support va_arg for aggregates or illegal vector types.
   // Lower VAArg here for these cases and use the LLVM va_arg instruction for
@@ -6473,11 +6473,11 @@ const TargetCodeGenInfo &CodeGenModule::
   case llvm::Triple::aarch64_be:
   case llvm::Triple::arm64:
   case llvm::Triple::arm64_be: {
-    ARM64ABIInfo::ABIKind Kind = ARM64ABIInfo::AAPCS;
+    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
     if (strcmp(getTarget().getABI(), "darwinpcs") == 0)
-      Kind = ARM64ABIInfo::DarwinPCS;
+      Kind = AArch64ABIInfo::DarwinPCS;
 
-    return *(TheTargetCodeGenInfo = new ARM64TargetCodeGenInfo(Types, Kind));
+    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
   }
 
   case llvm::Triple::arm:

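For readers tracking the ABI side of the rename: the classifyArgumentType signature above carries IsHA and AllocatedVFP because AAPCS64 passes homogeneous floating-point aggregates in consecutive FP registers. A minimal illustration of such an argument (not part of this patch; names hypothetical):

  /* A four-float struct is a homogeneous floating-point aggregate, so the
     renamed AArch64ABIInfo::classifyArgumentType reports IsHA and the
     argument consumes four FP registers (s0-s3) under AAPCS. */
  typedef struct { float x, y, z, w; } HFA4;

  float hfa_sum(HFA4 v) { return v.x + v.y + v.z + v.w; }
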
Modified: cfe/trunk/lib/Driver/ToolChains.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Driver/ToolChains.cpp?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/lib/Driver/ToolChains.cpp (original)
+++ cfe/trunk/lib/Driver/ToolChains.cpp Sat May 24 07:52:07 2014
@@ -400,7 +400,8 @@ void DarwinClang::AddLinkRuntimeLibArgs(
     // it never went into the SDK.
     // Linking against libgcc_s.1 isn't needed for iOS 5.0+
     if (isIPhoneOSVersionLT(5, 0) && !isTargetIOSSimulator() &&
-        getTriple().getArch() != llvm::Triple::arm64)
+        (getTriple().getArch() != llvm::Triple::arm64 &&
+         getTriple().getArch() != llvm::Triple::aarch64))
       CmdArgs.push_back("-lgcc_s.1");
 
     // We currently always need a static runtime library for iOS.
@@ -520,6 +521,7 @@ void Darwin::AddDeploymentTarget(Derived
     if (!OSXTarget.empty() && !iOSTarget.empty()) {
       if (getTriple().getArch() == llvm::Triple::arm ||
           getTriple().getArch() == llvm::Triple::arm64 ||
+          getTriple().getArch() == llvm::Triple::aarch64 ||
           getTriple().getArch() == llvm::Triple::thumb)
         OSXTarget = "";
       else
@@ -656,6 +658,7 @@ void DarwinClang::AddCCKextLibArgs(const
   // Use the newer cc_kext for iOS ARM after 6.0.
   if (!isTargetIPhoneOS() || isTargetIOSSimulator() ||
       getTriple().getArch() == llvm::Triple::arm64 ||
+      getTriple().getArch() == llvm::Triple::aarch64 ||
       !isIPhoneOSVersionLT(6, 0)) {
     llvm::sys::path::append(P, "libclang_rt.cc_kext.a");
   } else {
@@ -926,7 +929,8 @@ DerivedArgList *Darwin::TranslateArgs(co
   // but we can't check the deployment target in the translation code until
   // it is set here.
   if (isTargetIOSBased() && !isIPhoneOSVersionLT(6, 0) &&
-      getTriple().getArch() != llvm::Triple::arm64) {
+      getTriple().getArch() != llvm::Triple::arm64 &&
+      getTriple().getArch() != llvm::Triple::aarch64) {
     for (ArgList::iterator it = DAL->begin(), ie = DAL->end(); it != ie; ) {
       Arg *A = *it;
       ++it;
@@ -993,7 +997,8 @@ bool MachO::isPIEDefault() const {
 
 bool MachO::isPICDefaultForced() const {
   return (getArch() == llvm::Triple::x86_64 ||
-          getArch() == llvm::Triple::arm64);
+          getArch() == llvm::Triple::arm64 ||
+          getArch() == llvm::Triple::aarch64);
 }
 
 bool MachO::SupportsProfiling() const {
@@ -1082,7 +1087,8 @@ void Darwin::addStartObjectFileArgs(cons
           if (isTargetIOSSimulator()) {
             ; // iOS simulator does not need crt1.o.
           } else if (isTargetIPhoneOS()) {
-            if (getArch() == llvm::Triple::arm64)
+            if (getArch() == llvm::Triple::arm64 ||
+                getArch() == llvm::Triple::aarch64)
               ; // iOS does not need any crt1 files for arm64
             else if (isIPhoneOSVersionLT(3, 1))
               CmdArgs.push_back("-lcrt1.o");

Modified: cfe/trunk/lib/Driver/Tools.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Driver/Tools.cpp?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/lib/Driver/Tools.cpp (original)
+++ cfe/trunk/lib/Driver/Tools.cpp Sat May 24 07:52:07 2014
@@ -841,8 +841,9 @@ void Clang::AddARMTargetArgs(const ArgLi
   }
 }
 
-/// getARM64TargetCPU - Get the (LLVM) name of the ARM64 cpu we are targeting.
-static std::string getARM64TargetCPU(const ArgList &Args) {
+/// getAArch64TargetCPU - Get the (LLVM) name of the AArch64 cpu we are
+/// targeting.
+static std::string getAArch64TargetCPU(const ArgList &Args) {
   // If we have -mcpu=, use that.
   if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
     StringRef MCPU = A->getValue();
@@ -864,8 +865,8 @@ static std::string getARM64TargetCPU(con
   return "generic";
 }
 
-void Clang::AddARM64TargetArgs(const ArgList &Args,
-                               ArgStringList &CmdArgs) const {
+void Clang::AddAArch64TargetArgs(const ArgList &Args,
+                                 ArgStringList &CmdArgs) const {
   std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
   llvm::Triple Triple(TripleStr);
 
@@ -890,11 +891,11 @@ void Clang::AddARM64TargetArgs(const Arg
   CmdArgs.push_back(ABIName);
 
   CmdArgs.push_back("-target-cpu");
-  CmdArgs.push_back(Args.MakeArgString(getARM64TargetCPU(Args)));
+  CmdArgs.push_back(Args.MakeArgString(getAArch64TargetCPU(Args)));
 
   if (Args.hasArg(options::OPT_mstrict_align)) {
     CmdArgs.push_back("-backend-option");
-    CmdArgs.push_back("-arm64-strict-align");
+    CmdArgs.push_back("-aarch64-strict-align");
   }
 }
 
@@ -1327,7 +1328,7 @@ static std::string getCPUName(const ArgL
   case llvm::Triple::aarch64_be:
   case llvm::Triple::arm64:
   case llvm::Triple::arm64_be:
-    return getARM64TargetCPU(Args);
+    return getAArch64TargetCPU(Args);
 
   case llvm::Triple::arm:
   case llvm::Triple::armeb:
@@ -2455,7 +2456,8 @@ void Clang::ConstructJob(Compilation &C,
   // PIC or PIE options above, if these show up, PIC is disabled.
   llvm::Triple Triple(TripleStr);
   if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6) ||
-                       Triple.getArch() == llvm::Triple::arm64))
+                       Triple.getArch() == llvm::Triple::arm64 ||
+                       Triple.getArch() == llvm::Triple::aarch64))
     PIC = PIE = false;
   if (Args.hasArg(options::OPT_static))
     PIC = PIE = false;
@@ -2782,9 +2784,11 @@ void Clang::ConstructJob(Compilation &C,
     AddARMTargetArgs(Args, CmdArgs, KernelOrKext);
     break;
 
+  case llvm::Triple::aarch64:
+  case llvm::Triple::aarch64_be:
   case llvm::Triple::arm64:
   case llvm::Triple::arm64_be:
-    AddARM64TargetArgs(Args, CmdArgs);
+    AddAArch64TargetArgs(Args, CmdArgs);
     break;
 
   case llvm::Triple::mips:
@@ -3408,16 +3412,20 @@ void Clang::ConstructJob(Compilation &C,
                                  options::OPT_munaligned_access)) {
       if (A->getOption().matches(options::OPT_mno_unaligned_access)) {
         CmdArgs.push_back("-backend-option");
-        if (getToolChain().getTriple().getArch() == llvm::Triple::arm64 ||
+        if (getToolChain().getTriple().getArch() == llvm::Triple::aarch64 ||
+            getToolChain().getTriple().getArch() == llvm::Triple::aarch64_be ||
+            getToolChain().getTriple().getArch() == llvm::Triple::arm64 ||
             getToolChain().getTriple().getArch() == llvm::Triple::arm64_be)
-          CmdArgs.push_back("-arm64-strict-align");
+          CmdArgs.push_back("-aarch64-strict-align");
         else
           CmdArgs.push_back("-arm-strict-align");
       } else {
         CmdArgs.push_back("-backend-option");
-        if (getToolChain().getTriple().getArch() == llvm::Triple::arm64 ||
+        if (getToolChain().getTriple().getArch() == llvm::Triple::aarch64 ||
+            getToolChain().getTriple().getArch() == llvm::Triple::aarch64_be ||
+            getToolChain().getTriple().getArch() == llvm::Triple::arm64 ||
             getToolChain().getTriple().getArch() == llvm::Triple::arm64_be)
-          CmdArgs.push_back("-arm64-no-strict-align");
+          CmdArgs.push_back("-aarch64-no-strict-align");
         else
           CmdArgs.push_back("-arm-no-strict-align");
       }

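A usage note on the Tools.cpp hunks above: for all four aarch64/arm64 triples, -mno-unaligned-access now reaches the backend as -aarch64-strict-align (and -munaligned-access as -aarch64-no-strict-align) instead of the old -arm64-* spellings. A hedged sketch of the kind of code whose lowering these options govern (illustrative only; names hypothetical):

  /* The packed field below is not naturally aligned, so with
     -aarch64-strict-align in effect the backend must avoid emitting a
     plain 4-byte load for this access. */
  struct __attribute__((packed)) Header {
    char tag;
    int value;  /* at offset 1: misaligned for a word load */
  };

  int read_value(struct Header *h) { return h->value; }
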
Modified: cfe/trunk/lib/Frontend/InitHeaderSearch.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Frontend/InitHeaderSearch.cpp?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/lib/Frontend/InitHeaderSearch.cpp (original)
+++ cfe/trunk/lib/Frontend/InitHeaderSearch.cpp Sat May 24 07:52:07 2014
@@ -379,6 +379,7 @@ AddDefaultCPlusPlusIncludePaths(const ll
                                   "arm-apple-darwin10", "v6", "", triple);
       break;
 
+    case llvm::Triple::aarch64:
     case llvm::Triple::arm64:
       AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
                                   "arm64-apple-darwin10", "", "", triple);

Modified: cfe/trunk/lib/Sema/SemaChecking.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Sema/SemaChecking.cpp?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/lib/Sema/SemaChecking.cpp (original)
+++ cfe/trunk/lib/Sema/SemaChecking.cpp Sat May 24 07:52:07 2014
@@ -313,7 +313,7 @@ Sema::CheckBuiltinFunctionCall(unsigned
       case llvm::Triple::aarch64_be:
       case llvm::Triple::arm64:
       case llvm::Triple::arm64_be:
-        if (CheckARM64BuiltinFunctionCall(BuiltinID, TheCall))
+        if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
           return ExprError();
         break;
       case llvm::Triple::mips:
@@ -473,11 +473,11 @@ bool Sema::CheckARMBuiltinExclusiveCall(
                                         unsigned MaxWidth) {
   assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
           BuiltinID == ARM::BI__builtin_arm_strex ||
-          BuiltinID == ARM64::BI__builtin_arm_ldrex ||
-          BuiltinID == ARM64::BI__builtin_arm_strex) &&
+          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
+          BuiltinID == AArch64::BI__builtin_arm_strex) &&
          "unexpected ARM builtin");
   bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
-                 BuiltinID == ARM64::BI__builtin_arm_ldrex;
+                 BuiltinID == AArch64::BI__builtin_arm_ldrex;
 
   DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
 
@@ -608,12 +608,12 @@ bool Sema::CheckARMBuiltinFunctionCall(u
   return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
 }
 
-bool Sema::CheckARM64BuiltinFunctionCall(unsigned BuiltinID,
+bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
                                            CallExpr *TheCall) {
   llvm::APSInt Result;
 
-  if (BuiltinID == ARM64::BI__builtin_arm_ldrex ||
-      BuiltinID == ARM64::BI__builtin_arm_strex) {
+  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
+      BuiltinID == AArch64::BI__builtin_arm_strex) {
     return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
   }
 

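For context, the AArch64 path above passes MaxWidth = 128 to CheckARMBuiltinExclusiveCall, i.e. the exclusive-access builtins accept wider operands there than on 32-bit ARM. A minimal sketch of what that admits (illustrative only, not from this patch; function name hypothetical):

  /* On an AArch64 target, a 128-bit operand passes the width check in
     CheckAArch64BuiltinFunctionCall; narrower integer, floating-point and
     pointer operands are accepted as before. */
  __int128 load_pair(__int128 *p) {
    return __builtin_arm_ldrex(p);  /* lowered to a load-exclusive pair */
  }
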
Modified: cfe/trunk/test/CodeGen/arm64-crc32.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64-crc32.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64-crc32.c (original)
+++ cfe/trunk/test/CodeGen/arm64-crc32.c Sat May 24 07:52:07 2014
@@ -6,50 +6,50 @@ int crc32b(int a, char b)
 {
         return __builtin_arm_crc32b(a,b);
 // CHECK: [[T0:%[0-9]+]] = zext i8 %b to i32
-// CHECK: call i32 @llvm.arm64.crc32b(i32 %a, i32 [[T0]])
+// CHECK: call i32 @llvm.aarch64.crc32b(i32 %a, i32 [[T0]])
 }
 
 int crc32cb(int a, char b)
 {
         return __builtin_arm_crc32cb(a,b);
 // CHECK: [[T0:%[0-9]+]] = zext i8 %b to i32
-// CHECK: call i32 @llvm.arm64.crc32cb(i32 %a, i32 [[T0]])
+// CHECK: call i32 @llvm.aarch64.crc32cb(i32 %a, i32 [[T0]])
 }
 
 int crc32h(int a, short b)
 {
         return __builtin_arm_crc32h(a,b);
 // CHECK: [[T0:%[0-9]+]] = zext i16 %b to i32
-// CHECK: call i32 @llvm.arm64.crc32h(i32 %a, i32 [[T0]])
+// CHECK: call i32 @llvm.aarch64.crc32h(i32 %a, i32 [[T0]])
 }
 
 int crc32ch(int a, short b)
 {
         return __builtin_arm_crc32ch(a,b);
 // CHECK: [[T0:%[0-9]+]] = zext i16 %b to i32
-// CHECK: call i32 @llvm.arm64.crc32ch(i32 %a, i32 [[T0]])
+// CHECK: call i32 @llvm.aarch64.crc32ch(i32 %a, i32 [[T0]])
 }
 
 int crc32w(int a, int b)
 {
         return __builtin_arm_crc32w(a,b);
-// CHECK: call i32 @llvm.arm64.crc32w(i32 %a, i32 %b)
+// CHECK: call i32 @llvm.aarch64.crc32w(i32 %a, i32 %b)
 }
 
 int crc32cw(int a, int b)
 {
         return __builtin_arm_crc32cw(a,b);
-// CHECK: call i32 @llvm.arm64.crc32cw(i32 %a, i32 %b)
+// CHECK: call i32 @llvm.aarch64.crc32cw(i32 %a, i32 %b)
 }
 
 int crc32d(int a, long b)
 {
         return __builtin_arm_crc32d(a,b);
-// CHECK: call i32 @llvm.arm64.crc32x(i32 %a, i64 %b)
+// CHECK: call i32 @llvm.aarch64.crc32x(i32 %a, i64 %b)
 }
 
 int crc32cd(int a, long b)
 {
         return __builtin_arm_crc32cd(a,b);
-// CHECK: call i32 @llvm.arm64.crc32cx(i32 %a, i64 %b)
+// CHECK: call i32 @llvm.aarch64.crc32cx(i32 %a, i64 %b)
 }

Modified: cfe/trunk/test/CodeGen/arm64-vrnd.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64-vrnd.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64-vrnd.c (original)
+++ cfe/trunk/test/CodeGen/arm64-vrnd.c Sat May 24 07:52:07 2014
@@ -11,13 +11,13 @@ int64x2_t rnd5(float64x2_t a) { return v
 
 
 int32x2_t rnd7(float32x2_t a) { return vrndn_f32(a); }
-// CHECK: call <2 x float> @llvm.arm64.neon.frintn.v2f32(<2 x float>
+// CHECK: call <2 x float> @llvm.aarch64.neon.frintn.v2f32(<2 x float>
 int32x4_t rnd8(float32x4_t a) { return vrndnq_f32(a); }
-// CHECK: call <4 x float> @llvm.arm64.neon.frintn.v4f32(<4 x float>
+// CHECK: call <4 x float> @llvm.aarch64.neon.frintn.v4f32(<4 x float>
 int64x2_t rnd9(float64x2_t a) { return vrndnq_f64(a); }
-// CHECK: call <2 x double> @llvm.arm64.neon.frintn.v2f64(<2 x double>
+// CHECK: call <2 x double> @llvm.aarch64.neon.frintn.v2f64(<2 x double>
 int64x2_t rnd10(float64x2_t a) { return vrndnq_f64(a); }
-// CHECK: call <2 x double> @llvm.arm64.neon.frintn.v2f64(<2 x double>
+// CHECK: call <2 x double> @llvm.aarch64.neon.frintn.v2f64(<2 x double>
 
 int32x2_t rnd11(float32x2_t a) { return vrndm_f32(a); }
 // CHECK: call <2 x float> @llvm.floor.v2f32(<2 x float>

Modified: cfe/trunk/test/CodeGen/arm64-vrsqrt.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64-vrsqrt.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64-vrsqrt.c (original)
+++ cfe/trunk/test/CodeGen/arm64-vrsqrt.c Sat May 24 07:52:07 2014
@@ -4,40 +4,40 @@
 
 uint32x2_t test_vrsqrte_u32(uint32x2_t in) {
   // CHECK-LABEL: @test_vrsqrte_u32
-  // CHECK: call <2 x i32> @llvm.arm64.neon.ursqrte.v2i32(<2 x i32> %in)
+  // CHECK: call <2 x i32> @llvm.aarch64.neon.ursqrte.v2i32(<2 x i32> %in)
   return vrsqrte_u32(in);
 }
 
 float32x2_t test_vrsqrte_f32(float32x2_t in) {
   // CHECK-LABEL: @test_vrsqrte_f32
-  // CHECK: call <2 x float> @llvm.arm64.neon.frsqrte.v2f32(<2 x float> %in)
+  // CHECK: call <2 x float> @llvm.aarch64.neon.frsqrte.v2f32(<2 x float> %in)
   return vrsqrte_f32(in);
 }
 
 
 uint32x4_t test_vrsqrteq_u32(uint32x4_t in) {
   // CHECK-LABEL: @test_vrsqrteq_u32
-  // CHECK: call <4 x i32> @llvm.arm64.neon.ursqrte.v4i32(<4 x i32> %in)
+  // CHECK: call <4 x i32> @llvm.aarch64.neon.ursqrte.v4i32(<4 x i32> %in)
   return vrsqrteq_u32(in);
 }
 
 float32x4_t test_vrsqrteq_f32(float32x4_t in) {
   // CHECK-LABEL: @test_vrsqrteq_f32
-  // CHECK: call <4 x float> @llvm.arm64.neon.frsqrte.v4f32(<4 x float> %in)
+  // CHECK: call <4 x float> @llvm.aarch64.neon.frsqrte.v4f32(<4 x float> %in)
   return vrsqrteq_f32(in);
 }
 
 
 float32x2_t test_vrsqrts_f32(float32x2_t est, float32x2_t val) {
   // CHECK-LABEL: @test_vrsqrts_f32
-  // CHECK: call <2 x float> @llvm.arm64.neon.frsqrts.v2f32(<2 x float> %est, <2 x float> %val)
+  // CHECK: call <2 x float> @llvm.aarch64.neon.frsqrts.v2f32(<2 x float> %est, <2 x float> %val)
   return vrsqrts_f32(est, val);
 }
 
 
 float32x4_t test_vrsqrtsq_f32(float32x4_t est, float32x4_t val) {
   // CHECK-LABEL: @test_vrsqrtsq_f32
-  // CHECK: call <4 x float> @llvm.arm64.neon.frsqrts.v4f32(<4 x float> %est, <4 x float> %val)
+  // CHECK: call <4 x float> @llvm.aarch64.neon.frsqrts.v4f32(<4 x float> %est, <4 x float> %val)
   return vrsqrtsq_f32(est, val);
 }
 

Modified: cfe/trunk/test/CodeGen/arm64_vCMP.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vCMP.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vCMP.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vCMP.c Sat May 24 07:52:07 2014
@@ -7,7 +7,7 @@
 int64x2_t test_vabsq_s64(int64x2_t a1) {
   // CHECK: test_vabsq_s64
   return vabsq_s64(a1);
-  // CHECK: llvm.arm64.neon.abs.v2i64
+  // CHECK: llvm.aarch64.neon.abs.v2i64
   // CHECK-NEXT: ret
 }
 
@@ -103,6 +103,6 @@ uint64x2_t test_vcltq_u64(uint64x2_t a1,
 int64x2_t test_vqabsq_s64(int64x2_t a1) {
   // CHECK: test_vqabsq_s64
   return vqabsq_s64(a1);
-  // CHECK: llvm.arm64.neon.sqabs.v2i64(<2 x i64> %a1)
+  // CHECK: llvm.aarch64.neon.sqabs.v2i64(<2 x i64> %a1)
   // CHECK-NEXT: ret
 }

Modified: cfe/trunk/test/CodeGen/arm64_vLdStNum_lane.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vLdStNum_lane.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vLdStNum_lane.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vLdStNum_lane.c Sat May 24 07:52:07 2014
@@ -6,25 +6,25 @@
 int64x2x2_t test_vld2q_lane_s64(const void * a1, int64x2x2_t a2) {
   // CHECK: test_vld2q_lane_s64
   return vld2q_lane_s64(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.ld2lane.v2i64.p0i8
+  // CHECK: llvm.aarch64.neon.ld2lane.v2i64.p0i8
 }
 
 uint64x2x2_t test_vld2q_lane_u64(const void * a1, uint64x2x2_t a2) {
   // CHECK: test_vld2q_lane_u64
   return vld2q_lane_u64(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.ld2lane.v2i64.p0i8
+  // CHECK: llvm.aarch64.neon.ld2lane.v2i64.p0i8
 }
 
 int64x1x2_t test_vld2_lane_s64(const void * a1, int64x1x2_t a2) {
   // CHECK: test_vld2_lane_s64
   return vld2_lane_s64(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld2lane.v1i64.p0i8
+  // CHECK: llvm.aarch64.neon.ld2lane.v1i64.p0i8
 }
 
 uint64x1x2_t test_vld2_lane_u64(const void * a1, uint64x1x2_t a2) {
   // CHECK: test_vld2_lane_u64
   return vld2_lane_u64(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld2lane.v1i64.p0i8
+  // CHECK: llvm.aarch64.neon.ld2lane.v1i64.p0i8
 }
 
 poly8x16x2_t test_vld2q_lane_p8(const void * a1, poly8x16x2_t a2) {
@@ -37,91 +37,91 @@ poly8x16x2_t test_vld2q_lane_p8(const vo
 uint8x16x2_t test_vld2q_lane_u8(const void * a1, uint8x16x2_t a2) {
   // CHECK: test_vld2q_lane_u8
   return vld2q_lane_u8(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld2lane.v16i8.p0i8
+  // CHECK: llvm.aarch64.neon.ld2lane.v16i8.p0i8
 }
 
 int64x2x3_t test_vld3q_lane_s64(const void * a1, int64x2x3_t a2) {
   // CHECK: test_vld3q_lane_s64
   return vld3q_lane_s64(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.ld3lane.v2i64.p0i8
+  // CHECK: llvm.aarch64.neon.ld3lane.v2i64.p0i8
 }
 
 uint64x2x3_t test_vld3q_lane_u64(const void * a1, uint64x2x3_t a2) {
   // CHECK: test_vld3q_lane_u64
   return vld3q_lane_u64(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.ld3lane.v2i64.p0i8
+  // CHECK: llvm.aarch64.neon.ld3lane.v2i64.p0i8
 }
 
 int64x1x3_t test_vld3_lane_s64(const void * a1, int64x1x3_t a2) {
   // CHECK: test_vld3_lane_s64
   return vld3_lane_s64(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld3lane.v1i64.p0i8
+  // CHECK: llvm.aarch64.neon.ld3lane.v1i64.p0i8
 }
 
 uint64x1x3_t test_vld3_lane_u64(const void * a1, uint64x1x3_t a2) {
   // CHECK: test_vld3_lane_u64
   return vld3_lane_u64(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld3lane.v1i64.p0i8
+  // CHECK: llvm.aarch64.neon.ld3lane.v1i64.p0i8
 }
 
 int8x8x3_t test_vld3_lane_s8(const void * a1, int8x8x3_t a2) {
   // CHECK: test_vld3_lane_s8
   return vld3_lane_s8(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld3lane.v8i8.p0i8
+  // CHECK: llvm.aarch64.neon.ld3lane.v8i8.p0i8
 }
 
 poly8x16x3_t test_vld3q_lane_p8(const void * a1, poly8x16x3_t a2) {
   // CHECK: test_vld3q_lane_p8
   return vld3q_lane_p8(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld3lane.v16i8.p0i8
+  // CHECK: llvm.aarch64.neon.ld3lane.v16i8.p0i8
 }
 
 uint8x16x3_t test_vld3q_lane_u8(const void * a1, uint8x16x3_t a2) {
   // CHECK: test_vld3q_lane_u8
   return vld3q_lane_u8(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld3lane.v16i8.p0i8
+  // CHECK: llvm.aarch64.neon.ld3lane.v16i8.p0i8
 }
 
 int64x2x4_t test_vld4q_lane_s64(const void * a1, int64x2x4_t a2) {
   // CHECK: test_vld4q_lane_s64
   return vld4q_lane_s64(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld4lane.v2i64.p0i8
+  // CHECK: llvm.aarch64.neon.ld4lane.v2i64.p0i8
 }
 
 uint64x2x4_t test_vld4q_lane_u64(const void * a1, uint64x2x4_t a2) {
   // CHECK: test_vld4q_lane_u64
   return vld4q_lane_u64(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld4lane.v2i64.p0i8
+  // CHECK: llvm.aarch64.neon.ld4lane.v2i64.p0i8
 }
 
 int64x1x4_t test_vld4_lane_s64(const void * a1, int64x1x4_t a2) {
   // CHECK: test_vld4_lane_s64
   return vld4_lane_s64(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld4lane.v1i64.p0i8
+  // CHECK: llvm.aarch64.neon.ld4lane.v1i64.p0i8
 }
 
 uint64x1x4_t test_vld4_lane_u64(const void * a1, uint64x1x4_t a2) {
   // CHECK: test_vld4_lane_u64
   return vld4_lane_u64(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld4lane.v1i64.p0i8
+  // CHECK: llvm.aarch64.neon.ld4lane.v1i64.p0i8
 }
 
 int8x8x4_t test_vld4_lane_s8(const void * a1, int8x8x4_t a2) {
   // CHECK: test_vld4_lane_s8
   return vld4_lane_s8(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld4lane.v8i8.p0i8
+  // CHECK: llvm.aarch64.neon.ld4lane.v8i8.p0i8
 }
 
 uint8x8x4_t test_vld4_lane_u8(const void * a1, uint8x8x4_t a2) {
   // CHECK: test_vld4_lane_u8
   return vld4_lane_u8(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld4lane.v8i8.p0i8
+  // CHECK: llvm.aarch64.neon.ld4lane.v8i8.p0i8
 }
 
 poly8x16x4_t test_vld4q_lane_p8(const void * a1, poly8x16x4_t a2) {
   // CHECK: test_vld4q_lane_p8
   return vld4q_lane_p8(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld4lane.v16i8.p0i8
+  // CHECK: llvm.aarch64.neon.ld4lane.v16i8.p0i8
 }
 
 int8x16x4_t test_vld4q_lane_s8(const void * a1, int8x16x4_t a2) {
@@ -136,6 +136,6 @@ int8x16x4_t test_vld4q_lane_s8(const voi
 uint8x16x4_t test_vld4q_lane_u8(const void * a1, uint8x16x4_t a2) {
   // CHECK: test_vld4q_lane_u8
   return vld4q_lane_u8(a1, a2, 0);
-  // CHECK: llvm.arm64.neon.ld4lane.v16i8.p0i8
+  // CHECK: llvm.aarch64.neon.ld4lane.v16i8.p0i8
 }
 

Modified: cfe/trunk/test/CodeGen/arm64_vMaxMin.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vMaxMin.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vMaxMin.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vMaxMin.c Sat May 24 07:52:07 2014
@@ -9,75 +9,75 @@
 int8_t test_vmaxv_s8(int8x8_t a1) {
   // CHECK: test_vmaxv_s8
   return vmaxv_s8(a1);
-  // CHECK @llvm.arm64.neon.smaxv.i32.v8i8
+  // CHECK @llvm.aarch64.neon.smaxv.i32.v8i8
 }
 
 uint16_t test_vminvq_u16(uint16x8_t a1) {
   // CHECK: test_vminvq_u16
   return vminvq_u16(a1);
-  // CHECK llvm.arm64.neon.uminv.i16.v8i16
+  // CHECK llvm.aarch64.neon.uminv.i16.v8i16
 }
 
 // Test a representative sample of 8 and 16 bit, signed and unsigned, 64 and 128 bit pairwise ops
 uint8x8_t test_vmin_u8(uint8x8_t a1, uint8x8_t a2) {
   // CHECK: test_vmin_u8
   return vmin_u8(a1, a2);
-  // CHECK llvm.arm64.neon.umin.v8i8
+  // CHECK llvm.aarch64.neon.umin.v8i8
 }
 
 uint8x16_t test_vminq_u8(uint8x16_t a1, uint8x16_t a2) {
   // CHECK: test_vminq_u8
   return vminq_u8(a1, a2);
-  // CHECK llvm.arm64.neon.umin.v16i8
+  // CHECK llvm.aarch64.neon.umin.v16i8
 }
 
 int16x8_t test_vmaxq_s16(int16x8_t a1, int16x8_t a2) {
   // CHECK: test_vmaxq_s16
   return vmaxq_s16(a1, a2);
-  // CHECK llvm.arm64.neon.smax.v8i16
+  // CHECK llvm.aarch64.neon.smax.v8i16
 }
 
 // Test the more complicated cases of [suf]32 and f64
 float64x2_t test_vmaxq_f64(float64x2_t a1, float64x2_t a2) {
   // CHECK: test_vmaxq_f64
   return vmaxq_f64(a1, a2);
-  // CHECK llvm.arm64.neon.fmax.v2f64
+  // CHECK llvm.aarch64.neon.fmax.v2f64
 }
 
 float32x4_t test_vmaxq_f32(float32x4_t a1, float32x4_t a2) {
   // CHECK: test_vmaxq_f32
   return vmaxq_f32(a1, a2);
-  // CHECK llvm.arm64.neon.fmax.v4f32
+  // CHECK llvm.aarch64.neon.fmax.v4f32
 }
 
 float64x2_t test_vminq_f64(float64x2_t a1, float64x2_t a2) {
   // CHECK: test_vminq_f64
   return vminq_f64(a1, a2);
-  // CHECK llvm.arm64.neon.fmin.v2f64
+  // CHECK llvm.aarch64.neon.fmin.v2f64
 }
 
 float32x2_t test_vmax_f32(float32x2_t a1, float32x2_t a2) {
   // CHECK: test_vmax_f32
   return vmax_f32(a1, a2);
-  // CHECK llvm.arm64.neon.fmax.v2f32
+  // CHECK llvm.aarch64.neon.fmax.v2f32
 }
 
 int32x2_t test_vmax_s32(int32x2_t a1, int32x2_t a2) {
   // CHECK: test_vmax_s32
   return vmax_s32(a1, a2);
-  // CHECK llvm.arm64.neon.smax.v2i32
+  // CHECK llvm.aarch64.neon.smax.v2i32
 }
 
 uint32x2_t test_vmin_u32(uint32x2_t a1, uint32x2_t a2) {
   // CHECK: test_vmin_u32
   return vmin_u32(a1, a2);
-  // CHECK llvm.arm64.neon.umin.v2i32
+  // CHECK llvm.aarch64.neon.umin.v2i32
 }
 
 float32_t test_vmaxnmv_f32(float32x2_t a1) {
   // CHECK: test_vmaxnmv_f32
   return vmaxnmv_f32(a1);
-  // CHECK: llvm.arm64.neon.fmaxnmv.f32.v2f32
+  // CHECK: llvm.aarch64.neon.fmaxnmv.f32.v2f32
   // CHECK-NEXT: ret
 }
 
@@ -87,7 +87,7 @@ float32_t test_vmaxnmv_f32(float32x2_t a
 float64_t test_vmaxnmvq_f64(float64x2_t a1) {
   // CHECK@ test_vmaxnmvq_f64
   return vmaxnmvq_f64(a1);
-  // CHECK@ llvm.arm64.neon.saddlv.i64.v2i32
+  // CHECK@ llvm.aarch64.neon.saddlv.i64.v2i32
   // CHECK-NEXT@ ret
 }
 #endif
@@ -95,14 +95,14 @@ float64_t test_vmaxnmvq_f64(float64x2_t
 float32_t test_vmaxnmvq_f32(float32x4_t a1) {
   // CHECK: test_vmaxnmvq_f32
   return vmaxnmvq_f32(a1);
-  // CHECK: llvm.arm64.neon.fmaxnmv.f32.v4f32
+  // CHECK: llvm.aarch64.neon.fmaxnmv.f32.v4f32
   // CHECK-NEXT: ret
 }
 
 float32_t test_vmaxv_f32(float32x2_t a1) {
   // CHECK: test_vmaxv_f32
   return vmaxv_f32(a1);
-  // CHECK: llvm.arm64.neon.fmaxv.f32.v2f32
+  // CHECK: llvm.aarch64.neon.fmaxv.f32.v2f32
   // FIXME check that the 2nd and 3rd arguments are the same V register below
   // CHECK-CODEGEN: fmaxp.2s
   // CHECK-NEXT: ret
@@ -111,7 +111,7 @@ float32_t test_vmaxv_f32(float32x2_t a1)
 int32_t test_vmaxv_s32(int32x2_t a1) {
   // CHECK: test_vmaxv_s32
   return vmaxv_s32(a1);
-  // CHECK: llvm.arm64.neon.smaxv.i32.v2i32
+  // CHECK: llvm.aarch64.neon.smaxv.i32.v2i32
   // FIXME check that the 2nd and 3rd arguments are the same V register below
   // CHECK-CODEGEN: smaxp.2s
   // CHECK-NEXT: ret
@@ -120,7 +120,7 @@ int32_t test_vmaxv_s32(int32x2_t a1) {
 uint32_t test_vmaxv_u32(uint32x2_t a1) {
   // CHECK: test_vmaxv_u32
   return vmaxv_u32(a1);
-  // CHECK: llvm.arm64.neon.umaxv.i32.v2i32
+  // CHECK: llvm.aarch64.neon.umaxv.i32.v2i32
   // FIXME check that the 2nd and 3rd arguments are the same V register below
   // CHECK-CODEGEN: umaxp.2s
   // CHECK-NEXT: ret
@@ -131,7 +131,7 @@ uint32_t test_vmaxv_u32(uint32x2_t a1) {
 float64_t test_vmaxvq_f64(float64x2_t a1) {
   // CHECK@ test_vmaxvq_f64
   return vmaxvq_f64(a1);
-  // CHECK@ llvm.arm64.neon.fmaxv.i64.v2f64
+  // CHECK@ llvm.aarch64.neon.fmaxv.i64.v2f64
   // CHECK-NEXT@ ret
 }
 #endif
@@ -139,21 +139,21 @@ float64_t test_vmaxvq_f64(float64x2_t a1
 float32_t test_vmaxvq_f32(float32x4_t a1) {
   // CHECK: test_vmaxvq_f32
   return vmaxvq_f32(a1);
-  // CHECK: llvm.arm64.neon.fmaxv.f32.v4f32
+  // CHECK: llvm.aarch64.neon.fmaxv.f32.v4f32
   // CHECK-NEXT: ret
 }
 
 float32_t test_vminnmv_f32(float32x2_t a1) {
   // CHECK: test_vminnmv_f32
   return vminnmv_f32(a1);
-  // CHECK: llvm.arm64.neon.fminnmv.f32.v2f32
+  // CHECK: llvm.aarch64.neon.fminnmv.f32.v2f32
   // CHECK-NEXT: ret
 }
 
 float32_t test_vminvq_f32(float32x4_t a1) {
   // CHECK: test_vminvq_f32
   return vminvq_f32(a1);
-  // CHECK: llvm.arm64.neon.fminv.f32.v4f32
+  // CHECK: llvm.aarch64.neon.fminv.f32.v4f32
   // CHECK-NEXT: ret
 }
 
@@ -163,7 +163,7 @@ float32_t test_vminvq_f32(float32x4_t a1
 float64_t test_vminnmvq_f64(float64x2_t a1) {
   // CHECK@ test_vminnmvq_f64
   return vminnmvq_f64(a1);
-  // CHECK@ llvm.arm64.neon.saddlv.i64.v2i32
+  // CHECK@ llvm.aarch64.neon.saddlv.i64.v2i32
   // CHECK-NEXT@ ret
 }
 #endif
@@ -171,21 +171,21 @@ float64_t test_vminnmvq_f64(float64x2_t
 float32_t test_vminnmvq_f32(float32x4_t a1) {
   // CHECK: test_vminnmvq_f32
   return vminnmvq_f32(a1);
-  // CHECK: llvm.arm64.neon.fminnmv.f32.v4f32
+  // CHECK: llvm.aarch64.neon.fminnmv.f32.v4f32
   // CHECK-NEXT: ret
 }
 
 float32_t test_vminv_f32(float32x2_t a1) {
   // CHECK: test_vminv_f32
   return vminv_f32(a1);
-  // CHECK: llvm.arm64.neon.fminv.f32.v2f32
+  // CHECK: llvm.aarch64.neon.fminv.f32.v2f32
   // CHECK-NEXT: ret
 }
 
 int32_t test_vminv_s32(int32x2_t a1) {
   // CHECK: test_vminv_s32
   return vminv_s32(a1);
-  // CHECK: llvm.arm64.neon.sminv.i32.v2i32
+  // CHECK: llvm.aarch64.neon.sminv.i32.v2i32
   // CHECK-CODEGEN: sminp.2s
   // CHECK-NEXT: ret
 }
@@ -193,7 +193,7 @@ int32_t test_vminv_s32(int32x2_t a1) {
 uint32_t test_vminv_u32(uint32x2_t a1) {
   // CHECK: test_vminv_u32
   return vminv_u32(a1);
-  // CHECK: llvm.arm64.neon.fminv.f32.v2f32
+  // CHECK: llvm.aarch64.neon.fminv.f32.v2f32
 }
 
 // FIXME punt on this for now; don't forget to fix CHECKs
@@ -201,7 +201,7 @@ uint32_t test_vminv_u32(uint32x2_t a1) {
 float64_t test_vminvq_f64(float64x2_t a1) {
   // CHECK@ test_vminvq_f64
   return vminvq_f64(a1);
-  // CHECK@ llvm.arm64.neon.saddlv.i64.v2i32
+  // CHECK@ llvm.aarch64.neon.saddlv.i64.v2i32
   // CHECK-NEXT@ ret
 }
 #endif

Modified: cfe/trunk/test/CodeGen/arm64_vadd.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vadd.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vadd.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vadd.c Sat May 24 07:52:07 2014
@@ -5,98 +5,98 @@
 int64_t test_vaddlv_s32(int32x2_t a1) {
   // CHECK: test_vaddlv_s32
   return vaddlv_s32(a1);
-  // CHECK: llvm.arm64.neon.saddlv.i64.v2i32
+  // CHECK: llvm.aarch64.neon.saddlv.i64.v2i32
   // CHECK-NEXT: ret
 }
 
 uint64_t test_vaddlv_u32(uint32x2_t a1) {
   // CHECK: test_vaddlv_u32
   return vaddlv_u32(a1);
-  // CHECK: llvm.arm64.neon.uaddlv.i64.v2i32
+  // CHECK: llvm.aarch64.neon.uaddlv.i64.v2i32
   // CHECK-NEXT: ret
 }
 
 int8_t test_vaddv_s8(int8x8_t a1) {
   // CHECK: test_vaddv_s8
   return vaddv_s8(a1);
-  // CHECK: llvm.arm64.neon.saddv.i32.v8i8
+  // CHECK: llvm.aarch64.neon.saddv.i32.v8i8
   // don't check for return here (there's a trunc?)
 }
 
 int16_t test_vaddv_s16(int16x4_t a1) {
   // CHECK: test_vaddv_s16
   return vaddv_s16(a1);
-  // CHECK: llvm.arm64.neon.saddv.i32.v4i16
+  // CHECK: llvm.aarch64.neon.saddv.i32.v4i16
   // don't check for return here (there's a trunc?)
 }
 
 int32_t test_vaddv_s32(int32x2_t a1) {
   // CHECK: test_vaddv_s32
   return vaddv_s32(a1);
-  // CHECK: llvm.arm64.neon.saddv.i32.v2i32
+  // CHECK: llvm.aarch64.neon.saddv.i32.v2i32
   // CHECK-NEXT: ret
 }
 
 uint8_t test_vaddv_u8(int8x8_t a1) {
   // CHECK: test_vaddv_u8
   return vaddv_u8(a1);
-  // CHECK: llvm.arm64.neon.uaddv.i32.v8i8
+  // CHECK: llvm.aarch64.neon.uaddv.i32.v8i8
   // don't check for return here (there's a trunc?)
 }
 
 uint16_t test_vaddv_u16(int16x4_t a1) {
   // CHECK: test_vaddv_u16
   return vaddv_u16(a1);
-  // CHECK: llvm.arm64.neon.uaddv.i32.v4i16
+  // CHECK: llvm.aarch64.neon.uaddv.i32.v4i16
   // don't check for return here (there's a trunc?)
 }
 
 uint32_t test_vaddv_u32(int32x2_t a1) {
   // CHECK: test_vaddv_u32
   return vaddv_u32(a1);
-  // CHECK: llvm.arm64.neon.uaddv.i32.v2i32
+  // CHECK: llvm.aarch64.neon.uaddv.i32.v2i32
   // CHECK-NEXT: ret
 }
 
 int8_t test_vaddvq_s8(int8x16_t a1) {
   // CHECK: test_vaddvq_s8
   return vaddvq_s8(a1);
-  // CHECK: llvm.arm64.neon.saddv.i32.v16i8
+  // CHECK: llvm.aarch64.neon.saddv.i32.v16i8
   // don't check for return here (there's a trunc?)
 }
 
 int16_t test_vaddvq_s16(int16x8_t a1) {
   // CHECK: test_vaddvq_s16
   return vaddvq_s16(a1);
-  // CHECK: llvm.arm64.neon.saddv.i32.v8i16
+  // CHECK: llvm.aarch64.neon.saddv.i32.v8i16
   // don't check for return here (there's a trunc?)
 }
 
 int32_t test_vaddvq_s32(int32x4_t a1) {
   // CHECK: test_vaddvq_s32
   return vaddvq_s32(a1);
-  // CHECK: llvm.arm64.neon.saddv.i32.v4i32
+  // CHECK: llvm.aarch64.neon.saddv.i32.v4i32
   // CHECK-NEXT: ret
 }
 
 uint8_t test_vaddvq_u8(int8x16_t a1) {
   // CHECK: test_vaddvq_u8
   return vaddvq_u8(a1);
-  // CHECK: llvm.arm64.neon.uaddv.i32.v16i8
+  // CHECK: llvm.aarch64.neon.uaddv.i32.v16i8
   // don't check for return here (there's a trunc?)
 }
 
 uint16_t test_vaddvq_u16(int16x8_t a1) {
   // CHECK: test_vaddvq_u16
   return vaddvq_u16(a1);
-  // CHECK: llvm.arm64.neon.uaddv.i32.v8i16
+  // CHECK: llvm.aarch64.neon.uaddv.i32.v8i16
   // don't check for return here (there's a trunc?)
 }
 
 uint32_t test_vaddvq_u32(int32x4_t a1) {
   // CHECK: test_vaddvq_u32
   return vaddvq_u32(a1);
-  // CHECK: llvm.arm64.neon.uaddv.i32.v4i32
+  // CHECK: llvm.aarch64.neon.uaddv.i32.v4i32
   // CHECK-NEXT: ret
 }
 

Modified: cfe/trunk/test/CodeGen/arm64_vca.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vca.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vca.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vca.c Sat May 24 07:52:07 2014
@@ -6,54 +6,54 @@
 uint32x2_t test_vcale_f32(float32x2_t a1, float32x2_t a2) {
   // CHECK: test_vcale_f32
   return vcale_f32(a1, a2);
-  // CHECK: llvm.arm64.neon.facge.v2i32.v2f32
+  // CHECK: llvm.aarch64.neon.facge.v2i32.v2f32
   // no check for ret here, as there is a bitcast
 }
 
 uint32x4_t test_vcaleq_f32(float32x4_t a1, float32x4_t a2) {
   // CHECK: test_vcaleq_f32
   return vcaleq_f32(a1, a2);
-  // CHECK: llvm.arm64.neon.facge.v4i32.v4f32{{.*a2,.*a1}}
+  // CHECK: llvm.aarch64.neon.facge.v4i32.v4f32{{.*a2,.*a1}}
   // no check for ret here, as there is a bitcast
 }
 
 uint32x2_t test_vcalt_f32(float32x2_t a1, float32x2_t a2) {
   // CHECK: test_vcalt_f32
   return vcalt_f32(a1, a2);
-  // CHECK: llvm.arm64.neon.facgt.v2i32.v2f32{{.*a2,.*a1}}
+  // CHECK: llvm.aarch64.neon.facgt.v2i32.v2f32{{.*a2,.*a1}}
   // no check for ret here, as there is a bitcast
 }
 
 uint32x4_t test_vcaltq_f32(float32x4_t a1, float32x4_t a2) {
   // CHECK: test_vcaltq_f32
   return vcaltq_f32(a1, a2);
-  // CHECK: llvm.arm64.neon.facgt.v4i32.v4f32{{.*a2,.*a1}}
+  // CHECK: llvm.aarch64.neon.facgt.v4i32.v4f32{{.*a2,.*a1}}
 }
 
 uint64x2_t test_vcagtq_f64(float64x2_t a1, float64x2_t a2) {
   // CHECK: test_vcagtq_f64
   return vcagtq_f64(a1, a2);
-  // CHECK: llvm.arm64.neon.facgt.v2i64.v2f64{{.*a1,.*a2}}
+  // CHECK: llvm.aarch64.neon.facgt.v2i64.v2f64{{.*a1,.*a2}}
   // no check for ret here, as there is a bitcast
 }
 
 uint64x2_t test_vcaltq_f64(float64x2_t a1, float64x2_t a2) {
   // CHECK: test_vcaltq_f64
   return vcaltq_f64(a1, a2);
-  // CHECK: llvm.arm64.neon.facgt.v2i64.v2f64{{.*a2,.*a1}}
+  // CHECK: llvm.aarch64.neon.facgt.v2i64.v2f64{{.*a2,.*a1}}
   // no check for ret here, as there is a bitcast
 }
 
 uint64x2_t test_vcageq_f64(float64x2_t a1, float64x2_t a2) {
   // CHECK: test_vcageq_f64
   return vcageq_f64(a1, a2);
-  // CHECK: llvm.arm64.neon.facge.v2i64.v2f64{{.*a1,.*a2}}
+  // CHECK: llvm.aarch64.neon.facge.v2i64.v2f64{{.*a1,.*a2}}
   // no check for ret here, as there is a bitcast
 }
 
 uint64x2_t test_vcaleq_f64(float64x2_t a1, float64x2_t a2) {
   // CHECK: test_vcaleq_f64
   return vcaleq_f64(a1, a2);
-  // CHECK: llvm.arm64.neon.facge.v2i64.v2f64{{.*a2,.*a1}}
+  // CHECK: llvm.aarch64.neon.facge.v2i64.v2f64{{.*a2,.*a1}}
   // no check for ret here, as there is a bitcast
 }

Modified: cfe/trunk/test/CodeGen/arm64_vcreate.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vcreate.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vcreate.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vcreate.c Sat May 24 07:52:07 2014
@@ -17,7 +17,7 @@ float32x2_t test_vcreate_f32(uint64_t a1
 float64x1_t test_vcreate_f64(uint64_t a1) {
   // CHECK@ test_vcreate_f64
   return vcreate_f64(a1);
-  // CHECK@ llvm.arm64.neon.saddlv.i64.v2i32
+  // CHECK@ llvm.aarch64.neon.saddlv.i64.v2i32
   // CHECK-NEXT@ ret
 }
 */

Modified: cfe/trunk/test/CodeGen/arm64_vcvtfp.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vcvtfp.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vcvtfp.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vcvtfp.c Sat May 24 07:52:07 2014
@@ -35,14 +35,14 @@ float32x4_t test_vcvt_high_f32_f64(float
 float32x2_t test_vcvtx_f32_f64(float64x2_t v) {
   // CHECK: test_vcvtx_f32_f64
   return vcvtx_f32_f64(v);
-  // CHECK: llvm.arm64.neon.fcvtxn.v2f32.v2f64
+  // CHECK: llvm.aarch64.neon.fcvtxn.v2f32.v2f64
   // CHECK-NEXT: ret
 }
 
 float32x4_t test_vcvtx_high_f32_f64(float32x2_t x, float64x2_t v) {
   // CHECK: test_vcvtx_high_f32_f64
   return vcvtx_high_f32_f64(x, v);
-  // CHECK: llvm.arm64.neon.fcvtxn.v2f32.v2f64
+  // CHECK: llvm.aarch64.neon.fcvtxn.v2f32.v2f64
   // CHECK: shufflevector
   // CHECK-NEXT: ret
 }

Modified: cfe/trunk/test/CodeGen/arm64_vneg.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vneg.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vneg.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vneg.c Sat May 24 07:52:07 2014
@@ -13,6 +13,6 @@ int64x2_t test_vnegq_s64(int64x2_t a1) {
 int64x2_t test_vqnegq_s64(int64x2_t a1) {
   // CHECK: test_vqnegq_s64
   return vqnegq_s64(a1);
-  // CHECK: llvm.arm64.neon.sqneg.v2i64
+  // CHECK: llvm.aarch64.neon.sqneg.v2i64
   // CHECK-NEXT: ret
 }

Modified: cfe/trunk/test/CodeGen/arm64_vset_lane.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vset_lane.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vset_lane.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vset_lane.c Sat May 24 07:52:07 2014
@@ -20,7 +20,7 @@ float16x8_t test_vsetq_lane_f16(float16_
 float64x1_t test_vset_lane_f64(float64_t a1, float64x1_t a2) {
   // CHECK-LABEL@ test_vset_lane_f64
   return vset_lane_f64(a1, a2, 0);
-  // CHECK@ @llvm.arm64.neon.smaxv.i32.v8i8
+  // CHECK@ @llvm.aarch64.neon.smaxv.i32.v8i8
 }
 #endif
 

Modified: cfe/trunk/test/CodeGen/arm64_vshift.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vshift.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vshift.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vshift.c Sat May 24 07:52:07 2014
@@ -3,355 +3,355 @@
 
 int8x8_t test_vqshl_n_s8(int8x8_t in) {
   // CHECK-LABEL: @test_vqshl_n_s8
-  // CHECK: call <8 x i8> @llvm.arm64.neon.sqshl.v8i8(<8 x i8> %in, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  // CHECK: call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %in, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
   return vqshl_n_s8(in, 1);
 }
 
 int16x4_t test_vqshl_n_s16(int16x4_t in) {
   // CHECK-LABEL: @test_vqshl_n_s16
-  // CHECK: call <4 x i16> @llvm.arm64.neon.sqshl.v4i16(<4 x i16> %in, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
+  // CHECK: call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %in, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
   return vqshl_n_s16(in, 1);
 }
 
 int32x2_t test_vqshl_n_s32(int32x2_t in) {
   // CHECK-LABEL: @test_vqshl_n_s32
-  // CHECK: call <2 x i32> @llvm.arm64.neon.sqshl.v2i32(<2 x i32> %in, <2 x i32> <i32 1, i32 1>)
+  // CHECK: call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %in, <2 x i32> <i32 1, i32 1>)
   return vqshl_n_s32(in, 1);
 }
 
 int64x1_t test_vqshl_n_s64(int64x1_t in) {
   // CHECK-LABEL: @test_vqshl_n_s64
-  // CHECK: call <1 x i64> @llvm.arm64.neon.sqshl.v1i64(<1 x i64> %in, <1 x i64> <i64 1>)
+  // CHECK: call <1 x i64> @llvm.aarch64.neon.sqshl.v1i64(<1 x i64> %in, <1 x i64> <i64 1>)
   return vqshl_n_s64(in, 1);
 }
 
 
 int8x16_t test_vqshlq_n_s8(int8x16_t in) {
   // CHECK-LABEL: @test_vqshlq_n_s8
-  // CHECK: call <16 x i8> @llvm.arm64.neon.sqshl.v16i8(<16 x i8> %in, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  // CHECK: call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %in, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
   return vqshlq_n_s8(in, 1);
 }
 
 int16x8_t test_vqshlq_n_s16(int16x8_t in) {
   // CHECK-LABEL: @test_vqshlq_n_s16
-  // CHECK: call <8 x i16> @llvm.arm64.neon.sqshl.v8i16(<8 x i16> %in, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+  // CHECK: call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> %in, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
   return vqshlq_n_s16(in, 1);
 }
 
 int32x4_t test_vqshlq_n_s32(int32x4_t in) {
   // CHECK-LABEL: @test_vqshlq_n_s32
-  // CHECK: call <4 x i32> @llvm.arm64.neon.sqshl.v4i32(<4 x i32> %in, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
+  // CHECK: call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> %in, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
   return vqshlq_n_s32(in, 1);
 }
 
 int64x2_t test_vqshlq_n_s64(int64x2_t in) {
   // CHECK-LABEL: @test_vqshlq_n_s64
-  // CHECK: call <2 x i64> @llvm.arm64.neon.sqshl.v2i64(<2 x i64> %in, <2 x i64> <i64 1, i64 1>
+  // CHECK: call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> %in, <2 x i64> <i64 1, i64 1>
   return vqshlq_n_s64(in, 1);
 }
 
 uint8x8_t test_vqshl_n_u8(uint8x8_t in) {
   // CHECK-LABEL: @test_vqshl_n_u8
-  // CHECK: call <8 x i8> @llvm.arm64.neon.uqshl.v8i8(<8 x i8> %in, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  // CHECK: call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %in, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
   return vqshl_n_u8(in, 1);
 }
 
 uint16x4_t test_vqshl_n_u16(uint16x4_t in) {
   // CHECK-LABEL: @test_vqshl_n_u16
-  // CHECK: call <4 x i16> @llvm.arm64.neon.uqshl.v4i16(<4 x i16> %in, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
+  // CHECK: call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %in, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
   return vqshl_n_u16(in, 1);
 }
 
 uint32x2_t test_vqshl_n_u32(uint32x2_t in) {
   // CHECK-LABEL: @test_vqshl_n_u32
-  // CHECK: call <2 x i32> @llvm.arm64.neon.uqshl.v2i32(<2 x i32> %in, <2 x i32> <i32 1, i32 1>)
+  // CHECK: call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> %in, <2 x i32> <i32 1, i32 1>)
   return vqshl_n_u32(in, 1);
 }
 
 uint64x1_t test_vqshl_n_u64(uint64x1_t in) {
   // CHECK-LABEL: @test_vqshl_n_u64
-  // CHECK: call <1 x i64> @llvm.arm64.neon.uqshl.v1i64(<1 x i64> %in, <1 x i64> <i64 1>)
+  // CHECK: call <1 x i64> @llvm.aarch64.neon.uqshl.v1i64(<1 x i64> %in, <1 x i64> <i64 1>)
   return vqshl_n_u64(in, 1);
 }
 
 uint8x16_t test_vqshlq_n_u8(uint8x16_t in) {
   // CHECK-LABEL: @test_vqshlq_n_u8
-  // CHECK: call <16 x i8> @llvm.arm64.neon.uqshl.v16i8(<16 x i8> %in, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  // CHECK: call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %in, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
   return vqshlq_n_u8(in, 1);
 }
 
 uint16x8_t test_vqshlq_n_u16(uint16x8_t in) {
   // CHECK-LABEL: @test_vqshlq_n_u16
-  // CHECK: call <8 x i16> @llvm.arm64.neon.uqshl.v8i16(<8 x i16> %in, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+  // CHECK: call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> %in, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
   return vqshlq_n_u16(in, 1);
 }
 
 uint32x4_t test_vqshlq_n_u32(uint32x4_t in) {
   // CHECK-LABEL: @test_vqshlq_n_u32
-  // CHECK: call <4 x i32> @llvm.arm64.neon.uqshl.v4i32(<4 x i32> %in, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
+  // CHECK: call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> %in, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
   return vqshlq_n_u32(in, 1);
 }
 
 uint64x2_t test_vqshlq_n_u64(uint64x2_t in) {
   // CHECK-LABEL: @test_vqshlq_n_u64
-  // CHECK: call <2 x i64> @llvm.arm64.neon.uqshl.v2i64(<2 x i64> %in, <2 x i64> <i64 1, i64 1>
+  // CHECK: call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> %in, <2 x i64> <i64 1, i64 1>
   return vqshlq_n_u64(in, 1);
 }
 
 int8x8_t test_vrshr_n_s8(int8x8_t in) {
   // CHECK-LABEL: @test_vrshr_n_s8
-  // CHECK: call <8 x i8> @llvm.arm64.neon.srshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  // CHECK: call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
   return vrshr_n_s8(in, 1);
 }
 
 int16x4_t test_vrshr_n_s16(int16x4_t in) {
   // CHECK-LABEL: @test_vrshr_n_s16
-  // CHECK: call <4 x i16> @llvm.arm64.neon.srshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
+  // CHECK: call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
   return vrshr_n_s16(in, 1);
 }
 
 int32x2_t test_vrshr_n_s32(int32x2_t in) {
   // CHECK-LABEL: @test_vrshr_n_s32
-  // CHECK: call <2 x i32> @llvm.arm64.neon.srshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
+  // CHECK: call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
   return vrshr_n_s32(in, 1);
 }
 
 int64x1_t test_vrshr_n_s64(int64x1_t in) {
   // CHECK-LABEL: @test_vrshr_n_s64
-  // CHECK: call <1 x i64> @llvm.arm64.neon.srshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
+  // CHECK: call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
   return vrshr_n_s64(in, 1);
 }
 
 
 int8x16_t test_vrshrq_n_s8(int8x16_t in) {
   // CHECK-LABEL: @test_vrshrq_n_s8
-  // CHECK: call <16 x i8> @llvm.arm64.neon.srshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  // CHECK: call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
   return vrshrq_n_s8(in, 1);
 }
 
 int16x8_t test_vrshrq_n_s16(int16x8_t in) {
   // CHECK-LABEL: @test_vrshrq_n_s16
-  // CHECK: call <8 x i16> @llvm.arm64.neon.srshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
+  // CHECK: call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
   return vrshrq_n_s16(in, 1);
 }
 
 int32x4_t test_vrshrq_n_s32(int32x4_t in) {
   // CHECK-LABEL: @test_vrshrq_n_s32
-  // CHECK: call <4 x i32> @llvm.arm64.neon.srshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
+  // CHECK: call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
   return vrshrq_n_s32(in, 1);
 }
 
 int64x2_t test_vrshrq_n_s64(int64x2_t in) {
   // CHECK-LABEL: @test_vrshrq_n_s64
-  // CHECK: call <2 x i64> @llvm.arm64.neon.srshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>
+  // CHECK: call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>
   return vrshrq_n_s64(in, 1);
 }
 
 uint8x8_t test_vrshr_n_u8(uint8x8_t in) {
   // CHECK-LABEL: @test_vrshr_n_u8
-  // CHECK: call <8 x i8> @llvm.arm64.neon.urshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  // CHECK: call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
   return vrshr_n_u8(in, 1);
 }
 
 uint16x4_t test_vrshr_n_u16(uint16x4_t in) {
   // CHECK-LABEL: @test_vrshr_n_u16
-  // CHECK: call <4 x i16> @llvm.arm64.neon.urshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
+  // CHECK: call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
   return vrshr_n_u16(in, 1);
 }
 
 uint32x2_t test_vrshr_n_u32(uint32x2_t in) {
   // CHECK-LABEL: @test_vrshr_n_u32
-  // CHECK: call <2 x i32> @llvm.arm64.neon.urshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
+  // CHECK: call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
   return vrshr_n_u32(in, 1);
 }
 
 uint64x1_t test_vrshr_n_u64(uint64x1_t in) {
   // CHECK-LABEL: @test_vrshr_n_u64
-  // CHECK: call <1 x i64> @llvm.arm64.neon.urshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
+  // CHECK: call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
   return vrshr_n_u64(in, 1);
 }
 
 uint8x16_t test_vrshrq_n_u8(uint8x16_t in) {
   // CHECK-LABEL: @test_vrshrq_n_u8
-  // CHECK: call <16 x i8> @llvm.arm64.neon.urshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  // CHECK: call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
   return vrshrq_n_u8(in, 1);
 }
 
 uint16x8_t test_vrshrq_n_u16(uint16x8_t in) {
   // CHECK-LABEL: @test_vrshrq_n_u16
-  // CHECK: call <8 x i16> @llvm.arm64.neon.urshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
+  // CHECK: call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
   return vrshrq_n_u16(in, 1);
 }
 
 uint32x4_t test_vrshrq_n_u32(uint32x4_t in) {
   // CHECK-LABEL: @test_vrshrq_n_u32
-  // CHECK: call <4 x i32> @llvm.arm64.neon.urshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
+  // CHECK: call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
   return vrshrq_n_u32(in, 1);
 }
 
 uint64x2_t test_vrshrq_n_u64(uint64x2_t in) {
   // CHECK-LABEL: @test_vrshrq_n_u64
-  // CHECK: call <2 x i64> @llvm.arm64.neon.urshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>
+  // CHECK: call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>
   return vrshrq_n_u64(in, 1);
 }
 
 int8x8_t test_vqshlu_n_s8(int8x8_t in) {
   // CHECK-LABEL: @test_vqshlu_n_s8
-  // CHECK: call <8 x i8> @llvm.arm64.neon.sqshlu.v8i8(<8 x i8> %in, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  // CHECK: call <8 x i8> @llvm.aarch64.neon.sqshlu.v8i8(<8 x i8> %in, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
   return vqshlu_n_s8(in, 1);
 }
 
 int16x4_t test_vqshlu_n_s16(int16x4_t in) {
   // CHECK-LABEL: @test_vqshlu_n_s16
-  // CHECK: call <4 x i16> @llvm.arm64.neon.sqshlu.v4i16(<4 x i16> %in, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
+  // CHECK: call <4 x i16> @llvm.aarch64.neon.sqshlu.v4i16(<4 x i16> %in, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
   return vqshlu_n_s16(in, 1);
 }
 
 int32x2_t test_vqshlu_n_s32(int32x2_t in) {
   // CHECK-LABEL: @test_vqshlu_n_s32
-  // CHECK: call <2 x i32> @llvm.arm64.neon.sqshlu.v2i32(<2 x i32> %in, <2 x i32> <i32 1, i32 1>)
+  // CHECK: call <2 x i32> @llvm.aarch64.neon.sqshlu.v2i32(<2 x i32> %in, <2 x i32> <i32 1, i32 1>)
   return vqshlu_n_s32(in, 1);
 }
 
 int64x1_t test_vqshlu_n_s64(int64x1_t in) {
   // CHECK-LABEL: @test_vqshlu_n_s64
-  // CHECK: call <1 x i64> @llvm.arm64.neon.sqshlu.v1i64(<1 x i64> %in, <1 x i64> <i64 1>)
+  // CHECK: call <1 x i64> @llvm.aarch64.neon.sqshlu.v1i64(<1 x i64> %in, <1 x i64> <i64 1>)
   return vqshlu_n_s64(in, 1);
 }
 
 
 int8x16_t test_vqshluq_n_s8(int8x16_t in) {
   // CHECK-LABEL: @test_vqshluq_n_s8
-  // CHECK: call <16 x i8> @llvm.arm64.neon.sqshlu.v16i8(<16 x i8> %in, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  // CHECK: call <16 x i8> @llvm.aarch64.neon.sqshlu.v16i8(<16 x i8> %in, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
   return vqshluq_n_s8(in, 1);
 }
 
 int16x8_t test_vqshluq_n_s16(int16x8_t in) {
   // CHECK-LABEL: @test_vqshluq_n_s16
-  // CHECK: call <8 x i16> @llvm.arm64.neon.sqshlu.v8i16(<8 x i16> %in, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+  // CHECK: call <8 x i16> @llvm.aarch64.neon.sqshlu.v8i16(<8 x i16> %in, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
   return vqshluq_n_s16(in, 1);
 }
 
 int32x4_t test_vqshluq_n_s32(int32x4_t in) {
   // CHECK-LABEL: @test_vqshluq_n_s32
-  // CHECK: call <4 x i32> @llvm.arm64.neon.sqshlu.v4i32(<4 x i32> %in, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
+  // CHECK: call <4 x i32> @llvm.aarch64.neon.sqshlu.v4i32(<4 x i32> %in, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
   return vqshluq_n_s32(in, 1);
 }
 
 int64x2_t test_vqshluq_n_s64(int64x2_t in) {
   // CHECK-LABEL: @test_vqshluq_n_s64
-  // CHECK: call <2 x i64> @llvm.arm64.neon.sqshlu.v2i64(<2 x i64> %in, <2 x i64> <i64 1, i64 1>
+  // CHECK: call <2 x i64> @llvm.aarch64.neon.sqshlu.v2i64(<2 x i64> %in, <2 x i64> <i64 1, i64 1>
   return vqshluq_n_s64(in, 1);
 }
 
 int8x8_t test_vrsra_n_s8(int8x8_t acc, int8x8_t in) {
   // CHECK-LABEL: @test_vrsra_n_s8
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i8> @llvm.arm64.neon.srshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
   // CHECK: add <8 x i8> [[TMP]], %acc
   return vrsra_n_s8(acc, in, 1);
 }
 
 int16x4_t test_vrsra_n_s16(int16x4_t acc, int16x4_t in) {
   // CHECK-LABEL: @test_vrsra_n_s16
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i16> @llvm.arm64.neon.srshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
   // CHECK: add <4 x i16> [[TMP]], %acc
   return vrsra_n_s16(acc, in, 1);
 }
 
 int32x2_t test_vrsra_n_s32(int32x2_t acc, int32x2_t in) {
   // CHECK-LABEL: @test_vrsra_n_s32
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i32> @llvm.arm64.neon.srshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
   // CHECK: add <2 x i32> [[TMP]], %acc
   return vrsra_n_s32(acc, in, 1);
 }
 
 int64x1_t test_vrsra_n_s64(int64x1_t acc, int64x1_t in) {
   // CHECK-LABEL: @test_vrsra_n_s64
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <1 x i64> @llvm.arm64.neon.srshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
   // CHECK: add <1 x i64> [[TMP]], %acc
   return vrsra_n_s64(acc, in, 1);
 }
 
 int8x16_t test_vrsraq_n_s8(int8x16_t acc, int8x16_t in) {
   // CHECK-LABEL: @test_vrsraq_n_s8
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <16 x i8> @llvm.arm64.neon.srshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
   // CHECK: add <16 x i8> [[TMP]], %acc
   return vrsraq_n_s8(acc, in, 1);
 }
 
 int16x8_t test_vrsraq_n_s16(int16x8_t acc, int16x8_t in) {
   // CHECK-LABEL: @test_vrsraq_n_s16
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i16> @llvm.arm64.neon.srshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
   // CHECK: add <8 x i16> [[TMP]], %acc
   return vrsraq_n_s16(acc, in, 1);
 }
 
 int32x4_t test_vrsraq_n_s32(int32x4_t acc, int32x4_t in) {
   // CHECK-LABEL: @test_vrsraq_n_s32
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i32> @llvm.arm64.neon.srshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
   // CHECK: add <4 x i32> [[TMP]], %acc
   return vrsraq_n_s32(acc, in, 1);
 }
 
 int64x2_t test_vrsraq_n_s64(int64x2_t acc, int64x2_t in) {
   // CHECK-LABEL: @test_vrsraq_n_s64
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i64> @llvm.arm64.neon.srshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>)
   // CHECK: add <2 x i64> [[TMP]], %acc
   return vrsraq_n_s64(acc, in, 1);
 }
 
 uint8x8_t test_vrsra_n_u8(uint8x8_t acc, uint8x8_t in) {
   // CHECK-LABEL: @test_vrsra_n_u8
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i8> @llvm.arm64.neon.urshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
   // CHECK: add <8 x i8> [[TMP]], %acc
   return vrsra_n_u8(acc, in, 1);
 }
 
 uint16x4_t test_vrsra_n_u16(uint16x4_t acc, uint16x4_t in) {
   // CHECK-LABEL: @test_vrsra_n_u16
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i16> @llvm.arm64.neon.urshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
   // CHECK: add <4 x i16> [[TMP]], %acc
   return vrsra_n_u16(acc, in, 1);
 }
 
 uint32x2_t test_vrsra_n_u32(uint32x2_t acc, uint32x2_t in) {
   // CHECK-LABEL: @test_vrsra_n_u32
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i32> @llvm.arm64.neon.urshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
   // CHECK: add <2 x i32> [[TMP]], %acc
   return vrsra_n_u32(acc, in, 1);
 }
 
 uint64x1_t test_vrsra_n_u64(uint64x1_t acc, uint64x1_t in) {
   // CHECK-LABEL: @test_vrsra_n_u64
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <1 x i64> @llvm.arm64.neon.urshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
   // CHECK: add <1 x i64> [[TMP]], %acc
   return vrsra_n_u64(acc, in, 1);
 }
 
 uint8x16_t test_vrsraq_n_u8(uint8x16_t acc, uint8x16_t in) {
   // CHECK-LABEL: @test_vrsraq_n_u8
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <16 x i8> @llvm.arm64.neon.urshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
   // CHECK: add <16 x i8> [[TMP]], %acc
   return vrsraq_n_u8(acc, in, 1);
 }
 
 uint16x8_t test_vrsraq_n_u16(uint16x8_t acc, uint16x8_t in) {
   // CHECK-LABEL: @test_vrsraq_n_u16
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i16> @llvm.arm64.neon.urshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
   // CHECK: add <8 x i16> [[TMP]], %acc
   return vrsraq_n_u16(acc, in, 1);
 }
 
 uint32x4_t test_vrsraq_n_u32(uint32x4_t acc, uint32x4_t in) {
   // CHECK-LABEL: @test_vrsraq_n_u32
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i32> @llvm.arm64.neon.urshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
   // CHECK: add <4 x i32> [[TMP]], %acc
   return vrsraq_n_u32(acc, in, 1);
 }
 
 uint64x2_t test_vrsraq_n_u64(uint64x2_t acc, uint64x2_t in) {
   // CHECK-LABEL: @test_vrsraq_n_u64
-  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i64> @llvm.arm64.neon.urshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>)
+  // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>)
   // CHECK: add <2 x i64> [[TMP]], %acc
   return vrsraq_n_u64(acc, in, 1);
 }
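
For context on the CHECK patterns above: vrsra_n_*/vrsraq_n_* perform a per-lane rounding shift right by n followed by an accumulate, which is why each test expects an s/urshl by -n and then an add, while vqshlu_n_* is a saturating shift left of signed lanes into the unsigned range (the sqshlu calls). A minimal usage sketch, illustrative only, assuming <arm_neon.h> and an AArch64 target:

  #include <arm_neon.h>

  /* Per lane this computes acc + ((in + 1) >> 1); the rounding shift is
     what becomes the srshl-by-(-1) call in the CHECK lines above. */
  int32x2_t rsra_sketch(int32x2_t acc, int32x2_t in) {
    return vrsra_n_s32(acc, in, 1);
  }

  /* Signed lanes shifted left by 1 and saturated to the unsigned range;
     this lowers to the llvm.aarch64.neon.sqshlu intrinsic above. */
  uint8x8_t qshlu_sketch(int8x8_t in) {
    return vqshlu_n_s8(in, 1);
  }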

Modified: cfe/trunk/test/CodeGen/arm64_vsli.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vsli.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vsli.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vsli.c Sat May 24 07:52:07 2014
@@ -9,140 +9,140 @@
 int8x8_t test_vsli_n_s8(int8x8_t a1, int8x8_t a2) {
   // CHECK: test_vsli_n_s8
   return vsli_n_s8(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsli.v8i8
+  // CHECK: llvm.aarch64.neon.vsli.v8i8
   // CHECK_CODEGEN: sli.8b  v0, v1, #3
 }
 
 int16x4_t test_vsli_n_s16(int16x4_t a1, int16x4_t a2) {
   // CHECK: test_vsli_n_s16
   return vsli_n_s16(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsli.v4i16
+  // CHECK: llvm.aarch64.neon.vsli.v4i16
   // CHECK_CODEGEN: sli.4h  v0, v1, #3
 }
 
 int32x2_t test_vsli_n_s32(int32x2_t a1, int32x2_t a2) {
   // CHECK: test_vsli_n_s32
   return vsli_n_s32(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsli.v2i32
+  // CHECK: llvm.aarch64.neon.vsli.v2i32
   // CHECK_CODEGEN: sli.2s  v0, v1, #1
 }
 
 int64x1_t test_vsli_n_s64(int64x1_t a1, int64x1_t a2) {
   // CHECK: test_vsli_n_s64
   return vsli_n_s64(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsli.v1i64
+  // CHECK: llvm.aarch64.neon.vsli.v1i64
   // CHECK_CODEGEN: sli     d0, d1, #1
 }
 
 uint8x8_t test_vsli_n_u8(uint8x8_t a1, uint8x8_t a2) {
   // CHECK: test_vsli_n_u8
   return vsli_n_u8(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsli.v8i8
+  // CHECK: llvm.aarch64.neon.vsli.v8i8
   // CHECK_CODEGEN: sli.8b  v0, v1, #3
 }
 
 uint16x4_t test_vsli_n_u16(uint16x4_t a1, uint16x4_t a2) {
   // CHECK: test_vsli_n_u16
   return vsli_n_u16(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsli.v4i16
+  // CHECK: llvm.aarch64.neon.vsli.v4i16
   // CHECK_CODEGEN: sli.4h  v0, v1, #3
 }
 
 uint32x2_t test_vsli_n_u32(uint32x2_t a1, uint32x2_t a2) {
   // CHECK: test_vsli_n_u32
   return vsli_n_u32(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsli.v2i32
+  // CHECK: llvm.aarch64.neon.vsli.v2i32
   // CHECK_CODEGEN: sli.2s  v0, v1, #1
 }
 
 uint64x1_t test_vsli_n_u64(uint64x1_t a1, uint64x1_t a2) {
   // CHECK: test_vsli_n_u64
   return vsli_n_u64(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsli.v1i64
+  // CHECK: llvm.aarch64.neon.vsli.v1i64
   // CHECK_CODEGEN: sli     d0, d1, #1
 }
 
 poly8x8_t test_vsli_n_p8(poly8x8_t a1, poly8x8_t a2) {
   // CHECK: test_vsli_n_p8
   return vsli_n_p8(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsli.v8i8
+  // CHECK: llvm.aarch64.neon.vsli.v8i8
   // CHECK_CODEGEN: sli.8b  v0, v1, #1
 }
 
 poly16x4_t test_vsli_n_p16(poly16x4_t a1, poly16x4_t a2) {
   // CHECK: test_vsli_n_p16
   return vsli_n_p16(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsli.v4i16
+  // CHECK: llvm.aarch64.neon.vsli.v4i16
   // CHECK_CODEGEN: sli.4h  v0, v1, #1
 }
 
 int8x16_t test_vsliq_n_s8(int8x16_t a1, int8x16_t a2) {
   // CHECK: test_vsliq_n_s8
   return vsliq_n_s8(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsli.v16i8
+  // CHECK: llvm.aarch64.neon.vsli.v16i8
   // CHECK_CODEGEN: sli.16b v0, v1, #3
 }
 
 int16x8_t test_vsliq_n_s16(int16x8_t a1, int16x8_t a2) {
   // CHECK: test_vsliq_n_s16
   return vsliq_n_s16(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsli.v8i16
+  // CHECK: llvm.aarch64.neon.vsli.v8i16
   // CHECK_CODEGEN: sli.8h  v0, v1, #3
 }
 
 int32x4_t test_vsliq_n_s32(int32x4_t a1, int32x4_t a2) {
   // CHECK: test_vsliq_n_s32
   return vsliq_n_s32(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsli.v4i32
+  // CHECK: llvm.aarch64.neon.vsli.v4i32
   // CHECK_CODEGEN: sli.4s  v0, v1, #1
 }
 
 int64x2_t test_vsliq_n_s64(int64x2_t a1, int64x2_t a2) {
   // CHECK: test_vsliq_n_s64
   return vsliq_n_s64(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsli.v2i64
+  // CHECK: llvm.aarch64.neon.vsli.v2i64
   // CHECK_CODEGEN: sli.2d  v0, v1, #1
 }
 
 uint8x16_t test_vsliq_n_u8(uint8x16_t a1, uint8x16_t a2) {
   // CHECK: test_vsliq_n_u8
   return vsliq_n_u8(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsli.v16i8
+  // CHECK: llvm.aarch64.neon.vsli.v16i8
   // CHECK_CODEGEN: sli.16b v0, v1, #3
 }
 
 uint16x8_t test_vsliq_n_u16(uint16x8_t a1, uint16x8_t a2) {
   // CHECK: test_vsliq_n_u16
   return vsliq_n_u16(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsli.v8i16
+  // CHECK: llvm.aarch64.neon.vsli.v8i16
   // CHECK_CODEGEN: sli.8h  v0, v1, #3
 }
 
 uint32x4_t test_vsliq_n_u32(uint32x4_t a1, uint32x4_t a2) {
   // CHECK: test_vsliq_n_u32
   return vsliq_n_u32(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsli.v4i32
+  // CHECK: llvm.aarch64.neon.vsli.v4i32
   // CHECK_CODEGEN: sli.4s  v0, v1, #1
 }
 
 uint64x2_t test_vsliq_n_u64(uint64x2_t a1, uint64x2_t a2) {
   // CHECK: test_vsliq_n_u64
   return vsliq_n_u64(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsli.v2i64
+  // CHECK: llvm.aarch64.neon.vsli.v2i64
   // CHECK_CODEGEN: sli.2d  v0, v1, #1
 }
 
 poly8x16_t test_vsliq_n_p8(poly8x16_t a1, poly8x16_t a2) {
   // CHECK: test_vsliq_n_p8
   return vsliq_n_p8(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsli.v16i8
+  // CHECK: llvm.aarch64.neon.vsli.v16i8
   // CHECK_CODEGEN: sli.16b v0, v1, #1
 }
 
 poly16x8_t test_vsliq_n_p16(poly16x8_t a1, poly16x8_t a2) {
   // CHECK: test_vsliq_n_p16
   return vsliq_n_p16(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsli.v8i16
+  // CHECK: llvm.aarch64.neon.vsli.v8i16
   // CHECK_CODEGEN: sli.8h  v0, v1, #1
 }
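
The vsli_n_* family tested above is the shift-left-and-insert operation: per lane, the second operand is shifted left by #n and written over the first operand, whose low n bits are preserved. A hypothetical scalar model of one 8-bit lane (illustrative only, not code from this commit):

  /* Hypothetical one-lane model of SLI #n on an 8-bit element: the
     shifted bits come from b, the low n bits are kept from a. */
  static unsigned char sli_lane_u8(unsigned char a, unsigned char b, int n) {
    unsigned char keep = (unsigned char)((1u << n) - 1u);
    return (unsigned char)((a & keep) | (unsigned char)(b << n));
  }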
 

Modified: cfe/trunk/test/CodeGen/arm64_vsri.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vsri.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vsri.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vsri.c Sat May 24 07:52:07 2014
@@ -10,140 +10,140 @@
 int8x8_t test_vsri_n_s8(int8x8_t a1, int8x8_t a2) {
   // CHECK: test_vsri_n_s8
   return vsri_n_s8(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsri.v8i8
+  // CHECK: llvm.aarch64.neon.vsri.v8i8
   // CHECK_CODEGEN: sri.8b  v0, v1, #3
 }
 
 int16x4_t test_vsri_n_s16(int16x4_t a1, int16x4_t a2) {
   // CHECK: test_vsri_n_s16
   return vsri_n_s16(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsri.v4i16
+  // CHECK: llvm.aarch64.neon.vsri.v4i16
   // CHECK_CODEGEN: sri.4h  v0, v1, #3
 }
 
 int32x2_t test_vsri_n_s32(int32x2_t a1, int32x2_t a2) {
   // CHECK: test_vsri_n_s32
   return vsri_n_s32(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsri.v2i32
+  // CHECK: llvm.aarch64.neon.vsri.v2i32
   // CHECK_CODEGEN: sri.2s  v0, v1, #1
 }
 
 int64x1_t test_vsri_n_s64(int64x1_t a1, int64x1_t a2) {
   // CHECK: test_vsri_n_s64
   return vsri_n_s64(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsri.v1i64
+  // CHECK: llvm.aarch64.neon.vsri.v1i64
   // CHECK_CODEGEN: sri     d0, d1, #1
 }
 
 uint8x8_t test_vsri_n_u8(uint8x8_t a1, uint8x8_t a2) {
   // CHECK: test_vsri_n_u8
   return vsri_n_u8(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsri.v8i8
+  // CHECK: llvm.aarch64.neon.vsri.v8i8
   // CHECK_CODEGEN: sri.8b  v0, v1, #3
 }
 
 uint16x4_t test_vsri_n_u16(uint16x4_t a1, uint16x4_t a2) {
   // CHECK: test_vsri_n_u16
   return vsri_n_u16(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsri.v4i16
+  // CHECK: llvm.aarch64.neon.vsri.v4i16
   // CHECK_CODEGEN: sri.4h  v0, v1, #3
 }
 
 uint32x2_t test_vsri_n_u32(uint32x2_t a1, uint32x2_t a2) {
   // CHECK: test_vsri_n_u32
   return vsri_n_u32(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsri.v2i32
+  // CHECK: llvm.aarch64.neon.vsri.v2i32
   // CHECK_CODEGEN: sri.2s  v0, v1, #1
 }
 
 uint64x1_t test_vsri_n_u64(uint64x1_t a1, uint64x1_t a2) {
   // CHECK: test_vsri_n_u64
   return vsri_n_u64(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsri.v1i64
+  // CHECK: llvm.aarch64.neon.vsri.v1i64
   // CHECK_CODEGEN: sri     d0, d1, #1
 }
 
 poly8x8_t test_vsri_n_p8(poly8x8_t a1, poly8x8_t a2) {
   // CHECK: test_vsri_n_p8
   return vsri_n_p8(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsri.v8i8
+  // CHECK: llvm.aarch64.neon.vsri.v8i8
   // CHECK_CODEGEN: sri.8b  v0, v1, #1
 }
 
 poly16x4_t test_vsri_n_p16(poly16x4_t a1, poly16x4_t a2) {
   // CHECK: test_vsri_n_p16
   return vsri_n_p16(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsri.v4i16
+  // CHECK: llvm.aarch64.neon.vsri.v4i16
   // CHECK_CODEGEN: sri.4h  v0, v1, #1
 }
 
 int8x16_t test_vsriq_n_s8(int8x16_t a1, int8x16_t a2) {
   // CHECK: test_vsriq_n_s8
   return vsriq_n_s8(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsri.v16i8
+  // CHECK: llvm.aarch64.neon.vsri.v16i8
   // CHECK_CODEGEN: sri.16b v0, v1, #3
 }
 
 int16x8_t test_vsriq_n_s16(int16x8_t a1, int16x8_t a2) {
   // CHECK: test_vsriq_n_s16
   return vsriq_n_s16(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsri.v8i16
+  // CHECK: llvm.aarch64.neon.vsri.v8i16
   // CHECK_CODEGEN: sri.8h  v0, v1, #3
 }
 
 int32x4_t test_vsriq_n_s32(int32x4_t a1, int32x4_t a2) {
   // CHECK: test_vsriq_n_s32
   return vsriq_n_s32(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsri.v4i32
+  // CHECK: llvm.aarch64.neon.vsri.v4i32
   // CHECK_CODEGEN: sri.4s  v0, v1, #1
 }
 
 int64x2_t test_vsriq_n_s64(int64x2_t a1, int64x2_t a2) {
   // CHECK: test_vsriq_n_s64
   return vsriq_n_s64(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsri.v2i64
+  // CHECK: llvm.aarch64.neon.vsri.v2i64
   // CHECK_CODEGEN: sri.2d  v0, v1, #1
 }
 
 uint8x16_t test_vsriq_n_u8(uint8x16_t a1, uint8x16_t a2) {
   // CHECK: test_vsriq_n_u8
   return vsriq_n_u8(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsri.v16i8
+  // CHECK: llvm.aarch64.neon.vsri.v16i8
   // CHECK_CODEGEN: sri.16b v0, v1, #3
 }
 
 uint16x8_t test_vsriq_n_u16(uint16x8_t a1, uint16x8_t a2) {
   // CHECK: test_vsriq_n_u16
   return vsriq_n_u16(a1, a2, 3);
-  // CHECK: llvm.arm64.neon.vsri.v8i16
+  // CHECK: llvm.aarch64.neon.vsri.v8i16
   // CHECK_CODEGEN: sri.8h  v0, v1, #3
 }
 
 uint32x4_t test_vsriq_n_u32(uint32x4_t a1, uint32x4_t a2) {
   // CHECK: test_vsriq_n_u32
   return vsriq_n_u32(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsri.v4i32
+  // CHECK: llvm.aarch64.neon.vsri.v4i32
   // CHECK_CODEGEN: sri.4s  v0, v1, #1
 }
 
 uint64x2_t test_vsriq_n_u64(uint64x2_t a1, uint64x2_t a2) {
   // CHECK: test_vsriq_n_u64
   return vsriq_n_u64(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsri.v2i64
+  // CHECK: llvm.aarch64.neon.vsri.v2i64
   // CHECK_CODEGEN: sri.2d  v0, v1, #1
 }
 
 poly8x16_t test_vsriq_n_p8(poly8x16_t a1, poly8x16_t a2) {
   // CHECK: test_vsriq_n_p8
   return vsriq_n_p8(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsri.v16i8
+  // CHECK: llvm.aarch64.neon.vsri.v16i8
   // CHECK_CODEGEN: sri.16b v0, v1, #1
 }
 
 poly16x8_t test_vsriq_n_p16(poly16x8_t a1, poly16x8_t a2) {
   // CHECK: test_vsriq_n_p16
   return vsriq_n_p16(a1, a2, 1);
-  // CHECK: llvm.arm64.neon.vsri.v8i16
+  // CHECK: llvm.aarch64.neon.vsri.v8i16
   // CHECK_CODEGEN: sri.8h  v0, v1, #1
 }
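
vsri_n_* is the mirror image, shift-right-and-insert: the second operand is shifted right by #n and inserted, keeping the top n bits of the first operand. The corresponding hypothetical one-lane model (again illustrative only):

  /* Hypothetical one-lane model of SRI #n on an 8-bit element: the top
     n bits are kept from a, the remaining bits come from b >> n. */
  static unsigned char sri_lane_u8(unsigned char a, unsigned char b, int n) {
    unsigned char keep = (unsigned char)(0xFFu << (8 - n));
    return (unsigned char)((a & keep) | (unsigned char)(b >> n));
  }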
 

Modified: cfe/trunk/test/CodeGen/builtins-arm-exclusive.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/builtins-arm-exclusive.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/builtins-arm-exclusive.c (original)
+++ cfe/trunk/test/CodeGen/builtins-arm-exclusive.c Sat May 24 07:52:07 2014
@@ -21,10 +21,10 @@ int atomic_inc(int *addr) {
 // CHECK:   br i1 [[TST]], label {{%[a-zA-Z0-9.]+}}, label {{%[a-zA-Z0-9.]+}}
 
 // CHECK-ARM64-LABEL: @atomic_inc
-// CHECK-ARM64:   [[OLDVAL:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i32(i32* %addr)
+// CHECK-ARM64:   [[OLDVAL:%.*]] = tail call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
 // CHECK-ARM64:   [[INC:%.*]] = add i64 [[OLDVAL]], 1
 // CHECK-ARM64:   [[TRUNC:%.*]] = and i64 [[INC]], 4294967295
-// CHECK-ARM64:   [[FAILURE:%.*]] = tail call i32 @llvm.arm64.stxr.p0i32(i64 [[TRUNC]], i32* %addr)
+// CHECK-ARM64:   [[FAILURE:%.*]] = tail call i32 @llvm.aarch64.stxr.p0i32(i64 [[TRUNC]], i32* %addr)
 // CHECK-ARM64:   [[TST:%.*]] = icmp eq i32 [[FAILURE]], 0
 // CHECK-ARM64:   br i1 [[TST]], label {{%[a-zA-Z0-9.]+}}, label {{%[a-zA-Z0-9.]+}}
 
@@ -40,7 +40,7 @@ int test_ldrex(char *addr, long long *ad
 // CHECK: [[INTRES:%.*]] = tail call i32 @llvm.arm.ldrex.p0i8(i8* %addr)
 // CHECK: and i32 [[INTRES]], 255
 
-// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i8(i8* %addr)
+// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.aarch64.ldxr.p0i8(i8* %addr)
 // CHECK-ARM64: [[TRUNCRES:%.*]] = trunc i64 [[INTRES]] to i32
 // CHECK-ARM64: [[SEXTTMP:%.*]] = shl i32 [[TRUNCRES]], 24
 // CHECK-ARM64: ashr exact i32 [[SEXTTMP]], 24
@@ -52,7 +52,7 @@ int test_ldrex(char *addr, long long *ad
 // CHECK: ashr exact i32 [[TMPSEXT]], 16
 
 // CHECK-ARM64: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
-// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i16(i16* [[ADDR16]])
+// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.aarch64.ldxr.p0i16(i16* [[ADDR16]])
 // CHECK-ARM64: [[TRUNCRES:%.*]] = trunc i64 [[INTRES]] to i32
 // CHECK-ARM64: [[TMPSEXT:%.*]] = shl i32 [[TRUNCRES]], 16
 // CHECK-ARM64: ashr exact i32 [[TMPSEXT]], 16
@@ -62,20 +62,20 @@ int test_ldrex(char *addr, long long *ad
 // CHECK:  call i32 @llvm.arm.ldrex.p0i32(i32* [[ADDR32]])
 
 // CHECK-ARM64: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
-// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i32(i32* [[ADDR32]])
+// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.aarch64.ldxr.p0i32(i32* [[ADDR32]])
 // CHECK-ARM64: trunc i64 [[INTRES]] to i32
 
   sum += __builtin_arm_ldrex((long long *)addr);
 // CHECK: call { i32, i32 } @llvm.arm.ldrexd(i8* %addr)
 
 // CHECK-ARM64: [[ADDR64:%.*]] = bitcast i8* %addr to i64*
-// CHECK-ARM64: call i64 @llvm.arm64.ldxr.p0i64(i64* [[ADDR64]])
+// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i64(i64* [[ADDR64]])
 
   sum += __builtin_arm_ldrex(addr64);
 // CHECK: [[ADDR64_AS8:%.*]] = bitcast i64* %addr64 to i8*
 // CHECK: call { i32, i32 } @llvm.arm.ldrexd(i8* [[ADDR64_AS8]])
 
-// CHECK-ARM64: call i64 @llvm.arm64.ldxr.p0i64(i64* %addr64)
+// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i64(i64* %addr64)
 
   sum += __builtin_arm_ldrex(addrfloat);
 // CHECK: [[INTADDR:%.*]] = bitcast float* %addrfloat to i32*
@@ -83,7 +83,7 @@ int test_ldrex(char *addr, long long *ad
 // CHECK: bitcast i32 [[INTRES]] to float
 
 // CHECK-ARM64: [[INTADDR:%.*]] = bitcast float* %addrfloat to i32*
-// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i32(i32* [[INTADDR]])
+// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.aarch64.ldxr.p0i32(i32* [[INTADDR]])
 // CHECK-ARM64: [[TRUNCRES:%.*]] = trunc i64 [[INTRES]] to i32
 // CHECK-ARM64: bitcast i32 [[TRUNCRES]] to float
 
@@ -97,21 +97,21 @@ int test_ldrex(char *addr, long long *ad
 // CHECK: [[INTRES:%.*]] = or i64 [[RESHIHI]], [[RESLO64]]
 // CHECK: bitcast i64 [[INTRES]] to double
 
-// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i64(i64* [[ADDR64]])
+// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.aarch64.ldxr.p0i64(i64* [[ADDR64]])
 // CHECK-ARM64: bitcast i64 [[INTRES]] to double
 
   sum += *__builtin_arm_ldrex((int **)addr);
 // CHECK: [[INTRES:%.*]] = tail call i32 @llvm.arm.ldrex.p0i32(i32* [[ADDR32]])
 // CHECK: inttoptr i32 [[INTRES]] to i32*
 
-// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i64(i64* [[ADDR64]])
+// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.aarch64.ldxr.p0i64(i64* [[ADDR64]])
 // CHECK-ARM64: inttoptr i64 [[INTRES]] to i32*
 
   sum += __builtin_arm_ldrex((struct Simple **)addr)->a;
 // CHECK: [[INTRES:%.*]] = tail call i32 @llvm.arm.ldrex.p0i32(i32* [[ADDR32]])
 // CHECK: inttoptr i32 [[INTRES]] to %struct.Simple*
 
-// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i64(i64* [[ADDR64]])
+// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.aarch64.ldxr.p0i64(i64* [[ADDR64]])
 // CHECK-ARM64: inttoptr i64 [[INTRES]] to %struct.Simple*
   return sum;
 }
@@ -124,44 +124,44 @@ int test_strex(char *addr) {
   res |= __builtin_arm_strex(4, addr);
 // CHECK: call i32 @llvm.arm.strex.p0i8(i32 4, i8* %addr)
 
-// CHECK-ARM64: call i32 @llvm.arm64.stxr.p0i8(i64 4, i8* %addr)
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i8(i64 4, i8* %addr)
 
   res |= __builtin_arm_strex(42, (short *)addr);
 // CHECK: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
 // CHECK:  call i32 @llvm.arm.strex.p0i16(i32 42, i16* [[ADDR16]])
 
 // CHECK-ARM64: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
-// CHECK-ARM64:  call i32 @llvm.arm64.stxr.p0i16(i64 42, i16* [[ADDR16]])
+// CHECK-ARM64:  call i32 @llvm.aarch64.stxr.p0i16(i64 42, i16* [[ADDR16]])
 
   res |= __builtin_arm_strex(42, (int *)addr);
 // CHECK: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
 // CHECK: call i32 @llvm.arm.strex.p0i32(i32 42, i32* [[ADDR32]])
 
 // CHECK-ARM64: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
-// CHECK-ARM64: call i32 @llvm.arm64.stxr.p0i32(i64 42, i32* [[ADDR32]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i32(i64 42, i32* [[ADDR32]])
 
   res |= __builtin_arm_strex(42, (long long *)addr);
 // CHECK: call i32 @llvm.arm.strexd(i32 42, i32 0, i8* %addr)
 
 // CHECK-ARM64: [[ADDR64:%.*]] = bitcast i8* %addr to i64*
-// CHECK-ARM64: call i32 @llvm.arm64.stxr.p0i64(i64 42, i64* [[ADDR64]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 42, i64* [[ADDR64]])
 
   res |= __builtin_arm_strex(2.71828f, (float *)addr);
 // CHECK: call i32 @llvm.arm.strex.p0i32(i32 1076754509, i32* [[ADDR32]])
 
-// CHECK-ARM64: call i32 @llvm.arm64.stxr.p0i32(i64 1076754509, i32* [[ADDR32]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i32(i64 1076754509, i32* [[ADDR32]])
 
   res |= __builtin_arm_strex(3.14159, (double *)addr);
 // CHECK: call i32 @llvm.arm.strexd(i32 -266631570, i32 1074340345, i8* %addr)
 
-// CHECK-ARM64: call i32 @llvm.arm64.stxr.p0i64(i64 4614256650576692846, i64* [[ADDR64]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 4614256650576692846, i64* [[ADDR64]])
 
   res |= __builtin_arm_strex(&var, (struct Simple **)addr);
 // CHECK: [[INTVAL:%.*]] = ptrtoint i16* %var to i32
 // CHECK: call i32 @llvm.arm.strex.p0i32(i32 [[INTVAL]], i32* [[ADDR32]])
 
 // CHECK-ARM64: [[INTVAL:%.*]] = ptrtoint i16* %var to i64
-// CHECK-ARM64: call i32 @llvm.arm64.stxr.p0i64(i64 [[INTVAL]], i64* [[ADDR64]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 [[INTVAL]], i64* [[ADDR64]])
 
   return res;
 }
@@ -172,7 +172,7 @@ void test_clrex() {
 
   __builtin_arm_clrex();
 // CHECK: call void @llvm.arm.clrex()
-// CHECK-ARM64: call void @llvm.arm64.clrex()
+// CHECK-ARM64: call void @llvm.aarch64.clrex()
 }
 
 #ifdef __aarch64__
@@ -183,7 +183,7 @@ __int128 test_ldrex_128(__int128 *addr)
 
   return __builtin_arm_ldrex(addr);
 // CHECK-ARM64: [[ADDR8:%.*]] = bitcast i128* %addr to i8*
-// CHECK-ARM64: [[STRUCTRES:%.*]] = tail call { i64, i64 } @llvm.arm64.ldxp(i8* [[ADDR8]])
+// CHECK-ARM64: [[STRUCTRES:%.*]] = tail call { i64, i64 } @llvm.aarch64.ldxp(i8* [[ADDR8]])
 // CHECK-ARM64: [[RESHI:%.*]] = extractvalue { i64, i64 } [[STRUCTRES]], 1
 // CHECK-ARM64: [[RESLO:%.*]] = extractvalue { i64, i64 } [[STRUCTRES]], 0
 // CHECK-ARM64: [[RESHI64:%.*]] = zext i64 [[RESHI]] to i128
@@ -201,6 +201,6 @@ int test_strex_128(__int128 *addr, __int
 // CHECK-ARM64: [[VALHI128:%.*]] = lshr i128 %val, 64
 // CHECK-ARM64: [[VALHI:%.*]] = trunc i128 [[VALHI128]] to i64
 // CHECK-ARM64: [[ADDR8:%.*]] = bitcast i128* %addr to i8*
-// CHECK-ARM64: [[RES:%.*]] = tail call i32 @llvm.arm64.stxp(i64 [[VALLO]], i64 [[VALHI]], i8* [[ADDR8]])
+// CHECK-ARM64: [[RES:%.*]] = tail call i32 @llvm.aarch64.stxp(i64 [[VALLO]], i64 [[VALHI]], i8* [[ADDR8]])
 }
 #endif
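
The __builtin_arm_ldrex/__builtin_arm_strex builtins checked above are polymorphic over the pointee type; on AArch64 they now lower to llvm.aarch64.ldxr/llvm.aarch64.stxr (and to llvm.aarch64.ldxp/stxp in the 128-bit case). A minimal retry-loop sketch of the pattern these tests cover, illustrative only since the actual test bodies are elided from the hunks:

  /* Illustrative exclusive load/store retry loop; mirrors the
     atomic_inc pattern whose CHECK lines appear above. */
  int atomic_inc_sketch(int *addr) {
    int old;
    do {
      old = __builtin_arm_ldrex(addr);            /* llvm.aarch64.ldxr */
    } while (__builtin_arm_strex(old + 1, addr)); /* llvm.aarch64.stxr */
    return old;
  }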

Modified: cfe/trunk/test/CodeGen/neon-crypto.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/neon-crypto.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/neon-crypto.c (original)
+++ cfe/trunk/test/CodeGen/neon-crypto.c Sat May 24 07:52:07 2014
@@ -14,83 +14,83 @@ uint8x16_t test_vaeseq_u8(uint8x16_t dat
   // CHECK-LABEL: @test_vaeseq_u8
   // CHECK-NO-CRYPTO: warning: implicit declaration of function 'vaeseq_u8' is invalid in C99
   return vaeseq_u8(data, key);
-  // CHECK: call <16 x i8> @llvm.{{arm.neon|arm64.crypto}}.aese(<16 x i8> %data, <16 x i8> %key)
+  // CHECK: call <16 x i8> @llvm.{{arm.neon|aarch64.crypto}}.aese(<16 x i8> %data, <16 x i8> %key)
 }
 
 uint8x16_t test_vaesdq_u8(uint8x16_t data, uint8x16_t key) {
   // CHECK-LABEL: @test_vaesdq_u8
   return vaesdq_u8(data, key);
-  // CHECK: call <16 x i8> @llvm.{{arm.neon|arm64.crypto}}.aesd(<16 x i8> %data, <16 x i8> %key)
+  // CHECK: call <16 x i8> @llvm.{{arm.neon|aarch64.crypto}}.aesd(<16 x i8> %data, <16 x i8> %key)
 }
 
 uint8x16_t test_vaesmcq_u8(uint8x16_t data) {
   // CHECK-LABEL: @test_vaesmcq_u8
   return vaesmcq_u8(data);
-  // CHECK: call <16 x i8> @llvm.{{arm.neon|arm64.crypto}}.aesmc(<16 x i8> %data)
+  // CHECK: call <16 x i8> @llvm.{{arm.neon|aarch64.crypto}}.aesmc(<16 x i8> %data)
 }
 
 uint8x16_t test_vaesimcq_u8(uint8x16_t data) {
   // CHECK-LABEL: @test_vaesimcq_u8
   return vaesimcq_u8(data);
-  // CHECK: call <16 x i8> @llvm.{{arm.neon|arm64.crypto}}.aesimc(<16 x i8> %data)
+  // CHECK: call <16 x i8> @llvm.{{arm.neon|aarch64.crypto}}.aesimc(<16 x i8> %data)
 }
 
 uint32_t test_vsha1h_u32(uint32_t hash_e) {
   // CHECK-LABEL: @test_vsha1h_u32
   return vsha1h_u32(hash_e);
-  // CHECK: call i32 @llvm.{{arm.neon|arm64.crypto}}.sha1h(i32 %hash_e)
+  // CHECK: call i32 @llvm.{{arm.neon|aarch64.crypto}}.sha1h(i32 %hash_e)
 }
 
 uint32x4_t test_vsha1su1q_u32(uint32x4_t w0_3, uint32x4_t w12_15) {
   // CHECK-LABEL: @test_vsha1su1q_u32
   return vsha1su1q_u32(w0_3, w12_15);
-  // CHECK: call <4 x i32> @llvm.{{arm.neon|arm64.crypto}}.sha1su1(<4 x i32> %w0_3, <4 x i32> %w12_15)
+  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha1su1(<4 x i32> %w0_3, <4 x i32> %w12_15)
 }
 
 uint32x4_t test_vsha256su0q_u32(uint32x4_t w0_3, uint32x4_t w4_7) {
   // CHECK-LABEL: @test_vsha256su0q_u32
   return vsha256su0q_u32(w0_3, w4_7);
-  // CHECK: call <4 x i32> @llvm.{{arm.neon|arm64.crypto}}.sha256su0(<4 x i32> %w0_3, <4 x i32> %w4_7)
+  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha256su0(<4 x i32> %w0_3, <4 x i32> %w4_7)
 }
 
 uint32x4_t test_vsha1cq_u32(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {
   // CHECK-LABEL: @test_vsha1cq_u32
   return vsha1cq_u32(hash_abcd, hash_e, wk);
-  // CHECK: call <4 x i32> @llvm.{{arm.neon|arm64.crypto}}.sha1c(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
+  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha1c(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
 }
 
 uint32x4_t test_vsha1pq_u32(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {
   // CHECK-LABEL: @test_vsha1pq_u32
   return vsha1pq_u32(hash_abcd, hash_e, wk);
-  // CHECK: call <4 x i32> @llvm.{{arm.neon|arm64.crypto}}.sha1p(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
+  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha1p(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
 }
 
 uint32x4_t test_vsha1mq_u32(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {
   // CHECK-LABEL: @test_vsha1mq_u32
   return vsha1mq_u32(hash_abcd, hash_e, wk);
-  // CHECK: call <4 x i32> @llvm.{{arm.neon|arm64.crypto}}.sha1m(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
+  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha1m(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
 }
 
 uint32x4_t test_vsha1su0q_u32(uint32x4_t w0_3, uint32x4_t w4_7, uint32x4_t w8_11) {
   // CHECK-LABEL: @test_vsha1su0q_u32
   return vsha1su0q_u32(w0_3, w4_7, w8_11);
-  // CHECK: call <4 x i32> @llvm.{{arm.neon|arm64.crypto}}.sha1su0(<4 x i32> %w0_3, <4 x i32> %w4_7, <4 x i32> %w8_11)
+  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha1su0(<4 x i32> %w0_3, <4 x i32> %w4_7, <4 x i32> %w8_11)
 }
 
 uint32x4_t test_vsha256hq_u32(uint32x4_t hash_abcd, uint32x4_t hash_efgh, uint32x4_t wk) {
   // CHECK-LABEL: @test_vsha256hq_u32
   return vsha256hq_u32(hash_abcd, hash_efgh, wk);
-  // CHECK: call <4 x i32> @llvm.{{arm.neon|arm64.crypto}}.sha256h(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk)
+  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha256h(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk)
 }
 
 uint32x4_t test_vsha256h2q_u32(uint32x4_t hash_efgh, uint32x4_t hash_abcd, uint32x4_t wk) {
   // CHECK-LABEL: @test_vsha256h2q_u32
   return vsha256h2q_u32(hash_efgh, hash_abcd, wk);
-  // CHECK: call <4 x i32> @llvm.{{arm.neon|arm64.crypto}}.sha256h2(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk)
+  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha256h2(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk)
 }
 
 uint32x4_t test_vsha256su1q_u32(uint32x4_t w0_3, uint32x4_t w8_11, uint32x4_t w12_15) {
   // CHECK-LABEL: @test_vsha256su1q_u32
   return vsha256su1q_u32(w0_3, w8_11, w12_15);
-  // CHECK: call <4 x i32> @llvm.{{arm.neon|arm64.crypto}}.sha256su1(<4 x i32> %w0_3, <4 x i32> %w8_11, <4 x i32> %w12_15)
+  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha256su1(<4 x i32> %w0_3, <4 x i32> %w8_11, <4 x i32> %w12_15)
 }
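
The crypto tests above exercise the ARMv8 AES/SHA intrinsics; only the 64-bit IR names change (llvm.arm64.crypto.* to llvm.aarch64.crypto.*), while the 32-bit llvm.arm.neon.* spellings stay put, hence the regex alternation in each CHECK line. A small usage sketch, assuming <arm_neon.h> and a crypto-capable target (e.g. -march=armv8-a+crypto):

  #include <arm_neon.h>

  /* Illustrative single AES round: AESE performs AddRoundKey, SubBytes
     and ShiftRows; AESMC performs MixColumns. */
  uint8x16_t aes_round_sketch(uint8x16_t state, uint8x16_t round_key) {
    return vaesmcq_u8(vaeseq_u8(state, round_key));
  }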

Modified: cfe/trunk/test/Driver/arm-alignment.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/Driver/arm-alignment.c?rev=209579&r1=209578&r2=209579&view=diff
==============================================================================
--- cfe/trunk/test/Driver/arm-alignment.c (original)
+++ cfe/trunk/test/Driver/arm-alignment.c Sat May 24 07:52:07 2014
@@ -7,17 +7,17 @@
 // RUN: %clang -target arm-none-gnueabi -mno-unaligned-access -munaligned-access -### %s 2> %t
 // RUN: FileCheck --check-prefix=CHECK-UNALIGNED-ARM < %t %s
 
-// RUN: %clang -target arm64-none-gnueabi -munaligned-access -### %s 2> %t
-// RUN: FileCheck --check-prefix=CHECK-UNALIGNED-ARM64 < %t %s
+// RUN: %clang -target aarch64-none-gnueabi -munaligned-access -### %s 2> %t
+// RUN: FileCheck --check-prefix=CHECK-UNALIGNED-AARCH64 < %t %s
 
-// RUN: %clang -target arm64-none-gnueabi -mstrict-align -munaligned-access -### %s 2> %t
-// RUN: FileCheck --check-prefix=CHECK-UNALIGNED-ARM64 < %t %s
+// RUN: %clang -target aarch64-none-gnueabi -mstrict-align -munaligned-access -### %s 2> %t
+// RUN: FileCheck --check-prefix=CHECK-UNALIGNED-AARCH64 < %t %s
 
-// RUN: %clang -target arm64-none-gnueabi -mno-unaligned-access -munaligned-access -### %s 2> %t
-// RUN: FileCheck --check-prefix=CHECK-UNALIGNED-ARM64 < %t %s
+// RUN: %clang -target aarch64-none-gnueabi -mno-unaligned-access -munaligned-access -### %s 2> %t
+// RUN: FileCheck --check-prefix=CHECK-UNALIGNED-AARCH64 < %t %s
 
 // CHECK-UNALIGNED-ARM: "-backend-option" "-arm-no-strict-align"
-// CHECK-UNALIGNED-ARM64: "-backend-option" "-arm64-no-strict-align"
+// CHECK-UNALIGNED-AARCH64: "-backend-option" "-aarch64-no-strict-align"
 
 
 // RUN: %clang -target arm-none-gnueabi -mno-unaligned-access -### %s 2> %t
@@ -32,17 +32,17 @@
 // RUN: %clang -target arm-none-gnueabi -munaligned-access -mstrict-align -### %s 2> %t
 // RUN: FileCheck --check-prefix=CHECK-ALIGNED-ARM < %t %s
 
-// RUN: %clang -target arm64-none-gnueabi -mno-unaligned-access -### %s 2> %t
-// RUN: FileCheck --check-prefix=CHECK-ALIGNED-ARM64 < %t %s
+// RUN: %clang -target aarch64-none-gnueabi -mno-unaligned-access -### %s 2> %t
+// RUN: FileCheck --check-prefix=CHECK-ALIGNED-AARCH64 < %t %s
 
-// RUN: %clang -target arm64-none-gnueabi -mstrict-align -### %s 2> %t
-// RUN: FileCheck --check-prefix=CHECK-ALIGNED-ARM64 < %t %s
+// RUN: %clang -target aarch64-none-gnueabi -mstrict-align -### %s 2> %t
+// RUN: FileCheck --check-prefix=CHECK-ALIGNED-AARCH64 < %t %s
 
-// RUN: %clang -target arm64-none-gnueabi -munaligned-access -mno-unaligned-access -### %s 2> %t
-// RUN: FileCheck --check-prefix=CHECK-ALIGNED-ARM64 < %t %s
+// RUN: %clang -target aarch64-none-gnueabi -munaligned-access -mno-unaligned-access -### %s 2> %t
+// RUN: FileCheck --check-prefix=CHECK-ALIGNED-AARCH64 < %t %s
 
-// RUN: %clang -target arm64-none-gnueabi -munaligned-access -mstrict-align -### %s 2> %t
-// RUN: FileCheck --check-prefix=CHECK-ALIGNED-ARM64 < %t %s
+// RUN: %clang -target aarch64-none-gnueabi -munaligned-access -mstrict-align -### %s 2> %t
+// RUN: FileCheck --check-prefix=CHECK-ALIGNED-AARCH64 < %t %s
 
 // CHECK-ALIGNED-ARM: "-backend-option" "-arm-strict-align"
-// CHECK-ALIGNED-ARM64: "-backend-option" "-arm64-strict-align"
+// CHECK-ALIGNED-AARCH64: "-backend-option" "-aarch64-strict-align"
