[llvm] r206291 - AArch64/ARM64: enable more AArch64 tests on ARM64.

Tim Northover <tnorthover@apple.com>
Tue Apr 15 07:00:29 PDT 2014


Author: tnorthover
Date: Tue Apr 15 09:00:29 2014
New Revision: 206291

URL: http://llvm.org/viewvc/llvm-project?rev=206291&view=rev
Log:
AArch64/ARM64: enable more AArch64 tests on ARM64.

No code changes for this bunch, just some test rejigs.

Added:
    llvm/trunk/test/CodeGen/AArch64/literal_pools_float.ll
    llvm/trunk/test/CodeGen/AArch64/literal_pools_int.ll
      - copied, changed from r206290, llvm/trunk/test/CodeGen/AArch64/literal_pools.ll
Removed:
    llvm/trunk/test/CodeGen/AArch64/literal_pools.ll
Modified:
    llvm/trunk/test/CodeGen/AArch64/fcvt-int.ll
    llvm/trunk/test/CodeGen/AArch64/flags-multiuse.ll
    llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll
    llvm/trunk/test/CodeGen/AArch64/fp-cond-sel.ll
    llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll
    llvm/trunk/test/CodeGen/AArch64/fp128.ll
    llvm/trunk/test/CodeGen/AArch64/fpimm.ll
    llvm/trunk/test/CodeGen/AArch64/frameaddr.ll
    llvm/trunk/test/CodeGen/AArch64/free-zext.ll
    llvm/trunk/test/CodeGen/AArch64/global-alignment.ll
    llvm/trunk/test/CodeGen/AArch64/got-abuse.ll
    llvm/trunk/test/CodeGen/AArch64/i128-align.ll
    llvm/trunk/test/CodeGen/AArch64/i128-shift.ll
    llvm/trunk/test/CodeGen/AArch64/illegal-float-ops.ll
    llvm/trunk/test/CodeGen/AArch64/init-array.ll
    llvm/trunk/test/CodeGen/AArch64/ldst-unscaledimm.ll
    llvm/trunk/test/CodeGen/AArch64/ldst-unsignedimm.ll

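The rejigs fall into a few recurring patterns, the most common of which is sketched below. The sketch is purely illustrative (@var and @load_var are made-up names, not anything in the tree): each test gains a second RUN line that drives the arm64 backend, and checks on :lo12: relocation operands are loosened from "#:lo12:" to "{{#?}}:lo12:" so FileCheck accepts the operand whether or not the assembler syntax prints a leading '#'. Where the two backends generate genuinely different code, the checks are instead split onto CHECK-AARCH64 and CHECK-ARM64 prefixes, as in the fp-cond-sel.ll and fp128.ll hunks further down.

    ; Illustrative sketch only: run the same test through both backends and
    ; make the '#' before the relocation specifier optional.
    ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
    ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-none-linux-gnu | FileCheck %s

    @var = global i32 0

    define i32 @load_var() {
    ; CHECK-LABEL: load_var:
    ; CHECK: adrp {{x[0-9]+}}, var
    ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
      %val = load i32* @var
      ret i32 %val
    }
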
Modified: llvm/trunk/test/CodeGen/AArch64/fcvt-int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fcvt-int.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fcvt-int.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fcvt-int.ll Tue Apr 15 09:00:29 2014
@@ -1,4 +1,5 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 
 define i32 @test_floattoi32(float %in) {
 ; CHECK-LABEL: test_floattoi32:

Modified: llvm/trunk/test/CodeGen/AArch64/flags-multiuse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/flags-multiuse.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/flags-multiuse.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/flags-multiuse.ll Tue Apr 15 09:00:29 2014
@@ -1,4 +1,5 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
 
 ; LLVM should be able to cope with multiple uses of the same flag-setting
 ; instruction at different points of a routine. Either by rematerializing the

Modified: llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll Tue Apr 15 09:00:29 2014
@@ -1,4 +1,5 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-linux-gnu | FileCheck %s
 
 @varfloat = global float 0.0
 @vardouble = global double 0.0

Modified: llvm/trunk/test/CodeGen/AArch64/fp-cond-sel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp-cond-sel.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp-cond-sel.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp-cond-sel.ll Tue Apr 15 09:00:29 2014
@@ -1,25 +1,37 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
 
 @varfloat = global float 0.0
 @vardouble = global double 0.0
 
+declare void @use_float(float)
+declare void @use_double(double)
+
 define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
 ; CHECK-LABEL: test_csel:
 
   %tst1 = icmp ugt i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, float 0.0, float 1.0
   store float %val1, float* @varfloat
-; CHECK: ldr [[FLT0:s[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI
-; CHECK: fmov [[FLT1:s[0-9]+]], #1.0
-; CHECK: fcsel {{s[0-9]+}}, [[FLT0]], [[FLT1]], hi
+; CHECK-AARCH64: ldr s[[FLT0:[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:.LCPI
+; CHECK-ARM64: movi v[[FLT0:[0-9]+]].2d, #0
+; CHECK: fmov s[[FLT1:[0-9]+]], #1.0
+; CHECK: fcsel {{s[0-9]+}}, s[[FLT0]], s[[FLT1]], hi
 
   %rhs64 = sext i32 %rhs32 to i64
   %tst2 = icmp sle i64 %lhs64, %rhs64
   %val2 = select i1 %tst2, double 1.0, double 0.0
   store double %val2, double* @vardouble
-; CHECK: ldr [[FLT0:d[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI
-; CHECK: fmov [[FLT1:d[0-9]+]], #1.0
-; CHECK: fcsel {{d[0-9]+}}, [[FLT1]], [[FLT0]], le
+; CHECK-AARCH64: ldr d[[FLT0:[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:.LCPI
+; FLT0 is reused from above on ARM64.
+; CHECK: fmov d[[FLT1:[0-9]+]], #1.0
+; CHECK: fcsel {{d[0-9]+}}, d[[FLT1]], d[[FLT0]], le
+
+  call void @use_float(float 0.0)
+  call void @use_float(float 1.0)
+
+  call void @use_double(double 0.0)
+  call void @use_double(double 1.0)
 
   ret void
 ; CHECK: ret

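The RUN lines in this hunk pass --check-prefix twice, so a single FileCheck invocation honours both the shared CHECK prefix and one backend-specific prefix: lines both backends agree on stay on plain CHECK, and only the divergent ones move to a long prefix. Below is a minimal sketch of the scheme, modelled on test_csel above (select_fp is an illustrative function, not part of the test):

    ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
    ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64

    define void @select_fp(i32 %lhs, i32 %rhs, float* %dst) {
    ; CHECK-LABEL: select_fp:
    ; The old AArch64 backend loads 0.0 from a literal pool, while ARM64
    ; materialises it with a vector move-immediate, so only that line needs
    ; a backend-specific prefix; the remaining checks stay shared.
    ; CHECK-AARCH64: ldr s[[FLT0:[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:.LCPI
    ; CHECK-ARM64: movi v[[FLT0:[0-9]+]].2d, #0
    ; CHECK: fmov s[[FLT1:[0-9]+]], #1.0
    ; CHECK: fcsel {{s[0-9]+}}, s[[FLT0]], s[[FLT1]], hi
      %tst = icmp ugt i32 %lhs, %rhs
      %val = select i1 %tst, float 0.0, float 1.0
      store float %val, float* %dst
      ret void
    }
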
Modified: llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll Tue Apr 15 09:00:29 2014
@@ -1,4 +1,5 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
 declare void @bar(i8*, i8*, i32*)
 
 ; SelectionDAG used to try to fold some fp128 operations using the ppc128 type,
@@ -12,6 +13,6 @@ define fp128 @test_folding() {
   %fpval = sitofp i32 %val to fp128
   ; If the value is loaded from a constant pool into an fp128, it's been folded
   ; successfully.
-; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.LCPI
   ret fp128 %fpval
 }

Modified: llvm/trunk/test/CodeGen/AArch64/fp128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp128.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp128.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp128.ll Tue Apr 15 09:00:29 2014
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
+; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
 
 @lhs = global fp128 zeroinitializer
 @rhs = global fp128 zeroinitializer
@@ -8,8 +9,8 @@ define fp128 @test_add() {
 
   %lhs = load fp128* @lhs
   %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
 
   %val = fadd fp128 %lhs, %rhs
 ; CHECK: bl __addtf3
@@ -21,8 +22,8 @@ define fp128 @test_sub() {
 
   %lhs = load fp128* @lhs
   %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
 
   %val = fsub fp128 %lhs, %rhs
 ; CHECK: bl __subtf3
@@ -34,8 +35,8 @@ define fp128 @test_mul() {
 
   %lhs = load fp128* @lhs
   %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
 
   %val = fmul fp128 %lhs, %rhs
 ; CHECK: bl __multf3
@@ -47,8 +48,8 @@ define fp128 @test_div() {
 
   %lhs = load fp128* @lhs
   %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
 
   %val = fdiv fp128 %lhs, %rhs
 ; CHECK: bl __divtf3
@@ -125,8 +126,8 @@ define i1 @test_setcc1() {
 
   %lhs = load fp128* @lhs
   %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
 
 ; Technically, everything after the call to __letf2 is redundant, but we'll let
 ; LLVM have its fun for now.
@@ -144,8 +145,8 @@ define i1 @test_setcc2() {
 
   %lhs = load fp128* @lhs
   %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
 
 ; Technically, everything after the call to __letf2 is redundant, but we'll let
 ; LLVM have its fun for now.
@@ -169,8 +170,8 @@ define i32 @test_br_cc() {
 
   %lhs = load fp128* @lhs
   %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
+; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
 
   ; olt == !uge, which LLVM unfortunately "optimizes" this to.
   %cond = fcmp olt fp128 %lhs, %rhs
@@ -189,13 +190,13 @@ define i32 @test_br_cc() {
 iftrue:
   ret i32 42
 ; CHECK-NEXT: BB#
-; CHECK-NEXT: movz x0, #42
+; CHECK-NEXT: movz {{x0|w0}}, #42
 ; CHECK-NEXT: b [[REALRET:.LBB[0-9]+_[0-9]+]]
 
 iffalse:
   ret i32 29
 ; CHECK: [[RET29]]:
-; CHECK-NEXT: movz x0, #29
+; CHECK-NEXT: movz {{x0|w0}}, #29
 ; CHECK-NEXT: [[REALRET]]:
 ; CHECK: ret
 }
@@ -205,14 +206,15 @@ define void @test_select(i1 %cond, fp128
 
   %val = select i1 %cond, fp128 %lhs, fp128 %rhs
   store fp128 %val, fp128* @lhs
-; CHECK: cmp w0, #0
-; CHECK: str q1, [sp]
+; CHECK: cmp {{w[0-9]+}}, #0
+; CHECK-AARCH64: str q1, [sp]
 ; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]]
 ; CHECK-NEXT: BB#
-; CHECK-NEXT: str q0, [sp]
+; CHECK-AARCH64-NEXT: str q0, [sp]
+; CHECK-ARM64-NEXT: orr v[[DEST:[0-9]+]].16b, v0.16b, v0.16b
 ; CHECK-NEXT: [[IFFALSE]]:
-; CHECK-NEXT: ldr q0, [sp]
-; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]
+; CHECK-AARCH64-NEXT: ldr q[[DEST:[0-9]+]], [sp]
+; CHECK: str q[[DEST]], [{{x[0-9]+}}, {{#?}}:lo12:lhs]
   ret void
 ; CHECK: ret
 }
@@ -228,12 +230,12 @@ define void @test_round() {
   %float = fptrunc fp128 %val to float
   store float %float, float* @varfloat
 ; CHECK: bl __trunctfsf2
-; CHECK: str s0, [{{x[0-9]+}}, #:lo12:varfloat]
+; CHECK: str s0, [{{x[0-9]+}}, {{#?}}:lo12:varfloat]
 
   %double = fptrunc fp128 %val to double
   store double %double, double* @vardouble
 ; CHECK: bl __trunctfdf2
-; CHECK: str d0, [{{x[0-9]+}}, #:lo12:vardouble]
+; CHECK: str d0, [{{x[0-9]+}}, {{#?}}:lo12:vardouble]
 
   ret void
 }
@@ -247,13 +249,13 @@ define void @test_extend() {
   %fromfloat = fpext float %float to fp128
   store volatile fp128 %fromfloat, fp128* @lhs
 ; CHECK: bl __extendsftf2
-; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]
+; CHECK: str q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
 
   %double = load double* @vardouble
   %fromdouble = fpext double %double to fp128
   store volatile fp128 %fromdouble, fp128* @lhs
 ; CHECK: bl __extenddftf2
-; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]
+; CHECK: str q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
 
   ret void
 ; CHECK: ret
@@ -269,9 +271,10 @@ define fp128 @test_neg(fp128 %in) {
   ; Could in principle be optimized to fneg which we can't select, this makes
   ; sure that doesn't happen.
   %ret = fsub fp128 0xL00000000000000008000000000000000, %in
-; CHECK: str q0, [sp, #-16]
-; CHECK-NEXT: ldr q1, [sp], #16
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:[[MINUS0]]]
+; CHECK-AARCH64: str q0, [sp, #-16]
+; CHECK-AARCH64-NEXT: ldr q1, [sp], #16
+; CHECK-ARM64: orr v1.16b, v0.16b, v0.16b
+; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:[[MINUS0]]]
 ; CHECK: bl __subtf3
 
   ret fp128 %ret

Modified: llvm/trunk/test/CodeGen/AArch64/fpimm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fpimm.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fpimm.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fpimm.ll Tue Apr 15 09:00:29 2014
@@ -1,4 +1,5 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-linux-gnu | FileCheck %s
 
 @varf32 = global float 0.0
 @varf64 = global double 0.0
@@ -13,7 +14,7 @@ define void @check_float() {
 
   %newval2 = fadd float %val, 128.0
   store volatile float %newval2, float* @varf32
-; CHECK-DAG: ldr [[HARD:s[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI0_0
+; CHECK-DAG: ldr [[HARD:s[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:.LCPI0_0
 
 ; CHECK: ret
   ret void
@@ -29,7 +30,7 @@ define void @check_double() {
 
   %newval2 = fadd double %val, 128.0
   store volatile double %newval2, double* @varf64
-; CHECK-DAG: ldr {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI1_0
+; CHECK-DAG: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.LCPI1_0
 
 ; CHECK: ret
   ret void

Modified: llvm/trunk/test/CodeGen/AArch64/frameaddr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/frameaddr.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/frameaddr.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/frameaddr.ll Tue Apr 15 09:00:29 2014
@@ -1,4 +1,5 @@
 ; RUN: llc < %s -mtriple=aarch64-none-linux-gnu  | FileCheck %s
+; RUN: llc -o - %s -mtriple=arm64-apple-ios7.0  | FileCheck %s
 
 define i8* @t() nounwind {
 entry:
@@ -12,7 +13,7 @@ define i8* @t2() nounwind {
 entry:
 ; CHECK-LABEL: t2:
 ; CHECK: ldr x[[reg:[0-9]+]], [x29]
-; CHECK: ldr x[[reg]], [x[[reg]]]
+; CHECK: ldr {{x[0-9]+}}, [x[[reg]]]
 	%0 = call i8* @llvm.frameaddress(i32 2)
         ret i8* %0
 }

Modified: llvm/trunk/test/CodeGen/AArch64/free-zext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/free-zext.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/free-zext.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/free-zext.ll Tue Apr 15 09:00:29 2014
@@ -1,10 +1,11 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 
 define i64 @test_free_zext(i8* %a, i16* %b) {
 ; CHECK-LABEL: test_free_zext
-; CHECK: ldrb w0, [x0]
-; CHECK: ldrh w1, [x1]
-; CHECK: add x0, x1, x0
+; CHECK-DAG: ldrb w[[A:[0-9]+]], [x0]
+; CHECK: ldrh w[[B:[0-9]+]], [x1]
+; CHECK: add x0, x[[B]], x[[A]]
   %1 = load i8* %a, align 1
   %conv = zext i8 %1 to i64
   %2 = load i16* %b, align 2

Modified: llvm/trunk/test/CodeGen/AArch64/global-alignment.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/global-alignment.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/global-alignment.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/global-alignment.ll Tue Apr 15 09:00:29 2014
@@ -1,4 +1,5 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
 
 @var32 = global [3 x i32] zeroinitializer
 @var64 = global [3 x i64] zeroinitializer
@@ -12,7 +13,7 @@ define i64 @test_align32() {
   ; emit an "LDR x0, [x0, #:lo12:var32] instruction to implement this load.
   %val = load i64* %addr
 ; CHECK: adrp [[HIBITS:x[0-9]+]], var32
-; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], #:lo12:var32
+; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], {{#?}}:lo12:var32
 ; CHECK: ldr x0, [x[[ADDR]]]
 
   ret i64 %val
@@ -27,7 +28,7 @@ define i64 @test_align64() {
   %val = load i64* %addr
 ; CHECK: adrp x[[HIBITS:[0-9]+]], var64
 ; CHECK-NOT: add x[[HIBITS]]
-; CHECK: ldr x0, [x[[HIBITS]], #:lo12:var64]
+; CHECK: ldr x0, [x[[HIBITS]], {{#?}}:lo12:var64]
 
   ret i64 %val
 }
@@ -41,7 +42,7 @@ define i64 @test_var32_align64() {
   %val = load i64* %addr
 ; CHECK: adrp x[[HIBITS:[0-9]+]], var32_align64
 ; CHECK-NOT: add x[[HIBITS]]
-; CHECK: ldr x0, [x[[HIBITS]], #:lo12:var32_align64]
+; CHECK: ldr x0, [x[[HIBITS]], {{#?}}:lo12:var32_align64]
 
   ret i64 %val
 }
@@ -56,7 +57,7 @@ define i64 @test_yet_another_var() {
   ; so we can't fold the load.
   %val = load i64* bitcast({i32, i32}* @yet_another_var to i64*)
 ; CHECK: adrp [[HIBITS:x[0-9]+]], yet_another_var
-; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], #:lo12:yet_another_var
+; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], {{#?}}:lo12:yet_another_var
 ; CHECK: ldr x0, [x[[ADDR]]]
   ret i64 %val
 }
@@ -65,5 +66,5 @@ define i64()* @test_functions() {
 ; CHECK-LABEL: test_functions:
   ret i64()* @test_yet_another_var
 ; CHECK: adrp [[HIBITS:x[0-9]+]], test_yet_another_var
-; CHECK: add x0, [[HIBITS]], #:lo12:test_yet_another_var
+; CHECK: add x0, [[HIBITS]], {{#?}}:lo12:test_yet_another_var
 }

Modified: llvm/trunk/test/CodeGen/AArch64/got-abuse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/got-abuse.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/got-abuse.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/got-abuse.ll Tue Apr 15 09:00:29 2014
@@ -1,5 +1,7 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic < %s | FileCheck %s
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -filetype=obj < %s
+; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -o - %s | FileCheck %s
+; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -filetype=obj -o - %s
 
 ; LLVM gives well-defined semantics to this horrible construct (though C says
 ; it's undefined). Regardless, we shouldn't crash. The important feature here is
@@ -17,7 +19,7 @@ define void @foo() nounwind {
 entry:
   call void @consume(i32 ptrtoint (void ()* @func to i32))
 ; CHECK: adrp x[[ADDRHI:[0-9]+]], :got:func
-; CHECK: ldr {{x[0-9]+}}, [x[[ADDRHI]], #:got_lo12:func]
+; CHECK: ldr {{x[0-9]+}}, [x[[ADDRHI]], {{#?}}:got_lo12:func]
   ret void
 }
 

Modified: llvm/trunk/test/CodeGen/AArch64/i128-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/i128-align.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/i128-align.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/i128-align.ll Tue Apr 15 09:00:29 2014
@@ -1,4 +1,5 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-apple-ios7.0 -verify-machineinstrs -o - %s | FileCheck %s
 
 %struct = type { i32, i128, i8 }
 
@@ -13,7 +14,7 @@ define i64 @check_size() {
 
   %diff = sub i64 %endi, %starti
   ret i64 %diff
-; CHECK: movz x0, #48
+; CHECK: {{movz x0, #48|orr x0, xzr, #0x30}}
 }
 
 define i64 @check_field() {
@@ -25,5 +26,5 @@ define i64 @check_field() {
 
   %diff = sub i64 %endi, %starti
   ret i64 %diff
-; CHECK: movz x0, #16
+; CHECK: {{movz x0, #16|orr x0, xzr, #0x10}}
 }

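When the divergence is only a single instruction, the hunk above folds both spellings into one regex alternation instead of splitting prefixes: the old AArch64 backend materialises the small constant with movz, while ARM64 prefers an orr of a logical immediate into xzr. A sketch of the idiom, mirroring check_field above (check_sixteen is an illustrative name):

    ; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
    ; RUN: llc -mtriple=arm64-apple-ios7.0 -verify-machineinstrs -o - %s | FileCheck %s

    define i64 @check_sixteen() {
    ; CHECK-LABEL: check_sixteen:
    ; Either backend's way of materialising 16 satisfies the single check line.
    ; CHECK: {{movz x0, #16|orr x0, xzr, #0x10}}
      ret i64 16
    }
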
Modified: llvm/trunk/test/CodeGen/AArch64/i128-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/i128-shift.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/i128-shift.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/i128-shift.ll Tue Apr 15 09:00:29 2014
@@ -1,4 +1,5 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; arm64 has its own version of this in long-shift.ll. We'll just use that.
 
 define i128 @test_i128_lsl(i128 %a, i32 %shift) {
 ; CHECK-LABEL: test_i128_lsl:

Modified: llvm/trunk/test/CodeGen/AArch64/illegal-float-ops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/illegal-float-ops.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/illegal-float-ops.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/illegal-float-ops.ll Tue Apr 15 09:00:29 2014
@@ -1,4 +1,5 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
 
 @varfloat = global float 0.0
 @vardouble = global double 0.0

Modified: llvm/trunk/test/CodeGen/AArch64/init-array.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/init-array.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/init-array.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/init-array.ll Tue Apr 15 09:00:29 2014
@@ -1,5 +1,7 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -use-init-array < %s | FileCheck %s
 ; RUN: llc -mtriple=aarch64-none-none-eabi -verify-machineinstrs -use-init-array < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -use-init-array -o - %s | FileCheck %s
+; RUN: llc -mtriple=arm64-none-none-eabi -verify-machineinstrs -use-init-array -o - %s | FileCheck %s
 
 define internal void @_GLOBAL__I_a() section ".text.startup" {
   ret void

Modified: llvm/trunk/test/CodeGen/AArch64/ldst-unscaledimm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ldst-unscaledimm.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ldst-unscaledimm.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/ldst-unscaledimm.ll Tue Apr 15 09:00:29 2014
@@ -1,5 +1,6 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-linux-gnu | FileCheck %s
 
 @var_8bit = global i8 0
 @var_16bit = global i16 0
@@ -160,7 +161,7 @@ define void @ldst_32bit() {
   %val64_unsigned = zext i32 %val32_zext to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
 ; CHECK: ldur {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
-; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
 
 ; Sign-extension to 64-bits
   %addr32_8_sext = getelementptr i8* %addr_8bit, i64 -12
@@ -169,7 +170,7 @@ define void @ldst_32bit() {
   %val64_signed = sext i32 %val32_sext to i64
   store volatile i64 %val64_signed, i64* @var_64bit
 ; CHECK: ldursw {{x[0-9]+}}, [{{x[0-9]+}}, #-12]
-; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
 
 ; Truncation from 64-bits
   %addr64_8_trunc = getelementptr i8* %addr_8bit, i64 255

Modified: llvm/trunk/test/CodeGen/AArch64/ldst-unsignedimm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ldst-unsignedimm.ll?rev=206291&r1=206290&r2=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ldst-unsignedimm.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/ldst-unsignedimm.ll Tue Apr 15 09:00:29 2014
@@ -1,5 +1,6 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-none-linux-gnu | FileCheck %s
 
 @var_8bit = global i8 0
 @var_16bit = global i16 0
@@ -20,25 +21,25 @@ define void @ldst_8bit() {
    %val32_signed = sext i8 %val8_sext32 to i32
    store volatile i32 %val32_signed, i32* @var_32bit
 ; CHECK: adrp {{x[0-9]+}}, var_8bit
-; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; match a zero-extending load volatile 8-bit -> 32-bit
   %val8_zext32 = load volatile i8* @var_8bit
   %val32_unsigned = zext i8 %val8_zext32 to i32
   store volatile i32 %val32_unsigned, i32* @var_32bit
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; match an any-extending load volatile 8-bit -> 32-bit
   %val8_anyext = load volatile i8* @var_8bit
   %newval8 = add i8 %val8_anyext, 1
   store volatile i8 %newval8, i8* @var_8bit
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; match a sign-extending load volatile 8-bit -> 64-bit
   %val8_sext64 = load volatile i8* @var_8bit
   %val64_signed = sext i8 %val8_sext64 to i64
   store volatile i64 %val64_signed, i64* @var_64bit
-; CHECK: ldrsb {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: ldrsb {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; match a zero-extending load volatile 8-bit -> 64-bit.
 ; This uses the fact that ldrb w0, [x0] will zero out the high 32-bits
@@ -46,19 +47,19 @@ define void @ldst_8bit() {
   %val8_zext64 = load volatile i8* @var_8bit
   %val64_unsigned = zext i8 %val8_zext64 to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; truncating store volatile 32-bits to 8-bits
   %val32 = load volatile i32* @var_32bit
   %val8_trunc32 = trunc i32 %val32 to i8
   store volatile i8 %val8_trunc32, i8* @var_8bit
-; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; truncating store volatile 64-bits to 8-bits
   %val64 = load volatile i64* @var_64bit
   %val8_trunc64 = trunc i64 %val64 to i8
   store volatile i8 %val8_trunc64, i8* @var_8bit
-; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
    ret void
 }
@@ -74,25 +75,25 @@ define void @ldst_16bit() {
   %val32_signed = sext i16 %val16_sext32 to i32
   store volatile i32 %val32_signed, i32* @var_32bit
 ; CHECK: adrp {{x[0-9]+}}, var_16bit
-; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; match a zero-extending load volatile 16-bit -> 32-bit
   %val16_zext32 = load volatile i16* @var_16bit
   %val32_unsigned = zext i16 %val16_zext32 to i32
   store volatile i32 %val32_unsigned, i32* @var_32bit
-; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; match an any-extending load volatile 16-bit -> 32-bit
   %val16_anyext = load volatile i16* @var_16bit
   %newval16 = add i16 %val16_anyext, 1
   store volatile i16 %newval16, i16* @var_16bit
-; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; match a sign-extending load volatile 16-bit -> 64-bit
   %val16_sext64 = load volatile i16* @var_16bit
   %val64_signed = sext i16 %val16_sext64 to i64
   store volatile i64 %val64_signed, i64* @var_64bit
-; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; match a zero-extending load volatile 16-bit -> 64-bit.
 ; This uses the fact that ldrb w0, [x0] will zero out the high 32-bits
@@ -100,19 +101,19 @@ define void @ldst_16bit() {
   %val16_zext64 = load volatile i16* @var_16bit
   %val64_unsigned = zext i16 %val16_zext64 to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
-; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; truncating store volatile 32-bits to 16-bits
   %val32 = load volatile i32* @var_32bit
   %val16_trunc32 = trunc i32 %val32 to i16
   store volatile i16 %val16_trunc32, i16* @var_16bit
-; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; truncating store volatile 64-bits to 16-bits
   %val64 = load volatile i64* @var_64bit
   %val16_trunc64 = trunc i64 %val64 to i16
   store volatile i16 %val16_trunc64, i16* @var_16bit
-; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
   ret void
 }
@@ -124,29 +125,29 @@ define void @ldst_32bit() {
   %val32_noext = load volatile i32* @var_32bit
   store volatile i32 %val32_noext, i32* @var_32bit
 ; CHECK: adrp {{x[0-9]+}}, var_32bit
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
-; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
+; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
 
 ; Zero-extension to 64-bits
   %val32_zext = load volatile i32* @var_32bit
   %val64_unsigned = zext i32 %val32_zext to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
-; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
 
 ; Sign-extension to 64-bits
   %val32_sext = load volatile i32* @var_32bit
   %val64_signed = sext i32 %val32_sext to i64
   store volatile i64 %val64_signed, i64* @var_64bit
-; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
-; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
+; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
 
 ; Truncation from 64-bits
   %val64_trunc = load volatile i64* @var_64bit
   %val32_trunc = trunc i64 %val64_trunc to i32
   store volatile i32 %val32_trunc, i32* @var_32bit
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
-; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
+; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
 
   ret void
 }
@@ -165,7 +166,7 @@ define void @ldst_complex_offsets() {
 ; CHECK: ldst_complex_offsets
   %arr8_addr = load volatile i8** @arr8
 ; CHECK: adrp {{x[0-9]+}}, arr8
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:arr8]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr8]
 
   %arr8_sub1_addr = getelementptr i8* %arr8_addr, i64 1
   %arr8_sub1 = load volatile i8* %arr8_sub1_addr
@@ -180,7 +181,7 @@ define void @ldst_complex_offsets() {
 
   %arr16_addr = load volatile i16** @arr16
 ; CHECK: adrp {{x[0-9]+}}, arr16
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:arr16]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr16]
 
   %arr16_sub1_addr = getelementptr i16* %arr16_addr, i64 1
   %arr16_sub1 = load volatile i16* %arr16_sub1_addr
@@ -195,7 +196,7 @@ define void @ldst_complex_offsets() {
 
   %arr32_addr = load volatile i32** @arr32
 ; CHECK: adrp {{x[0-9]+}}, arr32
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:arr32]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr32]
 
   %arr32_sub1_addr = getelementptr i32* %arr32_addr, i64 1
   %arr32_sub1 = load volatile i32* %arr32_sub1_addr
@@ -210,7 +211,7 @@ define void @ldst_complex_offsets() {
 
   %arr64_addr = load volatile i64** @arr64
 ; CHECK: adrp {{x[0-9]+}}, arr64
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:arr64]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr64]
 
   %arr64_sub1_addr = getelementptr i64* %arr64_addr, i64 1
   %arr64_sub1 = load volatile i64* %arr64_sub1_addr
@@ -230,11 +231,11 @@ define void @ldst_float() {
 
    %valfp = load volatile float* @var_float
 ; CHECK: adrp {{x[0-9]+}}, var_float
-; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_float]
+; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_float]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
 
   store volatile float %valfp, float* @var_float
-; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_float]
+; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_float]
 ; CHECK-NOFP-NOT: str {{s[0-9]+}},
 
    ret void
@@ -245,11 +246,11 @@ define void @ldst_double() {
 
    %valfp = load volatile double* @var_double
 ; CHECK: adrp {{x[0-9]+}}, var_double
-; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_double]
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_double]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
 
   store volatile double %valfp, double* @var_double
-; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_double]
+; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_double]
 ; CHECK-NOFP-NOT: str {{d[0-9]+}},
 
    ret void

Removed: llvm/trunk/test/CodeGen/AArch64/literal_pools.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/literal_pools.ll?rev=206290&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/literal_pools.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/literal_pools.ll (removed)
@@ -1,103 +0,0 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -code-model=large | FileCheck --check-prefix=CHECK-LARGE %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -code-model=large -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP-LARGE %s
-
- at var32 = global i32 0
- at var64 = global i64 0
-
-define void @foo() {
-; CHECK-LABEL: foo:
-    %val32 = load i32* @var32
-    %val64 = load i64* @var64
-
-    %val32_lit32 = and i32 %val32, 123456785
-    store volatile i32 %val32_lit32, i32* @var32
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
-; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldr {{w[0-9]+}}, [x[[LITADDR]]]
-
-    %val64_lit32 = and i64 %val64, 305402420
-    store volatile i64 %val64_lit32, i64* @var64
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
-; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldr {{w[0-9]+}}, [x[[LITADDR]]]
-
-    %val64_lit32signed = and i64 %val64, -12345678
-    store volatile i64 %val64_lit32signed, i64* @var64
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
-; CHECK: ldrsw {{x[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldrsw {{x[0-9]+}}, [x[[LITADDR]]]
-
-    %val64_lit64 = and i64 %val64, 1234567898765432
-    store volatile i64 %val64_lit64, i64* @var64
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
-; CHECK: ldr {{x[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldr {{x[0-9]+}}, [x[[LITADDR]]]
-
-    ret void
-}
-
- at varfloat = global float 0.0
- at vardouble = global double 0.0
-
-define void @floating_lits() {
-; CHECK-LABEL: floating_lits:
-
-  %floatval = load float* @varfloat
-  %newfloat = fadd float %floatval, 128.0
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
-; CHECK: ldr [[LIT128:s[0-9]+]], [x[[LITBASE]], #:lo12:[[CURLIT]]]
-; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI1_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldr {{s[0-9]+}}, [x[[LITADDR]]]
-; CHECK-LARGE: fadd
-; CHECK-NOFP-LARGE-NOT: ldr {{s[0-9]+}},
-; CHECK-NOFP-LARGE-NOT: fadd
-
-  store float %newfloat, float* @varfloat
-
-  %doubleval = load double* @vardouble
-  %newdouble = fadd double %doubleval, 129.0
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
-; CHECK: ldr [[LIT129:d[0-9]+]], [x[[LITBASE]], #:lo12:[[CURLIT]]]
-; CHECK: fadd {{s[0-9]+}}, {{s[0-9]+}}, [[LIT128]]
-; CHECK: fadd {{d[0-9]+}}, {{d[0-9]+}}, [[LIT129]]
-; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
-; CHECK-NOFP-NOT: fadd
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI1_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldr {{d[0-9]+}}, [x[[LITADDR]]]
-; CHECK-NOFP-LARGE-NOT: ldr {{d[0-9]+}},
-
-  store double %newdouble, double* @vardouble
-
-  ret void
-}

Added: llvm/trunk/test/CodeGen/AArch64/literal_pools_float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/literal_pools_float.ll?rev=206291&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/literal_pools_float.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/literal_pools_float.ll Tue Apr 15 09:00:29 2014
@@ -0,0 +1,48 @@
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -code-model=large | FileCheck --check-prefix=CHECK-LARGE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -code-model=large -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP-LARGE %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-none-linux-gnu -code-model=large | FileCheck --check-prefix=CHECK-LARGE %s
+
+ at varfloat = global float 0.0
+ at vardouble = global double 0.0
+
+define void @floating_lits() {
+; CHECK-LABEL: floating_lits:
+
+  %floatval = load float* @varfloat
+  %newfloat = fadd float %floatval, 128.0
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI[0-9]+_[0-9]+]]
+; CHECK: ldr [[LIT128:s[0-9]+]], [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]]
+; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
+
+; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI[0-9]+_[0-9]+]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
+; CHECK-LARGE: ldr {{s[0-9]+}}, [x[[LITADDR]]]
+; CHECK-LARGE: fadd
+; CHECK-NOFP-LARGE-NOT: ldr {{s[0-9]+}},
+; CHECK-NOFP-LARGE-NOT: fadd
+
+  store float %newfloat, float* @varfloat
+
+  %doubleval = load double* @vardouble
+  %newdouble = fadd double %doubleval, 129.0
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI[0-9]+_[0-9]+]]
+; CHECK: ldr [[LIT129:d[0-9]+]], [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]]
+; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
+; CHECK-NOFP-NOT: fadd
+
+; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI[0-9]+_[0-9]+]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
+; CHECK-LARGE: ldr {{d[0-9]+}}, [x[[LITADDR]]]
+; CHECK-NOFP-LARGE-NOT: ldr {{d[0-9]+}},
+
+  store double %newdouble, double* @vardouble
+
+  ret void
+}

Copied: llvm/trunk/test/CodeGen/AArch64/literal_pools_int.ll (from r206290, llvm/trunk/test/CodeGen/AArch64/literal_pools.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/literal_pools_int.ll?p2=llvm/trunk/test/CodeGen/AArch64/literal_pools_int.ll&p1=llvm/trunk/test/CodeGen/AArch64/literal_pools.ll&r1=206290&r2=206291&rev=206291&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/literal_pools.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/literal_pools_int.ll Tue Apr 15 09:00:29 2014
@@ -1,7 +1,6 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -code-model=large | FileCheck --check-prefix=CHECK-LARGE %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -code-model=large -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP-LARGE %s
+; arm64 does not use literal pools for integers so there is nothing to check.
 
 @var32 = global i32 0
 @var64 = global i64 0
@@ -14,7 +13,7 @@ define void @foo() {
     %val32_lit32 = and i32 %val32, 123456785
     store volatile i32 %val32_lit32, i32* @var32
 ; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
-; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
+; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]]
 
 ; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
 ; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
@@ -25,7 +24,7 @@ define void @foo() {
     %val64_lit32 = and i64 %val64, 305402420
     store volatile i64 %val64_lit32, i64* @var64
 ; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
-; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
+; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]]
 
 ; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
 ; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
@@ -36,7 +35,7 @@ define void @foo() {
     %val64_lit32signed = and i64 %val64, -12345678
     store volatile i64 %val64_lit32signed, i64* @var64
 ; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
-; CHECK: ldrsw {{x[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
+; CHECK: ldrsw {{x[0-9]+}}, [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]]
 
 ; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
 ; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
@@ -47,7 +46,7 @@ define void @foo() {
     %val64_lit64 = and i64 %val64, 1234567898765432
     store volatile i64 %val64_lit64, i64* @var64
 ; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
-; CHECK: ldr {{x[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
+; CHECK: ldr {{x[0-9]+}}, [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]]
 
 ; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
 ; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
@@ -57,47 +56,3 @@ define void @foo() {
 
     ret void
 }
-
- at varfloat = global float 0.0
- at vardouble = global double 0.0
-
-define void @floating_lits() {
-; CHECK-LABEL: floating_lits:
-
-  %floatval = load float* @varfloat
-  %newfloat = fadd float %floatval, 128.0
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
-; CHECK: ldr [[LIT128:s[0-9]+]], [x[[LITBASE]], #:lo12:[[CURLIT]]]
-; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI1_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldr {{s[0-9]+}}, [x[[LITADDR]]]
-; CHECK-LARGE: fadd
-; CHECK-NOFP-LARGE-NOT: ldr {{s[0-9]+}},
-; CHECK-NOFP-LARGE-NOT: fadd
-
-  store float %newfloat, float* @varfloat
-
-  %doubleval = load double* @vardouble
-  %newdouble = fadd double %doubleval, 129.0
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
-; CHECK: ldr [[LIT129:d[0-9]+]], [x[[LITBASE]], #:lo12:[[CURLIT]]]
-; CHECK: fadd {{s[0-9]+}}, {{s[0-9]+}}, [[LIT128]]
-; CHECK: fadd {{d[0-9]+}}, {{d[0-9]+}}, [[LIT129]]
-; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
-; CHECK-NOFP-NOT: fadd
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI1_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldr {{d[0-9]+}}, [x[[LITADDR]]]
-; CHECK-NOFP-LARGE-NOT: ldr {{d[0-9]+}},
-
-  store double %newdouble, double* @vardouble
-
-  ret void
-}
