[llvm] 26b653d - [AArch64] Regenerate some test checks. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Sat Sep 11 11:39:44 PDT 2021


Author: David Green
Date: 2021-09-11T19:39:28+01:00
New Revision: 26b653dae493fa845f921b6cc5d76e9267ab617a

URL: https://github.com/llvm/llvm-project/commit/26b653dae493fa845f921b6cc5d76e9267ab617a
DIFF: https://github.com/llvm/llvm-project/commit/26b653dae493fa845f921b6cc5d76e9267ab617a.diff

LOG: [AArch64] Regenerate some test checks. NFC

This regenerates some of the tests that already had very-close-to-updated
check lines, in order to make them more maintainable.

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
    llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
    llvm/test/CodeGen/AArch64/arm64-addrmode.ll
    llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
    llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
    llvm/test/CodeGen/AArch64/arm64-csel.ll
    llvm/test/CodeGen/AArch64/arm64-fcopysign.ll
    llvm/test/CodeGen/AArch64/arm64-fmadd.ll
    llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog-no-helper.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll b/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
index c70200a1a3e63..d15fbe3453251 100644
--- a/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
@@ -1,26 +1,25 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-enable-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=CHECK
-; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-enable-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=GENERIC
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-enable-simd-scalar=true | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-enable-simd-scalar=true | FileCheck %s -check-prefix=GENERIC
 
 define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; CHECK-LABEL: bar:
-; CHECK: add.2d	v[[REG:[0-9]+]], v0, v1
-; CHECK: add	d[[REG3:[0-9]+]], d[[REG]], d1
-; CHECK: sub	d[[REG2:[0-9]+]], d[[REG]], d1
-; CHECK-NOT: fmov
-; CHECK: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
-; CHECK-NOT: fmov
-; CHECK: mov.d v0[1], [[COPY_REG2]]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add.2d v2, v0, v1
+; CHECK-NEXT:    add d0, d2, d1
+; CHECK-NEXT:    sub d1, d2, d1
+; CHECK-NEXT:    fmov x8, d1
+; CHECK-NEXT:    mov.d v0[1], x8
+; CHECK-NEXT:    ret
 ;
 ; GENERIC-LABEL: bar:
-; GENERIC: add	v[[REG:[0-9]+]].2d, v0.2d, v1.2d
-; GENERIC: add	d[[REG3:[0-9]+]], d[[REG]], d1
-; GENERIC: sub	d[[REG2:[0-9]+]], d[[REG]], d1
-; GENERIC-NOT: fmov
-; GENERIC: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
-; GENERIC-NOT: fmov
-; GENERIC: mov v0.d[1], [[COPY_REG2]]
-; GENERIC-NEXT: ret
+; GENERIC:       // %bb.0:
+; GENERIC-NEXT:    add v2.2d, v0.2d, v1.2d
+; GENERIC-NEXT:    add d0, d2, d1
+; GENERIC-NEXT:    sub d1, d2, d1
+; GENERIC-NEXT:    fmov x8, d1
+; GENERIC-NEXT:    mov v0.d[1], x8
+; GENERIC-NEXT:    ret
   %add = add <2 x i64> %a, %b
   %vgetq_lane = extractelement <2 x i64> %add, i32 0
   %vgetq_lane2 = extractelement <2 x i64> %b, i32 0
@@ -33,11 +32,14 @@ define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 
 define double @subdd_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; CHECK-LABEL: subdd_su64:
-; CHECK: sub d0, d1, d0
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub d0, d1, d0
+; CHECK-NEXT:    ret
+;
 ; GENERIC-LABEL: subdd_su64:
-; GENERIC: sub d0, d1, d0
-; GENERIC-NEXT: ret
+; GENERIC:       // %bb.0:
+; GENERIC-NEXT:    sub d0, d1, d0
+; GENERIC-NEXT:    ret
   %vecext = extractelement <2 x i64> %a, i32 0
   %vecext1 = extractelement <2 x i64> %b, i32 0
   %sub.i = sub nsw i64 %vecext1, %vecext
@@ -47,11 +49,14 @@ define double @subdd_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 
 define double @vaddd_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; CHECK-LABEL: vaddd_su64:
-; CHECK: add d0, d1, d0
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add d0, d1, d0
+; CHECK-NEXT:    ret
+;
 ; GENERIC-LABEL: vaddd_su64:
-; GENERIC: add d0, d1, d0
-; GENERIC-NEXT: ret
+; GENERIC:       // %bb.0:
+; GENERIC-NEXT:    add d0, d1, d0
+; GENERIC-NEXT:    ret
   %vecext = extractelement <2 x i64> %a, i32 0
   %vecext1 = extractelement <2 x i64> %b, i32 0
   %add.i = add nsw i64 %vecext1, %vecext
@@ -62,13 +67,18 @@ define double @vaddd_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; sub MI doesn't access dsub register.
 define double @add_sub_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; CHECK-LABEL: add_sub_su64:
-; CHECK: add d0, d1, d0
-; CHECK: sub d0, {{d[0-9]+}}, d0
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d2, xzr
+; CHECK-NEXT:    add d0, d1, d0
+; CHECK-NEXT:    sub d0, d2, d0
+; CHECK-NEXT:    ret
+;
 ; GENERIC-LABEL: add_sub_su64:
-; GENERIC: add d0, d1, d0
-; GENERIC: sub d0, {{d[0-9]+}}, d0
-; GENERIC-NEXT: ret
+; GENERIC:       // %bb.0:
+; GENERIC-NEXT:    fmov d2, xzr
+; GENERIC-NEXT:    add d0, d1, d0
+; GENERIC-NEXT:    sub d0, d2, d0
+; GENERIC-NEXT:    ret
   %vecext = extractelement <2 x i64> %a, i32 0
   %vecext1 = extractelement <2 x i64> %b, i32 0
   %add.i = add i64 %vecext1, %vecext
@@ -78,11 +88,14 @@ define double @add_sub_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 }
 define double @and_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; CHECK-LABEL: and_su64:
-; CHECK: and.8b v0, v1, v0
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and.8b v0, v1, v0
+; CHECK-NEXT:    ret
+;
 ; GENERIC-LABEL: and_su64:
-; GENERIC: and v0.8b, v1.8b, v0.8b
-; GENERIC-NEXT: ret
+; GENERIC:       // %bb.0:
+; GENERIC-NEXT:    and v0.8b, v1.8b, v0.8b
+; GENERIC-NEXT:    ret
   %vecext = extractelement <2 x i64> %a, i32 0
   %vecext1 = extractelement <2 x i64> %b, i32 0
   %or.i = and i64 %vecext1, %vecext
@@ -92,11 +105,14 @@ define double @and_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 
 define double @orr_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; CHECK-LABEL: orr_su64:
-; CHECK: orr.8b v0, v1, v0
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr.8b v0, v1, v0
+; CHECK-NEXT:    ret
+;
 ; GENERIC-LABEL: orr_su64:
-; GENERIC: orr v0.8b, v1.8b, v0.8b
-; GENERIC-NEXT: ret
+; GENERIC:       // %bb.0:
+; GENERIC-NEXT:    orr v0.8b, v1.8b, v0.8b
+; GENERIC-NEXT:    ret
   %vecext = extractelement <2 x i64> %a, i32 0
   %vecext1 = extractelement <2 x i64> %b, i32 0
   %or.i = or i64 %vecext1, %vecext
@@ -106,11 +122,14 @@ define double @orr_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 
 define double @xorr_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; CHECK-LABEL: xorr_su64:
-; CHECK: eor.8b v0, v1, v0
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eor.8b v0, v1, v0
+; CHECK-NEXT:    ret
+;
 ; GENERIC-LABEL: xorr_su64:
-; GENERIC: eor v0.8b, v1.8b, v0.8b
-; GENERIC-NEXT: ret
+; GENERIC:       // %bb.0:
+; GENERIC-NEXT:    eor v0.8b, v1.8b, v0.8b
+; GENERIC-NEXT:    ret
   %vecext = extractelement <2 x i64> %a, i32 0
   %vecext1 = extractelement <2 x i64> %b, i32 0
   %xor.i = xor i64 %vecext1, %vecext

diff  --git a/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll b/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
index fa3f9d8fd38b3..faff658c2a93b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-apple-ios3.0.0 -aarch64-enable-collect-loh=false | FileCheck %s
 ; rdar://13452552
 ; Disable the collecting of LOH so that the labels do not get in the
@@ -7,28 +8,37 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 @block = common global i8* null, align 8
 
 define zeroext i8 @fullGtU(i32 %i1, i32 %i2) {
-; CHECK: fullGtU
-; CHECK: adrp [[PAGE:x[0-9]+]], _block@GOTPAGE
-; CHECK: ldr [[ADDR:x[0-9]+]], {{\[}}[[PAGE]], _block@GOTPAGEOFF]
-; CHECK: sxtw [[I1:x[0-9]+]], w0
-; CHECK: sxtw [[I2:x[0-9]+]], w1
-; CHECK-NEXT: ldr [[BLOCKBASE:x[0-9]+]], {{\[}}[[ADDR]]]
-; CHECK-NEXT: ldrb [[BLOCKVAL1:w[0-9]+]], {{\[}}[[BLOCKBASE]], [[I1]]]
-; CHECK-NEXT: ldrb [[BLOCKVAL2:w[0-9]+]], {{\[}}[[BLOCKBASE]], [[I2]]]
-
-; CHECK-NEXT: cmp [[BLOCKVAL1]], [[BLOCKVAL2]]
-; CHECK-NEXT: b.ne
-; Next BB
-; CHECK: add [[BLOCKBASE1:x[0-9]+]], [[I1]], [[BLOCKBASE]]
-; CHECK-NEXT: add [[BLOCKBASE2:x[0-9]+]], [[I2]], [[BLOCKBASE]]
-; CHECK-NEXT: ldrb [[LOADEDVAL1:w[0-9]+]], {{\[}}[[BLOCKBASE1]], #1]
-; CHECK-NEXT: ldrb [[LOADEDVAL2:w[0-9]+]], {{\[}}[[BLOCKBASE2]], #1]
-; CHECK-NEXT: cmp [[LOADEDVAL1]], [[LOADEDVAL2]]
-; CHECK-NEXT: b.ne
-; Next BB
-; CHECK: ldrb [[LOADEDVAL3:w[0-9]+]], {{\[}}[[BLOCKBASE1]], #2]
-; CHECK-NEXT: ldrb [[LOADEDVAL4:w[0-9]+]], {{\[}}[[BLOCKBASE2]], #2]
-; CHECK-NEXT: cmp [[LOADEDVAL3]], [[LOADEDVAL4]]
+; CHECK-LABEL: fullGtU:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    adrp x8, _block@GOTPAGE
+; CHECK-NEXT:    ldr x8, [x8, _block@GOTPAGEOFF]
+; CHECK-NEXT:    ; kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    ; kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtw x9, w0
+; CHECK-NEXT:    sxtw x10, w1
+; CHECK-NEXT:    ldr x8, [x8]
+; CHECK-NEXT:    ldrb w11, [x8, x9]
+; CHECK-NEXT:    ldrb w12, [x8, x10]
+; CHECK-NEXT:    cmp w11, w12
+; CHECK-NEXT:    b.ne LBB0_4
+; CHECK-NEXT:  ; %bb.1: ; %if.end
+; CHECK-NEXT:    add x9, x9, x8
+; CHECK-NEXT:    add x8, x10, x8
+; CHECK-NEXT:    ldrb w10, [x9, #1]
+; CHECK-NEXT:    ldrb w11, [x8, #1]
+; CHECK-NEXT:    cmp w10, w11
+; CHECK-NEXT:    b.ne LBB0_4
+; CHECK-NEXT:  ; %bb.2: ; %if.end25
+; CHECK-NEXT:    ldrb w9, [x9, #2]
+; CHECK-NEXT:    ldrb w8, [x8, #2]
+; CHECK-NEXT:    cmp w9, w8
+; CHECK-NEXT:    b.ne LBB0_4
+; CHECK-NEXT:  ; %bb.3:
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  LBB0_4: ; %if.then36
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
 entry:
   %idxprom = sext i32 %i1 to i64
   %tmp = load i8*, i8** @block, align 8

diff  --git a/llvm/test/CodeGen/AArch64/arm64-addrmode.ll b/llvm/test/CodeGen/AArch64/arm64-addrmode.ll
index 6f7f97ef618ca..7109ee6f66be5 100644
--- a/llvm/test/CodeGen/AArch64/arm64-addrmode.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-addrmode.ll
@@ -1,67 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=arm64-eabi < %s | FileCheck %s
 ; rdar://10232252
 
 @object = external hidden global i64, section "__DATA, __objc_ivar", align 8
 
 ; base + offset (imm9)
-; CHECK: @t1
-; CHECK: ldr xzr, [x0, #8]
-; CHECK: ret
 define void @t1(i64* %object) {
+; CHECK-LABEL: t1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr xzr, [x0, #8]
+; CHECK-NEXT:    ret
   %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 1
   %tmp = load volatile i64, i64* %incdec.ptr, align 8
   ret void
 }
 
 ; base + offset (> imm9)
-; CHECK: @t2
-; CHECK: sub [[ADDREG:x[0-9]+]], x0, #264
-; CHECK: ldr xzr, [
-; CHECK: ret
 define void @t2(i64* %object) {
+; CHECK-LABEL: t2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x0, #264
+; CHECK-NEXT:    ldr xzr, [x8]
+; CHECK-NEXT:    ret
   %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 -33
   %tmp = load volatile i64, i64* %incdec.ptr, align 8
   ret void
 }
 
 ; base + unsigned offset (> imm9 and <= imm12 * size of type in bytes)
-; CHECK: @t3
-; CHECK: ldr xzr, [x0, #32760]
-; CHECK: ret
 define void @t3(i64* %object) {
+; CHECK-LABEL: t3:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr xzr, [x0, #32760]
+; CHECK-NEXT:    ret
   %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 4095
   %tmp = load volatile i64, i64* %incdec.ptr, align 8
   ret void
 }
 
 ; base + unsigned offset (> imm12 * size of type in bytes)
-; CHECK: @t4
-; CHECK: mov w[[NUM:[0-9]+]], #32768
-; CHECK: ldr xzr, [x0, x[[NUM]]]
-; CHECK: ret
 define void @t4(i64* %object) {
+; CHECK-LABEL: t4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #32768
+; CHECK-NEXT:    ldr xzr, [x0, x8]
+; CHECK-NEXT:    ret
   %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 4096
   %tmp = load volatile i64, i64* %incdec.ptr, align 8
   ret void
 }
 
 ; base + reg
-; CHECK: @t5
-; CHECK: ldr xzr, [x{{[0-9]+}}, x{{[0-9]+}}, lsl #3]
-; CHECK: ret
 define void @t5(i64 %a) {
+; CHECK-LABEL: t5:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, object
+; CHECK-NEXT:    add x8, x8, :lo12:object
+; CHECK-NEXT:    ldr xzr, [x8, x0, lsl #3]
+; CHECK-NEXT:    ret
   %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 %a
   %tmp = load volatile i64, i64* %incdec.ptr, align 8
   ret void
 }
 
 ; base + reg + imm
-; CHECK: @t6
-; CHECK: add [[ADDREG:x[0-9]+]], x1, x0, lsl #3
-; CHECK-NEXT: mov w[[NUM:[0-9]+]], #32768
-; CHECK: ldr xzr, [x{{[0-9]+}}, x[[NUM]]]
-; CHECK: ret
 define void @t6(i64 %a, i64* %object) {
+; CHECK-LABEL: t6:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x8, x1, x0, lsl #3
+; CHECK-NEXT:    mov w9, #32768
+; CHECK-NEXT:    ldr xzr, [x8, x9]
+; CHECK-NEXT:    ret
   %tmp1 = getelementptr inbounds i64, i64* %object, i64 %a
   %incdec.ptr = getelementptr inbounds i64, i64* %tmp1, i64 4096
   %tmp = load volatile i64, i64* %incdec.ptr, align 8
@@ -71,8 +80,10 @@ define void @t6(i64 %a, i64* %object) {
 ; Test base + wide immediate
 define void @t7(i64 %a) {
 ; CHECK-LABEL: t7:
-; CHECK: mov w[[NUM:[0-9]+]], #65535
-; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #65535
+; CHECK-NEXT:    ldr xzr, [x0, x8]
+; CHECK-NEXT:    ret
   %1 = add i64 %a, 65535   ;0xffff
   %2 = inttoptr i64 %1 to i64*
   %3 = load volatile i64, i64* %2, align 8
@@ -81,8 +92,10 @@ define void @t7(i64 %a) {
 
 define void @t8(i64 %a) {
 ; CHECK-LABEL: t8:
-; CHECK: mov [[REG:x[0-9]+]], #-4662
-; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, #-4662
+; CHECK-NEXT:    ldr xzr, [x0, x8]
+; CHECK-NEXT:    ret
   %1 = sub i64 %a, 4662   ;-4662 is 0xffffffffffffedca
   %2 = inttoptr i64 %1 to i64*
   %3 = load volatile i64, i64* %2, align 8
@@ -91,8 +104,10 @@ define void @t8(i64 %a) {
 
 define void @t9(i64 %a) {
 ; CHECK-LABEL: t9:
-; CHECK: mov [[REG:x[0-9]+]], #-305463297
-; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, #-305463297
+; CHECK-NEXT:    ldr xzr, [x0, x8]
+; CHECK-NEXT:    ret
   %1 = add i64 -305463297, %a   ;-305463297 is 0xffffffffedcaffff
   %2 = inttoptr i64 %1 to i64*
   %3 = load volatile i64, i64* %2, align 8
@@ -101,8 +116,10 @@ define void @t9(i64 %a) {
 
 define void @t10(i64 %a) {
 ; CHECK-LABEL: t10:
-; CHECK: mov [[REG:x[0-9]+]], #81909218222800896
-; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, #81909218222800896
+; CHECK-NEXT:    ldr xzr, [x0, x8]
+; CHECK-NEXT:    ret
   %1 = add i64 %a, 81909218222800896   ;0x123000000000000
   %2 = inttoptr i64 %1 to i64*
   %3 = load volatile i64, i64* %2, align 8
@@ -111,9 +128,11 @@ define void @t10(i64 %a) {
 
 define void @t11(i64 %a) {
 ; CHECK-LABEL: t11:
-; CHECK: mov w[[NUM:[0-9]+]], #17767
-; CHECK: movk w[[NUM:[0-9]+]], #291
-; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #17767
+; CHECK-NEXT:    movk w8, #291, lsl #16
+; CHECK-NEXT:    ldr xzr, [x0, x8]
+; CHECK-NEXT:    ret
   %1 = add i64 %a, 19088743   ;0x1234567
   %2 = inttoptr i64 %1 to i64*
   %3 = load volatile i64, i64* %2, align 8
@@ -123,8 +142,10 @@ define void @t11(i64 %a) {
 ; Test some boundaries that should not use movz/movn/orr
 define void @t12(i64 %a) {
 ; CHECK-LABEL: t12:
-; CHECK: add [[REG:x[0-9]+]], x0, #4095
-; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x8, x0, #4095
+; CHECK-NEXT:    ldr xzr, [x8]
+; CHECK-NEXT:    ret
   %1 = add i64 %a, 4095   ;0xfff
   %2 = inttoptr i64 %1 to i64*
   %3 = load volatile i64, i64* %2, align 8
@@ -133,8 +154,10 @@ define void @t12(i64 %a) {
 
 define void @t13(i64 %a) {
 ; CHECK-LABEL: t13:
-; CHECK: sub [[REG:x[0-9]+]], x0, #4095
-; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x0, #4095
+; CHECK-NEXT:    ldr xzr, [x8]
+; CHECK-NEXT:    ret
   %1 = add i64 %a, -4095   ;-0xfff
   %2 = inttoptr i64 %1 to i64*
   %3 = load volatile i64, i64* %2, align 8
@@ -143,8 +166,10 @@ define void @t13(i64 %a) {
 
 define void @t14(i64 %a) {
 ; CHECK-LABEL: t14:
-; CHECK: add [[REG:x[0-9]+]], x0, #291, lsl #12
-; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x8, x0, #291, lsl #12 // =1191936
+; CHECK-NEXT:    ldr xzr, [x8]
+; CHECK-NEXT:    ret
   %1 = add i64 %a, 1191936   ;0x123000
   %2 = inttoptr i64 %1 to i64*
   %3 = load volatile i64, i64* %2, align 8
@@ -153,8 +178,10 @@ define void @t14(i64 %a) {
 
 define void @t15(i64 %a) {
 ; CHECK-LABEL: t15:
-; CHECK: sub [[REG:x[0-9]+]], x0, #291, lsl #12
-; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x0, #291, lsl #12 // =1191936
+; CHECK-NEXT:    ldr xzr, [x8]
+; CHECK-NEXT:    ret
   %1 = add i64 %a, -1191936   ;0xFFFFFFFFFFEDD000
   %2 = inttoptr i64 %1 to i64*
   %3 = load volatile i64, i64* %2, align 8
@@ -163,7 +190,9 @@ define void @t15(i64 %a) {
 
 define void @t16(i64 %a) {
 ; CHECK-LABEL: t16:
-; CHECK: ldr xzr, [x0, #28672]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr xzr, [x0, #28672]
+; CHECK-NEXT:    ret
   %1 = add i64 %a, 28672   ;0x7000
   %2 = inttoptr i64 %1 to i64*
   %3 = load volatile i64, i64* %2, align 8
@@ -172,7 +201,9 @@ define void @t16(i64 %a) {
 
 define void @t17(i64 %a) {
 ; CHECK-LABEL: t17:
-; CHECK: ldur xzr, [x0, #-256]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldur xzr, [x0, #-256]
+; CHECK-NEXT:    ret
   %1 = add i64 %a, -256   ;-0x100
   %2 = inttoptr i64 %1 to i64*
   %3 = load volatile i64, i64* %2, align 8

diff  --git a/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll b/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
index 93af46d45da09..bdcd41f8c6397 100644
--- a/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
@@ -23,7 +23,6 @@ define void @foo(%struct.X* nocapture %x, %struct.Y* nocapture %y) nounwind opts
 ; OPT-NEXT:    [[FROMBOOL:%.*]] = trunc i32 [[BF_CLEAR_LOBIT]] to i8
 ; OPT-NEXT:    store i8 [[FROMBOOL]], i8* [[B]], align 1
 ; OPT-NEXT:    ret void
-;
   %tmp = bitcast %struct.X* %x to i32*
   %tmp1 = load i32, i32* %tmp, align 4
   %b = getelementptr inbounds %struct.Y, %struct.Y* %y, i64 0, i32 1
@@ -44,7 +43,6 @@ define i32 @baz(i64 %cav1.coerce) nounwind {
 ; OPT-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP]], 28
 ; OPT-NEXT:    [[BF_VAL_SEXT:%.*]] = ashr exact i32 [[TMP1]], 28
 ; OPT-NEXT:    ret i32 [[BF_VAL_SEXT]]
-;
   %tmp = trunc i64 %cav1.coerce to i32
   %tmp1 = shl i32 %tmp, 28
   %bf.val.sext = ashr exact i32 %tmp1, 28
@@ -61,7 +59,6 @@ define i32 @bar(i64 %cav1.coerce) nounwind {
 ; OPT-NEXT:    [[CAV1_SROA_0_1_INSERT:%.*]] = shl i32 [[TMP]], 22
 ; OPT-NEXT:    [[TMP1:%.*]] = ashr i32 [[CAV1_SROA_0_1_INSERT]], 26
 ; OPT-NEXT:    ret i32 [[TMP1]]
-;
   %tmp = trunc i64 %cav1.coerce to i32
   %cav1.sroa.0.1.insert = shl i32 %tmp, 22
   %tmp1 = ashr i32 %cav1.sroa.0.1.insert, 26
@@ -83,7 +80,6 @@ define void @fct1(%struct.Z* nocapture %x, %struct.A* nocapture %y) nounwind opt
 ; OPT-NEXT:    [[BF_CLEAR_LOBIT:%.*]] = and i64 [[BF_CLEAR]], 1
 ; OPT-NEXT:    store i64 [[BF_CLEAR_LOBIT]], i64* [[B1]], align 8
 ; OPT-NEXT:    ret void
-;
   %tmp = bitcast %struct.Z* %x to i64*
   %tmp1 = load i64, i64* %tmp, align 4
   %b = getelementptr inbounds %struct.A, %struct.A* %y, i64 0, i32 0
@@ -102,7 +98,6 @@ define i64 @fct2(i64 %cav1.coerce) nounwind {
 ; OPT-NEXT:    [[TMP:%.*]] = shl i64 [[CAV1_COERCE:%.*]], 28
 ; OPT-NEXT:    [[BF_VAL_SEXT:%.*]] = ashr exact i64 [[TMP]], 28
 ; OPT-NEXT:    ret i64 [[BF_VAL_SEXT]]
-;
   %tmp = shl i64 %cav1.coerce, 28
   %bf.val.sext = ashr exact i64 %tmp, 28
   ret i64 %bf.val.sext
@@ -117,7 +112,6 @@ define i64 @fct3(i64 %cav1.coerce) nounwind {
 ; OPT-NEXT:    [[CAV1_SROA_0_1_INSERT:%.*]] = shl i64 [[CAV1_COERCE:%.*]], 22
 ; OPT-NEXT:    [[TMP1:%.*]] = ashr i64 [[CAV1_SROA_0_1_INSERT]], 26
 ; OPT-NEXT:    ret i64 [[TMP1]]
-;
   %cav1.sroa.0.1.insert = shl i64 %cav1.coerce, 22
   %tmp1 = ashr i64 %cav1.sroa.0.1.insert, 26
   ret i64 %tmp1
@@ -139,7 +133,6 @@ define void @fct4(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; OPT-NEXT:    [[OR:%.*]] = or i64 [[AND]], [[AND1]]
 ; OPT-NEXT:    store i64 [[OR]], i64* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
   %0 = load i64, i64* %y, align 8
   %and = and i64 %0, -16777216
@@ -166,7 +159,6 @@ define void @fct5(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; OPT-NEXT:    [[OR:%.*]] = or i32 [[AND]], [[AND1]]
 ; OPT-NEXT:    store i32 [[OR]], i32* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
   %0 = load i32, i32* %y, align 8
   %and = and i32 %0, -8
@@ -196,7 +188,6 @@ define void @fct6(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; OPT-NEXT:    [[SHR1:%.*]] = lshr i32 [[OR]], 2
 ; OPT-NEXT:    store i32 [[SHR1]], i32* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; lsr is an alias of ubfm
   %0 = load i32, i32* %y, align 8
@@ -229,7 +220,6 @@ define void @fct7(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; OPT-NEXT:    [[SHL:%.*]] = shl i32 [[OR]], 2
 ; OPT-NEXT:    store i32 [[SHL]], i32* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; lsl is an alias of ubfm
   %0 = load i32, i32* %y, align 8
@@ -263,7 +253,6 @@ define void @fct8(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; OPT-NEXT:    [[SHR1:%.*]] = lshr i64 [[OR]], 2
 ; OPT-NEXT:    store i64 [[SHR1]], i64* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; lsr is an alias of ubfm
   %0 = load i64, i64* %y, align 8
@@ -297,7 +286,6 @@ define void @fct9(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; OPT-NEXT:    [[SHL:%.*]] = shl i64 [[OR]], 2
 ; OPT-NEXT:    store i64 [[SHL]], i64* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; lsr is an alias of ubfm
   %0 = load i64, i64* %y, align 8
@@ -329,7 +317,6 @@ define void @fct10(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; OPT-NEXT:    [[SHL:%.*]] = shl i32 [[OR]], 2
 ; OPT-NEXT:    store i32 [[SHL]], i32* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; lsl is an alias of ubfm
   %0 = load i32, i32* %y, align 8
@@ -360,7 +347,6 @@ define void @fct11(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; OPT-NEXT:    [[SHL:%.*]] = shl i64 [[OR]], 2
 ; OPT-NEXT:    store i64 [[SHL]], i64* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; lsl is an alias of ubfm
   %0 = load i64, i64* %y, align 8
@@ -381,7 +367,6 @@ define zeroext i1 @fct12bis(i32 %tmp2) unnamed_addr nounwind ssp align 2 {
 ; OPT-NEXT:    [[AND_I_I:%.*]] = and i32 [[TMP2:%.*]], 2048
 ; OPT-NEXT:    [[TOBOOL_I_I:%.*]] = icmp ne i32 [[AND_I_I]], 0
 ; OPT-NEXT:    ret i1 [[TOBOOL_I_I]]
-;
   %and.i.i = and i32 %tmp2, 2048
   %tobool.i.i = icmp ne i32 %and.i.i, 0
   ret i1 %tobool.i.i
@@ -408,7 +393,6 @@ define void @fct12(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; OPT-NEXT:    [[SHR2:%.*]] = lshr i32 [[SHL]], 4
 ; OPT-NEXT:    store i32 [[SHR2]], i32* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; lsr is an alias of ubfm
   %0 = load i32, i32* %y, align 8
@@ -441,7 +425,6 @@ define void @fct12_mask(i32* nocapture %y, i32 %x) nounwind optsize inlinehint s
 ; OPT-NEXT:    [[MASK:%.*]] = and i32 [[LSHR]], 268435455
 ; OPT-NEXT:    store i32 [[MASK]], i32* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; lsr is an alias of ubfm
   %0 = load i32, i32* %y, align 8
@@ -477,7 +460,6 @@ define void @fct13(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; OPT-NEXT:    [[SHR2:%.*]] = lshr i64 [[SHL]], 4
 ; OPT-NEXT:    store i64 [[SHR2]], i64* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; lsr is an alias of ubfm
   %0 = load i64, i64* %y, align 8
@@ -510,7 +492,6 @@ define void @fct13_mask(i64* nocapture %y, i64 %x) nounwind optsize inlinehint s
 ; OPT-NEXT:    [[MASK:%.*]] = and i64 [[LSHR]], 1152921504606846975
 ; OPT-NEXT:    store i64 [[MASK]], i64* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; lsr is an alias of ubfm
   %0 = load i64, i64* %y, align 8
@@ -552,7 +533,6 @@ define void @fct14(i32* nocapture %y, i32 %x, i32 %x1) nounwind optsize inlinehi
 ; OPT-NEXT:    [[SHL1:%.*]] = shl i32 [[OR1]], 2
 ; OPT-NEXT:    store i32 [[SHL1]], i32* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; lsr is an alias of ubfm
 ; lsl is an alias of ubfm
@@ -599,7 +579,6 @@ define void @fct15(i64* nocapture %y, i64 %x, i64 %x1) nounwind optsize inlinehi
 ; OPT-NEXT:    [[SHL1:%.*]] = shl i64 [[OR1]], 2
 ; OPT-NEXT:    store i64 [[SHL1]], i64* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; lsr is an alias of ubfm
 ; lsl is an alias of ubfm
@@ -642,7 +621,6 @@ define void @fct16(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; OPT-NEXT:    [[SHR2:%.*]] = lshr i32 [[SHL]], 4
 ; OPT-NEXT:    store i32 [[SHR2]], i32* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; Create the constant
 ; Do the masking
@@ -679,7 +657,6 @@ define void @fct16_mask(i32* nocapture %y, i32 %x) nounwind optsize inlinehint s
 ; OPT-NEXT:    [[MASK:%.*]] = and i32 [[LSHR]], 268435455
 ; OPT-NEXT:    store i32 [[MASK]], i32* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; Create the constant
 ; Do the masking
@@ -721,7 +698,6 @@ define void @fct17(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; OPT-NEXT:    [[SHR2:%.*]] = lshr i64 [[SHL]], 4
 ; OPT-NEXT:    store i64 [[SHR2]], i64* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; Create the constant
 ; Do the masking
@@ -758,7 +734,6 @@ define void @fct17_mask(i64* nocapture %y, i64 %x) nounwind optsize inlinehint s
 ; OPT-NEXT:    [[MASK:%.*]] = and i64 [[LSHR]], 1152921504606846975
 ; OPT-NEXT:    store i64 [[MASK]], i64* [[Y]], align 8
 ; OPT-NEXT:    ret void
-;
 entry:
 ; Create the constant
 ; Do the masking
@@ -785,7 +760,6 @@ define i64 @fct18(i32 %xor72) nounwind ssp {
 ; OPT-NEXT:    [[CONV82:%.*]] = zext i32 [[SHR81]] to i64
 ; OPT-NEXT:    [[RESULT:%.*]] = and i64 [[CONV82]], 255
 ; OPT-NEXT:    ret i64 [[RESULT]]
-;
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %result = and i64 %conv82, 255
@@ -868,7 +842,6 @@ define i32 @fct19(i64 %arg1) nounwind readonly ssp  {
 ; OPT:       return:
 ; OPT-NEXT:    [[RETVAL_0:%.*]] = phi i32 [ [[CONV]], [[IF_THEN]] ], [ [[ADD]], [[IF_THEN7]] ], [ [[ADD23]], [[IF_THEN17]] ], [ 64, [[IF_END13]] ]
 ; OPT-NEXT:    ret i32 [[RETVAL_0]]
-;
 entry:
   %x.sroa.1.0.extract.shift = lshr i64 %arg1, 16
   %x.sroa.1.0.extract.trunc = trunc i64 %x.sroa.1.0.extract.shift to i16
@@ -949,7 +922,6 @@ define i80 @fct20(i128 %a, i128 %b) {
 ; OPT:       end:
 ; OPT-NEXT:    [[CONV3:%.*]] = phi i80 [ [[CONV]], [[ENTRY:%.*]] ], [ [[CONV2]], [[THEN]] ]
 ; OPT-NEXT:    ret i80 [[CONV3]]
-;
 entry:
   %shr = lshr i128 %a, 18
   %conv = trunc i128 %shr to i80
@@ -981,7 +953,6 @@ define i64 @fct21(i64 %x) {
 ; OPT-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x [64 x i64]], [8 x [64 x i64]]* @arr, i64 0, i64 0, i64 [[AND]]
 ; OPT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
 ; OPT-NEXT:    ret i64 [[TMP0]]
-;
 entry:
   %shr = lshr i64 %x, 4
   %and = and i64 %shr, 15
@@ -1006,7 +977,6 @@ define i16 @test_ignored_rightbits(i32 %dst, i32 %in) {
 ; OPT-NEXT:    [[OR18:%.*]] = or i32 [[SHL16]], [[INSERTION]]
 ; OPT-NEXT:    [[CONV19:%.*]] = trunc i32 [[OR18]] to i16
 ; OPT-NEXT:    ret i16 [[CONV19]]
-;
   %positioned_field = shl i32 %in, 3
   %positioned_masked_field = and i32 %positioned_field, 120
   %masked_dst = and i32 %dst, 7
@@ -1052,7 +1022,6 @@ define void @sameOperandBFI(i64 %src, i64 %src2, i16 *%ptr) {
 ; OPT-NEXT:    br label [[END]]
 ; OPT:       end:
 ; OPT-NEXT:    ret void
-;
 entry:
   %shr47 = lshr i64 %src, 47
   %src2.trunc = trunc i64 %src2 to i32

diff  --git a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
index ba7bdc4488c71..dcd8e18d9ec4f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
@@ -1,65 +1,82 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 
 
 define <4 x i16> @fptosi_v4f64_to_v4i16(<4 x double>* %ptr) {
-; CHECK-LABEL: fptosi_v4f64_to_v4i16
-; CHECK-DAG: fcvtzs  v[[LHS:[0-9]+]].2d, v0.2d
-; CHECK-DAG: fcvtzs  v[[RHS:[0-9]+]].2d, v1.2d
-; CHECK-DAG: xtn  v[[XTN0:[0-9]+]].2s, v[[LHS]].2d
-; CHECK-DAG: xtn  v[[XTN1:[0-9]+]].2s, v[[RHS]].2d
-; CHECK:     uzp1  v0.4h, v[[XTN1]].4h, v[[XTN0]].4h
+; CHECK-LABEL: fptosi_v4f64_to_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    xtn v1.2s, v1.2d
+; CHECK-NEXT:    uzp1 v0.4h, v1.4h, v0.4h
+; CHECK-NEXT:    ret
   %tmp1 = load <4 x double>, <4 x double>* %ptr
   %tmp2 = fptosi <4 x double> %tmp1 to <4 x i16>
   ret <4 x i16> %tmp2
 }
 
 define <8 x i8> @fptosi_v4f64_to_v4i8(<8 x double>* %ptr) {
-; CHECK-LABEL: fptosi_v4f64_to_v4i8
-; CHECK-DAG:  fcvtzs  v[[CONV0:[0-9]+]].2d, v0.2d
-; CHECK-DAG:  fcvtzs  v[[CONV1:[0-9]+]].2d, v1.2d
-; CHECK-DAG:  fcvtzs  v[[CONV2:[0-9]+]].2d, v2.2d
-; CHECK-DAG:  fcvtzs  v[[CONV3:[0-9]+]].2d, v3.2d
-; CHECK-DAG:  xtn  v[[XTN0:[0-9]+]].2s, v[[CONV0]].2d
-; CHECK-DAG:  xtn  v[[XTN1:[0-9]+]].2s, v[[CONV1]].2d
-; CHECK-DAG:  xtn  v[[XTN2:[0-9]+]].2s, v[[CONV2]].2d
-; CHECK-DAG:  xtn  v[[XTN3:[0-9]+]].2s, v[[CONV3]].2d
-; CHECK-DAG:  uzp1 v[[UZP0:[0-9]+]].4h, v[[XTN1]].4h, v[[XTN0]].4h
-; CHECK-DAG:  uzp1 v[[UZP1:[0-9]+]].4h, v[[XTN3]].4h, v[[XTN2]].4h
-; CHECK:      uzp1  v0.8b, v[[UZP1:[0-9]+]].8b, v[[UZP0:[0-9]+]].8b
+; CHECK-LABEL: fptosi_v4f64_to_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0, #32]
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v2.2d, v2.2d
+; CHECK-NEXT:    fcvtzs v3.2d, v3.2d
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    xtn v1.2s, v1.2d
+; CHECK-NEXT:    xtn v2.2s, v2.2d
+; CHECK-NEXT:    xtn v3.2s, v3.2d
+; CHECK-NEXT:    uzp1 v0.4h, v1.4h, v0.4h
+; CHECK-NEXT:    uzp1 v1.4h, v3.4h, v2.4h
+; CHECK-NEXT:    uzp1 v0.8b, v1.8b, v0.8b
+; CHECK-NEXT:    ret
   %tmp1 = load <8 x double>, <8 x double>* %ptr
   %tmp2 = fptosi <8 x double> %tmp1 to <8 x i8>
   ret <8 x i8> %tmp2
 }
 
 define <4 x half> @uitofp_v4i64_to_v4f16(<4 x i64>* %ptr) {
-; CHECK: uitofp_v4i64_to_v4f16
-; CHECK-DAG: ucvtf  v[[LHS:[0-9]+]].2d, v0.2d
-; CHECK-DAG: ucvtf  v[[RHS:[0-9]+]].2d, v1.2d
-; CHECK-DAG: fcvtn  v[[MID:[0-9]+]].2s, v[[LHS]].2d
-; CHECK-DAG: fcvtn2  v[[MID]].4s, v[[RHS]].2d
-; CHECK:     fcvtn  v0.4h, v[[MID]].4s
+; CHECK-LABEL: uitofp_v4i64_to_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    ucvtf v1.2d, v1.2d
+; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
   %tmp1 = load <4 x i64>, <4 x i64>* %ptr
   %tmp2 = uitofp <4 x i64> %tmp1 to <4 x half>
   ret <4 x half> %tmp2
 }
 
 define <4 x i16> @trunc_v4i64_to_v4i16(<4 x i64>* %ptr) {
-; CHECK: trunc_v4i64_to_v4i16
-; CHECK: xtn
-; CHECK: xtn2
-; CHECK: xtn
+; CHECK-LABEL: trunc_v4i64_to_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    xtn2 v0.4s, v1.2d
+; CHECK-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
   %tmp1 = load <4 x i64>, <4 x i64>* %ptr
   %tmp2 = trunc <4 x i64> %tmp1 to <4 x i16>
   ret <4 x i16> %tmp2
 }
 
 define <4 x i16> @fptoui_v4f64_to_v4i16(<4 x double>* %ptr) {
-; CHECK-LABEL: fptoui_v4f64_to_v4i16
-; CHECK-DAG: fcvtzs  v[[LHS:[0-9]+]].2d, v0.2d
-; CHECK-DAG: fcvtzs  v[[RHS:[0-9]+]].2d, v1.2d
-; CHECK-DAG: xtn  v[[XTN0:[0-9]+]].2s, v[[LHS]].2d
-; CHECK-DAG: xtn  v[[XTN1:[0-9]+]].2s, v[[RHS]].2d
-; CHECK:     uzp1  v0.4h, v[[XTN1]].4h, v[[XTN0]].4h
+; CHECK-LABEL: fptoui_v4f64_to_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    xtn v1.2s, v1.2d
+; CHECK-NEXT:    uzp1 v0.4h, v1.4h, v0.4h
+; CHECK-NEXT:    ret
   %tmp1 = load <4 x double>, <4 x double>* %ptr
   %tmp2 = fptoui <4 x double> %tmp1 to <4 x i16>
   ret <4 x i16> %tmp2

diff  --git a/llvm/test/CodeGen/AArch64/arm64-csel.ll b/llvm/test/CodeGen/AArch64/arm64-csel.ll
index 44e951ed69e1d..07922fac67f10 100644
--- a/llvm/test/CodeGen/AArch64/arm64-csel.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-csel.ll
@@ -1,10 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -debugify-and-strip-all-safe -O3 < %s | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64"
 target triple = "arm64-unknown-unknown"
 
-; CHECK-LABEL: foo1
-; CHECK: cinc w{{[0-9]+}}, w{{[0-9]+}}, ne
 define i32 @foo1(i32 %b, i32 %c) nounwind readnone ssp {
+; CHECK-LABEL: foo1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w1, #0
+; CHECK-NEXT:    add w8, w1, w0
+; CHECK-NEXT:    cinc w0, w8, ne
+; CHECK-NEXT:    ret
 entry:
   %not.tobool = icmp ne i32 %c, 0
   %add = zext i1 %not.tobool to i32
@@ -13,9 +18,13 @@ entry:
   ret i32 %add1
 }
 
-; CHECK-LABEL: foo2
-; CHECK: cneg w{{[0-9]+}}, w{{[0-9]+}}, ne
 define i32 @foo2(i32 %b, i32 %c) nounwind readnone ssp {
+; CHECK-LABEL: foo2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w1, #0
+; CHECK-NEXT:    cneg w8, w0, ne
+; CHECK-NEXT:    add w0, w8, w1
+; CHECK-NEXT:    ret
 entry:
   %mul = sub i32 0, %b
   %tobool = icmp eq i32 %c, 0
@@ -24,9 +33,13 @@ entry:
   ret i32 %add
 }
 
-; CHECK-LABEL: foo3
-; CHECK: cinv w{{[0-9]+}}, w{{[0-9]+}}, ne
 define i32 @foo3(i32 %b, i32 %c) nounwind readnone ssp {
+; CHECK-LABEL: foo3:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w1, #0
+; CHECK-NEXT:    cinv w8, w0, ne
+; CHECK-NEXT:    add w0, w8, w1
+; CHECK-NEXT:    ret
 entry:
   %not.tobool = icmp ne i32 %c, 0
   %xor = sext i1 %not.tobool to i32
@@ -37,9 +50,11 @@ entry:
 
 ; rdar://11632325
 define i32 @foo4(i32 %a) nounwind ssp {
-; CHECK-LABEL: foo4
-; CHECK: cneg
-; CHECK-NEXT: ret
+; CHECK-LABEL: foo4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    cneg w0, w0, mi
+; CHECK-NEXT:    ret
   %cmp = icmp sgt i32 %a, -1
   %neg = sub nsw i32 0, %a
   %cond = select i1 %cmp, i32 %a, i32 %neg
@@ -47,11 +62,12 @@ define i32 @foo4(i32 %a) nounwind ssp {
 }
 
 define i32 @foo5(i32 %a, i32 %b) nounwind ssp {
+; CHECK-LABEL: foo5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    subs w8, w0, w1
+; CHECK-NEXT:    cneg w0, w8, mi
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: foo5
-; CHECK: subs
-; CHECK-NEXT: cneg
-; CHECK-NEXT: ret
   %sub = sub nsw i32 %a, %b
   %cmp = icmp sgt i32 %sub, -1
   %sub3 = sub nsw i32 0, %sub
@@ -61,8 +77,12 @@ entry:
 
 ; make sure we can handle branch instruction in optimizeCompare.
 define i32 @foo6(i32 %a, i32 %b) nounwind ssp {
-; CHECK-LABEL: foo6
-; CHECK: b
+; CHECK-LABEL: foo6:
+; CHECK:       // %bb.0: // %common.ret
+; CHECK-NEXT:    sub w8, w0, w1
+; CHECK-NEXT:    cmp w8, #0
+; CHECK-NEXT:    csinc w0, w8, wzr, le
+; CHECK-NEXT:    ret
   %sub = sub nsw i32 %a, %b
   %cmp = icmp sgt i32 %sub, 0
   br i1 %cmp, label %l.if, label %l.else
@@ -76,13 +96,17 @@ l.else:
 
 ; If CPSR is used multiple times and V flag is used, we don't remove cmp.
 define i32 @foo7(i32 %a, i32 %b) nounwind {
-entry:
 ; CHECK-LABEL: foo7:
-; CHECK: sub
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    subs w8, w0, w1
+; CHECK-NEXT:    cneg w9, w8, mi
+; CHECK-NEXT:    cmn w8, #1
+; CHECK-NEXT:    csel w10, w9, w0, lt
+; CHECK-NEXT:    cmp w8, #0
+; CHECK-NEXT:    csel w0, w10, w9, ge
+; CHECK-NEXT:    ret
+entry:
 ; FIXME: Misspelled CHECK-NEXT
-; CHECK-next: adds
-; CHECK-next: csneg
-; CHECK-next: b
   %sub = sub nsw i32 %a, %b
   %cmp = icmp sgt i32 %sub, -1
   %sub3 = sub nsw i32 0, %sub
@@ -99,10 +123,12 @@ if.else:
 }
 
 define i32 @foo8(i32 %v, i32 %a, i32 %b) nounwind readnone ssp {
-entry:
 ; CHECK-LABEL: foo8:
-; CHECK: cmp w0, #0
-; CHECK: csinv w0, w1, w2, ne
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    csinv w0, w1, w2, ne
+; CHECK-NEXT:    ret
+entry:
   %tobool = icmp eq i32 %v, 0
   %neg = xor i32 -1, %b
   %cond = select i1 %tobool, i32 %neg, i32 %a
@@ -110,54 +136,64 @@ entry:
 }
 
 define i32 @foo9(i32 %v) nounwind readnone optsize ssp {
-entry:
 ; CHECK-LABEL: foo9:
-; CHECK: cmp w0, #0
-; CHECK: mov w[[REG:[0-9]+]], #4
-; CHECK: cinv w0, w[[REG]], eq
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    mov w8, #4
+; CHECK-NEXT:    cinv w0, w8, eq
+; CHECK-NEXT:    ret
+entry:
   %tobool = icmp ne i32 %v, 0
   %cond = select i1 %tobool, i32 4, i32 -5
   ret i32 %cond
 }
 
 define i64 @foo10(i64 %v) nounwind readnone optsize ssp {
-entry:
 ; CHECK-LABEL: foo10:
-; CHECK: cmp x0, #0
-; CHECK: mov w[[REG:[0-9]+]], #4
-; CHECK: cinv x0, x[[REG]], eq
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp x0, #0
+; CHECK-NEXT:    mov w8, #4
+; CHECK-NEXT:    cinv x0, x8, eq
+; CHECK-NEXT:    ret
+entry:
   %tobool = icmp ne i64 %v, 0
   %cond = select i1 %tobool, i64 4, i64 -5
   ret i64 %cond
 }
 
 define i32 @foo11(i32 %v) nounwind readnone optsize ssp {
-entry:
 ; CHECK-LABEL: foo11:
-; CHECK: cmp w0, #0
-; CHECK: mov w[[REG:[0-9]+]], #4
-; CHECK: cneg w0, w[[REG]], eq
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    mov w8, #4
+; CHECK-NEXT:    cneg w0, w8, eq
+; CHECK-NEXT:    ret
+entry:
   %tobool = icmp ne i32 %v, 0
   %cond = select i1 %tobool, i32 4, i32 -4
   ret i32 %cond
 }
 
 define i64 @foo12(i64 %v) nounwind readnone optsize ssp {
-entry:
 ; CHECK-LABEL: foo12:
-; CHECK: cmp x0, #0
-; CHECK: mov w[[REG:[0-9]+]], #4
-; CHECK: cneg x0, x[[REG]], eq
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp x0, #0
+; CHECK-NEXT:    mov w8, #4
+; CHECK-NEXT:    cneg x0, x8, eq
+; CHECK-NEXT:    ret
+entry:
   %tobool = icmp ne i64 %v, 0
   %cond = select i1 %tobool, i64 4, i64 -4
   ret i64 %cond
 }
 
 define i32 @foo13(i32 %v, i32 %a, i32 %b) nounwind readnone optsize ssp {
-entry:
 ; CHECK-LABEL: foo13:
-; CHECK: cmp w0, #0
-; CHECK: csneg w0, w1, w2, ne
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    csneg w0, w1, w2, ne
+; CHECK-NEXT:    ret
+entry:
   %tobool = icmp eq i32 %v, 0
   %sub = sub i32 0, %b
   %cond = select i1 %tobool, i32 %sub, i32 %a
@@ -165,10 +201,12 @@ entry:
 }
 
 define i64 @foo14(i64 %v, i64 %a, i64 %b) nounwind readnone optsize ssp {
-entry:
 ; CHECK-LABEL: foo14:
-; CHECK: cmp x0, #0
-; CHECK: csneg x0, x1, x2, ne
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp x0, #0
+; CHECK-NEXT:    csneg x0, x1, x2, ne
+; CHECK-NEXT:    ret
+entry:
   %tobool = icmp eq i64 %v, 0
   %sub = sub i64 0, %b
   %cond = select i1 %tobool, i64 %sub, i64 %a
@@ -176,54 +214,64 @@ entry:
 }
 
 define i32 @foo15(i32 %a, i32 %b) nounwind readnone optsize ssp {
-entry:
 ; CHECK-LABEL: foo15:
-; CHECK: cmp w0, w1
-; CHECK: mov w[[REG:[0-9]+]], #1
-; CHECK: cinc w0, w[[REG]], gt
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    cinc w0, w8, gt
+; CHECK-NEXT:    ret
+entry:
   %cmp = icmp sgt i32 %a, %b
   %. = select i1 %cmp, i32 2, i32 1
   ret i32 %.
 }
 
 define i32 @foo16(i32 %a, i32 %b) nounwind readnone optsize ssp {
-entry:
 ; CHECK-LABEL: foo16:
-; CHECK: cmp w0, w1
-; CHECK: mov w[[REG:[0-9]+]], #1
-; CHECK: cinc w0, w[[REG]], le
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    cinc w0, w8, le
+; CHECK-NEXT:    ret
+entry:
   %cmp = icmp sgt i32 %a, %b
   %. = select i1 %cmp, i32 1, i32 2
   ret i32 %.
 }
 
 define i64 @foo17(i64 %a, i64 %b) nounwind readnone optsize ssp {
-entry:
 ; CHECK-LABEL: foo17:
-; CHECK: cmp x0, x1
-; CHECK: mov w[[REG:[0-9]+]], #1
-; CHECK: cinc x0, x[[REG]], gt
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    cinc x0, x8, gt
+; CHECK-NEXT:    ret
+entry:
   %cmp = icmp sgt i64 %a, %b
   %. = select i1 %cmp, i64 2, i64 1
   ret i64 %.
 }
 
 define i64 @foo18(i64 %a, i64 %b) nounwind readnone optsize ssp {
-entry:
 ; CHECK-LABEL: foo18:
-; CHECK: cmp x0, x1
-; CHECK: mov w[[REG:[0-9]+]], #1
-; CHECK: cinc x0, x[[REG]], le
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    cinc x0, x8, le
+; CHECK-NEXT:    ret
+entry:
   %cmp = icmp sgt i64 %a, %b
   %. = select i1 %cmp, i64 1, i64 2
   ret i64 %.
 }
 
 define i64 @foo19(i64 %a, i64 %b, i64 %c) {
-entry:
 ; CHECK-LABEL: foo19:
-; CHECK: cinc x0, x2
-; CHECK-NOT: add
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    cinc x0, x2, lo
+; CHECK-NEXT:    ret
+entry:
   %cmp = icmp ult i64 %a, %b
   %inc = zext i1 %cmp to i64
   %inc.c = add i64 %inc, %c
@@ -232,9 +280,11 @@ entry:
 
 define i32 @foo20(i32 %x) {
 ; CHECK-LABEL: foo20:
-; CHECK: cmp w0, #5
-; CHECK: mov w[[REG:[0-9]+]], #6
-; CHECK: csinc w0, w[[REG]], wzr, eq
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, #5
+; CHECK-NEXT:    mov w8, #6
+; CHECK-NEXT:    csinc w0, w8, wzr, eq
+; CHECK-NEXT:    ret
   %cmp = icmp eq i32 %x, 5
   %res = select i1 %cmp, i32 6, i32 1
   ret i32 %res
@@ -242,9 +292,11 @@ define i32 @foo20(i32 %x) {
 
 define i64 @foo21(i64 %x) {
 ; CHECK-LABEL: foo21:
-; CHECK: cmp x0, #5
-; CHECK: mov w[[REG:[0-9]+]], #6
-; CHECK: csinc x0, x[[REG]], xzr, eq
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, #5
+; CHECK-NEXT:    mov w8, #6
+; CHECK-NEXT:    csinc x0, x8, xzr, eq
+; CHECK-NEXT:    ret
   %cmp = icmp eq i64 %x, 5
   %res = select i1 %cmp, i64 6, i64 1
   ret i64 %res
@@ -252,9 +304,11 @@ define i64 @foo21(i64 %x) {
 
 define i32 @foo22(i32 %x) {
 ; CHECK-LABEL: foo22:
-; CHECK: cmp w0, #5
-; CHECK: mov w[[REG:[0-9]+]], #6
-; CHECK: csinc w0, w[[REG]], wzr, ne
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, #5
+; CHECK-NEXT:    mov w8, #6
+; CHECK-NEXT:    csinc w0, w8, wzr, ne
+; CHECK-NEXT:    ret
   %cmp = icmp eq i32 %x, 5
   %res = select i1 %cmp, i32 1, i32 6
   ret i32 %res
@@ -262,9 +316,11 @@ define i32 @foo22(i32 %x) {
 
 define i64 @foo23(i64 %x) {
 ; CHECK-LABEL: foo23:
-; CHECK: cmp x0, #5
-; CHECK: mov w[[REG:[0-9]+]], #6
-; CHECK: csinc x0, x[[REG]], xzr, ne
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, #5
+; CHECK-NEXT:    mov w8, #6
+; CHECK-NEXT:    csinc x0, x8, xzr, ne
+; CHECK-NEXT:    ret
   %cmp = icmp eq i64 %x, 5
   %res = select i1 %cmp, i64 1, i64 6
   ret i64 %res
@@ -272,13 +328,14 @@ define i64 @foo23(i64 %x) {
 
 define i16 @foo24(i8* nocapture readonly %A, i8* nocapture readonly %B) {
 ; CHECK-LABEL: foo24:
-; CHECK:       ldrb    w[[W8:[0-9]+]], [x1]
-; CHECK-NEXT:  ldrb    w[[W9:[0-9]+]], [x0]
-; CHECK-NEXT:  cmp     w[[W8]], #33
-; CHECK-NEXT:  cset    w[[W8]], hi
-; CHECK-NEXT:  cmp     w[[W9]], #3
-; CHECK-NEXT:  cinc    w0, w[[W8]], hi
-; CHECK-NEXT:  ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldrb w8, [x1]
+; CHECK-NEXT:    ldrb w9, [x0]
+; CHECK-NEXT:    cmp w8, #33
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmp w9, #3
+; CHECK-NEXT:    cinc w0, w8, hi
+; CHECK-NEXT:    ret
 entry:
   %0 = load i8, i8* %A, align 1
   %cmp = icmp ugt i8 %0, 3
@@ -292,13 +349,14 @@ entry:
 
 define i64 @foo25(i64* nocapture readonly %A, i64* nocapture readonly %B) {
 ; CHECK-LABEL: foo25:
-; CHECK:       ldr    x[[X8:[0-9]+]], [x1]
-; CHECK-NEXT:  ldr    x[[X9:[0-9]+]], [x0]
-; CHECK-NEXT:  cmp    x[[X8]], #33
-; CHECK-NEXT:  cset   w[[W8]], hi
-; CHECK-NEXT:  cmp    x[[X9]], #3
-; CHECK-NEXT:  cinc   x0, x[[X8]], hi
-; CHECK-NEXT:  ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr x8, [x1]
+; CHECK-NEXT:    ldr x9, [x0]
+; CHECK-NEXT:    cmp x8, #33
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmp x9, #3
+; CHECK-NEXT:    cinc x0, x8, hi
+; CHECK-NEXT:    ret
 entry:
   %0 = load i64, i64* %A, align 1
   %cmp = icmp ugt i64 %0, 3

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fcopysign.ll b/llvm/test/CodeGen/AArch64/arm64-fcopysign.ll
index 9bcc8eeca2190..4661b4230d69b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fcopysign.ll
@@ -1,22 +1,33 @@
-; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-apple-darwin | FileCheck %s
 
 ; rdar://9332258
 
 define float @test1(float %x, float %y) nounwind {
-entry:
 ; CHECK-LABEL: test1:
-; CHECK: movi.4s	v2, #128, lsl #24
-; CHECK: bit.16b	v0, v1, v2
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    ; kill: def $s0 killed $s0 def $q0
+; CHECK-NEXT:    movi.4s v2, #128, lsl #24
+; CHECK-NEXT:    ; kill: def $s1 killed $s1 def $q1
+; CHECK-NEXT:    bit.16b v0, v1, v2
+; CHECK-NEXT:    ; kill: def $s0 killed $s0 killed $q0
+; CHECK-NEXT:    ret
+entry:
   %0 = tail call float @copysignf(float %x, float %y) nounwind readnone
   ret float %0
 }
 
 define double @test2(double %x, double %y) nounwind {
-entry:
 ; CHECK-LABEL: test2:
-; CHECK: movi.2d	v2, #0
-; CHECK: fneg.2d	v2, v2
-; CHECK: bit.16b	v0, v1, v2
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    movi.2d v2, #0000000000000000
+; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fneg.2d v2, v2
+; CHECK-NEXT:    ; kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    bit.16b v0, v1, v2
+; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+entry:
   %0 = tail call double @copysign(double %x, double %y) nounwind readnone
   ret double %0
 }
@@ -24,9 +35,15 @@ entry:
 ; rdar://9545768
 define double @test3(double %a, float %b, float %c) nounwind {
 ; CHECK-LABEL: test3:
-; CHECK: fcvt d1, s1
-; CHECK: fneg.2d v2, v{{[0-9]+}}
-; CHECK: bit.16b v0, v1, v2
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    fadd s1, s1, s2
+; CHECK-NEXT:    movi.2d v2, #0000000000000000
+; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fcvt d1, s1
+; CHECK-NEXT:    fneg.2d v2, v2
+; CHECK-NEXT:    bit.16b v0, v1, v2
+; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %tmp1 = fadd float %b, %c
   %tmp2 = fpext float %tmp1 to double
   %tmp = tail call double @copysign( double %a, double %tmp2 ) nounwind readnone
@@ -34,11 +51,18 @@ define double @test3(double %a, float %b, float %c) nounwind {
 }
 
 define float @test4() nounwind {
-entry:
 ; CHECK-LABEL: test4:
-; CHECK: fcvt s0, d0
-; CHECK: movi.4s v[[CONST:[0-9]+]], #128, lsl #24
-; CHECK: bit.16b v{{[0-9]+}}, v0, v[[CONST]]
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-NEXT:    bl _bar
+; CHECK-NEXT:    fcvt s0, d0
+; CHECK-NEXT:    fmov s1, #0.50000000
+; CHECK-NEXT:    movi.4s v2, #128, lsl #24
+; CHECK-NEXT:    bit.16b v1, v0, v2
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %0 = tail call double (...) @bar() nounwind
   %1 = fptrunc double %0 to float
   %2 = tail call float @copysignf(float 5.000000e-01, float %1) nounwind readnone

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fmadd.ll b/llvm/test/CodeGen/AArch64/arm64-fmadd.ll
index dffa83aa11b24..6079bc4f07bc5 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fmadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fmadd.ll
@@ -1,44 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=arm64-eabi < %s | FileCheck %s
 
 define float @fma32(float %a, float %b, float %c) nounwind readnone ssp {
-entry:
 ; CHECK-LABEL: fma32:
-; CHECK: fmadd s0, s0, s1, s2
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmadd s0, s0, s1, s2
+; CHECK-NEXT:    ret
+entry:
   %0 = tail call float @llvm.fma.f32(float %a, float %b, float %c)
   ret float %0
 }
 
 define float @fnma32(float %a, float %b, float %c) nounwind readnone ssp {
-entry:
 ; CHECK-LABEL: fnma32:
-; CHECK: fnmadd s0, s0, s1, s2
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fnmadd s0, s0, s1, s2
+; CHECK-NEXT:    ret
+entry:
   %0 = tail call float @llvm.fma.f32(float %a, float %b, float %c)
   %mul = fmul float %0, -1.000000e+00
   ret float %mul
 }
 
 define float @fms32(float %a, float %b, float %c) nounwind readnone ssp {
-entry:
 ; CHECK-LABEL: fms32:
-; CHECK: fmsub s0, s0, s1, s2
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub s0, s0, s1, s2
+; CHECK-NEXT:    ret
+entry:
   %mul = fmul float %b, -1.000000e+00
   %0 = tail call float @llvm.fma.f32(float %a, float %mul, float %c)
   ret float %0
 }
 
 define float @fms32_com(float %a, float %b, float %c) nounwind readnone ssp {
-entry:
 ; CHECK-LABEL: fms32_com:
-; CHECK: fmsub s0, s1, s0, s2
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub s0, s1, s0, s2
+; CHECK-NEXT:    ret
+entry:
   %mul = fmul float %b, -1.000000e+00
   %0 = tail call float @llvm.fma.f32(float %mul, float %a, float %c)
   ret float %0
 }
 
 define float @fnms32(float %a, float %b, float %c) nounwind readnone ssp {
-entry:
 ; CHECK-LABEL: fnms32:
-; CHECK: fnmsub s0, s0, s1, s2
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fnmsub s0, s0, s1, s2
+; CHECK-NEXT:    ret
+entry:
   %mul = fmul float %c, -1.000000e+00
   %0 = tail call float @llvm.fma.f32(float %a, float %b, float %mul)
   ret float %0
@@ -46,7 +57,9 @@ entry:
 
 define double @fma64(double %a, double %b, double %c) nounwind readnone ssp {
 ; CHECK-LABEL: fma64:
-; CHECK: fmadd d0, d0, d1, d2
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmadd d0, d0, d1, d2
+; CHECK-NEXT:    ret
 entry:
   %0 = tail call double @llvm.fma.f64(double %a, double %b, double %c)
   ret double %0
@@ -54,7 +67,9 @@ entry:
 
 define double @fnma64(double %a, double %b, double %c) nounwind readnone ssp {
 ; CHECK-LABEL: fnma64:
-; CHECK: fnmadd d0, d0, d1, d2
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fnmadd d0, d0, d1, d2
+; CHECK-NEXT:    ret
 entry:
   %0 = tail call double @llvm.fma.f64(double %a, double %b, double %c)
   %mul = fmul double %0, -1.000000e+00
@@ -63,7 +78,9 @@ entry:
 
 define double @fms64(double %a, double %b, double %c) nounwind readnone ssp {
 ; CHECK-LABEL: fms64:
-; CHECK: fmsub d0, d0, d1, d2
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub d0, d0, d1, d2
+; CHECK-NEXT:    ret
 entry:
   %mul = fmul double %b, -1.000000e+00
   %0 = tail call double @llvm.fma.f64(double %a, double %mul, double %c)
@@ -72,7 +89,9 @@ entry:
 
 define double @fms64_com(double %a, double %b, double %c) nounwind readnone ssp {
 ; CHECK-LABEL: fms64_com:
-; CHECK: fmsub d0, d1, d0, d2
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub d0, d1, d0, d2
+; CHECK-NEXT:    ret
 entry:
   %mul = fmul double %b, -1.000000e+00
   %0 = tail call double @llvm.fma.f64(double %mul, double %a, double %c)
@@ -81,7 +100,9 @@ entry:
 
 define double @fnms64(double %a, double %b, double %c) nounwind readnone ssp {
 ; CHECK-LABEL: fnms64:
-; CHECK: fnmsub d0, d0, d1, d2
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fnmsub d0, d0, d1, d2
+; CHECK-NEXT:    ret
 entry:
   %mul = fmul double %c, -1.000000e+00
   %0 = tail call double @llvm.fma.f64(double %a, double %b, double %mul)

diff  --git a/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog-no-helper.ll b/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog-no-helper.ll
index a5da39cb54353..e6f1c7d39f2a2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog-no-helper.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog-no-helper.ll
@@ -1,35 +1,85 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-apple-ios7.0  -homogeneous-prolog-epilog -frame-helper-size-threshold=6 | FileCheck %s
 ; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu  -homogeneous-prolog-epilog -frame-helper-size-threshold=6 | FileCheck %s --check-prefixes=CHECK-LINUX
 
-; CHECK-LABEL: __Z3foofffi:
-; CHECK:      stp     d11, d10, [sp, #-64]!
-; CHECK-NEXT: stp     d9, d8, [sp, #16]
-; CHECK-NEXT: stp     x20, x19, [sp, #32]
-; CHECK-NEXT: stp     x29, x30, [sp, #48]
-; CHECK-NEXT: add     x29, sp, #48
-; CHECK:      bl      __Z3goof
-; CHECK:      bl      __Z3goof
-; CHECK:      ldp     x29, x30, [sp, #48]
-; CHECK:      ldp     x20, x19, [sp, #32]
-; CHECK:      ldp     d9, d8, [sp, #16]
-; CHECK:      ldp     d11, d10, [sp], #64
-; CHECK:      ret
-
-; CHECK-LINUX-LABEL: _Z3foofffi:
-; CHECK-LINUX:      stp     d11, d10, [sp, #-64]!
-; CHECK-LINUX-NEXT: stp     d9, d8, [sp, #16]
-; CHECK-LINUX-NEXT: stp     x29, x30, [sp, #32]
-; CHECK-LINUX-NEXT: stp     x20, x19, [sp, #48]
-; CHECK-LINUX-NEXT: add     x29, sp, #32
-; CHECK-LINUX:      bl      _Z3goof
-; CHECK-LINUX:      bl      _Z3goof
-; CHECK-LINUX:      ldp     x20, x19, [sp, #48]
-; CHECK-LINUX:      ldp     x29, x30, [sp, #32]
-; CHECK-LINUX:      ldp     d9, d8, [sp, #16]
-; CHECK-LINUX:      ldp     d11, d10, [sp], #64
-; CHECK-LINUX:      ret
-
 define float @_Z3foofffi(float %b, float %x, float %y, i32 %z) uwtable ssp minsize "frame-pointer"="non-leaf" {
+; CHECK-LABEL: _Z3foofffi:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    stp d11, d10, [sp, #-64]!
+; CHECK-NEXT:    stp d9, d8, [sp, #16]
+; CHECK-NEXT:    stp x20, x19, [sp, #32]
+; CHECK-NEXT:    stp x29, x30, [sp, #48]
+; CHECK-NEXT:    add x29, sp, #48
+; CHECK-NEXT:    .cfi_def_cfa w29, 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    .cfi_offset w19, -24
+; CHECK-NEXT:    .cfi_offset w20, -32
+; CHECK-NEXT:    .cfi_offset b8, -40
+; CHECK-NEXT:    .cfi_offset b9, -48
+; CHECK-NEXT:    .cfi_offset b10, -56
+; CHECK-NEXT:    .cfi_offset b11, -64
+; CHECK-NEXT:    fmov s3, #1.00000000
+; CHECK-NEXT:    fadd s8, s0, s3
+; CHECK-NEXT:    fadd s0, s8, s1
+; CHECK-NEXT:    scvtf s4, w0
+; CHECK-NEXT:    fadd s0, s0, s2
+; CHECK-NEXT:    fsub s9, s0, s4
+; CHECK-NEXT:    fmov s0, s8
+; CHECK-NEXT:    sub w19, w0, #1
+; CHECK-NEXT:    bl __Z3goof
+; CHECK-NEXT:    fmov s10, s0
+; CHECK-NEXT:    fmov s0, s9
+; CHECK-NEXT:    bl __Z3goof
+; CHECK-NEXT:    fadd s0, s10, s0
+; CHECK-NEXT:    fmul s0, s8, s0
+; CHECK-NEXT:    fadd s0, s9, s0
+; CHECK-NEXT:    scvtf s1, w19
+; CHECK-NEXT:    ldp x29, x30, [sp, #48]
+; CHECK-NEXT:    ldp x20, x19, [sp, #32]
+; CHECK-NEXT:    ldp d9, d8, [sp, #16]
+; CHECK-NEXT:    fsub s0, s0, s1
+; CHECK-NEXT:    ldp d11, d10, [sp], #64
+; CHECK-NEXT:    ret
+;
+; CHECK-LINUX-LABEL: _Z3foofffi:
+; CHECK-LINUX:       // %bb.0: // %entry
+; CHECK-LINUX-NEXT:    stp d11, d10, [sp, #-64]!
+; CHECK-LINUX-NEXT:    stp d9, d8, [sp, #16]
+; CHECK-LINUX-NEXT:    stp x29, x30, [sp, #32]
+; CHECK-LINUX-NEXT:    stp x20, x19, [sp, #48]
+; CHECK-LINUX-NEXT:    add x29, sp, #32
+; CHECK-LINUX-NEXT:    .cfi_def_cfa w29, 32
+; CHECK-LINUX-NEXT:    .cfi_offset w19, -8
+; CHECK-LINUX-NEXT:    .cfi_offset w20, -16
+; CHECK-LINUX-NEXT:    .cfi_offset w30, -24
+; CHECK-LINUX-NEXT:    .cfi_offset w29, -32
+; CHECK-LINUX-NEXT:    .cfi_offset b8, -40
+; CHECK-LINUX-NEXT:    .cfi_offset b9, -48
+; CHECK-LINUX-NEXT:    .cfi_offset b10, -56
+; CHECK-LINUX-NEXT:    .cfi_offset b11, -64
+; CHECK-LINUX-NEXT:    fmov s3, #1.00000000
+; CHECK-LINUX-NEXT:    fadd s8, s0, s3
+; CHECK-LINUX-NEXT:    fadd s0, s8, s1
+; CHECK-LINUX-NEXT:    scvtf s4, w0
+; CHECK-LINUX-NEXT:    fadd s0, s0, s2
+; CHECK-LINUX-NEXT:    fsub s9, s0, s4
+; CHECK-LINUX-NEXT:    fmov s0, s8
+; CHECK-LINUX-NEXT:    sub w19, w0, #1
+; CHECK-LINUX-NEXT:    bl _Z3goof
+; CHECK-LINUX-NEXT:    fmov s10, s0
+; CHECK-LINUX-NEXT:    fmov s0, s9
+; CHECK-LINUX-NEXT:    bl _Z3goof
+; CHECK-LINUX-NEXT:    fadd s0, s10, s0
+; CHECK-LINUX-NEXT:    fmul s0, s8, s0
+; CHECK-LINUX-NEXT:    fadd s0, s9, s0
+; CHECK-LINUX-NEXT:    scvtf s1, w19
+; CHECK-LINUX-NEXT:    ldp x20, x19, [sp, #48]
+; CHECK-LINUX-NEXT:    ldp x29, x30, [sp, #32]
+; CHECK-LINUX-NEXT:    ldp d9, d8, [sp, #16]
+; CHECK-LINUX-NEXT:    fsub s0, s0, s1
+; CHECK-LINUX-NEXT:    ldp d11, d10, [sp], #64
+; CHECK-LINUX-NEXT:    ret
 entry:
   %inc = fadd float %b, 1.000000e+00
   %add = fadd float %inc, %x
@@ -47,19 +97,22 @@ entry:
   ret float %sub6
 }
 
-; CHECK-LABEL: __Z3zoov:
-; CHECK:      stp     x29, x30, [sp, #-16]!
-; CHECK:      bl      __Z3hoo
-; CHECK:      ldp     x29, x30, [sp], #16
-; CHECK-NEXT: ret
-
-; CHECK-LINUX-LABEL: _Z3zoov:
-; CHECK-LINUX:      stp     x29, x30, [sp, #-16]!
-; CHECK-LINUX:      bl      _Z3hoo
-; CHECK-LINUX:      ldp     x29, x30, [sp], #16
-; CHECK-LINUX-NEXT: ret
-
 define i32 @_Z3zoov() nounwind ssp minsize {
+; CHECK-LABEL: _Z3zoov:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]!
+; CHECK-NEXT:    bl __Z3hoov
+; CHECK-NEXT:    add w0, w0, #1
+; CHECK-NEXT:    ldp x29, x30, [sp], #16
+; CHECK-NEXT:    ret
+;
+; CHECK-LINUX-LABEL: _Z3zoov:
+; CHECK-LINUX:       // %bb.0:
+; CHECK-LINUX-NEXT:    stp x29, x30, [sp, #-16]!
+; CHECK-LINUX-NEXT:    bl _Z3hoov
+; CHECK-LINUX-NEXT:    add w0, w0, #1
+; CHECK-LINUX-NEXT:    ldp x29, x30, [sp], #16
+; CHECK-LINUX-NEXT:    ret
   %1 = tail call i32 @_Z3hoov() #2
   %2 = add nsw i32 %1, 1
   ret i32 %2


        


More information about the llvm-commits mailing list