[llvm] 6b53817 - [AArch64] Regenerate some test checks. NFC

David Green via llvm-commits <llvm-commits at lists.llvm.org>
Tue Sep 7 16:01:07 PDT 2021


Author: David Green
Date: 2021-09-08T00:00:56+01:00
New Revision: 6b53817e713ca02415e710e5c7b0d2a643d93889

URL: https://github.com/llvm/llvm-project/commit/6b53817e713ca02415e710e5c7b0d2a643d93889
DIFF: https://github.com/llvm/llvm-project/commit/6b53817e713ca02415e710e5c7b0d2a643d93889.diff

LOG: [AArch64] Regenerate some test checks. NFC

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/sibling-call.ll
    llvm/test/CodeGen/AArch64/vcvt-oversize.ll
    llvm/test/CodeGen/AArch64/vector-fcopysign.ll

Removed: 
    


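(For reference: utils/update_llc_test_checks.py rewrites the CHECK lines of llc-based tests from actual llc output, which is why the hand-written CHECK/CHECK-DAG/CHECK-NOT patterns below become full CHECK-NEXT sequences. A minimal sketch of regenerating the three files touched here, assuming a locally built llc at build/bin/llc — the exact invocation is not part of this commit:

    python3 llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/AArch64/sibling-call.ll \
        llvm/test/CodeGen/AArch64/vcvt-oversize.ll \
        llvm/test/CodeGen/AArch64/vector-fcopysign.ll
)
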
################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sibling-call.ll b/llvm/test/CodeGen/AArch64/sibling-call.ll
index a361d8f63ff35..18c5840bebb2a 100644
--- a/llvm/test/CodeGen/AArch64/sibling-call.ll
+++ b/llvm/test/CodeGen/AArch64/sibling-call.ll
@@ -1,4 +1,5 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-ldst-opt=0 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 
 declare void @callee_stack0()
 declare void @callee_stack8([8 x i64], i64)
@@ -6,92 +7,110 @@ declare void @callee_stack16([8 x i64], i64, i64)
 
 define dso_local void @caller_to0_from0() nounwind {
 ; CHECK-LABEL: caller_to0_from0:
-; CHECK-NEXT: // %bb.
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    b callee_stack0
   tail call void @callee_stack0()
   ret void
-; CHECK-NEXT: b callee_stack0
 }
 
 define dso_local void @caller_to0_from8([8 x i64], i64) nounwind{
 ; CHECK-LABEL: caller_to0_from8:
-; CHECK-NEXT: // %bb.
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    b callee_stack0
 
   tail call void @callee_stack0()
   ret void
-; CHECK-NEXT: b callee_stack0
 }
 
 define dso_local void @caller_to8_from0() {
 ; CHECK-LABEL: caller_to8_from0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    str x8, [sp]
+; CHECK-NEXT:    bl callee_stack8
+; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    ret
 
 ; Caller isn't going to clean up any extra stack we allocate, so it
 ; can't be a tail call.
   tail call void @callee_stack8([8 x i64] undef, i64 42)
   ret void
-; CHECK: bl callee_stack8
 }
 
 define dso_local void @caller_to8_from8([8 x i64], i64 %a) {
 ; CHECK-LABEL: caller_to8_from8:
-; CHECK-NOT: sub sp, sp,
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    str x8, [sp]
+; CHECK-NEXT:    b callee_stack8
 
 ; This should reuse our stack area for the 42
   tail call void @callee_stack8([8 x i64] undef, i64 42)
   ret void
-; CHECK: str {{x[0-9]+}}, [sp]
-; CHECK-NEXT: b callee_stack8
 }
 
 define dso_local void @caller_to16_from8([8 x i64], i64 %a) {
 ; CHECK-LABEL: caller_to16_from8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    bl callee_stack16
+; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    ret
 
 ; Shouldn't be a tail call: we can't use SP+8 because our caller might
 ; have something there. This may sound obvious but implementation does
 ; some funky aligning.
   tail call void @callee_stack16([8 x i64] undef, i64 undef, i64 undef)
-; CHECK: bl callee_stack16
   ret void
 }
 
 define dso_local void @caller_to8_from24([8 x i64], i64 %a, i64 %b, i64 %c) {
 ; CHECK-LABEL: caller_to8_from24:
-; CHECK-NOT: sub sp, sp
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    str x8, [sp]
+; CHECK-NEXT:    b callee_stack8
 
 ; Reuse our area, putting "42" at incoming sp
   tail call void @callee_stack8([8 x i64] undef, i64 42)
   ret void
-; CHECK: str {{x[0-9]+}}, [sp]
-; CHECK-NEXT: b callee_stack8
 }
 
 define dso_local void @caller_to16_from16([8 x i64], i64 %a, i64 %b) {
 ; CHECK-LABEL: caller_to16_from16:
-; CHECK-NOT: sub sp, sp,
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp x8, x9, [sp]
+; CHECK-NEXT:    stp x9, x8, [sp]
+; CHECK-NEXT:    b callee_stack16
 
 ; Here we want to make sure that both loads happen before the stores:
 ; otherwise either %a or %b will be wrongly clobbered.
   tail call void @callee_stack16([8 x i64] undef, i64 %b, i64 %a)
   ret void
 
-; CHECK: ldr [[VAL0:x[0-9]+]],
-; CHECK: ldr [[VAL1:x[0-9]+]],
-; CHECK: str [[VAL0]],
-; CHECK: str [[VAL1]],
 
-; CHECK-NOT: add sp, sp,
-; CHECK: b callee_stack16
 }
 
 @func = dso_local global void(i32)* null
 
 define dso_local void @indirect_tail() {
 ; CHECK-LABEL: indirect_tail:
-; CHECK-NOT: sub sp, sp
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, func
+; CHECK-NEXT:    ldr x1, [x8, :lo12:func]
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    br x1
 
   %fptr = load void(i32)*, void(i32)** @func
   tail call void %fptr(i32 42)
   ret void
-; CHECK: ldr [[FPTR:x[1-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:func]
-; CHECK: mov w0, #{{42|0x2a}}
-; CHECK: br [[FPTR]]
 }

diff --git a/llvm/test/CodeGen/AArch64/vcvt-oversize.ll b/llvm/test/CodeGen/AArch64/vcvt-oversize.ll
index 823fe44744c52..f41bf42a2f37b 100644
--- a/llvm/test/CodeGen/AArch64/vcvt-oversize.ll
+++ b/llvm/test/CodeGen/AArch64/vcvt-oversize.ll
@@ -1,15 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64 < %s | FileCheck %s
 
 define <8 x i8> @float_to_i8(<8 x float>* %in) {
 ; CHECK-LABEL: float_to_i8:
-; CHECK: ldp     q0, q1, [x0]
-; CHECK-DAG: fadd v[[LSB:[0-9]+]].4s, v0.4s, v0.4s
-; CHECK-DAG: fadd v[[MSB:[0-9]+]].4s, v1.4s, v1.4s
-; CHECK-DAG: fcvtzs v[[LSB2:[0-9]+]].4s, v[[LSB]].4s
-; CHECK-DAG: fcvtzs v[[MSB2:[0-9]+]].4s, v[[MSB]].4s
-; CHECK-DAG: xtn v[[TMP:[0-9]+]].4h, v[[LSB]].4s
-; CHECK-DAG: xtn v[[TMP2:[0-9]+]].4h, v[[MSB]].4s
-; CHECK-DAG: uzp1 v0.8b, v[[TMP]].8b, v[[TMP2]].8b
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    fadd v0.4s, v0.4s, v0.4s
+; CHECK-NEXT:    fadd v1.4s, v1.4s, v1.4s
+; CHECK-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    xtn v1.4h, v1.4s
+; CHECK-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NEXT:    uzp1 v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    ret
   %l = load <8 x float>, <8 x float>* %in
   %scale = fmul <8 x float> %l, <float 2.0, float 2.0, float 2.0, float 2.0, float 2.0, float 2.0, float 2.0, float 2.0>
   %conv = fptoui <8 x float> %scale to <8 x i8>

diff --git a/llvm/test/CodeGen/AArch64/vector-fcopysign.ll b/llvm/test/CodeGen/AArch64/vector-fcopysign.ll
index 755bed466f597..2e857c4a40927 100644
--- a/llvm/test/CodeGen/AArch64/vector-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/vector-fcopysign.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -mtriple aarch64-apple-darwin -asm-verbose=false -disable-post-ra | FileCheck --check-prefixes=CHECK,NOFP16 %s
-; RUN: llc < %s -mtriple aarch64-apple-darwin -asm-verbose=false -disable-post-ra -mattr=+v8.2a,+fullfp16 | FileCheck --check-prefixes=CHECK,FP16 %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple aarch64-apple-darwin | FileCheck --check-prefixes=CHECK,NOFP16 %s
+; RUN: llc < %s -mtriple aarch64-apple-darwin -mattr=+v8.2a,+fullfp16 | FileCheck --check-prefixes=CHECK,FP16 %s
 
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
@@ -8,6 +9,7 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 ; WidenVecRes same
 define <1 x float> @test_copysign_v1f32_v1f32(<1 x float> %a, <1 x float> %b) #0 {
 ; CHECK-LABEL: test_copysign_v1f32_v1f32:
+; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    movi.2s v2, #128, lsl #24
 ; CHECK-NEXT:    bit.8b v0, v1, v2
 ; CHECK-NEXT:    ret
@@ -18,8 +20,10 @@ define <1 x float> @test_copysign_v1f32_v1f32(<1 x float> %a, <1 x float> %b) #0
 ; WidenVecRes mismatched
 define <1 x float> @test_copysign_v1f32_v1f64(<1 x float> %a, <1 x double> %b) #0 {
 ; CHECK-LABEL: test_copysign_v1f32_v1f64:
-; CHECK-NEXT:    fcvtn v1.2s, v1.2d
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    ; kill: def $d1 killed $d1 def $q1
 ; CHECK-NEXT:    movi.2s v2, #128, lsl #24
+; CHECK-NEXT:    fcvtn v1.2s, v1.2d
 ; CHECK-NEXT:    bit.8b v0, v1, v2
 ; CHECK-NEXT:    ret
   %tmp0 = fptrunc <1 x double> %b to <1 x float>
@@ -34,10 +38,13 @@ declare <1 x float> @llvm.copysign.v1f32(<1 x float> %a, <1 x float> %b) #0
 ; WidenVecOp #1
 define <1 x double> @test_copysign_v1f64_v1f32(<1 x double> %a, <1 x float> %b) #0 {
 ; CHECK-LABEL: test_copysign_v1f64_v1f32:
-; CHECK-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    movi.2d v2, #0000000000000000
+; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
 ; CHECK-NEXT:    fneg.2d v2, v2
 ; CHECK-NEXT:    bit.16b v0, v1, v2
+; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
   %tmp0 = fpext <1 x float> %b to <1 x double>
   %r = call <1 x double> @llvm.copysign.v1f64(<1 x double> %a, <1 x double> %tmp0)
@@ -46,9 +53,13 @@ define <1 x double> @test_copysign_v1f64_v1f32(<1 x double> %a, <1 x float> %b)
 
 define <1 x double> @test_copysign_v1f64_v1f64(<1 x double> %a, <1 x double> %b) #0 {
 ; CHECK-LABEL: test_copysign_v1f64_v1f64:
+; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    movi.2d v2, #0000000000000000
+; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    fneg.2d v2, v2
+; CHECK-NEXT:    ; kill: def $d1 killed $d1 def $q1
 ; CHECK-NEXT:    bit.16b v0, v1, v2
+; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
   %r = call <1 x double> @llvm.copysign.v1f64(<1 x double> %a, <1 x double> %b)
   ret <1 x double> %r
@@ -60,6 +71,7 @@ declare <1 x double> @llvm.copysign.v1f64(<1 x double> %a, <1 x double> %b) #0
 
 define <2 x float> @test_copysign_v2f32_v2f32(<2 x float> %a, <2 x float> %b) #0 {
 ; CHECK-LABEL: test_copysign_v2f32_v2f32:
+; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    movi.2s v2, #128, lsl #24
 ; CHECK-NEXT:    bit.8b v0, v1, v2
 ; CHECK-NEXT:    ret
@@ -69,6 +81,7 @@ define <2 x float> @test_copysign_v2f32_v2f32(<2 x float> %a, <2 x float> %b) #0
 
 define <2 x float> @test_copysign_v2f32_v2f64(<2 x float> %a, <2 x double> %b) #0 {
 ; CHECK-LABEL: test_copysign_v2f32_v2f64:
+; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    fcvtn v1.2s, v1.2d
 ; CHECK-NEXT:    movi.2s v2, #128, lsl #24
 ; CHECK-NEXT:    bit.8b v0, v1, v2
@@ -84,6 +97,7 @@ declare <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b) #0
 
 define <4 x float> @test_copysign_v4f32_v4f32(<4 x float> %a, <4 x float> %b) #0 {
 ; CHECK-LABEL: test_copysign_v4f32_v4f32:
+; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    movi.4s v2, #128, lsl #24
 ; CHECK-NEXT:    bit.16b v0, v1, v2
 ; CHECK-NEXT:    ret
@@ -94,6 +108,7 @@ define <4 x float> @test_copysign_v4f32_v4f32(<4 x float> %a, <4 x float> %b) #0
 ; SplitVecOp #1
 define <4 x float> @test_copysign_v4f32_v4f64(<4 x float> %a, <4 x double> %b) #0 {
 ; CHECK-LABEL: test_copysign_v4f32_v4f64:
+; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    fcvtn v1.2s, v1.2d
 ; CHECK-NEXT:    fcvtn2 v1.4s, v2.2d
 ; CHECK-NEXT:    movi.4s v2, #128, lsl #24
@@ -110,8 +125,9 @@ declare <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b) #0
 
 define <2 x double> @test_copysign_v2f64_v232(<2 x double> %a, <2 x float> %b) #0 {
 ; CHECK-LABEL: test_copysign_v2f64_v232:
-; CHECK-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    movi.2d v2, #0000000000000000
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
 ; CHECK-NEXT:    fneg.2d v2, v2
 ; CHECK-NEXT:    bit.16b v0, v1, v2
 ; CHECK-NEXT:    ret
@@ -122,6 +138,7 @@ define <2 x double> @test_copysign_v2f64_v232(<2 x double> %a, <2 x float> %b) #
 
 define <2 x double> @test_copysign_v2f64_v2f64(<2 x double> %a, <2 x double> %b) #0 {
 ; CHECK-LABEL: test_copysign_v2f64_v2f64:
+; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    movi.2d v2, #0000000000000000
 ; CHECK-NEXT:    fneg.2d v2, v2
 ; CHECK-NEXT:    bit.16b v0, v1, v2
@@ -137,9 +154,10 @@ declare <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b) #0
 ; SplitVecRes mismatched
 define <4 x double> @test_copysign_v4f64_v4f32(<4 x double> %a, <4 x float> %b) #0 {
 ; CHECK-LABEL: test_copysign_v4f64_v4f32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    movi.2d v4, #0000000000000000
 ; CHECK-NEXT:    fcvtl v3.2d, v2.2s
 ; CHECK-NEXT:    fcvtl2 v2.2d, v2.4s
-; CHECK-NEXT:    movi.2d v4, #0000000000000000
 ; CHECK-NEXT:    fneg.2d v4, v4
 ; CHECK-NEXT:    bit.16b v1, v2, v4
 ; CHECK-NEXT:    bit.16b v0, v3, v4
@@ -152,6 +170,7 @@ define <4 x double> @test_copysign_v4f64_v4f32(<4 x double> %a, <4 x float> %b)
 ; SplitVecRes same
 define <4 x double> @test_copysign_v4f64_v4f64(<4 x double> %a, <4 x double> %b) #0 {
 ; CHECK-LABEL: test_copysign_v4f64_v4f64:
+; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    movi.2d v4, #0000000000000000
 ; CHECK-NEXT:    fneg.2d v4, v4
 ; CHECK-NEXT:    bit.16b v0, v2, v4
@@ -166,75 +185,86 @@ declare <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b) #0
 ;============ v4f16
 
 define <4 x half> @test_copysign_v4f16_v4f16(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-LABEL: test_copysign_v4f16_v4f16:
+; NOFP16-LABEL: test_copysign_v4f16_v4f16:
+; NOFP16:       ; %bb.0:
+; NOFP16-NEXT:    ; kill: def $d1 killed $d1 def $q1
+; NOFP16-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; NOFP16-NEXT:    mov h2, v1[1]
 ; NOFP16-NEXT:    mov h3, v0[1]
 ; NOFP16-NEXT:    movi.4s v4, #128, lsl #24
-; NOFP16-NEXT:    fcvt    s5, h1
-; NOFP16-NEXT:    fcvt    s6, h0
+; NOFP16-NEXT:    fcvt s5, h1
+; NOFP16-NEXT:    fcvt s6, h0
+; NOFP16-NEXT:    fcvt s2, h2
+; NOFP16-NEXT:    fcvt s3, h3
 ; NOFP16-NEXT:    bit.16b v6, v5, v4
 ; NOFP16-NEXT:    mov h5, v1[2]
-; NOFP16-NEXT:    fcvt    s2, h2
-; NOFP16-NEXT:    fcvt    s3, h3
 ; NOFP16-NEXT:    bit.16b v3, v2, v4
 ; NOFP16-NEXT:    mov h2, v0[2]
-; NOFP16-NEXT:    fcvt    s5, h5
-; NOFP16-NEXT:    fcvt    s2, h2
-; NOFP16-NEXT:    bit.16b v2, v5, v4
+; NOFP16-NEXT:    fcvt s5, h5
+; NOFP16-NEXT:    fcvt s2, h2
 ; NOFP16-NEXT:    mov h1, v1[3]
 ; NOFP16-NEXT:    mov h0, v0[3]
-; NOFP16-NEXT:    fcvt    s1, h1
-; NOFP16-NEXT:    fcvt    s5, h0
-; NOFP16-NEXT:    fcvt    h0, s6
+; NOFP16-NEXT:    bit.16b v2, v5, v4
+; NOFP16-NEXT:    fcvt s1, h1
+; NOFP16-NEXT:    fcvt s5, h0
+; NOFP16-NEXT:    fcvt h0, s6
 ; NOFP16-NEXT:    bit.16b v5, v1, v4
-; NOFP16-NEXT:    fcvt    h1, s3
-; NOFP16-NEXT:    fcvt    h2, s2
-; NOFP16-NEXT:    mov.h   v0[1], v1[0]
-; NOFP16-NEXT:    mov.h   v0[2], v2[0]
-; NOFP16-NEXT:    fcvt    h1, s5
-; NOFP16-NEXT:    mov.h   v0[3], v1[0]
+; NOFP16-NEXT:    fcvt h1, s3
+; NOFP16-NEXT:    fcvt h2, s2
+; NOFP16-NEXT:    mov.h v0[1], v1[0]
+; NOFP16-NEXT:    mov.h v0[2], v2[0]
+; NOFP16-NEXT:    fcvt h1, s5
+; NOFP16-NEXT:    mov.h v0[3], v1[0]
+; NOFP16-NEXT:    ; kill: def $d0 killed $d0 killed $q0
 ; NOFP16-NEXT:    ret
-
+;
+; FP16-LABEL: test_copysign_v4f16_v4f16:
+; FP16:       ; %bb.0:
 ; FP16-NEXT:    movi.4h v2, #128, lsl #8
-; FP16-NEXT:    bit.8b  v0, v1, v2
+; FP16-NEXT:    bit.8b v0, v1, v2
 ; FP16-NEXT:    ret
   %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b)
   ret <4 x half> %r
 }
 
 define <4 x half> @test_copysign_v4f16_v4f32(<4 x half> %a, <4 x float> %b) #0 {
-; CHECK-LABEL: test_copysign_v4f16_v4f32:
-; NOFP16-NEXT:    fcvtn   v1.4h, v1.4s
-; NOFP16-NEXT:    mov h2, v0[1]
+; NOFP16-LABEL: test_copysign_v4f16_v4f32:
+; NOFP16:       ; %bb.0:
+; NOFP16-NEXT:    fcvtn v1.4h, v1.4s
+; NOFP16-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; NOFP16-NEXT:    movi.4s v3, #128, lsl #24
-; NOFP16-NEXT:    fcvt    s4, h0
-; NOFP16-NEXT:    mov h5, v0[2]
-; NOFP16-NEXT:    fcvt    s2, h2
-; NOFP16-NEXT:    fcvt    s6, h1
+; NOFP16-NEXT:    fcvt s4, h0
+; NOFP16-NEXT:    fcvt s6, h1
+; NOFP16-NEXT:    mov h2, v0[1]
 ; NOFP16-NEXT:    bit.16b v4, v6, v3
 ; NOFP16-NEXT:    mov h6, v1[1]
-; NOFP16-NEXT:    fcvt    s5, h5
-; NOFP16-NEXT:    fcvt    s6, h6
+; NOFP16-NEXT:    fcvt s2, h2
+; NOFP16-NEXT:    fcvt s6, h6
+; NOFP16-NEXT:    mov h5, v0[2]
 ; NOFP16-NEXT:    bit.16b v2, v6, v3
 ; NOFP16-NEXT:    mov h6, v1[2]
-; NOFP16-NEXT:    fcvt    s6, h6
-; NOFP16-NEXT:    bit.16b v5, v6, v3
+; NOFP16-NEXT:    fcvt s5, h5
+; NOFP16-NEXT:    fcvt s6, h6
 ; NOFP16-NEXT:    mov h0, v0[3]
-; NOFP16-NEXT:    fcvt    s6, h0
+; NOFP16-NEXT:    bit.16b v5, v6, v3
+; NOFP16-NEXT:    fcvt s6, h0
 ; NOFP16-NEXT:    mov h0, v1[3]
-; NOFP16-NEXT:    fcvt    s1, h0
-; NOFP16-NEXT:    fcvt    h0, s4
+; NOFP16-NEXT:    fcvt s1, h0
+; NOFP16-NEXT:    fcvt h0, s4
 ; NOFP16-NEXT:    bit.16b v6, v1, v3
-; NOFP16-NEXT:    fcvt    h1, s2
-; NOFP16-NEXT:    fcvt    h2, s5
-; NOFP16-NEXT:    mov.h   v0[1], v1[0]
-; NOFP16-NEXT:    mov.h   v0[2], v2[0]
-; NOFP16-NEXT:    fcvt    h1, s6
-; NOFP16-NEXT:    mov.h   v0[3], v1[0]
+; NOFP16-NEXT:    fcvt h1, s2
+; NOFP16-NEXT:    fcvt h2, s5
+; NOFP16-NEXT:    mov.h v0[1], v1[0]
+; NOFP16-NEXT:    mov.h v0[2], v2[0]
+; NOFP16-NEXT:    fcvt h1, s6
+; NOFP16-NEXT:    mov.h v0[3], v1[0]
+; NOFP16-NEXT:    ; kill: def $d0 killed $d0 killed $q0
 ; NOFP16-NEXT:    ret
-
+;
+; FP16-LABEL: test_copysign_v4f16_v4f32:
+; FP16:       ; %bb.0:
 ; FP16-NEXT:    fcvtn v1.4h, v1.4s
-; FP16-NEXT:    movi.4h    v2, #128, lsl #8
+; FP16-NEXT:    movi.4h v2, #128, lsl #8
 ; FP16-NEXT:    bit.8b v0, v1, v2
 ; FP16-NEXT:    ret
   %tmp0 = fptrunc <4 x float> %b to <4 x half>
@@ -243,45 +273,50 @@ define <4 x half> @test_copysign_v4f16_v4f32(<4 x half> %a, <4 x float> %b) #0 {
 }
 
 define <4 x half> @test_copysign_v4f16_v4f64(<4 x half> %a, <4 x double> %b) #0 {
-; CHECK-LABEL: test_copysign_v4f16_v4f64:
-; NOFP16-NEXT:    mov d3, v2[1]
+; NOFP16-LABEL: test_copysign_v4f16_v4f64:
+; NOFP16:       ; %bb.0:
+; NOFP16-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; NOFP16-NEXT:    mov d4, v1[1]
 ; NOFP16-NEXT:    movi.4s v5, #128, lsl #24
-; NOFP16-NEXT:    fcvt    s1, d1
-; NOFP16-NEXT:    fcvt    s6, h0
+; NOFP16-NEXT:    fcvt s1, d1
+; NOFP16-NEXT:    fcvt s6, h0
 ; NOFP16-NEXT:    bit.16b v6, v1, v5
 ; NOFP16-NEXT:    mov h1, v0[1]
-; NOFP16-NEXT:    fcvt    s2, d2
-; NOFP16-NEXT:    fcvt    s4, d4
-; NOFP16-NEXT:    fcvt    s1, h1
+; NOFP16-NEXT:    fcvt s4, d4
+; NOFP16-NEXT:    fcvt s1, h1
 ; NOFP16-NEXT:    bit.16b v1, v4, v5
 ; NOFP16-NEXT:    mov h4, v0[2]
+; NOFP16-NEXT:    mov d3, v2[1]
+; NOFP16-NEXT:    fcvt s2, d2
 ; NOFP16-NEXT:    mov h0, v0[3]
-; NOFP16-NEXT:    fcvt    s4, h4
-; NOFP16-NEXT:    fcvt    s3, d3
-; NOFP16-NEXT:    fcvt    s7, h0
-; NOFP16-NEXT:    fcvt    h0, s6
+; NOFP16-NEXT:    fcvt s4, h4
+; NOFP16-NEXT:    fcvt s3, d3
+; NOFP16-NEXT:    fcvt s7, h0
+; NOFP16-NEXT:    fcvt h0, s6
 ; NOFP16-NEXT:    bit.16b v4, v2, v5
+; NOFP16-NEXT:    fcvt h1, s1
 ; NOFP16-NEXT:    bit.16b v7, v3, v5
-; NOFP16-NEXT:    fcvt    h1, s1
-; NOFP16-NEXT:    fcvt    h2, s4
-; NOFP16-NEXT:    mov.h   v0[1], v1[0]
-; NOFP16-NEXT:    mov.h   v0[2], v2[0]
-; NOFP16-NEXT:    fcvt    h1, s7
-; NOFP16-NEXT:    mov.h   v0[3], v1[0]
+; NOFP16-NEXT:    fcvt h2, s4
+; NOFP16-NEXT:    mov.h v0[1], v1[0]
+; NOFP16-NEXT:    mov.h v0[2], v2[0]
+; NOFP16-NEXT:    fcvt h1, s7
+; NOFP16-NEXT:    mov.h v0[3], v1[0]
+; NOFP16-NEXT:    ; kill: def $d0 killed $d0 killed $q0
 ; NOFP16-NEXT:    ret
-
+;
+; FP16-LABEL: test_copysign_v4f16_v4f64:
+; FP16:       ; %bb.0:
 ; FP16-NEXT:    mov d3, v1[1]
-; FP16-NEXT:    fcvt    h1, d1
-; FP16-NEXT:    fcvt    h3, d3
-; FP16-NEXT:    mov.h   v1[1], v3[0]
-; FP16-NEXT:    fcvt    h3, d2
+; FP16-NEXT:    fcvt h1, d1
+; FP16-NEXT:    fcvt h3, d3
+; FP16-NEXT:    mov.h v1[1], v3[0]
+; FP16-NEXT:    fcvt h3, d2
 ; FP16-NEXT:    mov d2, v2[1]
-; FP16-NEXT:    fcvt    h2, d2
-; FP16-NEXT:    mov.h   v1[2], v3[0]
-; FP16-NEXT:    mov.h   v1[3], v2[0]
+; FP16-NEXT:    fcvt h2, d2
+; FP16-NEXT:    mov.h v1[2], v3[0]
+; FP16-NEXT:    mov.h v1[3], v2[0]
 ; FP16-NEXT:    movi.4h v2, #128, lsl #8
-; FP16-NEXT:    bit.8b  v0, v1, v2
+; FP16-NEXT:    bit.8b v0, v1, v2
 ; FP16-NEXT:    ret
   %tmp0 = fptrunc <4 x double> %b to <4 x half>
   %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %tmp0)
@@ -293,132 +328,138 @@ declare <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b) #0
 ;============ v8f16
 
 define <8 x half> @test_copysign_v8f16_v8f16(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-LABEL: test_copysign_v8f16_v8f16:
+; NOFP16-LABEL: test_copysign_v8f16_v8f16:
+; NOFP16:       ; %bb.0:
 ; NOFP16-NEXT:    mov h4, v1[1]
 ; NOFP16-NEXT:    mov h5, v0[1]
-; NOFP16-NEXT:    movi.4s v2, #128, lsl #24
-; NOFP16-NEXT:    fcvt    s6, h1
-; NOFP16-NEXT:    fcvt    s3, h0
 ; NOFP16-NEXT:    mov h7, v1[2]
 ; NOFP16-NEXT:    mov h16, v0[2]
 ; NOFP16-NEXT:    mov h17, v1[3]
 ; NOFP16-NEXT:    mov h18, v0[3]
+; NOFP16-NEXT:    movi.4s v2, #128, lsl #24
+; NOFP16-NEXT:    fcvt s6, h1
+; NOFP16-NEXT:    fcvt s3, h0
+; NOFP16-NEXT:    fcvt s4, h4
+; NOFP16-NEXT:    fcvt s5, h5
+; NOFP16-NEXT:    fcvt s7, h7
+; NOFP16-NEXT:    fcvt s16, h16
+; NOFP16-NEXT:    fcvt s17, h17
+; NOFP16-NEXT:    fcvt s18, h18
 ; NOFP16-NEXT:    bit.16b v3, v6, v2
 ; NOFP16-NEXT:    mov h6, v1[4]
-; NOFP16-NEXT:    fcvt    s4, h4
-; NOFP16-NEXT:    fcvt    s5, h5
 ; NOFP16-NEXT:    bit.16b v5, v4, v2
 ; NOFP16-NEXT:    mov h4, v0[4]
-; NOFP16-NEXT:    fcvt    s7, h7
-; NOFP16-NEXT:    fcvt    s16, h16
 ; NOFP16-NEXT:    bit.16b v16, v7, v2
 ; NOFP16-NEXT:    mov h7, v1[5]
-; NOFP16-NEXT:    fcvt    s17, h17
-; NOFP16-NEXT:    fcvt    s18, h18
 ; NOFP16-NEXT:    bit.16b v18, v17, v2
 ; NOFP16-NEXT:    mov h17, v0[5]
-; NOFP16-NEXT:    fcvt    s6, h6
-; NOFP16-NEXT:    fcvt    s4, h4
+; NOFP16-NEXT:    fcvt s6, h6
+; NOFP16-NEXT:    fcvt s4, h4
+; NOFP16-NEXT:    fcvt s7, h7
+; NOFP16-NEXT:    fcvt s17, h17
 ; NOFP16-NEXT:    bit.16b v4, v6, v2
 ; NOFP16-NEXT:    mov h6, v1[6]
-; NOFP16-NEXT:    fcvt    s7, h7
-; NOFP16-NEXT:    fcvt    s17, h17
 ; NOFP16-NEXT:    bit.16b v17, v7, v2
 ; NOFP16-NEXT:    mov h7, v0[6]
-; NOFP16-NEXT:    fcvt    s6, h6
-; NOFP16-NEXT:    fcvt    s7, h7
-; NOFP16-NEXT:    bit.16b v7, v6, v2
+; NOFP16-NEXT:    fcvt s6, h6
+; NOFP16-NEXT:    fcvt s7, h7
 ; NOFP16-NEXT:    mov h1, v1[7]
 ; NOFP16-NEXT:    mov h0, v0[7]
-; NOFP16-NEXT:    fcvt    s1, h1
-; NOFP16-NEXT:    fcvt    s6, h0
+; NOFP16-NEXT:    bit.16b v7, v6, v2
+; NOFP16-NEXT:    fcvt s1, h1
+; NOFP16-NEXT:    fcvt s6, h0
 ; NOFP16-NEXT:    bit.16b v6, v1, v2
-; NOFP16-NEXT:    fcvt    h0, s3
-; NOFP16-NEXT:    fcvt    h1, s5
-; NOFP16-NEXT:    mov.h   v0[1], v1[0]
-; NOFP16-NEXT:    fcvt    h1, s16
-; NOFP16-NEXT:    mov.h   v0[2], v1[0]
-; NOFP16-NEXT:    fcvt    h1, s18
-; NOFP16-NEXT:    fcvt    h2, s4
-; NOFP16-NEXT:    fcvt    h3, s17
-; NOFP16-NEXT:    fcvt    h4, s7
-; NOFP16-NEXT:    mov.h   v0[3], v1[0]
-; NOFP16-NEXT:    mov.h   v0[4], v2[0]
-; NOFP16-NEXT:    mov.h   v0[5], v3[0]
-; NOFP16-NEXT:    mov.h   v0[6], v4[0]
-; NOFP16-NEXT:    fcvt    h1, s6
-; NOFP16-NEXT:    mov.h   v0[7], v1[0]
+; NOFP16-NEXT:    fcvt h0, s3
+; NOFP16-NEXT:    fcvt h1, s5
+; NOFP16-NEXT:    mov.h v0[1], v1[0]
+; NOFP16-NEXT:    fcvt h1, s16
+; NOFP16-NEXT:    mov.h v0[2], v1[0]
+; NOFP16-NEXT:    fcvt h1, s18
+; NOFP16-NEXT:    fcvt h2, s4
+; NOFP16-NEXT:    mov.h v0[3], v1[0]
+; NOFP16-NEXT:    fcvt h3, s17
+; NOFP16-NEXT:    mov.h v0[4], v2[0]
+; NOFP16-NEXT:    fcvt h4, s7
+; NOFP16-NEXT:    mov.h v0[5], v3[0]
+; NOFP16-NEXT:    mov.h v0[6], v4[0]
+; NOFP16-NEXT:    fcvt h1, s6
+; NOFP16-NEXT:    mov.h v0[7], v1[0]
 ; NOFP16-NEXT:    ret
-
+;
+; FP16-LABEL: test_copysign_v8f16_v8f16:
+; FP16:       ; %bb.0:
 ; FP16-NEXT:    movi.8h v2, #128, lsl #8
-; FP16-NEXT:    bit.16b  v0, v1, v2
+; FP16-NEXT:    bit.16b v0, v1, v2
 ; FP16-NEXT:    ret
   %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b)
   ret <8 x half> %r
 }
 
 define <8 x half> @test_copysign_v8f16_v8f32(<8 x half> %a, <8 x float> %b) #0 {
-; CHECK-LABEL: test_copysign_v8f16_v8f32:
-; NOFP16-NEXT:    fcvtn   v2.4h, v2.4s
-; NOFP16-NEXT:    fcvtn   v4.4h, v1.4s
-; NOFP16-NEXT:    mov h5, v0[1]
+; NOFP16-LABEL: test_copysign_v8f16_v8f32:
+; NOFP16:       ; %bb.0:
+; NOFP16-NEXT:    fcvtn v4.4h, v1.4s
+; NOFP16-NEXT:    fcvtn v2.4h, v2.4s
 ; NOFP16-NEXT:    movi.4s v1, #128, lsl #24
-; NOFP16-NEXT:    fcvt    s3, h0
-; NOFP16-NEXT:    mov h6, v0[2]
-; NOFP16-NEXT:    mov h7, v0[3]
+; NOFP16-NEXT:    fcvt s3, h0
 ; NOFP16-NEXT:    mov h16, v0[4]
-; NOFP16-NEXT:    mov h17, v0[5]
-; NOFP16-NEXT:    fcvt    s5, h5
-; NOFP16-NEXT:    fcvt    s18, h4
-; NOFP16-NEXT:    fcvt    s16, h16
+; NOFP16-NEXT:    fcvt s18, h4
+; NOFP16-NEXT:    fcvt s16, h16
 ; NOFP16-NEXT:    bit.16b v3, v18, v1
-; NOFP16-NEXT:    fcvt    s18, h2
+; NOFP16-NEXT:    fcvt s18, h2
+; NOFP16-NEXT:    mov h5, v0[1]
 ; NOFP16-NEXT:    bit.16b v16, v18, v1
 ; NOFP16-NEXT:    mov h18, v4[1]
-; NOFP16-NEXT:    fcvt    s6, h6
-; NOFP16-NEXT:    fcvt    s18, h18
+; NOFP16-NEXT:    fcvt s5, h5
+; NOFP16-NEXT:    fcvt s18, h18
+; NOFP16-NEXT:    mov h7, v0[3]
 ; NOFP16-NEXT:    bit.16b v5, v18, v1
 ; NOFP16-NEXT:    mov h18, v4[2]
-; NOFP16-NEXT:    fcvt    s18, h18
-; NOFP16-NEXT:    bit.16b v6, v18, v1
-; NOFP16-NEXT:    mov h18, v0[6]
-; NOFP16-NEXT:    fcvt    s7, h7
 ; NOFP16-NEXT:    mov h4, v4[3]
-; NOFP16-NEXT:    fcvt    s17, h17
-; NOFP16-NEXT:    fcvt    s4, h4
+; NOFP16-NEXT:    fcvt s7, h7
+; NOFP16-NEXT:    fcvt s4, h4
+; NOFP16-NEXT:    mov h6, v0[2]
+; NOFP16-NEXT:    mov h17, v0[5]
 ; NOFP16-NEXT:    bit.16b v7, v4, v1
 ; NOFP16-NEXT:    mov h4, v2[1]
-; NOFP16-NEXT:    fcvt    s18, h18
-; NOFP16-NEXT:    fcvt    s4, h4
+; NOFP16-NEXT:    fcvt s6, h6
+; NOFP16-NEXT:    fcvt s18, h18
+; NOFP16-NEXT:    fcvt s17, h17
+; NOFP16-NEXT:    fcvt s4, h4
+; NOFP16-NEXT:    bit.16b v6, v18, v1
+; NOFP16-NEXT:    mov h18, v0[6]
 ; NOFP16-NEXT:    bit.16b v17, v4, v1
 ; NOFP16-NEXT:    mov h4, v2[2]
-; NOFP16-NEXT:    fcvt    s4, h4
-; NOFP16-NEXT:    bit.16b v18, v4, v1
+; NOFP16-NEXT:    fcvt s18, h18
+; NOFP16-NEXT:    fcvt s4, h4
 ; NOFP16-NEXT:    mov h0, v0[7]
-; NOFP16-NEXT:    fcvt    s4, h0
+; NOFP16-NEXT:    bit.16b v18, v4, v1
+; NOFP16-NEXT:    fcvt s4, h0
 ; NOFP16-NEXT:    mov h0, v2[3]
-; NOFP16-NEXT:    fcvt    s0, h0
+; NOFP16-NEXT:    fcvt s0, h0
 ; NOFP16-NEXT:    bit.16b v4, v0, v1
-; NOFP16-NEXT:    fcvt    h0, s3
-; NOFP16-NEXT:    fcvt    h1, s5
-; NOFP16-NEXT:    mov.h   v0[1], v1[0]
-; NOFP16-NEXT:    fcvt    h1, s16
-; NOFP16-NEXT:    fcvt    h2, s6
-; NOFP16-NEXT:    fcvt    h3, s7
-; NOFP16-NEXT:    fcvt    h5, s17
-; NOFP16-NEXT:    fcvt    h6, s18
-; NOFP16-NEXT:    mov.h   v0[2], v2[0]
-; NOFP16-NEXT:    mov.h   v0[3], v3[0]
-; NOFP16-NEXT:    mov.h   v0[4], v1[0]
-; NOFP16-NEXT:    mov.h   v0[5], v5[0]
-; NOFP16-NEXT:    mov.h   v0[6], v6[0]
-; NOFP16-NEXT:    fcvt    h1, s4
-; NOFP16-NEXT:    mov.h   v0[7]
+; NOFP16-NEXT:    fcvt h0, s3
+; NOFP16-NEXT:    fcvt h1, s5
+; NOFP16-NEXT:    mov.h v0[1], v1[0]
+; NOFP16-NEXT:    fcvt h2, s6
+; NOFP16-NEXT:    fcvt h3, s7
+; NOFP16-NEXT:    mov.h v0[2], v2[0]
+; NOFP16-NEXT:    fcvt h1, s16
+; NOFP16-NEXT:    mov.h v0[3], v3[0]
+; NOFP16-NEXT:    fcvt h5, s17
+; NOFP16-NEXT:    mov.h v0[4], v1[0]
+; NOFP16-NEXT:    fcvt h6, s18
+; NOFP16-NEXT:    mov.h v0[5], v5[0]
+; NOFP16-NEXT:    mov.h v0[6], v6[0]
+; NOFP16-NEXT:    fcvt h1, s4
+; NOFP16-NEXT:    mov.h v0[7], v1[0]
 ; NOFP16-NEXT:    ret
-
-; FP16-NEXT:    fcvtn   v2.4h, v2.4s
-; FP16-NEXT:    fcvtn   v1.4h, v1.4s
-; FP16-NEXT:    mov.d   v1[1], v2[0]
+;
+; FP16-LABEL: test_copysign_v8f16_v8f32:
+; FP16:       ; %bb.0:
+; FP16-NEXT:    fcvtn v2.4h, v2.4s
+; FP16-NEXT:    fcvtn v1.4h, v1.4s
+; FP16-NEXT:    mov.d v1[1], v2[0]
 ; FP16-NEXT:    movi.8h v2, #128, lsl #8
 ; FP16-NEXT:    bit.16b v0, v1, v2
 ; FP16-NEXT:    ret
