[llvm] f6c2ec2 - [AArch64] Add v1i64 test coverage and cleanup vqadd/vqsub tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Sun Jun 1 23:52:54 PDT 2025


Author: David Green
Date: 2025-06-02T07:52:49+01:00
New Revision: f6c2ec2fe1b02952c06b6129c89325dec80790bc

URL: https://github.com/llvm/llvm-project/commit/f6c2ec2fe1b02952c06b6129c89325dec80790bc
DIFF: https://github.com/llvm/llvm-project/commit/f6c2ec2fe1b02952c06b6129c89325dec80790bc.diff

LOG: [AArch64] Add v1i64 test coverage and cleanup vqadd/vqsub tests. NFC

These were missing, as per #142323

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/arm64-vqadd.ll
    llvm/test/CodeGen/AArch64/arm64-vqsub.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/arm64-vqadd.ll b/llvm/test/CodeGen/AArch64/arm64-vqadd.ll
index df8864fe6ea32..fa515fe352d8f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vqadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vqadd.ll
@@ -1,129 +1,225 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
 define <8 x i8> @sqadd8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqadd8b:
-;CHECK: sqadd.8b
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = load <8 x i8>, ptr %B
-	%tmp3 = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i8> %tmp3
+; CHECK-LABEL: sqadd8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqadd.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @sqadd4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqadd4h:
-;CHECK: sqadd.4h
-	%tmp1 = load <4 x i16>, ptr %A
-	%tmp2 = load <4 x i16>, ptr %B
-	%tmp3 = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i16> %tmp3
+; CHECK-LABEL: sqadd4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqadd.4h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @sqadd2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqadd2s:
-;CHECK: sqadd.2s
-	%tmp1 = load <2 x i32>, ptr %A
-	%tmp2 = load <2 x i32>, ptr %B
-	%tmp3 = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i32> %tmp3
+; CHECK-LABEL: sqadd2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqadd.2s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
+}
+
+define <1 x i64> @sqadd1d(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: sqadd1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    fmov x8, d1
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    adds x8, x9, x8
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x8, x9, x8, vs
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqadd.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
+  ret <1 x i64> %tmp3
 }
 
+
 define <8 x i8> @uqadd8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqadd8b:
-;CHECK: uqadd.8b
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = load <8 x i8>, ptr %B
-	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i8> %tmp3
+; CHECK-LABEL: uqadd8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqadd.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @uqadd4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqadd4h:
-;CHECK: uqadd.4h
-	%tmp1 = load <4 x i16>, ptr %A
-	%tmp2 = load <4 x i16>, ptr %B
-	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i16> %tmp3
+; CHECK-LABEL: uqadd4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqadd.4h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @uqadd2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqadd2s:
-;CHECK: uqadd.2s
-	%tmp1 = load <2 x i32>, ptr %A
-	%tmp2 = load <2 x i32>, ptr %B
-	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i32> %tmp3
+; CHECK-LABEL: uqadd2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqadd.2s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
+}
+
+define <1 x i64> @uqadd1d(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: uqadd1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    fmov x8, d1
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    adds x8, x9, x8
+; CHECK-NEXT:    csinv x8, x8, xzr, lo
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqadd.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
+  ret <1 x i64> %tmp3
 }
 
+
 define <16 x i8> @sqadd16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqadd16b:
-;CHECK: sqadd.16b
-	%tmp1 = load <16 x i8>, ptr %A
-	%tmp2 = load <16 x i8>, ptr %B
-	%tmp3 = call <16 x i8> @llvm.aarch64.neon.sqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-	ret <16 x i8> %tmp3
+; CHECK-LABEL: sqadd16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqadd.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @sqadd8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqadd8h:
-;CHECK: sqadd.8h
-	%tmp1 = load <8 x i16>, ptr %A
-	%tmp2 = load <8 x i16>, ptr %B
-	%tmp3 = call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-	ret <8 x i16> %tmp3
+; CHECK-LABEL: sqadd8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqadd.8h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @sqadd4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqadd4s:
-;CHECK: sqadd.4s
-	%tmp1 = load <4 x i32>, ptr %A
-	%tmp2 = load <4 x i32>, ptr %B
-	%tmp3 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-	ret <4 x i32> %tmp3
+; CHECK-LABEL: sqadd4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqadd.4s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @sqadd2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqadd2d:
-;CHECK: sqadd.2d
-	%tmp1 = load <2 x i64>, ptr %A
-	%tmp2 = load <2 x i64>, ptr %B
-	%tmp3 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
-	ret <2 x i64> %tmp3
+; CHECK-LABEL: sqadd2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqadd.2d v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+  ret <2 x i64> %tmp3
 }
 
+
 define <16 x i8> @uqadd16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqadd16b:
-;CHECK: uqadd.16b
-	%tmp1 = load <16 x i8>, ptr %A
-	%tmp2 = load <16 x i8>, ptr %B
-	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-	ret <16 x i8> %tmp3
+; CHECK-LABEL: uqadd16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqadd.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @uqadd8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqadd8h:
-;CHECK: uqadd.8h
-	%tmp1 = load <8 x i16>, ptr %A
-	%tmp2 = load <8 x i16>, ptr %B
-	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-	ret <8 x i16> %tmp3
+; CHECK-LABEL: uqadd8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqadd.8h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @uqadd4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqadd4s:
-;CHECK: uqadd.4s
-	%tmp1 = load <4 x i32>, ptr %A
-	%tmp2 = load <4 x i32>, ptr %B
-	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-	ret <4 x i32> %tmp3
+; CHECK-LABEL: uqadd4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqadd.4s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @uqadd2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqadd2d:
-;CHECK: uqadd.2d
-	%tmp1 = load <2 x i64>, ptr %A
-	%tmp2 = load <2 x i64>, ptr %B
-	%tmp3 = call <2 x i64> @llvm.aarch64.neon.uqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
-	ret <2 x i64> %tmp3
+; CHECK-LABEL: uqadd2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqadd.2d v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+  ret <2 x i64> %tmp3
 }
 
 declare <8 x i8>  @llvm.aarch64.neon.sqadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
@@ -147,78 +243,130 @@ declare <4 x i32> @llvm.aarch64.neon.uqadd.v4i32(<4 x i32>, <4 x i32>) nounwind
 declare <2 x i64> @llvm.aarch64.neon.uqadd.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
 
 define <8 x i8> @usqadd8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usqadd8b:
-;CHECK: usqadd.8b
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = load <8 x i8>, ptr %B
-	%tmp3 = call <8 x i8> @llvm.aarch64.neon.usqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i8> %tmp3
+; CHECK-LABEL: usqadd8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    usqadd.8b v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.usqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @usqadd4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usqadd4h:
-;CHECK: usqadd.4h
-	%tmp1 = load <4 x i16>, ptr %A
-	%tmp2 = load <4 x i16>, ptr %B
-	%tmp3 = call <4 x i16> @llvm.aarch64.neon.usqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i16> %tmp3
+; CHECK-LABEL: usqadd4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    usqadd.4h v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.usqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @usqadd2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usqadd2s:
-;CHECK: usqadd.2s
-	%tmp1 = load <2 x i32>, ptr %A
-	%tmp2 = load <2 x i32>, ptr %B
-	%tmp3 = call <2 x i32> @llvm.aarch64.neon.usqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i32> %tmp3
+; CHECK-LABEL: usqadd2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    usqadd.2s v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.usqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
+}
+
+define <1 x i64> @usqadd1d(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: usqadd1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    usqadd d0, d1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
+  ret <1 x i64> %tmp3
 }
 
+
 define <16 x i8> @usqadd16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usqadd16b:
-;CHECK: usqadd.16b
-	%tmp1 = load <16 x i8>, ptr %A
-	%tmp2 = load <16 x i8>, ptr %B
-	%tmp3 = call <16 x i8> @llvm.aarch64.neon.usqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-	ret <16 x i8> %tmp3
+; CHECK-LABEL: usqadd16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    usqadd.16b v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.usqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @usqadd8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usqadd8h:
-;CHECK: usqadd.8h
-	%tmp1 = load <8 x i16>, ptr %A
-	%tmp2 = load <8 x i16>, ptr %B
-	%tmp3 = call <8 x i16> @llvm.aarch64.neon.usqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-	ret <8 x i16> %tmp3
+; CHECK-LABEL: usqadd8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    usqadd.8h v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.usqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @usqadd4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usqadd4s:
-;CHECK: usqadd.4s
-	%tmp1 = load <4 x i32>, ptr %A
-	%tmp2 = load <4 x i32>, ptr %B
-	%tmp3 = call <4 x i32> @llvm.aarch64.neon.usqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-	ret <4 x i32> %tmp3
+; CHECK-LABEL: usqadd4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    usqadd.4s v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.usqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @usqadd2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usqadd2d:
-;CHECK: usqadd.2d
-	%tmp1 = load <2 x i64>, ptr %A
-	%tmp2 = load <2 x i64>, ptr %B
-	%tmp3 = call <2 x i64> @llvm.aarch64.neon.usqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
-	ret <2 x i64> %tmp3
+; CHECK-LABEL: usqadd2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    usqadd.2d v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.usqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+  ret <2 x i64> %tmp3
 }
 
 define i64 @usqadd_d(i64 %l, i64 %r) nounwind {
 ; CHECK-LABEL: usqadd_d:
-; CHECK: usqadd {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    fmov d1, x1
+; CHECK-NEXT:    usqadd d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
   %sum = call i64 @llvm.aarch64.neon.usqadd.i64(i64 %l, i64 %r)
   ret i64 %sum
 }
 
 define i32 @usqadd_s(i32 %l, i32 %r) nounwind {
 ; CHECK-LABEL: usqadd_s:
-; CHECK: usqadd {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s0, w0
+; CHECK-NEXT:    fmov s1, w1
+; CHECK-NEXT:    usqadd s0, s1
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %sum = call i32 @llvm.aarch64.neon.usqadd.i32(i32 %l, i32 %r)
   ret i32 %sum
 }
@@ -235,86 +383,132 @@ declare <8 x i16> @llvm.aarch64.neon.usqadd.v8i16(<8 x i16>, <8 x i16>) nounwind
 declare <4 x i32> @llvm.aarch64.neon.usqadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.usqadd.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
 
+
 define <8 x i8> @suqadd8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: suqadd8b:
-;CHECK: suqadd.8b
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = load <8 x i8>, ptr %B
-	%tmp3 = call <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i8> %tmp3
+; CHECK-LABEL: suqadd8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    suqadd.8b v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @suqadd4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: suqadd4h:
-;CHECK: suqadd.4h
-	%tmp1 = load <4 x i16>, ptr %A
-	%tmp2 = load <4 x i16>, ptr %B
-	%tmp3 = call <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i16> %tmp3
+; CHECK-LABEL: suqadd4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    suqadd.4h v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @suqadd2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: suqadd2s:
-;CHECK: suqadd.2s
-	%tmp1 = load <2 x i32>, ptr %A
-	%tmp2 = load <2 x i32>, ptr %B
-	%tmp3 = call <2 x i32> @llvm.aarch64.neon.suqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i32> %tmp3
+; CHECK-LABEL: suqadd2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    suqadd.2s v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.suqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
+}
+
+define <1 x i64> @suqadd1d(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: suqadd1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    suqadd d0, d1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.suqadd.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
+  ret <1 x i64> %tmp3
 }
 
+
 define <16 x i8> @suqadd16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: suqadd16b:
-;CHECK: suqadd.16b
-	%tmp1 = load <16 x i8>, ptr %A
-	%tmp2 = load <16 x i8>, ptr %B
-	%tmp3 = call <16 x i8> @llvm.aarch64.neon.suqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-	ret <16 x i8> %tmp3
+; CHECK-LABEL: suqadd16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    suqadd.16b v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.suqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @suqadd8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: suqadd8h:
-;CHECK: suqadd.8h
-	%tmp1 = load <8 x i16>, ptr %A
-	%tmp2 = load <8 x i16>, ptr %B
-	%tmp3 = call <8 x i16> @llvm.aarch64.neon.suqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-	ret <8 x i16> %tmp3
+; CHECK-LABEL: suqadd8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    suqadd.8h v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.suqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @suqadd4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: suqadd4s:
-;CHECK: suqadd.4s
-	%tmp1 = load <4 x i32>, ptr %A
-	%tmp2 = load <4 x i32>, ptr %B
-	%tmp3 = call <4 x i32> @llvm.aarch64.neon.suqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-	ret <4 x i32> %tmp3
+; CHECK-LABEL: suqadd4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    suqadd.4s v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.suqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @suqadd2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: suqadd2d:
-;CHECK: suqadd.2d
-	%tmp1 = load <2 x i64>, ptr %A
-	%tmp2 = load <2 x i64>, ptr %B
-	%tmp3 = call <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
-	ret <2 x i64> %tmp3
-}
-
-define <1 x i64> @suqadd_1d(<1 x i64> %l, <1 x i64> %r) nounwind {
-; CHECK-LABEL: suqadd_1d:
-; CHECK: suqadd {{d[0-9]+}}, {{d[0-9]+}}
-  %sum = call <1 x i64> @llvm.aarch64.neon.suqadd.v1i64(<1 x i64> %l, <1 x i64> %r)
-  ret <1 x i64> %sum
+; CHECK-LABEL: suqadd2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    suqadd.2d v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+  ret <2 x i64> %tmp3
 }
 
 define i64 @suqadd_d(i64 %l, i64 %r) nounwind {
 ; CHECK-LABEL: suqadd_d:
-; CHECK: suqadd {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    fmov d1, x1
+; CHECK-NEXT:    suqadd d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
   %sum = call i64 @llvm.aarch64.neon.suqadd.i64(i64 %l, i64 %r)
   ret i64 %sum
 }
 
 define i32 @suqadd_s(i32 %l, i32 %r) nounwind {
 ; CHECK-LABEL: suqadd_s:
-; CHECK: suqadd {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s0, w0
+; CHECK-NEXT:    fmov s1, w1
+; CHECK-NEXT:    suqadd s0, s1
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %sum = call i32 @llvm.aarch64.neon.suqadd.i32(i32 %l, i32 %r)
   ret i32 %sum
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-vqsub.ll b/llvm/test/CodeGen/AArch64/arm64-vqsub.ll
index dee21291fa149..ffcb7d668d637 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vqsub.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vqsub.ll
@@ -1,129 +1,225 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
 define <8 x i8> @sqsub8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub8b:
-;CHECK: sqsub.8b
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = load <8 x i8>, ptr %B
-	%tmp3 = call <8 x i8> @llvm.aarch64.neon.sqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i8> %tmp3
+; CHECK-LABEL: sqsub8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqsub.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @sqsub4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub4h:
-;CHECK: sqsub.4h
-	%tmp1 = load <4 x i16>, ptr %A
-	%tmp2 = load <4 x i16>, ptr %B
-	%tmp3 = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i16> %tmp3
+; CHECK-LABEL: sqsub4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqsub.4h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @sqsub2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub2s:
-;CHECK: sqsub.2s
-	%tmp1 = load <2 x i32>, ptr %A
-	%tmp2 = load <2 x i32>, ptr %B
-	%tmp3 = call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i32> %tmp3
+; CHECK-LABEL: sqsub2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqsub.2s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
 }
 
+define <1 x i64> @sqsub1d(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: sqsub1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    fmov x8, d1
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    subs x8, x9, x8
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x8, x9, x8, vs
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqsub.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
+  ret <1 x i64> %tmp3
+}
+
+
 define <8 x i8> @uqsub8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub8b:
-;CHECK: uqsub.8b
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = load <8 x i8>, ptr %B
-	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i8> %tmp3
+; CHECK-LABEL: uqsub8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqsub.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @uqsub4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub4h:
-;CHECK: uqsub.4h
-	%tmp1 = load <4 x i16>, ptr %A
-	%tmp2 = load <4 x i16>, ptr %B
-	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i16> %tmp3
+; CHECK-LABEL: uqsub4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqsub.4h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @uqsub2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub2s:
-;CHECK: uqsub.2s
-	%tmp1 = load <2 x i32>, ptr %A
-	%tmp2 = load <2 x i32>, ptr %B
-	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i32> %tmp3
+; CHECK-LABEL: uqsub2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqsub.2s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
 }
 
+define <1 x i64> @uqsub1d(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: uqsub1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    fmov x8, d1
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    subs x8, x9, x8
+; CHECK-NEXT:    csel x8, xzr, x8, lo
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqsub.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
+  ret <1 x i64> %tmp3
+}
+
+
 define <16 x i8> @sqsub16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub16b:
-;CHECK: sqsub.16b
-	%tmp1 = load <16 x i8>, ptr %A
-	%tmp2 = load <16 x i8>, ptr %B
-	%tmp3 = call <16 x i8> @llvm.aarch64.neon.sqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-	ret <16 x i8> %tmp3
+; CHECK-LABEL: sqsub16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqsub.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @sqsub8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub8h:
-;CHECK: sqsub.8h
-	%tmp1 = load <8 x i16>, ptr %A
-	%tmp2 = load <8 x i16>, ptr %B
-	%tmp3 = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-	ret <8 x i16> %tmp3
+; CHECK-LABEL: sqsub8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqsub.8h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @sqsub4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub4s:
-;CHECK: sqsub.4s
-	%tmp1 = load <4 x i32>, ptr %A
-	%tmp2 = load <4 x i32>, ptr %B
-	%tmp3 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-	ret <4 x i32> %tmp3
+; CHECK-LABEL: sqsub4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqsub.4s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @sqsub2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub2d:
-;CHECK: sqsub.2d
-	%tmp1 = load <2 x i64>, ptr %A
-	%tmp2 = load <2 x i64>, ptr %B
-	%tmp3 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
-	ret <2 x i64> %tmp3
+; CHECK-LABEL: sqsub2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqsub.2d v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+  ret <2 x i64> %tmp3
 }
 
+
 define <16 x i8> @uqsub16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub16b:
-;CHECK: uqsub.16b
-	%tmp1 = load <16 x i8>, ptr %A
-	%tmp2 = load <16 x i8>, ptr %B
-	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-	ret <16 x i8> %tmp3
+; CHECK-LABEL: uqsub16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqsub.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @uqsub8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub8h:
-;CHECK: uqsub.8h
-	%tmp1 = load <8 x i16>, ptr %A
-	%tmp2 = load <8 x i16>, ptr %B
-	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-	ret <8 x i16> %tmp3
+; CHECK-LABEL: uqsub8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqsub.8h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @uqsub4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub4s:
-;CHECK: uqsub.4s
-	%tmp1 = load <4 x i32>, ptr %A
-	%tmp2 = load <4 x i32>, ptr %B
-	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uqsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-	ret <4 x i32> %tmp3
+; CHECK-LABEL: uqsub4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqsub.4s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @uqsub2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub2d:
-;CHECK: uqsub.2d
-	%tmp1 = load <2 x i64>, ptr %A
-	%tmp2 = load <2 x i64>, ptr %B
-	%tmp3 = call <2 x i64> @llvm.aarch64.neon.uqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
-	ret <2 x i64> %tmp3
+; CHECK-LABEL: uqsub2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqsub.2d v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+  ret <2 x i64> %tmp3
 }
 
 declare <8 x i8>  @llvm.aarch64.neon.sqsub.v8i8(<8 x i8>, <8 x i8>) nounwind readnone


        


More information about the llvm-commits mailing list