[llvm] d20afbd - [AArch64] Additional testing for uqshl and regenerate arm64-vshift.ll. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Tue May 16 05:16:47 PDT 2023


Author: David Green
Date: 2023-05-16T13:16:42+01:00
New Revision: d20afbd9025b536ee9ae3b3c34b8e643a410f743

URL: https://github.com/llvm/llvm-project/commit/d20afbd9025b536ee9ae3b3c34b8e643a410f743
DIFF: https://github.com/llvm/llvm-project/commit/d20afbd9025b536ee9ae3b3c34b8e643a410f743.diff

LOG: [AArch64] Additional testing for uqshl and regenerate arm64-vshift.ll. NFC

This fills in some missing test coverage for the NEON shift intrinsics and
regenerates the existing tests. See D148309 and D148311.
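
For reference, the CHECK lines in these tests are produced by the script named in
each test's header comment. A rough sketch of the regeneration step, assuming a
built llc is on PATH (or pointed to via the script's --llc-binary option) and the
command is run from the llvm-project source root; the paths here are illustrative,
not part of this commit:

    llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AArch64/arm64-vshift.ll

The script reads the test's own RUN lines to decide how to invoke llc and then
rewrites the autogenerated assertions in place.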

Added: 
    llvm/test/CodeGen/AArch64/add-extract.ll

Modified: 
    llvm/test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
    llvm/test/CodeGen/AArch64/arm64-vshift.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/add-extract.ll b/llvm/test/CodeGen/AArch64/add-extract.ll
new file mode 100644
index 0000000000000..34aac188101e1
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/add-extract.ll
@@ -0,0 +1,124 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
+
+define i64 @add_i64_ext_load(<1 x i64> %A, ptr %B) nounwind {
+; CHECK-LABEL: add_i64_ext_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
+  %a = extractelement <1 x i64> %A, i32 0
+  %b = load i64, ptr %B
+  %c = add i64 %a, %b
+  ret i64 %c
+}
+
+define i64 @sub_i64_ext_load(<1 x i64> %A, ptr %B) nounwind {
+; CHECK-LABEL: sub_i64_ext_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    sub x0, x9, x8
+; CHECK-NEXT:    ret
+  %a = extractelement <1 x i64> %A, i32 0
+  %b = load i64, ptr %B
+  %c = sub i64 %a, %b
+  ret i64 %c
+}
+
+define void @add_i64_ext_load_store(<1 x i64> %A, ptr %B) nounwind {
+; CHECK-LABEL: add_i64_ext_load_store:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    add x8, x9, x8
+; CHECK-NEXT:    str x8, [x0]
+; CHECK-NEXT:    ret
+  %a = extractelement <1 x i64> %A, i32 0
+  %b = load i64, ptr %B
+  %c = add i64 %a, %b
+  store i64 %c, ptr %B
+  ret void
+}
+
+define i64 @add_v2i64_ext_load(<2 x i64> %A, ptr %B) nounwind {
+; CHECK-LABEL: add_v2i64_ext_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
+  %a = extractelement <2 x i64> %A, i32 0
+  %b = load i64, ptr %B
+  %c = add i64 %a, %b
+  ret i64 %c
+}
+
+define i64 @add_i64_ext_ext(<1 x i64> %A, <1 x i64> %B) nounwind {
+; CHECK-LABEL: add_i64_ext_ext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    fmov x9, d1
+; CHECK-NEXT:    add x0, x8, x9
+; CHECK-NEXT:    ret
+  %a = extractelement <1 x i64> %A, i32 0
+  %b = extractelement <1 x i64> %B, i32 0
+  %c = add i64 %a, %b
+  ret i64 %c
+}
+
+define i32 @add_i32_ext_load(<1 x i32> %A, ptr %B) nounwind {
+; CHECK-LABEL: add_i32_ext_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    add w0, w9, w8
+; CHECK-NEXT:    ret
+  %a = extractelement <1 x i32> %A, i32 0
+  %b = load i32, ptr %B
+  %c = add i32 %a, %b
+  ret i32 %c
+}
+
+define i64 @add_i64_ext_ext_test1(<1 x i64> %A, <2 x i64> %B) nounwind {
+; CHECK-LABEL: add_i64_ext_ext_test1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov x8, v1.d[1]
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    fmov x10, d1
+; CHECK-NEXT:    add x9, x9, x10
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
+  %a = extractelement <1 x i64> %A, i32 0
+  %b = extractelement <2 x i64> %B, i32 0
+  %c = extractelement <2 x i64> %B, i32 1
+  %d = add i64 %a, %b
+  %e = add i64 %d, %c
+  ret i64 %e
+}
+
+define i64 @sub_i64_ext_ext_test1(<1 x i64> %A, <2 x i64> %B) nounwind {
+; CHECK-LABEL: sub_i64_ext_ext_test1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov x8, v1.d[1]
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    fmov x10, d1
+; CHECK-NEXT:    sub x9, x9, x10
+; CHECK-NEXT:    sub x0, x9, x8
+; CHECK-NEXT:    ret
+  %a = extractelement <1 x i64> %A, i32 0
+  %b = extractelement <2 x i64> %B, i32 0
+  %c = extractelement <2 x i64> %B, i32 1
+  %d = sub i64 %a, %b
+  %e = sub i64 %d, %c
+  ret i64 %e
+}

diff  --git a/llvm/test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll b/llvm/test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
index 79ed067d9ad49..0a215a1539b3a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
@@ -1,19 +1,53 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi | FileCheck %s
 
-; Check if sqshl/uqshl with constant shift amout can be selected. 
+; Check if sqshl/uqshl with constant shift amount can be selected.
 define i64 @test_vqshld_s64_i(i64 %a) {
 ; CHECK-LABEL: test_vqshld_s64_i:
-; CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, #36
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    sqshl d0, d0, #36
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
   %1 = tail call i64 @llvm.aarch64.neon.sqshl.i64(i64 %a, i64 36)
   ret i64 %1
 }
 
 define i64 @test_vqshld_u64_i(i64 %a) {
 ; CHECK-LABEL: test_vqshld_u64_i:
-; CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, #36
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    uqshl d0, d0, #36
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
   %1 = tail call i64 @llvm.aarch64.neon.uqshl.i64(i64 %a, i64 36)
   ret i64 %1
 }
 
+define i32 @test_vqshld_s32_i(i32 %a) {
+; CHECK-LABEL: test_vqshld_s32_i:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s0, w0
+; CHECK-NEXT:    sqshl s0, s0, #16
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %1 = tail call i32 @llvm.aarch64.neon.sqshl.i32(i32 %a, i32 16)
+  ret i32 %1
+}
+
+define i32 @test_vqshld_u32_i(i32 %a) {
+; CHECK-LABEL: test_vqshld_u32_i:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s0, w0
+; CHECK-NEXT:    uqshl s0, s0, #16
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %1 = tail call i32 @llvm.aarch64.neon.uqshl.i32(i32 %a, i32 16)
+  ret i32 %1
+}
+
 declare i64 @llvm.aarch64.neon.uqshl.i64(i64, i64)
 declare i64 @llvm.aarch64.neon.sqshl.i64(i64, i64)
+
+declare i32 @llvm.aarch64.neon.uqshl.i32(i32, i32)
+declare i32 @llvm.aarch64.neon.sqshl.i32(i32, i32)

diff  --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 7805f2917715b..dca690e1f693b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -1,197 +1,292 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -enable-misched=false | FileCheck %s
 
 define <8 x i8> @sqshl8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqshl8b:
-;CHECK: sqshl.8b
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp2 = load <8 x i8>, ptr %B
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: sqshl8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqshl.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @sqshl4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqshl4h:
-;CHECK: sqshl.4h
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp2 = load <4 x i16>, ptr %B
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: sqshl4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqshl.4h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @sqshl2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqshl2s:
-;CHECK: sqshl.2s
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp2 = load <2 x i32>, ptr %B
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: sqshl2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqshl.2s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
 }
 
 define <1 x i64> @sqshl1d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqshl1d:
-;CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp2 = load <1 x i64>, ptr %B
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: sqshl1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqshl d0, d0, d1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
+  ret <1 x i64> %tmp3
 }
 
 define <1 x i64> @sqshl1d_constant(ptr %A) nounwind {
-;CHECK-LABEL: sqshl1d_constant:
-;CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: sqshl1d_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sqshl d0, d0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
+  ret <1 x i64> %tmp3
 }
 
 define i64 @sqshl_scalar(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqshl_scalar:
-;CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load i64, ptr %A
-        %tmp2 = load i64, ptr %B
-        %tmp3 = call i64 @llvm.aarch64.neon.sqshl.i64(i64 %tmp1, i64 %tmp2)
-        ret i64 %tmp3
+; CHECK-LABEL: sqshl_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    ldr x9, [x1]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    sqshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp2 = load i64, ptr %B
+  %tmp3 = call i64 @llvm.aarch64.neon.sqshl.i64(i64 %tmp1, i64 %tmp2)
+  ret i64 %tmp3
 }
 
 define i64 @sqshl_scalar_constant(ptr %A) nounwind {
-;CHECK-LABEL: sqshl_scalar_constant:
-;CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, ptr %A
-        %tmp3 = call i64 @llvm.aarch64.neon.sqshl.i64(i64 %tmp1, i64 1)
-        ret i64 %tmp3
+; CHECK-LABEL: sqshl_scalar_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    sqshl d0, d0, #1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp3 = call i64 @llvm.aarch64.neon.sqshl.i64(i64 %tmp1, i64 1)
+  ret i64 %tmp3
 }
 
 define <8 x i8> @uqshl8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqshl8b:
-;CHECK: uqshl.8b
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp2 = load <8 x i8>, ptr %B
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: uqshl8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqshl.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @uqshl4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqshl4h:
-;CHECK: uqshl.4h
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp2 = load <4 x i16>, ptr %B
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: uqshl4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqshl.4h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @uqshl2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqshl2s:
-;CHECK: uqshl.2s
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp2 = load <2 x i32>, ptr %B
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: uqshl2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqshl.2s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @sqshl16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqshl16b:
-;CHECK: sqshl.16b
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp2 = load <16 x i8>, ptr %B
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-        ret <16 x i8> %tmp3
+; CHECK-LABEL: sqshl16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqshl.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @sqshl8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqshl8h:
-;CHECK: sqshl.8h
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp2 = load <8 x i16>, ptr %B
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: sqshl8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqshl.8h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @sqshl4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqshl4s:
-;CHECK: sqshl.4s
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp2 = load <4 x i32>, ptr %B
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: sqshl4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqshl.4s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @sqshl2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqshl2d:
-;CHECK: sqshl.2d
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp2 = load <2 x i64>, ptr %B
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: sqshl2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqshl.2d v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+  ret <2 x i64> %tmp3
 }
 
 define <16 x i8> @uqshl16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqshl16b:
-;CHECK: uqshl.16b
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp2 = load <16 x i8>, ptr %B
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-        ret <16 x i8> %tmp3
+; CHECK-LABEL: uqshl16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqshl.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @uqshl8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqshl8h:
-;CHECK: uqshl.8h
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp2 = load <8 x i16>, ptr %B
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: uqshl8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqshl.8h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @uqshl4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqshl4s:
-;CHECK: uqshl.4s
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp2 = load <4 x i32>, ptr %B
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: uqshl4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqshl.4s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @uqshl2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqshl2d:
-;CHECK: uqshl.2d
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp2 = load <2 x i64>, ptr %B
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: uqshl2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqshl.2d v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+  ret <2 x i64> %tmp3
 }
 
 define <1 x i64> @uqshl1d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqshl1d:
-;CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp2 = load <1 x i64>, ptr %B
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: uqshl1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqshl d0, d0, d1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
+  ret <1 x i64> %tmp3
 }
 
 define <1 x i64> @uqshl1d_constant(ptr %A) nounwind {
-;CHECK-LABEL: uqshl1d_constant:
-;CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: uqshl1d_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    uqshl d0, d0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
+  ret <1 x i64> %tmp3
 }
 
 define i64 @uqshl_scalar(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqshl_scalar:
-;CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load i64, ptr %A
-        %tmp2 = load i64, ptr %B
-        %tmp3 = call i64 @llvm.aarch64.neon.uqshl.i64(i64 %tmp1, i64 %tmp2)
-        ret i64 %tmp3
+; CHECK-LABEL: uqshl_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    ldr x9, [x1]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    uqshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp2 = load i64, ptr %B
+  %tmp3 = call i64 @llvm.aarch64.neon.uqshl.i64(i64 %tmp1, i64 %tmp2)
+  ret i64 %tmp3
 }
 
 define i64 @uqshl_scalar_constant(ptr %A) nounwind {
-;CHECK-LABEL: uqshl_scalar_constant:
-;CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, ptr %A
-        %tmp3 = call i64 @llvm.aarch64.neon.uqshl.i64(i64 %tmp1, i64 1)
-        ret i64 %tmp3
+; CHECK-LABEL: uqshl_scalar_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    uqshl d0, d0, #1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp3 = call i64 @llvm.aarch64.neon.uqshl.i64(i64 %tmp1, i64 1)
+  ret i64 %tmp3
 }
 
 declare <8 x i8>  @llvm.aarch64.neon.sqshl.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
@@ -218,205 +313,299 @@ declare <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32>, <4 x i32>) nounwind
 declare <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
 
 define <8 x i8> @srshl8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srshl8b:
-;CHECK: srshl.8b
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp2 = load <8 x i8>, ptr %B
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: srshl8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    srshl.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @srshl4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srshl4h:
-;CHECK: srshl.4h
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp2 = load <4 x i16>, ptr %B
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: srshl4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    srshl.4h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @srshl2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srshl2s:
-;CHECK: srshl.2s
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp2 = load <2 x i32>, ptr %B
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: srshl2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    srshl.2s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
 }
 
 define <1 x i64> @srshl1d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srshl1d:
-;CHECK: srshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp2 = load <1 x i64>, ptr %B
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: srshl1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    srshl d0, d0, d1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
+  ret <1 x i64> %tmp3
 }
 
 define <1 x i64> @srshl1d_constant(ptr %A) nounwind {
-;CHECK-LABEL: srshl1d_constant:
-;CHECK: mov w[[GCONST:[0-9]+]], #1
-;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
-;CHECK: srshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: srshl1d_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    srshl d0, d0, d1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
+  ret <1 x i64> %tmp3
 }
 
 define i64 @srshl_scalar(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srshl_scalar:
-;CHECK: srshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load i64, ptr %A
-        %tmp2 = load i64, ptr %B
-        %tmp3 = call i64 @llvm.aarch64.neon.srshl.i64(i64 %tmp1, i64 %tmp2)
-        ret i64 %tmp3
+; CHECK-LABEL: srshl_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    ldr x9, [x1]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    srshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp2 = load i64, ptr %B
+  %tmp3 = call i64 @llvm.aarch64.neon.srshl.i64(i64 %tmp1, i64 %tmp2)
+  ret i64 %tmp3
 }
 
 define i64 @srshl_scalar_constant(ptr %A) nounwind {
-;CHECK-LABEL: srshl_scalar_constant:
-;CHECK: mov w[[GCONST:[0-9]+]], #1
-;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
-;CHECK: srshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load i64, ptr %A
-        %tmp3 = call i64 @llvm.aarch64.neon.srshl.i64(i64 %tmp1, i64 1)
-        ret i64 %tmp3
+; CHECK-LABEL: srshl_scalar_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov w9, #1 // =0x1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    srshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp3 = call i64 @llvm.aarch64.neon.srshl.i64(i64 %tmp1, i64 1)
+  ret i64 %tmp3
 }
 
 define <8 x i8> @urshl8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: urshl8b:
-;CHECK: urshl.8b
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp2 = load <8 x i8>, ptr %B
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: urshl8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    urshl.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @urshl4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: urshl4h:
-;CHECK: urshl.4h
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp2 = load <4 x i16>, ptr %B
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: urshl4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    urshl.4h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @urshl2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: urshl2s:
-;CHECK: urshl.2s
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp2 = load <2 x i32>, ptr %B
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: urshl2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    urshl.2s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
 }
 
 define <1 x i64> @urshl1d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: urshl1d:
-;CHECK: urshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp2 = load <1 x i64>, ptr %B
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: urshl1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    urshl d0, d0, d1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
+  ret <1 x i64> %tmp3
 }
 
 define <1 x i64> @urshl1d_constant(ptr %A) nounwind {
-;CHECK-LABEL: urshl1d_constant:
-;CHECK: mov w[[GCONST:[0-9]+]], #1
-;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
-;CHECK: urshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: urshl1d_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    urshl d0, d0, d1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
+  ret <1 x i64> %tmp3
 }
 
 define i64 @urshl_scalar(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: urshl_scalar:
-;CHECK: urshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load i64, ptr %A
-        %tmp2 = load i64, ptr %B
-        %tmp3 = call i64 @llvm.aarch64.neon.urshl.i64(i64 %tmp1, i64 %tmp2)
-        ret i64 %tmp3
+; CHECK-LABEL: urshl_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    ldr x9, [x1]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    urshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp2 = load i64, ptr %B
+  %tmp3 = call i64 @llvm.aarch64.neon.urshl.i64(i64 %tmp1, i64 %tmp2)
+  ret i64 %tmp3
 }
 
 define i64 @urshl_scalar_constant(ptr %A) nounwind {
-;CHECK-LABEL: urshl_scalar_constant:
-;CHECK: mov w[[GCONST:[0-9]+]], #1
-;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
-;CHECK: urshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load i64, ptr %A
-        %tmp3 = call i64 @llvm.aarch64.neon.urshl.i64(i64 %tmp1, i64 1)
-        ret i64 %tmp3
+; CHECK-LABEL: urshl_scalar_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov w9, #1 // =0x1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    urshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp3 = call i64 @llvm.aarch64.neon.urshl.i64(i64 %tmp1, i64 1)
+  ret i64 %tmp3
 }
 
 define <16 x i8> @srshl16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srshl16b:
-;CHECK: srshl.16b
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp2 = load <16 x i8>, ptr %B
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-        ret <16 x i8> %tmp3
+; CHECK-LABEL: srshl16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    srshl.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @srshl8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srshl8h:
-;CHECK: srshl.8h
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp2 = load <8 x i16>, ptr %B
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: srshl8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    srshl.8h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @srshl4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srshl4s:
-;CHECK: srshl.4s
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp2 = load <4 x i32>, ptr %B
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: srshl4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    srshl.4s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @srshl2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srshl2d:
-;CHECK: srshl.2d
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp2 = load <2 x i64>, ptr %B
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: srshl2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    srshl.2d v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+  ret <2 x i64> %tmp3
 }
 
 define <16 x i8> @urshl16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: urshl16b:
-;CHECK: urshl.16b
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp2 = load <16 x i8>, ptr %B
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-        ret <16 x i8> %tmp3
+; CHECK-LABEL: urshl16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    urshl.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @urshl8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: urshl8h:
-;CHECK: urshl.8h
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp2 = load <8 x i16>, ptr %B
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: urshl8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    urshl.8h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @urshl4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: urshl4s:
-;CHECK: urshl.4s
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp2 = load <4 x i32>, ptr %B
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: urshl4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    urshl.4s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @urshl2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: urshl2d:
-;CHECK: urshl.2d
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp2 = load <2 x i64>, ptr %B
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: urshl2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    urshl.2d v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+  ret <2 x i64> %tmp3
 }
 
 declare <8 x i8>  @llvm.aarch64.neon.srshl.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
@@ -442,205 +631,299 @@ declare <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32>, <4 x i32>) nounwind
 declare <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
 
 define <8 x i8> @sqrshl8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqrshl8b:
-;CHECK: sqrshl.8b
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp2 = load <8 x i8>, ptr %B
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: sqrshl8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqrshl.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @sqrshl4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqrshl4h:
-;CHECK: sqrshl.4h
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp2 = load <4 x i16>, ptr %B
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: sqrshl4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqrshl.4h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @sqrshl2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqrshl2s:
-;CHECK: sqrshl.2s
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp2 = load <2 x i32>, ptr %B
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: sqrshl2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqrshl.2s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
 }
 
 define <8 x i8> @uqrshl8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqrshl8b:
-;CHECK: uqrshl.8b
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp2 = load <8 x i8>, ptr %B
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: uqrshl8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqrshl.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @uqrshl4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqrshl4h:
-;CHECK: uqrshl.4h
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp2 = load <4 x i16>, ptr %B
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: uqrshl4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqrshl.4h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @uqrshl2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqrshl2s:
-;CHECK: uqrshl.2s
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp2 = load <2 x i32>, ptr %B
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: uqrshl2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqrshl.2s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @sqrshl16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqrshl16b:
-;CHECK: sqrshl.16b
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp2 = load <16 x i8>, ptr %B
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqrshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-        ret <16 x i8> %tmp3
+; CHECK-LABEL: sqrshl16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqrshl.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqrshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @sqrshl8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqrshl8h:
-;CHECK: sqrshl.8h
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp2 = load <8 x i16>, ptr %B
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqrshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: sqrshl8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqrshl.8h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqrshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @sqrshl4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqrshl4s:
-;CHECK: sqrshl.4s
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp2 = load <4 x i32>, ptr %B
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqrshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: sqrshl4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqrshl.4s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqrshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @sqrshl2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqrshl2d:
-;CHECK: sqrshl.2d
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp2 = load <2 x i64>, ptr %B
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqrshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: sqrshl2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqrshl.2d v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqrshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+  ret <2 x i64> %tmp3
 }
 
 define <1 x i64> @sqrshl1d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqrshl1d:
-;CHECK: sqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp2 = load <1 x i64>, ptr %B
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqrshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: sqrshl1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqrshl d0, d0, d1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqrshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
+  ret <1 x i64> %tmp3
 }
 
 define <1 x i64> @sqrshl1d_constant(ptr %A) nounwind {
-;CHECK-LABEL: sqrshl1d_constant:
-;CHECK: mov w[[GCONST:[0-9]+]], #1
-;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
-;CHECK: sqrshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqrshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: sqrshl1d_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    sqrshl d0, d0, d1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqrshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
+  ret <1 x i64> %tmp3
 }
 
 define i64 @sqrshl_scalar(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqrshl_scalar:
-;CHECK: sqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load i64, ptr %A
-        %tmp2 = load i64, ptr %B
-        %tmp3 = call i64 @llvm.aarch64.neon.sqrshl.i64(i64 %tmp1, i64 %tmp2)
-        ret i64 %tmp3
+; CHECK-LABEL: sqrshl_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    ldr x9, [x1]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    sqrshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp2 = load i64, ptr %B
+  %tmp3 = call i64 @llvm.aarch64.neon.sqrshl.i64(i64 %tmp1, i64 %tmp2)
+  ret i64 %tmp3
 }
 
 define i64 @sqrshl_scalar_constant(ptr %A) nounwind {
-;CHECK-LABEL: sqrshl_scalar_constant:
-;CHECK: mov w[[GCONST:[0-9]+]], #1
-;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
-;CHECK: sqrshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load i64, ptr %A
-        %tmp3 = call i64 @llvm.aarch64.neon.sqrshl.i64(i64 %tmp1, i64 1)
-        ret i64 %tmp3
+; CHECK-LABEL: sqrshl_scalar_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov w9, #1 // =0x1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    sqrshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp3 = call i64 @llvm.aarch64.neon.sqrshl.i64(i64 %tmp1, i64 1)
+  ret i64 %tmp3
 }
 
 define <16 x i8> @uqrshl16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqrshl16b:
-;CHECK: uqrshl.16b
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp2 = load <16 x i8>, ptr %B
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqrshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-        ret <16 x i8> %tmp3
+; CHECK-LABEL: uqrshl16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqrshl.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqrshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @uqrshl8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqrshl8h:
-;CHECK: uqrshl.8h
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp2 = load <8 x i16>, ptr %B
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqrshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: uqrshl8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqrshl.8h v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqrshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @uqrshl4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqrshl4s:
-;CHECK: uqrshl.4s
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp2 = load <4 x i32>, ptr %B
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqrshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: uqrshl4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqrshl.4s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqrshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @uqrshl2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqrshl2d:
-;CHECK: uqrshl.2d
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp2 = load <2 x i64>, ptr %B
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqrshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: uqrshl2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqrshl.2d v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqrshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+  ret <2 x i64> %tmp3
 }
 
 define <1 x i64> @uqrshl1d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqrshl1d:
-;CHECK: uqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp2 = load <1 x i64>, ptr %B
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqrshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: uqrshl1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqrshl d0, d0, d1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqrshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
+  ret <1 x i64> %tmp3
 }
 
 define <1 x i64> @uqrshl1d_constant(ptr %A) nounwind {
-;CHECK-LABEL: uqrshl1d_constant:
-;CHECK: mov w[[GCONST:[0-9]+]], #1
-;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
-;CHECK: uqrshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqrshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: uqrshl1d_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    uqrshl d0, d0, d1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqrshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
+  ret <1 x i64> %tmp3
 }
 
 define i64 @uqrshl_scalar(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqrshl_scalar:
-;CHECK: uqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load i64, ptr %A
-        %tmp2 = load i64, ptr %B
-        %tmp3 = call i64 @llvm.aarch64.neon.uqrshl.i64(i64 %tmp1, i64 %tmp2)
-        ret i64 %tmp3
+; CHECK-LABEL: uqrshl_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    ldr x9, [x1]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    uqrshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp2 = load i64, ptr %B
+  %tmp3 = call i64 @llvm.aarch64.neon.uqrshl.i64(i64 %tmp1, i64 %tmp2)
+  ret i64 %tmp3
 }
 
 define i64 @uqrshl_scalar_constant(ptr %A) nounwind {
-;CHECK-LABEL: uqrshl_scalar_constant:
-;CHECK: mov w[[GCONST:[0-9]+]], #1
-;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
-;CHECK: uqrshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load i64, ptr %A
-        %tmp3 = call i64 @llvm.aarch64.neon.uqrshl.i64(i64 %tmp1, i64 1)
-        ret i64 %tmp3
+; CHECK-LABEL: uqrshl_scalar_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov w9, #1 // =0x1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    uqrshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp3 = call i64 @llvm.aarch64.neon.uqrshl.i64(i64 %tmp1, i64 1)
+  ret i64 %tmp3
 }
 
 declare <8 x i8>  @llvm.aarch64.neon.sqrshl.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
@@ -666,219 +949,319 @@ declare <4 x i32> @llvm.aarch64.neon.uqrshl.v4i32(<4 x i32>, <4 x i32>) nounwind
 declare <2 x i64> @llvm.aarch64.neon.uqrshl.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
 
 define <8 x i8> @urshr8b(ptr %A) nounwind {
-;CHECK-LABEL: urshr8b:
-;CHECK: urshr.8b
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: urshr8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    urshr.8b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @urshr4h(ptr %A) nounwind {
-;CHECK-LABEL: urshr4h:
-;CHECK: urshr.4h
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: urshr4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    urshr.4h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @urshr2s(ptr %A) nounwind {
-;CHECK-LABEL: urshr2s:
-;CHECK: urshr.2s
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: urshr2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    urshr.2s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @urshr16b(ptr %A) nounwind {
-;CHECK-LABEL: urshr16b:
-;CHECK: urshr.16b
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        ret <16 x i8> %tmp3
+; CHECK-LABEL: urshr16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    urshr.16b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @urshr8h(ptr %A) nounwind {
-;CHECK-LABEL: urshr8h:
-;CHECK: urshr.8h
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: urshr8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    urshr.8h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @urshr4s(ptr %A) nounwind {
-;CHECK-LABEL: urshr4s:
-;CHECK: urshr.4s
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: urshr4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    urshr.4s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @urshr2d(ptr %A) nounwind {
-;CHECK-LABEL: urshr2d:
-;CHECK: urshr.2d
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: urshr2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    urshr.2d v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
+  ret <2 x i64> %tmp3
 }
 
 define <1 x i64> @urshr1d(ptr %A) nounwind {
-;CHECK-LABEL: urshr1d:
-;CHECK: urshr {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 -1>)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: urshr1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    urshr d0, d0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 -1>)
+  ret <1 x i64> %tmp3
 }
 
 define i64 @urshr_scalar(ptr %A) nounwind {
-;CHECK-LABEL: urshr_scalar:
-;CHECK: urshr {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, ptr %A
-        %tmp3 = call i64 @llvm.aarch64.neon.urshl.i64(i64 %tmp1, i64 -1)
-        ret i64 %tmp3
+; CHECK-LABEL: urshr_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    urshr d0, d0, #1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp3 = call i64 @llvm.aarch64.neon.urshl.i64(i64 %tmp1, i64 -1)
+  ret i64 %tmp3
 }
 
 define <8 x i8> @srshr8b(ptr %A) nounwind {
-;CHECK-LABEL: srshr8b:
-;CHECK: srshr.8b
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: srshr8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    srshr.8b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @srshr4h(ptr %A) nounwind {
-;CHECK-LABEL: srshr4h:
-;CHECK: srshr.4h
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: srshr4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    srshr.4h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @srshr2s(ptr %A) nounwind {
-;CHECK-LABEL: srshr2s:
-;CHECK: srshr.2s
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: srshr2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    srshr.2s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @srshr16b(ptr %A) nounwind {
-;CHECK-LABEL: srshr16b:
-;CHECK: srshr.16b
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        ret <16 x i8> %tmp3
+; CHECK-LABEL: srshr16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    srshr.16b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @srshr8h(ptr %A) nounwind {
-;CHECK-LABEL: srshr8h:
-;CHECK: srshr.8h
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: srshr8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    srshr.8h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @srshr4s(ptr %A) nounwind {
-;CHECK-LABEL: srshr4s:
-;CHECK: srshr.4s
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: srshr4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    srshr.4s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @srshr2d(ptr %A) nounwind {
-;CHECK-LABEL: srshr2d:
-;CHECK: srshr.2d
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: srshr2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    srshr.2d v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
+  ret <2 x i64> %tmp3
 }
 
 define <1 x i64> @srshr1d(ptr %A) nounwind {
-;CHECK-LABEL: srshr1d:
-;CHECK: srshr {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 -1>)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: srshr1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    srshr d0, d0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 -1>)
+  ret <1 x i64> %tmp3
 }
 
 define i64 @srshr_scalar(ptr %A) nounwind {
-;CHECK-LABEL: srshr_scalar:
-;CHECK: srshr {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, ptr %A
-        %tmp3 = call i64 @llvm.aarch64.neon.srshl.i64(i64 %tmp1, i64 -1)
-        ret i64 %tmp3
+; CHECK-LABEL: srshr_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    srshr d0, d0, #1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp3 = call i64 @llvm.aarch64.neon.srshl.i64(i64 %tmp1, i64 -1)
+  ret i64 %tmp3
 }
 
 define <8 x i8> @sqshlu8b(ptr %A) nounwind {
-;CHECK-LABEL: sqshlu8b:
-;CHECK: sqshlu.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshlu.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: sqshlu8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sqshlu.8b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshlu.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @sqshlu4h(ptr %A) nounwind {
-;CHECK-LABEL: sqshlu4h:
-;CHECK: sqshlu.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshlu.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: sqshlu4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sqshlu.4h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshlu.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @sqshlu2s(ptr %A) nounwind {
-;CHECK-LABEL: sqshlu2s:
-;CHECK: sqshlu.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshlu.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: sqshlu2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sqshlu.2s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshlu.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @sqshlu16b(ptr %A) nounwind {
-;CHECK-LABEL: sqshlu16b:
-;CHECK: sqshlu.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshlu.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
-        ret <16 x i8> %tmp3
+; CHECK-LABEL: sqshlu16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshlu.16b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshlu.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @sqshlu8h(ptr %A) nounwind {
-;CHECK-LABEL: sqshlu8h:
-;CHECK: sqshlu.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshlu.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: sqshlu8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshlu.8h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshlu.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @sqshlu4s(ptr %A) nounwind {
-;CHECK-LABEL: sqshlu4s:
-;CHECK: sqshlu.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshlu.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: sqshlu4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshlu.4s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshlu.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @sqshlu2d(ptr %A) nounwind {
-;CHECK-LABEL: sqshlu2d:
-;CHECK: sqshlu.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshlu.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: sqshlu2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshlu.2d v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshlu.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
+  ret <2 x i64> %tmp3
 }
 
 define <1 x i64> @sqshlu1d_constant(ptr %A) nounwind {
-;CHECK-LABEL: sqshlu1d_constant:
-;CHECK: sqshlu {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqshlu.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: sqshlu1d_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sqshlu d0, d0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqshlu.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
+  ret <1 x i64> %tmp3
 }
 
-define i64 @sqshlu_scalar_constant(ptr %A) nounwind {
-;CHECK-LABEL: sqshlu_scalar_constant:
-;CHECK: sqshlu {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, ptr %A
-        %tmp3 = call i64 @llvm.aarch64.neon.sqshlu.i64(i64 %tmp1, i64 1)
-        ret i64 %tmp3
+define i64 @sqshlu_i64_constant(ptr %A) nounwind {
+; CHECK-LABEL: sqshlu_i64_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    sqshlu d0, d0, #1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp3 = call i64 @llvm.aarch64.neon.sqshlu.i64(i64 %tmp1, i64 1)
+  ret i64 %tmp3
+}
+
+define i32 @sqshlu_i32_constant(ptr %A) nounwind {
+; CHECK-LABEL: sqshlu_i32_constant:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    sqshlu s0, s0, #1
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %tmp1 = load i32, ptr %A
+  %tmp3 = call i32 @llvm.aarch64.neon.sqshlu.i32(i32 %tmp1, i32 1)
+  ret i32 %tmp3
 }
 
 declare <8 x i8>  @llvm.aarch64.neon.sqshlu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
@@ -886,6 +1269,7 @@ declare <4 x i16> @llvm.aarch64.neon.sqshlu.v4i16(<4 x i16>, <4 x i16>) nounwind
 declare <2 x i32> @llvm.aarch64.neon.sqshlu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 declare <1 x i64> @llvm.aarch64.neon.sqshlu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
 declare i64 @llvm.aarch64.neon.sqshlu.i64(i64, i64) nounwind readnone
+declare i32 @llvm.aarch64.neon.sqshlu.i32(i32, i32) nounwind readnone
 
 declare <16 x i8> @llvm.aarch64.neon.sqshlu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 declare <8 x i16> @llvm.aarch64.neon.sqshlu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
@@ -893,57 +1277,78 @@ declare <4 x i32> @llvm.aarch64.neon.sqshlu.v4i32(<4 x i32>, <4 x i32>) nounwind
 declare <2 x i64> @llvm.aarch64.neon.sqshlu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
 
 define <8 x i8> @rshrn8b(ptr %A) nounwind {
-;CHECK-LABEL: rshrn8b:
-;CHECK: rshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %tmp1, i32 1)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: rshrn8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    rshrn.8b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %tmp1, i32 1)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @rshrn4h(ptr %A) nounwind {
-;CHECK-LABEL: rshrn4h:
-;CHECK: rshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> %tmp1, i32 1)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: rshrn4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    rshrn.4h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> %tmp1, i32 1)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @rshrn2s(ptr %A) nounwind {
-;CHECK-LABEL: rshrn2s:
-;CHECK: rshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> %tmp1, i32 1)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: rshrn2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    rshrn.2s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> %tmp1, i32 1)
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @rshrn16b(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: rshrn16b:
-;CHECK: rshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, ptr %ret
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %tmp1, i32 1)
-        %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-        ret <16 x i8> %tmp4
+; CHECK-LABEL: rshrn16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    rshrn2.16b v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <8 x i8>, ptr %ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %tmp1, i32 1)
+  %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %tmp4
 }
 
 define <8 x i16> @rshrn8h(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: rshrn8h:
-;CHECK: rshrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, ptr %ret
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> %tmp1, i32 1)
-        %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-        ret <8 x i16> %tmp4
+; CHECK-LABEL: rshrn8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    rshrn2.8h v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <4 x i16>, ptr %ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> %tmp1, i32 1)
+  %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @rshrn4s(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: rshrn4s:
-;CHECK: rshrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, ptr %ret
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> %tmp1, i32 1)
-        %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-        ret <4 x i32> %tmp4
+; CHECK-LABEL: rshrn4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    rshrn2.4s v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <2 x i32>, ptr %ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> %tmp1, i32 1)
+  %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i32> %tmp4
 }
 
 declare <8 x i8>  @llvm.aarch64.neon.rshrn.v8i8(<8 x i16>, i32) nounwind readnone
@@ -951,63 +1356,84 @@ declare <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32>, i32) nounwind readno
 declare <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64>, i32) nounwind readnone
 
 define <8 x i8> @shrn8b(ptr %A) nounwind {
-;CHECK-LABEL: shrn8b:
-;CHECK: shrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp2 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: shrn8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    shrn.8b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @shrn4h(ptr %A) nounwind {
-;CHECK-LABEL: shrn4h:
-;CHECK: shrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp2 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
-        %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: shrn4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    shrn.4h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
+  %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @shrn2s(ptr %A) nounwind {
-;CHECK-LABEL: shrn2s:
-;CHECK: shrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp2 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
-        %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: shrn2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    shrn.2s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
+  %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @shrn16b(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: shrn16b:
-;CHECK: shrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, ptr %ret
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp2 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
-        %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-        ret <16 x i8> %tmp4
+; CHECK-LABEL: shrn16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    shrn2.16b v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <8 x i8>, ptr %ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
+  %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %tmp4
 }
 
 define <8 x i16> @shrn8h(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: shrn8h:
-;CHECK: shrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, ptr %ret
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp2 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
-        %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
-        %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-        ret <8 x i16> %tmp4
+; CHECK-LABEL: shrn8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    shrn2.8h v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <4 x i16>, ptr %ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
+  %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
+  %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @shrn4s(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: shrn4s:
-;CHECK: shrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, ptr %ret
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp2 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
-        %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
-        %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-        ret <4 x i32> %tmp4
+; CHECK-LABEL: shrn4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    shrn2.4s v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <2 x i32>, ptr %ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
+  %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
+  %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i32> %tmp4
 }
 
 declare <8 x i8>  @llvm.aarch64.neon.shrn.v8i8(<8 x i16>, i32) nounwind readnone
@@ -1016,64 +1442,89 @@ declare <2 x i32> @llvm.aarch64.neon.shrn.v2i32(<2 x i64>, i32) nounwind readnon
 
 define i32 @sqshrn1s(i64 %A) nounwind {
 ; CHECK-LABEL: sqshrn1s:
-; CHECK: sqshrn {{s[0-9]+}}, d0, #1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    sqshrn s0, d0, #1
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %tmp = call i32 @llvm.aarch64.neon.sqshrn.i32(i64 %A, i32 1)
   ret i32 %tmp
 }
 
 define <8 x i8> @sqshrn8b(ptr %A) nounwind {
-;CHECK-LABEL: sqshrn8b:
-;CHECK: sqshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> %tmp1, i32 1)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: sqshrn8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshrn.8b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> %tmp1, i32 1)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @sqshrn4h(ptr %A) nounwind {
-;CHECK-LABEL: sqshrn4h:
-;CHECK: sqshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> %tmp1, i32 1)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: sqshrn4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshrn.4h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> %tmp1, i32 1)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @sqshrn2s(ptr %A) nounwind {
-;CHECK-LABEL: sqshrn2s:
-;CHECK: sqshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> %tmp1, i32 1)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: sqshrn2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshrn.2s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> %tmp1, i32 1)
+  ret <2 x i32> %tmp3
 }
 
 
 define <16 x i8> @sqshrn16b(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: sqshrn16b:
-;CHECK: sqshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, ptr %ret
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> %tmp1, i32 1)
-        %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-        ret <16 x i8> %tmp4
+; CHECK-LABEL: sqshrn16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqshrn2.16b v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <8 x i8>, ptr %ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> %tmp1, i32 1)
+  %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %tmp4
 }
 
 define <8 x i16> @sqshrn8h(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: sqshrn8h:
-;CHECK: sqshrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, ptr %ret
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> %tmp1, i32 1)
-        %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-        ret <8 x i16> %tmp4
+; CHECK-LABEL: sqshrn8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqshrn2.8h v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <4 x i16>, ptr %ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> %tmp1, i32 1)
+  %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @sqshrn4s(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: sqshrn4s:
-;CHECK: sqshrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, ptr %ret
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> %tmp1, i32 1)
-        %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-        ret <4 x i32> %tmp4
+; CHECK-LABEL: sqshrn4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqshrn2.4s v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <2 x i32>, ptr %ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> %tmp1, i32 1)
+  %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i32> %tmp4
 }
 
 declare i32  @llvm.aarch64.neon.sqshrn.i32(i64, i32) nounwind readnone
@@ -1083,63 +1534,88 @@ declare <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64>, i32) nounwind readn
 
 define i32 @sqshrun1s(i64 %A) nounwind {
 ; CHECK-LABEL: sqshrun1s:
-; CHECK: sqshrun {{s[0-9]+}}, d0, #1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    sqshrun s0, d0, #1
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %tmp = call i32 @llvm.aarch64.neon.sqshrun.i32(i64 %A, i32 1)
   ret i32 %tmp
 }
 
 define <8 x i8> @sqshrun8b(ptr %A) nounwind {
-;CHECK-LABEL: sqshrun8b:
-;CHECK: sqshrun.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> %tmp1, i32 1)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: sqshrun8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshrun.8b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> %tmp1, i32 1)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @sqshrun4h(ptr %A) nounwind {
-;CHECK-LABEL: sqshrun4h:
-;CHECK: sqshrun.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> %tmp1, i32 1)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: sqshrun4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshrun.4h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> %tmp1, i32 1)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @sqshrun2s(ptr %A) nounwind {
-;CHECK-LABEL: sqshrun2s:
-;CHECK: sqshrun.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> %tmp1, i32 1)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: sqshrun2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshrun.2s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> %tmp1, i32 1)
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @sqshrun16b(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: sqshrun16b:
-;CHECK: sqshrun2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, ptr %ret
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> %tmp1, i32 1)
-        %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-        ret <16 x i8> %tmp4
+; CHECK-LABEL: sqshrun16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqshrun2.16b v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <8 x i8>, ptr %ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> %tmp1, i32 1)
+  %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %tmp4
 }
 
 define <8 x i16> @sqshrun8h(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: sqshrun8h:
-;CHECK: sqshrun2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, ptr %ret
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> %tmp1, i32 1)
-        %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-        ret <8 x i16> %tmp4
+; CHECK-LABEL: sqshrun8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqshrun2.8h v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <4 x i16>, ptr %ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> %tmp1, i32 1)
+  %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @sqshrun4s(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: sqshrun4s:
-;CHECK: sqshrun2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, ptr %ret
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> %tmp1, i32 1)
-        %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-        ret <4 x i32> %tmp4
+; CHECK-LABEL: sqshrun4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqshrun2.4s v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <2 x i32>, ptr %ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> %tmp1, i32 1)
+  %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i32> %tmp4
 }
 
 declare i32  @llvm.aarch64.neon.sqshrun.i32(i64, i32) nounwind readnone
@@ -1149,63 +1625,88 @@ declare <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64>, i32) nounwind read
 
 define i32 @sqrshrn1s(i64 %A) nounwind {
 ; CHECK-LABEL: sqrshrn1s:
-; CHECK: sqrshrn {{s[0-9]+}}, d0, #1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    sqrshrn s0, d0, #1
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %tmp = call i32 @llvm.aarch64.neon.sqrshrn.i32(i64 %A, i32 1)
   ret i32 %tmp
 }
 
 define <8 x i8> @sqrshrn8b(ptr %A) nounwind {
-;CHECK-LABEL: sqrshrn8b:
-;CHECK: sqrshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: sqrshrn8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqrshrn.8b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @sqrshrn4h(ptr %A) nounwind {
-;CHECK-LABEL: sqrshrn4h:
-;CHECK: sqrshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: sqrshrn4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqrshrn.4h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @sqrshrn2s(ptr %A) nounwind {
-;CHECK-LABEL: sqrshrn2s:
-;CHECK: sqrshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: sqrshrn2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqrshrn.2s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @sqrshrn16b(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: sqrshrn16b:
-;CHECK: sqrshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, ptr %ret
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
-        %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-        ret <16 x i8> %tmp4
+; CHECK-LABEL: sqrshrn16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqrshrn2.16b v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <8 x i8>, ptr %ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
+  %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %tmp4
 }
 
 define <8 x i16> @sqrshrn8h(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: sqrshrn8h:
-;CHECK: sqrshrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, ptr %ret
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
-        %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-        ret <8 x i16> %tmp4
+; CHECK-LABEL: sqrshrn8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqrshrn2.8h v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <4 x i16>, ptr %ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
+  %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @sqrshrn4s(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: sqrshrn4s:
-;CHECK: sqrshrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, ptr %ret
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
-        %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-        ret <4 x i32> %tmp4
+; CHECK-LABEL: sqrshrn4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqrshrn2.4s v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <2 x i32>, ptr %ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
+  %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i32> %tmp4
 }
 
 declare i32  @llvm.aarch64.neon.sqrshrn.i32(i64, i32) nounwind readnone
@@ -1215,63 +1716,88 @@ declare <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64>, i32) nounwind read
 
 define i32 @sqrshrun1s(i64 %A) nounwind {
 ; CHECK-LABEL: sqrshrun1s:
-; CHECK: sqrshrun {{s[0-9]+}}, d0, #1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    sqrshrun s0, d0, #1
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %tmp = call i32 @llvm.aarch64.neon.sqrshrun.i32(i64 %A, i32 1)
   ret i32 %tmp
 }
 
 define <8 x i8> @sqrshrun8b(ptr %A) nounwind {
-;CHECK-LABEL: sqrshrun8b:
-;CHECK: sqrshrun.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %tmp1, i32 1)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: sqrshrun8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqrshrun.8b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %tmp1, i32 1)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @sqrshrun4h(ptr %A) nounwind {
-;CHECK-LABEL: sqrshrun4h:
-;CHECK: sqrshrun.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %tmp1, i32 1)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: sqrshrun4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqrshrun.4h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %tmp1, i32 1)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @sqrshrun2s(ptr %A) nounwind {
-;CHECK-LABEL: sqrshrun2s:
-;CHECK: sqrshrun.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %tmp1, i32 1)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: sqrshrun2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqrshrun.2s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %tmp1, i32 1)
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @sqrshrun16b(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: sqrshrun16b:
-;CHECK: sqrshrun2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, ptr %ret
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %tmp1, i32 1)
-        %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-        ret <16 x i8> %tmp4
+; CHECK-LABEL: sqrshrun16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqrshrun2.16b v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <8 x i8>, ptr %ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %tmp1, i32 1)
+  %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %tmp4
 }
 
 define <8 x i16> @sqrshrun8h(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: sqrshrun8h:
-;CHECK: sqrshrun2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, ptr %ret
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %tmp1, i32 1)
-        %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-        ret <8 x i16> %tmp4
+; CHECK-LABEL: sqrshrun8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqrshrun2.8h v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <4 x i16>, ptr %ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %tmp1, i32 1)
+  %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @sqrshrun4s(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: sqrshrun4s:
-;CHECK: sqrshrun2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, ptr %ret
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %tmp1, i32 1)
-        %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-        ret <4 x i32> %tmp4
+; CHECK-LABEL: sqrshrun4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqrshrun2.4s v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <2 x i32>, ptr %ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %tmp1, i32 1)
+  %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i32> %tmp4
 }
 
 declare i32  @llvm.aarch64.neon.sqrshrun.i32(i64, i32) nounwind readnone
@@ -1281,63 +1807,88 @@ declare <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64>, i32) nounwind rea
 
 define i32 @uqrshrn1s(i64 %A) nounwind {
 ; CHECK-LABEL: uqrshrn1s:
-; CHECK: uqrshrn {{s[0-9]+}}, d0, #1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    uqrshrn s0, d0, #1
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %tmp = call i32 @llvm.aarch64.neon.uqrshrn.i32(i64 %A, i32 1)
   ret i32 %tmp
 }
 
 define <8 x i8> @uqrshrn8b(ptr %A) nounwind {
-;CHECK-LABEL: uqrshrn8b:
-;CHECK: uqrshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: uqrshrn8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uqrshrn.8b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @uqrshrn4h(ptr %A) nounwind {
-;CHECK-LABEL: uqrshrn4h:
-;CHECK: uqrshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: uqrshrn4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uqrshrn.4h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @uqrshrn2s(ptr %A) nounwind {
-;CHECK-LABEL: uqrshrn2s:
-;CHECK: uqrshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: uqrshrn2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uqrshrn.2s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @uqrshrn16b(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: uqrshrn16b:
-;CHECK: uqrshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, ptr %ret
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
-        %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-        ret <16 x i8> %tmp4
+; CHECK-LABEL: uqrshrn16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqrshrn2.16b v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <8 x i8>, ptr %ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
+  %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %tmp4
 }
 
 define <8 x i16> @uqrshrn8h(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: uqrshrn8h:
-;CHECK: uqrshrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, ptr %ret
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
-        %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-        ret <8 x i16> %tmp4
+; CHECK-LABEL: uqrshrn8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqrshrn2.8h v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <4 x i16>, ptr %ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
+  %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @uqrshrn4s(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: uqrshrn4s:
-;CHECK: uqrshrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, ptr %ret
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
-        %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-        ret <4 x i32> %tmp4
+; CHECK-LABEL: uqrshrn4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqrshrn2.4s v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <2 x i32>, ptr %ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
+  %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i32> %tmp4
 }
 
 declare i32  @llvm.aarch64.neon.uqrshrn.i32(i64, i32) nounwind readnone
@@ -1347,48 +1898,69 @@ declare <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64>, i32) nounwind read
 
 define i32 @uqshrn1s(i64 %A) nounwind {
 ; CHECK-LABEL: uqshrn1s:
-; CHECK: uqshrn {{s[0-9]+}}, d0, #1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    uqshrn s0, d0, #1
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %tmp = call i32 @llvm.aarch64.neon.uqshrn.i32(i64 %A, i32 1)
   ret i32 %tmp
 }
 
 define <8 x i8> @uqshrn8b(ptr %A) nounwind {
-;CHECK-LABEL: uqshrn8b:
-;CHECK: uqshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> %tmp1, i32 1)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: uqshrn8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uqshrn.8b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> %tmp1, i32 1)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @uqshrn4h(ptr %A) nounwind {
-;CHECK-LABEL: uqshrn4h:
-;CHECK: uqshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> %tmp1, i32 1)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: uqshrn4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uqshrn.4h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> %tmp1, i32 1)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @uqshrn2s(ptr %A) nounwind {
-;CHECK-LABEL: uqshrn2s:
-;CHECK: uqshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> %tmp1, i32 1)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: uqshrn2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uqshrn.2s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> %tmp1, i32 1)
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @uqshrn16b(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: uqshrn16b:
-;CHECK: uqshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, ptr %ret
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> %tmp1, i32 1)
-        %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-        ret <16 x i8> %tmp4
+; CHECK-LABEL: uqshrn16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqshrn2.16b v0, v1, #1
+; CHECK-NEXT:    ret
+  %out = load <8 x i8>, ptr %ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> %tmp1, i32 1)
+  %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %tmp4
 }
 
 define <8 x i16> @uqshrn8h(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: uqshrn8h:
-;CHECK: uqshrn2.8h v0, {{v[0-9]+}}, #1
+; CHECK-LABEL: uqshrn8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqshrn2.8h v0, v1, #1
+; CHECK-NEXT:    ret
   %out = load <4 x i16>, ptr %ret
   %tmp1 = load <4 x i32>, ptr %A
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> %tmp1, i32 1)
@@ -1397,8 +1969,12 @@ define <8 x i16> @uqshrn8h(ptr %ret, ptr %A) nounwind {
 }
 
 define <4 x i32> @uqshrn4s(ptr %ret, ptr %A) nounwind {
-;CHECK-LABEL: uqshrn4s:
-;CHECK: uqshrn2.4s v0, {{v[0-9]+}}, #1
+; CHECK-LABEL: uqshrn4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqshrn2.4s v0, v1, #1
+; CHECK-NEXT:    ret
   %out = load <2 x i32>, ptr %ret
   %tmp1 = load <2 x i64>, ptr %A
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> %tmp1, i32 1)
@@ -1412,60 +1988,78 @@ declare <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32>, i32) nounwind readn
 declare <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64>, i32) nounwind readnone
 
 define <8 x i16> @ushll8h(ptr %A) nounwind {
-;CHECK-LABEL: ushll8h:
-;CHECK: ushll.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
-        %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: ushll8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ushll.8h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
+  %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @ushll4s(ptr %A) nounwind {
-;CHECK-LABEL: ushll4s:
-;CHECK: ushll.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
-        %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: ushll4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ushll.4s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
+  %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @ushll2d(ptr %A) nounwind {
-;CHECK-LABEL: ushll2d:
-;CHECK: ushll.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
-        %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: ushll2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ushll.2d v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
+  %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
+  ret <2 x i64> %tmp3
 }
 
 define <8 x i16> @ushll2_8h(ptr %A) nounwind {
-;CHECK-LABEL: ushll2_8h:
-;CHECK: ushll.8h v0, {{v[0-9]+}}, #1
-        %load1 = load <16 x i8>, ptr %A
-        %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-        %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
-        %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: ushll2_8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0, #8]
+; CHECK-NEXT:    ushll.8h v0, v0, #1
+; CHECK-NEXT:    ret
+  %load1 = load <16 x i8>, ptr %A
+  %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
+  %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @ushll2_4s(ptr %A) nounwind {
-;CHECK-LABEL: ushll2_4s:
-;CHECK: ushll.4s v0, {{v[0-9]+}}, #1
-        %load1 = load <8 x i16>, ptr %A
-        %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-        %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
-        %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: ushll2_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0, #8]
+; CHECK-NEXT:    ushll.4s v0, v0, #1
+; CHECK-NEXT:    ret
+  %load1 = load <8 x i16>, ptr %A
+  %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
+  %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @ushll2_2d(ptr %A) nounwind {
-;CHECK-LABEL: ushll2_2d:
-;CHECK: ushll.2d v0, {{v[0-9]+}}, #1
-        %load1 = load <4 x i32>, ptr %A
-        %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
-        %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
-        %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: ushll2_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0, #8]
+; CHECK-NEXT:    ushll.2d v0, v0, #1
+; CHECK-NEXT:    ret
+  %load1 = load <4 x i32>, ptr %A
+  %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
+  %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
+  ret <2 x i64> %tmp3
 }
 
 declare <16 x i8> @llvm.aarch64.neon.ushl.v16i8(<16 x i8>, <16 x i8>)
@@ -1476,8 +2070,11 @@ declare <1 x i64> @llvm.aarch64.neon.ushl.v1i64(<1 x i64>, <1 x i64>)
 declare i64 @llvm.aarch64.neon.ushl.i64(i64, i64)
 
 define <8 x i16> @neon.ushll8h_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.ushll8h_constant_shift
-;CHECK: ushll.8h v0, {{v[0-9]+}}, #1
+; CHECK-LABEL: neon.ushll8h_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ushll.8h v0, v0, #1
+; CHECK-NEXT:    ret
   %tmp1 = load <8 x i8>, ptr %A
   %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.ushl.v8i16(<8 x i16> %tmp2, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
@@ -1485,8 +2082,12 @@ define <8 x i16> @neon.ushll8h_constant_shift(ptr %A) nounwind {
 }
 
 define <8 x i16> @neon.ushl8h_no_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.ushl8h_no_constant_shift
-;CHECK: ushl.8h v0, v0, v0
+; CHECK-LABEL: neon.ushl8h_no_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ushll.8h v0, v0, #0
+; CHECK-NEXT:    ushl.8h v0, v0, v0
+; CHECK-NEXT:    ret
   %tmp1 = load <8 x i8>, ptr %A
   %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.ushl.v8i16(<8 x i16> %tmp2, <8 x i16> %tmp2)
@@ -1495,11 +2096,11 @@ define <8 x i16> @neon.ushl8h_no_constant_shift(ptr %A) nounwind {
 
 define <4 x i32> @neon.ushl8h_constant_shift_extend_not_2x(ptr %A) nounwind {
 ; CHECK-LABEL: neon.ushl8h_constant_shift_extend_not_2x:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr s0, [x0]
-; CHECK-NEXT: ushll.8h v0, v0, #0
-; CHECK-NEXT: ushll.4s v0, v0, #1
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    ushll.8h v0, v0, #0
+; CHECK-NEXT:    ushll.4s v0, v0, #1
+; CHECK-NEXT:    ret
   %tmp1 = load <4 x i8>, ptr %A
   %tmp2 = zext <4 x i8> %tmp1 to <4 x i32>
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
@@ -1507,18 +2108,22 @@ define <4 x i32> @neon.ushl8h_constant_shift_extend_not_2x(ptr %A) nounwind {
 }
 
 define <8 x i16> @neon.ushl8_noext_constant_shift(ptr %A) nounwind {
-; CHECK-LABEL: neon.ushl8_noext_constant_shift
-; CHECK:      ldr       q0, [x0]
-; CHECK-NEXT: shl.8h   v0, v0, #1
-; CHECK-NEXT: ret
+; CHECK-LABEL: neon.ushl8_noext_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    shl.8h v0, v0, #1
+; CHECK-NEXT:    ret
   %tmp1 = load <8 x i16>, ptr %A
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.ushl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
   ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @neon.ushll4s_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.ushll4s_constant_shift
-;CHECK: ushll.4s v0, {{v[0-9]+}}, #1
+; CHECK-LABEL: neon.ushll4s_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ushll.4s v0, v0, #1
+; CHECK-NEXT:    ret
   %tmp1 = load <4 x i16>, ptr %A
   %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
@@ -1527,10 +2132,13 @@ define <4 x i32> @neon.ushll4s_constant_shift(ptr %A) nounwind {
 
 ; FIXME: unnecessary ushll.4s v0, v0, #0?
 define <4 x i32> @neon.ushll4s_neg_constant_shift(ptr %A) nounwind {
-; CHECK-LABEL: neon.ushll4s_neg_constant_shift
-; CHECK: movi.2d v1, #0xffffffffffffffff
-; CHECK: ushll.4s v0, v0, #0
-; CHECK: ushl.4s v0, v0, v1
+; CHECK-LABEL: neon.ushll4s_neg_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    movi.2d v1, #0xffffffffffffffff
+; CHECK-NEXT:    ushll.4s v0, v0, #0
+; CHECK-NEXT:    ushl.4s v0, v0, v1
+; CHECK-NEXT:    ret
   %tmp1 = load <4 x i16>, ptr %A
   %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
@@ -1539,16 +2147,22 @@ define <4 x i32> @neon.ushll4s_neg_constant_shift(ptr %A) nounwind {
 
 ; FIXME: should be constant folded.
 define <4 x i32> @neon.ushll4s_constant_fold() nounwind {
-; CHECK-LABEL: neon.ushll4s_constant_fold
-; CHECK: shl.4s v0, v0, #1
-;
+; CHECK-LABEL: neon.ushll4s_constant_fold:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI160_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI160_0]
+; CHECK-NEXT:    shl.4s v0, v0, #1
+; CHECK-NEXT:    ret
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
   ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @neon.ushll2d_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.ushll2d_constant_shift
-;CHECK: ushll.2d v0, {{v[0-9]+}}, #1
+; CHECK-LABEL: neon.ushll2d_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ushll.2d v0, v0, #1
+; CHECK-NEXT:    ret
   %tmp1 = load <2 x i32>, ptr %A
   %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
   %tmp3 = call <2 x i64> @llvm.aarch64.neon.ushl.v2i64(<2 x i64> %tmp2, <2 x i64> <i64 1, i64 1>)
@@ -1556,8 +2170,13 @@ define <2 x i64> @neon.ushll2d_constant_shift(ptr %A) nounwind {
 }
 
 define <1 x i64> @neon.ushl_vscalar_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.ushl_vscalar_constant_shift
-;CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #1
+; CHECK-LABEL: neon.ushl_vscalar_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi.2d v1, #0000000000000000
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    zip1.2s v0, v0, v1
+; CHECK-NEXT:    shl d0, d0, #1
+; CHECK-NEXT:    ret
   %tmp1 = load <1 x i32>, ptr %A
   %tmp2 = zext <1 x i32> %tmp1 to <1 x i64>
   %tmp3 = call <1 x i64> @llvm.aarch64.neon.ushl.v1i64(<1 x i64> %tmp2, <1 x i64> <i64 1>)
@@ -1565,8 +2184,13 @@ define <1 x i64> @neon.ushl_vscalar_constant_shift(ptr %A) nounwind {
 }
 
 define i64 @neon.ushl_scalar_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.ushl_scalar_constant_shift
-;CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #1
+; CHECK-LABEL: neon.ushl_scalar_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    shl d0, d0, #1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
   %tmp1 = load i32, ptr %A
   %tmp2 = zext i32 %tmp1 to i64
   %tmp3 = call i64 @llvm.aarch64.neon.ushl.i64(i64 %tmp2, i64 1)
@@ -1574,21 +2198,27 @@ define i64 @neon.ushl_scalar_constant_shift(ptr %A) nounwind {
 }
 
 define <8 x i16> @sshll8h(ptr %A) nounwind {
-;CHECK-LABEL: sshll8h:
-;CHECK: sshll.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
-        %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: sshll8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sshll.8h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
+  %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  ret <8 x i16> %tmp3
 }
 
 define <2 x i64> @sshll2d(ptr %A) nounwind {
-;CHECK-LABEL: sshll2d:
-;CHECK: sshll.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
-        %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: sshll2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sshll.2d v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
+  %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
+  ret <2 x i64> %tmp3
 }
 
 declare <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8>, <16 x i8>)
@@ -1599,99 +2229,135 @@ declare <1 x i64> @llvm.aarch64.neon.sshl.v1i64(<1 x i64>, <1 x i64>)
 declare i64 @llvm.aarch64.neon.sshl.i64(i64, i64)
 
 define <16 x i8> @neon.sshl16b_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.sshl16b_constant_shift
-;CHECK: shl.16b {{v[0-9]+}}, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp2 = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
-        ret <16 x i8> %tmp2
+; CHECK-LABEL: neon.sshl16b_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    shl.16b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  ret <16 x i8> %tmp2
 }
 
 define <16 x i8> @neon.sshl16b_non_splat_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.sshl16b_non_splat_constant_shift
-;CHECK: sshl.16b {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp2 = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 6, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
-        ret <16 x i8> %tmp2
+; CHECK-LABEL: neon.sshl16b_non_splat_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI167_0
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI167_0]
+; CHECK-NEXT:    sshl.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 6, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  ret <16 x i8> %tmp2
 }
 
 define <16 x i8> @neon.sshl16b_neg_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.sshl16b_neg_constant_shift
-;CHECK: sshl.16b {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp2 = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2>)
-        ret <16 x i8> %tmp2
+; CHECK-LABEL: neon.sshl16b_neg_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi.16b v1, #254
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sshl.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2>)
+  ret <16 x i8> %tmp2
 }
 
 define <8 x i16> @neon.sshll8h_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.sshll8h_constant_shift
-;CHECK: sshll.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.sshl.v8i16(<8 x i16> %tmp2, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: neon.sshll8h_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sshll.8h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.sshl.v8i16(<8 x i16> %tmp2, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @neon.sshl4s_wrong_ext_constant_shift(ptr %A) nounwind {
 ; CHECK-LABEL: neon.sshl4s_wrong_ext_constant_shift:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr s0, [x0]
-; CHECK-NEXT: sshll.8h v0, v0, #0
-; CHECK-NEXT: sshll.4s v0, v0, #1
-; CHECK-NEXT: ret
-        %tmp1 = load <4 x i8>, ptr %A
-        %tmp2 = sext <4 x i8> %tmp1 to <4 x i32>
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
-        ret <4 x i32> %tmp3
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    sshll.8h v0, v0, #0
+; CHECK-NEXT:    sshll.4s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i8>, ptr %A
+  %tmp2 = sext <4 x i8> %tmp1 to <4 x i32>
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
+  ret <4 x i32> %tmp3
 }
 
 define <4 x i32> @neon.sshll4s_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.sshll4s_constant_shift
-;CHECK: sshll.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: neon.sshll4s_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sshll.4s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
+  ret <4 x i32> %tmp3
 }
 
 define <4 x i32> @neon.sshll4s_neg_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.sshll4s_neg_constant_shift
-;CHECK: movi.2d v1, #0xffffffffffffffff
-;CHECK: sshll.4s v0, v0, #0
-;CHECK: sshl.4s v0, v0, v1
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: neon.sshll4s_neg_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    movi.2d v1, #0xffffffffffffffff
+; CHECK-NEXT:    sshll.4s v0, v0, #0
+; CHECK-NEXT:    sshl.4s v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
+  ret <4 x i32> %tmp3
 }
 
 ; FIXME: should be constant folded.
 define <4 x i32> @neon.sshl4s_constant_fold() nounwind {
-;CHECK-LABEL: neon.sshl4s_constant_fold
-;CHECK: shl.4s {{v[0-9]+}}, {{v[0-9]+}}, #2
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: neon.sshl4s_constant_fold:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI173_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI173_0]
+; CHECK-NEXT:    shl.4s v0, v0, #2
+; CHECK-NEXT:    ret
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
+  ret <4 x i32> %tmp3
 }
 
 define <4 x i32> @neon.sshl4s_no_fold(ptr %A) nounwind {
-;CHECK-LABEL: neon.sshl4s_no_fold
-;CHECK: shl.4s {{v[0-9]+}}, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: neon.sshl4s_no_fold:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    shl.4s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @neon.sshll2d_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.sshll2d_constant_shift
-;CHECK: sshll.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64> %tmp2, <2 x i64> <i64 1, i64 1>)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: neon.sshll2d_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sshll.2d v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64> %tmp2, <2 x i64> <i64 1, i64 1>)
+  ret <2 x i64> %tmp3
 }
 
 define <1 x i64> @neon.sshll_vscalar_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.sshll_vscalar_constant_shift
-;CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #1
+; CHECK-LABEL: neon.sshll_vscalar_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi.2d v1, #0000000000000000
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    zip1.2s v0, v0, v1
+; CHECK-NEXT:    shl d0, d0, #1
+; CHECK-NEXT:    ret
   %tmp1 = load <1 x i32>, ptr %A
   %tmp2 = zext <1 x i32> %tmp1 to <1 x i64>
   %tmp3 = call <1 x i64> @llvm.aarch64.neon.sshl.v1i64(<1 x i64> %tmp2, <1 x i64> <i64 1>)
@@ -1699,690 +2365,956 @@ define <1 x i64> @neon.sshll_vscalar_constant_shift(ptr %A) nounwind {
 }
 
 define i64 @neon.sshll_scalar_constant_shift(ptr %A) nounwind {
-;CHECK-LABEL: neon.sshll_scalar_constant_shift
-;CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #1
+; CHECK-LABEL: neon.sshll_scalar_constant_shift:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    shl d0, d0, #1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
   %tmp1 = load i32, ptr %A
   %tmp2 = zext i32 %tmp1 to i64
   %tmp3 = call i64 @llvm.aarch64.neon.sshl.i64(i64 %tmp2, i64 1)
   ret i64 %tmp3
 }
 
+define i64 @neon.sshll_scalar_constant_shift_m1(ptr %A) nounwind {
+; CHECK-LABEL: neon.sshll_scalar_constant_shift_m1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    mov x9, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    sshl d0, d0, d1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i32, ptr %A
+  %tmp2 = zext i32 %tmp1 to i64
+  %tmp3 = call i64 @llvm.aarch64.neon.sshl.i64(i64 %tmp2, i64 -1)
+  ret i64 %tmp3
+}
+
 ; FIXME: should be constant folded.
 define <2 x i64> @neon.sshl2d_constant_fold() nounwind {
-;CHECK-LABEL: neon.sshl2d_constant_fold
-;CHECK: shl.2d {{v[0-9]+}}, {{v[0-9]+}}, #1
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64> <i64 99, i64 1000>, <2 x i64> <i64 1, i64 1>)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: neon.sshl2d_constant_fold:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI179_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI179_0]
+; CHECK-NEXT:    shl.2d v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64> <i64 99, i64 1000>, <2 x i64> <i64 1, i64 1>)
+  ret <2 x i64> %tmp3
 }
 
 define <2 x i64> @neon.sshl2d_no_fold(ptr %A) nounwind {
-;CHECK-LABEL: neon.sshl2d_no_fold
-;CHECK: shl.2d {{v[0-9]+}}, {{v[0-9]+}}, #2
-        %tmp2 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64> %tmp2, <2 x i64> <i64 2, i64 2>)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: neon.sshl2d_no_fold:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    shl.2d v0, v0, #2
+; CHECK-NEXT:    ret
+  %tmp2 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64> %tmp2, <2 x i64> <i64 2, i64 2>)
+  ret <2 x i64> %tmp3
 }
 
 define <8 x i16> @sshll2_8h(ptr %A) nounwind {
-;CHECK-LABEL: sshll2_8h:
-;CHECK: sshll.8h v0, {{v[0-9]+}}, #1
-        %load1 = load <16 x i8>, ptr %A
-        %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-        %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
-        %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: sshll2_8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0, #8]
+; CHECK-NEXT:    sshll.8h v0, v0, #1
+; CHECK-NEXT:    ret
+  %load1 = load <16 x i8>, ptr %A
+  %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
+  %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @sshll2_4s(ptr %A) nounwind {
-;CHECK-LABEL: sshll2_4s:
-;CHECK: sshll.4s v0, {{v[0-9]+}}, #1
-        %load1 = load <8 x i16>, ptr %A
-        %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-        %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
-        %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: sshll2_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0, #8]
+; CHECK-NEXT:    sshll.4s v0, v0, #1
+; CHECK-NEXT:    ret
+  %load1 = load <8 x i16>, ptr %A
+  %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
+  %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @sshll2_2d(ptr %A) nounwind {
-;CHECK-LABEL: sshll2_2d:
-;CHECK: sshll.2d v0, {{v[0-9]+}}, #1
-        %load1 = load <4 x i32>, ptr %A
-        %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
-        %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
-        %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: sshll2_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0, #8]
+; CHECK-NEXT:    sshll.2d v0, v0, #1
+; CHECK-NEXT:    ret
+  %load1 = load <4 x i32>, ptr %A
+  %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
+  %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
+  ret <2 x i64> %tmp3
 }
 
 define <8 x i8> @sqshli8b(ptr %A) nounwind {
-;CHECK-LABEL: sqshli8b:
-;CHECK: sqshl.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: sqshli8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sqshl.8b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @sqshli4h(ptr %A) nounwind {
-;CHECK-LABEL: sqshli4h:
-;CHECK: sqshl.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: sqshli4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sqshl.4h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @sqshli2s(ptr %A) nounwind {
-;CHECK-LABEL: sqshli2s:
-;CHECK: sqshl.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: sqshli2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sqshl.2s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @sqshli16b(ptr %A) nounwind {
-;CHECK-LABEL: sqshli16b:
-;CHECK: sqshl.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
-        ret <16 x i8> %tmp3
+; CHECK-LABEL: sqshli16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshl.16b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @sqshli8h(ptr %A) nounwind {
-;CHECK-LABEL: sqshli8h:
-;CHECK: sqshl.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: sqshli8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshl.8h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @sqshli4s(ptr %A) nounwind {
-;CHECK-LABEL: sqshli4s:
-;CHECK: sqshl.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: sqshli4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshl.4s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @sqshli2d(ptr %A) nounwind {
-;CHECK-LABEL: sqshli2d:
-;CHECK: sqshl.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: sqshli2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sqshl.2d v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
+  ret <2 x i64> %tmp3
 }
 
 define <8 x i8> @uqshli8b(ptr %A) nounwind {
-;CHECK-LABEL: uqshli8b:
-;CHECK: uqshl.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: uqshli8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    uqshl.8b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  ret <8 x i8> %tmp3
 }
 
 define <8 x i8> @uqshli8b_1(ptr %A) nounwind {
-;CHECK-LABEL: uqshli8b_1:
-;CHECK: movi.8b [[REG:v[0-9]+]], #8
-;CHECK: uqshl.8b v0, v0, [[REG]]
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: uqshli8b_1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi.8b v1, #8
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    uqshl.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @uqshli4h(ptr %A) nounwind {
-;CHECK-LABEL: uqshli4h:
-;CHECK: uqshl.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: uqshli4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    uqshl.4h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @uqshli2s(ptr %A) nounwind {
-;CHECK-LABEL: uqshli2s:
-;CHECK: uqshl.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: uqshli2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    uqshl.2s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
+  ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @uqshli16b(ptr %A) nounwind {
-;CHECK-LABEL: uqshli16b:
-;CHECK: uqshl.16b
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
-        ret <16 x i8> %tmp3
+; CHECK-LABEL: uqshli16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uqshl.16b v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @uqshli8h(ptr %A) nounwind {
-;CHECK-LABEL: uqshli8h:
-;CHECK: uqshl.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: uqshli8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uqshl.8h v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @uqshli4s(ptr %A) nounwind {
-;CHECK-LABEL: uqshli4s:
-;CHECK: uqshl.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: uqshli4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uqshl.4s v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @uqshli2d(ptr %A) nounwind {
-;CHECK-LABEL: uqshli2d:
-;CHECK: uqshl.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: uqshli2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uqshl.2d v0, v0, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
+  ret <2 x i64> %tmp3
 }
 
 define <8 x i8> @ursra8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ursra8b:
-;CHECK: ursra.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        %tmp4 = load <8 x i8>, ptr %B
-        %tmp5 = add <8 x i8> %tmp3, %tmp4
-        ret <8 x i8> %tmp5
+; CHECK-LABEL: ursra8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    ursra.8b v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  %tmp4 = load <8 x i8>, ptr %B
+  %tmp5 = add <8 x i8> %tmp3, %tmp4
+  ret <8 x i8> %tmp5
 }
 
 define <4 x i16> @ursra4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ursra4h:
-;CHECK: ursra.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
-        %tmp4 = load <4 x i16>, ptr %B
-        %tmp5 = add <4 x i16> %tmp3, %tmp4
-        ret <4 x i16> %tmp5
+; CHECK-LABEL: ursra4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    ursra.4h v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
+  %tmp4 = load <4 x i16>, ptr %B
+  %tmp5 = add <4 x i16> %tmp3, %tmp4
+  ret <4 x i16> %tmp5
 }
 
 define <2 x i32> @ursra2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ursra2s:
-;CHECK: ursra.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
-        %tmp4 = load <2 x i32>, ptr %B
-        %tmp5 = add <2 x i32> %tmp3, %tmp4
-        ret <2 x i32> %tmp5
+; CHECK-LABEL: ursra2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    ursra.2s v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
+  %tmp4 = load <2 x i32>, ptr %B
+  %tmp5 = add <2 x i32> %tmp3, %tmp4
+  ret <2 x i32> %tmp5
 }
 
 define <16 x i8> @ursra16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ursra16b:
-;CHECK: ursra.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        %tmp4 = load <16 x i8>, ptr %B
-        %tmp5 = add <16 x i8> %tmp3, %tmp4
-         ret <16 x i8> %tmp5
+; CHECK-LABEL: ursra16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ursra.16b v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  %tmp4 = load <16 x i8>, ptr %B
+  %tmp5 = add <16 x i8> %tmp3, %tmp4
+  ret <16 x i8> %tmp5
 }
 
 define <8 x i16> @ursra8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ursra8h:
-;CHECK: ursra.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
-        %tmp4 = load <8 x i16>, ptr %B
-        %tmp5 = add <8 x i16> %tmp3, %tmp4
-         ret <8 x i16> %tmp5
+; CHECK-LABEL: ursra8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ursra.8h v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
+  %tmp4 = load <8 x i16>, ptr %B
+  %tmp5 = add <8 x i16> %tmp3, %tmp4
+  ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @ursra4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ursra4s:
-;CHECK: ursra.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
-        %tmp4 = load <4 x i32>, ptr %B
-        %tmp5 = add <4 x i32> %tmp3, %tmp4
-         ret <4 x i32> %tmp5
+; CHECK-LABEL: ursra4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ursra.4s v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
+  %tmp4 = load <4 x i32>, ptr %B
+  %tmp5 = add <4 x i32> %tmp3, %tmp4
+  ret <4 x i32> %tmp5
 }
 
 define <2 x i64> @ursra2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ursra2d:
-;CHECK: ursra.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
-        %tmp4 = load <2 x i64>, ptr %B
-        %tmp5 = add <2 x i64> %tmp3, %tmp4
-         ret <2 x i64> %tmp5
+; CHECK-LABEL: ursra2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ursra.2d v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
+  %tmp4 = load <2 x i64>, ptr %B
+  %tmp5 = add <2 x i64> %tmp3, %tmp4
+  ret <2 x i64> %tmp5
 }
 
 define <1 x i64> @ursra1d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ursra1d:
-;CHECK: ursra {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 -1>)
-        %tmp4 = load <1 x i64>, ptr %B
-        %tmp5 = add <1 x i64> %tmp3, %tmp4
-        ret <1 x i64> %tmp5
+; CHECK-LABEL: ursra1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    ursra d0, d1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 -1>)
+  %tmp4 = load <1 x i64>, ptr %B
+  %tmp5 = add <1 x i64> %tmp3, %tmp4
+  ret <1 x i64> %tmp5
 }
 
 define i64 @ursra_scalar(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ursra_scalar:
-;CHECK: ursra {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, ptr %A
-        %tmp3 = call i64 @llvm.aarch64.neon.urshl.i64(i64 %tmp1, i64 -1)
-        %tmp4 = load i64, ptr %B
-        %tmp5 = add i64 %tmp3, %tmp4
-        ret i64 %tmp5
+; CHECK-LABEL: ursra_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    ldr x9, [x1]
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    ursra d0, d1, #1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp3 = call i64 @llvm.aarch64.neon.urshl.i64(i64 %tmp1, i64 -1)
+  %tmp4 = load i64, ptr %B
+  %tmp5 = add i64 %tmp3, %tmp4
+  ret i64 %tmp5
 }
 
 define <8 x i8> @srsra8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srsra8b:
-;CHECK: srsra.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        %tmp4 = load <8 x i8>, ptr %B
-        %tmp5 = add <8 x i8> %tmp3, %tmp4
-        ret <8 x i8> %tmp5
+; CHECK-LABEL: srsra8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    srsra.8b v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  %tmp4 = load <8 x i8>, ptr %B
+  %tmp5 = add <8 x i8> %tmp3, %tmp4
+  ret <8 x i8> %tmp5
 }
 
 define <4 x i16> @srsra4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srsra4h:
-;CHECK: srsra.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
-        %tmp4 = load <4 x i16>, ptr %B
-        %tmp5 = add <4 x i16> %tmp3, %tmp4
-        ret <4 x i16> %tmp5
+; CHECK-LABEL: srsra4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    srsra.4h v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
+  %tmp4 = load <4 x i16>, ptr %B
+  %tmp5 = add <4 x i16> %tmp3, %tmp4
+  ret <4 x i16> %tmp5
 }
 
 define <2 x i32> @srsra2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srsra2s:
-;CHECK: srsra.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
-        %tmp4 = load <2 x i32>, ptr %B
-        %tmp5 = add <2 x i32> %tmp3, %tmp4
-        ret <2 x i32> %tmp5
+; CHECK-LABEL: srsra2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    srsra.2s v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
+  %tmp4 = load <2 x i32>, ptr %B
+  %tmp5 = add <2 x i32> %tmp3, %tmp4
+  ret <2 x i32> %tmp5
 }
 
 define <16 x i8> @srsra16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srsra16b:
-;CHECK: srsra.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        %tmp4 = load <16 x i8>, ptr %B
-        %tmp5 = add <16 x i8> %tmp3, %tmp4
-         ret <16 x i8> %tmp5
+; CHECK-LABEL: srsra16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    srsra.16b v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  %tmp4 = load <16 x i8>, ptr %B
+  %tmp5 = add <16 x i8> %tmp3, %tmp4
+  ret <16 x i8> %tmp5
 }
 
 define <8 x i16> @srsra8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srsra8h:
-;CHECK: srsra.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
-        %tmp4 = load <8 x i16>, ptr %B
-        %tmp5 = add <8 x i16> %tmp3, %tmp4
-         ret <8 x i16> %tmp5
+; CHECK-LABEL: srsra8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    srsra.8h v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
+  %tmp4 = load <8 x i16>, ptr %B
+  %tmp5 = add <8 x i16> %tmp3, %tmp4
+  ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @srsra4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srsra4s:
-;CHECK: srsra.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
-        %tmp4 = load <4 x i32>, ptr %B
-        %tmp5 = add <4 x i32> %tmp3, %tmp4
-         ret <4 x i32> %tmp5
+; CHECK-LABEL: srsra4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    srsra.4s v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
+  %tmp4 = load <4 x i32>, ptr %B
+  %tmp5 = add <4 x i32> %tmp3, %tmp4
+  ret <4 x i32> %tmp5
 }
 
 define <2 x i64> @srsra2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srsra2d:
-;CHECK: srsra.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
-        %tmp4 = load <2 x i64>, ptr %B
-        %tmp5 = add <2 x i64> %tmp3, %tmp4
-         ret <2 x i64> %tmp5
+; CHECK-LABEL: srsra2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    srsra.2d v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
+  %tmp4 = load <2 x i64>, ptr %B
+  %tmp5 = add <2 x i64> %tmp3, %tmp4
+  ret <2 x i64> %tmp5
 }
 
 define <1 x i64> @srsra1d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srsra1d:
-;CHECK: srsra {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 -1>)
-        %tmp4 = load <1 x i64>, ptr %B
-        %tmp5 = add <1 x i64> %tmp3, %tmp4
-        ret <1 x i64> %tmp5
+; CHECK-LABEL: srsra1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    srsra d0, d1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 -1>)
+  %tmp4 = load <1 x i64>, ptr %B
+  %tmp5 = add <1 x i64> %tmp3, %tmp4
+  ret <1 x i64> %tmp5
 }
 
 define i64 @srsra_scalar(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: srsra_scalar:
-;CHECK: srsra {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, ptr %A
-        %tmp3 = call i64 @llvm.aarch64.neon.srshl.i64(i64 %tmp1, i64 -1)
-        %tmp4 = load i64, ptr %B
-        %tmp5 = add i64 %tmp3, %tmp4
-        ret i64 %tmp5
+; CHECK-LABEL: srsra_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    ldr x9, [x1]
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    srsra d0, d1, #1
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load i64, ptr %A
+  %tmp3 = call i64 @llvm.aarch64.neon.srshl.i64(i64 %tmp1, i64 -1)
+  %tmp4 = load i64, ptr %B
+  %tmp5 = add i64 %tmp3, %tmp4
+  ret i64 %tmp5
 }
 
 define <8 x i8> @usra8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usra8b:
-;CHECK: usra.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp3 = lshr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp4 = load <8 x i8>, ptr %B
-        %tmp5 = add <8 x i8> %tmp3, %tmp4
-        ret <8 x i8> %tmp5
+; CHECK-LABEL: usra8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    usra.8b v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp3 = lshr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %tmp4 = load <8 x i8>, ptr %B
+  %tmp5 = add <8 x i8> %tmp3, %tmp4
+  ret <8 x i8> %tmp5
 }
 
 define <4 x i16> @usra4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usra4h:
-;CHECK: usra.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp3 = lshr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
-        %tmp4 = load <4 x i16>, ptr %B
-        %tmp5 = add <4 x i16> %tmp3, %tmp4
-        ret <4 x i16> %tmp5
+; CHECK-LABEL: usra4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    usra.4h v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp3 = lshr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
+  %tmp4 = load <4 x i16>, ptr %B
+  %tmp5 = add <4 x i16> %tmp3, %tmp4
+  ret <4 x i16> %tmp5
 }
 
 define <2 x i32> @usra2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usra2s:
-;CHECK: usra.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp3 = lshr <2 x i32> %tmp1, <i32 1, i32 1>
-        %tmp4 = load <2 x i32>, ptr %B
-        %tmp5 = add <2 x i32> %tmp3, %tmp4
-        ret <2 x i32> %tmp5
+; CHECK-LABEL: usra2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    usra.2s v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp3 = lshr <2 x i32> %tmp1, <i32 1, i32 1>
+  %tmp4 = load <2 x i32>, ptr %B
+  %tmp5 = add <2 x i32> %tmp3, %tmp4
+  ret <2 x i32> %tmp5
 }
 
 define <16 x i8> @usra16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usra16b:
-;CHECK: usra.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp3 = lshr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp4 = load <16 x i8>, ptr %B
-        %tmp5 = add <16 x i8> %tmp3, %tmp4
-         ret <16 x i8> %tmp5
+; CHECK-LABEL: usra16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    usra.16b v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp3 = lshr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %tmp4 = load <16 x i8>, ptr %B
+  %tmp5 = add <16 x i8> %tmp3, %tmp4
+  ret <16 x i8> %tmp5
 }
 
 define <8 x i16> @usra8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usra8h:
-;CHECK: usra.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        %tmp4 = load <8 x i16>, ptr %B
-        %tmp5 = add <8 x i16> %tmp3, %tmp4
-         ret <8 x i16> %tmp5
+; CHECK-LABEL: usra8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    usra.8h v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %tmp4 = load <8 x i16>, ptr %B
+  %tmp5 = add <8 x i16> %tmp3, %tmp4
+  ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @usra4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usra4s:
-;CHECK: usra.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
-        %tmp4 = load <4 x i32>, ptr %B
-        %tmp5 = add <4 x i32> %tmp3, %tmp4
-         ret <4 x i32> %tmp5
+; CHECK-LABEL: usra4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    usra.4s v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
+  %tmp4 = load <4 x i32>, ptr %B
+  %tmp5 = add <4 x i32> %tmp3, %tmp4
+  ret <4 x i32> %tmp5
 }
 
 define <2 x i64> @usra2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usra2d:
-;CHECK: usra.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
-        %tmp4 = load <2 x i64>, ptr %B
-        %tmp5 = add <2 x i64> %tmp3, %tmp4
-         ret <2 x i64> %tmp5
+; CHECK-LABEL: usra2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    usra.2d v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
+  %tmp4 = load <2 x i64>, ptr %B
+  %tmp5 = add <2 x i64> %tmp3, %tmp4
+  ret <2 x i64> %tmp5
 }
 
 define <1 x i64> @usra1d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: usra1d:
-;CHECK: usra {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp3 = lshr <1 x i64> %tmp1, <i64 1>
-        %tmp4 = load <1 x i64>, ptr %B
-        %tmp5 = add <1 x i64> %tmp3, %tmp4
-         ret <1 x i64> %tmp5
+; CHECK-LABEL: usra1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    usra d0, d1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp3 = lshr <1 x i64> %tmp1, <i64 1>
+  %tmp4 = load <1 x i64>, ptr %B
+  %tmp5 = add <1 x i64> %tmp3, %tmp4
+  ret <1 x i64> %tmp5
 }
 
 define <8 x i8> @ssra8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ssra8b:
-;CHECK: ssra.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp3 = ashr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp4 = load <8 x i8>, ptr %B
-        %tmp5 = add <8 x i8> %tmp3, %tmp4
-        ret <8 x i8> %tmp5
+; CHECK-LABEL: ssra8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    ssra.8b v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp3 = ashr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %tmp4 = load <8 x i8>, ptr %B
+  %tmp5 = add <8 x i8> %tmp3, %tmp4
+  ret <8 x i8> %tmp5
 }
 
 define <4 x i16> @ssra4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ssra4h:
-;CHECK: ssra.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp3 = ashr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
-        %tmp4 = load <4 x i16>, ptr %B
-        %tmp5 = add <4 x i16> %tmp3, %tmp4
-        ret <4 x i16> %tmp5
+; CHECK-LABEL: ssra4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    ssra.4h v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp3 = ashr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
+  %tmp4 = load <4 x i16>, ptr %B
+  %tmp5 = add <4 x i16> %tmp3, %tmp4
+  ret <4 x i16> %tmp5
 }
 
 define <2 x i32> @ssra2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ssra2s:
-;CHECK: ssra.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp3 = ashr <2 x i32> %tmp1, <i32 1, i32 1>
-        %tmp4 = load <2 x i32>, ptr %B
-        %tmp5 = add <2 x i32> %tmp3, %tmp4
-        ret <2 x i32> %tmp5
+; CHECK-LABEL: ssra2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    ssra.2s v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp3 = ashr <2 x i32> %tmp1, <i32 1, i32 1>
+  %tmp4 = load <2 x i32>, ptr %B
+  %tmp5 = add <2 x i32> %tmp3, %tmp4
+  ret <2 x i32> %tmp5
 }
 
 define <16 x i8> @ssra16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ssra16b:
-;CHECK: ssra.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp3 = ashr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp4 = load <16 x i8>, ptr %B
-        %tmp5 = add <16 x i8> %tmp3, %tmp4
-         ret <16 x i8> %tmp5
+; CHECK-LABEL: ssra16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ssra.16b v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp3 = ashr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %tmp4 = load <16 x i8>, ptr %B
+  %tmp5 = add <16 x i8> %tmp3, %tmp4
+  ret <16 x i8> %tmp5
 }
 
 define <8 x i16> @ssra8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ssra8h:
-;CHECK: ssra.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp3 = ashr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        %tmp4 = load <8 x i16>, ptr %B
-        %tmp5 = add <8 x i16> %tmp3, %tmp4
-         ret <8 x i16> %tmp5
+; CHECK-LABEL: ssra8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ssra.8h v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp3 = ashr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %tmp4 = load <8 x i16>, ptr %B
+  %tmp5 = add <8 x i16> %tmp3, %tmp4
+  ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @ssra4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ssra4s:
-;CHECK: ssra.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp3 = ashr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
-        %tmp4 = load <4 x i32>, ptr %B
-        %tmp5 = add <4 x i32> %tmp3, %tmp4
-         ret <4 x i32> %tmp5
+; CHECK-LABEL: ssra4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ssra.4s v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp3 = ashr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
+  %tmp4 = load <4 x i32>, ptr %B
+  %tmp5 = add <4 x i32> %tmp3, %tmp4
+  ret <4 x i32> %tmp5
 }
 
 define <2 x i64> @ssra2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: ssra2d:
-;CHECK: ssra.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp3 = ashr <2 x i64> %tmp1, <i64 1, i64 1>
-        %tmp4 = load <2 x i64>, ptr %B
-        %tmp5 = add <2 x i64> %tmp3, %tmp4
-         ret <2 x i64> %tmp5
+; CHECK-LABEL: ssra2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ssra.2d v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp3 = ashr <2 x i64> %tmp1, <i64 1, i64 1>
+  %tmp4 = load <2 x i64>, ptr %B
+  %tmp5 = add <2 x i64> %tmp3, %tmp4
+  ret <2 x i64> %tmp5
 }
 
 define <8 x i8> @shr_orr8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shr_orr8b:
-;CHECK: shr.8b v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.8b
-;CHECK-NEXT: ret
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp4 = load <8 x i8>, ptr %B
-        %tmp3 = lshr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp5 = or <8 x i8> %tmp3, %tmp4
-        ret <8 x i8> %tmp5
+; CHECK-LABEL: shr_orr8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    ushr.8b v0, v0, #1
+; CHECK-NEXT:    orr.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp4 = load <8 x i8>, ptr %B
+  %tmp3 = lshr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %tmp5 = or <8 x i8> %tmp3, %tmp4
+  ret <8 x i8> %tmp5
 }
 
 define <4 x i16> @shr_orr4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shr_orr4h:
-;CHECK: shr.4h v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.8b
-;CHECK-NEXT: ret
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp4 = load <4 x i16>, ptr %B
-        %tmp3 = lshr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
-        %tmp5 = or <4 x i16> %tmp3, %tmp4
-        ret <4 x i16> %tmp5
+; CHECK-LABEL: shr_orr4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    ushr.4h v0, v0, #1
+; CHECK-NEXT:    orr.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp4 = load <4 x i16>, ptr %B
+  %tmp3 = lshr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
+  %tmp5 = or <4 x i16> %tmp3, %tmp4
+  ret <4 x i16> %tmp5
 }
 
 define <2 x i32> @shr_orr2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shr_orr2s:
-;CHECK: shr.2s v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.8b
-;CHECK-NEXT: ret
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp4 = load <2 x i32>, ptr %B
-        %tmp3 = lshr <2 x i32> %tmp1, <i32 1, i32 1>
-        %tmp5 = or <2 x i32> %tmp3, %tmp4
-        ret <2 x i32> %tmp5
+; CHECK-LABEL: shr_orr2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    ushr.2s v0, v0, #1
+; CHECK-NEXT:    orr.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp4 = load <2 x i32>, ptr %B
+  %tmp3 = lshr <2 x i32> %tmp1, <i32 1, i32 1>
+  %tmp5 = or <2 x i32> %tmp3, %tmp4
+  ret <2 x i32> %tmp5
 }
 
 define <16 x i8> @shr_orr16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shr_orr16b:
-;CHECK: shr.16b v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.16b
-;CHECK-NEXT: ret
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp4 = load <16 x i8>, ptr %B
-        %tmp3 = lshr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp5 = or <16 x i8> %tmp3, %tmp4
-         ret <16 x i8> %tmp5
+; CHECK-LABEL: shr_orr16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ushr.16b v0, v0, #1
+; CHECK-NEXT:    orr.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp4 = load <16 x i8>, ptr %B
+  %tmp3 = lshr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %tmp5 = or <16 x i8> %tmp3, %tmp4
+  ret <16 x i8> %tmp5
 }
 
 define <8 x i16> @shr_orr8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shr_orr8h:
-;CHECK: shr.8h v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.16b
-;CHECK-NEXT: ret
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp4 = load <8 x i16>, ptr %B
-        %tmp3 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        %tmp5 = or <8 x i16> %tmp3, %tmp4
-         ret <8 x i16> %tmp5
+; CHECK-LABEL: shr_orr8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ushr.8h v0, v0, #1
+; CHECK-NEXT:    orr.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp4 = load <8 x i16>, ptr %B
+  %tmp3 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %tmp5 = or <8 x i16> %tmp3, %tmp4
+  ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @shr_orr4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shr_orr4s:
-;CHECK: shr.4s v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.16b
-;CHECK-NEXT: ret
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp4 = load <4 x i32>, ptr %B
-        %tmp3 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
-        %tmp5 = or <4 x i32> %tmp3, %tmp4
-         ret <4 x i32> %tmp5
+; CHECK-LABEL: shr_orr4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ushr.4s v0, v0, #1
+; CHECK-NEXT:    orr.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp4 = load <4 x i32>, ptr %B
+  %tmp3 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
+  %tmp5 = or <4 x i32> %tmp3, %tmp4
+  ret <4 x i32> %tmp5
 }
 
 define <2 x i64> @shr_orr2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shr_orr2d:
-;CHECK: shr.2d v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.16b
-;CHECK-NEXT: ret
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp4 = load <2 x i64>, ptr %B
-        %tmp3 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
-        %tmp5 = or <2 x i64> %tmp3, %tmp4
-         ret <2 x i64> %tmp5
+; CHECK-LABEL: shr_orr2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ushr.2d v0, v0, #1
+; CHECK-NEXT:    orr.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp4 = load <2 x i64>, ptr %B
+  %tmp3 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
+  %tmp5 = or <2 x i64> %tmp3, %tmp4
+  ret <2 x i64> %tmp5
 }
 
 define <8 x i8> @shl_orr8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shl_orr8b:
-;CHECK: shl.8b v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.8b
-;CHECK-NEXT: ret
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp4 = load <8 x i8>, ptr %B
-        %tmp3 = shl <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp5 = or <8 x i8> %tmp3, %tmp4
-        ret <8 x i8> %tmp5
+; CHECK-LABEL: shl_orr8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    shl.8b v0, v0, #1
+; CHECK-NEXT:    orr.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp4 = load <8 x i8>, ptr %B
+  %tmp3 = shl <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %tmp5 = or <8 x i8> %tmp3, %tmp4
+  ret <8 x i8> %tmp5
 }
 
 define <4 x i16> @shl_orr4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shl_orr4h:
-;CHECK: shl.4h v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.8b
-;CHECK-NEXT: ret
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp4 = load <4 x i16>, ptr %B
-        %tmp3 = shl <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
-        %tmp5 = or <4 x i16> %tmp3, %tmp4
-        ret <4 x i16> %tmp5
+; CHECK-LABEL: shl_orr4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    shl.4h v0, v0, #1
+; CHECK-NEXT:    orr.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp4 = load <4 x i16>, ptr %B
+  %tmp3 = shl <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
+  %tmp5 = or <4 x i16> %tmp3, %tmp4
+  ret <4 x i16> %tmp5
 }
 
 define <2 x i32> @shl_orr2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shl_orr2s:
-;CHECK: shl.2s v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.8b
-;CHECK-NEXT: ret
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp4 = load <2 x i32>, ptr %B
-        %tmp3 = shl <2 x i32> %tmp1, <i32 1, i32 1>
-        %tmp5 = or <2 x i32> %tmp3, %tmp4
-        ret <2 x i32> %tmp5
+; CHECK-LABEL: shl_orr2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    shl.2s v0, v0, #1
+; CHECK-NEXT:    orr.8b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp4 = load <2 x i32>, ptr %B
+  %tmp3 = shl <2 x i32> %tmp1, <i32 1, i32 1>
+  %tmp5 = or <2 x i32> %tmp3, %tmp4
+  ret <2 x i32> %tmp5
 }
 
 define <16 x i8> @shl_orr16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shl_orr16b:
-;CHECK: shl.16b v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.16b
-;CHECK-NEXT: ret
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp4 = load <16 x i8>, ptr %B
-        %tmp3 = shl <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp5 = or <16 x i8> %tmp3, %tmp4
-         ret <16 x i8> %tmp5
+; CHECK-LABEL: shl_orr16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    shl.16b v0, v0, #1
+; CHECK-NEXT:    orr.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp4 = load <16 x i8>, ptr %B
+  %tmp3 = shl <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %tmp5 = or <16 x i8> %tmp3, %tmp4
+  ret <16 x i8> %tmp5
 }
 
 define <8 x i16> @shl_orr8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shl_orr8h:
-;CHECK: shl.8h v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.16b
-;CHECK-NEXT: ret
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp4 = load <8 x i16>, ptr %B
-        %tmp3 = shl <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        %tmp5 = or <8 x i16> %tmp3, %tmp4
-         ret <8 x i16> %tmp5
+; CHECK-LABEL: shl_orr8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    shl.8h v0, v0, #1
+; CHECK-NEXT:    orr.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp4 = load <8 x i16>, ptr %B
+  %tmp3 = shl <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %tmp5 = or <8 x i16> %tmp3, %tmp4
+  ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @shl_orr4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shl_orr4s:
-;CHECK: shl.4s v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.16b
-;CHECK-NEXT: ret
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp4 = load <4 x i32>, ptr %B
-        %tmp3 = shl <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
-        %tmp5 = or <4 x i32> %tmp3, %tmp4
-         ret <4 x i32> %tmp5
+; CHECK-LABEL: shl_orr4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    shl.4s v0, v0, #1
+; CHECK-NEXT:    orr.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp4 = load <4 x i32>, ptr %B
+  %tmp3 = shl <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
+  %tmp5 = or <4 x i32> %tmp3, %tmp4
+  ret <4 x i32> %tmp5
 }
 
 define <2 x i64> @shl_orr2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: shl_orr2d:
-;CHECK: shl.2d v0, {{v[0-9]+}}, #1
-;CHECK-NEXT: orr.16b
-;CHECK-NEXT: ret
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp4 = load <2 x i64>, ptr %B
-        %tmp3 = shl <2 x i64> %tmp1, <i64 1, i64 1>
-        %tmp5 = or <2 x i64> %tmp3, %tmp4
-         ret <2 x i64> %tmp5
+; CHECK-LABEL: shl_orr2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    shl.2d v0, v0, #1
+; CHECK-NEXT:    orr.16b v0, v0, v1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp4 = load <2 x i64>, ptr %B
+  %tmp3 = shl <2 x i64> %tmp1, <i64 1, i64 1>
+  %tmp5 = or <2 x i64> %tmp3, %tmp4
+  ret <2 x i64> %tmp5
 }
 
 define <8 x i16> @shll(<8 x i8> %in) {
 ; CHECK-LABEL: shll:
-; CHECK: shll.8h v0, {{v[0-9]+}}, #8
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shll.8h v0, v0, #8
+; CHECK-NEXT:    ret
   %ext = zext <8 x i8> %in to <8 x i16>
   %res = shl <8 x i16> %ext, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
   ret <8 x i16> %res
 }
 
 define <4 x i32> @shll_high(<8 x i16> %in) {
-; CHECK-LABEL: shll_high
-; CHECK: shll2.4s v0, {{v[0-9]+}}, #16
+; CHECK-LABEL: shll_high:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shll2.4s v0, v0, #16
+; CHECK-NEXT:    ret
   %extract = shufflevector <8 x i16> %in, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %ext = zext <4 x i16> %extract to <4 x i32>
   %res = shl <4 x i32> %ext, <i32 16, i32 16, i32 16, i32 16>
@@ -2390,75 +3322,107 @@ define <4 x i32> @shll_high(<8 x i16> %in) {
 }
 
 define <8 x i8> @sli8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sli8b:
-;CHECK: sli.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, ptr %A
-        %tmp2 = load <8 x i8>, ptr %B
-        %tmp3 = call <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, i32 1)
-        ret <8 x i8> %tmp3
+; CHECK-LABEL: sli8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sli.8b v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, i32 1)
+  ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @sli4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sli4h:
-;CHECK: sli.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, ptr %A
-        %tmp2 = load <4 x i16>, ptr %B
-        %tmp3 = call <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, i32 1)
-        ret <4 x i16> %tmp3
+; CHECK-LABEL: sli4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sli.4h v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, i32 1)
+  ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @sli2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sli2s:
-;CHECK: sli.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, ptr %A
-        %tmp2 = load <2 x i32>, ptr %B
-        %tmp3 = call <2 x i32> @llvm.aarch64.neon.vsli.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, i32 1)
-        ret <2 x i32> %tmp3
+; CHECK-LABEL: sli2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sli.2s v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.vsli.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, i32 1)
+  ret <2 x i32> %tmp3
 }
 
 define <1 x i64> @sli1d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sli1d:
-;CHECK: sli d0, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, ptr %A
-        %tmp2 = load <1 x i64>, ptr %B
-        %tmp3 = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, i32 1)
-        ret <1 x i64> %tmp3
+; CHECK-LABEL: sli1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sli d0, d1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, i32 1)
+  ret <1 x i64> %tmp3
 }
 
 define <16 x i8> @sli16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sli16b:
-;CHECK: sli.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, ptr %A
-        %tmp2 = load <16 x i8>, ptr %B
-        %tmp3 = call <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, i32 1)
-        ret <16 x i8> %tmp3
+; CHECK-LABEL: sli16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sli.16b v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, i32 1)
+  ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @sli8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sli8h:
-;CHECK: sli.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, ptr %A
-        %tmp2 = load <8 x i16>, ptr %B
-        %tmp3 = call <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, i32 1)
-        ret <8 x i16> %tmp3
+; CHECK-LABEL: sli8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sli.8h v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, i32 1)
+  ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @sli4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sli4s:
-;CHECK: sli.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, ptr %A
-        %tmp2 = load <4 x i32>, ptr %B
-        %tmp3 = call <4 x i32> @llvm.aarch64.neon.vsli.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, i32 1)
-        ret <4 x i32> %tmp3
+; CHECK-LABEL: sli4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sli.4s v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.vsli.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, i32 1)
+  ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @sli2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sli2d:
-;CHECK: sli.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, ptr %A
-        %tmp2 = load <2 x i64>, ptr %B
-        %tmp3 = call <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, i32 1)
-        ret <2 x i64> %tmp3
+; CHECK-LABEL: sli2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sli.2d v0, v1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, i32 1)
+  ret <2 x i64> %tmp3
 }
 
 declare <8 x i8>  @llvm.aarch64.neon.vsli.v8i8(<8 x i8>, <8 x i8>, i32) nounwind readnone
@@ -2473,8 +3437,10 @@ declare <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64>, <2 x i64>, i32) nounw
 
 define <1 x i64> @ashr_v1i64(<1 x i64> %a, <1 x i64> %b) {
 ; CHECK-LABEL: ashr_v1i64:
-; CHECK: neg d{{[0-9]+}}, d{{[0-9]+}}
-; CHECK: sshl d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg d1, d1
+; CHECK-NEXT:    sshl d0, d0, d1
+; CHECK-NEXT:    ret
   %c = ashr <1 x i64> %a, %b
   ret <1 x i64> %c
 }
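
As an editorial aside (not part of the committed diff): the saturating-shift cases follow the same ptr-argument shape as the shift/accumulate tests above. A minimal, hypothetical sketch of a uqshl-by-splat-constant test in that style, assuming the existing @llvm.aarch64.neon.uqshl.v8i8 intrinsic signature, would look like:

; Sketch only: saturating unsigned left shift by a splat constant, written in
; the same ptr-argument style as the tests above. The CHECK lines would be
; filled in by utils/update_llc_test_checks.py after running llc.
define <8 x i8> @uqshl8b_constant(ptr %A) nounwind {
  %tmp1 = load <8 x i8>, ptr %A
  %tmp2 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
  ret <8 x i8> %tmp2
}

declare <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8>, <8 x i8>) nounwind readnone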
