[llvm] 5ef89b7 - [ARM][AArch64] Regenerate hadd tests. NFC

David Green via llvm-commits <llvm-commits at lists.llvm.org>
Mon Feb 6 02:54:24 PST 2023


Author: David Green
Date: 2023-02-06T10:54:18Z
New Revision: 5ef89b7f777ad6b75a0ad3a7355e1315cc390662

URL: https://github.com/llvm/llvm-project/commit/5ef89b7f777ad6b75a0ad3a7355e1315cc390662
DIFF: https://github.com/llvm/llvm-project/commit/5ef89b7f777ad6b75a0ad3a7355e1315cc390662.diff

LOG: [ARM][AArch64] Regenerate hadd tests. NFC

This just runs the existing tests through opt -O1, which helps canonicalize
the code and adds additional flags that can be useful for matching.
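
For reference, one plausible way to reproduce this kind of regeneration (the
exact commands are not recorded in the commit, so treat the file names and
steps below as an assumption rather than the author's actual invocation):

    # 1. Canonicalize the test bodies at -O1. Note that opt's textual
    #    output drops ';' comment lines, so the NOTE/RUN header has to be
    #    kept aside and restored by hand afterwards.
    opt -O1 -S llvm/test/CodeGen/AArch64/arm64-vhadd.ll -o arm64-vhadd.canon.ll

    # 2. After restoring the RUN line, refresh the CHECK lines with the
    #    usual update script.
    llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AArch64/arm64-vhadd.ll

This matches what the diff below shows: loads gain explicit align, calls
become tail calls, pointer arguments pick up nocapture/readonly attributes,
and the adds pick up the nsw/nuw flags that the hadd matching can use.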

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/arm64-vhadd.ll
    llvm/test/CodeGen/AArch64/sve2-hadd.ll
    llvm/test/CodeGen/Thumb2/mve-vhadd.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/arm64-vhadd.ll b/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
index bfb1cfbf6cf8c..0f6b653ff076f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
@@ -1,335 +1,332 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @shadd8b(ptr %A, ptr %B) nounwind {
+define <8 x i8> @shadd8b(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: shadd8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    shadd.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = load <8 x i8>, ptr %B
-	%tmp3 = call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i8> %tmp3
+  %tmp1 = load <8 x i8>, ptr %A, align 8
+  %tmp2 = load <8 x i8>, ptr %B, align 8
+  %tmp3 = tail call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @shadd16b(ptr %A, ptr %B) nounwind {
+define <16 x i8> @shadd16b(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: shadd16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    shadd.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, ptr %A
-	%tmp2 = load <16 x i8>, ptr %B
-	%tmp3 = call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-	ret <16 x i8> %tmp3
+  %tmp1 = load <16 x i8>, ptr %A, align 16
+  %tmp2 = load <16 x i8>, ptr %B, align 16
+  %tmp3 = tail call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @shadd4h(ptr %A, ptr %B) nounwind {
+define <4 x i16> @shadd4h(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: shadd4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    shadd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, ptr %A
-	%tmp2 = load <4 x i16>, ptr %B
-	%tmp3 = call <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i16> %tmp3
+  %tmp1 = load <4 x i16>, ptr %A, align 8
+  %tmp2 = load <4 x i16>, ptr %B, align 8
+  %tmp3 = tail call <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @shadd8h(ptr %A, ptr %B) nounwind {
+define <8 x i16> @shadd8h(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: shadd8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    shadd.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, ptr %A
-	%tmp2 = load <8 x i16>, ptr %B
-	%tmp3 = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-	ret <8 x i16> %tmp3
+  %tmp1 = load <8 x i16>, ptr %A, align 16
+  %tmp2 = load <8 x i16>, ptr %B, align 16
+  %tmp3 = tail call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @shadd2s(ptr %A, ptr %B) nounwind {
+define <2 x i32> @shadd2s(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: shadd2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    shadd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, ptr %A
-	%tmp2 = load <2 x i32>, ptr %B
-	%tmp3 = call <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i32> %tmp3
+  %tmp1 = load <2 x i32>, ptr %A, align 8
+  %tmp2 = load <2 x i32>, ptr %B, align 8
+  %tmp3 = tail call <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @shadd4s(ptr %A, ptr %B) nounwind {
+define <4 x i32> @shadd4s(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: shadd4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    shadd.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, ptr %A
-	%tmp2 = load <4 x i32>, ptr %B
-	%tmp3 = call <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-	ret <4 x i32> %tmp3
+  %tmp1 = load <4 x i32>, ptr %A, align 16
+  %tmp2 = load <4 x i32>, ptr %B, align 16
+  %tmp3 = tail call <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
-define <8 x i8> @uhadd8b(ptr %A, ptr %B) nounwind {
+define <8 x i8> @uhadd8b(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: uhadd8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uhadd.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = load <8 x i8>, ptr %B
-	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i8> %tmp3
+  %tmp1 = load <8 x i8>, ptr %A, align 8
+  %tmp2 = load <8 x i8>, ptr %B, align 8
+  %tmp3 = tail call <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @uhadd16b(ptr %A, ptr %B) nounwind {
+define <16 x i8> @uhadd16b(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: uhadd16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uhadd.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, ptr %A
-	%tmp2 = load <16 x i8>, ptr %B
-	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-	ret <16 x i8> %tmp3
+  %tmp1 = load <16 x i8>, ptr %A, align 16
+  %tmp2 = load <16 x i8>, ptr %B, align 16
+  %tmp3 = tail call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @uhadd4h(ptr %A, ptr %B) nounwind {
+define <4 x i16> @uhadd4h(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: uhadd4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uhadd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, ptr %A
-	%tmp2 = load <4 x i16>, ptr %B
-	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i16> %tmp3
+  %tmp1 = load <4 x i16>, ptr %A, align 8
+  %tmp2 = load <4 x i16>, ptr %B, align 8
+  %tmp3 = tail call <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @uhadd8h(ptr %A, ptr %B) nounwind {
+define <8 x i16> @uhadd8h(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: uhadd8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uhadd.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, ptr %A
-	%tmp2 = load <8 x i16>, ptr %B
-	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-	ret <8 x i16> %tmp3
+  %tmp1 = load <8 x i16>, ptr %A, align 16
+  %tmp2 = load <8 x i16>, ptr %B, align 16
+  %tmp3 = tail call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @uhadd2s(ptr %A, ptr %B) nounwind {
+define <2 x i32> @uhadd2s(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: uhadd2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uhadd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, ptr %A
-	%tmp2 = load <2 x i32>, ptr %B
-	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i32> %tmp3
+  %tmp1 = load <2 x i32>, ptr %A, align 8
+  %tmp2 = load <2 x i32>, ptr %B, align 8
+  %tmp3 = tail call <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @uhadd4s(ptr %A, ptr %B) nounwind {
+define <4 x i32> @uhadd4s(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: uhadd4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uhadd.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, ptr %A
-	%tmp2 = load <4 x i32>, ptr %B
-	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-	ret <4 x i32> %tmp3
-}
-
-declare <8 x i8>  @llvm.aarch64.neon.shadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i8>  @llvm.aarch64.neon.uhadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-define <8 x i8> @srhadd8b(ptr %A, ptr %B) nounwind {
+  %tmp1 = load <4 x i32>, ptr %A, align 16
+  %tmp2 = load <4 x i32>, ptr %B, align 16
+  %tmp3 = tail call <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8>, <8 x i8>)
+declare <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16>, <4 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32>, <2 x i32>)
+declare <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8>, <8 x i8>)
+declare <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16>, <4 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32>, <2 x i32>)
+declare <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32>, <4 x i32>)
+
+define <8 x i8> @srhadd8b(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: srhadd8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    srhadd.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = load <8 x i8>, ptr %B
-	%tmp3 = call <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i8> %tmp3
+  %tmp1 = load <8 x i8>, ptr %A, align 8
+  %tmp2 = load <8 x i8>, ptr %B, align 8
+  %tmp3 = tail call <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @srhadd16b(ptr %A, ptr %B) nounwind {
+define <16 x i8> @srhadd16b(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: srhadd16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    srhadd.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, ptr %A
-	%tmp2 = load <16 x i8>, ptr %B
-	%tmp3 = call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-	ret <16 x i8> %tmp3
+  %tmp1 = load <16 x i8>, ptr %A, align 16
+  %tmp2 = load <16 x i8>, ptr %B, align 16
+  %tmp3 = tail call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @srhadd4h(ptr %A, ptr %B) nounwind {
+define <4 x i16> @srhadd4h(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: srhadd4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    srhadd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, ptr %A
-	%tmp2 = load <4 x i16>, ptr %B
-	%tmp3 = call <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i16> %tmp3
+  %tmp1 = load <4 x i16>, ptr %A, align 8
+  %tmp2 = load <4 x i16>, ptr %B, align 8
+  %tmp3 = tail call <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @srhadd8h(ptr %A, ptr %B) nounwind {
+define <8 x i16> @srhadd8h(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: srhadd8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    srhadd.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, ptr %A
-	%tmp2 = load <8 x i16>, ptr %B
-	%tmp3 = call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-	ret <8 x i16> %tmp3
+  %tmp1 = load <8 x i16>, ptr %A, align 16
+  %tmp2 = load <8 x i16>, ptr %B, align 16
+  %tmp3 = tail call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @srhadd2s(ptr %A, ptr %B) nounwind {
+define <2 x i32> @srhadd2s(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: srhadd2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    srhadd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, ptr %A
-	%tmp2 = load <2 x i32>, ptr %B
-	%tmp3 = call <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i32> %tmp3
+  %tmp1 = load <2 x i32>, ptr %A, align 8
+  %tmp2 = load <2 x i32>, ptr %B, align 8
+  %tmp3 = tail call <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @srhadd4s(ptr %A, ptr %B) nounwind {
+define <4 x i32> @srhadd4s(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: srhadd4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    srhadd.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, ptr %A
-	%tmp2 = load <4 x i32>, ptr %B
-	%tmp3 = call <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-	ret <4 x i32> %tmp3
+  %tmp1 = load <4 x i32>, ptr %A, align 16
+  %tmp2 = load <4 x i32>, ptr %B, align 16
+  %tmp3 = tail call <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
-define <8 x i8> @urhadd8b(ptr %A, ptr %B) nounwind {
+define <8 x i8> @urhadd8b(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: urhadd8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    urhadd.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = load <8 x i8>, ptr %B
-	%tmp3 = call <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i8> %tmp3
+  %tmp1 = load <8 x i8>, ptr %A, align 8
+  %tmp2 = load <8 x i8>, ptr %B, align 8
+  %tmp3 = tail call <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+  ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @urhadd16b(ptr %A, ptr %B) nounwind {
+define <16 x i8> @urhadd16b(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: urhadd16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    urhadd.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, ptr %A
-	%tmp2 = load <16 x i8>, ptr %B
-	%tmp3 = call <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-	ret <16 x i8> %tmp3
+  %tmp1 = load <16 x i8>, ptr %A, align 16
+  %tmp2 = load <16 x i8>, ptr %B, align 16
+  %tmp3 = tail call <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+  ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @urhadd4h(ptr %A, ptr %B) nounwind {
+define <4 x i16> @urhadd4h(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: urhadd4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    urhadd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, ptr %A
-	%tmp2 = load <4 x i16>, ptr %B
-	%tmp3 = call <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i16> %tmp3
+  %tmp1 = load <4 x i16>, ptr %A, align 8
+  %tmp2 = load <4 x i16>, ptr %B, align 8
+  %tmp3 = tail call <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+  ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @urhadd8h(ptr %A, ptr %B) nounwind {
+define <8 x i16> @urhadd8h(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: urhadd8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    urhadd.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, ptr %A
-	%tmp2 = load <8 x i16>, ptr %B
-	%tmp3 = call <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-	ret <8 x i16> %tmp3
+  %tmp1 = load <8 x i16>, ptr %A, align 16
+  %tmp2 = load <8 x i16>, ptr %B, align 16
+  %tmp3 = tail call <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+  ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @urhadd2s(ptr %A, ptr %B) nounwind {
+define <2 x i32> @urhadd2s(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: urhadd2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    urhadd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, ptr %A
-	%tmp2 = load <2 x i32>, ptr %B
-	%tmp3 = call <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i32> %tmp3
+  %tmp1 = load <2 x i32>, ptr %A, align 8
+  %tmp2 = load <2 x i32>, ptr %B, align 8
+  %tmp3 = tail call <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+  ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @urhadd4s(ptr %A, ptr %B) nounwind {
+define <4 x i32> @urhadd4s(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: urhadd4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    urhadd.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, ptr %A
-	%tmp2 = load <4 x i32>, ptr %B
-	%tmp3 = call <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-	ret <4 x i32> %tmp3
+  %tmp1 = load <4 x i32>, ptr %A, align 16
+  %tmp2 = load <4 x i32>, ptr %B, align 16
+  %tmp3 = tail call <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+  ret <4 x i32> %tmp3
 }
 
-define void @testLowerToSRHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
+define void @testLowerToSRHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSRHADD8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    srhadd.8b v0, v0, v1
@@ -337,15 +334,15 @@ define void @testLowerToSRHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) noun
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <8 x i8> %src1 to <8 x i16>
   %sextsrc2 = sext <8 x i8> %src2 to <8 x i16>
-  %add1 = add <8 x i16> %sextsrc1, %sextsrc2
-  %add2 = add <8 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add1 = add nsw <8 x i16> %sextsrc1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add2 = add nsw <8 x i16> %add1, %sextsrc2
   %resulti16 = lshr <8 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
   store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSRHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
+define void @testLowerToSRHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSRHADD4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    srhadd.4h v0, v0, v1
@@ -353,15 +350,15 @@ define void @testLowerToSRHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) no
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <4 x i16> %src1 to <4 x i32>
   %sextsrc2 = sext <4 x i16> %src2 to <4 x i32>
-  %add1 = add <4 x i32> %sextsrc1, %sextsrc2
-  %add2 = add <4 x i32> %add1, <i32 1, i32 1, i32 1, i32 1>
+  %add1 = add nsw <4 x i32> %sextsrc1, <i32 1, i32 1, i32 1, i32 1>
+  %add2 = add nsw <4 x i32> %add1, %sextsrc2
   %resulti16 = lshr <4 x i32> %add2, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %resulti16 to <4 x i16>
   store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSRHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
+define void @testLowerToSRHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSRHADD2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    srhadd.2s v0, v0, v1
@@ -369,15 +366,15 @@ define void @testLowerToSRHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) no
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <2 x i32> %src1 to <2 x i64>
   %sextsrc2 = sext <2 x i32> %src2 to <2 x i64>
-  %add1 = add <2 x i64> %sextsrc1, %sextsrc2
-  %add2 = add <2 x i64> %add1, <i64 1, i64 1>
+  %add1 = add nsw <2 x i64> %sextsrc1, <i64 1, i64 1>
+  %add2 = add nsw <2 x i64> %add1, %sextsrc2
   %resulti16 = lshr <2 x i64> %add2, <i64 1, i64 1>
   %result = trunc <2 x i64> %resulti16 to <2 x i32>
   store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSRHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
+define void @testLowerToSRHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSRHADD16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    srhadd.16b v0, v0, v1
@@ -385,15 +382,15 @@ define void @testLowerToSRHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) n
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <16 x i8> %src1 to <16 x i16>
   %sextsrc2 = sext <16 x i8> %src2 to <16 x i16>
-  %add1 = add <16 x i16> %sextsrc1, %sextsrc2
-  %add2 = add <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add1 = add nsw <16 x i16> %sextsrc1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add2 = add nsw <16 x i16> %add1, %sextsrc2
   %resulti16 = lshr <16 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
   store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSRHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
+define void @testLowerToSRHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSRHADD8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    srhadd.8h v0, v0, v1
@@ -401,15 +398,15 @@ define void @testLowerToSRHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) no
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <8 x i16> %src1 to <8 x i32>
   %sextsrc2 = sext <8 x i16> %src2 to <8 x i32>
-  %add1 = add <8 x i32> %sextsrc1, %sextsrc2
-  %add2 = add <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %add1 = add nsw <8 x i32> %sextsrc1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %add2 = add nsw <8 x i32> %add1, %sextsrc2
   %resulti16 = lshr <8 x i32> %add2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %resulti16 to <8 x i16>
   store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSRHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
+define void @testLowerToSRHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSRHADD4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    srhadd.4s v0, v0, v1
@@ -417,15 +414,15 @@ define void @testLowerToSRHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) no
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <4 x i32> %src1 to <4 x i64>
   %sextsrc2 = sext <4 x i32> %src2 to <4 x i64>
-  %add1 = add <4 x i64> %sextsrc1, %sextsrc2
-  %add2 = add <4 x i64> %add1, <i64 1, i64 1, i64 1, i64 1>
+  %add1 = add nsw <4 x i64> %sextsrc1, <i64 1, i64 1, i64 1, i64 1>
+  %add2 = add nsw <4 x i64> %add1, %sextsrc2
   %resulti16 = lshr <4 x i64> %add2, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %resulti16 to <4 x i32>
   store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
+define void @testLowerToSHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSHADD8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.8b v0, v0, v1
@@ -433,14 +430,14 @@ define void @testLowerToSHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounw
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <8 x i8> %src1 to <8 x i16>
   %sextsrc2 = sext <8 x i8> %src2 to <8 x i16>
-  %add = add <8 x i16> %sextsrc1, %sextsrc2
+  %add = add nsw <8 x i16> %sextsrc1, %sextsrc2
   %resulti16 = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
   store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
+define void @testLowerToSHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSHADD4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.4h v0, v0, v1
@@ -448,14 +445,14 @@ define void @testLowerToSHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nou
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <4 x i16> %src1 to <4 x i32>
   %sextsrc2 = sext <4 x i16> %src2 to <4 x i32>
-  %add = add <4 x i32> %sextsrc1, %sextsrc2
+  %add = add nsw <4 x i32> %sextsrc1, %sextsrc2
   %resulti16 = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %resulti16 to <4 x i16>
   store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
+define void @testLowerToSHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSHADD2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.2s v0, v0, v1
@@ -463,14 +460,14 @@ define void @testLowerToSHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nou
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <2 x i32> %src1 to <2 x i64>
   %sextsrc2 = sext <2 x i32> %src2 to <2 x i64>
-  %add = add <2 x i64> %sextsrc1, %sextsrc2
+  %add = add nsw <2 x i64> %sextsrc1, %sextsrc2
   %resulti16 = lshr <2 x i64> %add, <i64 1, i64 1>
   %result = trunc <2 x i64> %resulti16 to <2 x i32>
   store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
+define void @testLowerToSHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSHADD16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.16b v0, v0, v1
@@ -478,14 +475,14 @@ define void @testLowerToSHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) no
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <16 x i8> %src1 to <16 x i16>
   %sextsrc2 = sext <16 x i8> %src2 to <16 x i16>
-  %add = add <16 x i16> %sextsrc1, %sextsrc2
+  %add = add nsw <16 x i16> %sextsrc1, %sextsrc2
   %resulti16 = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
   store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
+define void @testLowerToSHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSHADD8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.8h v0, v0, v1
@@ -493,14 +490,14 @@ define void @testLowerToSHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nou
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <8 x i16> %src1 to <8 x i32>
   %sextsrc2 = sext <8 x i16> %src2 to <8 x i32>
-  %add = add <8 x i32> %sextsrc1, %sextsrc2
+  %add = add nsw <8 x i32> %sextsrc1, %sextsrc2
   %resulti16 = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %resulti16 to <8 x i16>
   store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
+define void @testLowerToSHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSHADD4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.4s v0, v0, v1
@@ -508,14 +505,14 @@ define void @testLowerToSHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nou
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <4 x i32> %src1 to <4 x i64>
   %sextsrc2 = sext <4 x i32> %src2 to <4 x i64>
-  %add = add <4 x i64> %sextsrc1, %sextsrc2
+  %add = add nsw <4 x i64> %sextsrc1, %sextsrc2
   %resulti16 = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %resulti16 to <4 x i32>
   store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToURHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
+define void @testLowerToURHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToURHADD8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    urhadd.8b v0, v0, v1
@@ -523,15 +520,15 @@ define void @testLowerToURHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) noun
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <8 x i8> %src1 to <8 x i16>
   %zextsrc2 = zext <8 x i8> %src2 to <8 x i16>
-  %add1 = add <8 x i16> %zextsrc1, %zextsrc2
-  %add2 = add <8 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add1 = add nuw nsw <8 x i16> %zextsrc1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add2 = add nuw nsw <8 x i16> %add1, %zextsrc2
   %resulti16 = lshr <8 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
   store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToURHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
+define void @testLowerToURHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToURHADD4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    urhadd.4h v0, v0, v1
@@ -539,15 +536,15 @@ define void @testLowerToURHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) no
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i16> %src1 to <4 x i32>
   %zextsrc2 = zext <4 x i16> %src2 to <4 x i32>
-  %add1 = add <4 x i32> %zextsrc1, %zextsrc2
-  %add2 = add <4 x i32> %add1, <i32 1, i32 1, i32 1, i32 1>
+  %add1 = add nuw nsw <4 x i32> %zextsrc1, <i32 1, i32 1, i32 1, i32 1>
+  %add2 = add nuw nsw <4 x i32> %add1, %zextsrc2
   %resulti16 = lshr <4 x i32> %add2, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %resulti16 to <4 x i16>
   store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToURHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
+define void @testLowerToURHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToURHADD2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    urhadd.2s v0, v0, v1
@@ -555,15 +552,15 @@ define void @testLowerToURHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) no
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <2 x i32> %src1 to <2 x i64>
   %zextsrc2 = zext <2 x i32> %src2 to <2 x i64>
-  %add1 = add <2 x i64> %zextsrc1, %zextsrc2
-  %add2 = add <2 x i64> %add1, <i64 1, i64 1>
+  %add1 = add nuw nsw <2 x i64> %zextsrc1, <i64 1, i64 1>
+  %add2 = add nuw nsw <2 x i64> %add1, %zextsrc2
   %resulti16 = lshr <2 x i64> %add2, <i64 1, i64 1>
   %result = trunc <2 x i64> %resulti16 to <2 x i32>
   store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToURHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
+define void @testLowerToURHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToURHADD16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    urhadd.16b v0, v0, v1
@@ -571,15 +568,15 @@ define void @testLowerToURHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) n
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <16 x i8> %src1 to <16 x i16>
   %zextsrc2 = zext <16 x i8> %src2 to <16 x i16>
-  %add1 = add <16 x i16> %zextsrc1, %zextsrc2
-  %add2 = add <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add1 = add nuw nsw <16 x i16> %zextsrc1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add2 = add nuw nsw <16 x i16> %add1, %zextsrc2
   %resulti16 = lshr <16 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
   store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToURHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
+define void @testLowerToURHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToURHADD8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    urhadd.8h v0, v0, v1
@@ -587,15 +584,15 @@ define void @testLowerToURHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) no
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
   %zextsrc2 = zext <8 x i16> %src2 to <8 x i32>
-  %add1 = add <8 x i32> %zextsrc1, %zextsrc2
-  %add2 = add <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %add1 = add nuw nsw <8 x i32> %zextsrc1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %add2 = add nuw nsw <8 x i32> %add1, %zextsrc2
   %resulti16 = lshr <8 x i32> %add2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %resulti16 to <8 x i16>
   store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToURHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
+define void @testLowerToURHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToURHADD4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    urhadd.4s v0, v0, v1
@@ -603,15 +600,15 @@ define void @testLowerToURHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) no
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i32> %src1 to <4 x i64>
   %zextsrc2 = zext <4 x i32> %src2 to <4 x i64>
-  %add1 = add <4 x i64> %zextsrc1, %zextsrc2
-  %add2 = add <4 x i64> %add1, <i64 1, i64 1, i64 1, i64 1>
+  %add1 = add nuw nsw <4 x i64> %zextsrc1, <i64 1, i64 1, i64 1, i64 1>
+  %add2 = add nuw nsw <4 x i64> %add1, %zextsrc2
   %resulti16 = lshr <4 x i64> %add2, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %resulti16 to <4 x i32>
   store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToUHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
+define void @testLowerToUHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToUHADD8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.8b v0, v0, v1
@@ -619,14 +616,14 @@ define void @testLowerToUHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounw
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <8 x i8> %src1 to <8 x i16>
   %zextsrc2 = zext <8 x i8> %src2 to <8 x i16>
-  %add = add <8 x i16> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <8 x i16> %zextsrc1, %zextsrc2
   %resulti16 = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
   store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToUHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
+define void @testLowerToUHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToUHADD4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.4h v0, v0, v1
@@ -634,14 +631,14 @@ define void @testLowerToUHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nou
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i16> %src1 to <4 x i32>
   %zextsrc2 = zext <4 x i16> %src2 to <4 x i32>
-  %add = add <4 x i32> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <4 x i32> %zextsrc1, %zextsrc2
   %resulti16 = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %resulti16 to <4 x i16>
   store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToUHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
+define void @testLowerToUHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToUHADD2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.2s v0, v0, v1
@@ -649,14 +646,14 @@ define void @testLowerToUHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nou
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <2 x i32> %src1 to <2 x i64>
   %zextsrc2 = zext <2 x i32> %src2 to <2 x i64>
-  %add = add <2 x i64> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <2 x i64> %zextsrc1, %zextsrc2
   %resulti16 = lshr <2 x i64> %add, <i64 1, i64 1>
   %result = trunc <2 x i64> %resulti16 to <2 x i32>
   store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToUHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
+define void @testLowerToUHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToUHADD16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.16b v0, v0, v1
@@ -664,14 +661,14 @@ define void @testLowerToUHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) no
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <16 x i8> %src1 to <16 x i16>
   %zextsrc2 = zext <16 x i8> %src2 to <16 x i16>
-  %add = add <16 x i16> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <16 x i16> %zextsrc1, %zextsrc2
   %resulti16 = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
   store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToUHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
+define void @testLowerToUHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToUHADD8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.8h v0, v0, v1
@@ -679,14 +676,14 @@ define void @testLowerToUHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nou
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
   %zextsrc2 = zext <8 x i16> %src2 to <8 x i32>
-  %add = add <8 x i32> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <8 x i32> %zextsrc1, %zextsrc2
   %resulti16 = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %resulti16 to <8 x i16>
   store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToUHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
+define void @testLowerToUHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToUHADD4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.4s v0, v0, v1
@@ -694,15 +691,14 @@ define void @testLowerToUHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nou
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i32> %src1 to <4 x i64>
   %zextsrc2 = zext <4 x i32> %src2 to <4 x i64>
-  %add = add <4 x i64> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <4 x i64> %zextsrc1, %zextsrc2
   %resulti16 = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %resulti16 to <4 x i32>
   store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-
-define <4 x i32> @hadd16_sext_asr(<4 x i16> %src1, <4 x i16> %src2) nounwind {
+define <4 x i32> @hadd16_sext_asr(<4 x i16> %src1, <4 x i16> %src2) {
 ; CHECK-LABEL: hadd16_sext_asr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.4h v0, v0, v1
@@ -710,12 +706,12 @@ define <4 x i32> @hadd16_sext_asr(<4 x i16> %src1, <4 x i16> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = sext <4 x i16> %src1 to <4 x i32>
   %zextsrc2 = sext <4 x i16> %src2 to <4 x i32>
-  %add = add <4 x i32> %zextsrc1, %zextsrc2
+  %add = add nsw <4 x i32> %zextsrc1, %zextsrc2
   %resulti16 = ashr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %resulti16
 }
 
-define <4 x i32> @hadd16_zext_asr(<4 x i16> %src1, <4 x i16> %src2) nounwind {
+define <4 x i32> @hadd16_zext_asr(<4 x i16> %src1, <4 x i16> %src2) {
 ; CHECK-LABEL: hadd16_zext_asr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.4h v0, v0, v1
@@ -723,12 +719,12 @@ define <4 x i32> @hadd16_zext_asr(<4 x i16> %src1, <4 x i16> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i16> %src1 to <4 x i32>
   %zextsrc2 = zext <4 x i16> %src2 to <4 x i32>
-  %add = add <4 x i32> %zextsrc1, %zextsrc2
-  %resulti16 = ashr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
+  %add = add nuw nsw <4 x i32> %zextsrc1, %zextsrc2
+  %resulti16 = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %resulti16
 }
 
-define <4 x i32> @hadd16_sext_lsr(<4 x i16> %src1, <4 x i16> %src2) nounwind {
+define <4 x i32> @hadd16_sext_lsr(<4 x i16> %src1, <4 x i16> %src2) {
 ; CHECK-LABEL: hadd16_sext_lsr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    saddl.4s v0, v0, v1
@@ -736,12 +732,12 @@ define <4 x i32> @hadd16_sext_lsr(<4 x i16> %src1, <4 x i16> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = sext <4 x i16> %src1 to <4 x i32>
   %zextsrc2 = sext <4 x i16> %src2 to <4 x i32>
-  %add = add <4 x i32> %zextsrc1, %zextsrc2
+  %add = add nsw <4 x i32> %zextsrc1, %zextsrc2
   %resulti16 = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %resulti16
 }
 
-define <4 x i32> @hadd16_zext_lsr(<4 x i16> %src1, <4 x i16> %src2) nounwind {
+define <4 x i32> @hadd16_zext_lsr(<4 x i16> %src1, <4 x i16> %src2) {
 ; CHECK-LABEL: hadd16_zext_lsr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.4h v0, v0, v1
@@ -749,14 +745,12 @@ define <4 x i32> @hadd16_zext_lsr(<4 x i16> %src1, <4 x i16> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i16> %src1 to <4 x i32>
   %zextsrc2 = zext <4 x i16> %src2 to <4 x i32>
-  %add = add <4 x i32> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <4 x i32> %zextsrc1, %zextsrc2
   %resulti16 = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %resulti16
 }
 
-
-
-define <4 x i64> @hadd32_sext_asr(<4 x i32> %src1, <4 x i32> %src2) nounwind {
+define <4 x i64> @hadd32_sext_asr(<4 x i32> %src1, <4 x i32> %src2) {
 ; CHECK-LABEL: hadd32_sext_asr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.4s v0, v0, v1
@@ -765,12 +759,12 @@ define <4 x i64> @hadd32_sext_asr(<4 x i32> %src1, <4 x i32> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = sext <4 x i32> %src1 to <4 x i64>
   %zextsrc2 = sext <4 x i32> %src2 to <4 x i64>
-  %add = add <4 x i64> %zextsrc1, %zextsrc2
+  %add = add nsw <4 x i64> %zextsrc1, %zextsrc2
   %resulti32 = ashr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
   ret <4 x i64> %resulti32
 }
 
-define <4 x i64> @hadd32_zext_asr(<4 x i32> %src1, <4 x i32> %src2) nounwind {
+define <4 x i64> @hadd32_zext_asr(<4 x i32> %src1, <4 x i32> %src2) {
 ; CHECK-LABEL: hadd32_zext_asr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.4s v0, v0, v1
@@ -779,12 +773,12 @@ define <4 x i64> @hadd32_zext_asr(<4 x i32> %src1, <4 x i32> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i32> %src1 to <4 x i64>
   %zextsrc2 = zext <4 x i32> %src2 to <4 x i64>
-  %add = add <4 x i64> %zextsrc1, %zextsrc2
-  %resulti32 = ashr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
+  %add = add nuw nsw <4 x i64> %zextsrc1, %zextsrc2
+  %resulti32 = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
   ret <4 x i64> %resulti32
 }
 
-define <4 x i64> @hadd32_sext_lsr(<4 x i32> %src1, <4 x i32> %src2) nounwind {
+define <4 x i64> @hadd32_sext_lsr(<4 x i32> %src1, <4 x i32> %src2) {
 ; CHECK-LABEL: hadd32_sext_lsr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    saddl.2d v2, v0, v1
@@ -794,12 +788,12 @@ define <4 x i64> @hadd32_sext_lsr(<4 x i32> %src1, <4 x i32> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = sext <4 x i32> %src1 to <4 x i64>
   %zextsrc2 = sext <4 x i32> %src2 to <4 x i64>
-  %add = add <4 x i64> %zextsrc1, %zextsrc2
+  %add = add nsw <4 x i64> %zextsrc1, %zextsrc2
   %resulti32 = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
   ret <4 x i64> %resulti32
 }
 
-define <4 x i64> @hadd32_zext_lsr(<4 x i32> %src1, <4 x i32> %src2) nounwind {
+define <4 x i64> @hadd32_zext_lsr(<4 x i32> %src1, <4 x i32> %src2) {
 ; CHECK-LABEL: hadd32_zext_lsr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.4s v0, v0, v1
@@ -808,13 +802,12 @@ define <4 x i64> @hadd32_zext_lsr(<4 x i32> %src1, <4 x i32> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i32> %src1 to <4 x i64>
   %zextsrc2 = zext <4 x i32> %src2 to <4 x i64>
-  %add = add <4 x i64> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <4 x i64> %zextsrc1, %zextsrc2
   %resulti32 = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
   ret <4 x i64> %resulti32
 }
 
-
-define <4 x i16> @hadd8_sext_asr(<4 x i8> %src1, <4 x i8> %src2) nounwind {
+define <4 x i16> @hadd8_sext_asr(<4 x i8> %src1, <4 x i8> %src2) {
 ; CHECK-LABEL: hadd8_sext_asr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shl.4h v0, v0, #8
@@ -825,12 +818,12 @@ define <4 x i16> @hadd8_sext_asr(<4 x i8> %src1, <4 x i8> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = sext <4 x i8> %src1 to <4 x i16>
   %zextsrc2 = sext <4 x i8> %src2 to <4 x i16>
-  %add = add <4 x i16> %zextsrc1, %zextsrc2
+  %add = add nsw <4 x i16> %zextsrc1, %zextsrc2
   %resulti8 = ashr <4 x i16> %add, <i16 1, i16 1, i16 1, i16 1>
   ret <4 x i16> %resulti8
 }
 
-define <4 x i16> @hadd8_zext_asr(<4 x i8> %src1, <4 x i8> %src2) nounwind {
+define <4 x i16> @hadd8_zext_asr(<4 x i8> %src1, <4 x i8> %src2) {
 ; CHECK-LABEL: hadd8_zext_asr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    bic.4h v0, #255, lsl #8
@@ -840,12 +833,12 @@ define <4 x i16> @hadd8_zext_asr(<4 x i8> %src1, <4 x i8> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i8> %src1 to <4 x i16>
   %zextsrc2 = zext <4 x i8> %src2 to <4 x i16>
-  %add = add <4 x i16> %zextsrc1, %zextsrc2
-  %resulti8 = ashr <4 x i16> %add, <i16 1, i16 1, i16 1, i16 1>
+  %add = add nuw nsw <4 x i16> %zextsrc1, %zextsrc2
+  %resulti8 = lshr <4 x i16> %add, <i16 1, i16 1, i16 1, i16 1>
   ret <4 x i16> %resulti8
 }
 
-define <4 x i16> @hadd8_sext_lsr(<4 x i8> %src1, <4 x i8> %src2) nounwind {
+define <4 x i16> @hadd8_sext_lsr(<4 x i8> %src1, <4 x i8> %src2) {
 ; CHECK-LABEL: hadd8_sext_lsr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shl.4h v0, v0, #8
@@ -856,12 +849,12 @@ define <4 x i16> @hadd8_sext_lsr(<4 x i8> %src1, <4 x i8> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = sext <4 x i8> %src1 to <4 x i16>
   %zextsrc2 = sext <4 x i8> %src2 to <4 x i16>
-  %add = add <4 x i16> %zextsrc1, %zextsrc2
+  %add = add nsw <4 x i16> %zextsrc1, %zextsrc2
   %resulti8 = lshr <4 x i16> %add, <i16 1, i16 1, i16 1, i16 1>
   ret <4 x i16> %resulti8
 }
 
-define <4 x i16> @hadd8_zext_lsr(<4 x i8> %src1, <4 x i8> %src2) nounwind {
+define <4 x i16> @hadd8_zext_lsr(<4 x i8> %src1, <4 x i8> %src2) {
 ; CHECK-LABEL: hadd8_zext_lsr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    bic.4h v0, #255, lsl #8
@@ -871,14 +864,12 @@ define <4 x i16> @hadd8_zext_lsr(<4 x i8> %src1, <4 x i8> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i8> %src1 to <4 x i16>
   %zextsrc2 = zext <4 x i8> %src2 to <4 x i16>
-  %add = add <4 x i16> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <4 x i16> %zextsrc1, %zextsrc2
   %resulti8 = lshr <4 x i16> %add, <i16 1, i16 1, i16 1, i16 1>
   ret <4 x i16> %resulti8
 }
 
-
-
-define void @testLowerToSHADD8b_c(<8 x i8> %src1, ptr %dest) nounwind {
+define void @testLowerToSHADD8b_c(<8 x i8> %src1, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSHADD8b_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.8b v1, #10
@@ -886,14 +877,14 @@ define void @testLowerToSHADD8b_c(<8 x i8> %src1, ptr %dest) nounwind {
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <8 x i8> %src1 to <8 x i16>
-  %add = add <8 x i16> %sextsrc1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
+  %add = add nsw <8 x i16> %sextsrc1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
   %resulti16 = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
   store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSHADD4h_c(<4 x i16> %src1, ptr %dest) nounwind {
+define void @testLowerToSHADD4h_c(<4 x i16> %src1, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSHADD4h_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.4h v1, #10
@@ -901,14 +892,14 @@ define void @testLowerToSHADD4h_c(<4 x i16> %src1, ptr %dest) nounwind {
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <4 x i16> %src1 to <4 x i32>
-  %add = add <4 x i32> %sextsrc1, <i32 10, i32 10, i32 10, i32 10>
+  %add = add nsw <4 x i32> %sextsrc1, <i32 10, i32 10, i32 10, i32 10>
   %resulti16 = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %resulti16 to <4 x i16>
   store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSHADD2s_c(<2 x i32> %src1, ptr %dest) nounwind {
+define void @testLowerToSHADD2s_c(<2 x i32> %src1, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSHADD2s_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.2s v1, #10
@@ -916,14 +907,14 @@ define void @testLowerToSHADD2s_c(<2 x i32> %src1, ptr %dest) nounwind {
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <2 x i32> %src1 to <2 x i64>
-  %add = add <2 x i64> %sextsrc1, <i64 10, i64 10>
+  %add = add nsw <2 x i64> %sextsrc1, <i64 10, i64 10>
   %resulti16 = lshr <2 x i64> %add, <i64 1, i64 1>
   %result = trunc <2 x i64> %resulti16 to <2 x i32>
   store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSHADD16b_c(<16 x i8> %src1, ptr %dest) nounwind {
+define void @testLowerToSHADD16b_c(<16 x i8> %src1, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSHADD16b_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.16b v1, #10
@@ -931,14 +922,14 @@ define void @testLowerToSHADD16b_c(<16 x i8> %src1, ptr %dest) nounwind {
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <16 x i8> %src1 to <16 x i16>
-  %add = add <16 x i16> %sextsrc1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
+  %add = add nsw <16 x i16> %sextsrc1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
   %resulti16 = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
   store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSHADD8h_c(<8 x i16> %src1, ptr %dest) nounwind {
+define void @testLowerToSHADD8h_c(<8 x i16> %src1, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSHADD8h_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.8h v1, #10
@@ -946,14 +937,14 @@ define void @testLowerToSHADD8h_c(<8 x i16> %src1, ptr %dest) nounwind {
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <8 x i16> %src1 to <8 x i32>
-  %add = add <8 x i32> %sextsrc1, <i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10>
+  %add = add nsw <8 x i32> %sextsrc1, <i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10>
   %resulti16 = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %resulti16 to <8 x i16>
   store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSHADD4s_c(<4 x i32> %src1, ptr %dest) nounwind {
+define void @testLowerToSHADD4s_c(<4 x i32> %src1, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToSHADD4s_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.4s v1, #10
@@ -961,14 +952,14 @@ define void @testLowerToSHADD4s_c(<4 x i32> %src1, ptr %dest) nounwind {
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %sextsrc1 = sext <4 x i32> %src1 to <4 x i64>
-  %add = add <4 x i64> %sextsrc1, <i64 10, i64 10, i64 10, i64 10>
+  %add = add nsw <4 x i64> %sextsrc1, <i64 10, i64 10, i64 10, i64 10>
   %resulti16 = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %resulti16 to <4 x i32>
   store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToUHADD8b_c(<8 x i8> %src1, ptr %dest) nounwind {
+define void @testLowerToUHADD8b_c(<8 x i8> %src1, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToUHADD8b_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.8b v1, #10
@@ -976,14 +967,14 @@ define void @testLowerToUHADD8b_c(<8 x i8> %src1, ptr %dest) nounwind {
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <8 x i8> %src1 to <8 x i16>
-  %add = add <8 x i16> %zextsrc1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
+  %add = add nuw nsw <8 x i16> %zextsrc1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
   %resulti16 = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
   store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToUHADD4h_c(<4 x i16> %src1, ptr %dest) nounwind {
+define void @testLowerToUHADD4h_c(<4 x i16> %src1, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToUHADD4h_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.4h v1, #10
@@ -991,14 +982,14 @@ define void @testLowerToUHADD4h_c(<4 x i16> %src1, ptr %dest) nounwind {
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i16> %src1 to <4 x i32>
-  %add = add <4 x i32> %zextsrc1, <i32 10, i32 10, i32 10, i32 10>
+  %add = add nuw nsw <4 x i32> %zextsrc1, <i32 10, i32 10, i32 10, i32 10>
   %resulti16 = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %resulti16 to <4 x i16>
   store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToUHADD2s_c(<2 x i32> %src1, ptr %dest) nounwind {
+define void @testLowerToUHADD2s_c(<2 x i32> %src1, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToUHADD2s_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.2s v1, #10
@@ -1006,14 +997,14 @@ define void @testLowerToUHADD2s_c(<2 x i32> %src1, ptr %dest) nounwind {
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <2 x i32> %src1 to <2 x i64>
-  %add = add <2 x i64> %zextsrc1, <i64 10, i64 10>
+  %add = add nuw nsw <2 x i64> %zextsrc1, <i64 10, i64 10>
   %resulti16 = lshr <2 x i64> %add, <i64 1, i64 1>
   %result = trunc <2 x i64> %resulti16 to <2 x i32>
   store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToUHADD16b_c(<16 x i8> %src1, ptr %dest) nounwind {
+define void @testLowerToUHADD16b_c(<16 x i8> %src1, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToUHADD16b_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.16b v1, #10
@@ -1021,14 +1012,14 @@ define void @testLowerToUHADD16b_c(<16 x i8> %src1, ptr %dest) nounwind {
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <16 x i8> %src1 to <16 x i16>
-  %add = add <16 x i16> %zextsrc1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
+  %add = add nuw nsw <16 x i16> %zextsrc1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
   %resulti16 = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
   store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToUHADD8h_c(<8 x i16> %src1, ptr %dest) nounwind {
+define void @testLowerToUHADD8h_c(<8 x i16> %src1, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToUHADD8h_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.8h v1, #10
@@ -1036,14 +1027,14 @@ define void @testLowerToUHADD8h_c(<8 x i16> %src1, ptr %dest) nounwind {
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
-  %add = add <8 x i32> %zextsrc1, <i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10>
+  %add = add nuw nsw <8 x i32> %zextsrc1, <i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10>
   %resulti16 = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %resulti16 to <8 x i16>
   store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToUHADD4s_c(<4 x i32> %src1, ptr %dest) nounwind {
+define void @testLowerToUHADD4s_c(<4 x i32> %src1, ptr nocapture writeonly %dest) {
 ; CHECK-LABEL: testLowerToUHADD4s_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.4s v1, #10
@@ -1051,15 +1042,14 @@ define void @testLowerToUHADD4s_c(<4 x i32> %src1, ptr %dest) nounwind {
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i32> %src1 to <4 x i64>
-  %add = add <4 x i64> %zextsrc1, <i64 10, i64 10, i64 10, i64 10>
+  %add = add nuw nsw <4 x i64> %zextsrc1, <i64 10, i64 10, i64 10, i64 10>
   %resulti16 = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %resulti16 to <4 x i32>
   store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-
-define <8 x i8> @andmaskv8i8(<8 x i16> %src1, <8 x i8> %src2) nounwind {
+define <8 x i8> @andmaskv8i8(<8 x i16> %src1, <8 x i8> %src2) {
 ; CHECK-LABEL: andmaskv8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.8b v2, #7
@@ -1069,13 +1059,13 @@ define <8 x i8> @andmaskv8i8(<8 x i16> %src1, <8 x i8> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = and <8 x i16> %src1, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
   %zextsrc2 = zext <8 x i8> %src2 to <8 x i16>
-  %add = add <8 x i16> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <8 x i16> %zextsrc1, %zextsrc2
   %resulti16 = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
   ret <8 x i8> %result
 }
 
-define <16 x i8> @andmaskv16i8(<16 x i16> %src1, <16 x i8> %src2) nounwind {
+define <16 x i8> @andmaskv16i8(<16 x i16> %src1, <16 x i8> %src2) {
 ; CHECK-LABEL: andmaskv16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.16b v3, #7
@@ -1085,13 +1075,13 @@ define <16 x i8> @andmaskv16i8(<16 x i16> %src1, <16 x i8> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = and <16 x i16> %src1, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
   %zextsrc2 = zext <16 x i8> %src2 to <16 x i16>
-  %add = add <16 x i16> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <16 x i16> %zextsrc1, %zextsrc2
   %resulti16 = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
   ret <16 x i8> %result
 }
 
-define <16 x i8> @andmask2v16i8(<16 x i16> %src1, <16 x i16> %src2) nounwind {
+define <16 x i8> @andmask2v16i8(<16 x i16> %src1, <16 x i16> %src2) {
 ; CHECK-LABEL: andmask2v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.16b v4, #3
@@ -1104,13 +1094,13 @@ define <16 x i8> @andmask2v16i8(<16 x i16> %src1, <16 x i16> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = and <16 x i16> %src1, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
   %zextsrc2 = and <16 x i16> %src2, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
-  %add = add <16 x i16> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <16 x i16> %zextsrc1, %zextsrc2
   %resulti16 = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
   ret <16 x i8> %result
 }
 
-define <8 x i8> @andmask2v8i8(<8 x i16> %src1, <8 x i16> %src2) nounwind {
+define <8 x i8> @andmask2v8i8(<8 x i16> %src1, <8 x i16> %src2) {
 ; CHECK-LABEL: andmask2v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.8b v2, #7
@@ -1121,13 +1111,13 @@ define <8 x i8> @andmask2v8i8(<8 x i16> %src1, <8 x i16> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = and <8 x i16> %src1, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
   %zextsrc2 = and <8 x i16> %src2, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
-  %add = add <8 x i16> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <8 x i16> %zextsrc1, %zextsrc2
   %resulti16 = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
   ret <8 x i8> %result
 }
 
-define <8 x i16> @andmask3v8i8(<8 x i16> %src1, <8 x i16> %src2) nounwind {
+define <8 x i16> @andmask3v8i8(<8 x i16> %src1, <8 x i16> %src2) {
 ; CHECK-LABEL: andmask3v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.8h v2, #7
@@ -1137,12 +1127,12 @@ define <8 x i16> @andmask3v8i8(<8 x i16> %src1, <8 x i16> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %zextsrc1 = and <8 x i16> %src1, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
   %zextsrc2 = and <8 x i16> %src2, <i16 511, i16 511, i16 511, i16 511, i16 511, i16 511, i16 511, i16 511>
-  %add = add <8 x i16> %zextsrc1, %zextsrc2
+  %add = add nuw nsw <8 x i16> %zextsrc1, %zextsrc2
   %resulti16 = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ret <8 x i16> %resulti16
 }
 
-define <16 x i8> @sextmaskv16i8(<16 x i16> %src1, <16 x i8> %src2) nounwind {
+define <16 x i8> @sextmaskv16i8(<16 x i16> %src1, <16 x i8> %src2) {
 ; CHECK-LABEL: sextmaskv16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sshr.8h v1, v1, #11
@@ -1152,13 +1142,13 @@ define <16 x i8> @sextmaskv16i8(<16 x i16> %src1, <16 x i8> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %sextsrc1 = ashr <16 x i16> %src1, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
   %sextsrc2 = sext <16 x i8> %src2 to <16 x i16>
-  %add = add <16 x i16> %sextsrc1, %sextsrc2
-  %resulti16 = ashr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %result = trunc <16 x i16> %resulti16 to <16 x i8>
+  %add = add nsw <16 x i16> %sextsrc1, %sextsrc2
+  %1 = ashr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %result = trunc <16 x i16> %1 to <16 x i8>
   ret <16 x i8> %result
 }
 
-define <8 x i8> @sextmaskv8i8(<8 x i16> %src1, <8 x i8> %src2) nounwind {
+define <8 x i8> @sextmaskv8i8(<8 x i16> %src1, <8 x i8> %src2) {
 ; CHECK-LABEL: sextmaskv8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sshr.8h v0, v0, #11
@@ -1167,13 +1157,13 @@ define <8 x i8> @sextmaskv8i8(<8 x i16> %src1, <8 x i8> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %sextsrc1 = ashr <8 x i16> %src1, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
   %sextsrc2 = sext <8 x i8> %src2 to <8 x i16>
-  %add = add <8 x i16> %sextsrc1, %sextsrc2
-  %resulti16 = ashr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %result = trunc <8 x i16> %resulti16 to <8 x i8>
+  %add = add nsw <8 x i16> %sextsrc1, %sextsrc2
+  %1 = ashr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %result = trunc <8 x i16> %1 to <8 x i8>
   ret <8 x i8> %result
 }
 
-define <8 x i8> @sextmask2v8i8(<8 x i16> %src1, <8 x i8> %src2) nounwind {
+define <8 x i8> @sextmask2v8i8(<8 x i16> %src1, <8 x i8> %src2) {
 ; CHECK-LABEL: sextmask2v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shrn.8b v0, v0, #8
@@ -1181,13 +1171,13 @@ define <8 x i8> @sextmask2v8i8(<8 x i16> %src1, <8 x i8> %src2) nounwind {
 ; CHECK-NEXT:    ret
   %sextsrc1 = ashr <8 x i16> %src1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
   %sextsrc2 = sext <8 x i8> %src2 to <8 x i16>
-  %add = add <8 x i16> %sextsrc1, %sextsrc2
-  %resulti16 = ashr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %result = trunc <8 x i16> %resulti16 to <8 x i8>
+  %add = add nsw <8 x i16> %sextsrc1, %sextsrc2
+  %1 = ashr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %result = trunc <8 x i16> %1 to <8 x i8>
   ret <8 x i8> %result
 }
 
-define <8 x i8> @sextmask3v8i8(<8 x i16> %src1, <8 x i8> %src2) nounwind {
+define <8 x i8> @sextmask3v8i8(<8 x i16> %src1, <8 x i8> %src2) {
 ; CHECK-LABEL: sextmask3v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sshr.8h v0, v0, #7
@@ -1195,27 +1185,23 @@ define <8 x i8> @sextmask3v8i8(<8 x i16> %src1, <8 x i8> %src2) nounwind {
 ; CHECK-NEXT:    shadd.8h v0, v0, v1
 ; CHECK-NEXT:    xtn.8b v0, v0
 ; CHECK-NEXT:    ret
-  %sextsrc1 = ashr <8 x i16> %src1, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  %1 = ashr <8 x i16> %src1, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
   %sextsrc2 = sext <8 x i8> %src2 to <8 x i16>
-  %add = add <8 x i16> %sextsrc1, %sextsrc2
-  %resulti16 = ashr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %result = trunc <8 x i16> %resulti16 to <8 x i8>
+  %add = add nsw <8 x i16> %1, %sextsrc2
+  %2 = ashr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %result = trunc <8 x i16> %2 to <8 x i8>
   ret <8 x i8> %result
 }
 
-
-declare <8 x i8>  @llvm.aarch64.neon.srhadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i8>  @llvm.aarch64.neon.urhadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8>, <8 x i8>)
+declare <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16>, <4 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32>, <2 x i32>)
+declare <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8>, <8 x i8>)
+declare <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16>, <4 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32>, <2 x i32>)
+declare <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32>, <4 x i32>)

diff --git a/llvm/test/CodeGen/AArch64/sve2-hadd.ll b/llvm/test/CodeGen/AArch64/sve2-hadd.ll
index 6ad2ad0feacf6..323669ac7601a 100644
--- a/llvm/test/CodeGen/AArch64/sve2-hadd.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-hadd.ll
@@ -10,7 +10,7 @@ define <vscale x 2 x i64> @hadds_v2i64(<vscale x 2 x i64> %s0, <vscale x 2 x i64
 entry:
   %s0s = sext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
   %s1s = sext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
-  %m = add <vscale x 2 x i128> %s0s, %s1s
+  %m = add nsw <vscale x 2 x i128> %s0s, %s1s
   %s = lshr <vscale x 2 x i128> %m, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
   %s2 = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %s2
@@ -25,7 +25,7 @@ define <vscale x 2 x i64> @haddu_v2i64(<vscale x 2 x i64> %s0, <vscale x 2 x i64
 entry:
   %s0s = zext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
   %s1s = zext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
-  %m = add <vscale x 2 x i128> %s0s, %s1s
+  %m = add nuw nsw <vscale x 2 x i128> %s0s, %s1s
   %s = lshr <vscale x 2 x i128> %m, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
   %s2 = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %s2
@@ -42,7 +42,7 @@ define <vscale x 2 x i32> @hadds_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i32
 entry:
   %s0s = sext <vscale x 2 x i32> %s0 to <vscale x 2 x i64>
   %s1s = sext <vscale x 2 x i32> %s1 to <vscale x 2 x i64>
-  %m = add <vscale x 2 x i64> %s0s, %s1s
+  %m = add nsw <vscale x 2 x i64> %s0s, %s1s
   %s = lshr <vscale x 2 x i64> %m, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
   %s2 = trunc <vscale x 2 x i64> %s to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %s2
@@ -58,7 +58,7 @@ define <vscale x 2 x i32> @haddu_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i32
 entry:
   %s0s = zext <vscale x 2 x i32> %s0 to <vscale x 2 x i64>
   %s1s = zext <vscale x 2 x i32> %s1 to <vscale x 2 x i64>
-  %m = add <vscale x 2 x i64> %s0s, %s1s
+  %m = add nuw nsw <vscale x 2 x i64> %s0s, %s1s
   %s = lshr <vscale x 2 x i64> %m, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
   %s2 = trunc <vscale x 2 x i64> %s to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %s2
@@ -73,7 +73,7 @@ define <vscale x 4 x i32> @hadds_v4i32(<vscale x 4 x i32> %s0, <vscale x 4 x i32
 entry:
   %s0s = sext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
   %s1s = sext <vscale x 4 x i32> %s1 to <vscale x 4 x i64>
-  %m = add <vscale x 4 x i64> %s0s, %s1s
+  %m = add nsw <vscale x 4 x i64> %s0s, %s1s
   %s = lshr <vscale x 4 x i64> %m, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
   %s2 = trunc <vscale x 4 x i64> %s to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %s2
@@ -88,7 +88,7 @@ define <vscale x 4 x i32> @haddu_v4i32(<vscale x 4 x i32> %s0, <vscale x 4 x i32
 entry:
   %s0s = zext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
   %s1s = zext <vscale x 4 x i32> %s1 to <vscale x 4 x i64>
-  %m = add <vscale x 4 x i64> %s0s, %s1s
+  %m = add nuw nsw <vscale x 4 x i64> %s0s, %s1s
   %s = lshr <vscale x 4 x i64> %m, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
   %s2 = trunc <vscale x 4 x i64> %s to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %s2
@@ -107,7 +107,7 @@ define <vscale x 2 x i16> @hadds_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i16
 entry:
   %s0s = sext <vscale x 2 x i16> %s0 to <vscale x 2 x i32>
   %s1s = sext <vscale x 2 x i16> %s1 to <vscale x 2 x i32>
-  %m = add <vscale x 2 x i32> %s0s, %s1s
+  %m = add nsw <vscale x 2 x i32> %s0s, %s1s
   %s = lshr <vscale x 2 x i32> %m, shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer)
   %s2 = trunc <vscale x 2 x i32> %s to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %s2
@@ -124,7 +124,7 @@ define <vscale x 2 x i16> @haddu_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i16
 entry:
   %s0s = zext <vscale x 2 x i16> %s0 to <vscale x 2 x i32>
   %s1s = zext <vscale x 2 x i16> %s1 to <vscale x 2 x i32>
-  %m = add <vscale x 2 x i32> %s0s, %s1s
+  %m = add nuw nsw <vscale x 2 x i32> %s0s, %s1s
   %s = lshr <vscale x 2 x i32> %m, shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer)
   %s2 = trunc <vscale x 2 x i32> %s to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %s2
@@ -142,7 +142,7 @@ define <vscale x 4 x i16> @hadds_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i16
 entry:
   %s0s = sext <vscale x 4 x i16> %s0 to <vscale x 4 x i32>
   %s1s = sext <vscale x 4 x i16> %s1 to <vscale x 4 x i32>
-  %m = add <vscale x 4 x i32> %s0s, %s1s
+  %m = add nsw <vscale x 4 x i32> %s0s, %s1s
   %s = lshr <vscale x 4 x i32> %m, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
   %s2 = trunc <vscale x 4 x i32> %s to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %s2
@@ -159,7 +159,7 @@ define <vscale x 4 x i16> @haddu_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i16
 entry:
   %s0s = zext <vscale x 4 x i16> %s0 to <vscale x 4 x i32>
   %s1s = zext <vscale x 4 x i16> %s1 to <vscale x 4 x i32>
-  %m = add <vscale x 4 x i32> %s0s, %s1s
+  %m = add nuw nsw <vscale x 4 x i32> %s0s, %s1s
   %s = lshr <vscale x 4 x i32> %m, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
   %s2 = trunc <vscale x 4 x i32> %s to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %s2
@@ -174,7 +174,7 @@ define <vscale x 8 x i16> @hadds_v8i16(<vscale x 8 x i16> %s0, <vscale x 8 x i16
 entry:
   %s0s = sext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
   %s1s = sext <vscale x 8 x i16> %s1 to <vscale x 8 x i32>
-  %m = add <vscale x 8 x i32> %s0s, %s1s
+  %m = add nsw <vscale x 8 x i32> %s0s, %s1s
   %s = lshr <vscale x 8 x i32> %m, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
   %s2 = trunc <vscale x 8 x i32> %s to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %s2
@@ -189,7 +189,7 @@ define <vscale x 8 x i16> @haddu_v8i16(<vscale x 8 x i16> %s0, <vscale x 8 x i16
 entry:
   %s0s = zext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
   %s1s = zext <vscale x 8 x i16> %s1 to <vscale x 8 x i32>
-  %m = add <vscale x 8 x i32> %s0s, %s1s
+  %m = add nuw nsw <vscale x 8 x i32> %s0s, %s1s
   %s = lshr <vscale x 8 x i32> %m, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
   %s2 = trunc <vscale x 8 x i32> %s to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %s2
@@ -208,7 +208,7 @@ define <vscale x 4 x i8> @hadds_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s
 entry:
   %s0s = sext <vscale x 4 x i8> %s0 to <vscale x 4 x i16>
   %s1s = sext <vscale x 4 x i8> %s1 to <vscale x 4 x i16>
-  %m = add <vscale x 4 x i16> %s0s, %s1s
+  %m = add nsw <vscale x 4 x i16> %s0s, %s1s
   %s = lshr <vscale x 4 x i16> %m, shufflevector (<vscale x 4 x i16> insertelement (<vscale x 4 x i16> poison, i16 1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer)
   %s2 = trunc <vscale x 4 x i16> %s to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %s2
@@ -225,7 +225,7 @@ define <vscale x 4 x i8> @haddu_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s
 entry:
   %s0s = zext <vscale x 4 x i8> %s0 to <vscale x 4 x i16>
   %s1s = zext <vscale x 4 x i8> %s1 to <vscale x 4 x i16>
-  %m = add <vscale x 4 x i16> %s0s, %s1s
+  %m = add nuw nsw <vscale x 4 x i16> %s0s, %s1s
   %s = lshr <vscale x 4 x i16> %m, shufflevector (<vscale x 4 x i16> insertelement (<vscale x 4 x i16> poison, i16 1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer)
   %s2 = trunc <vscale x 4 x i16> %s to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %s2
@@ -243,7 +243,7 @@ define <vscale x 8 x i8> @hadds_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s
 entry:
   %s0s = sext <vscale x 8 x i8> %s0 to <vscale x 8 x i16>
   %s1s = sext <vscale x 8 x i8> %s1 to <vscale x 8 x i16>
-  %m = add <vscale x 8 x i16> %s0s, %s1s
+  %m = add nsw <vscale x 8 x i16> %s0s, %s1s
   %s = lshr <vscale x 8 x i16> %m, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
   %s2 = trunc <vscale x 8 x i16> %s to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %s2
@@ -260,7 +260,7 @@ define <vscale x 8 x i8> @haddu_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s
 entry:
   %s0s = zext <vscale x 8 x i8> %s0 to <vscale x 8 x i16>
   %s1s = zext <vscale x 8 x i8> %s1 to <vscale x 8 x i16>
-  %m = add <vscale x 8 x i16> %s0s, %s1s
+  %m = add nuw nsw <vscale x 8 x i16> %s0s, %s1s
   %s = lshr <vscale x 8 x i16> %m, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
   %s2 = trunc <vscale x 8 x i16> %s to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %s2
@@ -275,7 +275,7 @@ define <vscale x 16 x i8> @hadds_v16i8(<vscale x 16 x i8> %s0, <vscale x 16 x i8
 entry:
   %s0s = sext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>
   %s1s = sext <vscale x 16 x i8> %s1 to <vscale x 16 x i16>
-  %m = add <vscale x 16 x i16> %s0s, %s1s
+  %m = add nsw <vscale x 16 x i16> %s0s, %s1s
   %s = lshr <vscale x 16 x i16> %m, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
   %s2 = trunc <vscale x 16 x i16> %s to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %s2
@@ -290,7 +290,7 @@ define <vscale x 16 x i8> @haddu_v16i8(<vscale x 16 x i8> %s0, <vscale x 16 x i8
 entry:
   %s0s = zext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>
   %s1s = zext <vscale x 16 x i8> %s1 to <vscale x 16 x i16>
-  %m = add <vscale x 16 x i16> %s0s, %s1s
+  %m = add nuw nsw <vscale x 16 x i16> %s0s, %s1s
   %s = lshr <vscale x 16 x i16> %m, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
   %s2 = trunc <vscale x 16 x i16> %s to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %s2
@@ -305,8 +305,8 @@ define <vscale x 2 x i64> @rhadds_v2i64(<vscale x 2 x i64> %s0, <vscale x 2 x i6
 entry:
   %s0s = sext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
   %s1s = sext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
-  %add = add <vscale x 2 x i128> %s0s, %s1s
-  %add2 = add <vscale x 2 x i128> %add, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
+  %add = add <vscale x 2 x i128> %s0s, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
+  %add2 = add <vscale x 2 x i128> %add, %s1s
   %s = lshr <vscale x 2 x i128> %add2, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
   %result = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %result
@@ -321,8 +321,8 @@ define <vscale x 2 x i64> @rhaddu_v2i64(<vscale x 2 x i64> %s0, <vscale x 2 x i6
 entry:
   %s0s = zext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
   %s1s = zext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
-  %add = add <vscale x 2 x i128> %s0s, %s1s
-  %add2 = add <vscale x 2 x i128> %add, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
+  %add = add nuw nsw <vscale x 2 x i128> %s0s, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
+  %add2 = add nuw nsw <vscale x 2 x i128> %add, %s1s
   %s = lshr <vscale x 2 x i128> %add2, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
   %result = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %result
@@ -342,8 +342,8 @@ define <vscale x 2 x i32> @rhadds_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i3
 entry:
   %s0s = sext <vscale x 2 x i32> %s0 to <vscale x 2 x i64>
   %s1s = sext <vscale x 2 x i32> %s1 to <vscale x 2 x i64>
-  %add = add <vscale x 2 x i64> %s0s, %s1s
-  %add2 = add <vscale x 2 x i64> %add, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  %add = add <vscale x 2 x i64> %s0s, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  %add2 = add <vscale x 2 x i64> %add, %s1s
   %s = lshr <vscale x 2 x i64> %add2, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
   %result = trunc <vscale x 2 x i64> %s to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %result
@@ -362,8 +362,8 @@ define <vscale x 2 x i32> @rhaddu_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i3
 entry:
   %s0s = zext <vscale x 2 x i32> %s0 to <vscale x 2 x i64>
   %s1s = zext <vscale x 2 x i32> %s1 to <vscale x 2 x i64>
-  %add = add <vscale x 2 x i64> %s0s, %s1s
-  %add2 = add <vscale x 2 x i64> %add, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  %add = add nuw nsw <vscale x 2 x i64> %s0s, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  %add2 = add nuw nsw <vscale x 2 x i64> %add, %s1s
   %s = lshr <vscale x 2 x i64> %add2, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
   %result = trunc <vscale x 2 x i64> %s to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %result
@@ -378,8 +378,8 @@ define <vscale x 4 x i32> @rhadds_v4i32(<vscale x 4 x i32> %s0, <vscale x 4 x i3
 entry:
   %s0s = sext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
   %s1s = sext <vscale x 4 x i32> %s1 to <vscale x 4 x i64>
-  %add = add <vscale x 4 x i64> %s0s, %s1s
-  %add2 = add <vscale x 4 x i64> %add, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+  %add = add <vscale x 4 x i64> %s0s, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+  %add2 = add <vscale x 4 x i64> %add, %s1s
   %s = lshr <vscale x 4 x i64> %add2, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
   %result = trunc <vscale x 4 x i64> %s to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %result
@@ -394,8 +394,8 @@ define <vscale x 4 x i32> @rhaddu_v4i32(<vscale x 4 x i32> %s0, <vscale x 4 x i3
 entry:
   %s0s = zext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
   %s1s = zext <vscale x 4 x i32> %s1 to <vscale x 4 x i64>
-  %add = add <vscale x 4 x i64> %s0s, %s1s
-  %add2 = add <vscale x 4 x i64> %add, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+  %add = add nuw nsw <vscale x 4 x i64> %s0s, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+  %add2 = add nuw nsw <vscale x 4 x i64> %add, %s1s
   %s = lshr <vscale x 4 x i64> %add2, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
   %result = trunc <vscale x 4 x i64> %s to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %result
@@ -416,8 +416,8 @@ define <vscale x 2 x i16> @rhadds_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i1
 entry:
   %s0s = sext <vscale x 2 x i16> %s0 to <vscale x 2 x i32>
   %s1s = sext <vscale x 2 x i16> %s1 to <vscale x 2 x i32>
-  %add = add <vscale x 2 x i32> %s0s, %s1s
-  %add2 = add <vscale x 2 x i32> %add, shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer)
+  %add = add <vscale x 2 x i32> %s0s, shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer)
+  %add2 = add <vscale x 2 x i32> %add, %s1s
   %s = lshr <vscale x 2 x i32> %add2, shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer)
   %result = trunc <vscale x 2 x i32> %s to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %result
@@ -436,8 +436,8 @@ define <vscale x 2 x i16> @rhaddu_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i1
 entry:
   %s0s = zext <vscale x 2 x i16> %s0 to <vscale x 2 x i32>
   %s1s = zext <vscale x 2 x i16> %s1 to <vscale x 2 x i32>
-  %add = add <vscale x 2 x i32> %s0s, %s1s
-  %add2 = add <vscale x 2 x i32> %add, shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer)
+  %add = add nuw nsw <vscale x 2 x i32> %s0s, shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer)
+  %add2 = add nuw nsw <vscale x 2 x i32> %add, %s1s
   %s = lshr <vscale x 2 x i32> %add2, shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer)
   %result = trunc <vscale x 2 x i32> %s to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %result
@@ -457,8 +457,8 @@ define <vscale x 4 x i16> @rhadds_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i1
 entry:
   %s0s = sext <vscale x 4 x i16> %s0 to <vscale x 4 x i32>
   %s1s = sext <vscale x 4 x i16> %s1 to <vscale x 4 x i32>
-  %add = add <vscale x 4 x i32> %s0s, %s1s
-  %add2 = add <vscale x 4 x i32> %add, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  %add = add <vscale x 4 x i32> %s0s, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  %add2 = add <vscale x 4 x i32> %add, %s1s
   %s = lshr <vscale x 4 x i32> %add2, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
   %result = trunc <vscale x 4 x i32> %s to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %result
@@ -477,8 +477,8 @@ define <vscale x 4 x i16> @rhaddu_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i1
 entry:
   %s0s = zext <vscale x 4 x i16> %s0 to <vscale x 4 x i32>
   %s1s = zext <vscale x 4 x i16> %s1 to <vscale x 4 x i32>
-  %add = add <vscale x 4 x i32> %s0s, %s1s
-  %add2 = add <vscale x 4 x i32> %add, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  %add = add nuw nsw <vscale x 4 x i32> %s0s, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  %add2 = add nuw nsw <vscale x 4 x i32> %add, %s1s
   %s = lshr <vscale x 4 x i32> %add2, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
   %result = trunc <vscale x 4 x i32> %s to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %result
@@ -493,8 +493,8 @@ define <vscale x 8 x i16> @rhadds_v8i16(<vscale x 8 x i16> %s0, <vscale x 8 x i1
 entry:
   %s0s = sext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
   %s1s = sext <vscale x 8 x i16> %s1 to <vscale x 8 x i32>
-  %add = add <vscale x 8 x i32> %s0s, %s1s
-  %add2 = add <vscale x 8 x i32> %add, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
+  %add = add <vscale x 8 x i32> %s0s, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
+  %add2 = add <vscale x 8 x i32> %add, %s1s
   %s = lshr <vscale x 8 x i32> %add2, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
   %result = trunc <vscale x 8 x i32> %s to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %result
@@ -509,8 +509,8 @@ define <vscale x 8 x i16> @rhaddu_v8i16(<vscale x 8 x i16> %s0, <vscale x 8 x i1
 entry:
   %s0s = zext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
   %s1s = zext <vscale x 8 x i16> %s1 to <vscale x 8 x i32>
-  %add = add <vscale x 8 x i32> %s0s, %s1s
-  %add2 = add <vscale x 8 x i32> %add, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
+  %add = add nuw nsw <vscale x 8 x i32> %s0s, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
+  %add2 = add nuw nsw <vscale x 8 x i32> %add, %s1s
   %s = lshr <vscale x 8 x i32> %add2, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
   %result = trunc <vscale x 8 x i32> %s to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %result
@@ -531,8 +531,8 @@ define <vscale x 4 x i8> @rhadds_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %
 entry:
   %s0s = sext <vscale x 4 x i8> %s0 to <vscale x 4 x i16>
   %s1s = sext <vscale x 4 x i8> %s1 to <vscale x 4 x i16>
-  %add = add <vscale x 4 x i16> %s0s, %s1s
-  %add2 = add <vscale x 4 x i16> %add, shufflevector (<vscale x 4 x i16> insertelement (<vscale x 4 x i16> poison, i16 1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer)
+  %add = add <vscale x 4 x i16> %s0s, shufflevector (<vscale x 4 x i16> insertelement (<vscale x 4 x i16> poison, i16 1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer)
+  %add2 = add <vscale x 4 x i16> %add, %s1s
   %s = lshr <vscale x 4 x i16> %add2, shufflevector (<vscale x 4 x i16> insertelement (<vscale x 4 x i16> poison, i16 1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer)
   %result = trunc <vscale x 4 x i16> %s to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %result
@@ -551,8 +551,8 @@ define <vscale x 4 x i8> @rhaddu_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %
 entry:
   %s0s = zext <vscale x 4 x i8> %s0 to <vscale x 4 x i16>
   %s1s = zext <vscale x 4 x i8> %s1 to <vscale x 4 x i16>
-  %add = add <vscale x 4 x i16> %s0s, %s1s
-  %add2 = add <vscale x 4 x i16> %add, shufflevector (<vscale x 4 x i16> insertelement (<vscale x 4 x i16> poison, i16 1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer)
+  %add = add nuw nsw <vscale x 4 x i16> %s0s, shufflevector (<vscale x 4 x i16> insertelement (<vscale x 4 x i16> poison, i16 1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer)
+  %add2 = add nuw nsw <vscale x 4 x i16> %add, %s1s
   %s = lshr <vscale x 4 x i16> %add2, shufflevector (<vscale x 4 x i16> insertelement (<vscale x 4 x i16> poison, i16 1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer)
   %result = trunc <vscale x 4 x i16> %s to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %result
@@ -572,8 +572,8 @@ define <vscale x 8 x i8> @rhadds_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %
 entry:
   %s0s = sext <vscale x 8 x i8> %s0 to <vscale x 8 x i16>
   %s1s = sext <vscale x 8 x i8> %s1 to <vscale x 8 x i16>
-  %add = add <vscale x 8 x i16> %s0s, %s1s
-  %add2 = add <vscale x 8 x i16> %add, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  %add = add <vscale x 8 x i16> %s0s, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  %add2 = add <vscale x 8 x i16> %add, %s1s
   %s = lshr <vscale x 8 x i16> %add2, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
   %result = trunc <vscale x 8 x i16> %s to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %result
@@ -592,8 +592,8 @@ define <vscale x 8 x i8> @rhaddu_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %
 entry:
   %s0s = zext <vscale x 8 x i8> %s0 to <vscale x 8 x i16>
   %s1s = zext <vscale x 8 x i8> %s1 to <vscale x 8 x i16>
-  %add = add <vscale x 8 x i16> %s0s, %s1s
-  %add2 = add <vscale x 8 x i16> %add, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  %add = add nuw nsw <vscale x 8 x i16> %s0s, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  %add2 = add nuw nsw <vscale x 8 x i16> %add, %s1s
   %s = lshr <vscale x 8 x i16> %add2, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
   %result = trunc <vscale x 8 x i16> %s to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %result
@@ -608,8 +608,8 @@ define <vscale x 16 x i8> @rhadds_v16i8(<vscale x 16 x i8> %s0, <vscale x 16 x i
 entry:
   %s0s = sext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>
   %s1s = sext <vscale x 16 x i8> %s1 to <vscale x 16 x i16>
-  %add = add <vscale x 16 x i16> %s0s, %s1s
-  %add2 = add <vscale x 16 x i16> %add, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
+  %add = add <vscale x 16 x i16> %s0s, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
+  %add2 = add <vscale x 16 x i16> %add, %s1s
   %s = lshr <vscale x 16 x i16> %add2, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
   %result = trunc <vscale x 16 x i16> %s to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %result
@@ -624,8 +624,8 @@ define <vscale x 16 x i8> @rhaddu_v16i8(<vscale x 16 x i8> %s0, <vscale x 16 x i
 entry:
   %s0s = zext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>
   %s1s = zext <vscale x 16 x i8> %s1 to <vscale x 16 x i16>
-  %add = add <vscale x 16 x i16> %s0s, %s1s
-  %add2 = add <vscale x 16 x i16> %add, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
+  %add = add nuw nsw <vscale x 16 x i16> %s0s, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
+  %add2 = add nuw nsw <vscale x 16 x i16> %add, %s1s
   %s = lshr <vscale x 16 x i16> %add2, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
   %result = trunc <vscale x 16 x i16> %s to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %result

diff --git a/llvm/test/CodeGen/Thumb2/mve-vhadd.ll b/llvm/test/CodeGen/Thumb2/mve-vhadd.ll
index 248a929e858bf..fa0ff389f300e 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vhadd.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vhadd.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
 
 define arm_aapcs_vfpcc <4 x i32> @vhadds_v4i32(<4 x i32> %s0, <4 x i32> %s1) {
 ; CHECK-LABEL: vhadds_v4i32:
@@ -9,7 +9,7 @@ define arm_aapcs_vfpcc <4 x i32> @vhadds_v4i32(<4 x i32> %s0, <4 x i32> %s1) {
 entry:
   %s0s = sext <4 x i32> %s0 to <4 x i64>
   %s1s = sext <4 x i32> %s1 to <4 x i64>
-  %m = add <4 x i64> %s0s, %s1s
+  %m = add nsw <4 x i64> %s0s, %s1s
   %s = lshr <4 x i64> %m, <i64 1, i64 1, i64 1, i64 1>
   %s2 = trunc <4 x i64> %s to <4 x i32>
   ret <4 x i32> %s2
@@ -23,7 +23,7 @@ define arm_aapcs_vfpcc <4 x i32> @vhaddu_v4i32(<4 x i32> %s0, <4 x i32> %s1) {
 entry:
   %s0s = zext <4 x i32> %s0 to <4 x i64>
   %s1s = zext <4 x i32> %s1 to <4 x i64>
-  %m = add <4 x i64> %s0s, %s1s
+  %m = add nuw nsw <4 x i64> %s0s, %s1s
   %s = lshr <4 x i64> %m, <i64 1, i64 1, i64 1, i64 1>
   %s2 = trunc <4 x i64> %s to <4 x i32>
   ret <4 x i32> %s2
@@ -40,7 +40,7 @@ define arm_aapcs_vfpcc <4 x i16> @vhadds_v4i16(<4 x i16> %s0, <4 x i16> %s1) {
 entry:
   %s0s = sext <4 x i16> %s0 to <4 x i32>
   %s1s = sext <4 x i16> %s1 to <4 x i32>
-  %m = add <4 x i32> %s0s, %s1s
+  %m = add nsw <4 x i32> %s0s, %s1s
   %s = lshr <4 x i32> %m, <i32 1, i32 1, i32 1, i32 1>
   %s2 = trunc <4 x i32> %s to <4 x i16>
   ret <4 x i16> %s2
@@ -51,13 +51,12 @@ define arm_aapcs_vfpcc <4 x i16> @vhaddu_v4i16(<4 x i16> %s0, <4 x i16> %s1) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlb.u16 q1, q1
 ; CHECK-NEXT:    vmovlb.u16 q0, q0
-; CHECK-NEXT:    vadd.i32 q0, q0, q1
-; CHECK-NEXT:    vshr.u32 q0, q0, #1
+; CHECK-NEXT:    vhadd.u32 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %s0s = zext <4 x i16> %s0 to <4 x i32>
   %s1s = zext <4 x i16> %s1 to <4 x i32>
-  %m = add <4 x i32> %s0s, %s1s
+  %m = add nuw nsw <4 x i32> %s0s, %s1s
   %s = lshr <4 x i32> %m, <i32 1, i32 1, i32 1, i32 1>
   %s2 = trunc <4 x i32> %s to <4 x i16>
   ret <4 x i16> %s2
@@ -71,7 +70,7 @@ define arm_aapcs_vfpcc <8 x i16> @vhadds_v8i16(<8 x i16> %s0, <8 x i16> %s1) {
 entry:
   %s0s = sext <8 x i16> %s0 to <8 x i32>
   %s1s = sext <8 x i16> %s1 to <8 x i32>
-  %m = add <8 x i32> %s0s, %s1s
+  %m = add nsw <8 x i32> %s0s, %s1s
   %s = lshr <8 x i32> %m, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %s2 = trunc <8 x i32> %s to <8 x i16>
   ret <8 x i16> %s2
@@ -85,7 +84,7 @@ define arm_aapcs_vfpcc <8 x i16> @vhaddu_v8i16(<8 x i16> %s0, <8 x i16> %s1) {
 entry:
   %s0s = zext <8 x i16> %s0 to <8 x i32>
   %s1s = zext <8 x i16> %s1 to <8 x i32>
-  %m = add <8 x i32> %s0s, %s1s
+  %m = add nuw nsw <8 x i32> %s0s, %s1s
   %s = lshr <8 x i32> %m, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %s2 = trunc <8 x i32> %s to <8 x i16>
   ret <8 x i16> %s2
@@ -105,7 +104,7 @@ define arm_aapcs_vfpcc <4 x i8> @vhadds_v4i8(<4 x i8> %s0, <4 x i8> %s1) {
 entry:
   %s0s = sext <4 x i8> %s0 to <4 x i16>
   %s1s = sext <4 x i8> %s1 to <4 x i16>
-  %m = add <4 x i16> %s0s, %s1s
+  %m = add nsw <4 x i16> %s0s, %s1s
   %s = lshr <4 x i16> %m, <i16 1, i16 1, i16 1, i16 1>
   %s2 = trunc <4 x i16> %s to <4 x i8>
   ret <4 x i8> %s2
@@ -117,13 +116,12 @@ define arm_aapcs_vfpcc <4 x i8> @vhaddu_v4i8(<4 x i8> %s0, <4 x i8> %s1) {
 ; CHECK-NEXT:    vmov.i32 q2, #0xff
 ; CHECK-NEXT:    vand q1, q1, q2
 ; CHECK-NEXT:    vand q0, q0, q2
-; CHECK-NEXT:    vadd.i32 q0, q0, q1
-; CHECK-NEXT:    vshr.u32 q0, q0, #1
+; CHECK-NEXT:    vhadd.u32 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %s0s = zext <4 x i8> %s0 to <4 x i16>
   %s1s = zext <4 x i8> %s1 to <4 x i16>
-  %m = add <4 x i16> %s0s, %s1s
+  %m = add nuw nsw <4 x i16> %s0s, %s1s
   %s = lshr <4 x i16> %m, <i16 1, i16 1, i16 1, i16 1>
   %s2 = trunc <4 x i16> %s to <4 x i8>
   ret <4 x i8> %s2
@@ -140,7 +138,7 @@ define arm_aapcs_vfpcc <8 x i8> @vhadds_v8i8(<8 x i8> %s0, <8 x i8> %s1) {
 entry:
   %s0s = sext <8 x i8> %s0 to <8 x i16>
   %s1s = sext <8 x i8> %s1 to <8 x i16>
-  %m = add <8 x i16> %s0s, %s1s
+  %m = add nsw <8 x i16> %s0s, %s1s
   %s = lshr <8 x i16> %m, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %s2 = trunc <8 x i16> %s to <8 x i8>
   ret <8 x i8> %s2
@@ -151,13 +149,12 @@ define arm_aapcs_vfpcc <8 x i8> @vhaddu_v8i8(<8 x i8> %s0, <8 x i8> %s1) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlb.u8 q1, q1
 ; CHECK-NEXT:    vmovlb.u8 q0, q0
-; CHECK-NEXT:    vadd.i16 q0, q0, q1
-; CHECK-NEXT:    vshr.u16 q0, q0, #1
+; CHECK-NEXT:    vhadd.u16 q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %s0s = zext <8 x i8> %s0 to <8 x i16>
   %s1s = zext <8 x i8> %s1 to <8 x i16>
-  %m = add <8 x i16> %s0s, %s1s
+  %m = add nuw nsw <8 x i16> %s0s, %s1s
   %s = lshr <8 x i16> %m, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %s2 = trunc <8 x i16> %s to <8 x i8>
   ret <8 x i8> %s2
@@ -171,7 +168,7 @@ define arm_aapcs_vfpcc <16 x i8> @vhadds_v16i8(<16 x i8> %s0, <16 x i8> %s1) {
 entry:
   %s0s = sext <16 x i8> %s0 to <16 x i16>
   %s1s = sext <16 x i8> %s1 to <16 x i16>
-  %m = add <16 x i16> %s0s, %s1s
+  %m = add nsw <16 x i16> %s0s, %s1s
   %s = lshr <16 x i16> %m, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %s2 = trunc <16 x i16> %s to <16 x i8>
   ret <16 x i8> %s2
@@ -185,7 +182,7 @@ define arm_aapcs_vfpcc <16 x i8> @vhaddu_v16i8(<16 x i8> %s0, <16 x i8> %s1) {
 entry:
   %s0s = zext <16 x i8> %s0 to <16 x i16>
   %s1s = zext <16 x i8> %s1 to <16 x i16>
-  %m = add <16 x i16> %s0s, %s1s
+  %m = add nuw nsw <16 x i16> %s0s, %s1s
   %s = lshr <16 x i16> %m, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %s2 = trunc <16 x i16> %s to <16 x i8>
   ret <16 x i8> %s2
@@ -199,8 +196,8 @@ define arm_aapcs_vfpcc <4 x i32> @vrhadds_v4i32(<4 x i32> %s0, <4 x i32> %s1) {
 entry:
   %s0s = sext <4 x i32> %s0 to <4 x i64>
   %s1s = sext <4 x i32> %s1 to <4 x i64>
-  %add = add <4 x i64> %s0s, %s1s
-  %add2 = add <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
+  %add = add nsw <4 x i64> %s0s, <i64 1, i64 1, i64 1, i64 1>
+  %add2 = add nsw <4 x i64> %add, %s1s
   %s = lshr <4 x i64> %add2, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %s to <4 x i32>
   ret <4 x i32> %result
@@ -214,8 +211,8 @@ define arm_aapcs_vfpcc <4 x i32> @vrhaddu_v4i32(<4 x i32> %s0, <4 x i32> %s1) {
 entry:
   %s0s = zext <4 x i32> %s0 to <4 x i64>
   %s1s = zext <4 x i32> %s1 to <4 x i64>
-  %add = add <4 x i64> %s0s, %s1s
-  %add2 = add <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
+  %add = add nuw nsw <4 x i64> %s0s, <i64 1, i64 1, i64 1, i64 1>
+  %add2 = add nuw nsw <4 x i64> %add, %s1s
   %s = lshr <4 x i64> %add2, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %s to <4 x i32>
   ret <4 x i32> %result
@@ -234,8 +231,8 @@ define arm_aapcs_vfpcc <4 x i16> @vrhadds_v4i16(<4 x i16> %s0, <4 x i16> %s1) {
 entry:
   %s0s = sext <4 x i16> %s0 to <4 x i32>
   %s1s = sext <4 x i16> %s1 to <4 x i32>
-  %add = add <4 x i32> %s0s, %s1s
-  %add2 = add <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
+  %add = add nsw <4 x i32> %s0s, <i32 1, i32 1, i32 1, i32 1>
+  %add2 = add nsw <4 x i32> %add, %s1s
   %s = lshr <4 x i32> %add2, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %s to <4 x i16>
   ret <4 x i16> %result
@@ -254,8 +251,8 @@ define arm_aapcs_vfpcc <4 x i16> @vrhaddu_v4i16(<4 x i16> %s0, <4 x i16> %s1) {
 entry:
   %s0s = zext <4 x i16> %s0 to <4 x i32>
   %s1s = zext <4 x i16> %s1 to <4 x i32>
-  %add = add <4 x i32> %s0s, %s1s
-  %add2 = add <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
+  %add = add nuw nsw <4 x i32> %s0s, <i32 1, i32 1, i32 1, i32 1>
+  %add2 = add nuw nsw <4 x i32> %add, %s1s
   %s = lshr <4 x i32> %add2, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %s to <4 x i16>
   ret <4 x i16> %result
@@ -269,8 +266,8 @@ define arm_aapcs_vfpcc <8 x i16> @vrhadds_v8i16(<8 x i16> %s0, <8 x i16> %s1) {
 entry:
   %s0s = sext <8 x i16> %s0 to <8 x i32>
   %s1s = sext <8 x i16> %s1 to <8 x i32>
-  %add = add <8 x i32> %s0s, %s1s
-  %add2 = add <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %add = add nsw <8 x i32> %s0s, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %add2 = add nsw <8 x i32> %add, %s1s
   %s = lshr <8 x i32> %add2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %s to <8 x i16>
   ret <8 x i16> %result
@@ -284,8 +281,8 @@ define arm_aapcs_vfpcc <8 x i16> @vrhaddu_v8i16(<8 x i16> %s0, <8 x i16> %s1) {
 entry:
   %s0s = zext <8 x i16> %s0 to <8 x i32>
   %s1s = zext <8 x i16> %s1 to <8 x i32>
-  %add = add <8 x i32> %s0s, %s1s
-  %add2 = add <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %add = add nuw nsw <8 x i32> %s0s, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %add2 = add nuw nsw <8 x i32> %add, %s1s
   %s = lshr <8 x i32> %add2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %s to <8 x i16>
   ret <8 x i16> %result
@@ -307,8 +304,8 @@ define arm_aapcs_vfpcc <4 x i8> @vrhadds_v4i8(<4 x i8> %s0, <4 x i8> %s1) {
 entry:
   %s0s = sext <4 x i8> %s0 to <4 x i16>
   %s1s = sext <4 x i8> %s1 to <4 x i16>
-  %add = add <4 x i16> %s0s, %s1s
-  %add2 = add <4 x i16> %add, <i16 1, i16 1, i16 1, i16 1>
+  %add = add nsw <4 x i16> %s0s, <i16 1, i16 1, i16 1, i16 1>
+  %add2 = add nsw <4 x i16> %add, %s1s
   %s = lshr <4 x i16> %add2, <i16 1, i16 1, i16 1, i16 1>
   %result = trunc <4 x i16> %s to <4 x i8>
   ret <4 x i8> %result
@@ -328,8 +325,8 @@ define arm_aapcs_vfpcc <4 x i8> @vrhaddu_v4i8(<4 x i8> %s0, <4 x i8> %s1) {
 entry:
   %s0s = zext <4 x i8> %s0 to <4 x i16>
   %s1s = zext <4 x i8> %s1 to <4 x i16>
-  %add = add <4 x i16> %s0s, %s1s
-  %add2 = add <4 x i16> %add, <i16 1, i16 1, i16 1, i16 1>
+  %add = add nuw nsw <4 x i16> %s0s, <i16 1, i16 1, i16 1, i16 1>
+  %add2 = add nuw nsw <4 x i16> %add, %s1s
   %s = lshr <4 x i16> %add2, <i16 1, i16 1, i16 1, i16 1>
   %result = trunc <4 x i16> %s to <4 x i8>
   ret <4 x i8> %result
@@ -348,8 +345,8 @@ define arm_aapcs_vfpcc <8 x i8> @vrhadds_v8i8(<8 x i8> %s0, <8 x i8> %s1) {
 entry:
   %s0s = sext <8 x i8> %s0 to <8 x i16>
   %s1s = sext <8 x i8> %s1 to <8 x i16>
-  %add = add <8 x i16> %s0s, %s1s
-  %add2 = add <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add = add nsw <8 x i16> %s0s, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add2 = add nsw <8 x i16> %add, %s1s
   %s = lshr <8 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %s to <8 x i8>
   ret <8 x i8> %result
@@ -368,8 +365,8 @@ define arm_aapcs_vfpcc <8 x i8> @vrhaddu_v8i8(<8 x i8> %s0, <8 x i8> %s1) {
 entry:
   %s0s = zext <8 x i8> %s0 to <8 x i16>
   %s1s = zext <8 x i8> %s1 to <8 x i16>
-  %add = add <8 x i16> %s0s, %s1s
-  %add2 = add <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add = add nuw nsw <8 x i16> %s0s, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add2 = add nuw nsw <8 x i16> %add, %s1s
   %s = lshr <8 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %s to <8 x i8>
   ret <8 x i8> %result
@@ -383,8 +380,8 @@ define arm_aapcs_vfpcc <16 x i8> @vrhadds_v16i8(<16 x i8> %s0, <16 x i8> %s1) {
 entry:
   %s0s = sext <16 x i8> %s0 to <16 x i16>
   %s1s = sext <16 x i8> %s1 to <16 x i16>
-  %add = add <16 x i16> %s0s, %s1s
-  %add2 = add <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add = add nsw <16 x i16> %s0s, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add2 = add nsw <16 x i16> %add, %s1s
   %s = lshr <16 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %s to <16 x i8>
   ret <16 x i8> %result
@@ -398,17 +395,14 @@ define arm_aapcs_vfpcc <16 x i8> @vrhaddu_v16i8(<16 x i8> %s0, <16 x i8> %s1) {
 entry:
   %s0s = zext <16 x i8> %s0 to <16 x i16>
   %s1s = zext <16 x i8> %s1 to <16 x i16>
-  %add = add <16 x i16> %s0s, %s1s
-  %add2 = add <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add = add nuw nsw <16 x i16> %s0s, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %add2 = add nuw nsw <16 x i16> %add, %s1s
   %s = lshr <16 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %s to <16 x i8>
   ret <16 x i8> %result
 }
 
-
-
-
-define void @vhadd_loop_s8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
+define void @vhadd_loop_s8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture writeonly %z, i32 %n) {
 ; CHECK-LABEL: vhadd_loop_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -447,7 +441,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vhadd_loop_s16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
+define void @vhadd_loop_s16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture writeonly %z, i32 %n) {
 ; CHECK-LABEL: vhadd_loop_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -486,7 +480,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vhadd_loop_s32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
+define void @vhadd_loop_s32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture writeonly %z, i32 %n) {
 ; CHECK-LABEL: vhadd_loop_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -525,7 +519,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vhadd_loop_u8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
+define void @vhadd_loop_u8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture writeonly %z, i32 %n) {
 ; CHECK-LABEL: vhadd_loop_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -564,7 +558,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vhadd_loop_u16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
+define void @vhadd_loop_u16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture writeonly %z, i32 %n) {
 ; CHECK-LABEL: vhadd_loop_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -603,7 +597,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vhadd_loop_u32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
+define void @vhadd_loop_u32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture writeonly %z, i32 %n) {
 ; CHECK-LABEL: vhadd_loop_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
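
The bodies of these vhadd_loop_* tests are unchanged context and therefore elided from the hunks. For orientation, here is a hedged sketch of the kind of vectorized body the signed-i8 variant plausibly contains, assuming %n is a non-zero multiple of 16 and ignoring any tail handling the real test may have; the widen-add-shift-trunc chain is the pattern the MVE backend matches to a halving add:

define void @vhadd_loop_s8_sketch(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture writeonly %z, i32 %n) {
entry:
  br label %vector.body

vector.body:                       ; one 128-bit MVE vector per iteration
  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
  %px = getelementptr inbounds i8, ptr %x, i32 %index
  %vx = load <16 x i8>, ptr %px, align 1
  %py = getelementptr inbounds i8, ptr %y, i32 %index
  %vy = load <16 x i8>, ptr %py, align 1
  %sx = sext <16 x i8> %vx to <16 x i16>
  %sy = sext <16 x i8> %vy to <16 x i16>
  %sum = add nsw <16 x i16> %sx, %sy
  ; ashr halves the widened sum; lshr would store the same low bytes.
  %half = ashr <16 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %res = trunc <16 x i16> %half to <16 x i8>
  %pz = getelementptr inbounds i8, ptr %z, i32 %index
  store <16 x i8> %res, ptr %pz, align 1
  %index.next = add nuw i32 %index, 16
  %done = icmp eq i32 %index.next, %n
  br i1 %done, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:
  ret void
}

The vrhadd_loop_* tests that follow differ only in adding the splat-of-1 rounding bias before the shift, as in the vrhadd tests shown earlier.
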
@@ -642,7 +636,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vrhadd_loop_s8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
+define void @vrhadd_loop_s8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture writeonly %z, i32 %n) {
 ; CHECK-LABEL: vrhadd_loop_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -682,7 +676,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vrhadd_loop_s16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
+define void @vrhadd_loop_s16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture writeonly %z, i32 %n) {
 ; CHECK-LABEL: vrhadd_loop_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -722,7 +716,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vrhadd_loop_s32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
+define void @vrhadd_loop_s32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture writeonly %z, i32 %n) {
 ; CHECK-LABEL: vrhadd_loop_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -762,7 +756,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vrhadd_loop_u8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
+define void @vrhadd_loop_u8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture writeonly %z, i32 %n) {
 ; CHECK-LABEL: vrhadd_loop_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -802,7 +796,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vrhadd_loop_u16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
+define void @vrhadd_loop_u16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture writeonly %z, i32 %n) {
 ; CHECK-LABEL: vrhadd_loop_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -842,7 +836,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vrhadd_loop_u32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
+define void @vrhadd_loop_u32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture writeonly %z, i32 %n) {
 ; CHECK-LABEL: vrhadd_loop_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}