[llvm] 1dee88f - [AArch64][SME]: Add streaming-compatible testing files.

Hassnaa Hamdi via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 30 17:37:47 PST 2022


Author: Hassnaa Hamdi
Date: 2022-12-01T01:37:36Z
New Revision: 1dee88fac17d4c125e95413cb79e90bf2058ab99

URL: https://github.com/llvm/llvm-project/commit/1dee88fac17d4c125e95413cb79e90bf2058ab99
DIFF: https://github.com/llvm/llvm-project/commit/1dee88fac17d4c125e95413cb79e90bf2058ab99.diff

LOG: [AArch64][SME]: Add streaming-compatible testing files.

Testing files:
 - int-compares.ll
 - int-immediates.ll
 - log-reduce.ll

Reviewed By: david-arm, sdesmalen

Differential Revision: https://reviews.llvm.org/D138717

Added: 
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compares.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compares.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compares.ll
new file mode 100644
index 000000000000..8d2c791660cc
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compares.ll
@@ -0,0 +1,414 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; ICMP EQ
+;
+
+define <8 x i8> @icmp_eq_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
+; CHECK-LABEL: icmp_eq_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, z1.b
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %cmp = icmp eq <8 x i8> %op1, %op2
+  %sext = sext <8 x i1> %cmp to <8 x i8>
+  ret <8 x i8> %sext
+}
+
+define <16 x i8> @icmp_eq_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
+; CHECK-LABEL: icmp_eq_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, z1.b
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %cmp = icmp eq <16 x i8> %op1, %op2
+  %sext = sext <16 x i1> %cmp to <16 x i8>
+  ret <16 x i8> %sext
+}
+
+define void @icmp_eq_v32i8(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: icmp_eq_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    cmpeq p1.b, p0/z, z0.b, z2.b
+; CHECK-NEXT:    mov z0.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z1.b, z3.b
+; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
+  %cmp = icmp eq <32 x i8> %op1, %op2
+  %sext = sext <32 x i1> %cmp to <32 x i8>
+  store <32 x i8> %sext, ptr %a
+  ret void
+}
+
+define <4 x i16> @icmp_eq_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
+; CHECK-LABEL: icmp_eq_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %cmp = icmp eq <4 x i16> %op1, %op2
+  %sext = sext <4 x i1> %cmp to <4 x i16>
+  ret <4 x i16> %sext
+}
+
+define <8 x i16> @icmp_eq_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
+; CHECK-LABEL: icmp_eq_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %cmp = icmp eq <8 x i16> %op1, %op2
+  %sext = sext <8 x i1> %cmp to <8 x i16>
+  ret <8 x i16> %sext
+}
+
+define void @icmp_eq_v16i16(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: icmp_eq_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    cmpeq p1.h, p0/z, z0.h, z2.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    cmpeq p0.h, p0/z, z1.h, z3.h
+; CHECK-NEXT:    mov z1.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
+  %cmp = icmp eq <16 x i16> %op1, %op2
+  %sext = sext <16 x i1> %cmp to <16 x i16>
+  store <16 x i16> %sext, ptr %a
+  ret void
+}
+
+define <2 x i32> @icmp_eq_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 {
+; CHECK-LABEL: icmp_eq_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %cmp = icmp eq <2 x i32> %op1, %op2
+  %sext = sext <2 x i1> %cmp to <2 x i32>
+  ret <2 x i32> %sext
+}
+
+define <4 x i32> @icmp_eq_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
+; CHECK-LABEL: icmp_eq_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %cmp = icmp eq <4 x i32> %op1, %op2
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %sext
+}
+
+define void @icmp_eq_v8i32(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: icmp_eq_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    cmpeq p1.s, p0/z, z0.s, z2.s
+; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    cmpeq p0.s, p0/z, z1.s, z3.s
+; CHECK-NEXT:    mov z1.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
+  %cmp = icmp eq <8 x i32> %op1, %op2
+  %sext = sext <8 x i1> %cmp to <8 x i32>
+  store <8 x i32> %sext, ptr %a
+  ret void
+}
+
+define <1 x i64> @icmp_eq_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 {
+; CHECK-LABEL: icmp_eq_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
+; CHECK-NEXT:    mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %cmp = icmp eq <1 x i64> %op1, %op2
+  %sext = sext <1 x i1> %cmp to <1 x i64>
+  ret <1 x i64> %sext
+}
+
+define <2 x i64> @icmp_eq_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
+; CHECK-LABEL: icmp_eq_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
+; CHECK-NEXT:    mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %cmp = icmp eq <2 x i64> %op1, %op2
+  %sext = sext <2 x i1> %cmp to <2 x i64>
+  ret <2 x i64> %sext
+}
+
+define void @icmp_eq_v4i64(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: icmp_eq_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    cmpeq p1.d, p0/z, z0.d, z2.d
+; CHECK-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    cmpeq p0.d, p0/z, z1.d, z3.d
+; CHECK-NEXT:    mov z1.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
+  %cmp = icmp eq <4 x i64> %op1, %op2
+  %sext = sext <4 x i1> %cmp to <4 x i64>
+  store <4 x i64> %sext, ptr %a
+  ret void
+}
+
+;
+; ICMP NE
+;
+
+define void @icmp_ne_v32i8(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: icmp_ne_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    cmpne p1.b, p0/z, z0.b, z2.b
+; CHECK-NEXT:    mov z0.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    cmpne p0.b, p0/z, z1.b, z3.b
+; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
+  %cmp = icmp ne <32 x i8> %op1, %op2
+  %sext = sext <32 x i1> %cmp to <32 x i8>
+  store <32 x i8> %sext, ptr %a
+  ret void
+}
+
+;
+; ICMP SGE
+;
+
+define void @icmp_sge_v8i16(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: icmp_sge_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    cmpge p0.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i16>, ptr %a
+  %op2 = load <8 x i16>, ptr %b
+  %cmp = icmp sge <8 x i16> %op1, %op2
+  %sext = sext <8 x i1> %cmp to <8 x i16>
+  store <8 x i16> %sext, ptr %a
+  ret void
+}
+
+;
+; ICMP SGT
+;
+
+define void @icmp_sgt_v16i16(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: icmp_sgt_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    cmpgt p1.h, p0/z, z0.h, z2.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    cmpgt p0.h, p0/z, z1.h, z3.h
+; CHECK-NEXT:    mov z1.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
+  %cmp = icmp sgt <16 x i16> %op1, %op2
+  %sext = sext <16 x i1> %cmp to <16 x i16>
+  store <16 x i16> %sext, ptr %a
+  ret void
+}
+
+;
+; ICMP SLE
+;
+
+define void @icmp_sle_v4i32(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: icmp_sle_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    cmpge p0.s, p0/z, z1.s, z0.s
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i32>, ptr %a
+  %op2 = load <4 x i32>, ptr %b
+  %cmp = icmp sle <4 x i32> %op1, %op2
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  store <4 x i32> %sext, ptr %a
+  ret void
+}
+
+;
+; ICMP SLT
+;
+
+define void @icmp_slt_v8i32(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: icmp_slt_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    cmpgt p1.s, p0/z, z2.s, z0.s
+; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    cmpgt p0.s, p0/z, z3.s, z1.s
+; CHECK-NEXT:    mov z1.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
+  %cmp = icmp slt <8 x i32> %op1, %op2
+  %sext = sext <8 x i1> %cmp to <8 x i32>
+  store <8 x i32> %sext, ptr %a
+  ret void
+}
+
+;
+; ICMP UGE
+;
+
+define void @icmp_uge_v2i64(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: icmp_uge_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    cmphs p0.d, p0/z, z0.d, z1.d
+; CHECK-NEXT:    mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <2 x i64>, ptr %a
+  %op2 = load <2 x i64>, ptr %b
+  %cmp = icmp uge <2 x i64> %op1, %op2
+  %sext = sext <2 x i1> %cmp to <2 x i64>
+  store <2 x i64> %sext, ptr %a
+  ret void
+}
+
+;
+; ICMP UGT
+;
+
+define void @icmp_ugt_v2i64(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: icmp_ugt_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    cmphi p0.d, p0/z, z0.d, z1.d
+; CHECK-NEXT:    mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <2 x i64>, ptr %a
+  %op2 = load <2 x i64>, ptr %b
+  %cmp = icmp ugt <2 x i64> %op1, %op2
+  %sext = sext <2 x i1> %cmp to <2 x i64>
+  store <2 x i64> %sext, ptr %a
+  ret void
+}
+
+;
+; ICMP ULE
+;
+
+define void @icmp_ule_v2i64(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: icmp_ule_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    cmphs p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT:    mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <2 x i64>, ptr %a
+  %op2 = load <2 x i64>, ptr %b
+  %cmp = icmp ule <2 x i64> %op1, %op2
+  %sext = sext <2 x i1> %cmp to <2 x i64>
+  store <2 x i64> %sext, ptr %a
+  ret void
+}
+
+;
+; ICMP ULT
+;
+
+define void @icmp_ult_v2i64(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: icmp_ult_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    cmphi p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT:    mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <2 x i64>, ptr %a
+  %op2 = load <2 x i64>, ptr %b
+  %cmp = icmp ult <2 x i64> %op1, %op2
+  %sext = sext <2 x i1> %cmp to <2 x i64>
+  store <2 x i64> %sext, ptr %a
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll
new file mode 100644
index 000000000000..e1013e1810fe
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll
@@ -0,0 +1,1144 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Although SVE immediate packing should be fully tested using scalable vectors,
+; these tests protect against the possibility that scalable nodes, resulting
+; from lowering fixed length vector operations, trigger different isel patterns.
+
+; FIXME: These instructions should have the immediate form
+
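For contrast, here is a minimal scalable-vector sketch of the splat-plus-add pattern these tests exercise (illustrative only; the function name is made up and this snippet is not part of the commit). With native scalable types, isel is expected to fold the splatted constant into the immediate form of the instruction, e.g. "add z0.b, z0.b, #7", rather than materialising it from a constant pool as in the CHECK lines below:

define <vscale x 16 x i8> @add_nxv16i8_imm(<vscale x 16 x i8> %op1) #0 {
  ; Splat of the constant 7, the scalable analogue of the fixed-length
  ; insertelement/shufflevector idiom used throughout this file.
  %ins = insertelement <vscale x 16 x i8> undef, i8 7, i64 0
  %splat = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  ; Expected to select the immediate form: add z0.b, z0.b, #7
  %res = add <vscale x 16 x i8> %op1, %splat
  ret <vscale x 16 x i8> %res
}
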
+;
+; ADD
+;
+
+define void @add_v32i8(ptr %a) #0 {
+; CHECK-LABEL: add_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    add z1.b, z1.b, z0.b
+; CHECK-NEXT:    add z0.b, z2.b, z0.b
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i32 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = add <32 x i8> %op1, %op2
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @add_v16i16(ptr %a) #0 {
+; CHECK-LABEL: add_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI1_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI1_0]
+; CHECK-NEXT:    add z1.h, z1.h, z0.h
+; CHECK-NEXT:    add z0.h, z2.h, z0.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = add <16 x i16> %op1, %op2
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @add_v8i32(ptr %a) #0 {
+; CHECK-LABEL: add_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI2_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI2_0]
+; CHECK-NEXT:    add z1.s, z1.s, z0.s
+; CHECK-NEXT:    add z0.s, z2.s, z0.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 31, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = add <8 x i32> %op1, %op2
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @add_v4i64(ptr %a) #0 {
+; CHECK-LABEL: add_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI3_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT:    add z1.d, z1.d, z0.d
+; CHECK-NEXT:    add z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = add <4 x i64> %op1, %op2
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; AND
+;
+
+define void @and_v32i8(ptr %a) #0 {
+; CHECK-LABEL: and_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI4_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI4_0]
+; CHECK-NEXT:    and z1.d, z1.d, z0.d
+; CHECK-NEXT:    and z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i32 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = and <32 x i8> %op1, %op2
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @and_v16i16(ptr %a) #0 {
+; CHECK-LABEL: and_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI5_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI5_0]
+; CHECK-NEXT:    and z1.d, z1.d, z0.d
+; CHECK-NEXT:    and z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = and <16 x i16> %op1, %op2
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @and_v8i32(ptr %a) #0 {
+; CHECK-LABEL: and_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI6_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI6_0]
+; CHECK-NEXT:    and z1.d, z1.d, z0.d
+; CHECK-NEXT:    and z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 31, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = and <8 x i32> %op1, %op2
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @and_v4i64(ptr %a) #0 {
+; CHECK-LABEL: and_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI7_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI7_0]
+; CHECK-NEXT:    and z1.d, z1.d, z0.d
+; CHECK-NEXT:    and z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = and <4 x i64> %op1, %op2
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; ASHR
+;
+
+define void @ashr_v32i8(ptr %a) #0 {
+; CHECK-LABEL: ashr_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI8_0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI8_0]
+; CHECK-NEXT:    asr z1.b, p0/m, z1.b, z0.b
+; CHECK-NEXT:    asrr z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i32 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = ashr <32 x i8> %op1, %op2
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @ashr_v16i16(ptr %a) #0 {
+; CHECK-LABEL: ashr_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI9_0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI9_0]
+; CHECK-NEXT:    asr z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT:    asrr z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = ashr <16 x i16> %op1, %op2
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @ashr_v8i32(ptr %a) #0 {
+; CHECK-LABEL: ashr_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI10_0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI10_0]
+; CHECK-NEXT:    asr z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT:    asrr z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 31, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = ashr <8 x i32> %op1, %op2
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @ashr_v4i64(ptr %a) #0 {
+; CHECK-LABEL: ashr_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI11_0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI11_0]
+; CHECK-NEXT:    asr z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    asrr z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = ashr <4 x i64> %op1, %op2
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; ICMP
+;
+
+define void @icmp_eq_v32i8(ptr %a) #0 {
+; CHECK-LABEL: icmp_eq_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI12_0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI12_0]
+; CHECK-NEXT:    cmpeq p1.b, p0/z, z1.b, z0.b
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z2.b, z0.b
+; CHECK-NEXT:    mov z0.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i64 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %cmp = icmp eq <32 x i8> %op1, %op2
+  %res = sext <32 x i1> %cmp to <32 x i8>
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @icmp_sge_v16i16(ptr %a) #0 {
+; CHECK-LABEL: icmp_sge_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI13_0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI13_0]
+; CHECK-NEXT:    cmpge p1.h, p0/z, z1.h, z0.h
+; CHECK-NEXT:    cmpge p0.h, p0/z, z2.h, z0.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %cmp = icmp sge <16 x i16> %op1, %op2
+  %res = sext <16 x i1> %cmp to <16 x i16>
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @icmp_sgt_v8i32(ptr %a) #0 {
+; CHECK-LABEL: icmp_sgt_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI14_0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI14_0]
+; CHECK-NEXT:    cmpgt p1.s, p0/z, z1.s, z0.s
+; CHECK-NEXT:    cmpgt p0.s, p0/z, z2.s, z0.s
+; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 -8, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %cmp = icmp sgt <8 x i32> %op1, %op2
+  %res = sext <8 x i1> %cmp to <8 x i32>
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @icmp_ult_v4i64(ptr %a) #0 {
+; CHECK-LABEL: icmp_ult_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI15_0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI15_0]
+; CHECK-NEXT:    cmphi p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT:    cmphi p0.d, p0/z, z0.d, z2.d
+; CHECK-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %cmp = icmp ult <4 x i64> %op1, %op2
+  %res = sext <4 x i1> %cmp to <4 x i64>
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; LSHR
+;
+
+define void @lshr_v32i8(ptr %a) #0 {
+; CHECK-LABEL: lshr_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI16_0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI16_0]
+; CHECK-NEXT:    lsr z1.b, p0/m, z1.b, z0.b
+; CHECK-NEXT:    lsrr z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i64 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = lshr <32 x i8> %op1, %op2
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @lshr_v16i16(ptr %a) #0 {
+; CHECK-LABEL: lshr_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI17_0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI17_0]
+; CHECK-NEXT:    lsr z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT:    lsrr z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = lshr <16 x i16> %op1, %op2
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @lshr_v8i32(ptr %a) #0 {
+; CHECK-LABEL: lshr_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI18_0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI18_0]
+; CHECK-NEXT:    lsr z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT:    lsrr z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 31, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = lshr <8 x i32> %op1, %op2
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @lshr_v4i64(ptr %a) #0 {
+; CHECK-LABEL: lshr_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI19_0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI19_0]
+; CHECK-NEXT:    lsr z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    lsrr z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = lshr <4 x i64> %op1, %op2
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; MUL
+;
+
+define void @mul_v32i8(ptr %a) #0 {
+; CHECK-LABEL: mul_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI20_0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI20_0]
+; CHECK-NEXT:    mul z1.b, p0/m, z1.b, z0.b
+; CHECK-NEXT:    mul z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i64 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = mul <32 x i8> %op1, %op2
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @mul_v16i16(ptr %a) #0 {
+; CHECK-LABEL: mul_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI21_0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI21_0]
+; CHECK-NEXT:    mul z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = mul <16 x i16> %op1, %op2
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @mul_v8i32(ptr %a) #0 {
+; CHECK-LABEL: mul_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI22_0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI22_0]
+; CHECK-NEXT:    mul z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 31, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = mul <8 x i32> %op1, %op2
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @mul_v4i64(ptr %a) #0 {
+; CHECK-LABEL: mul_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI23_0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI23_0]
+; CHECK-NEXT:    mul z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = mul <4 x i64> %op1, %op2
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; OR
+;
+
+define void @or_v32i8(ptr %a) #0 {
+; CHECK-LABEL: or_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI24_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI24_0]
+; CHECK-NEXT:    orr z1.d, z1.d, z0.d
+; CHECK-NEXT:    orr z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i64 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = or <32 x i8> %op1, %op2
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @or_v16i16(ptr %a) #0 {
+; CHECK-LABEL: or_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI25_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI25_0]
+; CHECK-NEXT:    orr z1.d, z1.d, z0.d
+; CHECK-NEXT:    orr z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = or <16 x i16> %op1, %op2
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @or_v8i32(ptr %a) #0 {
+; CHECK-LABEL: or_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI26_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI26_0]
+; CHECK-NEXT:    orr z1.d, z1.d, z0.d
+; CHECK-NEXT:    orr z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 31, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = or <8 x i32> %op1, %op2
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @or_v4i64(ptr %a) #0 {
+; CHECK-LABEL: or_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI27_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI27_0]
+; CHECK-NEXT:    orr z1.d, z1.d, z0.d
+; CHECK-NEXT:    orr z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = or <4 x i64> %op1, %op2
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; SHL
+;
+
+define void @shl_v32i8(ptr %a) #0 {
+; CHECK-LABEL: shl_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI28_0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI28_0]
+; CHECK-NEXT:    lsl z1.b, p0/m, z1.b, z0.b
+; CHECK-NEXT:    lslr z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i64 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = shl <32 x i8> %op1, %op2
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @shl_v16i16(ptr %a) #0 {
+; CHECK-LABEL: shl_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI29_0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI29_0]
+; CHECK-NEXT:    lsl z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT:    lslr z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = shl <16 x i16> %op1, %op2
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @shl_v8i32(ptr %a) #0 {
+; CHECK-LABEL: shl_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI30_0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI30_0]
+; CHECK-NEXT:    lsl z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT:    lslr z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 31, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = shl <8 x i32> %op1, %op2
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @shl_v4i64(ptr %a) #0 {
+; CHECK-LABEL: shl_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI31_0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI31_0]
+; CHECK-NEXT:    lsl z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    lslr z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = shl <4 x i64> %op1, %op2
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; SMAX
+;
+
+define void @smax_v32i8(ptr %a) #0 {
+; CHECK-LABEL: smax_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI32_0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI32_0]
+; CHECK-NEXT:    smax z1.b, p0/m, z1.b, z0.b
+; CHECK-NEXT:    smax z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i64 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = call <32 x i8> @llvm.smax.v32i8(<32 x i8> %op1, <32 x i8> %op2)
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @smax_v16i16(ptr %a) #0 {
+; CHECK-LABEL: smax_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI33_0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI33_0]
+; CHECK-NEXT:    smax z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT:    smax z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %op1, <16 x i16> %op2)
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @smax_v8i32(ptr %a) #0 {
+; CHECK-LABEL: smax_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI34_0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI34_0]
+; CHECK-NEXT:    smax z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT:    smax z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 31, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %op1, <8 x i32> %op2)
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @smax_v4i64(ptr %a) #0 {
+; CHECK-LABEL: smax_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI35_0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI35_0]
+; CHECK-NEXT:    smax z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    smax z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %op1, <4 x i64> %op2)
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; SMIN
+;
+
+define void @smin_v32i8(ptr %a) #0 {
+; CHECK-LABEL: smin_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI36_0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI36_0]
+; CHECK-NEXT:    smin z1.b, p0/m, z1.b, z0.b
+; CHECK-NEXT:    smin z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i64 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = call <32 x i8> @llvm.smin.v32i8(<32 x i8> %op1, <32 x i8> %op2)
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @smin_v16i16(ptr %a) #0 {
+; CHECK-LABEL: smin_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI37_0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI37_0]
+; CHECK-NEXT:    smin z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT:    smin z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i16> @llvm.smin.v16i16(<16 x i16> %op1, <16 x i16> %op2)
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @smin_v8i32(ptr %a) #0 {
+; CHECK-LABEL: smin_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI38_0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI38_0]
+; CHECK-NEXT:    smin z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT:    smin z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 31, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %op1, <8 x i32> %op2)
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @smin_v4i64(ptr %a) #0 {
+; CHECK-LABEL: smin_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI39_0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI39_0]
+; CHECK-NEXT:    smin z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    smin z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %op1, <4 x i64> %op2)
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; SUB
+;
+
+define void @sub_v32i8(ptr %a) #0 {
+; CHECK-LABEL: sub_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI40_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI40_0]
+; CHECK-NEXT:    sub z1.b, z1.b, z0.b
+; CHECK-NEXT:    sub z0.b, z2.b, z0.b
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i64 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = sub <32 x i8> %op1, %op2
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @sub_v16i16(ptr %a) #0 {
+; CHECK-LABEL: sub_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI41_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI41_0]
+; CHECK-NEXT:    sub z1.h, z1.h, z0.h
+; CHECK-NEXT:    sub z0.h, z2.h, z0.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = sub <16 x i16> %op1, %op2
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @sub_v8i32(ptr %a) #0 {
+; CHECK-LABEL: sub_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI42_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI42_0]
+; CHECK-NEXT:    sub z1.s, z1.s, z0.s
+; CHECK-NEXT:    sub z0.s, z2.s, z0.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 31, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = sub <8 x i32> %op1, %op2
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @sub_v4i64(ptr %a) #0 {
+; CHECK-LABEL: sub_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI43_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI43_0]
+; CHECK-NEXT:    sub z1.d, z1.d, z0.d
+; CHECK-NEXT:    sub z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = sub <4 x i64> %op1, %op2
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; UMAX
+;
+
+define void @umax_v32i8(ptr %a) #0 {
+; CHECK-LABEL: umax_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI44_0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI44_0]
+; CHECK-NEXT:    umax z1.b, p0/m, z1.b, z0.b
+; CHECK-NEXT:    umax z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i64 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = call <32 x i8> @llvm.umax.v32i8(<32 x i8> %op1, <32 x i8> %op2)
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @umax_v16i16(ptr %a) #0 {
+; CHECK-LABEL: umax_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI45_0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI45_0]
+; CHECK-NEXT:    umax z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT:    umax z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i16> @llvm.umax.v16i16(<16 x i16> %op1, <16 x i16> %op2)
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @umax_v8i32(ptr %a) #0 {
+; CHECK-LABEL: umax_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI46_0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI46_0]
+; CHECK-NEXT:    umax z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT:    umax z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 31, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i32> @llvm.umax.v8i32(<8 x i32> %op1, <8 x i32> %op2)
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @umax_v4i64(ptr %a) #0 {
+; CHECK-LABEL: umax_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI47_0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI47_0]
+; CHECK-NEXT:    umax z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    umax z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %op1, <4 x i64> %op2)
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; UMIN
+;
+
+define void @umin_v32i8(ptr %a) #0 {
+; CHECK-LABEL: umin_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI48_0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI48_0]
+; CHECK-NEXT:    umin z1.b, p0/m, z1.b, z0.b
+; CHECK-NEXT:    umin z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i64 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = call <32 x i8> @llvm.umin.v32i8(<32 x i8> %op1, <32 x i8> %op2)
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @umin_v16i16(ptr %a) #0 {
+; CHECK-LABEL: umin_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI49_0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI49_0]
+; CHECK-NEXT:    umin z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT:    umin z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %op1, <16 x i16> %op2)
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @umin_v8i32(ptr %a) #0 {
+; CHECK-LABEL: umin_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI50_0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI50_0]
+; CHECK-NEXT:    umin z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT:    umin z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 31, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %op1, <8 x i32> %op2)
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @umin_v4i64(ptr %a) #0 {
+; CHECK-LABEL: umin_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI51_0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI51_0]
+; CHECK-NEXT:    umin z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    umin z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %op1, <4 x i64> %op2)
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; XOR
+;
+
+define void @xor_v32i8(ptr %a) #0 {
+; CHECK-LABEL: xor_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI52_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI52_0]
+; CHECK-NEXT:    eor z1.d, z1.d, z0.d
+; CHECK-NEXT:    eor z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, ptr %a
+  %ins = insertelement <32 x i8> undef, i8 7, i64 0
+  %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer
+  %res = xor <32 x i8> %op1, %op2
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define void @xor_v16i16(ptr %a) #0 {
+; CHECK-LABEL: xor_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI53_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI53_0]
+; CHECK-NEXT:    eor z1.d, z1.d, z0.d
+; CHECK-NEXT:    eor z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, ptr %a
+  %ins = insertelement <16 x i16> undef, i16 15, i64 0
+  %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer
+  %res = xor <16 x i16> %op1, %op2
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define void @xor_v8i32(ptr %a) #0 {
+; CHECK-LABEL: xor_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI54_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI54_0]
+; CHECK-NEXT:    eor z1.d, z1.d, z0.d
+; CHECK-NEXT:    eor z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, ptr %a
+  %ins = insertelement <8 x i32> undef, i32 31, i64 0
+  %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
+  %res = xor <8 x i32> %op1, %op2
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define void @xor_v4i64(ptr %a) #0 {
+; CHECK-LABEL: xor_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI55_0
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI55_0]
+; CHECK-NEXT:    eor z1.d, z1.d, z0.d
+; CHECK-NEXT:    eor z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, ptr %a
+  %ins = insertelement <4 x i64> undef, i64 63, i64 0
+  %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
+  %res = xor <4 x i64> %op1, %op2
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+declare <32 x i8> @llvm.smax.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
+
+declare <32 x i8> @llvm.smin.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
+
+declare <32 x i8> @llvm.umax.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.umax.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.umax.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>)
+
+declare <32 x i8> @llvm.umin.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.umin.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)
+
+attributes #0 = { "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll
new file mode 100644
index 000000000000..cf2c826db54e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll
@@ -0,0 +1,561 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; ANDV
+;
+
+define i8 @andv_v4i8(<4 x i8> %a) #0 {
+; CHECK-LABEL: andv_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    andv h0, p0, z0.h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @andv_v8i8(<8 x i8> %a) #0 {
+; CHECK-LABEL: andv_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    andv b0, p0, z0.b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @andv_v16i8(<16 x i8> %a) #0 {
+; CHECK-LABEL: andv_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    andv b0, p0, z0.b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @andv_v32i8(ptr %a) #0 {
+; CHECK-LABEL: andv_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    and z0.d, z1.d, z0.d
+; CHECK-NEXT:    andv b0, p0, z0.b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, ptr %a
+  %res = call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %op)
+  ret i8 %res
+}
+
+define i16 @andv_v2i16(<2 x i16> %a) #0 {
+; CHECK-LABEL: andv_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    andv s0, p0, z0.s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.and.v2i16(<2 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @andv_v4i16(<4 x i16> %a) #0 {
+; CHECK-LABEL: andv_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    andv h0, p0, z0.h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @andv_v8i16(<8 x i16> %a) #0 {
+; CHECK-LABEL: andv_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    andv h0, p0, z0.h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @andv_v16i16(ptr %a) #0 {
+; CHECK-LABEL: andv_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    and z0.d, z1.d, z0.d
+; CHECK-NEXT:    andv h0, p0, z0.h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, ptr %a
+  %res = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %op)
+  ret i16 %res
+}
+
+define i32 @andv_v2i32(<2 x i32> %a) #0 {
+; CHECK-LABEL: andv_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    andv s0, p0, z0.s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @andv_v4i32(<4 x i32> %a) #0 {
+; CHECK-LABEL: andv_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    andv s0, p0, z0.s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @andv_v8i32(ptr %a) #0 {
+; CHECK-LABEL: andv_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    and z0.d, z1.d, z0.d
+; CHECK-NEXT:    andv s0, p0, z0.s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, ptr %a
+  %res = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %op)
+  ret i32 %res
+}
+
+define i64 @andv_v2i64(<2 x i64> %a) #0 {
+; CHECK-LABEL: andv_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    andv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %res = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %a)
+  ret i64 %res
+}
+
+define i64 @andv_v4i64(ptr %a) #0 {
+; CHECK-LABEL: andv_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    and z0.d, z1.d, z0.d
+; CHECK-NEXT:    andv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, ptr %a
+  %res = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %op)
+  ret i64 %res
+}
+
+;
+; EORV
+;
+
+define i8 @eorv_v4i8(<4 x i8> %a) #0 {
+; CHECK-LABEL: eorv_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    eorv h0, p0, z0.h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.xor.v4i8(<4 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @eorv_v8i8(<8 x i8> %a) #0 {
+; CHECK-LABEL: eorv_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    eorv b0, p0, z0.b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @eorv_v16i8(<16 x i8> %a) #0 {
+; CHECK-LABEL: eorv_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    eorv b0, p0, z0.b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @eorv_v32i8(ptr %a) #0 {
+; CHECK-LABEL: eorv_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    eor z0.d, z1.d, z0.d
+; CHECK-NEXT:    eorv b0, p0, z0.b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, ptr %a
+  %res = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> %op)
+  ret i8 %res
+}
+
+define i16 @eorv_v2i16(<2 x i16> %a) #0 {
+; CHECK-LABEL: eorv_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    eorv s0, p0, z0.s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.xor.v2i16(<2 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @eorv_v4i16(<4 x i16> %a) #0 {
+; CHECK-LABEL: eorv_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    eorv h0, p0, z0.h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @eorv_v8i16(<8 x i16> %a) #0 {
+; CHECK-LABEL: eorv_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    eorv h0, p0, z0.h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @eorv_v16i16(ptr %a) #0 {
+; CHECK-LABEL: eorv_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    eor z0.d, z1.d, z0.d
+; CHECK-NEXT:    eorv h0, p0, z0.h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, ptr %a
+  %res = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %op)
+  ret i16 %res
+}
+
+define i32 @eorv_v2i32(<2 x i32> %a) #0 {
+; CHECK-LABEL: eorv_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    eorv s0, p0, z0.s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @eorv_v4i32(<4 x i32> %a) #0 {
+; CHECK-LABEL: eorv_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    eorv s0, p0, z0.s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @eorv_v8i32(ptr %a) #0 {
+; CHECK-LABEL: eorv_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    eor z0.d, z1.d, z0.d
+; CHECK-NEXT:    eorv s0, p0, z0.s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, ptr %a
+  %res = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %op)
+  ret i32 %res
+}
+
+define i64 @eorv_v2i64(<2 x i64> %a) #0 {
+; CHECK-LABEL: eorv_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    eorv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %res = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %a)
+  ret i64 %res
+}
+
+define i64 @eorv_v4i64(ptr %a) #0 {
+; CHECK-LABEL: eorv_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    eor z0.d, z1.d, z0.d
+; CHECK-NEXT:    eorv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, ptr %a
+  %res = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %op)
+  ret i64 %res
+}
+
+;
+; ORV
+;
+
+define i8 @orv_v4i8(<4 x i8> %a) #0 {
+; CHECK-LABEL: orv_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    orv h0, p0, z0.h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @orv_v8i8(<8 x i8> %a) #0 {
+; CHECK-LABEL: orv_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    orv b0, p0, z0.b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @orv_v16i8(<16 x i8> %a) #0 {
+; CHECK-LABEL: orv_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    orv b0, p0, z0.b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @orv_v32i8(ptr %a) #0 {
+; CHECK-LABEL: orv_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    orr z0.d, z1.d, z0.d
+; CHECK-NEXT:    orv b0, p0, z0.b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, ptr %a
+  %res = call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %op)
+  ret i8 %res
+}
+
+define i16 @orv_v2i16(<2 x i16> %a) #0 {
+; CHECK-LABEL: orv_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    orv s0, p0, z0.s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.or.v2i16(<2 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @orv_v4i16(<4 x i16> %a) #0 {
+; CHECK-LABEL: orv_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    orv h0, p0, z0.h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @orv_v8i16(<8 x i16> %a) #0 {
+; CHECK-LABEL: orv_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    orv h0, p0, z0.h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @orv_v16i16(ptr %a) #0 {
+; CHECK-LABEL: orv_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    orr z0.d, z1.d, z0.d
+; CHECK-NEXT:    orv h0, p0, z0.h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, ptr %a
+  %res = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %op)
+  ret i16 %res
+}
+
+define i32 @orv_v2i32(<2 x i32> %a) #0 {
+; CHECK-LABEL: orv_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    orv s0, p0, z0.s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @orv_v4i32(<4 x i32> %a) #0 {
+; CHECK-LABEL: orv_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    orv s0, p0, z0.s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @orv_v8i32(ptr %a) #0 {
+; CHECK-LABEL: orv_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    orr z0.d, z1.d, z0.d
+; CHECK-NEXT:    orv s0, p0, z0.s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, ptr %a
+  %res = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %op)
+  ret i32 %res
+}
+
+define i64 @orv_v2i64(<2 x i64> %a) #0 {
+; CHECK-LABEL: orv_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    orv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %res = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %a)
+  ret i64 %res
+}
+
+define i64 @orv_v4i64(ptr %a) #0 {
+; CHECK-LABEL: orv_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    orr z0.d, z1.d, z0.d
+; CHECK-NEXT:    orv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, ptr %a
+  %res = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %op)
+  ret i64 %res
+}
+
+attributes #0 = { "target-features"="+sve" }
+
+declare i8 @llvm.vector.reduce.and.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.and.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.and.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.and.v32i8(<32 x i8>)
+
+declare i16 @llvm.vector.reduce.and.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.and.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.and.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.and.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.and.v8i32(<8 x i32>)
+
+declare i64 @llvm.vector.reduce.and.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>)
+
+declare i8 @llvm.vector.reduce.or.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.or.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.or.v32i8(<32 x i8>)
+
+declare i16 @llvm.vector.reduce.or.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.or.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.or.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.or.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.or.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.or.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.or.v8i32(<8 x i32>)
+
+declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.or.v4i64(<4 x i64>)
+
+declare i8 @llvm.vector.reduce.xor.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.xor.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.xor.v32i8(<32 x i8>)
+
+declare i16 @llvm.vector.reduce.xor.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.xor.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.xor.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.xor.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.xor.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.xor.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32>)
+
+declare i64 @llvm.vector.reduce.xor.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>)
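
For readers unfamiliar with the intrinsics exercised above: @llvm.vector.reduce.and folds every lane of a vector together with bitwise AND, and the xor/or variants behave analogously for the EORV/ORV tests. A minimal scalarized sketch of the v4i64 case follows (illustrative only; the function name is made up and is not part of the patch):

; Illustrative sketch: scalarized equivalent of
; @llvm.vector.reduce.and.v4i64. AND is associative and commutative,
; so any reduction order gives the same result; the xor and or
; reductions behave the same way.
define i64 @andv_v4i64_scalarized(<4 x i64> %a) {
  %e0 = extractelement <4 x i64> %a, i64 0
  %e1 = extractelement <4 x i64> %a, i64 1
  %e2 = extractelement <4 x i64> %a, i64 2
  %e3 = extractelement <4 x i64> %a, i64 3
  %a0 = and i64 %e0, %e1
  %a1 = and i64 %a0, %e2
  %a2 = and i64 %a1, %e3
  ret i64 %a2
}

This associativity is also why the 256-bit tests (e.g. andv_v4i64 above) can first combine the two loaded 128-bit halves with an unpredicated "and z0.d, z1.d, z0.d" before the final predicated "andv" without changing the result.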
