[llvm] 12cb8ca - [AArch64] Add CodeGen tests for vector reduction intrinsics. NFC

Rosie Sumpter via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jun 23 05:56:43 PDT 2021


Author: Rosie Sumpter
Date: 2021-06-23T13:46:16+01:00
New Revision: 12cb8ca6686041990f83eab1897b41f70f6f123c

URL: https://github.com/llvm/llvm-project/commit/12cb8ca6686041990f83eab1897b41f70f6f123c
DIFF: https://github.com/llvm/llvm-project/commit/12cb8ca6686041990f83eab1897b41f70f6f123c.diff

LOG: [AArch64] Add CodeGen tests for vector reduction intrinsics. NFC

Tests are added for the vector reduce OR, AND, and XOR intrinsics.

Differential Revision: https://reviews.llvm.org/D104771
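
For reference, each of these intrinsics reduces a vector to a single scalar by
applying the bitwise operation across all lanes. A minimal sketch of the
semantics (the constant operand is illustrative, not taken from the tests):

    ; and(and(and(7, 3), 1), 1) = 1
    %r = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> <i32 7, i32 3, i32 1, i32 1>)

The CHECK lines in the tests below are autogenerated; assuming a built llc,
they can be regenerated with, e.g.:

    llvm/utils/update_llc_test_checks.py --llc-binary ./build/bin/llc \
        llvm/test/CodeGen/AArch64/reduce-and.ll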

Added: 
    llvm/test/CodeGen/AArch64/reduce-and.ll
    llvm/test/CodeGen/AArch64/reduce-or.ll
    llvm/test/CodeGen/AArch64/reduce-xor.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/reduce-and.ll b/llvm/test/CodeGen/AArch64/reduce-and.ll
new file mode 100644
index 000000000000..69525edee21d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/reduce-and.ll
@@ -0,0 +1,337 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -mattr=+neon | FileCheck %s
+
+define i1 @test_redand_v1i1(<1 x i1> %a) {
+; CHECK-LABEL: test_redand_v1i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w0, w0, #0x1
+; CHECK-NEXT:    ret
+  %and_result = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %a)
+  ret i1 %and_result
+}
+
+define i1 @test_redand_v2i1(<2 x i1> %a) {
+; CHECK-LABEL: test_redand_v2i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    and w8, w9, w8
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %and_result = call i1 @llvm.vector.reduce.and.v2i1(<2 x i1> %a)
+  ret i1 %and_result
+}
+
+define i1 @test_redand_v4i1(<4 x i1> %a) {
+; CHECK-LABEL: test_redand_v4i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w10, v0.h[1]
+; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w9, v0.h[2]
+; CHECK-NEXT:    and w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.h[3]
+; CHECK-NEXT:    and w9, w10, w9
+; CHECK-NEXT:    and w8, w9, w8
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %and_result = call i1 @llvm.vector.reduce.and.v4i1(<4 x i1> %a)
+  ret i1 %and_result
+}
+
+define i1 @test_redand_v8i1(<8 x i1> %a) {
+; CHECK-LABEL: test_redand_v8i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w14, v0.b[1]
+; CHECK-NEXT:    umov w15, v0.b[0]
+; CHECK-NEXT:    umov w13, v0.b[2]
+; CHECK-NEXT:    and w14, w15, w14
+; CHECK-NEXT:    umov w12, v0.b[3]
+; CHECK-NEXT:    and w13, w14, w13
+; CHECK-NEXT:    umov w11, v0.b[4]
+; CHECK-NEXT:    and w12, w13, w12
+; CHECK-NEXT:    umov w10, v0.b[5]
+; CHECK-NEXT:    and w11, w12, w11
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    and w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.b[7]
+; CHECK-NEXT:    and w9, w10, w9
+; CHECK-NEXT:    and w8, w9, w8
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %and_result = call i1 @llvm.vector.reduce.and.v8i1(<8 x i1> %a)
+  ret i1 %and_result
+}
+
+define i1 @test_redand_v16i1(<16 x i1> %a) {
+; CHECK-LABEL: test_redand_v16i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.b[1]
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    and w8, w9, w8
+; CHECK-NEXT:    umov w9, v0.b[2]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[3]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[4]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[5]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[7]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %and_result = call i1 @llvm.vector.reduce.and.v16i1(<16 x i1> %a)
+  ret i1 %and_result
+}
+
+define i8 @test_redand_v1i8(<1 x i8> %a) {
+; CHECK-LABEL: test_redand_v1i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w0, v0.b[0]
+; CHECK-NEXT:    ret
+  %and_result = call i8 @llvm.vector.reduce.and.v1i8(<1 x i8> %a)
+  ret i8 %and_result
+}
+
+define i8 @test_redand_v3i8(<3 x i8> %a) {
+; CHECK-LABEL: test_redand_v3i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w0, w1
+; CHECK-NEXT:    and w8, w8, w2
+; CHECK-NEXT:    and w0, w8, #0xff
+; CHECK-NEXT:    ret
+  %and_result = call i8 @llvm.vector.reduce.and.v3i8(<3 x i8> %a)
+  ret i8 %and_result
+}
+
+define i8 @test_redand_v4i8(<4 x i8> %a) {
+; CHECK-LABEL: test_redand_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w10, v0.h[1]
+; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w9, v0.h[2]
+; CHECK-NEXT:    and w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.h[3]
+; CHECK-NEXT:    and w9, w10, w9
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %and_result = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> %a)
+  ret i8 %and_result
+}
+
+define i8 @test_redand_v8i8(<8 x i8> %a) {
+; CHECK-LABEL: test_redand_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w14, v0.b[1]
+; CHECK-NEXT:    umov w15, v0.b[0]
+; CHECK-NEXT:    umov w13, v0.b[2]
+; CHECK-NEXT:    and w14, w15, w14
+; CHECK-NEXT:    umov w12, v0.b[3]
+; CHECK-NEXT:    and w13, w14, w13
+; CHECK-NEXT:    umov w11, v0.b[4]
+; CHECK-NEXT:    and w12, w13, w12
+; CHECK-NEXT:    umov w10, v0.b[5]
+; CHECK-NEXT:    and w11, w12, w11
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    and w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.b[7]
+; CHECK-NEXT:    and w9, w10, w9
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %and_result = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %a)
+  ret i8 %and_result
+}
+
+define i8 @test_redand_v16i8(<16 x i8> %a) {
+; CHECK-LABEL: test_redand_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.b[1]
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    and w8, w9, w8
+; CHECK-NEXT:    umov w9, v0.b[2]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[3]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[4]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[5]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[7]
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+  %and_result = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %a)
+  ret i8 %and_result
+}
+
+define i8 @test_redand_v32i8(<32 x i8> %a) {
+; CHECK-LABEL: test_redand_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.b[1]
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    and w8, w9, w8
+; CHECK-NEXT:    umov w9, v0.b[2]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[3]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[4]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[5]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[7]
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+  %and_result = call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %a)
+  ret i8 %and_result
+}
+
+define i16 @test_redand_v4i16(<4 x i16> %a) {
+; CHECK-LABEL: test_redand_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w10, v0.h[1]
+; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w9, v0.h[2]
+; CHECK-NEXT:    and w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.h[3]
+; CHECK-NEXT:    and w9, w10, w9
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %and_result = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %a)
+  ret i16 %and_result
+}
+
+define i16 @test_redand_v8i16(<8 x i16> %a) {
+; CHECK-LABEL: test_redand_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.h[1]
+; CHECK-NEXT:    umov w9, v0.h[0]
+; CHECK-NEXT:    umov w10, v0.h[2]
+; CHECK-NEXT:    and w8, w9, w8
+; CHECK-NEXT:    and w8, w8, w10
+; CHECK-NEXT:    umov w9, v0.h[3]
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+  %and_result = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %a)
+  ret i16 %and_result
+}
+
+define i16 @test_redand_v16i16(<16 x i16> %a) {
+; CHECK-LABEL: test_redand_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.h[1]
+; CHECK-NEXT:    umov w9, v0.h[0]
+; CHECK-NEXT:    umov w10, v0.h[2]
+; CHECK-NEXT:    and w8, w9, w8
+; CHECK-NEXT:    and w8, w8, w10
+; CHECK-NEXT:    umov w9, v0.h[3]
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
+  %and_result = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %a)
+  ret i16 %and_result
+}
+
+define i32 @test_redand_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: test_redand_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %and_result = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %a)
+  ret i32 %and_result
+}
+
+define i32 @test_redand_v4i32(<4 x i32> %a) {
+; CHECK-LABEL: test_redand_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %and_result = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %a)
+  ret i32 %and_result
+}
+
+define i32 @test_redand_v8i32(<8 x i32> %a) {
+; CHECK-LABEL: test_redand_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %and_result = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %a)
+  ret i32 %and_result
+}
+
+define i64 @test_redand_v2i64(<2 x i64> %a) {
+; CHECK-LABEL: test_redand_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %and_result = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %a)
+  ret i64 %and_result
+}
+
+define i64 @test_redand_v4i64(<4 x i64> %a) {
+; CHECK-LABEL: test_redand_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %and_result = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %a)
+  ret i64 %and_result
+}
+
+declare i1 @llvm.vector.reduce.and.v1i1(<1 x i1>)
+declare i1 @llvm.vector.reduce.and.v2i1(<2 x i1>)
+declare i1 @llvm.vector.reduce.and.v4i1(<4 x i1>)
+declare i1 @llvm.vector.reduce.and.v8i1(<8 x i1>)
+declare i1 @llvm.vector.reduce.and.v16i1(<16 x i1>)
+declare i64 @llvm.vector.reduce.and.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>)
+declare i32 @llvm.vector.reduce.and.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.and.v8i32(<8 x i32>)
+declare i16 @llvm.vector.reduce.and.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.and.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>)
+declare i8 @llvm.vector.reduce.and.v1i8(<1 x i8>)
+declare i8 @llvm.vector.reduce.and.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.and.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.and.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.and.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.and.v32i8(<32 x i8>)

diff --git a/llvm/test/CodeGen/AArch64/reduce-or.ll b/llvm/test/CodeGen/AArch64/reduce-or.ll
new file mode 100644
index 000000000000..e832352f086b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/reduce-or.ll
@@ -0,0 +1,336 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define i1 @test_redor_v1i1(<1 x i1> %a) {
+; CHECK-LABEL: test_redor_v1i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w0, w0, #0x1
+; CHECK-NEXT:    ret
+  %or_result = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %a)
+  ret i1 %or_result
+}
+
+define i1 @test_redor_v2i1(<2 x i1> %a) {
+; CHECK-LABEL: test_redor_v2i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    orr w8, w9, w8
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %or_result = call i1 @llvm.vector.reduce.or.v2i1(<2 x i1> %a)
+  ret i1 %or_result
+}
+
+define i1 @test_redor_v4i1(<4 x i1> %a) {
+; CHECK-LABEL: test_redor_v4i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w10, v0.h[1]
+; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w9, v0.h[2]
+; CHECK-NEXT:    orr w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.h[3]
+; CHECK-NEXT:    orr w9, w10, w9
+; CHECK-NEXT:    orr w8, w9, w8
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %or_result = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> %a)
+  ret i1 %or_result
+}
+
+define i1 @test_redor_v8i1(<8 x i1> %a) {
+; CHECK-LABEL: test_redor_v8i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w14, v0.b[1]
+; CHECK-NEXT:    umov w15, v0.b[0]
+; CHECK-NEXT:    umov w13, v0.b[2]
+; CHECK-NEXT:    orr w14, w15, w14
+; CHECK-NEXT:    umov w12, v0.b[3]
+; CHECK-NEXT:    orr w13, w14, w13
+; CHECK-NEXT:    umov w11, v0.b[4]
+; CHECK-NEXT:    orr w12, w13, w12
+; CHECK-NEXT:    umov w10, v0.b[5]
+; CHECK-NEXT:    orr w11, w12, w11
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    orr w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.b[7]
+; CHECK-NEXT:    orr w9, w10, w9
+; CHECK-NEXT:    orr w8, w9, w8
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %or_result = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> %a)
+  ret i1 %or_result
+}
+
+define i1 @test_redor_v16i1(<16 x i1> %a) {
+; CHECK-LABEL: test_redor_v16i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.b[1]
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    orr w8, w9, w8
+; CHECK-NEXT:    umov w9, v0.b[2]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[3]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[4]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[5]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[7]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %or_result = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> %a)
+  ret i1 %or_result
+}
+
+define i8 @test_redor_v1i8(<1 x i8> %a) {
+; CHECK-LABEL: test_redor_v1i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w0, v0.b[0]
+; CHECK-NEXT:    ret
+  %or_result = call i8 @llvm.vector.reduce.or.v1i8(<1 x i8> %a)
+  ret i8 %or_result
+}
+
+define i8 @test_redor_v3i8(<3 x i8> %a) {
+; CHECK-LABEL: test_redor_v3i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, w0, w1
+; CHECK-NEXT:    orr w0, w8, w2
+; CHECK-NEXT:    ret
+  %or_result = call i8 @llvm.vector.reduce.or.v3i8(<3 x i8> %a)
+  ret i8 %or_result
+}
+
+define i8 @test_redor_v4i8(<4 x i8> %a) {
+; CHECK-LABEL: test_redor_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w10, v0.h[1]
+; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w9, v0.h[2]
+; CHECK-NEXT:    orr w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.h[3]
+; CHECK-NEXT:    orr w9, w10, w9
+; CHECK-NEXT:    orr w0, w9, w8
+; CHECK-NEXT:    ret
+  %or_result = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> %a)
+  ret i8 %or_result
+}
+
+define i8 @test_redor_v8i8(<8 x i8> %a) {
+; CHECK-LABEL: test_redor_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w14, v0.b[1]
+; CHECK-NEXT:    umov w15, v0.b[0]
+; CHECK-NEXT:    umov w13, v0.b[2]
+; CHECK-NEXT:    orr w14, w15, w14
+; CHECK-NEXT:    umov w12, v0.b[3]
+; CHECK-NEXT:    orr w13, w14, w13
+; CHECK-NEXT:    umov w11, v0.b[4]
+; CHECK-NEXT:    orr w12, w13, w12
+; CHECK-NEXT:    umov w10, v0.b[5]
+; CHECK-NEXT:    orr w11, w12, w11
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    orr w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.b[7]
+; CHECK-NEXT:    orr w9, w10, w9
+; CHECK-NEXT:    orr w0, w9, w8
+; CHECK-NEXT:    ret
+  %or_result = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %a)
+  ret i8 %or_result
+}
+
+define i8 @test_redor_v16i8(<16 x i8> %a) {
+; CHECK-LABEL: test_redor_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.b[1]
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    orr w8, w9, w8
+; CHECK-NEXT:    umov w9, v0.b[2]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[3]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[4]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[5]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[7]
+; CHECK-NEXT:    orr w0, w8, w9
+; CHECK-NEXT:    ret
+  %or_result = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %a)
+  ret i8 %or_result
+}
+
+define i8 @test_redor_v32i8(<32 x i8> %a) {
+; CHECK-LABEL: test_redor_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.b[1]
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    orr w8, w9, w8
+; CHECK-NEXT:    umov w9, v0.b[2]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[3]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[4]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[5]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[7]
+; CHECK-NEXT:    orr w0, w8, w9
+; CHECK-NEXT:    ret
+  %or_result = call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %a)
+  ret i8 %or_result
+}
+
+define i16 @test_redor_v4i16(<4 x i16> %a) {
+; CHECK-LABEL: test_redor_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w10, v0.h[1]
+; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w9, v0.h[2]
+; CHECK-NEXT:    orr w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.h[3]
+; CHECK-NEXT:    orr w9, w10, w9
+; CHECK-NEXT:    orr w0, w9, w8
+; CHECK-NEXT:    ret
+  %or_result = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> %a)
+  ret i16 %or_result
+}
+
+define i16 @test_redor_v8i16(<8 x i16> %a) {
+; CHECK-LABEL: test_redor_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.h[1]
+; CHECK-NEXT:    umov w9, v0.h[0]
+; CHECK-NEXT:    umov w10, v0.h[2]
+; CHECK-NEXT:    orr w8, w9, w8
+; CHECK-NEXT:    orr w8, w8, w10
+; CHECK-NEXT:    umov w9, v0.h[3]
+; CHECK-NEXT:    orr w0, w8, w9
+; CHECK-NEXT:    ret
+  %or_result = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %a)
+  ret i16 %or_result
+}
+
+define i16 @test_redor_v16i16(<16 x i16> %a) {
+; CHECK-LABEL: test_redor_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.h[1]
+; CHECK-NEXT:    umov w9, v0.h[0]
+; CHECK-NEXT:    umov w10, v0.h[2]
+; CHECK-NEXT:    orr w8, w9, w8
+; CHECK-NEXT:    orr w8, w8, w10
+; CHECK-NEXT:    umov w9, v0.h[3]
+; CHECK-NEXT:    orr w0, w8, w9
+; CHECK-NEXT:    ret
+  %or_result = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %a)
+  ret i16 %or_result
+}
+
+define i32 @test_redor_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: test_redor_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    orr w0, w9, w8
+; CHECK-NEXT:    ret
+  %or_result = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %a)
+  ret i32 %or_result
+}
+
+define i32 @test_redor_v4i32(<4 x i32> %a) {
+; CHECK-LABEL: test_redor_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    orr w0, w9, w8
+; CHECK-NEXT:    ret
+  %or_result = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %a)
+  ret i32 %or_result
+}
+
+define i32 @test_redor_v8i32(<8 x i32> %a) {
+; CHECK-LABEL: test_redor_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    orr w0, w9, w8
+; CHECK-NEXT:    ret
+  %or_result = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %a)
+  ret i32 %or_result
+}
+
+define i64 @test_redor_v2i64(<2 x i64> %a) {
+; CHECK-LABEL: test_redor_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %or_result = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %a)
+  ret i64 %or_result
+}
+
+define i64 @test_redor_v4i64(<4 x i64> %a) {
+; CHECK-LABEL: test_redor_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %or_result = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %a)
+  ret i64 %or_result
+}
+
+declare i1 @llvm.vector.reduce.or.v1i1(<1 x i1>)
+declare i1 @llvm.vector.reduce.or.v2i1(<2 x i1>)
+declare i1 @llvm.vector.reduce.or.v4i1(<4 x i1>)
+declare i1 @llvm.vector.reduce.or.v8i1(<8 x i1>)
+declare i1 @llvm.vector.reduce.or.v16i1(<16 x i1>)
+declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.or.v4i64(<4 x i64>)
+declare i32 @llvm.vector.reduce.or.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.or.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.or.v8i32(<8 x i32>)
+declare i16 @llvm.vector.reduce.or.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.or.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.or.v16i16(<16 x i16>)
+declare i8 @llvm.vector.reduce.or.v1i8(<1 x i8>)
+declare i8 @llvm.vector.reduce.or.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.or.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.or.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.or.v32i8(<32 x i8>)

diff --git a/llvm/test/CodeGen/AArch64/reduce-xor.ll b/llvm/test/CodeGen/AArch64/reduce-xor.ll
new file mode 100644
index 000000000000..2cec995926e1
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/reduce-xor.ll
@@ -0,0 +1,336 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -mattr=+neon | FileCheck %s
+
+define i1 @test_redxor_v1i1(<1 x i1> %a) {
+; CHECK-LABEL: test_redxor_v1i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w0, w0, #0x1
+; CHECK-NEXT:    ret
+  %xor_result = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %a)
+  ret i1 %xor_result
+}
+
+define i1 @test_redxor_v2i1(<2 x i1> %a) {
+; CHECK-LABEL: test_redxor_v2i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    eor w8, w9, w8
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %xor_result = call i1 @llvm.vector.reduce.xor.v2i1(<2 x i1> %a)
+  ret i1 %xor_result
+}
+
+define i1 @test_redxor_v4i1(<4 x i1> %a) {
+; CHECK-LABEL: test_redxor_v4i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w10, v0.h[1]
+; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w9, v0.h[2]
+; CHECK-NEXT:    eor w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.h[3]
+; CHECK-NEXT:    eor w9, w10, w9
+; CHECK-NEXT:    eor w8, w9, w8
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %xor_result = call i1 @llvm.vector.reduce.xor.v4i1(<4 x i1> %a)
+  ret i1 %xor_result
+}
+
+define i1 @test_redxor_v8i1(<8 x i1> %a) {
+; CHECK-LABEL: test_redxor_v8i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w14, v0.b[1]
+; CHECK-NEXT:    umov w15, v0.b[0]
+; CHECK-NEXT:    umov w13, v0.b[2]
+; CHECK-NEXT:    eor w14, w15, w14
+; CHECK-NEXT:    umov w12, v0.b[3]
+; CHECK-NEXT:    eor w13, w14, w13
+; CHECK-NEXT:    umov w11, v0.b[4]
+; CHECK-NEXT:    eor w12, w13, w12
+; CHECK-NEXT:    umov w10, v0.b[5]
+; CHECK-NEXT:    eor w11, w12, w11
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    eor w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.b[7]
+; CHECK-NEXT:    eor w9, w10, w9
+; CHECK-NEXT:    eor w8, w9, w8
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %xor_result = call i1 @llvm.vector.reduce.xor.v8i1(<8 x i1> %a)
+  ret i1 %xor_result
+}
+
+define i1 @test_redxor_v16i1(<16 x i1> %a) {
+; CHECK-LABEL: test_redxor_v16i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.b[1]
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    eor w8, w9, w8
+; CHECK-NEXT:    umov w9, v0.b[2]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[3]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[4]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[5]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[7]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %xor_result = call i1 @llvm.vector.reduce.xor.v16i1(<16 x i1> %a)
+  ret i1 %xor_result
+}
+
+define i8 @test_redxor_v1i8(<1 x i8> %a) {
+; CHECK-LABEL: test_redxor_v1i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w0, v0.b[0]
+; CHECK-NEXT:    ret
+  %xor_result = call i8 @llvm.vector.reduce.xor.v1i8(<1 x i8> %a)
+  ret i8 %xor_result
+}
+
+define i8 @test_redxor_v3i8(<3 x i8> %a) {
+; CHECK-LABEL: test_redxor_v3i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eor w8, w0, w1
+; CHECK-NEXT:    eor w0, w8, w2
+; CHECK-NEXT:    ret
+  %xor_result = call i8 @llvm.vector.reduce.xor.v3i8(<3 x i8> %a)
+  ret i8 %xor_result
+}
+
+define i8 @test_redxor_v4i8(<4 x i8> %a) {
+; CHECK-LABEL: test_redxor_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w10, v0.h[1]
+; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w9, v0.h[2]
+; CHECK-NEXT:    eor w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.h[3]
+; CHECK-NEXT:    eor w9, w10, w9
+; CHECK-NEXT:    eor w0, w9, w8
+; CHECK-NEXT:    ret
+  %xor_result = call i8 @llvm.vector.reduce.xor.v4i8(<4 x i8> %a)
+  ret i8 %xor_result
+}
+
+define i8 @test_redxor_v8i8(<8 x i8> %a) {
+; CHECK-LABEL: test_redxor_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w14, v0.b[1]
+; CHECK-NEXT:    umov w15, v0.b[0]
+; CHECK-NEXT:    umov w13, v0.b[2]
+; CHECK-NEXT:    eor w14, w15, w14
+; CHECK-NEXT:    umov w12, v0.b[3]
+; CHECK-NEXT:    eor w13, w14, w13
+; CHECK-NEXT:    umov w11, v0.b[4]
+; CHECK-NEXT:    eor w12, w13, w12
+; CHECK-NEXT:    umov w10, v0.b[5]
+; CHECK-NEXT:    eor w11, w12, w11
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    eor w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.b[7]
+; CHECK-NEXT:    eor w9, w10, w9
+; CHECK-NEXT:    eor w0, w9, w8
+; CHECK-NEXT:    ret
+  %xor_result = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %a)
+  ret i8 %xor_result
+}
+
+define i8 @test_redxor_v16i8(<16 x i8> %a) {
+; CHECK-LABEL: test_redxor_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.b[1]
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    eor w8, w9, w8
+; CHECK-NEXT:    umov w9, v0.b[2]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[3]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[4]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[5]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[7]
+; CHECK-NEXT:    eor w0, w8, w9
+; CHECK-NEXT:    ret
+  %xor_result = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %a)
+  ret i8 %xor_result
+}
+
+define i8 @test_redxor_v32i8(<32 x i8> %a) {
+; CHECK-LABEL: test_redxor_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.b[1]
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    eor w8, w9, w8
+; CHECK-NEXT:    umov w9, v0.b[2]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[3]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[4]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[5]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    umov w9, v0.b[7]
+; CHECK-NEXT:    eor w0, w8, w9
+; CHECK-NEXT:    ret
+  %xor_result = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> %a)
+  ret i8 %xor_result
+}
+
+define i16 @test_redxor_v4i16(<4 x i16> %a) {
+; CHECK-LABEL: test_redxor_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w10, v0.h[1]
+; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w9, v0.h[2]
+; CHECK-NEXT:    eor w10, w11, w10
+; CHECK-NEXT:    umov w8, v0.h[3]
+; CHECK-NEXT:    eor w9, w10, w9
+; CHECK-NEXT:    eor w0, w9, w8
+; CHECK-NEXT:    ret
+  %xor_result = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %a)
+  ret i16 %xor_result
+}
+
+define i16 @test_redxor_v8i16(<8 x i16> %a) {
+; CHECK-LABEL: test_redxor_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.h[1]
+; CHECK-NEXT:    umov w9, v0.h[0]
+; CHECK-NEXT:    umov w10, v0.h[2]
+; CHECK-NEXT:    eor w8, w9, w8
+; CHECK-NEXT:    eor w8, w8, w10
+; CHECK-NEXT:    umov w9, v0.h[3]
+; CHECK-NEXT:    eor w0, w8, w9
+; CHECK-NEXT:    ret
+  %xor_result = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> %a)
+  ret i16 %xor_result
+}
+
+define i16 @test_redxor_v16i16(<16 x i16> %a) {
+; CHECK-LABEL: test_redxor_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v0.h[1]
+; CHECK-NEXT:    umov w9, v0.h[0]
+; CHECK-NEXT:    umov w10, v0.h[2]
+; CHECK-NEXT:    eor w8, w9, w8
+; CHECK-NEXT:    eor w8, w8, w10
+; CHECK-NEXT:    umov w9, v0.h[3]
+; CHECK-NEXT:    eor w0, w8, w9
+; CHECK-NEXT:    ret
+  %xor_result = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %a)
+  ret i16 %xor_result
+}
+
+define i32 @test_redxor_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: test_redxor_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    eor w0, w9, w8
+; CHECK-NEXT:    ret
+  %xor_result = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %a)
+  ret i32 %xor_result
+}
+
+define i32 @test_redxor_v4i32(<4 x i32> %a) {
+; CHECK-LABEL: test_redxor_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    eor w0, w9, w8
+; CHECK-NEXT:    ret
+  %xor_result = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %a)
+  ret i32 %xor_result
+}
+
+define i32 @test_redxor_v8i32(<8 x i32> %a) {
+; CHECK-LABEL: test_redxor_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    eor w0, w9, w8
+; CHECK-NEXT:    ret
+  %xor_result = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %a)
+  ret i32 %xor_result
+}
+
+define i64 @test_redxor_v2i64(<2 x i64> %a) {
+; CHECK-LABEL: test_redxor_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %xor_result = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %a)
+  ret i64 %xor_result
+}
+
+define i64 @test_redxor_v4i64(<4 x i64> %a) {
+; CHECK-LABEL: test_redxor_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %xor_result = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %a)
+  ret i64 %xor_result
+}
+
+declare i1 @llvm.vector.reduce.xor.v1i1(<1 x i1>)
+declare i1 @llvm.vector.reduce.xor.v2i1(<2 x i1>)
+declare i1 @llvm.vector.reduce.xor.v4i1(<4 x i1>)
+declare i1 @llvm.vector.reduce.xor.v8i1(<8 x i1>)
+declare i1 @llvm.vector.reduce.xor.v16i1(<16 x i1>)
+declare i64 @llvm.vector.reduce.xor.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>)
+declare i32 @llvm.vector.reduce.xor.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.xor.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32>)
+declare i16 @llvm.vector.reduce.xor.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.xor.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.xor.v16i16(<16 x i16>)
+declare i8 @llvm.vector.reduce.xor.v1i8(<1 x i8>)
+declare i8 @llvm.vector.reduce.xor.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.xor.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.xor.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.xor.v32i8(<32 x i8>)

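As a usage note, a sketch of running the new tests locally (assuming an LLVM
build tree at ./build with the AArch64 target enabled):

    ./build/bin/llvm-lit llvm/test/CodeGen/AArch64/reduce-and.ll \
        llvm/test/CodeGen/AArch64/reduce-or.ll \
        llvm/test/CodeGen/AArch64/reduce-xor.ll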