[llvm] r359723 - [AArch64] Add tests for bool vector reductions; NFC
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Wed May 1 13:18:36 PDT 2019
Author: nikic
Date: Wed May 1 13:18:36 2019
New Revision: 359723
URL: http://llvm.org/viewvc/llvm-project?rev=359723&view=rev
Log:
[AArch64] Add tests for bool vector reductions; NFC
Baseline tests for PR41635.
Added:
llvm/trunk/test/CodeGen/AArch64/vecreduce-bool.ll
Added: llvm/trunk/test/CodeGen/AArch64/vecreduce-bool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/vecreduce-bool.ll?rev=359723&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/vecreduce-bool.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/vecreduce-bool.ll Wed May 1 13:18:36 2019
@@ -0,0 +1,304 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
+
+declare i1 @llvm.experimental.vector.reduce.and.i1.v1i1(<1 x i1> %a)
+declare i1 @llvm.experimental.vector.reduce.and.i1.v2i1(<2 x i1> %a)
+declare i1 @llvm.experimental.vector.reduce.and.i1.v4i1(<4 x i1> %a)
+declare i1 @llvm.experimental.vector.reduce.and.i1.v8i1(<8 x i1> %a)
+declare i1 @llvm.experimental.vector.reduce.and.i1.v16i1(<16 x i1> %a)
+declare i1 @llvm.experimental.vector.reduce.and.i1.v32i1(<32 x i1> %a)
+
+declare i1 @llvm.experimental.vector.reduce.or.i1.v1i1(<1 x i1> %a)
+declare i1 @llvm.experimental.vector.reduce.or.i1.v2i1(<2 x i1> %a)
+declare i1 @llvm.experimental.vector.reduce.or.i1.v4i1(<4 x i1> %a)
+declare i1 @llvm.experimental.vector.reduce.or.i1.v8i1(<8 x i1> %a)
+declare i1 @llvm.experimental.vector.reduce.or.i1.v16i1(<16 x i1> %a)
+declare i1 @llvm.experimental.vector.reduce.or.i1.v32i1(<32 x i1> %a)
+
+; All-of reduction over a single lane: the v1i1 and-reduction is just the lone
+; compare result, so codegen extracts the byte (smov) and selects directly on
+; the signed-less-than condition (csel ... lt) without materializing a mask.
+define i32 @reduce_and_v1(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: reduce_and_v1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: smov w8, v0.b[0]
+; CHECK-NEXT: cmp w8, #0 // =0
+; CHECK-NEXT: csel w0, w0, w1, lt
+; CHECK-NEXT: ret
+ %x = icmp slt <1 x i8> %a0, zeroinitializer
+ %y = call i1 @llvm.experimental.vector.reduce.and.i1.v1i1(<1 x i1> %x)
+ %z = select i1 %y, i32 %a1, i32 %a2
+ ret i32 %z
+}
+
+; <2 x i8> is passed widened in 32-bit lanes, so shl/sshr by 24 re-sign-extends
+; the i8 payload before the cmlt mask compare. Both mask lanes are then moved
+; to GPRs and and-ed; tst of bit 0 feeds the final csel.
+define i32 @reduce_and_v2(<2 x i8> %a0, i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: reduce_and_v2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shl v0.2s, v0.2s, #24
+; CHECK-NEXT: sshr v0.2s, v0.2s, #24
+; CHECK-NEXT: cmlt v0.2s, v0.2s, #0
+; CHECK-NEXT: mov w8, v0.s[1]
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: and w8, w9, w8
+; CHECK-NEXT: tst w8, #0x1
+; CHECK-NEXT: csel w0, w0, w1, ne
+; CHECK-NEXT: ret
+ %x = icmp slt <2 x i8> %a0, zeroinitializer
+ %y = call i1 @llvm.experimental.vector.reduce.and.i1.v2i1(<2 x i1> %x)
+ %z = select i1 %y, i32 %a1, i32 %a2
+ ret i32 %z
+}
+
+; <4 x i8> arrives in 16-bit lanes: shl/sshr by 8 sign-extends each i8, cmlt
+; builds the lane mask, then all four lanes are extracted with umov and and-ed
+; scalar-by-scalar in GPRs before the tst/csel.
+define i32 @reduce_and_v4(<4 x i8> %a0, i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: reduce_and_v4:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-NEXT: cmlt v0.4h, v0.4h, #0
+; CHECK-NEXT: umov w10, v0.h[1]
+; CHECK-NEXT: umov w11, v0.h[0]
+; CHECK-NEXT: umov w9, v0.h[2]
+; CHECK-NEXT: and w10, w11, w10
+; CHECK-NEXT: umov w8, v0.h[3]
+; CHECK-NEXT: and w9, w10, w9
+; CHECK-NEXT: and w8, w9, w8
+; CHECK-NEXT: tst w8, #0x1
+; CHECK-NEXT: csel w0, w0, w1, ne
+; CHECK-NEXT: ret
+ %x = icmp slt <4 x i8> %a0, zeroinitializer
+ %y = call i1 @llvm.experimental.vector.reduce.and.i1.v4i1(<4 x i1> %x)
+ %z = select i1 %y, i32 %a1, i32 %a2
+ ret i32 %z
+}
+
+; Byte mask via cmlt, then every one of the 8 lanes is extracted with umov and
+; and-ed in GPRs — a long scalar chain. This is the baseline codegen this test
+; records (see the commit log: baseline for PR41635).
+define i32 @reduce_and_v8(<8 x i8> %a0, i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: reduce_and_v8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.8b, v0.8b, #0
+; CHECK-NEXT: umov w14, v0.b[1]
+; CHECK-NEXT: umov w15, v0.b[0]
+; CHECK-NEXT: umov w13, v0.b[2]
+; CHECK-NEXT: and w14, w15, w14
+; CHECK-NEXT: umov w12, v0.b[3]
+; CHECK-NEXT: and w13, w14, w13
+; CHECK-NEXT: umov w11, v0.b[4]
+; CHECK-NEXT: and w12, w13, w12
+; CHECK-NEXT: umov w10, v0.b[5]
+; CHECK-NEXT: and w11, w12, w11
+; CHECK-NEXT: umov w9, v0.b[6]
+; CHECK-NEXT: and w10, w11, w10
+; CHECK-NEXT: umov w8, v0.b[7]
+; CHECK-NEXT: and w9, w10, w9
+; CHECK-NEXT: and w8, w9, w8
+; CHECK-NEXT: tst w8, #0x1
+; CHECK-NEXT: csel w0, w0, w1, ne
+; CHECK-NEXT: ret
+ %x = icmp slt <8 x i8> %a0, zeroinitializer
+ %y = call i1 @llvm.experimental.vector.reduce.and.i1.v8i1(<8 x i1> %x)
+ %z = select i1 %y, i32 %a1, i32 %a2
+ ret i32 %z
+}
+
+; 128-bit mask is first halved once in vector registers (ext + and of the two
+; 64-bit halves), then the remaining 8 bytes are reduced lane-by-lane with
+; umov/and in GPRs.
+define i32 @reduce_and_v16(<16 x i8> %a0, i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: reduce_and_v16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.16b, v0.16b, #0
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: umov w8, v0.b[1]
+; CHECK-NEXT: umov w9, v0.b[0]
+; CHECK-NEXT: and w8, w9, w8
+; CHECK-NEXT: umov w9, v0.b[2]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[3]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[4]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[5]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[6]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[7]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: tst w8, #0x1
+; CHECK-NEXT: csel w0, w0, w1, ne
+; CHECK-NEXT: ret
+ %x = icmp slt <16 x i8> %a0, zeroinitializer
+ %y = call i1 @llvm.experimental.vector.reduce.and.i1.v16i1(<16 x i1> %x)
+ %z = select i1 %y, i32 %a1, i32 %a2
+ ret i32 %z
+}
+
+; 32-byte input spans two q-regs: both halves are compared, and-ed together in
+; vector registers, then reduced exactly like the v16 case (ext/and followed by
+; a scalar umov/and chain).
+define i32 @reduce_and_v32(<32 x i8> %a0, i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: reduce_and_v32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT: cmlt v0.16b, v0.16b, #0
+; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: umov w8, v0.b[1]
+; CHECK-NEXT: umov w9, v0.b[0]
+; CHECK-NEXT: and w8, w9, w8
+; CHECK-NEXT: umov w9, v0.b[2]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[3]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[4]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[5]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[6]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[7]
+; CHECK-NEXT: and w8, w8, w9
+; CHECK-NEXT: tst w8, #0x1
+; CHECK-NEXT: csel w0, w0, w1, ne
+; CHECK-NEXT: ret
+ %x = icmp slt <32 x i8> %a0, zeroinitializer
+ %y = call i1 @llvm.experimental.vector.reduce.and.i1.v32i1(<32 x i1> %x)
+ %z = select i1 %y, i32 %a1, i32 %a2
+ ret i32 %z
+}
+
+; Any-of reduction over a single lane: identical codegen to reduce_and_v1,
+; since or-reducing one element is also just the element itself.
+define i32 @reduce_or_v1(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: reduce_or_v1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: smov w8, v0.b[0]
+; CHECK-NEXT: cmp w8, #0 // =0
+; CHECK-NEXT: csel w0, w0, w1, lt
+; CHECK-NEXT: ret
+ %x = icmp slt <1 x i8> %a0, zeroinitializer
+ %y = call i1 @llvm.experimental.vector.reduce.or.i1.v1i1(<1 x i1> %x)
+ %z = select i1 %y, i32 %a1, i32 %a2
+ ret i32 %z
+}
+
+; Same widened-lane sign-extension dance as reduce_and_v2 (shl/sshr #24 then
+; cmlt); the only difference is the scalar combine uses orr instead of and.
+define i32 @reduce_or_v2(<2 x i8> %a0, i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: reduce_or_v2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shl v0.2s, v0.2s, #24
+; CHECK-NEXT: sshr v0.2s, v0.2s, #24
+; CHECK-NEXT: cmlt v0.2s, v0.2s, #0
+; CHECK-NEXT: mov w8, v0.s[1]
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: orr w8, w9, w8
+; CHECK-NEXT: tst w8, #0x1
+; CHECK-NEXT: csel w0, w0, w1, ne
+; CHECK-NEXT: ret
+ %x = icmp slt <2 x i8> %a0, zeroinitializer
+ %y = call i1 @llvm.experimental.vector.reduce.or.i1.v2i1(<2 x i1> %x)
+ %z = select i1 %y, i32 %a1, i32 %a2
+ ret i32 %z
+}
+
+; Mirrors reduce_and_v4: i8-in-i16 lanes are sign-extended (shl/sshr #8),
+; cmlt builds the mask, and the four lanes are or-ed scalar-by-scalar.
+define i32 @reduce_or_v4(<4 x i8> %a0, i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: reduce_or_v4:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-NEXT: cmlt v0.4h, v0.4h, #0
+; CHECK-NEXT: umov w10, v0.h[1]
+; CHECK-NEXT: umov w11, v0.h[0]
+; CHECK-NEXT: umov w9, v0.h[2]
+; CHECK-NEXT: orr w10, w11, w10
+; CHECK-NEXT: umov w8, v0.h[3]
+; CHECK-NEXT: orr w9, w10, w9
+; CHECK-NEXT: orr w8, w9, w8
+; CHECK-NEXT: tst w8, #0x1
+; CHECK-NEXT: csel w0, w0, w1, ne
+; CHECK-NEXT: ret
+ %x = icmp slt <4 x i8> %a0, zeroinitializer
+ %y = call i1 @llvm.experimental.vector.reduce.or.i1.v4i1(<4 x i1> %x)
+ %z = select i1 %y, i32 %a1, i32 %a2
+ ret i32 %z
+}
+
+; Mirrors reduce_and_v8: full 8-lane umov extraction with a scalar orr chain —
+; the same baseline inefficiency this test pins down (commit log: PR41635).
+define i32 @reduce_or_v8(<8 x i8> %a0, i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: reduce_or_v8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.8b, v0.8b, #0
+; CHECK-NEXT: umov w14, v0.b[1]
+; CHECK-NEXT: umov w15, v0.b[0]
+; CHECK-NEXT: umov w13, v0.b[2]
+; CHECK-NEXT: orr w14, w15, w14
+; CHECK-NEXT: umov w12, v0.b[3]
+; CHECK-NEXT: orr w13, w14, w13
+; CHECK-NEXT: umov w11, v0.b[4]
+; CHECK-NEXT: orr w12, w13, w12
+; CHECK-NEXT: umov w10, v0.b[5]
+; CHECK-NEXT: orr w11, w12, w11
+; CHECK-NEXT: umov w9, v0.b[6]
+; CHECK-NEXT: orr w10, w11, w10
+; CHECK-NEXT: umov w8, v0.b[7]
+; CHECK-NEXT: orr w9, w10, w9
+; CHECK-NEXT: orr w8, w9, w8
+; CHECK-NEXT: tst w8, #0x1
+; CHECK-NEXT: csel w0, w0, w1, ne
+; CHECK-NEXT: ret
+ %x = icmp slt <8 x i8> %a0, zeroinitializer
+ %y = call i1 @llvm.experimental.vector.reduce.or.i1.v8i1(<8 x i1> %x)
+ %z = select i1 %y, i32 %a1, i32 %a2
+ ret i32 %z
+}
+
+; Mirrors reduce_and_v16: one vector halving step (ext + orr of the two 64-bit
+; halves) followed by a lane-by-lane scalar orr chain over the low 8 bytes.
+define i32 @reduce_or_v16(<16 x i8> %a0, i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: reduce_or_v16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.16b, v0.16b, #0
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: umov w8, v0.b[1]
+; CHECK-NEXT: umov w9, v0.b[0]
+; CHECK-NEXT: orr w8, w9, w8
+; CHECK-NEXT: umov w9, v0.b[2]
+; CHECK-NEXT: orr w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[3]
+; CHECK-NEXT: orr w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[4]
+; CHECK-NEXT: orr w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[5]
+; CHECK-NEXT: orr w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[6]
+; CHECK-NEXT: orr w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[7]
+; CHECK-NEXT: orr w8, w8, w9
+; CHECK-NEXT: tst w8, #0x1
+; CHECK-NEXT: csel w0, w0, w1, ne
+; CHECK-NEXT: ret
+ %x = icmp slt <16 x i8> %a0, zeroinitializer
+ %y = call i1 @llvm.experimental.vector.reduce.or.i1.v16i1(<16 x i1> %x)
+ %z = select i1 %y, i32 %a1, i32 %a2
+ ret i32 %z
+}
+
+; Mirrors reduce_and_v32: the two q-reg halves are compared and orr-ed in
+; vector registers, then reduced like the v16 case.
+define i32 @reduce_or_v32(<32 x i8> %a0, i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: reduce_or_v32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT: cmlt v0.16b, v0.16b, #0
+; CHECK-NEXT: orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: umov w8, v0.b[1]
+; CHECK-NEXT: umov w9, v0.b[0]
+; CHECK-NEXT: orr w8, w9, w8
+; CHECK-NEXT: umov w9, v0.b[2]
+; CHECK-NEXT: orr w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[3]
+; CHECK-NEXT: orr w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[4]
+; CHECK-NEXT: orr w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[5]
+; CHECK-NEXT: orr w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[6]
+; CHECK-NEXT: orr w8, w8, w9
+; CHECK-NEXT: umov w9, v0.b[7]
+; CHECK-NEXT: orr w8, w8, w9
+; CHECK-NEXT: tst w8, #0x1
+; CHECK-NEXT: csel w0, w0, w1, ne
+; CHECK-NEXT: ret
+ %x = icmp slt <32 x i8> %a0, zeroinitializer
+ %y = call i1 @llvm.experimental.vector.reduce.or.i1.v32i1(<32 x i1> %x)
+ %z = select i1 %y, i32 %a1, i32 %a2
+ ret i32 %z
+}
More information about the llvm-commits
mailing list