[llvm] bcbad75 - [AArch64][SVE] NFC: Add test file for predicate vector reductions.

Sander de Smalen via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 9 07:01:26 PST 2022


Author: Sander de Smalen
Date: 2022-02-09T15:00:08Z
New Revision: bcbad75a7c388a4a3d6a458324ac0aa65ff40dd1

URL: https://github.com/llvm/llvm-project/commit/bcbad75a7c388a4a3d6a458324ac0aa65ff40dd1
DIFF: https://github.com/llvm/llvm-project/commit/bcbad75a7c388a4a3d6a458324ac0aa65ff40dd1.diff

LOG: [AArch64][SVE] NFC: Add test file for predicate vector reductions.

This adds some tests for predicate vector reductions which can, and should,
be implemented with a ptest instead of a promoted ANDV/ORV reduction.
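
For illustration, the OR-reduction case could instead lower to something
like the following for the single-register 512-bit case (a hand-written
sketch, not actual llc output; it assumes fcmne folds away the
compare-plus-eor inversion and that a ptest/cset pair replaces the
promoted ORV):

    ptrue  p0.s, vl16
    ld1w   { z0.s }, p0/z, [x0]
    fcmne  p1.s, p0/z, z0.s, #0.0   // lanes where a[i] != 0.0
    ptest  p0, p1.b                 // Z flag clear iff any lane is set
    cset   w0, ne
    ret

Likewise, the AND reduction can be phrased as "no lane compares equal to
zero", so a flag-setting predicate OR avoids both the inversion and the
promoted ANDV (again a sketch, not actual output):

    ptrue  p0.s, vl16
    ld1w   { z0.s }, p0/z, [x0]
    ld1w   { z1.s }, p0/z, [x1]
    fcmeq  p1.s, p0/z, z0.s, #0.0
    fcmeq  p2.s, p0/z, z1.s, #0.0
    orrs   p1.b, p0/z, p1.b, p2.b   // Z flag set iff no element of a or b is 0.0
    cset   w0, eq
    ret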

Added: 
    llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll
new file mode 100644
index 000000000000..9f3121b767eb
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll
@@ -0,0 +1,181 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64 -mattr=+sve < %s | FileCheck %s
+
+define i1 @ptest_v16i1_256bit_min_sve(float* %a, float * %b) vscale_range(2, 0) {
+; CHECK-LABEL: ptest_v16i1_256bit_min_sve:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, #8
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT:    fcmeq p1.s, p0/z, z0.s, #0.0
+; CHECK-NEXT:    fcmeq p0.s, p0/z, z1.s, #0.0
+; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.s, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z2.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    eor z1.d, z2.d, z1.d
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    uzp1 z1.b, z1.b, z1.b
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    mov v1.d[1], v0.d[0]
+; CHECK-NEXT:    orv b0, p0, z1.b
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %v0 = bitcast float* %a to <16 x float>*
+  %v1 = load <16 x float>, <16 x float>* %v0, align 4
+  %v2 = fcmp une <16 x float> %v1, zeroinitializer
+  %v3 = call i1 @llvm.vector.reduce.or.i1.v16i1 (<16 x i1> %v2)
+  ret i1 %v3
+}
+
+define i1 @ptest_v16i1_512bit_min_sve(float* %a, float * %b) vscale_range(4, 0) {
+; CHECK-LABEL: ptest_v16i1_512bit_min_sve:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl16
+; CHECK-NEXT:    mov z1.s, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    fcmeq p0.s, p0/z, z0.s, #0.0
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    orv b0, p0, z0.b
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %v0 = bitcast float* %a to <16 x float>*
+  %v1 = load <16 x float>, <16 x float>* %v0, align 4
+  %v2 = fcmp une <16 x float> %v1, zeroinitializer
+  %v3 = call i1 @llvm.vector.reduce.or.i1.v16i1 (<16 x i1> %v2)
+  ret i1 %v3
+}
+
+define i1 @ptest_v16i1_512bit_sve(float* %a, float * %b) vscale_range(4, 4) {
+; CHECK-LABEL: ptest_v16i1_512bit_sve:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mov z1.s, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    fcmeq p0.s, p0/z, z0.s, #0.0
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    orv b0, p0, z0.b
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %v0 = bitcast float* %a to <16 x float>*
+  %v1 = load <16 x float>, <16 x float>* %v0, align 4
+  %v2 = fcmp une <16 x float> %v1, zeroinitializer
+  %v3 = call i1 @llvm.vector.reduce.or.i1.v16i1 (<16 x i1> %v2)
+  ret i1 %v3
+}
+
+define i1 @ptest_or_v16i1_512bit_min_sve(float* %a, float * %b) vscale_range(4, 0) {
+; CHECK-LABEL: ptest_or_v16i1_512bit_min_sve:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl16
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq p1.s, p0/z, z0.s, #0.0
+; CHECK-NEXT:    fcmeq p0.s, p0/z, z1.s, #0.0
+; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.s, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z2.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    eor z1.d, z2.d, z1.d
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    orv b0, p0, z0.b
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %v0 = bitcast float* %a to <16 x float>*
+  %v1 = load <16 x float>, <16 x float>* %v0, align 4
+  %v2 = fcmp une <16 x float> %v1, zeroinitializer
+  %v3 = bitcast float* %b to <16 x float>*
+  %v4 = load <16 x float>, <16 x float>* %v3, align 4
+  %v5 = fcmp une <16 x float> %v4, zeroinitializer
+  %v6 = or <16 x i1> %v2, %v5
+  %v7 = call i1 @llvm.vector.reduce.or.i1.v16i1 (<16 x i1> %v6)
+  ret i1 %v7
+}
+
+declare i1 @llvm.vector.reduce.or.i1.v16i1(<16 x i1>)
+
+;
+; AND reduction.
+;
+
+define i1 @ptest_and_v16i1_512bit_sve(float* %a, float * %b) vscale_range(4, 4) {
+; CHECK-LABEL: ptest_and_v16i1_512bit_sve:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq p1.s, p0/z, z0.s, #0.0
+; CHECK-NEXT:    fcmeq p0.s, p0/z, z1.s, #0.0
+; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.s, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    mov z1.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    bic z0.d, z0.d, z1.d
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    andv b0, p0, z0.b
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %v0 = bitcast float* %a to <16 x float>*
+  %v1 = load <16 x float>, <16 x float>* %v0, align 4
+  %v2 = fcmp une <16 x float> %v1, zeroinitializer
+  %v3 = bitcast float* %b to <16 x float>*
+  %v4 = load <16 x float>, <16 x float>* %v3, align 4
+  %v5 = fcmp une <16 x float> %v4, zeroinitializer
+  %v6 = and <16 x i1> %v2, %v5
+  %v7 = call i1 @llvm.vector.reduce.and.i1.v16i1 (<16 x i1> %v6)
+  ret i1 %v7
+}
+
+define i1 @ptest_and_v16i1_512bit_min_sve(float* %a, float * %b) vscale_range(4, 0) {
+; CHECK-LABEL: ptest_and_v16i1_512bit_min_sve:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl16
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq p1.s, p0/z, z0.s, #0.0
+; CHECK-NEXT:    fcmeq p0.s, p0/z, z1.s, #0.0
+; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.s, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    mov z1.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    bic z0.d, z0.d, z1.d
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    andv b0, p0, z0.b
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
+  %v0 = bitcast float* %a to <16 x float>*
+  %v1 = load <16 x float>, <16 x float>* %v0, align 4
+  %v2 = fcmp une <16 x float> %v1, zeroinitializer
+  %v3 = bitcast float* %b to <16 x float>*
+  %v4 = load <16 x float>, <16 x float>* %v3, align 4
+  %v5 = fcmp une <16 x float> %v4, zeroinitializer
+  %v6 = and <16 x i1> %v2, %v5
+  %v7 = call i1 @llvm.vector.reduce.and.i1.v16i1 (<16 x i1> %v6)
+  ret i1 %v7
+}
+
+declare i1 @llvm.vector.reduce.and.i1.v16i1(<16 x i1>)
