[llvm] 17e51cd - [AArch64][SME]: Add precursory tests for D138682

Hassnaa Hamdi via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 29 04:24:35 PST 2022


Author: Hassnaa Hamdi
Date: 2022-11-29T12:24:21Z
New Revision: 17e51cd4f94cea6798f37ca31863b5a61e278746

URL: https://github.com/llvm/llvm-project/commit/17e51cd4f94cea6798f37ca31863b5a61e278746
DIFF: https://github.com/llvm/llvm-project/commit/17e51cd4f94cea6798f37ca31863b5a61e278746.diff

LOG: [AArch64][SME]: Add precursory tests for D138682

Add test files:
 - bit-counting.ll
 - bitselect.ll

Added: 
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll
new file mode 100644
index 0000000000000..e2da0b56a2fd8
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll
@@ -0,0 +1,609 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; CLZ
+;
+
+define <4 x i8> @ctlz_v4i8(<4 x i8> %op) #0 {
+; CHECK-LABEL: ctlz_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    adrp x8, .LCPI0_1
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI0_1]
+; CHECK-NEXT:    clz v0.4h, v0.4h
+; CHECK-NEXT:    sub z0.h, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <4 x i8> @llvm.ctlz.v4i8(<4 x i8> %op)
+  ret <4 x i8> %res
+}
+
+define <8 x i8> @ctlz_v8i8(<8 x i8> %op) #0 {
+; CHECK-LABEL: ctlz_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz v0.8b, v0.8b
+; CHECK-NEXT:    ret
+  %res = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %op)
+  ret <8 x i8> %res
+}
+
+define <16 x i8> @ctlz_v16i8(<16 x i8> %op) #0 {
+; CHECK-LABEL: ctlz_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz v0.16b, v0.16b
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %op)
+  ret <16 x i8> %res
+}
+
+define void @ctlz_v32i8(ptr %a) #0 {
+; CHECK-LABEL: ctlz_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    clz v0.16b, v0.16b
+; CHECK-NEXT:    clz v1.16b, v1.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, ptr %a
+  %res = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %op)
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define <2 x i16> @ctlz_v2i16(<2 x i16> %op) #0 {
+; CHECK-LABEL: ctlz_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI4_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI4_0]
+; CHECK-NEXT:    adrp x8, .LCPI4_1
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI4_1]
+; CHECK-NEXT:    clz v0.2s, v0.2s
+; CHECK-NEXT:    sub z0.s, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <2 x i16> @llvm.ctlz.v2i16(<2 x i16> %op)
+  ret <2 x i16> %res
+}
+
+define <4 x i16> @ctlz_v4i16(<4 x i16> %op) #0 {
+; CHECK-LABEL: ctlz_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %op)
+  ret <4 x i16> %res
+}
+
+define <8 x i16> @ctlz_v8i16(<8 x i16> %op) #0 {
+; CHECK-LABEL: ctlz_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz v0.8h, v0.8h
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %op)
+  ret <8 x i16> %res
+}
+
+define void @ctlz_v16i16(ptr %a) #0 {
+; CHECK-LABEL: ctlz_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    clz v0.8h, v0.8h
+; CHECK-NEXT:    clz v1.8h, v1.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, ptr %a
+  %res = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %op)
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define <2 x i32> @ctlz_v2i32(<2 x i32> %op) #0 {
+; CHECK-LABEL: ctlz_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %res = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %op)
+  ret <2 x i32> %res
+}
+
+define <4 x i32> @ctlz_v4i32(<4 x i32> %op) #0 {
+; CHECK-LABEL: ctlz_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clz v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %op)
+  ret <4 x i32> %res
+}
+
+define void @ctlz_v8i32(ptr %a) #0 {
+; CHECK-LABEL: ctlz_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    clz v0.4s, v0.4s
+; CHECK-NEXT:    clz v1.4s, v1.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, ptr %a
+  %res = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %op)
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define <1 x i64> @ctlz_v1i64(<1 x i64> %op) #0 {
+; CHECK-LABEL: ctlz_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    clz z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <1 x i64> @llvm.ctlz.v1i64(<1 x i64> %op)
+  ret <1 x i64> %res
+}
+
+define <2 x i64> @ctlz_v2i64(<2 x i64> %op) #0 {
+; CHECK-LABEL: ctlz_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    clz z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %op)
+  ret <2 x i64> %res
+}
+
+define void @ctlz_v4i64(ptr %a) #0 {
+; CHECK-LABEL: ctlz_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    clz z0.d, p0/m, z0.d
+; CHECK-NEXT:    clz z1.d, p0/m, z1.d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, ptr %a
+  %res = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %op)
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; CNT
+;
+
+define <4 x i8> @ctpop_v4i8(<4 x i8> %op) #0 {
+; CHECK-LABEL: ctpop_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI14_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI14_0]
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    cnt v0.8b, v0.8b
+; CHECK-NEXT:    uaddlp v0.4h, v0.8b
+; CHECK-NEXT:    ret
+  %res = call <4 x i8> @llvm.ctpop.v4i8(<4 x i8> %op)
+  ret <4 x i8> %res
+}
+
+define <8 x i8> @ctpop_v8i8(<8 x i8> %op) #0 {
+; CHECK-LABEL: ctpop_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cnt v0.8b, v0.8b
+; CHECK-NEXT:    ret
+  %res = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %op)
+  ret <8 x i8> %res
+}
+
+define <16 x i8> @ctpop_v16i8(<16 x i8> %op) #0 {
+; CHECK-LABEL: ctpop_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cnt v0.16b, v0.16b
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %op)
+  ret <16 x i8> %res
+}
+
+define void @ctpop_v32i8(ptr %a) #0 {
+; CHECK-LABEL: ctpop_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    cnt v0.16b, v0.16b
+; CHECK-NEXT:    cnt v1.16b, v1.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, ptr %a
+  %res = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %op)
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define <2 x i16> @ctpop_v2i16(<2 x i16> %op) #0 {
+; CHECK-LABEL: ctpop_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI18_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI18_0]
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    cnt v0.8b, v0.8b
+; CHECK-NEXT:    uaddlp v0.4h, v0.8b
+; CHECK-NEXT:    uaddlp v0.2s, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <2 x i16> @llvm.ctpop.v2i16(<2 x i16> %op)
+  ret <2 x i16> %res
+}
+
+define <4 x i16> @ctpop_v4i16(<4 x i16> %op) #0 {
+; CHECK-LABEL: ctpop_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cnt v0.8b, v0.8b
+; CHECK-NEXT:    uaddlp v0.4h, v0.8b
+; CHECK-NEXT:    ret
+  %res = call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> %op)
+  ret <4 x i16> %res
+}
+
+define <8 x i16> @ctpop_v8i16(<8 x i16> %op) #0 {
+; CHECK-LABEL: ctpop_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cnt v0.16b, v0.16b
+; CHECK-NEXT:    uaddlp v0.8h, v0.16b
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %op)
+  ret <8 x i16> %res
+}
+
+define void @ctpop_v16i16(ptr %a) #0 {
+; CHECK-LABEL: ctpop_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    cnt v0.16b, v0.16b
+; CHECK-NEXT:    cnt v1.16b, v1.16b
+; CHECK-NEXT:    uaddlp v0.8h, v0.16b
+; CHECK-NEXT:    uaddlp v1.8h, v1.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, ptr %a
+  %res = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %op)
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define <2 x i32> @ctpop_v2i32(<2 x i32> %op) #0 {
+; CHECK-LABEL: ctpop_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cnt v0.8b, v0.8b
+; CHECK-NEXT:    uaddlp v0.4h, v0.8b
+; CHECK-NEXT:    uaddlp v0.2s, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %op)
+  ret <2 x i32> %res
+}
+
+define <4 x i32> @ctpop_v4i32(<4 x i32> %op) #0 {
+; CHECK-LABEL: ctpop_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cnt v0.16b, v0.16b
+; CHECK-NEXT:    uaddlp v0.8h, v0.16b
+; CHECK-NEXT:    uaddlp v0.4s, v0.8h
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %op)
+  ret <4 x i32> %res
+}
+
+define void @ctpop_v8i32(ptr %a) #0 {
+; CHECK-LABEL: ctpop_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    cnt v0.16b, v0.16b
+; CHECK-NEXT:    cnt v1.16b, v1.16b
+; CHECK-NEXT:    uaddlp v0.8h, v0.16b
+; CHECK-NEXT:    uaddlp v1.8h, v1.16b
+; CHECK-NEXT:    uaddlp v0.4s, v0.8h
+; CHECK-NEXT:    uaddlp v1.4s, v1.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, ptr %a
+  %res = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %op)
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define <1 x i64> @ctpop_v1i64(<1 x i64> %op) #0 {
+; CHECK-LABEL: ctpop_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cnt v0.8b, v0.8b
+; CHECK-NEXT:    uaddlp v0.4h, v0.8b
+; CHECK-NEXT:    uaddlp v0.2s, v0.4h
+; CHECK-NEXT:    uaddlp v0.1d, v0.2s
+; CHECK-NEXT:    ret
+  %res = call <1 x i64> @llvm.ctpop.v1i64(<1 x i64> %op)
+  ret <1 x i64> %res
+}
+
+define <2 x i64> @ctpop_v2i64(<2 x i64> %op) #0 {
+; CHECK-LABEL: ctpop_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cnt v0.16b, v0.16b
+; CHECK-NEXT:    uaddlp v0.8h, v0.16b
+; CHECK-NEXT:    uaddlp v0.4s, v0.8h
+; CHECK-NEXT:    uaddlp v0.2d, v0.4s
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %op)
+  ret <2 x i64> %res
+}
+
+define void @ctpop_v4i64(ptr %a) #0 {
+; CHECK-LABEL: ctpop_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    cnt v0.16b, v0.16b
+; CHECK-NEXT:    cnt v1.16b, v1.16b
+; CHECK-NEXT:    uaddlp v0.8h, v0.16b
+; CHECK-NEXT:    uaddlp v1.8h, v1.16b
+; CHECK-NEXT:    uaddlp v0.4s, v0.8h
+; CHECK-NEXT:    uaddlp v1.4s, v1.8h
+; CHECK-NEXT:    uaddlp v0.2d, v0.4s
+; CHECK-NEXT:    uaddlp v1.2d, v1.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, ptr %a
+  %res = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %op)
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+;
+; Count trailing zeros
+;
+
+define <4 x i8> @cttz_v4i8(<4 x i8> %op) #0 {
+; CHECK-LABEL: cttz_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI28_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI28_0]
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    rbit z0.h, p0/m, z0.h
+; CHECK-NEXT:    clz v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <4 x i8> @llvm.cttz.v4i8(<4 x i8> %op)
+  ret <4 x i8> %res
+}
+
+define <8 x i8> @cttz_v8i8(<8 x i8> %op) #0 {
+; CHECK-LABEL: cttz_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    rbit z0.b, p0/m, z0.b
+; CHECK-NEXT:    clz v0.8b, v0.8b
+; CHECK-NEXT:    ret
+  %res = call <8 x i8> @llvm.cttz.v8i8(<8 x i8> %op)
+  ret <8 x i8> %res
+}
+
+define <16 x i8> @cttz_v16i8(<16 x i8> %op) #0 {
+; CHECK-LABEL: cttz_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    rbit z0.b, p0/m, z0.b
+; CHECK-NEXT:    clz v0.16b, v0.16b
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %op)
+  ret <16 x i8> %res
+}
+
+define void @cttz_v32i8(ptr %a) #0 {
+; CHECK-LABEL: cttz_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    rbit z0.b, p0/m, z0.b
+; CHECK-NEXT:    clz v0.16b, v0.16b
+; CHECK-NEXT:    rbit z1.b, p0/m, z1.b
+; CHECK-NEXT:    clz v1.16b, v1.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, ptr %a
+  %res = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %op)
+  store <32 x i8> %res, ptr %a
+  ret void
+}
+
+define <2 x i16> @cttz_v2i16(<2 x i16> %op) #0 {
+; CHECK-LABEL: cttz_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI32_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI32_0]
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    rbit z0.s, p0/m, z0.s
+; CHECK-NEXT:    clz v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %res = call <2 x i16> @llvm.cttz.v2i16(<2 x i16> %op)
+  ret <2 x i16> %res
+}
+
+define <4 x i16> @cttz_v4i16(<4 x i16> %op) #0 {
+; CHECK-LABEL: cttz_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    rbit z0.h, p0/m, z0.h
+; CHECK-NEXT:    clz v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <4 x i16> @llvm.cttz.v4i16(<4 x i16> %op)
+  ret <4 x i16> %res
+}
+
+define <8 x i16> @cttz_v8i16(<8 x i16> %op) #0 {
+; CHECK-LABEL: cttz_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    rbit z0.h, p0/m, z0.h
+; CHECK-NEXT:    clz v0.8h, v0.8h
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %op)
+  ret <8 x i16> %res
+}
+
+define void @cttz_v16i16(ptr %a) #0 {
+; CHECK-LABEL: cttz_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    rbit z0.h, p0/m, z0.h
+; CHECK-NEXT:    clz v0.8h, v0.8h
+; CHECK-NEXT:    rbit z1.h, p0/m, z1.h
+; CHECK-NEXT:    clz v1.8h, v1.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, ptr %a
+  %res = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %op)
+  store <16 x i16> %res, ptr %a
+  ret void
+}
+
+define <2 x i32> @cttz_v2i32(<2 x i32> %op) #0 {
+; CHECK-LABEL: cttz_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    rbit z0.s, p0/m, z0.s
+; CHECK-NEXT:    clz v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %res = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %op)
+  ret <2 x i32> %res
+}
+
+define <4 x i32> @cttz_v4i32(<4 x i32> %op) #0 {
+; CHECK-LABEL: cttz_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    rbit z0.s, p0/m, z0.s
+; CHECK-NEXT:    clz v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %op)
+  ret <4 x i32> %res
+}
+
+define void @cttz_v8i32(ptr %a) #0 {
+; CHECK-LABEL: cttz_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    rbit z0.s, p0/m, z0.s
+; CHECK-NEXT:    clz v0.4s, v0.4s
+; CHECK-NEXT:    rbit z1.s, p0/m, z1.s
+; CHECK-NEXT:    clz v1.4s, v1.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, ptr %a
+  %res = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %op)
+  store <8 x i32> %res, ptr %a
+  ret void
+}
+
+define <1 x i64> @cttz_v1i64(<1 x i64> %op) #0 {
+; CHECK-LABEL: cttz_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    rbit z0.d, p0/m, z0.d
+; CHECK-NEXT:    clz z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <1 x i64> @llvm.cttz.v1i64(<1 x i64> %op)
+  ret <1 x i64> %res
+}
+
+define <2 x i64> @cttz_v2i64(<2 x i64> %op) #0 {
+; CHECK-LABEL: cttz_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    rbit z0.d, p0/m, z0.d
+; CHECK-NEXT:    clz z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %op)
+  ret <2 x i64> %res
+}
+
+define void @cttz_v4i64(ptr %a) #0 {
+; CHECK-LABEL: cttz_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    rbit z0.d, p0/m, z0.d
+; CHECK-NEXT:    clz z0.d, p0/m, z0.d
+; CHECK-NEXT:    rbit z1.d, p0/m, z1.d
+; CHECK-NEXT:    clz z1.d, p0/m, z1.d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, ptr %a
+  %res = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %op)
+  store <4 x i64> %res, ptr %a
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }
+
+declare <4 x i8> @llvm.ctlz.v4i8(<4 x i8>)
+declare <8 x i8> @llvm.ctlz.v8i8(<8 x i8>)
+declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>)
+declare <32 x i8> @llvm.ctlz.v32i8(<32 x i8>)
+declare <2 x i16> @llvm.ctlz.v2i16(<2 x i16>)
+declare <4 x i16> @llvm.ctlz.v4i16(<4 x i16>)
+declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>)
+declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>)
+declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>)
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>)
+declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>)
+declare <1 x i64> @llvm.ctlz.v1i64(<1 x i64>)
+declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>)
+declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>)
+
+declare <4 x i8> @llvm.ctpop.v4i8(<4 x i8>)
+declare <8 x i8> @llvm.ctpop.v8i8(<8 x i8>)
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>)
+declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>)
+declare <2 x i16> @llvm.ctpop.v2i16(<2 x i16>)
+declare <4 x i16> @llvm.ctpop.v4i16(<4 x i16>)
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>)
+declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>)
+declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
+declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
+declare <1 x i64> @llvm.ctpop.v1i64(<1 x i64>)
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
+declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>)
+
+declare <4 x i8> @llvm.cttz.v4i8(<4 x i8>)
+declare <8 x i8> @llvm.cttz.v8i8(<8 x i8>)
+declare <16 x i8> @llvm.cttz.v16i8(<16 x i8>)
+declare <32 x i8> @llvm.cttz.v32i8(<32 x i8>)
+declare <2 x i16> @llvm.cttz.v2i16(<2 x i16>)
+declare <4 x i16> @llvm.cttz.v4i16(<4 x i16>)
+declare <8 x i16> @llvm.cttz.v8i16(<8 x i16>)
+declare <16 x i16> @llvm.cttz.v16i16(<16 x i16>)
+declare <2 x i32> @llvm.cttz.v2i32(<2 x i32>)
+declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>)
+declare <8 x i32> @llvm.cttz.v8i32(<8 x i32>)
+declare <1 x i64> @llvm.cttz.v1i64(<1 x i64>)
+declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>)
+declare <4 x i64> @llvm.cttz.v4i64(<4 x i64>)

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll
new file mode 100644
index 0000000000000..7e74d913c2f10
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll
@@ -0,0 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64"
+
+;
+; NOTE: SVE lowering for the BSP pseudoinst is not currently implemented, so we
+;       don't currently expect the code below to lower to BSL/BIT/BIF. Once
+;       this is implemented, this test will be fleshed out.
+;
+
+define <8 x i32> @fixed_bitselect_v8i32(ptr %pre_cond_ptr, ptr %left_ptr, ptr %right_ptr) #0 {
+; CHECK-LABEL: fixed_bitselect_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    ldp q0, q2, [x0]
+; CHECK-NEXT:    ldp q3, q1, [x1]
+; CHECK-NEXT:    ldp q5, q6, [x2]
+; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    sub z2.s, z4.s, z2.s
+; CHECK-NEXT:    sub z0.s, z4.s, z0.s
+; CHECK-NEXT:    bsl v0.16b, v3.16b, v5.16b
+; CHECK-NEXT:    bif v1.16b, v6.16b, v2.16b
+; CHECK-NEXT:    ret
+  %pre_cond = load <8 x i32>, ptr %pre_cond_ptr
+  %left = load <8 x i32>, ptr %left_ptr
+  %right = load <8 x i32>, ptr %right_ptr
+
+  %neg_cond = sub <8 x i32> zeroinitializer, %pre_cond
+  %min_cond = add <8 x i32> %pre_cond, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %left_bits_0 = and <8 x i32> %neg_cond, %left
+  %right_bits_0 = and <8 x i32> %min_cond, %right
+  %bsl0000 = or <8 x i32> %right_bits_0, %left_bits_0
+  ret <8 x i32> %bsl0000
+}
+
+attributes #0 = { "target-features"="+sve" }


        


More information about the llvm-commits mailing list