[llvm] c4f356e - [AArch64][SVE] NFC: Add tests for masked add/sub patterns (D129751)

Cullen Rhodes via llvm-commits <llvm-commits at lists.llvm.org>
Mon Jul 25 00:22:35 PDT 2022


Author: Cullen Rhodes
Date: 2022-07-25T07:22:04Z
New Revision: c4f356e519749d19e5cd445e25743ba2dbe66226

URL: https://github.com/llvm/llvm-project/commit/c4f356e519749d19e5cd445e25743ba2dbe66226
DIFF: https://github.com/llvm/llvm-project/commit/c4f356e519749d19e5cd445e25743ba2dbe66226.diff

LOG: [AArch64][SVE] NFC: Add tests for masked add/sub patterns (D129751)

Added: 
    llvm/test/CodeGen/AArch64/sve-masked-int-arith.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-masked-int-arith.ll
new file mode 100644
index 0000000000000..10b0de8e3f92b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-masked-int-arith.ll
@@ -0,0 +1,106 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; Masked Additions
+;
+
+define <vscale x 16 x i8> @masked_add_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: masked_add_nxv16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.b, #0 // =0x0
+; CHECK-NEXT:    sel z1.b, p0, z1.b, z2.b
+; CHECK-NEXT:    add z0.b, z0.b, z1.b
+; CHECK-NEXT:    ret
+  %select = select <vscale x 16 x i1> %mask, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer
+  %ret = add <vscale x 16 x i8> %a, %select
+  ret <vscale x 16 x i8> %ret
+}
+
+define <vscale x 8 x i16> @masked_add_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: masked_add_nxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    sel z1.h, p0, z1.h, z2.h
+; CHECK-NEXT:    add z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %select = select <vscale x 8 x i1> %mask, <vscale x 8 x i16> %b, <vscale x 8 x i16> zeroinitializer
+  %ret = add <vscale x 8 x i16> %a, %select
+  ret <vscale x 8 x i16> %ret
+}
+
+define <vscale x 4 x i32> @masked_add_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: masked_add_nxv4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.s, #0 // =0x0
+; CHECK-NEXT:    sel z1.s, p0, z1.s, z2.s
+; CHECK-NEXT:    add z0.s, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %select = select <vscale x 4 x i1> %mask, <vscale x 4 x i32> %b, <vscale x 4 x i32> zeroinitializer
+  %ret = add <vscale x 4 x i32> %a, %select
+  ret <vscale x 4 x i32> %ret
+}
+
+define <vscale x 2 x i64> @masked_add_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: masked_add_nxv2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.d, #0 // =0x0
+; CHECK-NEXT:    sel z1.d, p0, z1.d, z2.d
+; CHECK-NEXT:    add z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %select = select <vscale x 2 x i1> %mask, <vscale x 2 x i64> %b, <vscale x 2 x i64> zeroinitializer
+  %ret = add <vscale x 2 x i64> %a, %select
+  ret <vscale x 2 x i64> %ret
+}
+
+;
+; Masked Subtractions
+;
+
+define <vscale x 16 x i8> @masked_sub_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: masked_sub_nxv16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.b, #0 // =0x0
+; CHECK-NEXT:    sel z1.b, p0, z1.b, z2.b
+; CHECK-NEXT:    sub z0.b, z0.b, z1.b
+; CHECK-NEXT:    ret
+  %select = select <vscale x 16 x i1> %mask, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer
+  %ret = sub <vscale x 16 x i8> %a, %select
+  ret <vscale x 16 x i8> %ret
+}
+
+define <vscale x 8 x i16> @masked_sub_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: masked_sub_nxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    sel z1.h, p0, z1.h, z2.h
+; CHECK-NEXT:    sub z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %select = select <vscale x 8 x i1> %mask, <vscale x 8 x i16> %b, <vscale x 8 x i16> zeroinitializer
+  %ret = sub <vscale x 8 x i16> %a, %select
+  ret <vscale x 8 x i16> %ret
+}
+
+define <vscale x 4 x i32> @masked_sub_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: masked_sub_nxv4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.s, #0 // =0x0
+; CHECK-NEXT:    sel z1.s, p0, z1.s, z2.s
+; CHECK-NEXT:    sub z0.s, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %select = select <vscale x 4 x i1> %mask, <vscale x 4 x i32> %b, <vscale x 4 x i32> zeroinitializer
+  %ret = sub <vscale x 4 x i32> %a, %select
+  ret <vscale x 4 x i32> %ret
+}
+
+define <vscale x 2 x i64> @masked_sub_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: masked_sub_nxv2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.d, #0 // =0x0
+; CHECK-NEXT:    sel z1.d, p0, z1.d, z2.d
+; CHECK-NEXT:    sub z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %select = select <vscale x 2 x i1> %mask, <vscale x 2 x i64> %b, <vscale x 2 x i64> zeroinitializer
+  %ret = sub <vscale x 2 x i64> %a, %select
+  ret <vscale x 2 x i64> %ret
+}
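
For context: each test pairs a select against the zero vector with an unpredicated add or sub, and the autogenerated CHECK lines capture the current lowering, where the masked operand is first materialized with a mov/sel pair. D129751 presumably folds that select into a single predicated SVE instruction. A plausible folded lowering for masked_add_nxv16i8 (a sketch of the expected target codegen, not output from this commit) would be:

  masked_add_nxv16i8:
          add z0.b, p0/m, z0.b, z1.b
          ret

The merging predicated ADD leaves inactive lanes of z0 unchanged, which matches the IR semantics: inactive lanes of the select contribute zero, so %a passes through untouched in those lanes. The sub cases would fold analogously to a predicated SUB, since the zero-selected operand is on the right-hand side.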
