[llvm] 9787240 - [RISCV][test] Add tests for extending negated mask

Piotr Fusik via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 20 02:37:36 PST 2025


Author: Piotr Fusik
Date: 2025-02-20T11:37:03+01:00
New Revision: 97872409125bd9972ab7c74f1fe25539820f6631

URL: https://github.com/llvm/llvm-project/commit/97872409125bd9972ab7c74f1fe25539820f6631
DIFF: https://github.com/llvm/llvm-project/commit/97872409125bd9972ab7c74f1fe25539820f6631.diff

LOG: [RISCV][test] Add tests for extending negated mask
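
The new tests cover sign- and zero-extension of an i1 mask that has been
negated (xor with an all-ones splat) or xor'ed with another mask, for both
scalable (nxv8i8) and fixed-length (v8i8) vectors, on rv32 and rv64 with +v.
The core pattern, as it appears in the tests below, is:

    %not = xor <vscale x 8 x i1> %m, splat (i1 true)
    %ext = sext <vscale x 8 x i1> %not to <vscale x 8 x i8>

With the current lowering, the negation shows up as a separate vmnot.m
(or vmxor.mm) in front of the vmv.v.i/vmerge.vim sequence, as the
autogenerated CHECK lines show.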

Added: 
    llvm/test/CodeGen/RISCV/rvv/mask-exts-not.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/mask-exts-not.ll b/llvm/test/CodeGen/RISCV/rvv/mask-exts-not.ll
new file mode 100644
index 0000000000000..1a00c84a022f3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/mask-exts-not.ll
@@ -0,0 +1,111 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 8 x i8> @mask_sext_not_nxv8i8(<vscale x 8 x i1> %m) {
+; CHECK-LABEL: mask_sext_not_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmnot.m v0, v0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    ret
+  %not = xor <vscale x 8 x i1> %m, splat (i1 true)
+  %ext = sext <vscale x 8 x i1> %not to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %ext
+}
+
+define <vscale x 8 x i8> @mask_zext_not_nxv8i8(<vscale x 8 x i1> %m) {
+; CHECK-LABEL: mask_zext_not_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmnot.m v0, v0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    ret
+  %not = xor <vscale x 8 x i1> %m, splat (i1 true)
+  %ext = zext <vscale x 8 x i1> %not to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %ext
+}
+
+define <8 x i8> @mask_sext_not_v8i8(<8 x i1> %m) {
+; CHECK-LABEL: mask_sext_not_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmnot.m v0, v0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    ret
+  %not = xor <8 x i1> %m, splat (i1 true)
+  %ext = sext <8 x i1> %not to <8 x i8>
+  ret <8 x i8> %ext
+}
+
+define <8 x i8> @mask_zext_not_v8i8(<8 x i1> %m) {
+; CHECK-LABEL: mask_zext_not_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmnot.m v0, v0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    ret
+  %not = xor <8 x i1> %m, splat (i1 true)
+  %ext = zext <8 x i1> %not to <8 x i8>
+  ret <8 x i8> %ext
+}
+
+define <vscale x 8 x i8> @mask_sext_xor_nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i1> %x) {
+; CHECK-LABEL: mask_sext_xor_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    ret
+  %xor = xor <vscale x 8 x i1> %m, %x
+  %ext = sext <vscale x 8 x i1> %xor to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %ext
+}
+
+define <vscale x 8 x i8> @mask_zext_xor_nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i1> %x) {
+; CHECK-LABEL: mask_zext_xor_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    ret
+  %xor = xor <vscale x 8 x i1> %m, %x
+  %ext = zext <vscale x 8 x i1> %xor to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %ext
+}
+
+define <8 x i8> @mask_sext_xor_v8i8(<8 x i1> %m) {
+; CHECK-LABEL: mask_sext_xor_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v8, a0
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    ret
+  %xor = xor <8 x i1> %m, <i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0>
+  %ext = sext <8 x i1> %xor to <8 x i8>
+  ret <8 x i8> %ext
+}
+
+define <8 x i8> @mask_zext_xor_v8i8(<8 x i1> %m) {
+; CHECK-LABEL: mask_zext_xor_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v8, a0
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    ret
+  %xor = xor <8 x i1> %m, <i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0>
+  %ext = zext <8 x i1> %xor to <8 x i8>
+  ret <8 x i8> %ext
+}
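
A note on what these baseline checks set up: vmerge.vim selects the
immediate where the mask bit in v0 is set and the other source where it is
clear, so the explicit vmnot.m could in principle be folded away by swapping
the two selected values. A hypothetical lowering for mask_sext_not_nxv8i8
(a sketch only, not output produced by any current patch) might look like:

    vsetvli a0, zero, e8, m1, ta, ma
    vmv.v.i v8, -1              # start from all-ones (the "mask clear" value)
    vmerge.vim v8, v8, 0, v0    # where the original mask is set, write 0
    ret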