[llvm] e3e9c94 - [X86][AArch64][RISCV] Add tests for combining `(select c, (and X, 1), 0)` -> `(and (zext c), X)`; NFC

Noah Goldstein via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 28 11:47:01 PDT 2023


Author: Noah Goldstein
Date: 2023-09-28T13:46:46-05:00
New Revision: e3e9c94006103a32f28f9a9e10ee72986d3159fc

URL: https://github.com/llvm/llvm-project/commit/e3e9c94006103a32f28f9a9e10ee72986d3159fc
DIFF: https://github.com/llvm/llvm-project/commit/e3e9c94006103a32f28f9a9e10ee72986d3159fc.diff

LOG: [X86][AArch64][RISCV] Add tests for combining `(select c, (and X, 1), 0)` -> `(and (zext c), X)`; NFC

Added: 
    llvm/test/CodeGen/AArch64/select-to-and-zext.ll
    llvm/test/CodeGen/RISCV/select-to-and-zext.ll
    llvm/test/CodeGen/X86/select-to-and-zext.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/select-to-and-zext.ll b/llvm/test/CodeGen/AArch64/select-to-and-zext.ll
new file mode 100644
index 000000000000000..9773108e596705d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/select-to-and-zext.ll
@@ -0,0 +1,91 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-none-elf -verify-machineinstrs %s -o - | FileCheck %s
+
+define i32 @from_cmpeq(i32 %xx, i32 %y) {
+; CHECK-LABEL: from_cmpeq:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w1, #0x1
+; CHECK-NEXT:    cmp w0, #9
+; CHECK-NEXT:    csel w0, w8, wzr, eq
+; CHECK-NEXT:    ret
+  %x = icmp eq i32 %xx, 9
+  %masked = and i32 %y, 1
+
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_cmpeq_fail_bad_andmask(i32 %xx, i32 %y) {
+; CHECK-LABEL: from_cmpeq_fail_bad_andmask:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w1, #0x3
+; CHECK-NEXT:    cmp w0, #9
+; CHECK-NEXT:    csel w0, w8, wzr, eq
+; CHECK-NEXT:    ret
+  %x = icmp eq i32 %xx, 9
+  %masked = and i32 %y, 3
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_i1(i1 %x, i32 %y) {
+; CHECK-LABEL: from_i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w1, #0x1
+; CHECK-NEXT:    tst w0, #0x1
+; CHECK-NEXT:    csel w0, w8, wzr, ne
+; CHECK-NEXT:    ret
+  %masked = and i32 %y, 1
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_trunc_i8(i8 %xx, i32 %y) {
+; CHECK-LABEL: from_trunc_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w1, #0x1
+; CHECK-NEXT:    tst w0, #0x1
+; CHECK-NEXT:    csel w0, w8, wzr, ne
+; CHECK-NEXT:    ret
+  %masked = and i32 %y, 1
+  %x = trunc i8 %xx to i1
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_trunc_i64(i64 %xx, i32 %y) {
+; CHECK-LABEL: from_trunc_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w1, #0x1
+; CHECK-NEXT:    tst w0, #0x1
+; CHECK-NEXT:    csel w0, w8, wzr, ne
+; CHECK-NEXT:    ret
+  %masked = and i32 %y, 1
+  %x = trunc i64 %xx to i1
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_i1_fail_bad_select0(i1 %x, i32 %y) {
+; CHECK-LABEL: from_i1_fail_bad_select0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w1, #0x1
+; CHECK-NEXT:    tst w0, #0x1
+; CHECK-NEXT:    csinc w0, w8, wzr, ne
+; CHECK-NEXT:    ret
+  %masked = and i32 %y, 1
+  %r = select i1 %x, i32 %masked, i32 1
+  ret i32 %r
+}
+
+define i32 @from_i1_fail_bad_select1(i1 %x, i32 %y) {
+; CHECK-LABEL: from_i1_fail_bad_select1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w1, #0x1
+; CHECK-NEXT:    tst w0, #0x1
+; CHECK-NEXT:    csel w0, wzr, w8, ne
+; CHECK-NEXT:    ret
+  %masked = and i32 %y, 1
+  %r = select i1 %x, i32 0, i32 %masked
+  ret i32 %r
+}

diff --git a/llvm/test/CodeGen/RISCV/select-to-and-zext.ll b/llvm/test/CodeGen/RISCV/select-to-and-zext.ll
new file mode 100644
index 000000000000000..949247f8ef2df56
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/select-to-and-zext.ll
@@ -0,0 +1,152 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+
+
+define i32 @from_cmpeq(i32 %xx, i32 %y) {
+; RV32I-LABEL: from_cmpeq:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a0, a0, -9
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: from_cmpeq:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    addi a0, a0, -9
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+  %x = icmp eq i32 %xx, 9
+  %masked = and i32 %y, 1
+
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_cmpeq_fail_bad_andmask(i32 %xx, i32 %y) {
+; RV32I-LABEL: from_cmpeq_fail_bad_andmask:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a0, a0, -9
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    andi a0, a0, 3
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: from_cmpeq_fail_bad_andmask:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    addi a0, a0, -9
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addiw a0, a0, -1
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    andi a0, a0, 3
+; RV64I-NEXT:    ret
+  %x = icmp eq i32 %xx, 9
+  %masked = and i32 %y, 3
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_i1(i1 %x, i32 %y) {
+; RV32I-LABEL: from_i1:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: from_i1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+  %masked = and i32 %y, 1
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_trunc_i8(i8 %xx, i32 %y) {
+; RV32I-LABEL: from_trunc_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: from_trunc_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+  %masked = and i32 %y, 1
+  %x = trunc i8 %xx to i1
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_trunc_i64(i64 %xx, i32 %y) {
+; RV32I-LABEL: from_trunc_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    and a0, a2, a0
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: from_trunc_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+  %masked = and i32 %y, 1
+  %x = trunc i64 %xx to i1
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_i1_fail_bad_select0(i1 %x, i32 %y) {
+; RV32I-LABEL: from_i1_fail_bad_select0:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    bnez a0, .LBB5_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    li a0, 1
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB5_2:
+; RV32I-NEXT:    andi a0, a1, 1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: from_i1_fail_bad_select0:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    bnez a0, .LBB5_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    li a0, 1
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB5_2:
+; RV64I-NEXT:    andi a0, a1, 1
+; RV64I-NEXT:    ret
+  %masked = and i32 %y, 1
+  %r = select i1 %x, i32 %masked, i32 1
+  ret i32 %r
+}
+
+define i32 @from_i1_fail_bad_select1(i1 %x, i32 %y) {
+; RV32I-LABEL: from_i1_fail_bad_select1:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: from_i1_fail_bad_select1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, -1
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+  %masked = and i32 %y, 1
+  %r = select i1 %x, i32 0, i32 %masked
+  ret i32 %r
+}

diff --git a/llvm/test/CodeGen/X86/select-to-and-zext.ll b/llvm/test/CodeGen/X86/select-to-and-zext.ll
new file mode 100644
index 000000000000000..1bf908da79a184c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/select-to-and-zext.ll
@@ -0,0 +1,182 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-pc-linux-gnu | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu | FileCheck %s --check-prefix=X64
+
+define i32 @from_cmpeq(i32 %xx, i32 %y) {
+; X86-LABEL: from_cmpeq:
+; X86:       # %bb.0:
+; X86-NEXT:    cmpl $9, {{[0-9]+}}(%esp)
+; X86-NEXT:    je .LBB0_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB0_1:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: from_cmpeq:
+; X64:       # %bb.0:
+; X64-NEXT:    andl $1, %esi
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpl $9, %edi
+; X64-NEXT:    cmovel %esi, %eax
+; X64-NEXT:    retq
+  %x = icmp eq i32 %xx, 9
+  %masked = and i32 %y, 1
+
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_cmpeq_fail_bad_andmask(i32 %xx, i32 %y) {
+; X86-LABEL: from_cmpeq_fail_bad_andmask:
+; X86:       # %bb.0:
+; X86-NEXT:    cmpl $9, {{[0-9]+}}(%esp)
+; X86-NEXT:    je .LBB1_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB1_1:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $3, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: from_cmpeq_fail_bad_andmask:
+; X64:       # %bb.0:
+; X64-NEXT:    andl $3, %esi
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpl $9, %edi
+; X64-NEXT:    cmovel %esi, %eax
+; X64-NEXT:    retq
+  %x = icmp eq i32 %xx, 9
+  %masked = and i32 %y, 3
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_i1(i1 %x, i32 %y) {
+; X86-LABEL: from_i1:
+; X86:       # %bb.0:
+; X86-NEXT:    testb $1, {{[0-9]+}}(%esp)
+; X86-NEXT:    jne .LBB2_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB2_1:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: from_i1:
+; X64:       # %bb.0:
+; X64-NEXT:    andl $1, %esi
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    testb $1, %dil
+; X64-NEXT:    cmovnel %esi, %eax
+; X64-NEXT:    retq
+  %masked = and i32 %y, 1
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_trunc_i8(i8 %xx, i32 %y) {
+; X86-LABEL: from_trunc_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    testb $1, {{[0-9]+}}(%esp)
+; X86-NEXT:    jne .LBB3_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB3_1:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: from_trunc_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    andl $1, %esi
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    testb $1, %dil
+; X64-NEXT:    cmovnel %esi, %eax
+; X64-NEXT:    retq
+  %masked = and i32 %y, 1
+  %x = trunc i8 %xx to i1
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_trunc_i64(i64 %xx, i32 %y) {
+; X86-LABEL: from_trunc_i64:
+; X86:       # %bb.0:
+; X86-NEXT:    testb $1, {{[0-9]+}}(%esp)
+; X86-NEXT:    jne .LBB4_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB4_1:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: from_trunc_i64:
+; X64:       # %bb.0:
+; X64-NEXT:    andl $1, %esi
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    testb $1, %dil
+; X64-NEXT:    cmovnel %esi, %eax
+; X64-NEXT:    retq
+  %masked = and i32 %y, 1
+  %x = trunc i64 %xx to i1
+  %r = select i1 %x, i32 %masked, i32 0
+  ret i32 %r
+}
+
+define i32 @from_i1_fail_bad_select0(i1 %x, i32 %y) {
+; X86-LABEL: from_i1_fail_bad_select0:
+; X86:       # %bb.0:
+; X86-NEXT:    testb $1, {{[0-9]+}}(%esp)
+; X86-NEXT:    jne .LBB5_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movl $1, %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB5_1:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: from_i1_fail_bad_select0:
+; X64:       # %bb.0:
+; X64-NEXT:    andl $1, %esi
+; X64-NEXT:    testb $1, %dil
+; X64-NEXT:    movl $1, %eax
+; X64-NEXT:    cmovnel %esi, %eax
+; X64-NEXT:    retq
+  %masked = and i32 %y, 1
+  %r = select i1 %x, i32 %masked, i32 1
+  ret i32 %r
+}
+
+define i32 @from_i1_fail_bad_select1(i1 %x, i32 %y) {
+; X86-LABEL: from_i1_fail_bad_select1:
+; X86:       # %bb.0:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    testb $1, {{[0-9]+}}(%esp)
+; X86-NEXT:    jne .LBB6_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:  .LBB6_2:
+; X86-NEXT:    retl
+;
+; X64-LABEL: from_i1_fail_bad_select1:
+; X64:       # %bb.0:
+; X64-NEXT:    andl $1, %esi
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    testb $1, %dil
+; X64-NEXT:    cmovel %esi, %eax
+; X64-NEXT:    retq
+  %masked = and i32 %y, 1
+  %r = select i1 %x, i32 0, i32 %masked
+  ret i32 %r
+}


        


More information about the llvm-commits mailing list