[llvm] e000dbc - [X86] Add test coverage based off Issue #51609

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 30 05:01:02 PDT 2022


Author: Simon Pilgrim
Date: 2022-03-30T12:57:22+01:00
New Revision: e000dbc39f14c08ae2e751c00ffaec01301ba796

URL: https://github.com/llvm/llvm-project/commit/e000dbc39f14c08ae2e751c00ffaec01301ba796
DIFF: https://github.com/llvm/llvm-project/commit/e000dbc39f14c08ae2e751c00ffaec01301ba796.diff

LOG: [X86] Add test coverage based off Issue #51609

Added: 
    llvm/test/CodeGen/X86/xor-lea.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/xor-lea.ll b/llvm/test/CodeGen/X86/xor-lea.ll
new file mode 100644
index 0000000000000..26251c5779804
--- /dev/null
+++ b/llvm/test/CodeGen/X86/xor-lea.ll
@@ -0,0 +1,221 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi | FileCheck %s --check-prefixes=X64
+
+; PR52267 (Bugzilla) / GitHub Issue #51609
+; InstCombine transforms an 'add' with min-signed-value into an 'xor'.
+; LEA instruction selection should be able to see through that
+; transform and reduce add/shift/xor instruction counts and moves.
+
+;
+; XOR(X,MIN_SIGNED_VALUE)
+;
+
+define i8 @xor_sminval_i8(i8 %x) {
+; X86-LABEL: xor_sminval_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    xorb $-128, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_sminval_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorb $-128, %al
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+  %r = xor i8 %x, 128
+  ret i8 %r
+}
+
+; negative test
+define i8 @xor_notsminval_i8(i8 %x) {
+; X86-LABEL: xor_notsminval_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    xorb $127, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_notsminval_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorb $127, %al
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+  %r = xor i8 %x, 127
+  ret i8 %r
+}
+
+define i16 @xor_sminval_i16(i16 %x) {
+; X86-LABEL: xor_sminval_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $32768, %eax # imm = 0x8000
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_sminval_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl $32768, %eax # imm = 0x8000
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
+; X64-NEXT:    retq
+  %r = xor i16 %x, 32768
+  ret i16 %r
+}
+
+define i32 @xor_sminval_i32(i32 %x) {
+; X86-LABEL: xor_sminval_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_sminval_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl $-2147483648, %eax # imm = 0x80000000
+; X64-NEXT:    retq
+  %r = xor i32 %x, 2147483648
+  ret i32 %r
+}
+
+; negative test
+define i32 @xor_notsminval_i32(i32 %x) {
+; X86-LABEL: xor_notsminval_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $32768, %eax # imm = 0x8000
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_notsminval_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl $32768, %eax # imm = 0x8000
+; X64-NEXT:    retq
+  %r = xor i32 %x, 32768
+  ret i32 %r
+}
+
+define i64 @xor_sminval_i64(i64 %x) {
+; X86-LABEL: xor_sminval_i64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $-2147483648, %edx # imm = 0x80000000
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_sminval_i64:
+; X64:       # %bb.0:
+; X64-NEXT:    movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
+; X64-NEXT:    xorq %rdi, %rax
+; X64-NEXT:    retq
+  %r = xor i64 %x, -9223372036854775808
+  ret i64 %r
+}
+
+;
+; XOR(SHL(X,C),MIN_SIGNED_VALUE)
+;
+
+define i8 @xor_shl_sminval_i8(i8 %x) {
+; X86-LABEL: xor_shl_sminval_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    addb %al, %al
+; X86-NEXT:    xorb $-128, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_shl_sminval_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    # kill: def $edi killed $edi def $rdi
+; X64-NEXT:    leal (%rdi,%rdi), %eax
+; X64-NEXT:    xorb $-128, %al
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+  %s = shl i8 %x, 1
+  %r = xor i8 %s, 128
+  ret i8 %r
+}
+
+define i16 @xor_shl_sminval_i16(i16 %x) {
+; X86-LABEL: xor_shl_sminval_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shll $2, %eax
+; X86-NEXT:    xorl $32768, %eax # imm = 0x8000
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_shl_sminval_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    # kill: def $edi killed $edi def $rdi
+; X64-NEXT:    leal (,%rdi,4), %eax
+; X64-NEXT:    xorl $32768, %eax # imm = 0x8000
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
+; X64-NEXT:    retq
+  %s = shl i16 %x, 2
+  %r = xor i16 %s, 32768
+  ret i16 %r
+}
+
+define i32 @xor_shl_sminval_i32(i32 %x) {
+; X86-LABEL: xor_shl_sminval_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shll $4, %eax
+; X86-NEXT:    xorl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_shl_sminval_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    shll $4, %eax
+; X64-NEXT:    xorl $-2147483648, %eax # imm = 0x80000000
+; X64-NEXT:    retq
+  %s = shl i32 %x, 4
+  %r = xor i32 %s, 2147483648
+  ret i32 %r
+}
+
+; negative test
+define i32 @xor_bigshl_sminval_i32(i32 %x) {
+; X86-LABEL: xor_bigshl_sminval_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shll $8, %eax
+; X86-NEXT:    xorl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_bigshl_sminval_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    shll $8, %eax
+; X64-NEXT:    xorl $-2147483648, %eax # imm = 0x80000000
+; X64-NEXT:    retq
+  %s = shl i32 %x, 8
+  %r = xor i32 %s, 2147483648
+  ret i32 %r
+}
+
+define i64 @xor_shl_sminval_i64(i64 %x) {
+; X86-LABEL: xor_shl_sminval_i64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    shldl $4, %eax, %edx
+; X86-NEXT:    shll $4, %eax
+; X86-NEXT:    xorl $-2147483648, %edx # imm = 0x80000000
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_shl_sminval_i64:
+; X64:       # %bb.0:
+; X64-NEXT:    shlq $4, %rdi
+; X64-NEXT:    movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
+; X64-NEXT:    xorq %rdi, %rax
+; X64-NEXT:    retq
+  %s = shl i64 %x, 4
+  %r = xor i64 %s, -9223372036854775808
+  ret i64 %r
+}


        


More information about the llvm-commits mailing list