[llvm] d8e50c9 - [CodeGen] Add PR50197 AArch64/ARM/X86 test coverage

Simon Pilgrim via llvm-commits llvm-commits@lists.llvm.org
Fri Oct 22 06:23:54 PDT 2021


Author: Simon Pilgrim
Date: 2021-10-22T14:22:46+01:00
New Revision: d8e50c9dba7adc7828666bb37939049876cf424f

URL: https://github.com/llvm/llvm-project/commit/d8e50c9dba7adc7828666bb37939049876cf424f
DIFF: https://github.com/llvm/llvm-project/commit/d8e50c9dba7adc7828666bb37939049876cf424f.diff

LOG: [CodeGen] Add PR50197 AArch64/ARM/X86 test coverage

Pre-commit for D111530
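
The new tests exercise i128/i64 shifts whose only use is a compare with zero;
today these expand into a funnel shift (EXTR on AArch64, SHRD/SHLD on x86)
plus an OR feeding the compare, and the pre-committed CHECK lines pin down
that unoptimized codegen. The optimization the tests add coverage for
(described in the comment header of each file, see PR50197) observes that for
a value split into halves %hi:%lo, a check such as (lshr i128 %a, 17) == 0
only asks whether every bit above bit 16 is clear, so the rotation is
unnecessary. A minimal sketch of that equivalence, using illustrative i64
halves (not part of the added files):

; (lshr i128 (%hi:%lo), 17) == 0 reduces to ((%lo >> 17) | %hi) == 0:
; OR the shifted low half with the untouched high half and test once.
define i1 @srl_eq_zero_sketch(i64 %lo, i64 %hi) {
  %lo.srl = lshr i64 %lo, 17
  %or = or i64 %lo.srl, %hi
  %cmp = icmp eq i64 %or, 0
  ret i1 %cmp
}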

Added: 
    llvm/test/CodeGen/AArch64/icmp-shift-opt.ll
    llvm/test/CodeGen/ARM/icmp-shift-opt.ll
    llvm/test/CodeGen/X86/icmp-shift-opt.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/icmp-shift-opt.ll b/llvm/test/CodeGen/AArch64/icmp-shift-opt.ll
new file mode 100644
index 0000000000000..3532881a2223e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/icmp-shift-opt.ll
@@ -0,0 +1,142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
+
+; Optimize an expanded SRL/SHL used as the input of a
+; SETCC comparing it with zero by removing the rotation.
+;
+; See https://bugs.llvm.org/show_bug.cgi?id=50197
+define i128 @opt_setcc_lt_power_of_2(i128 %a) nounwind {
+; CHECK-LABEL: opt_setcc_lt_power_of_2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:  .LBB0_1: // %loop
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    adds x0, x0, #1
+; CHECK-NEXT:    adcs x1, x1, xzr
+; CHECK-NEXT:    extr x8, x1, x0, #60
+; CHECK-NEXT:    orr x8, x8, x1, lsr #60
+; CHECK-NEXT:    cbnz x8, .LBB0_1
+; CHECK-NEXT:  // %bb.2: // %exit
+; CHECK-NEXT:    ret
+  br label %loop
+
+loop:
+  %phi.a = phi i128 [ %a, %0 ], [ %inc, %loop ]
+  %inc = add i128 %phi.a, 1
+  %cmp = icmp ult i128 %inc, 1152921504606846976
+  br i1 %cmp, label %exit, label %loop
+
+exit:
+  ret i128 %inc
+}
+
+define i1 @opt_setcc_srl_eq_zero(i128 %a) nounwind {
+; CHECK-LABEL: opt_setcc_srl_eq_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    extr x8, x1, x0, #17
+; CHECK-NEXT:    orr x8, x8, x1, lsr #17
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+   %srl = lshr i128 %a, 17
+   %cmp = icmp eq i128 %srl, 0
+   ret i1 %cmp
+}
+
+define i1 @opt_setcc_srl_ne_zero(i128 %a) nounwind {
+; CHECK-LABEL: opt_setcc_srl_ne_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    extr x8, x1, x0, #17
+; CHECK-NEXT:    orr x8, x8, x1, lsr #17
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+   %srl = lshr i128 %a, 17
+   %cmp = icmp ne i128 %srl, 0
+   ret i1 %cmp
+}
+
+define i1 @opt_setcc_shl_eq_zero(i128 %a) nounwind {
+; CHECK-LABEL: opt_setcc_shl_eq_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    extr x8, x1, x0, #47
+; CHECK-NEXT:    orr x8, x8, x0, lsl #17
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+   %shl = shl i128 %a, 17
+   %cmp = icmp eq i128 %shl, 0
+   ret i1 %cmp
+}
+
+define i1 @opt_setcc_shl_ne_zero(i128 %a) nounwind {
+; CHECK-LABEL: opt_setcc_shl_ne_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    extr x8, x1, x0, #47
+; CHECK-NEXT:    orr x8, x8, x0, lsl #17
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+   %shl = shl i128 %a, 17
+   %cmp = icmp ne i128 %shl, 0
+   ret i1 %cmp
+}
+
+; Negative test: the optimization should not be applied if the shift has multiple users.
+define i1 @opt_setcc_shl_eq_zero_multiple_shl_users(i128 %a) nounwind {
+; CHECK-LABEL: opt_setcc_shl_eq_zero_multiple_shl_users:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    extr x1, x1, x0, #47
+; CHECK-NEXT:    lsl x0, x0, #17
+; CHECK-NEXT:    orr x8, x0, x1
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cset w19, eq
+; CHECK-NEXT:    bl use
+; CHECK-NEXT:    mov w0, w19
+; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
+   %shl = shl i128 %a, 17
+   %cmp = icmp eq i128 %shl, 0
+   call void @use(i128 %shl)
+   ret i1 %cmp
+}
+
+; Check that the optimization is applied to a DAG of the appropriate shape
+; even when no shift was actually expanded.
+define i1 @opt_setcc_expanded_shl_correct_shifts(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: opt_setcc_expanded_shl_correct_shifts:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    extr x8, x0, x1, #47
+; CHECK-NEXT:    orr x8, x8, x1, lsl #17
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %shl.a = shl i64 %a, 17
+  %srl.b = lshr i64 %b, 47
+  %or.0 = or i64 %shl.a, %srl.b
+  %shl.b = shl i64 %b, 17
+  %or.1 = or i64 %or.0, %shl.b
+  %cmp = icmp eq i64 %or.1, 0
+  ret i1 %cmp
+}
+
+; Negative test: the optimization should not be applied because
+; the constants used in the shifts do not match.
+define i1 @opt_setcc_expanded_shl_wrong_shifts(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: opt_setcc_expanded_shl_wrong_shifts:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    extr x8, x0, x1, #47
+; CHECK-NEXT:    orr x8, x8, x1, lsl #18
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %shl.a = shl i64 %a, 17
+  %srl.b = lshr i64 %b, 47
+  %or.0 = or i64 %shl.a, %srl.b
+  %shl.b = shl i64 %b, 18
+  %or.1 = or i64 %or.0, %shl.b
+  %cmp = icmp eq i64 %or.1, 0
+  ret i1 %cmp
+}
+
+declare void @use(i128 %a)

diff  --git a/llvm/test/CodeGen/ARM/icmp-shift-opt.ll b/llvm/test/CodeGen/ARM/icmp-shift-opt.ll
new file mode 100644
index 0000000000000..d2514b1608016
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/icmp-shift-opt.ll
@@ -0,0 +1,151 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=armv7 %s -o - | FileCheck %s
+
+; Optimize an expanded SRL/SHL used as the input of a
+; SETCC comparing it with zero by removing the rotation.
+;
+; See https://bugs.llvm.org/show_bug.cgi?id=50197
+define i64 @opt_setcc_lt_power_of_2(i64 %a) nounwind {
+; CHECK-LABEL: opt_setcc_lt_power_of_2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:  .LBB0_1: @ %loop
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    adds r0, r0, #1
+; CHECK-NEXT:    adc r1, r1, #0
+; CHECK-NEXT:    lsr r2, r0, #16
+; CHECK-NEXT:    orr r2, r2, r1, lsl #16
+; CHECK-NEXT:    orr r2, r2, r1, lsr #16
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    bne .LBB0_1
+; CHECK-NEXT:  @ %bb.2: @ %exit
+; CHECK-NEXT:    bx lr
+  br label %loop
+
+loop:
+  %phi.a = phi i64 [ %a, %0 ], [ %inc, %loop ]
+  %inc = add i64 %phi.a, 1
+  %cmp = icmp ult i64 %inc, 65536
+  br i1 %cmp, label %exit, label %loop
+
+exit:
+  ret i64 %inc
+}
+
+define i1 @opt_setcc_srl_eq_zero(i64 %a) nounwind {
+; CHECK-LABEL: opt_setcc_srl_eq_zero:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsr r0, r0, #17
+; CHECK-NEXT:    orr r0, r0, r1, lsl #15
+; CHECK-NEXT:    orr r0, r0, r1, lsr #17
+; CHECK-NEXT:    clz r0, r0
+; CHECK-NEXT:    lsr r0, r0, #5
+; CHECK-NEXT:    bx lr
+   %srl = lshr i64 %a, 17
+   %cmp = icmp eq i64 %srl, 0
+   ret i1 %cmp
+}
+
+define i1 @opt_setcc_srl_ne_zero(i64 %a) nounwind {
+; CHECK-LABEL: opt_setcc_srl_ne_zero:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsr r0, r0, #17
+; CHECK-NEXT:    orr r0, r0, r1, lsl #15
+; CHECK-NEXT:    orr r0, r0, r1, lsr #17
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    movwne r0, #1
+; CHECK-NEXT:    bx lr
+   %srl = lshr i64 %a, 17
+   %cmp = icmp ne i64 %srl, 0
+   ret i1 %cmp
+}
+
+define i1 @opt_setcc_shl_eq_zero(i64 %a) nounwind {
+; CHECK-LABEL: opt_setcc_shl_eq_zero:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsl r1, r1, #17
+; CHECK-NEXT:    orr r1, r1, r0, lsr #15
+; CHECK-NEXT:    orr r0, r1, r0, lsl #17
+; CHECK-NEXT:    clz r0, r0
+; CHECK-NEXT:    lsr r0, r0, #5
+; CHECK-NEXT:    bx lr
+   %shl = shl i64 %a, 17
+   %cmp = icmp eq i64 %shl, 0
+   ret i1 %cmp
+}
+
+define i1 @opt_setcc_shl_ne_zero(i64 %a) nounwind {
+; CHECK-LABEL: opt_setcc_shl_ne_zero:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsl r1, r1, #17
+; CHECK-NEXT:    orr r1, r1, r0, lsr #15
+; CHECK-NEXT:    orr r0, r1, r0, lsl #17
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    movwne r0, #1
+; CHECK-NEXT:    bx lr
+   %shl = shl i64 %a, 17
+   %cmp = icmp ne i64 %shl, 0
+   ret i1 %cmp
+}
+
+; Negative test: the optimization should not be applied if the shift has multiple users.
+define i1 @opt_setcc_shl_eq_zero_multiple_shl_users(i64 %a) nounwind {
+; CHECK-LABEL: opt_setcc_shl_eq_zero_multiple_shl_users:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r4, r5, r11, lr}
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    lsl r0, r1, #17
+; CHECK-NEXT:    orr r5, r0, r4, lsr #15
+; CHECK-NEXT:    lsl r0, r4, #17
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl use
+; CHECK-NEXT:    orr r0, r5, r4, lsl #17
+; CHECK-NEXT:    clz r0, r0
+; CHECK-NEXT:    lsr r0, r0, #5
+; CHECK-NEXT:    pop {r4, r5, r11, pc}
+   %shl = shl i64 %a, 17
+   %cmp = icmp eq i64 %shl, 0
+   call void @use(i64 %shl)
+   ret i1 %cmp
+}
+
+; Check that the optimization is applied to a DAG of the appropriate shape
+; even when no shift was actually expanded.
+define i1 @opt_setcc_expanded_shl_correct_shifts(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: opt_setcc_expanded_shl_correct_shifts:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsl r0, r0, #17
+; CHECK-NEXT:    orr r0, r0, r1, lsr #15
+; CHECK-NEXT:    orr r0, r0, r1, lsl #17
+; CHECK-NEXT:    clz r0, r0
+; CHECK-NEXT:    lsr r0, r0, #5
+; CHECK-NEXT:    bx lr
+  %shl.a = shl i32 %a, 17
+  %srl.b = lshr i32 %b, 15
+  %or.0 = or i32 %shl.a, %srl.b
+  %shl.b = shl i32 %b, 17
+  %or.1 = or i32 %or.0, %shl.b
+  %cmp = icmp eq i32 %or.1, 0
+  ret i1 %cmp
+}
+
+; Negative test: the optimization should not be applied because
+; the constants used in the shifts do not match.
+define i1 @opt_setcc_expanded_shl_wrong_shifts(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: opt_setcc_expanded_shl_wrong_shifts:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsl r0, r0, #17
+; CHECK-NEXT:    orr r0, r0, r1, lsr #15
+; CHECK-NEXT:    orr r0, r0, r1, lsl #18
+; CHECK-NEXT:    clz r0, r0
+; CHECK-NEXT:    lsr r0, r0, #5
+; CHECK-NEXT:    bx lr
+  %shl.a = shl i32 %a, 17
+  %srl.b = lshr i32 %b, 15
+  %or.0 = or i32 %shl.a, %srl.b
+  %shl.b = shl i32 %b, 18
+  %or.1 = or i32 %or.0, %shl.b
+  %cmp = icmp eq i32 %or.1, 0
+  ret i1 %cmp
+}
+
+declare void @use(i64 %a)

diff  --git a/llvm/test/CodeGen/X86/icmp-shift-opt.ll b/llvm/test/CodeGen/X86/icmp-shift-opt.ll
new file mode 100644
index 0000000000000..94a5462a3c7ce
--- /dev/null
+++ b/llvm/test/CodeGen/X86/icmp-shift-opt.ll
@@ -0,0 +1,334 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=i686-- < %s | FileCheck %s --check-prefix=X86
+; RUN: llc -mtriple=x86_64-- < %s | FileCheck %s --check-prefix=X64
+
+; Optimize an expanded SRL/SHL used as the input of a
+; SETCC comparing it with zero by removing the rotation.
+;
+; See https://bugs.llvm.org/show_bug.cgi?id=50197
+define i128 @opt_setcc_lt_power_of_2(i128 %a) nounwind {
+; X86-LABEL: opt_setcc_lt_power_of_2:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:  .LBB0_1: # %loop
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    addl $1, %ecx
+; X86-NEXT:    adcl $0, %esi
+; X86-NEXT:    adcl $0, %edx
+; X86-NEXT:    adcl $0, %ebx
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:    shldl $4, %edx, %edi
+; X86-NEXT:    movl %edx, %ebp
+; X86-NEXT:    shldl $4, %esi, %ebp
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    movl %ebx, %ecx
+; X86-NEXT:    shrl $28, %ecx
+; X86-NEXT:    orl %ebp, %ecx
+; X86-NEXT:    orl %edi, %ecx
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    jne .LBB0_1
+; X86-NEXT:  # %bb.2: # %exit
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl %ecx, (%eax)
+; X86-NEXT:    movl %esi, 4(%eax)
+; X86-NEXT:    movl %edx, 8(%eax)
+; X86-NEXT:    movl %ebx, 12(%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: opt_setcc_lt_power_of_2:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rdx
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:  .LBB0_1: # %loop
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    addq $1, %rax
+; X64-NEXT:    adcq $0, %rdx
+; X64-NEXT:    movq %rdx, %rcx
+; X64-NEXT:    shldq $4, %rax, %rcx
+; X64-NEXT:    movq %rdx, %rsi
+; X64-NEXT:    shrq $60, %rsi
+; X64-NEXT:    orq %rcx, %rsi
+; X64-NEXT:    jne .LBB0_1
+; X64-NEXT:  # %bb.2: # %exit
+; X64-NEXT:    retq
+  br label %loop
+
+loop:
+  %phi.a = phi i128 [ %a, %0 ], [ %inc, %loop ]
+  %inc = add i128 %phi.a, 1
+  %cmp = icmp ult i128 %inc, 1152921504606846976
+  br i1 %cmp, label %exit, label %loop
+
+exit:
+  ret i128 %inc
+}
+
+define i1 @opt_setcc_srl_eq_zero(i128 %a) nounwind {
+; X86-LABEL: opt_setcc_srl_eq_zero:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    shldl $15, %edx, %edi
+; X86-NEXT:    shldl $15, %ecx, %edx
+; X86-NEXT:    shrdl $17, %ecx, %eax
+; X86-NEXT:    orl %edi, %eax
+; X86-NEXT:    shrl $17, %esi
+; X86-NEXT:    orl %edx, %esi
+; X86-NEXT:    orl %eax, %esi
+; X86-NEXT:    sete %al
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl
+;
+; X64-LABEL: opt_setcc_srl_eq_zero:
+; X64:       # %bb.0:
+; X64-NEXT:    shrdq $17, %rsi, %rdi
+; X64-NEXT:    shrq $17, %rsi
+; X64-NEXT:    orq %rdi, %rsi
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+   %srl = lshr i128 %a, 17
+   %cmp = icmp eq i128 %srl, 0
+   ret i1 %cmp
+}
+
+define i1 @opt_setcc_srl_ne_zero(i128 %a) nounwind {
+; X86-LABEL: opt_setcc_srl_ne_zero:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    shldl $15, %edx, %edi
+; X86-NEXT:    shldl $15, %ecx, %edx
+; X86-NEXT:    shrdl $17, %ecx, %eax
+; X86-NEXT:    orl %edi, %eax
+; X86-NEXT:    shrl $17, %esi
+; X86-NEXT:    orl %edx, %esi
+; X86-NEXT:    orl %eax, %esi
+; X86-NEXT:    setne %al
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl
+;
+; X64-LABEL: opt_setcc_srl_ne_zero:
+; X64:       # %bb.0:
+; X64-NEXT:    shrdq $17, %rsi, %rdi
+; X64-NEXT:    shrq $17, %rsi
+; X64-NEXT:    orq %rdi, %rsi
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+   %srl = lshr i128 %a, 17
+   %cmp = icmp ne i128 %srl, 0
+   ret i1 %cmp
+}
+
+define i1 @opt_setcc_shl_eq_zero(i128 %a) nounwind {
+; X86-LABEL: opt_setcc_shl_eq_zero:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    shldl $17, %esi, %edx
+; X86-NEXT:    shldl $17, %ecx, %esi
+; X86-NEXT:    shldl $17, %eax, %ecx
+; X86-NEXT:    shll $17, %eax
+; X86-NEXT:    orl %esi, %eax
+; X86-NEXT:    orl %edx, %ecx
+; X86-NEXT:    orl %eax, %ecx
+; X86-NEXT:    sete %al
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: opt_setcc_shl_eq_zero:
+; X64:       # %bb.0:
+; X64-NEXT:    shldq $17, %rdi, %rsi
+; X64-NEXT:    shlq $17, %rdi
+; X64-NEXT:    orq %rsi, %rdi
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+   %shl = shl i128 %a, 17
+   %cmp = icmp eq i128 %shl, 0
+   ret i1 %cmp
+}
+
+define i1 @opt_setcc_shl_ne_zero(i128 %a) nounwind {
+; X86-LABEL: opt_setcc_shl_ne_zero:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    shldl $17, %esi, %edx
+; X86-NEXT:    shldl $17, %ecx, %esi
+; X86-NEXT:    shldl $17, %eax, %ecx
+; X86-NEXT:    shll $17, %eax
+; X86-NEXT:    orl %esi, %eax
+; X86-NEXT:    orl %edx, %ecx
+; X86-NEXT:    orl %eax, %ecx
+; X86-NEXT:    setne %al
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: opt_setcc_shl_ne_zero:
+; X64:       # %bb.0:
+; X64-NEXT:    shldq $17, %rdi, %rsi
+; X64-NEXT:    shlq $17, %rdi
+; X64-NEXT:    orq %rsi, %rdi
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+   %shl = shl i128 %a, 17
+   %cmp = icmp ne i128 %shl, 0
+   ret i1 %cmp
+}
+
+; Negative test: the optimization should not be applied if the shift has multiple users.
+define i1 @opt_setcc_shl_eq_zero_multiple_shl_users(i128 %a) nounwind {
+; X86-LABEL: opt_setcc_shl_eq_zero_multiple_shl_users:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    shldl $17, %esi, %edx
+; X86-NEXT:    shldl $17, %ecx, %esi
+; X86-NEXT:    shldl $17, %eax, %ecx
+; X86-NEXT:    shll $17, %eax
+; X86-NEXT:    movl %ecx, %edi
+; X86-NEXT:    orl %edx, %edi
+; X86-NEXT:    movl %eax, %ebx
+; X86-NEXT:    orl %esi, %ebx
+; X86-NEXT:    orl %edi, %ebx
+; X86-NEXT:    sete %bl
+; X86-NEXT:    pushl %edx
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    pushl %ecx
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    calll use@PLT
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    movl %ebx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl
+;
+; X64-LABEL: opt_setcc_shl_eq_zero_multiple_shl_users:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    shldq $17, %rdi, %rsi
+; X64-NEXT:    shlq $17, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    orq %rsi, %rax
+; X64-NEXT:    sete %bl
+; X64-NEXT:    callq use@PLT
+; X64-NEXT:    movl %ebx, %eax
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+   %shl = shl i128 %a, 17
+   %cmp = icmp eq i128 %shl, 0
+   call void @use(i128 %shl)
+   ret i1 %cmp
+}
+
+; Check that the optimization is applied to a DAG of the appropriate shape
+; even when no shift was actually expanded.
+define i1 @opt_setcc_expanded_shl_correct_shifts(i64 %a, i64 %b) nounwind {
+; X86-LABEL: opt_setcc_expanded_shl_correct_shifts:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    shldl $17, %edx, %esi
+; X86-NEXT:    shldl $17, %ecx, %edx
+; X86-NEXT:    shldl $17, %eax, %ecx
+; X86-NEXT:    shll $17, %eax
+; X86-NEXT:    orl %edx, %eax
+; X86-NEXT:    orl %esi, %ecx
+; X86-NEXT:    orl %eax, %ecx
+; X86-NEXT:    sete %al
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: opt_setcc_expanded_shl_correct_shifts:
+; X64:       # %bb.0:
+; X64-NEXT:    shldq $17, %rsi, %rdi
+; X64-NEXT:    shlq $17, %rsi
+; X64-NEXT:    orq %rdi, %rsi
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+  %shl.a = shl i64 %a, 17
+  %srl.b = lshr i64 %b, 47
+  %or.0 = or i64 %shl.a, %srl.b
+  %shl.b = shl i64 %b, 17
+  %or.1 = or i64 %or.0, %shl.b
+  %cmp = icmp eq i64 %or.1, 0
+  ret i1 %cmp
+}
+
+; Negative test: the optimization should not be applied because
+; the constants used in the shifts do not match.
+define i1 @opt_setcc_expanded_shl_wrong_shifts(i64 %a, i64 %b) nounwind {
+; X86-LABEL: opt_setcc_expanded_shl_wrong_shifts:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    shldl $17, %edx, %esi
+; X86-NEXT:    shldl $17, %ecx, %edx
+; X86-NEXT:    shldl $18, %eax, %ecx
+; X86-NEXT:    shll $18, %eax
+; X86-NEXT:    orl %edx, %eax
+; X86-NEXT:    orl %esi, %ecx
+; X86-NEXT:    orl %eax, %ecx
+; X86-NEXT:    sete %al
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: opt_setcc_expanded_shl_wrong_shifts:
+; X64:       # %bb.0:
+; X64-NEXT:    shldq $17, %rsi, %rdi
+; X64-NEXT:    shlq $18, %rsi
+; X64-NEXT:    orq %rdi, %rsi
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+  %shl.a = shl i64 %a, 17
+  %srl.b = lshr i64 %b, 47
+  %or.0 = or i64 %shl.a, %srl.b
+  %shl.b = shl i64 %b, 18
+  %or.1 = or i64 %or.0, %shl.b
+  %cmp = icmp eq i64 %or.1, 0
+  ret i1 %cmp
+}
+
+declare void @use(i128 %a)
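
All three tests carry autogenerated FileCheck lines (see the NOTE header in
each file), so they currently lock in the unoptimized codegen. Once D111530
lands, the expected output can be refreshed by re-running
utils/update_llc_test_checks.py on each file, for example (assuming a locally
built llc, which can also be passed explicitly via --llc-binary):

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AArch64/icmp-shift-opt.ll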


More information about the llvm-commits mailing list