[llvm] r308286 - [X86] Add test case for PR32282

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 18 03:09:40 PDT 2017


Author: rksimon
Date: Tue Jul 18 03:09:40 2017
New Revision: 308286

URL: http://llvm.org/viewvc/llvm-project?rev=308286&view=rev
Log:
[X86] Add test case for PR32282

Added:
    llvm/trunk/test/CodeGen/X86/pr32282.ll

Added: llvm/trunk/test/CodeGen/X86/pr32282.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pr32282.ll?rev=308286&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pr32282.ll (added)
+++ llvm/trunk/test/CodeGen/X86/pr32282.ll Tue Jul 18 03:09:40 2017
@@ -0,0 +1,104 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=X64
+
+; Check for assert in foldMaskAndShiftToScale due to out of range mask scaling.
+
+; Globals read by @foo below: @d feeds the or/xor mask chain under test,
+; @e/@b/@c combine to form the shift amount. (The archive's address
+; obfuscation had mangled each leading '@' into ' at ', breaking the IR.)
+@b = common global i8 zeroinitializer, align 1
+@c = common global i8 zeroinitializer, align 1
+@d = common global i64 zeroinitializer, align 8
+@e = common global i64 zeroinitializer, align 8
+
+define void @foo() {
+; X86-LABEL: foo:
+; X86:       # BB#0:
+; X86-NEXT:    pushl %eax
+; X86-NEXT:  .Lcfi0:
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    movl d, %eax
+; X86-NEXT:    movl d+4, %ecx
+; X86-NEXT:    movl $701685459, %edx # imm = 0x29D2DED3
+; X86-NEXT:    andnl %edx, %ecx, %ecx
+; X86-NEXT:    movl $-564453154, %edx # imm = 0xDE5B20DE
+; X86-NEXT:    andnl %edx, %eax, %edx
+; X86-NEXT:    shrdl $21, %ecx, %edx
+; X86-NEXT:    shrl $21, %ecx
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    testb %al, %al
+; X86-NEXT:    cmovnel %ecx, %edx
+; X86-NEXT:    cmovnel %eax, %ecx
+; X86-NEXT:    andl $-2, %edx
+; X86-NEXT:    andl $2147483647, %ecx # imm = 0x7FFFFFFF
+; X86-NEXT:    addl $7, %edx
+; X86-NEXT:    adcxl %eax, %ecx
+; X86-NEXT:    pushl %ecx
+; X86-NEXT:  .Lcfi1:
+; X86-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NEXT:    pushl %edx
+; X86-NEXT:  .Lcfi2:
+; X86-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NEXT:    pushl $0
+; X86-NEXT:  .Lcfi3:
+; X86-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NEXT:    pushl $0
+; X86-NEXT:  .Lcfi4:
+; X86-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NEXT:    calll __divdi3
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:  .Lcfi5:
+; X86-NEXT:    .cfi_adjust_cfa_offset -16
+; X86-NEXT:    orl %eax, %edx
+; X86-NEXT:    setne {{[0-9]+}}(%esp)
+; X86-NEXT:    popl %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: foo:
+; X64:       # BB#0:
+; X64-NEXT:    movq {{.*}}(%rip), %rax
+; X64-NEXT:    movabsq $3013716102212485120, %rcx # imm = 0x29D2DED3DE400000
+; X64-NEXT:    andnq %rcx, %rax, %rcx
+; X64-NEXT:    shrq $21, %rcx
+; X64-NEXT:    addq $7, %rcx
+; X64-NEXT:    movabsq $4393751543808, %rax # imm = 0x3FF00000000
+; X64-NEXT:    testq %rax, %rcx
+; X64-NEXT:    je .LBB0_1
+; X64-NEXT:  # BB#2:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    idivq %rcx
+; X64-NEXT:    jmp .LBB0_3
+; X64-NEXT:  .LBB0_1:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    divl %ecx
+; X64-NEXT:    # kill: %EAX<def> %EAX<kill> %RAX<def>
+; X64-NEXT:  .LBB0_3:
+; X64-NEXT:    testq %rax, %rax
+; X64-NEXT:    setne -{{[0-9]+}}(%rsp)
+; X64-NEXT:    retq
+  ; Reproducer for PR32282: ~(-3013716102214263007 | @d) is arithmetic-
+  ; shifted right by an amount derived from @e/@b/@c, masked with
+  ; 0x7FFFFFFFFFFFFFFE, and used as the divisor of (0 sdiv %20). The
+  ; shift+mask combination is what previously hit the out-of-range mask
+  ; scaling assert in foldMaskAndShiftToScale (see header comment). The
+  ; (result != 0) byte is stored to a local alloca so the chain stays live.
+  %1 = alloca i8, align 1
+  %2 = load i64, i64* @d, align 8
+  %3 = or i64 -3013716102214263007, %2
+  %4 = xor i64 %3, -1
+  %5 = load i64, i64* @e, align 8
+  %6 = load i8, i8* @b, align 1
+  %7 = trunc i8 %6 to i1
+  %8 = zext i1 %7 to i64
+  %9 = xor i64 %5, %8
+  %10 = load i8, i8* @c, align 1
+  %11 = trunc i8 %10 to i1
+  %12 = zext i1 %11 to i32
+  %13 = or i32 551409149, %12
+  %14 = sub nsw i32 %13, 551409131
+  %15 = zext i32 %14 to i64
+  %16 = shl i64 %9, %15
+  %17 = sub nsw i64 %16, 223084523
+  %18 = ashr i64 %4, %17
+  %19 = and i64 %18, 9223372036854775806
+  %20 = add nsw i64 7, %19
+  %21 = sdiv i64 0, %20
+  %22 = icmp ne i64 %21, 0
+  %23 = zext i1 %22 to i8
+  store i8 %23, i8* %1, align 1
+  ret void
+}




More information about the llvm-commits mailing list