[llvm] r337400 - [NFC][X86][AArch64][DAGCombine] More tests for optimizeSetCCOfSignedTruncationCheck()

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 18 09:19:06 PDT 2018


Author: lebedevri
Date: Wed Jul 18 09:19:06 2018
New Revision: 337400

URL: http://llvm.org/viewvc/llvm-project?rev=337400&view=rev
Log:
[NFC][X86][AArch64][DAGCombine] More tests for optimizeSetCCOfSignedTruncationCheck()

At least one of these new spellings is more canonical than the
forms already tested, so we really do have to handle it.
https://godbolt.org/g/pkzP3X
https://rise4fun.com/Alive/pQyh
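
For context: for an unsigned comparison, "ugt 255" is exactly "uge 256"
and "ule 255" is exactly "ult 256", so the check can reach the DAG
combiner spelled with either the strict or the non-strict predicate.
A minimal editor's sketch of the two equivalent spellings (illustration
only, not part of the commit):

  define i1 @sketch(i16 %x) {
    %t0 = add i16 %x, 128         ; 1U << (8-1)
    %a = icmp uge i16 %t0, 256    ; spelling already tested
    %b = icmp ugt i16 %t0, 255    ; spelling added here
    %same = icmp eq i1 %a, %b
    ret i1 %same                  ; always true
  }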

Modified:
    llvm/trunk/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
    llvm/trunk/test/CodeGen/AArch64/signed-truncation-check.ll
    llvm/trunk/test/CodeGen/X86/lack-of-signed-truncation-check.ll
    llvm/trunk/test/CodeGen/X86/signed-truncation-check.ll

Modified: llvm/trunk/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll?rev=337400&r1=337399&r2=337400&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll Wed Jul 18 09:19:06 2018
@@ -11,7 +11,7 @@
 ;   trunc + sext + icmp ne <- not canonical
 ;   shl   + ashr + icmp ne
 ;   add          + icmp ult
-;   add          + icmp uge
+;   add          + icmp uge/ugt
 ; However only the simplest form (with two shifts) gets lowered best.
 
 ; ---------------------------------------------------------------------------- ;
@@ -253,6 +253,20 @@ define i1 @add_ugecmp_i64_i8(i64 %x) nou
   ret i1 %tmp1
 }
 
+; Slightly more canonical variant
+define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ugtcmp_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add w8, w0, #128 // =128
+; CHECK-NEXT:    and w8, w8, #0xffff
+; CHECK-NEXT:    cmp w8, #255 // =255
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ugt i16 %tmp0, 255 ; (1U << 8) - 1
+  ret i1 %tmp1
+}
+
 ; Negative tests
 ; ---------------------------------------------------------------------------- ;
 
@@ -367,3 +381,14 @@ define i1 @add_ugecmp_bad_i24_i8(i24 %x)
   %tmp1 = icmp uge i24 %tmp0, 256 ; 1U << 8
   ret i1 %tmp1
 }
+
+; Slightly more canonical variant
+define i1 @add_ugtcmp_bad_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ugtcmp_bad_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ugt i16 %tmp0, -1 ; when we +1 it, it will wrap to 0
+  ret i1 %tmp1
+}
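
For reference, the forms listed in the header comment above are all
equivalent ways of asking whether %x is NOT the sign-extension of its
low 8 bits, i.e. whether truncating it to i8 would change the value.
An editor's sketch of the add-based form next to the two-shift form
that the comment says lowers best (illustration, not part of the
commit):

  define i1 @not_sext_via_add(i16 %x) {
    %t0 = add i16 %x, 128         ; maps [-128, 127] onto [0, 255]
    %t1 = icmp ugt i16 %t0, 255   ; out of range -> does not fit in i8
    ret i1 %t1
  }

  define i1 @not_sext_via_shifts(i16 %x) {
    %s0 = shl i16 %x, 8
    %s1 = ashr i16 %s0, 8         ; sign-extend the low 8 bits
    %t1 = icmp ne i16 %s1, %x     ; differs -> %x was not sign-extended
    ret i1 %t1
  }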

Modified: llvm/trunk/test/CodeGen/AArch64/signed-truncation-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/signed-truncation-check.ll?rev=337400&r1=337399&r2=337400&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/signed-truncation-check.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/signed-truncation-check.ll Wed Jul 18 09:19:06 2018
@@ -11,7 +11,7 @@
 ;   trunc + sext + icmp eq <- not canonical
 ;   shl   + ashr + icmp eq
 ;   add          + icmp uge
-;   add          + icmp ult
+;   add          + icmp ult/ule
 ; However only the simplest form (with two shifts) gets lowered best.
 
 ; ---------------------------------------------------------------------------- ;
@@ -255,6 +255,20 @@ define i1 @add_ultcmp_i64_i8(i64 %x) nou
   ret i1 %tmp1
 }
 
+; Slightly more canonical variant
+define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ulecmp_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb w8, w0
+; CHECK-NEXT:    and w8, w8, #0xffff
+; CHECK-NEXT:    cmp w8, w0, uxth
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ule i16 %tmp0, 255 ; (1U << 8) - 1
+  ret i1 %tmp1
+}
+
 ; Negative tests
 ; ---------------------------------------------------------------------------- ;
 
@@ -368,3 +382,13 @@ define i1 @add_ultcmp_bad_i24_i8(i24 %x)
   %tmp1 = icmp ult i24 %tmp0, 256 ; 1U << 8
   ret i1 %tmp1
 }
+
+define i1 @add_ulecmp_bad_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ulecmp_bad_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w0, wzr, #0x1
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ule i16 %tmp0, -1 ; when we +1 it, it will wrap to 0
+  ret i1 %tmp1
+}
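
This file tests the complementary check: the same forms with eq/ule,
true when %x IS the sign-extension of its low 8 bits. An editor's
sketch pairing the new ule spelling with the trunc+sext form from the
header comment (illustration, not part of the commit):

  define i1 @is_sext_via_add(i16 %x) {
    %t0 = add i16 %x, 128         ; maps [-128, 127] onto [0, 255]
    %t1 = icmp ule i16 %t0, 255   ; in range -> fits in i8
    ret i1 %t1
  }

  define i1 @is_sext_via_trunc(i16 %x) {
    %t = trunc i16 %x to i8
    %s = sext i8 %t to i16
    %ok = icmp eq i16 %s, %x      ; round-trips -> %x was sign-extended
    ret i1 %ok
  }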

Modified: llvm/trunk/test/CodeGen/X86/lack-of-signed-truncation-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lack-of-signed-truncation-check.ll?rev=337400&r1=337399&r2=337400&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lack-of-signed-truncation-check.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lack-of-signed-truncation-check.ll Wed Jul 18 09:19:06 2018
@@ -12,7 +12,7 @@
 ;   trunc + sext + icmp ne <- not canonical
 ;   shl   + ashr + icmp ne
 ;   add          + icmp ult
-;   add          + icmp uge
+;   add          + icmp uge/ugt
 ; However only the simplest form (with two shifts) gets lowered best.
 
 ; ---------------------------------------------------------------------------- ;
@@ -418,6 +418,29 @@ define i1 @add_ugecmp_i64_i8(i64 %x) nou
   ret i1 %tmp1
 }
 
+; Slightly more canonical variant
+define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
+; X86-LABEL: add_ugtcmp_i16_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $128, %eax
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    cmpl $255, %eax
+; X86-NEXT:    seta %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugtcmp_i16_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    subl $-128, %edi
+; X64-NEXT:    movzwl %di, %eax
+; X64-NEXT:    cmpl $255, %eax
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ugt i16 %tmp0, 255 ; (1U << 8) - 1
+  ret i1 %tmp1
+}
+
 ; Negative tests
 ; ---------------------------------------------------------------------------- ;
 
@@ -602,3 +625,14 @@ define i1 @add_ugecmp_bad_i24_i8(i24 %x)
   %tmp1 = icmp uge i24 %tmp0, 256 ; 1U << 8
   ret i1 %tmp1
 }
+
+; Slightly more canonical variant
+define i1 @add_ugtcmp_bad_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ugtcmp_bad_i16_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    ret{{[l|q]}}
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ugt i16 %tmp0, -1 ; when we +1 it, it will wrap to 0
+  ret i1 %tmp1
+}

Modified: llvm/trunk/test/CodeGen/X86/signed-truncation-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/signed-truncation-check.ll?rev=337400&r1=337399&r2=337400&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/signed-truncation-check.ll (original)
+++ llvm/trunk/test/CodeGen/X86/signed-truncation-check.ll Wed Jul 18 09:19:06 2018
@@ -12,7 +12,7 @@
 ;   trunc + sext + icmp eq <- not canonical
 ;   shl   + ashr + icmp eq
 ;   add          + icmp uge
-;   add          + icmp ult
+;   add          + icmp ult/ule
 ; However only the simplest form (with two shifts) gets lowered best.
 
 ; ---------------------------------------------------------------------------- ;
@@ -422,6 +422,27 @@ define i1 @add_ultcmp_i64_i8(i64 %x) nou
   ret i1 %tmp1
 }
 
+; Slightly more canonical variant
+define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
+; X86-LABEL: add_ulecmp_i16_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movsbl %al, %ecx
+; X86-NEXT:    cmpw %ax, %cx
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ulecmp_i16_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movsbl %dil, %eax
+; X64-NEXT:    cmpw %di, %ax
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ule i16 %tmp0, 255 ; (1U << 8) - 1
+  ret i1 %tmp1
+}
+
 ; Negative tests
 ; ---------------------------------------------------------------------------- ;
 
@@ -602,3 +623,13 @@ define i1 @add_ultcmp_bad_i24_i8(i24 %x)
   %tmp1 = icmp ult i24 %tmp0, 256 ; 1U << 8
   ret i1 %tmp1
 }
+
+define i1 @add_ulecmp_bad_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ulecmp_bad_i16_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    ret{{[l|q]}}
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ule i16 %tmp0, -1 ; when we +1 it, it will wrap to 0
+  ret i1 %tmp1
+}
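
The "bad" tests comparing against -1 fold to constants because -1 as
i16 is 0xFFFF, the maximum unsigned value: no value is ugt it and every
value is ule it, which is what the checked asm (a constant 0 or 1)
reflects on both targets. A sketch of the folds (editor's
illustration):

  define i1 @always_false(i16 %x) {
    %t0 = add i16 %x, 128
    %t1 = icmp ugt i16 %t0, -1    ; nothing is ugt 0xFFFF -> false
    ret i1 %t1
  }

  define i1 @always_true(i16 %x) {
    %t0 = add i16 %x, 128
    %t1 = icmp ule i16 %t0, -1    ; everything is ule 0xFFFF -> true
    ret i1 %t1
  }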
