[llvm] r341188 - [NFC][X86][AArch64] A few more patterns for [lack of] signed truncation check pattern.

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 31 01:52:03 PDT 2018


Author: lebedevri
Date: Fri Aug 31 01:52:03 2018
New Revision: 341188

URL: http://llvm.org/viewvc/llvm-project?rev=341188&view=rev
Log:
[NFC][X86][AArch64] A few more patterns for [lack of] signed truncation check pattern.
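
These tests cover the same predicate in its several IR spellings, per the
comment blocks in the test files below. A minimal sketch of the equivalences
(the function names here are illustrative, not taken from the test files):

; Canonical form: truncate to i8, sign-extend back, and compare.
define i1 @fits_in_i8_trunc_sext(i16 %x) {
  %t = trunc i16 %x to i8
  %s = sext i8 %t to i16
  %r = icmp eq i16 %s, %x
  ret i1 %r
}

; Shift form: shl 8 + ashr 8 performs the same in-register sign-extension.
define i1 @fits_in_i8_shifts(i16 %x) {
  %hi = shl i16 %x, 8
  %lo = ashr i16 %hi, 8
  %r = icmp eq i16 %lo, %x
  ret i1 %r
}

; Add form: %x is in [-128, 127] iff %x - 128 wraps into [0xFF00, 0xFFFF]
; unsigned, i.e. iff (add %x, -128) uge -256 (equivalently, ugt -257).
define i1 @fits_in_i8_add(i16 %x) {
  %a = add i16 %x, -128
  %r = icmp uge i16 %a, -256
  ret i1 %r
}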

Modified:
    llvm/trunk/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
    llvm/trunk/test/CodeGen/AArch64/signed-truncation-check.ll
    llvm/trunk/test/CodeGen/X86/lack-of-signed-truncation-check.ll
    llvm/trunk/test/CodeGen/X86/signed-truncation-check.ll

Modified: llvm/trunk/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll?rev=341188&r1=341187&r2=341188&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll Fri Aug 31 01:52:03 2018
@@ -10,7 +10,7 @@
 ; This can be expressed in several ways in IR:
 ;   trunc + sext + icmp ne <- not canonical
 ;   shl   + ashr + icmp ne
-;   add          + icmp ult
+;   add          + icmp ult/ule
 ;   add          + icmp uge/ugt
 ; However only the simplest form (with two shifts) gets lowered best.
 
@@ -176,6 +176,20 @@ define i1 @add_ultcmp_i64_i8(i64 %x) nou
   ret i1 %tmp1
 }
 
+; Slightly more canonical variant
+define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ulecmp_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, #128 // =128
+; CHECK-NEXT:    ubfx w8, w8, #8, #8
+; CHECK-NEXT:    cmp w8, #255 // =255
+; CHECK-NEXT:    cset w0, lo
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp ule i16 %tmp0, -257 ; ~0U << 8 - 1
+  ret i1 %tmp1
+}
+
 ; ---------------------------------------------------------------------------- ;
 ; add + icmp uge
 ; ---------------------------------------------------------------------------- ;

Modified: llvm/trunk/test/CodeGen/AArch64/signed-truncation-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/signed-truncation-check.ll?rev=341188&r1=341187&r2=341188&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/signed-truncation-check.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/signed-truncation-check.ll Fri Aug 31 01:52:03 2018
@@ -10,7 +10,7 @@
 ; This can be expressed in several ways in IR:
 ;   trunc + sext + icmp eq <- not canonical
 ;   shl   + ashr + icmp eq
-;   add          + icmp uge
+;   add          + icmp uge/ugt
 ;   add          + icmp ult/ule
 ; However only the simplest form (with two shifts) gets lowered best.
 
@@ -178,6 +178,20 @@ define i1 @add_ugecmp_i64_i8(i64 %x) nou
   ret i1 %tmp1
 }
 
+; Slightly more canonical variant
+define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ugtcmp_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, #128 // =128
+; CHECK-NEXT:    ubfx w8, w8, #8, #8
+; CHECK-NEXT:    cmp w8, #254 // =254
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp ugt i16 %tmp0, -257 ; ~0U << 8 - 1
+  ret i1 %tmp1
+}
+
 ; ---------------------------------------------------------------------------- ;
 ; add + icmp ult
 ; ---------------------------------------------------------------------------- ;

Modified: llvm/trunk/test/CodeGen/X86/lack-of-signed-truncation-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lack-of-signed-truncation-check.ll?rev=341188&r1=341187&r2=341188&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lack-of-signed-truncation-check.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lack-of-signed-truncation-check.ll Fri Aug 31 01:52:03 2018
@@ -11,7 +11,7 @@
 ; This can be expressed in several ways in IR:
 ;   trunc + sext + icmp ne <- not canonical
 ;   shl   + ashr + icmp ne
-;   add          + icmp ult
+;   add          + icmp ult/ule
 ;   add          + icmp uge/ugt
 ; However only the simplest form (with two shifts) gets lowered best.
 
@@ -288,6 +288,29 @@ define i1 @add_ultcmp_i64_i8(i64 %x) nou
   ret i1 %tmp1
 }
 
+; Slightly more canonical variant
+define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
+; X86-LABEL: add_ulecmp_i16_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addl $-128, %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    cmpl $65280, %eax # imm = 0xFF00
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ulecmp_i16_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    addl $-128, %edi
+; X64-NEXT:    movzwl %di, %eax
+; X64-NEXT:    cmpl $65280, %eax # imm = 0xFF00
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+  %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp ule i16 %tmp0, -257 ; ~0U << 8 - 1
+  ret i1 %tmp1
+}
+
 ; ---------------------------------------------------------------------------- ;
 ; add + icmp uge
 ; ---------------------------------------------------------------------------- ;

Modified: llvm/trunk/test/CodeGen/X86/signed-truncation-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/signed-truncation-check.ll?rev=341188&r1=341187&r2=341188&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/signed-truncation-check.ll (original)
+++ llvm/trunk/test/CodeGen/X86/signed-truncation-check.ll Fri Aug 31 01:52:03 2018
@@ -11,7 +11,7 @@
 ; This can be expressed in several ways in IR:
 ;   trunc + sext + icmp eq <- not canonical
 ;   shl   + ashr + icmp eq
-;   add          + icmp uge
+;   add          + icmp uge/ugt
 ;   add          + icmp ult/ule
 ; However only the simplest form (with two shifts) gets lowered best.
 
@@ -292,6 +292,29 @@ define i1 @add_ugecmp_i64_i8(i64 %x) nou
   ret i1 %tmp1
 }
 
+; Slightly more canonical variant
+define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
+; X86-LABEL: add_ugtcmp_i16_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addl $-128, %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    cmpl $65279, %eax # imm = 0xFEFF
+; X86-NEXT:    seta %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugtcmp_i16_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    addl $-128, %edi
+; X64-NEXT:    movzwl %di, %eax
+; X64-NEXT:    cmpl $65279, %eax # imm = 0xFEFF
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp ugt i16 %tmp0, -257 ; ~0U << 8 - 1
+  ret i1 %tmp1
+}
+
 ; ---------------------------------------------------------------------------- ;
 ; add + icmp ult
 ; ---------------------------------------------------------------------------- ;

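For the ule/ugt variants added here, the comparison constant shifts by one
relative to the ult/uge forms, since u <= C-1 and u < C test the same range.
A minimal sketch of the lack-of-fit pair (function names are illustrative,
not taken from the test files):

; Lack-of-fit check, as exercised by add_ulecmp_i16_i8 above:
; ult -256 and ule -257 are the same predicate, since -257 == -256 - 1.
define i1 @not_fits_in_i8_ult(i16 %x) {
  %a = add i16 %x, -128
  %r = icmp ult i16 %a, -256      ; true iff %x is outside [-128, 127]
  ret i1 %r
}

define i1 @not_fits_in_i8_ule(i16 %x) {
  %a = add i16 %x, -128
  %r = icmp ule i16 %a, -257      ; same predicate, off-by-one constant
  ret i1 %r
}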