[llvm-commits] [llvm] r146974 - in /llvm/trunk: lib/Target/X86/X86ISelLowering.cpp lib/Target/X86/X86InstrCompiler.td test/CodeGen/X86/clz.ll

Chandler Carruth chandlerc at gmail.com
Tue Dec 20 03:19:37 PST 2011


Author: chandlerc
Date: Tue Dec 20 05:19:37 2011
New Revision: 146974

URL: http://llvm.org/viewvc/llvm-project?rev=146974&view=rev
Log:
Begin teaching the X86 target how to efficiently codegen patterns that
use the zero-undefined variants of CTTZ and CTLZ. These are just simple
patterns for now; there is more to be done before real-world code using
these constructs is optimized and codegen'ed properly on X86.
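
As a rough illustration (not part of this change), the zero-undef forms
typically come from front-end builtins such as __builtin_ctz and
__builtin_clz, which are undefined for a zero argument; that freedom is
what allows the backend to drop the cmov guard. A minimal sketch,
assuming a GCC/Clang-style compiler that provides these builtins:

#include <cstdio>

// Undefined behavior for x == 0, so the front end may emit llvm.cttz /
// llvm.ctlz with the zero-undef flag set, and the backend is then free
// to select a bare bsf/bsr with no cmov guard.
unsigned trailing_zeros(unsigned x) { return __builtin_ctz(x); }
unsigned leading_zeros(unsigned x)  { return __builtin_clz(x); }

int main() {
  std::printf("%u %u\n", trailing_zeros(8u), leading_zeros(8u)); // "3 28"
  return 0;
}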

The existing tests are spiffed up to check that we no longer generate
unnecessary cmov instructions, and that we generate the very important
'xor' that transforms the bsr result (the index of the most significant
one bit) into the number of leading (most significant) zero bits. They
also now check that the cmov is still produced when the variant with a
defined result for zero is used.
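
For reference, a minimal standalone sketch of the identity the 'xor'
relies on (bsr32 below is a hand-rolled software model of bsr, and a
GCC/Clang-style __builtin_clz is assumed): for a nonzero 32-bit value
the bsr index lies in [0, 31], so 31 - index can be computed as
index ^ 31, with no subtraction and no cmov.

#include <cassert>

// Software model of x86 bsr: index of the most significant set bit.
// Only meaningful for x != 0, matching bsr's undefined-on-zero result.
static unsigned bsr32(unsigned x) {
  unsigned idx = 0;
  while (x >>= 1) ++idx;
  return idx;
}

int main() {
  const unsigned tests[] = {1u, 2u, 5u, 0x80000000u, 0xdeadbeefu};
  for (unsigned x : tests)
    assert((bsr32(x) ^ 31u) == unsigned(__builtin_clz(x)));
  return 0;
}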

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/lib/Target/X86/X86InstrCompiler.td
    llvm/trunk/test/CodeGen/X86/clz.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=146974&r1=146973&r2=146974&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue Dec 20 05:19:37 2011
@@ -380,9 +380,6 @@
   setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);
 
   setOperationAction(ISD::CTTZ_ZERO_UNDEF  , MVT::i8   , Expand);
-  setOperationAction(ISD::CTTZ_ZERO_UNDEF  , MVT::i16  , Expand);
-  setOperationAction(ISD::CTTZ_ZERO_UNDEF  , MVT::i32  , Expand);
-  setOperationAction(ISD::CTTZ_ZERO_UNDEF  , MVT::i64  , Expand);
   if (Subtarget->hasBMI()) {
     setOperationAction(ISD::CTTZ           , MVT::i8   , Promote);
   } else {
@@ -394,9 +391,6 @@
   }
 
   setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i8   , Expand);
-  setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i16  , Expand);
-  setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i32  , Expand);
-  setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i64  , Expand);
   if (Subtarget->hasLZCNT()) {
     setOperationAction(ISD::CTLZ           , MVT::i8   , Promote);
   } else {

Modified: llvm/trunk/lib/Target/X86/X86InstrCompiler.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrCompiler.td?rev=146974&r1=146973&r2=146974&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrCompiler.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrCompiler.td Tue Dec 20 05:19:37 2011
@@ -1753,3 +1753,20 @@
           (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
 def : Pat<(and GR64:$src1, i64immSExt32:$src2),
           (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
+
+// Bit scan instruction patterns to match explicit zero-undef behavior.
+def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
+def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
+def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
+def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
+def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
+def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
+def : Pat<(ctlz_zero_undef GR16:$src), (XOR16ri (BSR16rr GR16:$src), 15)>;
+def : Pat<(ctlz_zero_undef GR32:$src), (XOR32ri (BSR32rr GR32:$src), 31)>;
+def : Pat<(ctlz_zero_undef GR64:$src), (XOR64ri8 (BSR64rr GR64:$src), 63)>;
+def : Pat<(ctlz_zero_undef (loadi16 addr:$src)),
+          (XOR16ri (BSR16rm addr:$src), 15)>;
+def : Pat<(ctlz_zero_undef (loadi32 addr:$src)),
+          (XOR32ri (BSR32rm addr:$src), 31)>;
+def : Pat<(ctlz_zero_undef (loadi64 addr:$src)),
+          (XOR64ri8 (BSR64rm addr:$src), 63)>;
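
(A quick standalone check, not part of the patch: the 15/31/63
immediates above are exactly width - 1, and any index i that bsr can
return for an N-bit nonzero operand satisfies i <= N - 1, so the
subtraction (N - 1) - i never borrows and equals (N - 1) ^ i.)

#include <cassert>

int main() {
  const unsigned widths[] = {16, 32, 64};
  for (unsigned n : widths)
    for (unsigned i = 0; i < n; ++i)        // every index bsr can return
      assert((n - 1) - i == ((n - 1) ^ i)); // subtract == xor, no borrow
  return 0;
}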

Modified: llvm/trunk/test/CodeGen/X86/clz.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/clz.ll?rev=146974&r1=146973&r2=146974&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/clz.ll (original)
+++ llvm/trunk/test/CodeGen/X86/clz.ll Tue Dec 20 05:19:37 2011
@@ -1,48 +1,65 @@
 ; RUN: llc < %s -march=x86 -mcpu=yonah | FileCheck %s
 
 define i32 @t1(i32 %x) nounwind  {
-	%tmp = tail call i32 @llvm.ctlz.i32( i32 %x, i1 true )
-	ret i32 %tmp
+  %tmp = tail call i32 @llvm.ctlz.i32( i32 %x, i1 true )
+  ret i32 %tmp
 ; CHECK: t1:
 ; CHECK: bsrl
-; CHECK: cmov
+; CHECK-NOT: cmov
+; CHECK: xorl $31,
+; CHECK: ret
 }
 
 declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone 
 
 define i32 @t2(i32 %x) nounwind  {
-	%tmp = tail call i32 @llvm.cttz.i32( i32 %x, i1 true )
-	ret i32 %tmp
+  %tmp = tail call i32 @llvm.cttz.i32( i32 %x, i1 true )
+  ret i32 %tmp
 ; CHECK: t2:
 ; CHECK: bsfl
-; CHECK: cmov
+; CHECK-NOT: cmov
+; CHECK: ret
 }
 
 declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone 
 
 define i16 @t3(i16 %x, i16 %y) nounwind  {
 entry:
-        %tmp1 = add i16 %x, %y
-	%tmp2 = tail call i16 @llvm.ctlz.i16( i16 %tmp1, i1 true )		; <i16> [#uses=1]
-	ret i16 %tmp2
+  %tmp1 = add i16 %x, %y
+  %tmp2 = tail call i16 @llvm.ctlz.i16( i16 %tmp1, i1 true )    ; <i16> [#uses=1]
+  ret i16 %tmp2
 ; CHECK: t3:
 ; CHECK: bsrw
-; CHECK: cmov
+; CHECK-NOT: cmov
+; CHECK: xorw $15,
+; CHECK: ret
 }
 
 declare i16 @llvm.ctlz.i16(i16, i1) nounwind readnone 
 
-; Don't generate the cmovne when the source is known non-zero (and bsr would
-; not set ZF).
-; rdar://9490949
-
 define i32 @t4(i32 %n) nounwind {
 entry:
+; Generate a cmov to handle zero inputs when necessary.
 ; CHECK: t4:
 ; CHECK: bsrl
+; CHECK: cmov
+; CHECK: xorl $31,
+; CHECK: ret
+  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %n, i1 false)
+  ret i32 %tmp1
+}
+
+define i32 @t5(i32 %n) nounwind {
+entry:
+; Don't generate the cmovne when the source is known non-zero (and bsr would
+; not set ZF).
+; rdar://9490949
+; CHECK: t5:
+; CHECK: bsrl
 ; CHECK-NOT: cmov
+; CHECK: xorl $31,
 ; CHECK: ret
   %or = or i32 %n, 1
-  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %or, i1 true)
+  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %or, i1 false)
   ret i32 %tmp1
 }
