[llvm] 71d64ed - [X86][Peephole] Add NDD entries for EFLAGS optimization

Shengchen Kan via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 23 23:51:25 PST 2024


Author: Shengchen Kan
Date: 2024-01-24T15:47:58+08:00
New Revision: 71d64ed80f8b7556be6954b2c4d663c7d89f476d

URL: https://github.com/llvm/llvm-project/commit/71d64ed80f8b7556be6954b2c4d663c7d89f476d
DIFF: https://github.com/llvm/llvm-project/commit/71d64ed80f8b7556be6954b2c4d663c7d89f476d.diff

LOG: [X86][Peephole] Add NDD entries for EFLAGS optimization
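
For context: APX "new data destination" (NDD) instructions are EVEX-encoded
forms that write their result to a separate destination register while
updating EFLAGS exactly like their legacy two-operand counterparts, so the
peephole EFLAGS optimization can treat e.g. SUB32rr_ND the same as SUB32rr.
The switch cases below are extended with the CASE_ND macro, which emits a
case label for both the legacy opcode and its _ND twin. A sketch of the
helper, quoted from memory (the actual definition lives in X86InstrInfo.cpp):

  // Expands to case labels for the legacy opcode and its NDD variant.
  #define CASE_ND(OP)                                                        \
    case X86::OP:                                                            \
    case X86::OP##_ND: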

Added: 
    llvm/test/CodeGen/X86/apx/optimize-compare.mir
    llvm/test/CodeGen/X86/apx/shift-eflags.ll

Modified: 
    llvm/lib/Target/X86/X86InstrInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index c5c6d5a67c16e14..b42d8aad48b3f55 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4809,96 +4809,96 @@ inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag,
 
   // The shift instructions only modify ZF if their shift count is non-zero.
   // N.B.: The processor truncates the shift count depending on the encoding.
-  case X86::SAR8ri:
-  case X86::SAR16ri:
-  case X86::SAR32ri:
-  case X86::SAR64ri:
-  case X86::SHR8ri:
-  case X86::SHR16ri:
-  case X86::SHR32ri:
-  case X86::SHR64ri:
+  CASE_ND(SAR8ri)
+  CASE_ND(SAR16ri)
+  CASE_ND(SAR32ri)
+  CASE_ND(SAR64ri)
+  CASE_ND(SHR8ri)
+  CASE_ND(SHR16ri)
+  CASE_ND(SHR32ri)
+  CASE_ND(SHR64ri)
     return getTruncatedShiftCount(MI, 2) != 0;
 
   // Some left shift instructions can be turned into LEA instructions but only
   // if their flags aren't used. Avoid transforming such instructions.
-  case X86::SHL8ri:
-  case X86::SHL16ri:
-  case X86::SHL32ri:
-  case X86::SHL64ri: {
+  CASE_ND(SHL8ri)
+  CASE_ND(SHL16ri)
+  CASE_ND(SHL32ri)
+  CASE_ND(SHL64ri) {
     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
     if (isTruncatedShiftCountForLEA(ShAmt))
       return false;
     return ShAmt != 0;
   }
 
-  case X86::SHRD16rri8:
-  case X86::SHRD32rri8:
-  case X86::SHRD64rri8:
-  case X86::SHLD16rri8:
-  case X86::SHLD32rri8:
-  case X86::SHLD64rri8:
+  CASE_ND(SHRD16rri8)
+  CASE_ND(SHRD32rri8)
+  CASE_ND(SHRD64rri8)
+  CASE_ND(SHLD16rri8)
+  CASE_ND(SHLD32rri8)
+  CASE_ND(SHLD64rri8)
     return getTruncatedShiftCount(MI, 3) != 0;
 
-  case X86::SUB64ri32:
-  case X86::SUB32ri:
-  case X86::SUB16ri:
-  case X86::SUB8ri:
-  case X86::SUB64rr:
-  case X86::SUB32rr:
-  case X86::SUB16rr:
-  case X86::SUB8rr:
-  case X86::SUB64rm:
-  case X86::SUB32rm:
-  case X86::SUB16rm:
-  case X86::SUB8rm:
-  case X86::DEC64r:
-  case X86::DEC32r:
-  case X86::DEC16r:
-  case X86::DEC8r:
-  case X86::ADD64ri32:
-  case X86::ADD32ri:
-  case X86::ADD16ri:
-  case X86::ADD8ri:
-  case X86::ADD64rr:
-  case X86::ADD32rr:
-  case X86::ADD16rr:
-  case X86::ADD8rr:
-  case X86::ADD64rm:
-  case X86::ADD32rm:
-  case X86::ADD16rm:
-  case X86::ADD8rm:
-  case X86::INC64r:
-  case X86::INC32r:
-  case X86::INC16r:
-  case X86::INC8r:
-  case X86::ADC64ri32:
-  case X86::ADC32ri:
-  case X86::ADC16ri:
-  case X86::ADC8ri:
-  case X86::ADC64rr:
-  case X86::ADC32rr:
-  case X86::ADC16rr:
-  case X86::ADC8rr:
-  case X86::ADC64rm:
-  case X86::ADC32rm:
-  case X86::ADC16rm:
-  case X86::ADC8rm:
-  case X86::SBB64ri32:
-  case X86::SBB32ri:
-  case X86::SBB16ri:
-  case X86::SBB8ri:
-  case X86::SBB64rr:
-  case X86::SBB32rr:
-  case X86::SBB16rr:
-  case X86::SBB8rr:
-  case X86::SBB64rm:
-  case X86::SBB32rm:
-  case X86::SBB16rm:
-  case X86::SBB8rm:
-  case X86::NEG8r:
-  case X86::NEG16r:
-  case X86::NEG32r:
-  case X86::NEG64r:
+  CASE_ND(SUB64ri32)
+  CASE_ND(SUB32ri)
+  CASE_ND(SUB16ri)
+  CASE_ND(SUB8ri)
+  CASE_ND(SUB64rr)
+  CASE_ND(SUB32rr)
+  CASE_ND(SUB16rr)
+  CASE_ND(SUB8rr)
+  CASE_ND(SUB64rm)
+  CASE_ND(SUB32rm)
+  CASE_ND(SUB16rm)
+  CASE_ND(SUB8rm)
+  CASE_ND(DEC64r)
+  CASE_ND(DEC32r)
+  CASE_ND(DEC16r)
+  CASE_ND(DEC8r)
+  CASE_ND(ADD64ri32)
+  CASE_ND(ADD32ri)
+  CASE_ND(ADD16ri)
+  CASE_ND(ADD8ri)
+  CASE_ND(ADD64rr)
+  CASE_ND(ADD32rr)
+  CASE_ND(ADD16rr)
+  CASE_ND(ADD8rr)
+  CASE_ND(ADD64rm)
+  CASE_ND(ADD32rm)
+  CASE_ND(ADD16rm)
+  CASE_ND(ADD8rm)
+  CASE_ND(INC64r)
+  CASE_ND(INC32r)
+  CASE_ND(INC16r)
+  CASE_ND(INC8r)
+  CASE_ND(ADC64ri32)
+  CASE_ND(ADC32ri)
+  CASE_ND(ADC16ri)
+  CASE_ND(ADC8ri)
+  CASE_ND(ADC64rr)
+  CASE_ND(ADC32rr)
+  CASE_ND(ADC16rr)
+  CASE_ND(ADC8rr)
+  CASE_ND(ADC64rm)
+  CASE_ND(ADC32rm)
+  CASE_ND(ADC16rm)
+  CASE_ND(ADC8rm)
+  CASE_ND(SBB64ri32)
+  CASE_ND(SBB32ri)
+  CASE_ND(SBB16ri)
+  CASE_ND(SBB8ri)
+  CASE_ND(SBB64rr)
+  CASE_ND(SBB32rr)
+  CASE_ND(SBB16rr)
+  CASE_ND(SBB8rr)
+  CASE_ND(SBB64rm)
+  CASE_ND(SBB32rm)
+  CASE_ND(SBB16rm)
+  CASE_ND(SBB8rm)
+  CASE_ND(NEG8r)
+  CASE_ND(NEG16r)
+  CASE_ND(NEG32r)
+  CASE_ND(NEG64r)
   case X86::LZCNT16rr:
   case X86::LZCNT16rm:
   case X86::LZCNT32rr:
@@ -4918,42 +4918,42 @@ inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag,
   case X86::TZCNT64rr:
   case X86::TZCNT64rm:
     return true;
-  case X86::AND64ri32:
-  case X86::AND32ri:
-  case X86::AND16ri:
-  case X86::AND8ri:
-  case X86::AND64rr:
-  case X86::AND32rr:
-  case X86::AND16rr:
-  case X86::AND8rr:
-  case X86::AND64rm:
-  case X86::AND32rm:
-  case X86::AND16rm:
-  case X86::AND8rm:
-  case X86::XOR64ri32:
-  case X86::XOR32ri:
-  case X86::XOR16ri:
-  case X86::XOR8ri:
-  case X86::XOR64rr:
-  case X86::XOR32rr:
-  case X86::XOR16rr:
-  case X86::XOR8rr:
-  case X86::XOR64rm:
-  case X86::XOR32rm:
-  case X86::XOR16rm:
-  case X86::XOR8rm:
-  case X86::OR64ri32:
-  case X86::OR32ri:
-  case X86::OR16ri:
-  case X86::OR8ri:
-  case X86::OR64rr:
-  case X86::OR32rr:
-  case X86::OR16rr:
-  case X86::OR8rr:
-  case X86::OR64rm:
-  case X86::OR32rm:
-  case X86::OR16rm:
-  case X86::OR8rm:
+  CASE_ND(AND64ri32)
+  CASE_ND(AND32ri)
+  CASE_ND(AND16ri)
+  CASE_ND(AND8ri)
+  CASE_ND(AND64rr)
+  CASE_ND(AND32rr)
+  CASE_ND(AND16rr)
+  CASE_ND(AND8rr)
+  CASE_ND(AND64rm)
+  CASE_ND(AND32rm)
+  CASE_ND(AND16rm)
+  CASE_ND(AND8rm)
+  CASE_ND(XOR64ri32)
+  CASE_ND(XOR32ri)
+  CASE_ND(XOR16ri)
+  CASE_ND(XOR8ri)
+  CASE_ND(XOR64rr)
+  CASE_ND(XOR32rr)
+  CASE_ND(XOR16rr)
+  CASE_ND(XOR8rr)
+  CASE_ND(XOR64rm)
+  CASE_ND(XOR32rm)
+  CASE_ND(XOR16rm)
+  CASE_ND(XOR8rm)
+  CASE_ND(OR64ri32)
+  CASE_ND(OR32ri)
+  CASE_ND(OR16ri)
+  CASE_ND(OR8ri)
+  CASE_ND(OR64rr)
+  CASE_ND(OR32rr)
+  CASE_ND(OR16rr)
+  CASE_ND(OR8rr)
+  CASE_ND(OR64rm)
+  CASE_ND(OR32rm)
+  CASE_ND(OR16rm)
+  CASE_ND(OR8rm)
   case X86::ANDN32rr:
   case X86::ANDN32rm:
   case X86::ANDN64rr:
@@ -5035,10 +5035,10 @@ static X86::CondCode isUseDefConvertible(const MachineInstr &MI) {
   switch (MI.getOpcode()) {
   default:
     return X86::COND_INVALID;
-  case X86::NEG8r:
-  case X86::NEG16r:
-  case X86::NEG32r:
-  case X86::NEG64r:
+  CASE_ND(NEG8r)
+  CASE_ND(NEG16r)
+  CASE_ND(NEG32r)
+  CASE_ND(NEG64r)
     return X86::COND_AE;
   case X86::LZCNT16rr:
   case X86::LZCNT32rr:

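A note on the shift cases above: getTruncatedShiftCount matters because the
processor masks the shift count before use (to 5 bits for 8/16/32-bit
operands and 6 bits for 64-bit ones), and a shift whose effective count is
zero leaves EFLAGS unmodified, so its def is not convertible. A minimal model
of that masking rule (a plain C++ illustration, not the LLVM helper itself):

  #include <cassert>

  // Simplified hardware rule: the count is masked to the operand width.
  unsigned truncatedShiftCount(unsigned Imm, unsigned OperandBits) {
    return Imm & (OperandBits == 64 ? 0x3f : 0x1f);
  }

  int main() {
    assert(truncatedShiftCount(32, 32) == 0);  // effective no-op, flags kept
    assert(truncatedShiftCount(14, 32) == 14); // the count used in the tests
    return 0;
  }
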
diff --git a/llvm/test/CodeGen/X86/apx/optimize-compare.mir b/llvm/test/CodeGen/X86/apx/optimize-compare.mir
new file mode 100644
index 000000000000000..7eabeb30f2ee43d
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/optimize-compare.mir
@@ -0,0 +1,73 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -o - %s -mtriple=x86_64-- -run-pass peephole-opt -mattr=+ndd | FileCheck %s
+
+---
+name: opt_redundant_flags_0
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: opt_redundant_flags_0
+    ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr32 = COPY $edi
+    ; CHECK-NEXT: [[SUB32rr_ND:%[0-9]+]]:gr32 = SUB32rr_ND [[COPY]], [[COPY1]], implicit-def $eflags
+    ; CHECK-NEXT: $eax = COPY [[SUB32rr_ND]]
+    ; CHECK-NEXT: $bl = SETCCr 2, implicit $eflags
+    %0:gr32 = COPY $esi
+    %1:gr32 = COPY $edi
+    %2:gr32 = SUB32rr_ND %0, %1, implicit-def dead $eflags
+    $eax = COPY %2
+    ; CMP should be removed.
+    CMP32rr %0, %1, implicit-def $eflags
+    $bl = SETCCr 2, implicit $eflags
+...
+---
+name: opt_redundant_flags_1
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: opt_redundant_flags_1
+    ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr32 = COPY $edi
+    ; CHECK-NEXT: [[SUB32rr_ND:%[0-9]+]]:gr32 = SUB32rr_ND [[COPY]], [[COPY1]], implicit-def $eflags
+    ; CHECK-NEXT: $eax = COPY [[SUB32rr_ND]]
+    ; CHECK-NEXT: $bl = SETCCr 6, implicit $eflags
+    %0:gr32 = COPY $esi
+    %1:gr32 = COPY $edi
+    %2:gr32 = SUB32rr_ND %0, %1, implicit-def dead $eflags
+    $eax = COPY %2
+    ; CMP should be removed.
+    CMP32rr %1, %0, implicit-def $eflags
+    $bl = SETCCr 3, implicit $eflags
+...
+---
+name: opt_redundant_flags_2
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: opt_redundant_flags_2
+    ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr32 = COPY $edi
+    ; CHECK-NEXT: [[SUB32rr_ND:%[0-9]+]]:gr32 = SUB32rr_ND [[COPY]], [[COPY1]], implicit-def $eflags
+    ; CHECK-NEXT: $cl = SETCCr 2, implicit $eflags
+    ; CHECK-NEXT: $eax = COPY [[SUB32rr_ND]]
+    ; CHECK-NEXT: $bl = SETCCr 2, implicit $eflags
+    %0:gr32 = COPY $esi
+    %1:gr32 = COPY $edi
+    %2:gr32 = SUB32rr_ND %0, %1, implicit-def $eflags
+    ; An extra EFLAGS reader shouldn't stop the optimization.
+    $cl = SETCCr 2, implicit $eflags
+    $eax = COPY %2
+    CMP32rr %0, %1, implicit-def $eflags
+    $bl = SETCCr 2, implicit $eflags
+...
+---
+name: opt_zerocmp_user_0
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: opt_zerocmp_user_0
+    ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi
+    ; CHECK-NEXT: [[NEG32r_ND:%[0-9]+]]:gr32 = NEG32r_ND [[COPY]], implicit-def $eflags
+    ; CHECK-NEXT: $al = SETCCr 3, implicit $eflags
+    %0:gr32 = COPY $esi
+    %1:gr32 = NEG32r_ND %0, implicit-def dead $eflags
+    ; TEST should be removed.
+    TEST32rr %0, %0, implicit-def $eflags
+    $al = SETCCr 4, implicit $eflags
+...

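To read the SETCCr immediates in the checks above: the redundant
CMP32rr %1, %0 + SETCCr 3 (COND_AE) folds into the earlier
SUB32rr_ND %0, %1 as SETCCr 6 (COND_BE), i.e. operands swapped, condition
mirrored; and TEST32rr %0, %0 + SETCCr 4 (COND_E) after NEG32r_ND becomes
SETCCr 3 (COND_AE), since NEG sets CF iff its operand is non-zero. A sketch
of the relevant X86::CondCode values (believed to match X86BaseInfo.h,
quoted from memory):

  enum CondCode {
    COND_O = 0, COND_NO = 1, COND_B = 2,  COND_AE = 3,
    COND_E = 4, COND_NE = 5, COND_BE = 6, COND_A = 7,
    // ...
  };
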
diff --git a/llvm/test/CodeGen/X86/apx/shift-eflags.ll b/llvm/test/CodeGen/X86/apx/shift-eflags.ll
new file mode 100644
index 000000000000000..ed181aa11623845
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/shift-eflags.ll
@@ -0,0 +1,364 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+ndd | FileCheck %s
+
+; Use shift eflags result when it won't cause stalls
+
+; ashr by constant - use sarl eflags result
+define i32 @ashr_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: ashr_const:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    sarl $14, %edi, %edx
+; CHECK-NEXT:    cmovnel %ecx, %eax
+; CHECK-NEXT:    retq
+  %s = ashr i32 %a0, 14
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %a2, i32 %a3
+  ret i32 %r
+}
+
+; lshr by constant - simplify to test
+define i32 @lshr_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: lshr_const:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    testl $-16384, %edi # imm = 0xC000
+; CHECK-NEXT:    cmovnel %ecx, %eax
+; CHECK-NEXT:    retq
+  %s = lshr i32 %a0, 14
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %a2, i32 %a3
+  ret i32 %r
+}
+
+; shl by constant - simplify to test
+define i32 @shl_const(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: shl_const:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    testl $262143, %edi # imm = 0x3FFFF
+; CHECK-NEXT:    cmovnel %ecx, %eax
+; CHECK-NEXT:    retq
+  %s = shl i32 %a0, 14
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %a2, i32 %a3
+  ret i32 %r
+}
+
+; ashr by constant and using shift result - use sarl eflags result
+define i32 @ashr_const_self_select(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: ashr_const_self_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    sarl $14, %edi, %eax
+; CHECK-NEXT:    cmovnel %edx, %eax
+; CHECK-NEXT:    retq
+  %s = ashr i32 %a0, 14
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %s, i32 %a2
+  ret i32 %r
+}
+
+; lshr by constant and using shift result - use shrl eflags result
+define i32 @lshr_const_self_select(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: lshr_const_self_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    shrl $14, %edi, %eax
+; CHECK-NEXT:    cmovnel %edx, %eax
+; CHECK-NEXT:    retq
+  %s = lshr i32 %a0, 14
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %s, i32 %a2
+  ret i32 %r
+}
+
+; shl by constant and using shift result - use shll eflags result
+define i32 @shl_const_self_select(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: shl_const_self_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    shll $14, %edi, %eax
+; CHECK-NEXT:    cmovnel %edx, %eax
+; CHECK-NEXT:    retq
+  %s = shl i32 %a0, 14
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %s, i32 %a2
+  ret i32 %r
+}
+
+; ashr by 1 - use sarl eflags result
+define i32 @ashr_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: ashr_const1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    sarl $1, %edi, %edx
+; CHECK-NEXT:    cmovnel %ecx, %eax
+; CHECK-NEXT:    retq
+  %s = ashr i32 %a0, 1
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %a2, i32 %a3
+  ret i32 %r
+}
+
+; lshr by 1 - simplify to test
+define i32 @lshr_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: lshr_const1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    testl $-2, %edi
+; CHECK-NEXT:    cmovnel %ecx, %eax
+; CHECK-NEXT:    retq
+  %s = lshr i32 %a0, 1
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %a2, i32 %a3
+  ret i32 %r
+}
+
+; shl by 1 - simplify to test
+define i32 @shl_const1(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: shl_const1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    testl $2147483647, %edi # imm = 0x7FFFFFFF
+; CHECK-NEXT:    cmovnel %ecx, %eax
+; CHECK-NEXT:    retq
+  %s = shl i32 %a0, 1
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %a2, i32 %a3
+  ret i32 %r
+}
+
+; ashr by 1 and using shift result - use sarl eflags result
+define i32 @ashr_const1_self_select(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: ashr_const1_self_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    sarl $1, %edi, %eax
+; CHECK-NEXT:    cmovnel %edx, %eax
+; CHECK-NEXT:    retq
+  %s = ashr i32 %a0, 1
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %s, i32 %a2
+  ret i32 %r
+}
+
+; lshr by 1 and using shift result - use shrl eflags result
+define i32 @lshr_const1_self_select(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: lshr_const1_self_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    shrl $1, %edi, %eax
+; CHECK-NEXT:    cmovnel %edx, %eax
+; CHECK-NEXT:    retq
+  %s = lshr i32 %a0, 1
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %s, i32 %a2
+  ret i32 %r
+}
+
+; shl by 1 and using shift result - use addl eflags result
+define i32 @shl_const1_self_select(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: shl_const1_self_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addl %edi, %edi, %eax
+; CHECK-NEXT:    cmovnel %edx, %eax
+; CHECK-NEXT:    retq
+  %s = shl i32 %a0, 1
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %s, i32 %a2
+  ret i32 %r
+}
+
+; ashr by variable - use separate test
+define i32 @ashr_var(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: ashr_var:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %ecx, %eax
+; CHECK-NEXT:    movl %esi, %ecx
+; CHECK-NEXT:    # kill: def $cl killed $cl killed $ecx
+; CHECK-NEXT:    sarl %cl, %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovel %edx, %eax
+; CHECK-NEXT:    retq
+  %s = ashr i32 %a0, %a1
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %a2, i32 %a3
+  ret i32 %r
+}
+
+; lshr by variable - use separate test
+define i32 @lshr_var(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: lshr_var:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %ecx, %eax
+; CHECK-NEXT:    movl %esi, %ecx
+; CHECK-NEXT:    # kill: def $cl killed $cl killed $ecx
+; CHECK-NEXT:    shrl %cl, %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovel %edx, %eax
+; CHECK-NEXT:    retq
+  %s = lshr i32 %a0, %a1
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %a2, i32 %a3
+  ret i32 %r
+}
+
+; shl by variable - use separate test
+define i32 @shl_var(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: shl_var:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %ecx, %eax
+; CHECK-NEXT:    movl %esi, %ecx
+; CHECK-NEXT:    # kill: def $cl killed $cl killed $ecx
+; CHECK-NEXT:    shll %cl, %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovel %edx, %eax
+; CHECK-NEXT:    retq
+  %s = shl i32 %a0, %a1
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %a2, i32 %a3
+  ret i32 %r
+}
+
+; ashr by variable and using result - use separate test
+define i32 @ashr_var_self_select(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: ashr_var_self_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, %ecx
+; CHECK-NEXT:    # kill: def $cl killed $cl killed $ecx
+; CHECK-NEXT:    sarl %cl, %edi, %eax
+; CHECK-NEXT:    testl %eax, %eax
+; CHECK-NEXT:    cmovnel %edx, %eax
+; CHECK-NEXT:    retq
+  %s = ashr i32 %a0, %a1
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %s, i32 %a2
+  ret i32 %r
+}
+
+; lshr by variable and using result - use separate test
+define i32 @lshr_var_self_select(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: lshr_var_self_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, %ecx
+; CHECK-NEXT:    # kill: def $cl killed $cl killed $ecx
+; CHECK-NEXT:    shrl %cl, %edi, %eax
+; CHECK-NEXT:    testl %eax, %eax
+; CHECK-NEXT:    cmovnel %edx, %eax
+; CHECK-NEXT:    retq
+  %s = lshr i32 %a0, %a1
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %s, i32 %a2
+  ret i32 %r
+}
+
+; shl by variable and using result - use separate test
+define i32 @shl_var_self_select(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: shl_var_self_select:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, %ecx
+; CHECK-NEXT:    # kill: def $cl killed $cl killed $ecx
+; CHECK-NEXT:    shll %cl, %edi, %eax
+; CHECK-NEXT:    testl %eax, %eax
+; CHECK-NEXT:    cmovnel %edx, %eax
+; CHECK-NEXT:    retq
+  %s = shl i32 %a0, %a1
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %s, i32 %a2
+  ret i32 %r
+}
+
+; ashr by non-zero variable - use separate test
+define i32 @ashr_var_amt_never_zero(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: ashr_var_amt_never_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %ecx, %eax
+; CHECK-NEXT:    orb $1, %sil, %cl
+; CHECK-NEXT:    sarl %cl, %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovel %edx, %eax
+; CHECK-NEXT:    retq
+  %a = or i32 %a1, 1
+  %s = ashr i32 %a0, %a
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %a2, i32 %a3
+  ret i32 %r
+}
+
+; lshr by non-zero variable - use separate test
+define i32 @lshr_var_amt_never_zero(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: lshr_var_amt_never_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %ecx, %eax
+; CHECK-NEXT:    orb $1, %sil, %cl
+; CHECK-NEXT:    shrl %cl, %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovel %edx, %eax
+; CHECK-NEXT:    retq
+  %a = or i32 %a1, 1
+  %s = lshr i32 %a0, %a
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %a2, i32 %a3
+  ret i32 %r
+}
+
+; shl by non-zero variable - use separate test
+define i32 @shl_var_amt_never_zero(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: shl_var_amt_never_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %ecx, %eax
+; CHECK-NEXT:    orb $1, %sil, %cl
+; CHECK-NEXT:    shll %cl, %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovel %edx, %eax
+; CHECK-NEXT:    retq
+  %a = or i32 %a1, 1
+  %s = shl i32 %a0, %a
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %a2, i32 %a3
+  ret i32 %r
+}
+
+; ashr by non-zero variable and using result - use separate test
+define i32 @ashr_var_self_select_amt_never_zero(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: ashr_var_self_select_amt_never_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    orb $1, %sil, %cl
+; CHECK-NEXT:    shrl %cl, %edi, %eax
+; CHECK-NEXT:    testl %eax, %eax
+; CHECK-NEXT:    cmovnel %edx, %eax
+; CHECK-NEXT:    retq
+  %a = or i32 %a1, 1
+  %s = lshr i32 %a0, %a
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %s, i32 %a2
+  ret i32 %r
+}
+
+; lshr by non-zero variable and using result - use separate test
+define i32 @lshr_var_self_select_amt_never_zero(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: lshr_var_self_select_amt_never_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    orb $1, %sil, %cl
+; CHECK-NEXT:    shrl %cl, %edi, %eax
+; CHECK-NEXT:    testl %eax, %eax
+; CHECK-NEXT:    cmovnel %edx, %eax
+; CHECK-NEXT:    retq
+  %a = or i32 %a1, 1
+  %s = lshr i32 %a0, %a
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %s, i32 %a2
+  ret i32 %r
+}
+
+; shl by non-zero variable and using result - use separate test
+define i32 @shl_var_self_select_amt_never_zero(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: shl_var_self_select_amt_never_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    orb $1, %sil, %cl
+; CHECK-NEXT:    shrl %cl, %edi, %eax
+; CHECK-NEXT:    testl %eax, %eax
+; CHECK-NEXT:    cmovnel %edx, %eax
+; CHECK-NEXT:    retq
+  %a = or i32 %a1, 1
+  %s = lshr i32 %a0, %a
+  %c = icmp eq i32 %s, 0
+  %r = select i1 %c, i32 %s, i32 %a2
+  ret i32 %r
+}
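
One detail worth noting in shl_const1_self_select above: the shift left by
one is lowered to an NDD add (addl %edi, %edi, %eax), since x << 1 equals
x + x, ADD's EFLAGS are directly usable, and the three-operand form writes
%eax without a separate mov. A quick host-side check of the identity (plain
C++ illustration, not LLVM code):

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t xs[] = {0u, 1u, 0x40000000u, 0x80000000u, 0xFFFFFFFFu};
    for (uint32_t x : xs)
      assert((x << 1) == x + x); // same result, hence the same ZF afterwards
    return 0;
  }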
