[llvm] r291630 - [X86] Don't run combineSetCCAtomicArith() when the cmp has multiple uses

Hans Wennborg via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 10 16:49:54 PST 2017


Author: hans
Date: Tue Jan 10 18:49:54 2017
New Revision: 291630

URL: http://llvm.org/viewvc/llvm-project?rev=291630&view=rev
Log:
[X86] Don't run combineSetCCAtomicArith() when the cmp has multiple uses

We would miscompile the following:

  void g(int);
  int f(volatile long long *p) {
    bool b = __atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST) < 0;
    g(b ? 12 : 34);
    return b ? 56 : 78;
  }

into

  pushq   %rax
  lock            incq    (%rdi)
  movl    $12, %eax
  movl    $34, %edi
  cmovlel %eax, %edi
  callq   g(int)
  testq   %rax, %rax   <---- Bad.
  movl    $56, %ecx
  movl    $78, %eax
  cmovsl  %ecx, %eax
  popq    %rcx
  retq

because combineSetCCAtomicArith() failed to take into account that the cmp
has multiple uses: it replaced one of them and left the other one comparing
garbage.
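
For illustration, the function above boils down to IR roughly like the
following (a hand-written sketch, not part of the commit; names and exact
types are approximate). Note that the single icmp has two users, the two
selects:

  define i32 @f(i64* %p) {
  entry:
    %old = atomicrmw volatile add i64* %p, i64 1 seq_cst
    %b = icmp slt i64 %old, 0
    %arg = select i1 %b, i32 12, i32 34
    call void @g(i32 %arg)
    %ret = select i1 %b, i32 56, i32 78
    ret i32 %ret
  }

  declare void @g(i32)

Both selects become CMOVs reading EFLAGS from the same compare, so rewriting
only one of them leaves the other testing whatever happens to be in the
register after the call.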

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=291630&r1=291629&r2=291630&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue Jan 10 18:49:54 2017
@@ -29404,6 +29404,12 @@ static SDValue combineSetCCAtomicArith(S
         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
     return SDValue();
 
+  // Can't replace the cmp if it has more uses than the one we're looking at.
+  // FIXME: We would like to be able to handle this, but would need to make sure
+  // all uses were updated.
+  if (!Cmp.hasOneUse())
+    return SDValue();
+
   // This only applies to variations of the common case:
   //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
   //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)

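For context (my summary, not text from the commit): combineSetCCAtomicArith()
lets the backend reuse the EFLAGS set by a locked RMW instruction instead of
comparing the value the atomicrmw returns. A single-use sketch of the pattern
it targets, in the spirit of the existing tests:

  ; Illustrative only; mirrors the single-use cases already in
  ; atomic-eflags-reuse.ll, where "lock incq" plus a cmov on its flags can
  ; replace "lock xaddq" plus a separate "testq" on the returned value.
  define i8 @single_use(i64* %p) {
  entry:
    %old = atomicrmw add i64* %p, i64 1 seq_cst
    %b = icmp slt i64 %old, 0        ; old < 0  <=>  (old + 1) <= 0
    %r = select i1 %b, i8 12, i8 34
    ret i8 %r
  }

The new test below adds the two-use variant, which must not be combined until
all uses of the flags can be rewritten.
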
Modified: llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll?rev=291630&r1=291629&r2=291630&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll Tue Jan 10 18:49:54 2017
@@ -176,4 +176,20 @@ entry:
   ret i8 %tmp2
 }
 
+define i8 @test_add_1_cmov_cmov(i64* %p, i8* %q) #0 {
+; TODO: It's possible to use "lock inc" here, but both cmovs need to be updated.
+; CHECK-LABEL: test_add_1_cmov_cmov:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    movl $1, %eax
+; CHECK-NEXT:    lock xaddq %rax, (%rdi)
+; CHECK-NEXT:    testq   %rax, %rax
+entry:
+  %add = atomicrmw add i64* %p, i64 1 seq_cst
+  %cmp = icmp slt i64 %add, 0
+  %s1 = select i1 %cmp, i8 12, i8 34
+  store i8 %s1, i8* %q
+  %s2 = select i1 %cmp, i8 56, i8 78
+  ret i8 %s2
+}
+
 attributes #0 = { nounwind }



