[llvm] r290066 - Revert r289955 and r289962. This is causing lots of ASAN failures for us.

Daniel Jasper via llvm-commits llvm-commits at lists.llvm.org
Sun Dec 18 06:36:39 PST 2016


Author: djasper
Date: Sun Dec 18 08:36:38 2016
New Revision: 290066

URL: http://llvm.org/viewvc/llvm-project?rev=290066&view=rev
Log:
Revert r289955 and r289962. This is causing lots of ASAN failures for us.

Not sure whether it causes an ASAN false positive, whether it actually
leads to incorrect code, or whether it merely exposes bad code that was
already there.
Hans, I'll get you instructions to reproduce this.

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=290066&r1=290065&r2=290066&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sun Dec 18 08:36:38 2016
@@ -28985,19 +28985,11 @@ static SDValue combineSelect(SDNode *N,
   return SDValue();
 }
 
-/// Combine brcond/cmov/setcc/.. based on comparing the result of
-/// atomic_load_add to use EFLAGS produced by the addition
-/// directly if possible. For example:
-///
-///   (setcc (cmp (atomic_load_add x, -C) C), COND_E)
-/// becomes:
-///   (setcc (LADD x, -C), COND_E)
-///
-/// and
+/// Combine:
 ///   (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
-/// becomes:
+/// to:
 ///   (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
-///
+/// i.e., reusing the EFLAGS produced by the LOCKed instruction.
 /// Note that this is only legal for some op/cc combinations.
 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
                                        SelectionDAG &DAG) {
@@ -29006,7 +28998,7 @@ static SDValue combineSetCCAtomicArith(S
         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
     return SDValue();
 
-  // This applies to variations of the common case:
+  // This only applies to variations of the common case:
   //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
   //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
   //   (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
@@ -29025,9 +29017,8 @@ static SDValue combineSetCCAtomicArith(S
     return SDValue();
 
   auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
-  if (!CmpRHSC)
+  if (!CmpRHSC || CmpRHSC->getZExtValue() != 0)
     return SDValue();
-  APInt Comparand = CmpRHSC->getAPIntValue();
 
   const unsigned Opc = CmpLHS.getOpcode();
 
@@ -29043,19 +29034,16 @@ static SDValue combineSetCCAtomicArith(S
   if (Opc == ISD::ATOMIC_LOAD_SUB)
     Addend = -Addend;
 
-  if (Comparand == -Addend) {
-    // No change to CC.
-  } else if (CC == X86::COND_S && Comparand == 0 && Addend == 1) {
+  if (CC == X86::COND_S && Addend == 1)
     CC = X86::COND_LE;
-  } else if (CC == X86::COND_NS && Comparand == 0 && Addend == 1) {
+  else if (CC == X86::COND_NS && Addend == 1)
     CC = X86::COND_G;
-  } else if (CC == X86::COND_G && Comparand == 0 && Addend == -1) {
+  else if (CC == X86::COND_G && Addend == -1)
     CC = X86::COND_GE;
-  } else if (CC == X86::COND_LE && Comparand == 0 && Addend == -1) {
+  else if (CC == X86::COND_LE && Addend == -1)
     CC = X86::COND_L;
-  } else {
+  else
     return SDValue();
-  }
 
   SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG);
   DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),

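For context, a minimal IR sketch of the case the restored, narrower combine
still handles: a sign test against zero on the result of an atomic increment.
The function name is illustrative and the expected lowering in the comment is
only a rough guide, not a CHECK line taken from the test file.

; Illustrative only: with the narrower combine, the comparand must be 0.
; (icmp slt %old, 0) maps to COND_S, and with an addend of 1 the combine
; rewrites it to COND_LE on the flags of the LOCKed increment, so this is
; expected to lower to roughly: lock incq (%rdi); setle %al.
define i8 @sketch_add_1_setcc_slt(i64* %p) nounwind {
entry:
  %old = atomicrmw add i64* %p, i64 1 seq_cst
  %cmp = icmp slt i64 %old, 0
  %res = zext i1 %cmp to i8
  ret i8 %res
}
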
Modified: llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll?rev=290066&r1=290065&r2=290066&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll Sun Dec 18 08:36:38 2016
@@ -176,45 +176,4 @@ entry:
   ret i8 %tmp2
 }
 
-define i8 @test_sub_1_setcc_eq(i64* %p) #0 {
-; CHECK-LABEL: test_sub_1_setcc_eq:
-; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    lock decq (%rdi)
-; CHECK-NEXT:    sete %al
-; CHECK-NEXT:    retq
-entry:
-  %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
-  %tmp1 = icmp eq i64 %tmp0, 1
-  %tmp2 = zext i1 %tmp1 to i8
-  ret i8 %tmp2
-}
-
-define i8 @test_add_5_setcc_ne(i64* %p) #0 {
-; CHECK-LABEL: test_add_5_setcc_ne:
-; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    lock addq $5, (%rdi)
-; CHECK-NEXT:    setne %al
-; CHECK-NEXT:    retq
-entry:
-  %tmp0 = atomicrmw add i64* %p, i64 5 seq_cst
-  %tmp1 = icmp ne i64 %tmp0, -5
-  %tmp2 = zext i1 %tmp1 to i8
-  ret i8 %tmp2
-}
-
-define i8 @test_add_5_setcc_ne_comparand_mismatch(i64* %p) #0 {
-; CHECK-LABEL: test_add_5_setcc_ne_comparand_mismatch:
-; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    movl $5, %eax
-; CHECK-NEXT:    lock xaddq %rax, (%rdi)
-; CHECK-NEXT:    testq %rax, %rax
-; CHECK-NEXT:    setne %al
-; CHECK-NEXT:    retq
-entry:
-  %tmp0 = atomicrmw add i64* %p, i64 5 seq_cst
-  %tmp1 = icmp ne i64 %tmp0, 0
-  %tmp2 = zext i1 %tmp1 to i8
-  ret i8 %tmp2
-}
-
 attributes #0 = { nounwind }



