[llvm] r250135 - x86: preserve flags when folding atomic operations

JF Bastien via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 12 17:28:47 PDT 2015


Author: jfb
Date: Mon Oct 12 19:28:47 2015
New Revision: 250135

URL: http://llvm.org/viewvc/llvm-project?rev=250135&view=rev
Log:
x86: preserve flags when folding atomic operations

Summary:
D4796 taught LLVM to fold some atomic integer operations into a single
memory-destination instruction. The patterns, however, did not model the
fact that those folded instructions clobber EFLAGS.
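
For illustration, a minimal sketch of the kind of IR the fold fires on
(function name hypothetical): the atomic load, integer op, and release
store back to the same address collapse into one memory-destination
instruction, e.g. "addb $2, (%rdi)" on x86-64, and that instruction
writes EFLAGS.

  ; Hypothetical example of the folded pattern: load/add/store to the
  ; same address becomes a single memory-destination add, which also
  ; sets the flags.
  define void @bump(i8* %p) {
    %v  = load atomic i8, i8* %p seq_cst, align 1
    %v2 = add i8 %v, 2
    store atomic i8 %v2, i8* %p release, align 1
    ret void
  }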

This patch adds the missing EFLAGS definition.

Floating-point operations don't set flags, so the subsequent fadd
optimization is correct as-is. The same applies to the surrounding
load/store optimizations.
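
For comparison, a sketch of the floating-point counterpart (hypothetical
function name, assuming the integer-load-plus-bitcast shape these
patterns typically match): the folded SSE fadd writes no flags, so no
EFLAGS clobber needs to be modeled for it.

  ; Sketch of the floating-point case; the fadd leaves EFLAGS untouched.
  define void @bumpf(i32* %p) {
    %i  = load atomic i32, i32* %p seq_cst, align 4
    %f  = bitcast i32 %i to float
    %f2 = fadd float %f, 1.0
    %i2 = bitcast float %f2 to i32
    store atomic i32 %i2, i32* %p release, align 4
    ret void
  }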

Reviewers: rsmith, rtrieu

Subscribers: llvm-commits, reames, morisset

Differential Revision: http://reviews.llvm.org/D13680

Added:
    llvm/trunk/test/CodeGen/X86/atomic-flags.ll
Modified:
    llvm/trunk/lib/Target/X86/X86InstrCompiler.td

Modified: llvm/trunk/lib/Target/X86/X86InstrCompiler.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrCompiler.td?rev=250135&r1=250134&r2=250135&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrCompiler.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrCompiler.td Mon Oct 12 19:28:47 2015
@@ -774,12 +774,14 @@ multiclass RELEASE_BINOP_MI<SDNode op> {
         [(atomic_store_64 addr:$dst, (op
             (atomic_load_64 addr:$dst), GR64:$src))]>;
 }
-defm RELEASE_ADD : RELEASE_BINOP_MI<add>;
-defm RELEASE_AND : RELEASE_BINOP_MI<and>;
-defm RELEASE_OR  : RELEASE_BINOP_MI<or>;
-defm RELEASE_XOR : RELEASE_BINOP_MI<xor>;
-// Note: we don't deal with sub, because substractions of constants are
-// optimized into additions before this code can run
+let Defs = [EFLAGS] in {
+  defm RELEASE_ADD : RELEASE_BINOP_MI<add>;
+  defm RELEASE_AND : RELEASE_BINOP_MI<and>;
+  defm RELEASE_OR  : RELEASE_BINOP_MI<or>;
+  defm RELEASE_XOR : RELEASE_BINOP_MI<xor>;
+  // Note: we don't deal with sub, because subtractions of constants are
+  //       optimized into additions before this code can run.
+}
 
 // Same as above, but for floating-point.
 // FIXME: imm version.

Added: llvm/trunk/test/CodeGen/X86/atomic-flags.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-flags.ll?rev=250135&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-flags.ll (added)
+++ llvm/trunk/test/CodeGen/X86/atomic-flags.ll Mon Oct 12 19:28:47 2015
@@ -0,0 +1,38 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-machineinstrs | FileCheck %s
+
+; Make sure that flags are properly preserved despite atomic optimizations.
+
+define i32 @atomic_and_flags(i8* %p, i32 %a, i32 %b) {
+; CHECK-LABEL: atomic_and_flags:
+
+  ; Generate flags value, and use it.
+  ; CHECK:      cmpl
+  ; CHECK-NEXT: jne
+  %cmp = icmp eq i32 %a, %b
+  br i1 %cmp, label %L1, label %L2
+
+L1:
+  ; The following pattern will get folded.
+  ; CHECK: addb
+  %1 = load atomic i8, i8* %p seq_cst, align 1
+  %2 = add i8 %1, 2
+  store atomic i8 %2, i8* %p release, align 1
+
+  ; Use the comparison result again. We need to rematerialize the comparison
+  ; somehow. This test checks that cmpl gets emitted again, but any
+  ; rematerialization would work (the optimizer used to clobber the flags with
+  ; the add).
+  ; CHECK-NEXT: cmpl
+  ; CHECK-NEXT: jne
+  br i1 %cmp, label %L3, label %L4
+
+L2:
+  ret i32 2
+
+L3:
+  ret i32 3
+
+L4:
+  ret i32 4
+}



