[PATCH] IR support for "cmpxchg weak"

Tim Northover t.p.northover at gmail.com
Fri Jun 13 06:57:13 PDT 2014


> .. of the patch. The paragraph is confusing, as I don't see why Clang would
> need to change with this patch as it doesn't set weak to true...

It has to adapt to the new return type of cmpxchg instructions:
all the existing icmps become extractvalues, and a few more
extractvalues are added for the places where the loaded value is
actually used.
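
In IR terms the pattern changes roughly like this (a sketch with
made-up value names, not lifted verbatim from the patch):

    define i1 @cas(i32* %p, i32 %expected, i32 %desired) {
      ; Before the patch, cmpxchg yielded only the loaded value, and the
      ; success flag had to be recomputed with an icmp:
      ;   %old = cmpxchg i32* %p, i32 %expected, i32 %desired seq_cst seq_cst
      ;   %success = icmp eq i32 %old, %expected
      ; After the patch, cmpxchg yields { i32, i1 } and both results are
      ; pulled out of the pair:
      %pair = cmpxchg i32* %p, i32 %expected, i32 %desired seq_cst seq_cst
      %old = extractvalue { i32, i1 } %pair, 0
      %success = extractvalue { i32, i1 } %pair, 1
      ret i1 %success
    }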

I'm attaching it here in case you're interested, though it's all
pretty straightforward.

Cheers.

Tim.
-------------- next part --------------
commit 094f6767931424527a7be3f07534a2712c868dd7
Author: Tim Northover <T.P.Northover at gmail.com>
Date:   Fri Jun 13 09:01:18 2014 +0100

    IR-change: cmpxchg operations now return { iN, i1 }.
    
    This is a minimal fix for clang. I'll soon add support for generating
    weak variants when requested, but that's not really necessary for the
    LLVM change in isolation.

diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 18eb065..ad4ba88 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -186,13 +186,14 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E,
   llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
   Desired->setAlignment(Align);
 
-  llvm::AtomicCmpXchgInst *Old = CGF.Builder.CreateAtomicCmpXchg(
+  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
       Ptr, Expected, Desired, SuccessOrder, FailureOrder);
-  Old->setVolatile(E->isVolatile());
+  Pair->setVolatile(E->isVolatile());
 
   // Cmp holds the result of the compare-exchange operation: true on success,
   // false on failure.
-  llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(Old, Expected);
+  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
+  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
 
   // This basic block is used to hold the store instruction if the operation
   // failed.
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index f705ed8..47245cf 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -975,6 +975,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                 llvm::SequentiallyConsistent,
                                                 llvm::SequentiallyConsistent);
+    Result = Builder.CreateExtractValue(Result, 0);
     Result = EmitFromInt(*this, Result, T, ValueType);
     return RValue::get(Result);
   }
@@ -998,11 +999,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
     Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
 
-    Value *OldVal = Args[1];
-    Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
-                                                 llvm::SequentiallyConsistent,
-                                                 llvm::SequentiallyConsistent);
-    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
+    Value *Pair = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+                                              llvm::SequentiallyConsistent,
+                                              llvm::SequentiallyConsistent);
+    Value *Result = Builder.CreateExtractValue(Pair, 1);
     // zext bool to int.
     Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
     return RValue::get(Result);
@@ -1524,7 +1524,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
         SequentiallyConsistent,
         SequentiallyConsistent);
       CXI->setVolatile(true);
-      return RValue::get(CXI);
+      return RValue::get(Builder.CreateExtractValue(CXI, 0));
   }
   case Builtin::BI_InterlockedIncrement: {
     AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 4dfa169..14ef5b7 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -1732,11 +1732,12 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
   if (atomicPHI) {
     llvm::BasicBlock *opBB = Builder.GetInsertBlock();
     llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
-    llvm::Value *old = Builder.CreateAtomicCmpXchg(
+    llvm::Value *pair = Builder.CreateAtomicCmpXchg(
         LV.getAddress(), atomicPHI, CGF.EmitToMemory(value, type),
         llvm::SequentiallyConsistent, llvm::SequentiallyConsistent);
+    llvm::Value *old = Builder.CreateExtractValue(pair, 0);
+    llvm::Value *success = Builder.CreateExtractValue(pair, 1);
     atomicPHI->addIncoming(old, opBB);
-    llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
     Builder.CreateCondBr(success, contBB, opBB);
     Builder.SetInsertPoint(contBB);
     return isPre ? value : input;
@@ -2075,11 +2076,12 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
   if (atomicPHI) {
     llvm::BasicBlock *opBB = Builder.GetInsertBlock();
     llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
-    llvm::Value *old = Builder.CreateAtomicCmpXchg(
+    llvm::Value *pair = Builder.CreateAtomicCmpXchg(
         LHSLV.getAddress(), atomicPHI, CGF.EmitToMemory(Result, LHSTy),
         llvm::SequentiallyConsistent, llvm::SequentiallyConsistent);
+    llvm::Value *old = Builder.CreateExtractValue(pair, 0);
+    llvm::Value *success = Builder.CreateExtractValue(pair, 1);
     atomicPHI->addIncoming(old, opBB);
-    llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
     Builder.CreateCondBr(success, contBB, opBB);
     Builder.SetInsertPoint(contBB);
     return LHSLV;
diff --git a/test/CodeGen/Atomics.c b/test/CodeGen/Atomics.c
index 5798dff..684f36d 100644
--- a/test/CodeGen/Atomics.c
+++ b/test/CodeGen/Atomics.c
@@ -160,23 +160,70 @@ void test_op_and_fetch (void)
 
 void test_compare_and_swap (void)
 {
-  sc = __sync_val_compare_and_swap (&sc, uc, sc); // CHECK: cmpxchg i8
-  uc = __sync_val_compare_and_swap (&uc, uc, sc); // CHECK: cmpxchg i8
-  ss = __sync_val_compare_and_swap (&ss, uc, sc); // CHECK: cmpxchg i16
-  us = __sync_val_compare_and_swap (&us, uc, sc); // CHECK: cmpxchg i16
-  si = __sync_val_compare_and_swap (&si, uc, sc); // CHECK: cmpxchg i32
-  ui = __sync_val_compare_and_swap (&ui, uc, sc); // CHECK: cmpxchg i32
-  sll = __sync_val_compare_and_swap (&sll, uc, sc); // CHECK: cmpxchg i64
-  ull = __sync_val_compare_and_swap (&ull, uc, sc); // CHECK: cmpxchg i64
-
-  ui = __sync_bool_compare_and_swap (&sc, uc, sc); // CHECK: cmpxchg
-  ui = __sync_bool_compare_and_swap (&uc, uc, sc); // CHECK: cmpxchg
-  ui = __sync_bool_compare_and_swap (&ss, uc, sc); // CHECK: cmpxchg
-  ui = __sync_bool_compare_and_swap (&us, uc, sc); // CHECK: cmpxchg
-  ui = __sync_bool_compare_and_swap (&si, uc, sc); // CHECK: cmpxchg
-  ui = __sync_bool_compare_and_swap (&ui, uc, sc); // CHECK: cmpxchg
-  ui = __sync_bool_compare_and_swap (&sll, uc, sc); // CHECK: cmpxchg
-  ui = __sync_bool_compare_and_swap (&ull, uc, sc); // CHECK: cmpxchg
+  sc = __sync_val_compare_and_swap (&sc, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i8
+  // CHECK: extractvalue { i8, i1 } [[PAIR]], 0
+
+  uc = __sync_val_compare_and_swap (&uc, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i8
+  // CHECK: extractvalue { i8, i1 } [[PAIR]], 0
+
+  ss = __sync_val_compare_and_swap (&ss, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i16
+  // CHECK: extractvalue { i16, i1 } [[PAIR]], 0
+
+  us = __sync_val_compare_and_swap (&us, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i16
+  // CHECK: extractvalue { i16, i1 } [[PAIR]], 0
+
+  si = __sync_val_compare_and_swap (&si, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i32
+  // CHECK: extractvalue { i32, i1 } [[PAIR]], 0
+
+  ui = __sync_val_compare_and_swap (&ui, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i32
+  // CHECK: extractvalue { i32, i1 } [[PAIR]], 0
+
+  sll = __sync_val_compare_and_swap (&sll, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i64
+  // CHECK: extractvalue { i64, i1 } [[PAIR]], 0
+
+  ull = __sync_val_compare_and_swap (&ull, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i64
+  // CHECK: extractvalue { i64, i1 } [[PAIR]], 0
+
+
+  ui = __sync_bool_compare_and_swap (&sc, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i8
+  // CHECK: extractvalue { i8, i1 } [[PAIR]], 1
+
+  ui = __sync_bool_compare_and_swap (&uc, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i8
+  // CHECK: extractvalue { i8, i1 } [[PAIR]], 1
+
+  ui = __sync_bool_compare_and_swap (&ss, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i16
+  // CHECK: extractvalue { i16, i1 } [[PAIR]], 1
+
+  ui = __sync_bool_compare_and_swap (&us, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i16
+  // CHECK: extractvalue { i16, i1 } [[PAIR]], 1
+
+  ui = __sync_bool_compare_and_swap (&si, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i32
+  // CHECK: extractvalue { i32, i1 } [[PAIR]], 1
+
+  ui = __sync_bool_compare_and_swap (&ui, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i32
+  // CHECK: extractvalue { i32, i1 } [[PAIR]], 1
+
+  ui = __sync_bool_compare_and_swap (&sll, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i64
+  // CHECK: extractvalue { i64, i1 } [[PAIR]], 1
+
+  ui = __sync_bool_compare_and_swap (&ull, uc, sc);
+  // CHECK: [[PAIR:%[a-z0-9._]+]] = cmpxchg i64
+  // CHECK: extractvalue { i64, i1 } [[PAIR]], 1
 }
 
 void test_lock (void)
diff --git a/test/CodeGen/atomic-ops.c b/test/CodeGen/atomic-ops.c
index 2517b67..edcb63f 100644
--- a/test/CodeGen/atomic-ops.c
+++ b/test/CodeGen/atomic-ops.c
@@ -15,13 +15,13 @@ typedef enum memory_order {
 } memory_order;
 
 int fi1(_Atomic(int) *i) {
-  // CHECK: @fi1
+  // CHECK-LABEL: @fi1
   // CHECK: load atomic i32* {{.*}} seq_cst
   return __c11_atomic_load(i, memory_order_seq_cst);
 }
 
 int fi1a(int *i) {
-  // CHECK: @fi1a
+  // CHECK-LABEL: @fi1a
   // CHECK: load atomic i32* {{.*}} seq_cst
   int v;
   __atomic_load(i, &v, memory_order_seq_cst);
@@ -29,60 +29,60 @@ int fi1a(int *i) {
 }
 
 int fi1b(int *i) {
-  // CHECK: @fi1b
+  // CHECK-LABEL: @fi1b
   // CHECK: load atomic i32* {{.*}} seq_cst
   return __atomic_load_n(i, memory_order_seq_cst);
 }
 
 void fi2(_Atomic(int) *i) {
-  // CHECK: @fi2
+  // CHECK-LABEL: @fi2
   // CHECK: store atomic i32 {{.*}} seq_cst
   __c11_atomic_store(i, 1, memory_order_seq_cst);
 }
 
 void fi2a(int *i) {
-  // CHECK: @fi2a
+  // CHECK-LABEL: @fi2a
   // CHECK: store atomic i32 {{.*}} seq_cst
   int v = 1;
   __atomic_store(i, &v, memory_order_seq_cst);
 }
 
 void fi2b(int *i) {
-  // CHECK: @fi2b
+  // CHECK-LABEL: @fi2b
   // CHECK: store atomic i32 {{.*}} seq_cst
   __atomic_store_n(i, 1, memory_order_seq_cst);
 }
 
 int fi3(_Atomic(int) *i) {
-  // CHECK: @fi3
+  // CHECK-LABEL: @fi3
   // CHECK: atomicrmw and
   // CHECK-NOT: and
   return __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
 }
 
 int fi3a(int *i) {
-  // CHECK: @fi3a
+  // CHECK-LABEL: @fi3a
   // CHECK: atomicrmw xor
   // CHECK-NOT: xor
   return __atomic_fetch_xor(i, 1, memory_order_seq_cst);
 }
 
 int fi3b(int *i) {
-  // CHECK: @fi3b
+  // CHECK-LABEL: @fi3b
   // CHECK: atomicrmw add
   // CHECK: add
   return __atomic_add_fetch(i, 1, memory_order_seq_cst);
 }
 
 int fi3c(int *i) {
-  // CHECK: @fi3c
+  // CHECK-LABEL: @fi3c
   // CHECK: atomicrmw nand
   // CHECK-NOT: and
   return __atomic_fetch_nand(i, 1, memory_order_seq_cst);
 }
 
 int fi3d(int *i) {
-  // CHECK: @fi3d
+  // CHECK-LABEL: @fi3d
   // CHECK: atomicrmw nand
   // CHECK: and
   // CHECK: xor
@@ -90,9 +90,10 @@ int fi3d(int *i) {
 }
 
 _Bool fi4(_Atomic(int) *i) {
-  // CHECK: @fi4
-  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
-  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = icmp eq i32 [[OLD]], [[EXPECTED]]
+  // CHECK-LABEL: @fi4
+  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
+  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
+  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
   // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
   // CHECK: store i32 [[OLD]]
   int cmp = 0;
@@ -100,9 +101,10 @@ _Bool fi4(_Atomic(int) *i) {
 }
 
 _Bool fi4a(int *i) {
-  // CHECK: @fi4
-  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
-  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = icmp eq i32 [[OLD]], [[EXPECTED]]
+  // CHECK-LABEL: @fi4
+  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
+  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
+  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
   // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
   // CHECK: store i32 [[OLD]]
   int cmp = 0;
@@ -111,9 +113,10 @@ _Bool fi4a(int *i) {
 }
 
 _Bool fi4b(int *i) {
-  // CHECK: @fi4
-  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
-  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = icmp eq i32 [[OLD]], [[EXPECTED]]
+  // CHECK-LABEL: @fi4
+  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
+  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
+  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
   // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
   // CHECK: store i32 [[OLD]]
   int cmp = 0;
@@ -121,13 +124,13 @@ _Bool fi4b(int *i) {
 }
 
 float ff1(_Atomic(float) *d) {
-  // CHECK: @ff1
+  // CHECK-LABEL: @ff1
   // CHECK: load atomic i32* {{.*}} monotonic
   return __c11_atomic_load(d, memory_order_relaxed);
 }
 
 void ff2(_Atomic(float) *d) {
-  // CHECK: @ff2
+  // CHECK-LABEL: @ff2
   // CHECK: store atomic i32 {{.*}} release
   __c11_atomic_store(d, 1, memory_order_release);
 }
@@ -137,20 +140,20 @@ float ff3(_Atomic(float) *d) {
 }
 
 int* fp1(_Atomic(int*) *p) {
-  // CHECK: @fp1
+  // CHECK-LABEL: @fp1
   // CHECK: load atomic i32* {{.*}} seq_cst
   return __c11_atomic_load(p, memory_order_seq_cst);
 }
 
 int* fp2(_Atomic(int*) *p) {
-  // CHECK: @fp2
+  // CHECK-LABEL: @fp2
   // CHECK: store i32 4
   // CHECK: atomicrmw add {{.*}} monotonic
   return __c11_atomic_fetch_add(p, 1, memory_order_relaxed);
 }
 
 int *fp2a(int **p) {
-  // CHECK: @fp2a
+  // CHECK-LABEL: @fp2a
   // CHECK: store i32 4
   // CHECK: atomicrmw sub {{.*}} monotonic
   // Note, the GNU builtins do not multiply by sizeof(T)!
@@ -158,20 +161,20 @@ int *fp2a(int **p) {
 }
 
 _Complex float fc(_Atomic(_Complex float) *c) {
-  // CHECK: @fc
+  // CHECK-LABEL: @fc
   // CHECK: atomicrmw xchg i64*
   return __c11_atomic_exchange(c, 2, memory_order_seq_cst);
 }
 
 typedef struct X { int x; } X;
 X fs(_Atomic(X) *c) {
-  // CHECK: @fs
+  // CHECK-LABEL: @fs
   // CHECK: atomicrmw xchg i32*
   return __c11_atomic_exchange(c, (X){2}, memory_order_seq_cst);
 }
 
 X fsa(X *c, X *d) {
-  // CHECK: @fsa
+  // CHECK-LABEL: @fsa
   // CHECK: atomicrmw xchg i32*
   X ret;
   __atomic_exchange(c, d, &ret, memory_order_seq_cst);
@@ -179,7 +182,7 @@ X fsa(X *c, X *d) {
 }
 
 _Bool fsb(_Bool *c) {
-  // CHECK: @fsb
+  // CHECK-LABEL: @fsb
   // CHECK: atomicrmw xchg i8*
   return __atomic_exchange_n(c, 1, memory_order_seq_cst);
 }
@@ -205,7 +208,7 @@ struct Seventeen {
 } seventeen;
 
 int lock_free(struct Incomplete *incomplete) {
-  // CHECK: @lock_free
+  // CHECK-LABEL: @lock_free
 
   // CHECK: call i32 @__atomic_is_lock_free(i32 3, i8* null)
   __c11_atomic_is_lock_free(3);
@@ -253,7 +256,7 @@ struct foo bigThing;
 _Atomic(struct foo) bigAtomic;
 
 void structAtomicStore() {
-  // CHECK: @structAtomicStore
+  // CHECK-LABEL: @structAtomicStore
   struct foo f = {0};
   struct bar b = {0};
   __atomic_store(&smallThing, &b, 5);
@@ -263,7 +266,7 @@ void structAtomicStore() {
   // CHECK: call void @__atomic_store(i32 512, i8* {{.*}} @bigThing
 }
 void structAtomicLoad() {
-  // CHECK: @structAtomicLoad
+  // CHECK-LABEL: @structAtomicLoad
   struct bar b;
   __atomic_load(&smallThing, &b, 5);
   // CHECK: call void @__atomic_load(i32 3, i8* {{.*}} @smallThing
@@ -273,7 +276,7 @@ void structAtomicLoad() {
   // CHECK: call void @__atomic_load(i32 512, i8* {{.*}} @bigThing
 }
 struct foo structAtomicExchange() {
-  // CHECK: @structAtomicExchange
+  // CHECK-LABEL: @structAtomicExchange
   struct foo f = {0};
   struct foo old;
   __atomic_exchange(&f, &bigThing, &old, 5);
@@ -283,7 +286,7 @@ struct foo structAtomicExchange() {
   // CHECK: call void @__atomic_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
 }
 int structAtomicCmpExchange() {
-  // CHECK: @structAtomicCmpExchange
+  // CHECK-LABEL: @structAtomicCmpExchange
   _Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5);
   // CHECK: call zeroext i1 @__atomic_compare_exchange(i32 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2
 
@@ -298,7 +301,7 @@ int structAtomicCmpExchange() {
 // types.
 _Atomic(int) atomic_init_i = 42;
 
-// CHECK: @atomic_init_foo
+// CHECK-LABEL: @atomic_init_foo
 void atomic_init_foo()
 {
   // CHECK-NOT: }
diff --git a/test/CodeGen/atomic.c b/test/CodeGen/atomic.c
index ac3848f..43f5bc8 100644
--- a/test/CodeGen/atomic.c
+++ b/test/CodeGen/atomic.c
@@ -35,10 +35,12 @@ int atomic(void) {
   // CHECK: atomicrmw xchg i32* %val, i32 8 seq_cst
   
   old = __sync_val_compare_and_swap(&val, 4, 1976);
-  // CHECK: cmpxchg i32* %val, i32 4, i32 1976 seq_cst
-  
+  // CHECK: [[PAIR:%[a-z0-9_.]+]] = cmpxchg i32* %val, i32 4, i32 1976 seq_cst
+  // CHECK: extractvalue { i32, i1 } [[PAIR]], 0
+
   old = __sync_bool_compare_and_swap(&val, 4, 1976);
-  // CHECK: cmpxchg i32* %val, i32 4, i32 1976 seq_cst
+  // CHECK: [[PAIR:%[a-z0-9_.]+]] = cmpxchg i32* %val, i32 4, i32 1976 seq_cst
+  // CHECK: extractvalue { i32, i1 } [[PAIR]], 1
 
   old = __sync_fetch_and_and(&val, 0x9);
   // CHECK: atomicrmw and i32* %val, i32 9 seq_cst
@@ -65,10 +67,13 @@ int atomic(void) {
   // CHECK: atomicrmw xor i8* %valc, i8 5 seq_cst  
   
   __sync_val_compare_and_swap((void **)0, (void *)0, (void *)0);
-  // CHECK: cmpxchg i32* null, i32 0, i32 0 seq_cst
+  // CHECK: [[PAIR:%[a-z0-9_.]+]] = cmpxchg i32* null, i32 0, i32 0 seq_cst
+  // CHECK: extractvalue { i32, i1 } [[PAIR]], 0
 
   if ( __sync_val_compare_and_swap(&valb, 0, 1)) {
-    // CHECK: cmpxchg i8* %valb, i8 0, i8 1 seq_cst
+    // CHECK: [[PAIR:%[a-z0-9_.]+]] = cmpxchg i8* %valb, i8 0, i8 1 seq_cst
+    // CHECK: [[VAL:%[a-z0-9_.]+]] = extractvalue { i8, i1 } [[PAIR]], 0
+    // CHECK: trunc i8 [[VAL]] to i1
     old = 42;
   }
   

