r224110 - CodeGen: Loads/stores to allocas for atomic ops shouldn't be volatile

David Majnemer david.majnemer at gmail.com
Fri Dec 12 00:16:09 PST 2014


Author: majnemer
Date: Fri Dec 12 02:16:09 2014
New Revision: 224110

URL: http://llvm.org/viewvc/llvm-project?rev=224110&view=rev
Log:
CodeGen: Loads/stores to allocas for atomic ops shouldn't be volatile

Temporary memory allocated on the side for a volatile atomic operation
should not inherit the volatile qualifier of the input pointer.

This fixes PR17306.

Modified:
    cfe/trunk/lib/CodeGen/CGAtomic.cpp
    cfe/trunk/test/CodeGen/atomic-ops.c

Modified: cfe/trunk/lib/CodeGen/CGAtomic.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGAtomic.cpp?rev=224110&r1=224109&r2=224110&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGAtomic.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGAtomic.cpp Fri Dec 12 02:16:09 2014
@@ -588,9 +588,12 @@ RValue CodeGenFunction::EmitAtomicExpr(A
     break;
   }
 
+  QualType RValTy = E->getType().getUnqualifiedType();
+
   auto GetDest = [&] {
-    if (!E->getType()->isVoidType() && !Dest)
-      Dest = CreateMemTemp(E->getType(), ".atomicdst");
+    if (!RValTy->isVoidType() && !Dest) {
+      Dest = CreateMemTemp(RValTy, ".atomicdst");
+    }
     return Dest;
   };
 
@@ -755,7 +758,7 @@ RValue CodeGenFunction::EmitAtomicExpr(A
           Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
       StoreDest->setAlignment(Align);
     }
-    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
+    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
   }
 
   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
@@ -810,9 +813,9 @@ RValue CodeGenFunction::EmitAtomicExpr(A
       // enforce that in general.
       break;
     }
-    if (E->getType()->isVoidType())
+    if (RValTy->isVoidType())
       return RValue::get(nullptr);
-    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
+    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
   }
 
   // Long case, when Order isn't obviously constant.
@@ -878,9 +881,9 @@ RValue CodeGenFunction::EmitAtomicExpr(A
 
   // Cleanup and return
   Builder.SetInsertPoint(ContBB);
-  if (E->getType()->isVoidType())
+  if (RValTy->isVoidType())
     return RValue::get(nullptr);
-  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
+  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
 }
 
 llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {

Modified: cfe/trunk/test/CodeGen/atomic-ops.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/atomic-ops.c?rev=224110&r1=224109&r2=224110&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/atomic-ops.c (original)
+++ cfe/trunk/test/CodeGen/atomic-ops.c Fri Dec 12 02:16:09 2014
@@ -564,7 +564,39 @@ int PR21643() {
   // CHECK: store i32 %[[new]], i32* %[[atomicdst]], align 4
   // CHECK: %[[ret:.*]] = load i32* %[[atomicdst]], align 4
   // CHECK: ret i32 %[[ret]]
+}
+
+int PR17306_1(volatile _Atomic(int) *i) {
+  // CHECK-LABEL: @PR17306_1
+  // CHECK:      %[[i_addr:.*]] = alloca i32
+  // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
+  // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
+  // CHECK-NEXT: %[[addr:.*]] = load i32** %[[i_addr]]
+  // CHECK-NEXT: %[[res:.*]] = load atomic volatile i32* %[[addr]] seq_cst
+  // CHECK-NEXT: store i32 %[[res]], i32* %[[atomicdst]]
+  // CHECK-NEXT: %[[retval:.*]] = load i32* %[[atomicdst]]
+  // CHECK-NEXT: ret i32 %[[retval]]
+  return __c11_atomic_load(i, memory_order_seq_cst);
+}
 
+int PR17306_2(volatile int *i, int value) {
+  // CHECK-LABEL: @PR17306_2
+  // CHECK:      %[[i_addr:.*]] = alloca i32*
+  // CHECK-NEXT: %[[value_addr:.*]] = alloca i32
+  // CHECK-NEXT: %[[atomictmp:.*]] = alloca i32
+  // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
+  // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
+  // CHECK-NEXT: store i32 %value, i32* %[[value_addr]]
+  // CHECK-NEXT: %[[i_lval:.*]] = load i32** %[[i_addr]]
+  // CHECK-NEXT: %[[value:.*]] = load i32* %[[value_addr]]
+  // CHECK-NEXT: store i32 %[[value]], i32* %[[atomictmp]]
+  // CHECK-NEXT: %[[value_lval:.*]] = load i32* %[[atomictmp]]
+  // CHECK-NEXT: %[[old_val:.*]] = atomicrmw volatile add i32* %[[i_lval]], i32 %[[value_lval]] seq_cst
+  // CHECK-NEXT: %[[new_val:.*]] = add i32 %[[old_val]], %[[value_lval]]
+  // CHECK-NEXT: store i32 %[[new_val]], i32* %[[atomicdst]]
+  // CHECK-NEXT: %[[retval:.*]] = load i32* %[[atomicdst]]
+  // CHECK-NEXT: ret i32 %[[retval]]
+  return __atomic_add_fetch(i, value, memory_order_seq_cst);
 }
 
 #endif





More information about the cfe-commits mailing list