r186049 - Simplify atomic load/store IRGen.

Eli Friedman eli.friedman at gmail.com
Wed Jul 10 18:32:22 PDT 2013


Author: efriedma
Date: Wed Jul 10 20:32:21 2013
New Revision: 186049

URL: http://llvm.org/viewvc/llvm-project?rev=186049&view=rev
Log:
Simplify atomic load/store IRGen.

Also fixes a couple minor bugs along the way; see testcases.

Modified:
    cfe/trunk/lib/CodeGen/CGAtomic.cpp
    cfe/trunk/lib/CodeGen/CGExpr.cpp
    cfe/trunk/lib/CodeGen/CGExprAgg.cpp
    cfe/trunk/lib/CodeGen/CGValue.h
    cfe/trunk/test/CodeGen/c11atomics-ios.c
    cfe/trunk/test/CodeGen/c11atomics.c

Modified: cfe/trunk/lib/CodeGen/CGAtomic.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGAtomic.cpp?rev=186049&r1=186048&r2=186049&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGAtomic.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGAtomic.cpp Wed Jul 10 20:32:21 2013
@@ -93,7 +93,7 @@ namespace {
       return (ValueSizeInBits != AtomicSizeInBits);
     }
 
-    void emitMemSetZeroIfNecessary(LValue dest) const;
+    bool emitMemSetZeroIfNecessary(LValue dest) const;
 
     llvm::Value *getAtomicSizeValue() const {
       CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
@@ -164,21 +164,22 @@ bool AtomicInfo::requiresMemSetZero(llvm
     return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                            AtomicSizeInBits / 2);
 
-  // Just be pessimistic about aggregates.
+  // Padding in structs has an undefined bit pattern.  User beware.
   case TEK_Aggregate:
-    return true;
+    return false;
   }
   llvm_unreachable("bad evaluation kind");
 }
 
-void AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
+bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
   llvm::Value *addr = dest.getAddress();
   if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
-    return;
+    return false;
 
   CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                            AtomicSizeInBits / 8,
                            dest.getAlignment().getQuantity());
+  return true;
 }
 
 static void
@@ -715,30 +716,13 @@ llvm::Value *AtomicInfo::emitCastToAtomi
 
 RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                        AggValueSlot resultSlot) const {
-  if (EvaluationKind == TEK_Aggregate) {
-    // Nothing to do if the result is ignored.
-    if (resultSlot.isIgnored()) return resultSlot.asRValue();
-
-    assert(resultSlot.getAddr() == addr || hasPadding());
-
-    // In these cases, we should have emitted directly into the result slot.
-    if (!hasPadding() || resultSlot.isValueOfAtomic())
-      return resultSlot.asRValue();
-
-    // Otherwise, fall into the common path.
-  }
+  if (EvaluationKind == TEK_Aggregate)
+    return resultSlot.asRValue();
 
   // Drill into the padding structure if we have one.
   if (hasPadding())
     addr = CGF.Builder.CreateStructGEP(addr, 0);
 
-  // If we're emitting to an aggregate, copy into the result slot.
-  if (EvaluationKind == TEK_Aggregate) {
-    CGF.EmitAggregateCopy(resultSlot.getAddr(), addr, getValueType(),
-                          resultSlot.isVolatile());
-    return resultSlot.asRValue();
-  }
-
   // Otherwise, just convert the temporary to an r-value using the
   // normal conversion routine.
   return CGF.convertTempToRValue(addr, getValueType());
@@ -752,10 +736,7 @@ RValue CodeGenFunction::EmitAtomicLoad(L
   // Check whether we should use a library call.
   if (atomics.shouldUseLibcall()) {
     llvm::Value *tempAddr;
-    if (resultSlot.isValueOfAtomic()) {
-      assert(atomics.getEvaluationKind() == TEK_Aggregate);
-      tempAddr = resultSlot.getPaddedAtomicAddr();
-    } else if (!resultSlot.isIgnored() && !atomics.hasPadding()) {
+    if (!resultSlot.isIgnored()) {
       assert(atomics.getEvaluationKind() == TEK_Aggregate);
       tempAddr = resultSlot.getAddr();
     } else {
@@ -819,16 +800,10 @@ RValue CodeGenFunction::EmitAtomicLoad(L
   llvm::Value *temp;
   bool tempIsVolatile = false;
   CharUnits tempAlignment;
-  if (atomics.getEvaluationKind() == TEK_Aggregate &&
-      (!atomics.hasPadding() || resultSlot.isValueOfAtomic())) {
+  if (atomics.getEvaluationKind() == TEK_Aggregate) {
     assert(!resultSlot.isIgnored());
-    if (resultSlot.isValueOfAtomic()) {
-      temp = resultSlot.getPaddedAtomicAddr();
-      tempAlignment = atomics.getAtomicAlignment();
-    } else {
-      temp = resultSlot.getAddr();
-      tempAlignment = atomics.getValueAlignment();
-    }
+    temp = resultSlot.getAddr();
+    tempAlignment = atomics.getValueAlignment();
     tempIsVolatile = resultSlot.isVolatile();
   } else {
     temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
@@ -996,13 +971,11 @@ void CodeGenFunction::EmitAtomicInit(Exp
   }
 
   case TEK_Aggregate: {
-    // Memset the buffer first if there's any possibility of
-    // uninitialized internal bits.
-    atomics.emitMemSetZeroIfNecessary(dest);
-
-    // HACK: whether the initializer actually has an atomic type
-    // doesn't really seem reliable right now.
+    // Fix up the destination if the initializer isn't an expression
+    // of atomic type.
+    bool Zeroed = false;
     if (!init->getType()->isAtomicType()) {
+      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
       dest = atomics.projectValue(dest);
     }
 
@@ -1010,7 +983,10 @@ void CodeGenFunction::EmitAtomicInit(Exp
     AggValueSlot slot = AggValueSlot::forLValue(dest,
                                         AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
-                                        AggValueSlot::IsNotAliased);
+                                        AggValueSlot::IsNotAliased,
+                                        Zeroed ? AggValueSlot::IsZeroed :
+                                                 AggValueSlot::IsNotZeroed);
+
     EmitAggExpr(init, slot);
     return;
   }

Modified: cfe/trunk/lib/CodeGen/CGExpr.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGExpr.cpp?rev=186049&r1=186048&r2=186049&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGExpr.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGExpr.cpp Wed Jul 10 20:32:21 2013
@@ -2717,11 +2717,10 @@ LValue CodeGenFunction::EmitCastLValue(c
   case CK_BuiltinFnToFnPtr:
     llvm_unreachable("builtin functions are handled elsewhere");
 
-  // These two casts are currently treated as no-ops, although they could
-  // potentially be real operations depending on the target's ABI.
+  // These are never l-values; just use the aggregate emission code.
   case CK_NonAtomicToAtomic:
   case CK_AtomicToNonAtomic:
-    return EmitLValue(E->getSubExpr());
+    return EmitAggExprToLValue(E);
 
   case CK_Dynamic: {
     LValue LV = EmitLValue(E->getSubExpr());

Modified: cfe/trunk/lib/CodeGen/CGExprAgg.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGExprAgg.cpp?rev=186049&r1=186048&r2=186049&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGExprAgg.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGExprAgg.cpp Wed Jul 10 20:32:21 2013
@@ -29,14 +29,6 @@ using namespace CodeGen;
 //                        Aggregate Expression Emitter
 //===----------------------------------------------------------------------===//
 
-llvm::Value *AggValueSlot::getPaddedAtomicAddr() const {
-  assert(isValueOfAtomic());
-  llvm::GEPOperator *op = cast<llvm::GEPOperator>(getAddr());
-  assert(op->getNumIndices() == 2);
-  assert(op->hasAllZeroIndices());
-  return op->getPointerOperand();
-}
-
 namespace  {
 class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
   CodeGenFunction &CGF;
@@ -202,38 +194,6 @@ public:
     CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
   }
 };
-
-/// A helper class for emitting expressions into the value sub-object
-/// of a padded atomic type.
-class ValueDestForAtomic {
-  AggValueSlot Dest;
-public:
-  ValueDestForAtomic(CodeGenFunction &CGF, AggValueSlot dest, QualType type)
-    : Dest(dest) {
-    assert(!Dest.isValueOfAtomic());
-    if (!Dest.isIgnored() && CGF.CGM.isPaddedAtomicType(type)) {
-      llvm::Value *valueAddr = CGF.Builder.CreateStructGEP(Dest.getAddr(), 0);
-      Dest = AggValueSlot::forAddr(valueAddr,
-                                   Dest.getAlignment(),
-                                   Dest.getQualifiers(),
-                                   Dest.isExternallyDestructed(),
-                                   Dest.requiresGCollection(),
-                                   Dest.isPotentiallyAliased(),
-                                   Dest.isZeroed(),
-                                   AggValueSlot::IsValueOfAtomic);
-    }
-  }
-
-  const AggValueSlot &getDest() const { return Dest; }
-
-  ~ValueDestForAtomic() {
-    // Kill the GEP if we made one and it didn't end up used.
-    if (Dest.isValueOfAtomic()) {
-      llvm::Instruction *addr = cast<llvm::GetElementPtrInst>(Dest.getAddr());
-      if (addr->use_empty()) addr->eraseFromParent();
-    }
-  }
-};
 }  // end anonymous namespace.
 
 //===----------------------------------------------------------------------===//
@@ -248,8 +208,7 @@ void AggExprEmitter::EmitAggLoadOfLValue
 
   // If the type of the l-value is atomic, then do an atomic load.
   if (LV.getType()->isAtomicType()) {
-    ValueDestForAtomic valueDest(CGF, Dest, LV.getType());
-    CGF.EmitAtomicLoad(LV, valueDest.getDest());
+    CGF.EmitAtomicLoad(LV, Dest);
     return;
   }
 
@@ -653,34 +612,33 @@ void AggExprEmitter::VisitCastExpr(CastE
     }
 
     // If we're converting an r-value of non-atomic type to an r-value
-    // of atomic type, just make an atomic temporary, emit into that,
-    // and then copy the value out.  (FIXME: do we need to
-    // zero-initialize it first?)
+    // of atomic type, just emit directly into the relevant sub-object.
     if (isToAtomic) {
-      ValueDestForAtomic valueDest(CGF, Dest, atomicType);
+      AggValueSlot valueDest = Dest;
+      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
+        // Zero-initialize.  (Strictly speaking, we only need to initialize
+        // the padding at the end, but this is simpler.)
+        if (!Dest.isZeroed())
+          CGF.EmitNullInitialization(Dest.getAddr(), type);
+
+        // Build a GEP to refer to the subobject.
+        llvm::Value *valueAddr =
+            CGF.Builder.CreateStructGEP(valueDest.getAddr(), 0);
+        valueDest = AggValueSlot::forAddr(valueAddr,
+                                          valueDest.getAlignment(),
+                                          valueDest.getQualifiers(),
+                                          valueDest.isExternallyDestructed(),
+                                          valueDest.requiresGCollection(),
+                                          valueDest.isPotentiallyAliased(),
+                                          AggValueSlot::IsZeroed);
+      }
+      
       CGF.EmitAggExpr(E->getSubExpr(), valueDest.getDest());
       return;
     }
 
     // Otherwise, we're converting an atomic type to a non-atomic type.
-
-    // If the dest is a value-of-atomic subobject, drill back out.
-    if (Dest.isValueOfAtomic()) {
-      AggValueSlot atomicSlot =
-        AggValueSlot::forAddr(Dest.getPaddedAtomicAddr(),
-                              Dest.getAlignment(),
-                              Dest.getQualifiers(),
-                              Dest.isExternallyDestructed(),
-                              Dest.requiresGCollection(),
-                              Dest.isPotentiallyAliased(),
-                              Dest.isZeroed(),
-                              AggValueSlot::IsNotValueOfAtomic);
-      CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
-      return;
-    }
-
-    // Otherwise, make an atomic temporary, emit into that, and then
-    // copy the value out.
+    // Make an atomic temporary, emit into that, and then copy the value out.
     AggValueSlot atomicSlot =
       CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
     CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

Modified: cfe/trunk/lib/CodeGen/CGValue.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGValue.h?rev=186049&r1=186048&r2=186049&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGValue.h (original)
+++ cfe/trunk/lib/CodeGen/CGValue.h Wed Jul 10 20:32:21 2013
@@ -381,23 +381,11 @@ class AggValueSlot {
   /// evaluating an expression which constructs such an object.
   bool AliasedFlag : 1;
 
-  /// ValueOfAtomicFlag - This is set to true if the slot is the value
-  /// subobject of an object the size of an _Atomic(T).  The specific
-  /// guarantees this makes are:
-  ///   - the address is guaranteed to be a getelementptr into the
-  ///     padding struct and
-  ///   - it is okay to store something the width of an _Atomic(T)
-  ///     into the address.
-  /// Tracking this allows us to avoid some obviously unnecessary
-  /// memcpys.
-  bool ValueOfAtomicFlag : 1;
-
 public:
   enum IsAliased_t { IsNotAliased, IsAliased };
   enum IsDestructed_t { IsNotDestructed, IsDestructed };
   enum IsZeroed_t { IsNotZeroed, IsZeroed };
   enum NeedsGCBarriers_t { DoesNotNeedGCBarriers, NeedsGCBarriers };
-  enum IsValueOfAtomic_t { IsNotValueOfAtomic, IsValueOfAtomic };
 
   /// ignored - Returns an aggregate value slot indicating that the
   /// aggregate value is being ignored.
@@ -421,9 +409,7 @@ public:
                               IsDestructed_t isDestructed,
                               NeedsGCBarriers_t needsGC,
                               IsAliased_t isAliased,
-                              IsZeroed_t isZeroed = IsNotZeroed,
-                              IsValueOfAtomic_t isValueOfAtomic
-                                = IsNotValueOfAtomic) {
+                              IsZeroed_t isZeroed = IsNotZeroed) {
     AggValueSlot AV;
     AV.Addr = addr;
     AV.Alignment = align.getQuantity();
@@ -432,7 +418,6 @@ public:
     AV.ObjCGCFlag = needsGC;
     AV.ZeroedFlag = isZeroed;
     AV.AliasedFlag = isAliased;
-    AV.ValueOfAtomicFlag = isValueOfAtomic;
     return AV;
   }
 
@@ -440,12 +425,9 @@ public:
                                 IsDestructed_t isDestructed,
                                 NeedsGCBarriers_t needsGC,
                                 IsAliased_t isAliased,
-                                IsZeroed_t isZeroed = IsNotZeroed,
-                                IsValueOfAtomic_t isValueOfAtomic
-                                  = IsNotValueOfAtomic) {
+                                IsZeroed_t isZeroed = IsNotZeroed) {
     return forAddr(LV.getAddress(), LV.getAlignment(),
-                   LV.getQuals(), isDestructed, needsGC, isAliased, isZeroed,
-                   isValueOfAtomic);
+                   LV.getQuals(), isDestructed, needsGC, isAliased, isZeroed);
   }
 
   IsDestructed_t isExternallyDestructed() const {
@@ -477,12 +459,6 @@ public:
     return Addr;
   }
 
-  IsValueOfAtomic_t isValueOfAtomic() const {
-    return IsValueOfAtomic_t(ValueOfAtomicFlag);
-  }
-
-  llvm::Value *getPaddedAtomicAddr() const;
-
   bool isIgnored() const {
     return Addr == 0;
   }

Modified: cfe/trunk/test/CodeGen/c11atomics-ios.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/c11atomics-ios.c?rev=186049&r1=186048&r2=186049&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/c11atomics-ios.c (original)
+++ cfe/trunk/test/CodeGen/c11atomics-ios.c Wed Jul 10 20:32:21 2013
@@ -102,8 +102,6 @@ void testStruct(_Atomic(S) *fp) {
 // CHECK-NEXT: store [[S]]*
 
 // CHECK-NEXT: [[P:%.*]] = load [[S]]** [[FP]]
-// CHECK-NEXT: [[T0:%.*]] = bitcast [[S]]* [[P]] to i8*
-// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 8, i32 8, i1 false)
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]]* [[P]], i32 0, i32 0
 // CHECK-NEXT: store i16 1, i16* [[T0]], align 2
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]]* [[P]], i32 0, i32 1
@@ -114,8 +112,6 @@ void testStruct(_Atomic(S) *fp) {
 // CHECK-NEXT: store i16 4, i16* [[T0]], align 2
   __c11_atomic_init(fp, (S){1,2,3,4});
 
-// CHECK-NEXT: [[T0:%.*]] = bitcast [[S]]* [[X]] to i8*
-// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 8, i32 8, i1 false)
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]]* [[X]], i32 0, i32 0
 // CHECK-NEXT: store i16 1, i16* [[T0]], align 2
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]]* [[X]], i32 0, i32 1
@@ -169,7 +165,7 @@ void testPromotedStruct(_Atomic(PS) *fp)
   __c11_atomic_init(fp, (PS){1,2,3});
 
 // CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[X]] to i8*
-// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 8, i32 8, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[T0]], i8 0, i32 8, i32 8, i1 false)
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]]* [[X]], i32 0, i32 0
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]]* [[T0]], i32 0, i32 0
 // CHECK-NEXT: store i16 1, i16* [[T1]], align 2
@@ -183,7 +179,7 @@ void testPromotedStruct(_Atomic(PS) *fp)
 // CHECK-NEXT: [[T1:%.*]] = bitcast [[APS]]* [[T0]] to i64*
 // CHECK-NEXT: [[T2:%.*]] = load atomic i64* [[T1]] seq_cst, align 8
 // CHECK-NEXT: [[T3:%.*]] = bitcast [[APS]]* [[TMP0]] to i64*
-// CHECK-NEXT: store i64 [[T2]], i64* [[T3]], align 8
+// CHECK-NEXT: store i64 [[T2]], i64* [[T3]], align 2
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]]* [[TMP0]], i32 0, i32 0
 // CHECK-NEXT: [[T1:%.*]] = bitcast [[PS]]* [[F]] to i8*
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T0]] to i8*
@@ -191,6 +187,8 @@ void testPromotedStruct(_Atomic(PS) *fp)
   PS f = *fp;
 
 // CHECK-NEXT: [[T0:%.*]] = load [[APS]]** [[FP]]
+// CHECK-NEXT: [[T1:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[TMP1]] to i8*
+// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[T1]], i8 0, i32 8, i32 8, i1 false)
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[APS]]* [[TMP1]], i32 0, i32 0
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T1]] to i8*
 // CHECK-NEXT: [[T3:%.*]] = bitcast [[PS]]* [[F]] to i8*

Modified: cfe/trunk/test/CodeGen/c11atomics.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/c11atomics.c?rev=186049&r1=186048&r2=186049&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/c11atomics.c (original)
+++ cfe/trunk/test/CodeGen/c11atomics.c Wed Jul 10 20:32:21 2013
@@ -233,8 +233,6 @@ void testStruct(_Atomic(S) *fp) {
 // CHECK-NEXT: store [[S]]*
 
 // CHECK-NEXT: [[P:%.*]] = load [[S]]** [[FP]]
-// CHECK-NEXT: [[T0:%.*]] = bitcast [[S]]* [[P]] to i8*
-// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 8, i32 8, i1 false)
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]]* [[P]], i32 0, i32 0
 // CHECK-NEXT: store i16 1, i16* [[T0]], align 2
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]]* [[P]], i32 0, i32 1
@@ -245,8 +243,6 @@ void testStruct(_Atomic(S) *fp) {
 // CHECK-NEXT: store i16 4, i16* [[T0]], align 2
   __c11_atomic_init(fp, (S){1,2,3,4});
 
-// CHECK-NEXT: [[T0:%.*]] = bitcast [[S]]* [[X]] to i8*
-// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 8, i32 8, i1 false)
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]]* [[X]], i32 0, i32 0
 // CHECK-NEXT: store i16 1, i16* [[T0]], align 2
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]]* [[X]], i32 0, i32 1
@@ -283,6 +279,9 @@ void testPromotedStruct(_Atomic(PS) *fp)
 // CHECK-NEXT: [[F:%.*]] = alloca [[PS:%.*]], align 2
 // CHECK-NEXT: [[TMP0:%.*]] = alloca [[APS]], align 8
 // CHECK-NEXT: [[TMP1:%.*]] = alloca [[APS]], align 8
+// CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[TMP2:%.*]] = alloca %struct.PS, align 2
+// CHECK-NEXT: [[TMP3:%.*]] = alloca [[APS]], align 8
 // CHECK-NEXT: store [[APS]]*
 
 // CHECK-NEXT: [[P:%.*]] = load [[APS]]** [[FP]]
@@ -298,7 +297,7 @@ void testPromotedStruct(_Atomic(PS) *fp)
   __c11_atomic_init(fp, (PS){1,2,3});
 
 // CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[X]] to i8*
-// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 8, i32 8, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[T0]], i8 0, i32 8, i32 8, i1 false)
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]]* [[X]], i32 0, i32 0
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]]* [[T0]], i32 0, i32 0
 // CHECK-NEXT: store i16 1, i16* [[T1]], align 2
@@ -319,6 +318,8 @@ void testPromotedStruct(_Atomic(PS) *fp)
   PS f = *fp;
 
 // CHECK-NEXT: [[T0:%.*]] = load [[APS]]** [[FP]]
+// CHECK-NEXT: [[T1:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[TMP1]] to i8*
+// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[T1]], i8 0, i32 8, i32 8, i1 false)
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[APS]]* [[TMP1]], i32 0, i32 0
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T1]] to i8*
 // CHECK-NEXT: [[T3:%.*]] = bitcast [[PS]]* [[F]] to i8*
@@ -328,6 +329,20 @@ void testPromotedStruct(_Atomic(PS) *fp)
 // CHECK-NEXT: call arm_aapcscc void @__atomic_store(i32 8, i8* [[T4]], i8* [[T5]], i32 5)
   *fp = f;
 
+// CHECK-NEXT: [[T0:%.*]] = load [[APS]]** %fp.addr, align 4
+// CHECK-NEXT: [[T1:%.*]] = bitcast [[APS]]* [[T0]] to i8*
+// CHECK-NEXT: [[T2:%.*]] = bitcast [[APS]]* [[TMP3]] to i8*
+// CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 8, i8* [[T1]], i8* [[T2]], i32 5)
+// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]]* [[TMP3]], i32 0, i32 0
+// CHECK-NEXT: [[T1:%.*]] = bitcast %struct.PS* [[TMP2]] to i8*
+// CHECK-NEXT: [[T2:%.*]] = bitcast %struct.PS* [[T0]] to i8*
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[T1]], i8* [[T2]], i32 6, i32 2, i1 false)
+// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds %struct.PS* [[TMP2]], i32 0, i32 0
+// CHECK-NEXT: [[T1:%.*]] = load i16* [[T0]], align 2
+// CHECK-NEXT: [[T2:%.*]] = sext i16 [[T1]] to i32
+// CHECK-NEXT: store i32 [[T2]], i32* [[A]], align 4
+  int a = ((PS)*fp).x;
+
 // CHECK-NEXT: ret void
 }
 





More information about the cfe-commits mailing list