[cfe-commits] r169489 - in /cfe/trunk: lib/CodeGen/CGExpr.cpp lib/CodeGen/CGObjCRuntime.cpp lib/CodeGen/CGRecordLayout.h lib/CodeGen/CGRecordLayoutBuilder.cpp lib/CodeGen/CGValue.h test/CodeGen/2008-01-07-UnusualIntSize.c test/CodeGen/PR4611-bitfield-layout.c test/CodeGen/bitfield-2.c test/CodeGen/init.c test/CodeGen/packed-nest-unpacked.c test/CodeGen/pr2394.c test/CodeGenCXX/2009-12-23-MissingSext.cpp test/CodeGenCXX/bitfield.cpp test/CodeGenCXX/references.cpp test/CodeGenObjC/bitfield-access.m

Chandler Carruth chandlerc at gmail.com
Thu Dec 6 03:14:44 PST 2012


Author: chandlerc
Date: Thu Dec  6 05:14:44 2012
New Revision: 169489

URL: http://llvm.org/viewvc/llvm-project?rev=169489&view=rev
Log:
Rework the bitfield access IR generation to address PR13619 and
generally support the C++11 memory model requirements for bitfield
accesses by relying more heavily on LLVM's memory model.

The primary change this introduces is to move from a manually aligned
and strided access pattern across the bits of the bitfield to a much
simpler lump access of all bits in the bitfield followed by math to
extract the bits relevant for the particular field.

This simplifies the code significantly, but relies on LLVM to
intelligently lower these integers.

I have tested LLVM's lowering both synthetically and in benchmarks. The
lowering appears to be functional, and there are no really significant
performance regressions. Different code patterns accessing bitfields
will vary in how this impacts them. The only real regressions I'm seeing
are a few patterns where the LLVM code generation for loads that feed
directly into a mask operation doesn't take advantage of the x86 ability
to do a smaller load and a cheap zero-extension. This doesn't regress
any benchmark in the nightly test suite on my box past the noise
threshold, but my box is quite noisy. I'll be watching the LNT numbers,
and will look into further improvements to the LLVM lowering as needed.
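
For illustration, the new lowering reduces a bitfield load to plain
shift-and-mask arithmetic on the loaded storage unit. Here is a standalone
C++ sketch of that math (not code from this patch; the helper names are
made up, a 64-bit container stands in for the iN storage integer, and the
usual arithmetic right shift on signed values is assumed):

  #include <cstdint>

  // Extract the Size-bit field at bit Offset within a StorageSize-bit
  // storage unit, mirroring the shl/ashr (signed) and lshr/and (unsigned)
  // sequences emitted below.
  int64_t loadSignedBitfield(uint64_t Storage, unsigned Offset,
                             unsigned Size, unsigned StorageSize) {
    unsigned HighBits = StorageSize - Offset - Size;
    // Shift the field's top bit up to bit 63, then arithmetic-shift back
    // down, sign-extending along the way ("bf.shl" + "bf.ashr").
    return (int64_t)(Storage << (64 - StorageSize + HighBits)) >> (64 - Size);
  }

  uint64_t loadUnsignedBitfield(uint64_t Storage, unsigned Offset,
                                unsigned Size) {
    // "bf.lshr" + "bf.clear".
    return (Storage >> Offset) & ((Size < 64) ? (1ULL << Size) - 1 : ~0ULL);
  }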

Added:
    cfe/trunk/test/CodeGenCXX/bitfield.cpp
Modified:
    cfe/trunk/lib/CodeGen/CGExpr.cpp
    cfe/trunk/lib/CodeGen/CGObjCRuntime.cpp
    cfe/trunk/lib/CodeGen/CGRecordLayout.h
    cfe/trunk/lib/CodeGen/CGRecordLayoutBuilder.cpp
    cfe/trunk/lib/CodeGen/CGValue.h
    cfe/trunk/test/CodeGen/2008-01-07-UnusualIntSize.c
    cfe/trunk/test/CodeGen/PR4611-bitfield-layout.c
    cfe/trunk/test/CodeGen/bitfield-2.c
    cfe/trunk/test/CodeGen/init.c
    cfe/trunk/test/CodeGen/packed-nest-unpacked.c
    cfe/trunk/test/CodeGen/pr2394.c
    cfe/trunk/test/CodeGenCXX/2009-12-23-MissingSext.cpp
    cfe/trunk/test/CodeGenCXX/references.cpp
    cfe/trunk/test/CodeGenObjC/bitfield-access.m

Modified: cfe/trunk/lib/CodeGen/CGExpr.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGExpr.cpp?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGExpr.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGExpr.cpp Thu Dec  6 05:14:44 2012
@@ -1155,72 +1155,30 @@
 
   // Get the output type.
   llvm::Type *ResLTy = ConvertType(LV.getType());
-  unsigned ResSizeInBits = CGM.getDataLayout().getTypeSizeInBits(ResLTy);
 
-  // Compute the result as an OR of all of the individual component accesses.
-  llvm::Value *Res = 0;
-  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
-    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
-    CharUnits AccessAlignment = AI.AccessAlignment;
-    if (!LV.getAlignment().isZero())
-      AccessAlignment = std::min(AccessAlignment, LV.getAlignment());
-
-    // Get the field pointer.
-    llvm::Value *Ptr = LV.getBitFieldBaseAddr();
-
-    // Only offset by the field index if used, so that incoming values are not
-    // required to be structures.
-    if (AI.FieldIndex)
-      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
-
-    // Offset by the byte offset, if used.
-    if (!AI.FieldByteOffset.isZero()) {
-      Ptr = EmitCastToVoidPtr(Ptr);
-      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
-                                       "bf.field.offs");
-    }
-
-    // Cast to the access type.
-    llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
-                       CGM.getContext().getTargetAddressSpace(LV.getType()));
-    Ptr = Builder.CreateBitCast(Ptr, PTy);
-
-    // Perform the load.
-    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
-    Load->setAlignment(AccessAlignment.getQuantity());
-
-    // Shift out unused low bits and mask out unused high bits.
-    llvm::Value *Val = Load;
-    if (AI.FieldBitStart)
-      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
-    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
-                                                            AI.TargetBitWidth),
-                            "bf.clear");
-
-    // Extend or truncate to the target size.
-    if (AI.AccessWidth < ResSizeInBits)
-      Val = Builder.CreateZExt(Val, ResLTy);
-    else if (AI.AccessWidth > ResSizeInBits)
-      Val = Builder.CreateTrunc(Val, ResLTy);
-
-    // Shift into place, and OR into the result.
-    if (AI.TargetBitOffset)
-      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
-    Res = Res ? Builder.CreateOr(Res, Val) : Val;
-  }
-
-  // If the bit-field is signed, perform the sign-extension.
-  //
-  // FIXME: This can easily be folded into the load of the high bits, which
-  // could also eliminate the mask of high bits in some situations.
-  if (Info.isSigned()) {
-    unsigned ExtraBits = ResSizeInBits - Info.getSize();
-    if (ExtraBits)
-      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
-                               ExtraBits, "bf.val.sext");
+  llvm::Value *Ptr = LV.getBitFieldAddr();
+  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(),
+                                        "bf.load");
+  cast<llvm::LoadInst>(Val)->setAlignment(Info.StorageAlignment);
+
+  if (Info.IsSigned) {
+    assert((Info.Offset + Info.Size) <= Info.StorageSize);
+    unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
+    if (HighBits)
+      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
+    if (Info.Offset + HighBits)
+      Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
+  } else {
+    if (Info.Offset)
+      Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
+    if (Info.Offset + Info.Size < Info.StorageSize)
+      Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
+                                                              Info.Size),
+                              "bf.clear");
   }
+  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
 
-  return RValue::get(Res);
+  return RValue::get(Val);
 }
 
 // If this is a reference to a subset of the elements of a vector, create an
@@ -1350,106 +1308,71 @@
 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                      llvm::Value **Result) {
   const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
-
-  // Get the output type.
   llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
-  unsigned ResSizeInBits = CGM.getDataLayout().getTypeSizeInBits(ResLTy);
+  llvm::Value *Ptr = Dst.getBitFieldAddr();
 
   // Get the source value, truncated to the width of the bit-field.
   llvm::Value *SrcVal = Src.getScalarVal();
 
-  if (hasBooleanRepresentation(Dst.getType()))
-    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
+  // Cast the source to the storage type and shift it into place.
+  SrcVal = Builder.CreateIntCast(SrcVal,
+                                 Ptr->getType()->getPointerElementType(),
+                                 /*IsSigned=*/false);
+  llvm::Value *MaskedVal = SrcVal;
+
+  // See if there are other bits in the bitfield's storage we'll need to load
+  // and mask together with source before storing.
+  if (Info.StorageSize != Info.Size) {
+    assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
+    llvm::Value *Val = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
+                                          "bf.load");
+    cast<llvm::LoadInst>(Val)->setAlignment(Info.StorageAlignment);
+
+    // Mask the source value as needed.
+    if (!hasBooleanRepresentation(Dst.getType()))
+      SrcVal = Builder.CreateAnd(SrcVal,
+                                 llvm::APInt::getLowBitsSet(Info.StorageSize,
+                                                            Info.Size),
+                                 "bf.value");
+    MaskedVal = SrcVal;
+    if (Info.Offset)
+      SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");
+
+    // Mask out the original value.
+    Val = Builder.CreateAnd(Val,
+                            ~llvm::APInt::getBitsSet(Info.StorageSize,
+                                                     Info.Offset,
+                                                     Info.Offset + Info.Size),
+                            "bf.clear");
 
-  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
-                                                                Info.getSize()),
-                             "bf.value");
+    // Or together the unchanged values and the source value.
+    SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
+  } else {
+    assert(Info.Offset == 0);
+  }
+
+  // Write the new value back out.
+  llvm::StoreInst *Store = Builder.CreateStore(SrcVal, Ptr,
+                                               Dst.isVolatileQualified());
+  Store->setAlignment(Info.StorageAlignment);
 
   // Return the new value of the bit-field, if requested.
   if (Result) {
-    // Cast back to the proper type for result.
-    llvm::Type *SrcTy = Src.getScalarVal()->getType();
-    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
-                                                   "bf.reload.val");
-
-    // Sign extend if necessary.
-    if (Info.isSigned()) {
-      unsigned ExtraBits = ResSizeInBits - Info.getSize();
-      if (ExtraBits)
-        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
-                                       ExtraBits, "bf.reload.sext");
-    }
-
-    *Result = ReloadVal;
-  }
-
-  // Iterate over the components, writing each piece to memory.
-  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
-    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
-    CharUnits AccessAlignment = AI.AccessAlignment;
-    if (!Dst.getAlignment().isZero())
-      AccessAlignment = std::min(AccessAlignment, Dst.getAlignment());
-
-    // Get the field pointer.
-    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
-    unsigned addressSpace =
-      cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
-
-    // Only offset by the field index if used, so that incoming values are not
-    // required to be structures.
-    if (AI.FieldIndex)
-      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
-
-    // Offset by the byte offset, if used.
-    if (!AI.FieldByteOffset.isZero()) {
-      Ptr = EmitCastToVoidPtr(Ptr);
-      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
-                                       "bf.field.offs");
-    }
-
-    // Cast to the access type.
-    llvm::Type *AccessLTy =
-      llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);
-
-    llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
-    Ptr = Builder.CreateBitCast(Ptr, PTy);
-
-    // Extract the piece of the bit-field value to write in this access, limited
-    // to the values that are part of this access.
-    llvm::Value *Val = SrcVal;
-    if (AI.TargetBitOffset)
-      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
-    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
-                                                            AI.TargetBitWidth));
-
-    // Extend or truncate to the access size.
-    if (ResSizeInBits < AI.AccessWidth)
-      Val = Builder.CreateZExt(Val, AccessLTy);
-    else if (ResSizeInBits > AI.AccessWidth)
-      Val = Builder.CreateTrunc(Val, AccessLTy);
-
-    // Shift into the position in memory.
-    if (AI.FieldBitStart)
-      Val = Builder.CreateShl(Val, AI.FieldBitStart);
-
-    // If necessary, load and OR in bits that are outside of the bit-field.
-    if (AI.TargetBitWidth != AI.AccessWidth) {
-      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
-      Load->setAlignment(AccessAlignment.getQuantity());
-
-      // Compute the mask for zeroing the bits that are part of the bit-field.
-      llvm::APInt InvMask =
-        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
-                                 AI.FieldBitStart + AI.TargetBitWidth);
-
-      // Apply the mask and OR in to the value to write.
-      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
-    }
-
-    // Write the value.
-    llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
-                                                 Dst.isVolatileQualified());
-    Store->setAlignment(AccessAlignment.getQuantity());
+    llvm::Value *ResultVal = MaskedVal;
+
+    // Sign extend the value if needed.
+    if (Info.IsSigned) {
+      assert(Info.Size <= Info.StorageSize);
+      unsigned HighBits = Info.StorageSize - Info.Size;
+      if (HighBits) {
+        ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
+        ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
+      }
+    }
+
+    ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
+                                      "bf.result.cast");
+    *Result = ResultVal;
   }
 }
 
@@ -2333,10 +2256,21 @@
     const CGRecordLayout &RL =
       CGM.getTypes().getCGRecordLayout(field->getParent());
     const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
+    llvm::Value *Addr = base.getAddress();
+    unsigned Idx = RL.getLLVMFieldNo(field);
+    if (Idx != 0)
+      // For structs, we GEP to the field that the record layout suggests.
+      Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
+    // Get the access type.
+    llvm::Type *PtrTy = llvm::Type::getIntNPtrTy(
+      getLLVMContext(), Info.StorageSize,
+      CGM.getContext().getTargetAddressSpace(base.getType()));
+    if (Addr->getType() != PtrTy)
+      Addr = Builder.CreateBitCast(Addr, PtrTy);
+
     QualType fieldType =
       field->getType().withCVRQualifiers(base.getVRQualifiers());
-    return LValue::MakeBitfield(base.getAddress(), Info, fieldType,
-                                base.getAlignment());
+    return LValue::MakeBitfield(Addr, Info, fieldType, base.getAlignment());
   }
 
   const RecordDecl *rec = field->getParent();
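
To make the new store sequence in EmitStoreThroughBitfieldLValue concrete,
here is a hedged C++ sketch of its mask-and-merge math for the case where
the bitfield doesn't fill its storage unit (the comments name the IR values
from the code above, but the helper itself is hypothetical):

  #include <cstdint>

  // Merge SrcVal's low Size bits into Storage at bit Offset, mirroring the
  // load/and/shl/and/or/store sequence above.
  uint64_t storeBitfield(uint64_t Storage, uint64_t SrcVal,
                         unsigned Offset, unsigned Size) {
    uint64_t LowMask = (Size < 64) ? (1ULL << Size) - 1 : ~0ULL;
    uint64_t Masked  = SrcVal & LowMask;                 // "bf.value"
    uint64_t Cleared = Storage & ~(LowMask << Offset);   // "bf.clear"
    return Cleared | (Masked << Offset);                 // "bf.set"
  }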

Modified: cfe/trunk/lib/CodeGen/CGObjCRuntime.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGObjCRuntime.cpp?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGObjCRuntime.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGObjCRuntime.cpp Thu Dec  6 05:14:44 2012
@@ -89,14 +89,13 @@
                                                unsigned CVRQualifiers,
                                                llvm::Value *Offset) {
   // Compute (type*) ( (char *) BaseValue + Offset)
-  llvm::Type *I8Ptr = CGF.Int8PtrTy;
   QualType IvarTy = Ivar->getType();
   llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
-  llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
+  llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
   V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
-  V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
 
   if (!Ivar->isBitField()) {
+    V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
     LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
     LV.getQuals().addCVRQualifiers(CVRQualifiers);
     return LV;
@@ -116,16 +115,14 @@
   // Note, there is a subtle invariant here: we can only call this routine on
   // non-synthesized ivars but we may be called for synthesized ivars.  However,
   // a synthesized ivar can never be a bit-field, so this is safe.
-  const ASTRecordLayout &RL =
-    CGF.CGM.getContext().getASTObjCInterfaceLayout(OID);
-  uint64_t TypeSizeInBits = CGF.CGM.getContext().toBits(RL.getSize());
   uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
   uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
-  uint64_t ContainingTypeAlign = CGF.CGM.getContext().getTargetInfo().getCharAlign();
-  uint64_t ContainingTypeSize = TypeSizeInBits - (FieldBitOffset - BitOffset);
+  uint64_t AlignmentBits = CGF.CGM.getContext().getTargetInfo().getCharAlign();
   uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
-  CharUnits ContainingTypeAlignCharUnits = 
-    CGF.CGM.getContext().toCharUnitsFromBits(ContainingTypeAlign);
+  CharUnits StorageSize =
+    CGF.CGM.getContext().toCharUnitsFromBits(
+      llvm::RoundUpToAlignment(BitOffset + BitFieldSize, AlignmentBits));
+  CharUnits Alignment = CGF.CGM.getContext().toCharUnitsFromBits(AlignmentBits);
 
   // Allocate a new CGBitFieldInfo object to describe this access.
   //
@@ -135,11 +132,15 @@
   // objects.
   CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
     CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
-                             ContainingTypeSize, ContainingTypeAlign));
+                             CGF.CGM.getContext().toBits(StorageSize),
+                             Alignment.getQuantity()));
 
+  V = CGF.Builder.CreateBitCast(V,
+                                llvm::Type::getIntNPtrTy(CGF.getLLVMContext(),
+                                                         Info->StorageSize));
   return LValue::MakeBitfield(V, *Info,
                               IvarTy.withCVRQualifiers(CVRQualifiers),
-                              ContainingTypeAlignCharUnits);
+                              Alignment);
 }
 
 namespace {
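
As a worked example of the storage computation above (illustrative numbers,
not taken from the patch): an ivar bitfield 9 bits wide whose
FieldBitOffset is 10 gives

  BitOffset   = 10 % 8                       = 2 bits into the unit
  StorageSize = RoundUpToAlignment(2 + 9, 8) = 16 bits, i.e. an i16* access
  Alignment   = 8 bits                       = 1 byte (char alignment)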

Modified: cfe/trunk/lib/CodeGen/CGRecordLayout.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGRecordLayout.h?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGRecordLayout.h (original)
+++ cfe/trunk/lib/CodeGen/CGRecordLayout.h Thu Dec  6 05:14:44 2012
@@ -23,122 +23,71 @@
 namespace clang {
 namespace CodeGen {
 
-/// \brief Helper object for describing how to generate the code for access to a
-/// bit-field.
+/// \brief Structure with information about how a bitfield should be accessed.
 ///
-/// This structure is intended to describe the "policy" of how the bit-field
-/// should be accessed, which may be target, language, or ABI dependent.
-class CGBitFieldInfo {
-public:
-  /// Descriptor for a single component of a bit-field access. The entire
-  /// bit-field is constituted of a bitwise OR of all of the individual
-  /// components.
-  ///
-  /// Each component describes an accessed value, which is how the component
-  /// should be transferred to/from memory, and a target placement, which is how
-  /// that component fits into the constituted bit-field. The pseudo-IR for a
-  /// load is:
-  ///
-  ///   %0 = gep %base, 0, FieldIndex
-  ///   %1 = gep (i8*) %0, FieldByteOffset
-  ///   %2 = (i(AccessWidth) *) %1
-  ///   %3 = load %2, align AccessAlignment
-  ///   %4 = shr %3, FieldBitStart
-  ///
-  /// and the composed bit-field is formed as the boolean OR of all accesses,
-  /// masked to TargetBitWidth bits and shifted to TargetBitOffset.
-  struct AccessInfo {
-    /// Offset of the field to load in the LLVM structure, if any.
-    unsigned FieldIndex;
-
-    /// Byte offset from the field address, if any. This should generally be
-    /// unused as the cleanest IR comes from having a well-constructed LLVM type
-    /// with proper GEP instructions, but sometimes its use is required, for
-    /// example if an access is intended to straddle an LLVM field boundary.
-    CharUnits FieldByteOffset;
-
-    /// Bit offset in the accessed value to use. The width is implied by \see
-    /// TargetBitWidth.
-    unsigned FieldBitStart;
-
-    /// Bit width of the memory access to perform.
-    unsigned AccessWidth;
-
-    /// The alignment of the memory access, assuming the parent is aligned.
-    CharUnits AccessAlignment;
-
-    /// Offset for the target value.
-    unsigned TargetBitOffset;
-
-    /// Number of bits in the access that are destined for the bit-field.
-    unsigned TargetBitWidth;
-  };
-
-private:
-  /// The components to use to access the bit-field. We may need up to three
-  /// separate components to support up to i64 bit-field access (4 + 2 + 1 byte
-  /// accesses).
-  //
-  // FIXME: De-hardcode this, just allocate following the struct.
-  AccessInfo Components[3];
+/// Often we layout a sequence of bitfields as a contiguous sequence of bits.
+/// When the AST record layout does this, we represent it in the LLVM IR's type
+/// as either a sequence of i8 members or a byte array to reserve the number of
+/// bytes touched without forcing any particular alignment beyond the basic
+/// character alignment.
+///
+/// Then accessing a particular bitfield involves converting this byte array
+/// into a single integer of that size (i24 or i40 -- may not be power-of-two
+/// size), loading it, and shifting and masking to extract the particular
+/// subsequence of bits which make up that particular bitfield. This structure
+/// encodes the information used to construct the extraction code sequences.
+/// The CGRecordLayout also has a field index which encodes which byte-sequence
+/// this bitfield falls within. Let's assume the following C struct:
+///
+///   struct S {
+///     char a, b, c;
+///     unsigned bits : 3;
+///     unsigned more_bits : 4;
+///     unsigned still_more_bits : 7;
+///   };
+///
+/// This will end up as the following LLVM type. The i8 elements at indices
+/// 3 and 4 hold the bitfields' storage, and the trailing array is the
+/// padding out to a 4-byte alignment.
+///
+///   %t = type { i8, i8, i8, i8, i8, [3 x i8] }
+///
+/// When generating code to access more_bits, we'll generate something
+/// essentially like this:
+///
+///   define i32 @foo(%t* %base) {
+///     %0 = gep %t* %base, i32 0, i32 3
+///     %1 = load i8* %0
+///     %2 = lshr i8 %1, 3
+///     %3 = and i8 %2, 15
+///     %4 = zext i8 %3 to i32
+///     ret i32 %4
+///   }
+///
+struct CGBitFieldInfo {
+  /// The offset within a contiguous run of bitfields that are represented as
+  /// a single "field" within the LLVM struct type. This offset is in bits.
+  unsigned Offset : 16;
 
   /// The total size of the bit-field, in bits.
-  unsigned Size;
-
-  /// The number of access components to use.
-  unsigned NumComponents;
+  unsigned Size : 15;
 
   /// Whether the bit-field is signed.
-  bool IsSigned : 1;
-
-public:
-  CGBitFieldInfo(unsigned Size, unsigned NumComponents, AccessInfo *_Components,
-                 bool IsSigned) : Size(Size), NumComponents(NumComponents),
-                                  IsSigned(IsSigned) {
-    assert(NumComponents <= 3 && "invalid number of components!");
-    for (unsigned i = 0; i != NumComponents; ++i)
-      Components[i] = _Components[i];
-
-    // Check some invariants.
-    unsigned AccessedSize = 0;
-    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
-      const AccessInfo &AI = getComponent(i);
-      AccessedSize += AI.TargetBitWidth;
-
-      // We shouldn't try to load 0 bits.
-      assert(AI.TargetBitWidth > 0);
-
-      // We can't load more bits than we accessed.
-      assert(AI.FieldBitStart + AI.TargetBitWidth <= AI.AccessWidth);
-
-      // We shouldn't put any bits outside the result size.
-      assert(AI.TargetBitWidth + AI.TargetBitOffset <= Size);
-    }
-
-    // Check that the total number of target bits matches the total bit-field
-    // size.
-    assert(AccessedSize == Size && "Total size does not match accessed size!");
-  }
-
-public:
-  /// \brief Check whether this bit-field access is (i.e., should be sign
-  /// extended on loads).
-  bool isSigned() const { return IsSigned; }
-
-  /// \brief Get the size of the bit-field, in bits.
-  unsigned getSize() const { return Size; }
-
-  /// @name Component Access
-  /// @{
+  unsigned IsSigned : 1;
 
-  unsigned getNumComponents() const { return NumComponents; }
-
-  const AccessInfo &getComponent(unsigned Index) const {
-    assert(Index < getNumComponents() && "Invalid access!");
-    return Components[Index];
-  }
-
-  /// @}
+  /// The storage size in bits which should be used when accessing this
+  /// bitfield.
+  unsigned StorageSize;
+
+  /// The alignment which should be used when accessing the bitfield.
+  unsigned StorageAlignment;
+
+  CGBitFieldInfo()
+      : Offset(), Size(), IsSigned(), StorageSize(), StorageAlignment() {}
+
+  CGBitFieldInfo(unsigned Offset, unsigned Size, bool IsSigned,
+                 unsigned StorageSize, unsigned StorageAlignment)
+      : Offset(Offset), Size(Size), IsSigned(IsSigned),
+        StorageSize(StorageSize), StorageAlignment(StorageAlignment) {}
 
   void print(raw_ostream &OS) const;
   void dump() const;
@@ -146,17 +95,11 @@
   /// \brief Given a bit-field decl, build an appropriate helper object for
   /// accessing that field (which is expected to have the given offset and
   /// size).
-  static CGBitFieldInfo MakeInfo(class CodeGenTypes &Types, const FieldDecl *FD,
-                                 uint64_t FieldOffset, uint64_t FieldSize);
-
-  /// \brief Given a bit-field decl, build an appropriate helper object for
-  /// accessing that field (which is expected to have the given offset and
-  /// size). The field decl should be known to be contained within a type of at
-  /// least the given size and with the given alignment.
-  static CGBitFieldInfo MakeInfo(CodeGenTypes &Types, const FieldDecl *FD,
-                                 uint64_t FieldOffset, uint64_t FieldSize,
-                                 uint64_t ContainingTypeSizeInBits,
-                                 unsigned ContainingTypeAlign);
+  static CGBitFieldInfo MakeInfo(class CodeGenTypes &Types,
+                                 const FieldDecl *FD,
+                                 uint64_t Offset, uint64_t Size,
+                                 uint64_t StorageSize,
+                                 uint64_t StorageAlignment);
 };
 
 /// CGRecordLayout - This class handles struct and union layout info while
@@ -240,7 +183,6 @@
   /// \brief Return llvm::StructType element number that corresponds to the
   /// field FD.
   unsigned getLLVMFieldNo(const FieldDecl *FD) const {
-    assert(!FD->isBitField() && "Invalid call for bit-field decl!");
     assert(FieldInfo.count(FD) && "Invalid field for record!");
     return FieldInfo.lookup(FD);
   }
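
To tie the new fields together: for the struct S in the comment above, the
descriptor for more_bits would come out roughly like this (illustrative
values, assuming a typical 32-bit target where the three bitfields share a
single 16-bit storage unit starting at byte 3 of the struct):

  // 'bits' occupies bits 0..2 of the storage unit, so more_bits starts
  // at bit 3 and spans 4 bits; MinAlign(4, 3) gives byte alignment.
  CGBitFieldInfo Info(/*Offset=*/3, /*Size=*/4, /*IsSigned=*/false,
                      /*StorageSize=*/16, /*StorageAlignment=*/1);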

Modified: cfe/trunk/lib/CodeGen/CGRecordLayoutBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGRecordLayoutBuilder.cpp?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGRecordLayoutBuilder.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGRecordLayoutBuilder.cpp Thu Dec  6 05:14:44 2012
@@ -100,10 +100,6 @@
   /// Alignment - Contains the alignment of the RecordDecl.
   CharUnits Alignment;
 
-  /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
-  /// this will have the number of bits still available in the field.
-  char BitsAvailableInLastField;
-
   /// NextFieldOffset - Holds the next field offset.
   CharUnits NextFieldOffset;
 
@@ -115,6 +111,12 @@
   /// LayoutUnion - Will layout a union RecordDecl.
   void LayoutUnion(const RecordDecl *D);
 
+  /// Lay out a sequence of contiguous bitfields.
+  bool LayoutBitfields(const ASTRecordLayout &Layout,
+                       unsigned &FirstFieldNo,
+                       RecordDecl::field_iterator &FI,
+                       RecordDecl::field_iterator FE);
+
   /// LayoutField - try to layout all fields in the record decl.
   /// Returns false if the operation failed because the struct is not packed.
   bool LayoutFields(const RecordDecl *D);
@@ -194,7 +196,7 @@
     : BaseSubobjectType(0),
       IsZeroInitializable(true), IsZeroInitializableAsBase(true),
       Packed(false), IsMsStruct(false),
-      Types(Types), BitsAvailableInLastField(0) { }
+      Types(Types) { }
 
   /// Layout - Will layout a RecordDecl.
   void Layout(const RecordDecl *D);
@@ -230,13 +232,10 @@
 }
 
 CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
-                               const FieldDecl *FD,
-                               uint64_t FieldOffset,
-                               uint64_t FieldSize,
-                               uint64_t ContainingTypeSizeInBits,
-                               unsigned ContainingTypeAlign) {
-  assert(ContainingTypeAlign && "Expected alignment to be specified");
-
+                                        const FieldDecl *FD,
+                                        uint64_t Offset, uint64_t Size,
+                                        uint64_t StorageSize,
+                                        uint64_t StorageAlignment) {
   llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
   CharUnits TypeSizeInBytes =
     CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
@@ -244,7 +243,7 @@
 
   bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
 
-  if (FieldSize > TypeSizeInBits) {
+  if (Size > TypeSizeInBits) {
     // We have a wide bit-field. The extra bits are only used for padding, so
     // if we have a bitfield of type T, with size N:
     //
@@ -254,173 +253,127 @@
     //
     // T t : sizeof(T);
     //
-    FieldSize = TypeSizeInBits;
-  }
-
-  // in big-endian machines the first fields are in higher bit positions,
-  // so revert the offset. The byte offsets are reversed(back) later.
-  if (Types.getDataLayout().isBigEndian()) {
-    FieldOffset = ((ContainingTypeSizeInBits)-FieldOffset-FieldSize);
-  }
-
-  // Compute the access components. The policy we use is to start by attempting
-  // to access using the width of the bit-field type itself and to always access
-  // at aligned indices of that type. If such an access would fail because it
-  // extends past the bound of the type, then we reduce size to the next smaller
-  // power of two and retry. The current algorithm assumes pow2 sized types,
-  // although this is easy to fix.
-  //
-  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
-  CGBitFieldInfo::AccessInfo Components[3];
-  unsigned NumComponents = 0;
-  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
-  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.
-
-  // If requested, widen the initial bit-field access to be register sized. The
-  // theory is that this is most likely to allow multiple accesses into the same
-  // structure to be coalesced, and that the backend should be smart enough to
-  // narrow the store if no coalescing is ever done.
-  //
-  // The subsequent code will handle align these access to common boundaries and
-  // guaranteeing that we do not access past the end of the structure.
-  if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
-    if (AccessWidth < Types.getTarget().getRegisterWidth())
-      AccessWidth = Types.getTarget().getRegisterWidth();
-  }
-
-  // Round down from the field offset to find the first access position that is
-  // at an aligned offset of the initial access type.
-  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);
-
-  // Adjust initial access size to fit within record.
-  while (AccessWidth > Types.getTarget().getCharWidth() &&
-         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
-    AccessWidth >>= 1;
-    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
-  }
-
-  while (AccessedTargetBits < FieldSize) {
-    // Check that we can access using a type of this size, without reading off
-    // the end of the structure. This can occur with packed structures and
-    // -fno-bitfield-type-align, for example.
-    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
-      // If so, reduce access size to the next smaller power-of-two and retry.
-      AccessWidth >>= 1;
-      assert(AccessWidth >= Types.getTarget().getCharWidth()
-             && "Cannot access under byte size!");
-      continue;
-    }
-
-    // Otherwise, add an access component.
-
-    // First, compute the bits inside this access which are part of the
-    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
-    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
-    // in the target that we are reading.
-    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
-    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
-    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
-    uint64_t AccessBitsInFieldSize =
-      std::min(AccessWidth + AccessStart,
-               FieldOffset + FieldSize) - AccessBitsInFieldStart;
-
-    assert(NumComponents < 3 && "Unexpected number of components!");
-    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
-    AI.FieldIndex = 0;
-    // FIXME: We still follow the old access pattern of only using the field
-    // byte offset. We should switch this once we fix the struct layout to be
-    // pretty.
-
-    // on big-endian machines we reverted the bit offset because first fields are
-    // in higher bits. But this also reverts the bytes, so fix this here by reverting
-    // the byte offset on big-endian machines.
-    if (Types.getDataLayout().isBigEndian()) {
-      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
-          ContainingTypeSizeInBits - AccessStart - AccessWidth);
-    } else {
-      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
-    }
-    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
-    AI.AccessWidth = AccessWidth;
-    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
-        llvm::MinAlign(ContainingTypeAlign, AccessStart));
-    AI.TargetBitOffset = AccessedTargetBits;
-    AI.TargetBitWidth = AccessBitsInFieldSize;
-
-    AccessStart += AccessWidth;
-    AccessedTargetBits += AI.TargetBitWidth;
+    Size = TypeSizeInBits;
   }
 
-  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
-  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
-}
-
-CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
-                                        const FieldDecl *FD,
-                                        uint64_t FieldOffset,
-                                        uint64_t FieldSize) {
-  const RecordDecl *RD = FD->getParent();
-  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
-  uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
-  unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());
-
-  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
-                  ContainingTypeAlign);
-}
-
-void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
-                                           uint64_t fieldOffset) {
-  uint64_t fieldSize = D->getBitWidthValue(Types.getContext());
-
-  if (fieldSize == 0)
-    return;
-
-  uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
-  CharUnits numBytesToAppend;
-  unsigned charAlign = Types.getContext().getTargetInfo().getCharAlign();
-
-  if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
-    assert(fieldOffset % charAlign == 0 && 
-           "Field offset not aligned correctly");
-
-    CharUnits fieldOffsetInCharUnits = 
-      Types.getContext().toCharUnitsFromBits(fieldOffset);
+  // Reverse the bit offsets for big endian machines.
+  if (Types.getDataLayout().isBigEndian())
+    Offset = Size - Offset - 1;
+
+  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageAlignment);
+}
+
+/// \brief Layout the range of bitfields from BFI to BFE as contiguous storage.
+bool CGRecordLayoutBuilder::LayoutBitfields(const ASTRecordLayout &Layout,
+                                            unsigned &FirstFieldNo,
+                                            RecordDecl::field_iterator &FI,
+                                            RecordDecl::field_iterator FE) {
+  assert(FI != FE);
+  uint64_t FirstFieldOffset = Layout.getFieldOffset(FirstFieldNo);
+  uint64_t NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
+
+  unsigned CharAlign = Types.getContext().getTargetInfo().getCharAlign();
+  assert(FirstFieldOffset % CharAlign == 0 &&
+         "First field offset is misaligned");
+  CharUnits FirstFieldOffsetInBytes
+    = Types.getContext().toCharUnitsFromBits(FirstFieldOffset);
+
+  unsigned StorageAlignment
+    = llvm::MinAlign(Alignment.getQuantity(),
+                     FirstFieldOffsetInBytes.getQuantity());
+
+  if (FirstFieldOffset < NextFieldOffsetInBits) {
+    CharUnits FieldOffsetInCharUnits =
+      Types.getContext().toCharUnitsFromBits(FirstFieldOffset);
 
     // Try to resize the last base field.
-    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
-      nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
+    if (!ResizeLastBaseFieldIfNecessary(FieldOffsetInCharUnits))
+      llvm_unreachable("We must be able to resize the last base if we need to "
+                       "pack bits into it.");
+
+    NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
+    assert(FirstFieldOffset >= NextFieldOffsetInBits);
+  }
+
+  // Append padding if necessary.
+  AppendPadding(Types.getContext().toCharUnitsFromBits(FirstFieldOffset),
+                CharUnits::One());
+
+  // Find the last bitfield in a contiguous run of bitfields.
+  RecordDecl::field_iterator BFI = FI;
+  unsigned LastFieldNo = FirstFieldNo;
+  uint64_t NextContiguousFieldOffset = FirstFieldOffset;
+  for (RecordDecl::field_iterator FJ = FI;
+       (FJ != FE && (*FJ)->isBitField() &&
+        NextContiguousFieldOffset == Layout.getFieldOffset(LastFieldNo) &&
+        (*FJ)->getBitWidthValue(Types.getContext()) != 0); FI = FJ++) {
+    NextContiguousFieldOffset += (*FJ)->getBitWidthValue(Types.getContext());
+    ++LastFieldNo;
+
+    // We must use packed structs for packed fields, and also unnamed bit
+    // fields since they don't affect the struct alignment.
+    if (!Packed && ((*FJ)->hasAttr<PackedAttr>() || !(*FJ)->getDeclName()))
+      return false;
   }
-
-  if (fieldOffset < nextFieldOffsetInBits) {
-    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
-    assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");
-
-    // The bitfield begins in the previous bit-field.
-    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
-      llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField, 
-                               charAlign));
-  } else {
-    assert(fieldOffset % charAlign == 0 && 
-           "Field offset not aligned correctly");
-
-    // Append padding if necessary.
-    AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset), 
-                  CharUnits::One());
-
-    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
-        llvm::RoundUpToAlignment(fieldSize, charAlign));
-
-    assert(!numBytesToAppend.isZero() && "No bytes to append!");
+  RecordDecl::field_iterator BFE = llvm::next(FI);
+  --LastFieldNo;
+  assert(LastFieldNo >= FirstFieldNo && "Empty run of contiguous bitfields");
+  FieldDecl *LastFD = *FI;
+
+  // Find the last bitfield's offset, add its size, and round it up to the
+  // character alignment to compute the storage required.
+  uint64_t LastFieldOffset = Layout.getFieldOffset(LastFieldNo);
+  uint64_t LastFieldSize = LastFD->getBitWidthValue(Types.getContext());
+  uint64_t TotalBits = (LastFieldOffset + LastFieldSize) - FirstFieldOffset;
+  CharUnits StorageBytes = Types.getContext().toCharUnitsFromBits(
+    llvm::RoundUpToAlignment(TotalBits, CharAlign));
+  uint64_t StorageBits = Types.getContext().toBits(StorageBytes);
+
+  // Grow the storage to encompass any known padding in the layout when doing
+  // so will make the storage a power-of-two. There are two cases when we can
+  // do this. The first is when we have a subsequent field and can widen up to
+  // its offset. The second is when the data size of the AST record layout is
+  // past the end of the current storage. The latter is true when there is tail
+  // padding on a struct and no members of a super class can be packed into it.
+  //
+  // Note that we widen the storage as much as possible here to express the
+  // maximum latitude the language provides, and rely on the backend to lower
+  // these in conjunction with shifts and masks to narrower operations where
+  // beneficial.
+  uint64_t EndOffset = Types.getContext().toBits(Layout.getDataSize());
+  if (BFE != FE)
+    // If there are more fields to be laid out, the offset at the end of the
+    // bitfield is the offset of the next field in the record.
+    EndOffset = Layout.getFieldOffset(LastFieldNo + 1);
+  assert(EndOffset >= (FirstFieldOffset + TotalBits) &&
+         "End offset is not past the end of the known storage bits.");
+  uint64_t SpaceBits = EndOffset - FirstFieldOffset;
+  uint64_t LongBits = Types.getContext().getTargetInfo().getLongWidth();
+  uint64_t WidenedBits = (StorageBits / LongBits) * LongBits +
+                         llvm::NextPowerOf2(StorageBits % LongBits - 1);
+  assert(WidenedBits >= StorageBits && "Widening shrunk the bits!");
+  if (WidenedBits <= SpaceBits) {
+    StorageBits = WidenedBits;
+    StorageBytes = Types.getContext().toCharUnitsFromBits(StorageBits);
+    assert(StorageBits == (uint64_t)Types.getContext().toBits(StorageBytes));
+  }
+
+  unsigned FieldIndex = FieldTypes.size();
+  AppendBytes(StorageBytes);
+
+  // Now walk the bitfields associating them with this field of storage and
+  // building up the bitfield specific info.
+  unsigned FieldNo = FirstFieldNo;
+  for (; BFI != BFE; ++BFI, ++FieldNo) {
+    FieldDecl *FD = *BFI;
+    uint64_t FieldOffset = Layout.getFieldOffset(FieldNo) - FirstFieldOffset;
+    uint64_t FieldSize = FD->getBitWidthValue(Types.getContext());
+    Fields[FD] = FieldIndex;
+    BitFields[FD] = CGBitFieldInfo::MakeInfo(Types, FD, FieldOffset, FieldSize,
+                                             StorageBits, StorageAlignment);
   }
-
-  // Add the bit field info.
-  BitFields.insert(std::make_pair(D,
-                   CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));
-
-  AppendBytes(numBytesToAppend);
-
-  BitsAvailableInLastField =
-    Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
+  FirstFieldNo = LastFieldNo;
+  return true;
 }
 
 bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
@@ -429,15 +382,7 @@
   if (!Packed && D->hasAttr<PackedAttr>())
     return false;
 
-  if (D->isBitField()) {
-    // We must use packed structs for unnamed bit fields since they
-    // don't affect the struct alignment.
-    if (!Packed && !D->getDeclName())
-      return false;
-
-    LayoutBitField(D, fieldOffset);
-    return true;
-  }
+  assert(!D->isBitField() && "Bitfields should be laid out separately.");
 
   CheckZeroInitializable(D->getType());
 
@@ -497,6 +442,7 @@
 llvm::Type *
 CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                         const ASTRecordLayout &Layout) {
+  Fields[Field] = 0;
   if (Field->isBitField()) {
     uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());
 
@@ -504,22 +450,23 @@
     if (FieldSize == 0)
       return 0;
 
-    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
-    CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
-      llvm::RoundUpToAlignment(FieldSize, 
-                               Types.getContext().getTargetInfo().getCharAlign()));
+    unsigned StorageBits = llvm::RoundUpToAlignment(
+      FieldSize, Types.getContext().getTargetInfo().getCharAlign());
+    CharUnits NumBytesToAppend
+      = Types.getContext().toCharUnitsFromBits(StorageBits);
 
+    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
     if (NumBytesToAppend > CharUnits::One())
       FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());
 
     // Add the bit field info.
-    BitFields.insert(std::make_pair(Field,
-                         CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
+    BitFields[Field] = CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize,
+                                                StorageBits,
+                                                Alignment.getQuantity());
     return FieldTy;
   }
 
   // This is a regular union field.
-  Fields[Field] = 0;
   return Types.ConvertTypeForMem(Field->getType());
 }
 
@@ -815,20 +762,38 @@
   unsigned FieldNo = 0;
   const FieldDecl *LastFD = 0;
   
-  for (RecordDecl::field_iterator Field = D->field_begin(),
-       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+  for (RecordDecl::field_iterator FI = D->field_begin(), FE = D->field_end();
+       FI != FE; ++FI, ++FieldNo) {
+    FieldDecl *FD = *FI;
     if (IsMsStruct) {
       // Zero-length bitfields following non-bitfield members are
       // ignored:
-      const FieldDecl *FD = *Field;
       if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
         --FieldNo;
         continue;
       }
       LastFD = FD;
     }
-    
-    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
+
+    // If this field is a bitfield, layout all of the consecutive
+    // non-zero-length bitfields and the last zero-length bitfield; these will
+    // all share storage.
+    if (FD->isBitField()) {
+      // If all we have is a zero-width bitfield, skip it.
+      if (FD->getBitWidthValue(Types.getContext()) == 0)
+        continue;
+
+      // Layout this range of bitfields.
+      if (!LayoutBitfields(Layout, FieldNo, FI, FE)) {
+        assert(!Packed &&
+               "Could not layout bitfields even with a packed LLVM struct!");
+        return false;
+      }
+      assert(FI != FE && "Advanced past the last bitfield");
+      continue;
+    }
+
+    if (!LayoutField(FD, Layout.getFieldOffset(FieldNo))) {
       assert(!Packed &&
              "Could not layout fields even with a packed LLVM struct!");
       return false;
@@ -889,7 +854,6 @@
   FieldTypes.push_back(fieldType);
 
   NextFieldOffset = fieldOffset + fieldSize;
-  BitsAvailableInLastField = 0;
 }
 
 void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
@@ -1090,18 +1054,30 @@
       LastFD = FD;
       continue;
     }
-    
-    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
-    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
-      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
 
-      // Verify that every component access is within the structure.
-      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
-      uint64_t AccessBitOffset = FieldOffset +
-        getContext().toBits(AI.FieldByteOffset);
-      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
-             "Invalid bit-field access (out of range)!");
+    // Don't inspect zero-length bitfields.
+    if (FD->getBitWidthValue(getContext()) == 0)
+      continue;
+
+    unsigned FieldNo = RL->getLLVMFieldNo(FD);
+    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
+    llvm::Type *ElementTy = ST->getTypeAtIndex(FieldNo);
+    // Unions have overlapping elements dictating their layout, but for
+    // non-unions we can verify that this section of the layout is the exact
+    // expected size. For unions we verify that the start is zero and the size
+    // is in-bounds.
+    if (D->isUnion()) {
+      assert(Info.Offset == 0 && "Union bitfield with a non-zero offset");
+      assert(Info.StorageSize <= SL->getSizeInBits() &&
+             "Union not large enough for bitfield storage");
+    } else {
+      assert(Info.StorageSize ==
+             getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
+             "Storage size does not match the element type size");
     }
+    assert(Info.Size > 0 && "Empty bitfield!");
+    assert(Info.Offset + Info.Size <= Info.StorageSize &&
+           "Bitfield outside of its allocated storage");
   }
 #endif
 
@@ -1143,32 +1119,12 @@
 }
 
 void CGBitFieldInfo::print(raw_ostream &OS) const {
-  OS << "<CGBitFieldInfo";
-  OS << " Size:" << Size;
-  OS << " IsSigned:" << IsSigned << "\n";
-
-  OS.indent(4 + strlen("<CGBitFieldInfo"));
-  OS << " NumComponents:" << getNumComponents();
-  OS << " Components: [";
-  if (getNumComponents()) {
-    OS << "\n";
-    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
-      const AccessInfo &AI = getComponent(i);
-      OS.indent(8);
-      OS << "<AccessInfo"
-         << " FieldIndex:" << AI.FieldIndex
-         << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
-         << " FieldBitStart:" << AI.FieldBitStart
-         << " AccessWidth:" << AI.AccessWidth << "\n";
-      OS.indent(8 + strlen("<AccessInfo"));
-      OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
-         << " TargetBitOffset:" << AI.TargetBitOffset
-         << " TargetBitWidth:" << AI.TargetBitWidth
-         << ">\n";
-    }
-    OS.indent(4);
-  }
-  OS << "]>";
+  OS << "<CGBitFieldInfo"
+     << " Offset:" << Offset
+     << " Size:" << Size
+     << " IsSigned:" << IsSigned
+     << " StorageSize:" << StorageSize
+     << " StorageAlignment:" << StorageAlignment << ">";
 }
 
 void CGBitFieldInfo::dump() const {
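
The storage-widening computation in LayoutBitfields above is dense; here is
a standalone C++ sketch of the same arithmetic (the lambda reimplements
llvm::NextPowerOf2, which returns the next power of two strictly greater
than its argument):

  #include <cstdint>

  // Widen StorageBits to whole "long" units plus a power-of-two remainder.
  uint64_t widenStorage(uint64_t StorageBits, uint64_t LongBits) {
    auto NextPowerOf2 = [](uint64_t A) {
      A |= (A >> 1); A |= (A >> 2); A |= (A >> 4);
      A |= (A >> 8); A |= (A >> 16); A |= (A >> 32);
      return A + 1;
    };
    // NextPowerOf2(r - 1) rounds a remainder r up to a power of two; when
    // the remainder is 0 the wrap-around conveniently contributes 0.
    return (StorageBits / LongBits) * LongBits +
           NextPowerOf2(StorageBits % LongBits - 1);
  }

  // e.g. widenStorage(24, 32) == 32, and widenStorage(40, 32) == 40 since
  // 40 == 32 + NextPowerOf2(7); the caller only keeps the widened size
  // when it still fits in the known padding (WidenedBits <= SpaceBits).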

Modified: cfe/trunk/lib/CodeGen/CGValue.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGValue.h?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGValue.h (original)
+++ cfe/trunk/lib/CodeGen/CGValue.h Thu Dec  6 05:14:44 2012
@@ -28,7 +28,7 @@
 namespace clang {
 namespace CodeGen {
   class AggValueSlot;
-  class CGBitFieldInfo;
+  struct CGBitFieldInfo;
 
 /// RValue - This trivial value class is used to represent the result of an
 /// expression that is evaluated.  It can be one of three things: either a
@@ -246,7 +246,7 @@
   }
 
   // bitfield lvalue
-  llvm::Value *getBitFieldBaseAddr() const {
+  llvm::Value *getBitFieldAddr() const {
     assert(isBitField());
     return V;
   }
@@ -290,16 +290,16 @@
 
   /// \brief Create a new object to represent a bit-field access.
   ///
-  /// \param BaseValue - The base address of the structure containing the
-  /// bit-field.
+  /// \param Addr - The base address of the bit-field sequence this
+  /// bit-field refers to.
   /// \param Info - The information describing how to perform the bit-field
   /// access.
-  static LValue MakeBitfield(llvm::Value *BaseValue,
+  static LValue MakeBitfield(llvm::Value *Addr,
                              const CGBitFieldInfo &Info,
                              QualType type, CharUnits Alignment) {
     LValue R;
     R.LVType = BitField;
-    R.V = BaseValue;
+    R.V = Addr;
     R.BitFieldInfo = &Info;
     R.Initialize(type, type.getQualifiers(), Alignment);
     return R;
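
Note the changed contract here, as a hedged usage sketch (names are
illustrative, assuming Addr was produced the way EmitLValueForField now
produces it):

  // Addr must point at the bitfield's storage unit (an iN* matching
  // Info.StorageSize), not at the base address of the containing struct.
  LValue LV = LValue::MakeBitfield(Addr, Info, FieldType, Align);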

Modified: cfe/trunk/test/CodeGen/2008-01-07-UnusualIntSize.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/2008-01-07-UnusualIntSize.c?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/2008-01-07-UnusualIntSize.c (original)
+++ cfe/trunk/test/CodeGen/2008-01-07-UnusualIntSize.c Thu Dec  6 05:14:44 2012
@@ -8,8 +8,8 @@
 // This should have %0 and %1 truncated to 33 bits before any operation.
 // This can be done using i33 or an explicit and.
 _Bool test(void) {
-  // CHECK: and i64 %[[TMP1:[0-9]+]], 8589934591
+  // CHECK: and i64 %[[TMP1:[^,]+]], 8589934591
   // CHECK-NOT: and i64 [[TMP1]], 8589934591
-  // CHECK: and i64 %{{[0-9]}}, 8589934591
+  // CHECK: and i64 %{{[^,]+}}, 8589934591
   return a.u33 + b.u33 != 0;
 }

Modified: cfe/trunk/test/CodeGen/PR4611-bitfield-layout.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/PR4611-bitfield-layout.c?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/PR4611-bitfield-layout.c (original)
+++ cfe/trunk/test/CodeGen/PR4611-bitfield-layout.c Thu Dec  6 05:14:44 2012
@@ -1,5 +1,6 @@
-// RUN: %clang_cc1 -triple i386-unknown-unknown %s -emit-llvm -o %t
-// RUN: grep "struct.object_entry = type { i8, \[2 x i8\], i8 }" %t
+// RUN: %clang_cc1 -triple i386-unknown-unknown %s -emit-llvm -o - | FileCheck %s
+//
+// CHECK: struct.object_entry = type { [4 x i8] }
 
 struct object_entry {
        unsigned int type:3, pack_id:16, depth:13;

Modified: cfe/trunk/test/CodeGen/bitfield-2.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/bitfield-2.c?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/bitfield-2.c (original)
+++ cfe/trunk/test/CodeGen/bitfield-2.c Thu Dec  6 05:14:44 2012
@@ -14,12 +14,7 @@
 // CHECK-RECORD:   LLVMType:%struct.s0 = type <{ [3 x i8] }>
 // CHECK-RECORD:   IsZeroInitializable:1
 // CHECK-RECORD:   BitFields:[
-// CHECK-RECORD:     <CGBitFieldInfo Size:24 IsSigned:1
-// CHECK-RECORD:                     NumComponents:2 Components: [
-// CHECK-RECORD:         <AccessInfo FieldIndex:0 FieldByteOffset:0 FieldBitStart:0 AccessWidth:16
-// CHECK-RECORD:                     AccessAlignment:1 TargetBitOffset:0 TargetBitWidth:16>
-// CHECK-RECORD:         <AccessInfo FieldIndex:0 FieldByteOffset:2 FieldBitStart:0 AccessWidth:8
-// CHECK-RECORD:                     AccessAlignment:1 TargetBitOffset:16 TargetBitWidth:8>
+// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:24 IsSigned:1 StorageSize:24 StorageAlignment:1>
 struct __attribute((packed)) s0 {
   int f0 : 24;
 };
@@ -56,20 +51,11 @@
 // CHECK-RECORD: *** Dumping IRgen Record Layout
 // CHECK-RECORD: Record: struct s1
 // CHECK-RECORD: Layout: <CGRecordLayout
-// CHECK-RECORD:   LLVMType:%struct.s1 = type <{ [2 x i8], i8 }>
+// CHECK-RECORD:   LLVMType:%struct.s1 = type <{ [3 x i8] }>
 // CHECK-RECORD:   IsZeroInitializable:1
 // CHECK-RECORD:   BitFields:[
-// CHECK-RECORD:     <CGBitFieldInfo Size:10 IsSigned:1
-// CHECK-RECORD:                     NumComponents:1 Components: [
-// CHECK-RECORD:         <AccessInfo FieldIndex:0 FieldByteOffset:0 FieldBitStart:0 AccessWidth:16
-// CHECK-RECORD:                     AccessAlignment:1 TargetBitOffset:0 TargetBitWidth:10>
-// CHECK-RECORD:     ]>
-// CHECK-RECORD:     <CGBitFieldInfo Size:10 IsSigned:1
-// CHECK-RECORD:                     NumComponents:2 Components: [
-// CHECK-RECORD:         <AccessInfo FieldIndex:0 FieldByteOffset:0 FieldBitStart:10 AccessWidth:16
-// CHECK-RECORD:                     AccessAlignment:1 TargetBitOffset:0 TargetBitWidth:6>
-// CHECK-RECORD:         <AccessInfo FieldIndex:0 FieldByteOffset:2 FieldBitStart:0 AccessWidth:8
-// CHECK-RECORD:                     AccessAlignment:1 TargetBitOffset:6 TargetBitWidth:4>
+// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:24 StorageAlignment:1>
+// CHECK-RECORD:     <CGBitFieldInfo Offset:10 Size:10 IsSigned:1 StorageSize:24 StorageAlignment:1>
 
 #pragma pack(push)
 #pragma pack(1)
@@ -116,10 +102,7 @@
 // CHECK-RECORD:   LLVMType:%union.u2 = type <{ i8 }>
 // CHECK-RECORD:   IsZeroInitializable:1
 // CHECK-RECORD:   BitFields:[
-// CHECK-RECORD:     <CGBitFieldInfo Size:3 IsSigned:0
-// CHECK-RECORD:                     NumComponents:1 Components: [
-// CHECK-RECORD:         <AccessInfo FieldIndex:0 FieldByteOffset:0 FieldBitStart:0 AccessWidth:8
-// CHECK-RECORD:                     AccessAlignment:1 TargetBitOffset:0 TargetBitWidth:3>
+// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageAlignment:1>
 
 union __attribute__((packed)) u2 {
   unsigned long long f0 : 3;
@@ -291,15 +274,8 @@
 // CHECK-RECORD:   LLVMType:%struct.s7 = type { i32, i32, i32, i8, [3 x i8], [4 x i8], [12 x i8] }
 // CHECK-RECORD:   IsZeroInitializable:1
 // CHECK-RECORD:   BitFields:[
-// CHECK-RECORD:     <CGBitFieldInfo Size:5 IsSigned:1
-// CHECK-RECORD:                     NumComponents:1 Components: [
-// CHECK-RECORD:         <AccessInfo FieldIndex:0 FieldByteOffset:12 FieldBitStart:0 AccessWidth:32
-// CHECK-RECORD:                     AccessAlignment:4 TargetBitOffset:0 TargetBitWidth:5>
-// CHECK-RECORD:     ]>
-// CHECK-RECORD:     <CGBitFieldInfo Size:29 IsSigned:1
-// CHECK-RECORD:                     NumComponents:1 Components: [
-// CHECK-RECORD:         <AccessInfo FieldIndex:0 FieldByteOffset:16 FieldBitStart:0 AccessWidth:32
-// CHECK-RECORD:                     AccessAlignment:16 TargetBitOffset:0 TargetBitWidth:29>
+// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageAlignment:4>
+// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:29 IsSigned:1 StorageSize:32 StorageAlignment:16>
 
 struct __attribute__((aligned(16))) s7 {
   int a, b, c;

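A reading guide for the new dump format, since it appears throughout this file: each bitfield now carries a single record rather than a list of AccessInfo components. As we read it, Offset is the bit offset of the field within its storage unit, Size is its width in bits, StorageSize is the width of the lump access, and StorageAlignment is the byte alignment of that access. A hedged sketch of the extraction these parameters describe (our helper, not a Clang API):

    #include <cstdint>

    // Select Size bits starting at Offset out of a loaded storage unit;
    // Clang emits the equivalent shift/mask (or shl + ashr for signed
    // fields) directly as IR. The unsigned path assumes size < 64 for
    // brevity.
    int64_t extractBitField(uint64_t storage, unsigned offset,
                            unsigned size, bool isSigned) {
      if (isSigned)
        return (int64_t)(storage << (64 - offset - size)) >> (64 - size);
      return (storage >> offset) & ((1ULL << size) - 1);
    }
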
Modified: cfe/trunk/test/CodeGen/init.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/init.c?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/init.c (original)
+++ cfe/trunk/test/CodeGen/init.c Thu Dec  6 05:14:44 2012
@@ -130,5 +130,5 @@
   struct X { int a; int b : 10; int c; };
   struct X y = {.c = x};
   // CHECK: @test13
-  // CHECK: and i32 {{.*}}, -1024
+  // CHECK: and i16 {{.*}}, -1024
 }

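A quick check on the constant: the clearing mask for a 10-bit field at offset 0 is ~((1 << 10) - 1) = -1024, and with the new layout that field lives in a 16-bit storage unit, so only the width of the `and` changes, not the mask. In C++:

    #include <cstdint>

    // -1024 as an i16 is 0xFC00: every bit set except the low 10, i.e.
    // the mask that zeroes b:10 while preserving neighboring bits.
    static_assert((int16_t)~((1 << 10) - 1) == -1024, "clear mask for b:10");
    static_assert((uint16_t)-1024 == 0xFC00u, "same mask, as raw bits");
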
Modified: cfe/trunk/test/CodeGen/packed-nest-unpacked.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/packed-nest-unpacked.c?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/packed-nest-unpacked.c (original)
+++ cfe/trunk/test/CodeGen/packed-nest-unpacked.c Thu Dec  6 05:14:44 2012
@@ -60,6 +60,6 @@
 
 unsigned test7() {
   // CHECK: @test7
-  // CHECK: load i32* bitcast (%struct.XBitfield* getelementptr inbounds (%struct.YBitfield* @gbitfield, i32 0, i32 1) to i32*), align 1
+  // CHECK: load i32* bitcast (%struct.XBitfield* getelementptr inbounds (%struct.YBitfield* @gbitfield, i32 0, i32 1) to i32*), align 4
   return gbitfield.y.b2;
 }

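The only change here is the alignment annotation: the widened access now carries the natural alignment of its storage unit as placed in the object, rather than the old conservative align 1. A small sketch of that relationship, under the usual rule that an access's provable alignment is bounded by the lowest set bit of its byte offset (our arithmetic, not Clang code):

    // Largest power-of-two alignment provable for an access at byte
    // offset `off` inside an object aligned to `objAlign` bytes.
    constexpr unsigned accessAlign(unsigned objAlign, unsigned off) {
      return off == 0 ? objAlign
                      : ((off & -off) < objAlign ? (off & -off) : objAlign);
    }
    static_assert(accessAlign(4, 4) == 4, "i32 unit at offset 4 is 4-aligned");
    static_assert(accessAlign(4, 2) == 2, "an offset of 2 would demote it");
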
Modified: cfe/trunk/test/CodeGen/pr2394.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/pr2394.c?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/pr2394.c (original)
+++ cfe/trunk/test/CodeGen/pr2394.c Thu Dec  6 05:14:44 2012
@@ -1,7 +1,6 @@
 // RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s
 struct __attribute((packed)) x {int a : 24;};
 int a(struct x* g) {
-  // CHECK: load i16
-  // CHECK: load i8
+  // CHECK: load i24
   return g->a;
 }

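Where the old code stitched this packed 24-bit field together from an i16 load and an i8 load, the new code emits a single i24 load and leaves legalization to LLVM. A rough source-level emulation of the new access, assuming a little-endian target (the helper is ours):

    #include <cstdint>
    #include <cstring>

    struct __attribute((packed)) x { int a : 24; };

    // One 3-byte (i24) load, then sign-extend bits 0..23 via shl + ashr.
    int read_a(const struct x *g) {
      uint32_t raw = 0;
      std::memcpy(&raw, g, 3);
      return (int32_t)(raw << 8) >> 8;
    }
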
Modified: cfe/trunk/test/CodeGenCXX/2009-12-23-MissingSext.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/2009-12-23-MissingSext.cpp?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/test/CodeGenCXX/2009-12-23-MissingSext.cpp (original)
+++ cfe/trunk/test/CodeGenCXX/2009-12-23-MissingSext.cpp Thu Dec  6 05:14:44 2012
@@ -8,8 +8,12 @@
 };
 int bar(struct foo p, int x) {
 // CHECK: bar
-// CHECK: and {{.*}} 16777215
-// CHECK: and {{.*}} 16777215
+// CHECK: %[[val:.*]] = load i32* {{.*}}
+// CHECK-NEXT:          ashr i32 %[[val]]
+// CHECK:             = load i32* {{.*}}
+// CHECK:             = load i32* {{.*}}
+// CHECK: %[[val:.*]] = load i32* {{.*}}
+// CHECK-NEXT:          ashr i32 %[[val]]
   x = (p.y > x ? x : p.y);
   return x;
 // CHECK: ret

Added: cfe/trunk/test/CodeGenCXX/bitfield.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/bitfield.cpp?rev=169489&view=auto
==============================================================================
--- cfe/trunk/test/CodeGenCXX/bitfield.cpp (added)
+++ cfe/trunk/test/CodeGenCXX/bitfield.cpp Thu Dec  6 05:14:44 2012
@@ -0,0 +1,195 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -verify -emit-llvm -o - %s | FileCheck %s
+//
+// Tests for bitfield access patterns in C++ with special attention to
+// conformance to C++11 memory model requirements.
+
+namespace N1 {
+  // Ensure that neither loads nor stores to bitfields are widened into
+  // other memory locations. (PR13691)
+  //
+  // NOTE: We could potentially widen loads based on their alignment if we are
+  // comfortable requiring that subsequent memory locations within the
+  // alignment-widened load are not volatile.
+  struct S {
+    char a;
+    unsigned b : 1;
+    char c;
+  };
+  unsigned read(S* s) {
+    // CHECK: define i32 @_ZN2N14read
+    // CHECK:   %[[ptr:.*]] = getelementptr inbounds %{{.*}}* %{{.*}}, i32 0, i32 1
+    // CHECK:   %[[val:.*]] = load i8* %[[ptr]]
+    // CHECK:   %[[and:.*]] = and i8 %[[val]], 1
+    // CHECK:   %[[ext:.*]] = zext i8 %[[and]] to i32
+    // CHECK:                 ret i32 %[[ext]]
+    return s->b;
+  }
+  void write(S* s, unsigned x) {
+    // CHECK: define void @_ZN2N15write
+    // CHECK:   %[[ptr:.*]]     = getelementptr inbounds %{{.*}}* %{{.*}}, i32 0, i32 1
+    // CHECK:   %[[x_trunc:.*]] = trunc i32 %{{.*}} to i8
+    // CHECK:   %[[old:.*]]     = load i8* %[[ptr]]
+    // CHECK:   %[[x_and:.*]]   = and i8 %[[x_trunc]], 1
+    // CHECK:   %[[old_and:.*]] = and i8 %[[old]], -2
+    // CHECK:   %[[new:.*]]     = or i8 %[[old_and]], %[[x_and]]
+    // CHECK:                     store i8 %[[new]], i8* %[[ptr]]
+    s->b = x;
+  }
+}
+
+namespace N2 {
+  // Do widen loads and stores to bitfields when the bits they occupy are
+  // followed by padding within the struct.
+  struct S {
+    unsigned b : 24;
+    void *p;
+  };
+  unsigned read(S* s) {
+    // CHECK: define i32 @_ZN2N24read
+    // CHECK:   %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
+    // CHECK:   %[[val:.*]] = load i32* %[[ptr]]
+    // CHECK:   %[[and:.*]] = and i32 %[[val]], 16777215
+    // CHECK:                 ret i32 %[[and]]
+    return s->b;
+  }
+  void write(S* s, unsigned x) {
+    // CHECK: define void @_ZN2N25write
+    // CHECK:   %[[ptr:.*]]     = bitcast %{{.*}}* %{{.*}} to i32*
+    // CHECK:   %[[old:.*]]     = load i32* %[[ptr]]
+    // CHECK:   %[[x_and:.*]]   = and i32 %{{.*}}, 16777215
+    // CHECK:   %[[old_and:.*]] = and i32 %[[old]], -16777216
+    // CHECK:   %[[new:.*]]     = or i32 %[[old_and]], %[[x_and]]
+    // CHECK:                     store i32 %[[new]], i32* %[[ptr]]
+    s->b = x;
+  }
+}
+
+namespace N3 {
+  // Do widen loads and stores to bitfields through the trailing padding at the
+  // end of a struct.
+  struct S {
+    unsigned b : 24;
+  };
+  unsigned read(S* s) {
+    // CHECK: define i32 @_ZN2N34read
+    // CHECK:   %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
+    // CHECK:   %[[val:.*]] = load i32* %[[ptr]]
+    // CHECK:   %[[and:.*]] = and i32 %[[val]], 16777215
+    // CHECK:                 ret i32 %[[and]]
+    return s->b;
+  }
+  void write(S* s, unsigned x) {
+    // CHECK: define void @_ZN2N35write
+    // CHECK:   %[[ptr:.*]]     = bitcast %{{.*}}* %{{.*}} to i32*
+    // CHECK:   %[[old:.*]]     = load i32* %[[ptr]]
+    // CHECK:   %[[x_and:.*]]   = and i32 %{{.*}}, 16777215
+    // CHECK:   %[[old_and:.*]] = and i32 %[[old]], -16777216
+    // CHECK:   %[[new:.*]]     = or i32 %[[old_and]], %[[x_and]]
+    // CHECK:                     store i32 %[[new]], i32* %[[ptr]]
+    s->b = x;
+  }
+}
+
+namespace N4 {
+  // Do NOT widen loads and stores to bitfields into the padding at the end
+  // of a class, since that padding may be reused for members of a derived
+  // class.
+  struct Base {
+    virtual ~Base() {}
+
+    unsigned b : 24;
+  };
+  // Imagine some other translation unit introduces:
+#if 0
+  struct Derived : public Base {
+    char c;
+  };
+#endif
+  unsigned read(Base* s) {
+    // FIXME: We should widen this load as long as the function isn't being
+    // instrumented by thread-sanitizer.
+    //
+    // CHECK: define i32 @_ZN2N44read
+    // CHECK:   %[[ptr:.*]] = bitcast {{.*}}* %{{.*}} to i24*
+    // CHECK:   %[[val:.*]] = load i24* %[[ptr]]
+    // CHECK:   %[[ext:.*]] = zext i24 %[[val]] to i32
+    // CHECK:                 ret i32 %[[ext]]
+    return s->b;
+  }
+  void write(Base* s, unsigned x) {
+    // CHECK: define void @_ZN2N45write
+    // CHECK:   %[[ptr:.*]] = bitcast {{.*}}* %{{.*}} to i24*
+    // CHECK:   %[[new:.*]] = trunc i32 %{{.*}} to i24
+    // CHECK:                 store i24 %[[new]], i24* %[[ptr]]
+    s->b = x;
+  }
+}
+
+namespace N5 {
+  // Widen through padding at the end of a struct even if that struct
+  // participates in a union with another struct which has a separate field in
+  // that location. The reasoning is that if the operation is storing to that
+  // member of the union, it must be the active member, and thus we can write
+  // through the padding. If it is a load, it might be a load of a common
+  // prefix through a non-active member, but in such a case the extra bits
+  // loaded are masked off anyway.
+  union U {
+    struct X { unsigned b : 24; char c; } x;
+    struct Y { unsigned b : 24; } y;
+  };
+  unsigned read(U* u) {
+    // CHECK: define i32 @_ZN2N54read
+    // CHECK:   %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
+    // CHECK:   %[[val:.*]] = load i32* %[[ptr]]
+    // CHECK:   %[[and:.*]] = and i32 %[[val]], 16777215
+    // CHECK:                 ret i32 %[[and]]
+    return u->y.b;
+  }
+  void write(U* u, unsigned x) {
+    // CHECK: define void @_ZN2N55write
+    // CHECK:   %[[ptr:.*]]     = bitcast %{{.*}}* %{{.*}} to i32*
+    // CHECK:   %[[old:.*]]     = load i32* %[[ptr]]
+    // CHECK:   %[[x_and:.*]]   = and i32 %{{.*}}, 16777215
+    // CHECK:   %[[old_and:.*]] = and i32 %[[old]], -16777216
+    // CHECK:   %[[new:.*]]     = or i32 %[[old_and]], %[[x_and]]
+    // CHECK:                     store i32 %[[new]], i32* %[[ptr]]
+    u->y.b = x;
+  }
+}
+
+namespace N6 {
+  // Zero-length bitfields partition the memory locations of bitfields for the
+  // purposes of the memory model. That means stores must not span zero-length
+  // bitfields and loads may only span them when we are not instrumenting with
+  // thread sanitizer.
+  // FIXME: We currently don't widen loads even without thread sanitizer, even
+  // though we could.
+  struct S {
+    unsigned b1 : 24;
+    unsigned char : 0;
+    unsigned char b2 : 8;
+  };
+  unsigned read(S* s) {
+    // CHECK: define i32 @_ZN2N64read
+    // CHECK:   %[[ptr1:.*]] = bitcast {{.*}}* %{{.*}} to i24*
+    // CHECK:   %[[val1:.*]] = load i24* %[[ptr1]]
+    // CHECK:   %[[ext1:.*]] = zext i24 %[[val1]] to i32
+    // CHECK:   %[[ptr2:.*]] = getelementptr inbounds {{.*}}* %{{.*}}, i32 0, i32 1
+    // CHECK:   %[[val2:.*]] = load i8* %[[ptr2]]
+    // CHECK:   %[[ext2:.*]] = zext i8 %[[val2]] to i32
+    // CHECK:   %[[add:.*]]  = add nsw i32 %[[ext1]], %[[ext2]]
+    // CHECK:                  ret i32 %[[add]]
+    return s->b1 + s->b2;
+  }
+  void write(S* s, unsigned x) {
+    // CHECK: define void @_ZN2N65write
+    // CHECK:   %[[ptr1:.*]] = bitcast {{.*}}* %{{.*}} to i24*
+    // CHECK:   %[[new1:.*]] = trunc i32 %{{.*}} to i24
+    // CHECK:                  store i24 %[[new1]], i24* %[[ptr1]]
+    // CHECK:   %[[new2:.*]] = trunc i32 %{{.*}} to i8
+    // CHECK:   %[[ptr2:.*]] = getelementptr inbounds {{.*}}* %{{.*}}, i32 0, i32 1
+    // CHECK:                  store i8 %[[new2]], i8* %[[ptr2]]
+    s->b1 = x;
+    s->b2 = x;
+  }
+}

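Stepping back, the N2, N3, and N5 cases above all lower to the same read-modify-write shape over a widened 32-bit storage unit. A minimal sketch of that shape for `unsigned b : 24` in the low bits of the unit, matching the load/and/and/or/store sequence in the CHECK lines (the helper is illustrative, not Clang code):

    #include <cstdint>

    void write_b(uint32_t *storage, uint32_t x) {
      uint32_t old  = *storage;          // load i32
      uint32_t val  = x & 0x00FFFFFFu;   // and i32 %x,   16777215
      uint32_t rest = old & 0xFF000000u; // and i32 %old, -16777216
      *storage = rest | val;             // or i32 + store i32
    }
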
Modified: cfe/trunk/test/CodeGenCXX/references.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/references.cpp?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/test/CodeGenCXX/references.cpp (original)
+++ cfe/trunk/test/CodeGenCXX/references.cpp Thu Dec  6 05:14:44 2012
@@ -283,12 +283,11 @@
   void f() {
     // CHECK: call void @llvm.memcpy
     a x = { 0, 0 };
-    // CHECK: [[WITH_SEVENTEEN:%[a-zA-Z0-9]+]] = or i32 [[WITHOUT_SEVENTEEN:%[a-zA-Z0-9]+]], 17
-    // CHECK: store i32 [[WITH_SEVENTEEN]], i32* [[XA:%[a-zA-Z0-9]+]]
+    // CHECK: [[WITH_SEVENTEEN:%[.a-zA-Z0-9]+]] = or i32 [[WITHOUT_SEVENTEEN:%[.a-zA-Z0-9]+]], 17
+    // CHECK: store i32 [[WITH_SEVENTEEN]], i32* [[XA:%[.a-zA-Z0-9]+]]
     x.a = 17;
     // CHECK-NEXT: bitcast
-    // CHECK-NEXT: load 
-    // CHECK-NEXT: and
+    // CHECK-NEXT: load
     // CHECK-NEXT: shl
     // CHECK-NEXT: ashr
     // CHECK-NEXT: store i32
@@ -297,7 +296,7 @@
     // CHECK-NEXT: bitcast
     // CHECK-NEXT: load
     // CHECK-NEXT: and
-    // CHECK-NEXT: or
+    // CHECK-NEXT: or i32 {{.*}}, 19456
     // CHECK-NEXT: store i32
     x.b = 19;
     // CHECK-NEXT: ret void

Modified: cfe/trunk/test/CodeGenObjC/bitfield-access.m
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenObjC/bitfield-access.m?rev=169489&r1=169488&r2=169489&view=diff
==============================================================================
--- cfe/trunk/test/CodeGenObjC/bitfield-access.m (original)
+++ cfe/trunk/test/CodeGenObjC/bitfield-access.m Thu Dec  6 05:14:44 2012
@@ -15,8 +15,8 @@
 // end of the structure.
 //
 // CHECK-I386: define i32 @f0(
-// CHECK-I386:   [[t0_0:%.*]] = load i16* {{.*}}, align 1
-// CHECK-I386:   lshr i16 [[t0_0]], 7
+// CHECK-I386:   [[t0_0:%.*]] = load i8* {{.*}}, align 1
+// CHECK-I386:   lshr i8 [[t0_0]], 7
 // CHECK-I386: }
 int f0(I0 *a) {
   return a->y;
@@ -26,13 +26,11 @@
 //
 // CHECK-ARM: define i32 @f1(
 // CHECK-ARM:    [[t1_ptr:%.*]] = getelementptr
-// CHECK-ARM:    [[t1_base:%.*]] = bitcast i8* [[t1_ptr]] to i32*
-// CHECK-ARM:    [[t1_0:%.*]] = load i32* [[t1_base]], align 1
-// CHECK-ARM:    lshr i32 [[t1_0]], 1
-// CHECK-ARM:    [[t1_base_2_cast:%.*]] = bitcast i32* %{{.*}} to i8*
-// CHECK-ARM:    [[t1_base_2:%.*]] = getelementptr i8* [[t1_base_2_cast]]
-// CHECK-ARM:    [[t1_1:%.*]] = load i8* [[t1_base_2]], align 1
-// CHECK-ARM:    and i8 [[t1_1:%.*]], 1
+// CHECK-ARM:    [[t1_base:%.*]] = bitcast i8* [[t1_ptr]] to i40*
+// CHECK-ARM:    [[t1_0:%.*]] = load i40* [[t1_base]], align 1
+// CHECK-ARM:    [[t1_1:%.*]] = lshr i40 [[t1_0]], 1
+// CHECK-ARM:    [[t1_2:%.*]] = and i40 [[t1_1]],
+// CHECK-ARM:                   trunc i40 [[t1_2]] to i32
 // CHECK-ARM: }
 @interface I1 {
 @public

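The ARM case above is the most dramatic instance of the new model: five bytes of bitfield storage become one i40 load, with the field carved out by lshr, and, and trunc. A hedged C++ emulation of that pattern, assuming little-endian layout and a field starting one bit into the unit as in the CHECK lines (the helper is ours):

    #include <cstdint>
    #include <cstring>

    // load i40; lshr i40 %v, 1; and i40 %v, mask; trunc i40 %v to i32.
    uint32_t read_i40_field(const void *base, unsigned width) {
      uint64_t storage = 0;
      std::memcpy(&storage, base, 5);         // one 5-byte (i40) load
      uint64_t mask = (1ULL << width) - 1;    // width < 64 assumed
      return (uint32_t)((storage >> 1) & mask);
    }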