[clang] 49839f9 - [clang] Better SysV bitfield access units (#65742)

Nathan Sidwell via cfe-commits cfe-commits at lists.llvm.org
Fri Mar 29 06:36:08 PDT 2024


Author: Nathan Sidwell
Date: 2024-03-29T09:35:31-04:00
New Revision: 49839f97d2951e0b95d33aee00f00022952dab78

URL: https://github.com/llvm/llvm-project/commit/49839f97d2951e0b95d33aee00f00022952dab78
DIFF: https://github.com/llvm/llvm-project/commit/49839f97d2951e0b95d33aee00f00022952dab78.diff

LOG: [clang] Better SysV bitfield access units (#65742)

Reimplement bitfield access unit computation for SysV ABIs. Considers
expense of unaligned and non-power-of-2 accesses.
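
For illustration (adapted from the st17 case in
clang/test/CodeGen/aapcs-bitfield-access-unit.c, updated below), consider a
packed struct such as

  struct st17 {
    int b : 32;
    char c : 8;
  } __attribute__((packed));

Previously this was lowered with a single 40-bit access unit ({ [5 x i8] },
StorageSize:40 for both bit-fields); with this change it is lowered as two
naturally sized access units (<{ i32, i8 }>, StorageSize:32 and StorageSize:8),
so each bit-field is loaded and stored with a single access no wider than a
register.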

Added: 
    

Modified: 
    clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
    clang/test/CodeGen/aapcs-bitfield-access-unit.c
    clang/test/CodeGen/aapcs-bitfield.c
    clang/test/CodeGen/arm-bitfield-alignment.c
    clang/test/CodeGen/arm64-be-bitfield.c
    clang/test/CodeGen/bitfield-2.c
    clang/test/CodeGen/bitfield-access-pad.c
    clang/test/CodeGen/bitfield-access-unit.c
    clang/test/CodeGen/debug-info-bitfield-0-struct.c
    clang/test/CodeGen/no-bitfield-type-align.c
    clang/test/CodeGen/struct-x86-darwin.c
    clang/test/CodeGen/tbaa-struct.cpp
    clang/test/CodeGenCXX/bitfield-access-empty.cpp
    clang/test/CodeGenCXX/bitfield-access-tail.cpp
    clang/test/CodeGenCXX/bitfield-ir.cpp
    clang/test/CodeGenCXX/bitfield.cpp
    clang/test/OpenMP/atomic_capture_codegen.cpp
    clang/test/OpenMP/atomic_read_codegen.c
    clang/test/OpenMP/atomic_update_codegen.cpp
    clang/test/OpenMP/atomic_write_codegen.c

Removed: 
    


################################################################################
diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 7822903b89ce47..e32023aeac1e6f 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -47,8 +47,10 @@ namespace {
 ///   [i8 x 3] instead of i24.  The function clipTailPadding does this.
 ///   C++ examples that require clipping:
 ///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
-///   struct A { int a : 24; }; // a must be clipped because a struct like B
-//    could exist: struct B : A { char b; }; // b goes at offset 3
+///   struct A { int a : 24; ~A(); }; // a must be clipped because:
+///   struct B : A { char b; }; // b goes at offset 3
+/// * The allocation of bitfield access units is described in more detail in
+///   CGRecordLowering::accumulateBitFields.
 /// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
 ///   fields.  The existing asserts suggest that LLVM assumes that *every* field
 ///   has an underlying storage type.  Therefore empty structures containing
@@ -184,8 +186,9 @@ struct CGRecordLowering {
   void lower(bool NonVirtualBaseType);
   void lowerUnion(bool isNoUniqueAddress);
   void accumulateFields();
-  void accumulateBitFields(RecordDecl::field_iterator Field,
-                           RecordDecl::field_iterator FieldEnd);
+  RecordDecl::field_iterator
+  accumulateBitFields(RecordDecl::field_iterator Field,
+                      RecordDecl::field_iterator FieldEnd);
   void computeVolatileBitfields();
   void accumulateBases();
   void accumulateVPtrs();
@@ -378,13 +381,15 @@ void CGRecordLowering::lowerUnion(bool isNoUniqueAddress) {
 void CGRecordLowering::accumulateFields() {
   for (RecordDecl::field_iterator Field = D->field_begin(),
                                   FieldEnd = D->field_end();
-    Field != FieldEnd;) {
+       Field != FieldEnd;) {
     if (Field->isBitField()) {
-      RecordDecl::field_iterator Start = Field;
-      // Iterate to gather the list of bitfields.
-      for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
-      accumulateBitFields(Start, Field);
-    } else if (!Field->isZeroSize(Context)) {
+      Field = accumulateBitFields(Field, FieldEnd);
+      assert((Field == FieldEnd || !Field->isBitField()) &&
+             "Failed to accumulate all the bitfields");
+    } else if (Field->isZeroSize(Context)) {
+      // Empty fields have no storage.
+      ++Field;
+    } else {
       // Use base subobject layout for the potentially-overlapping field,
       // as it is done in RecordLayoutBuilder
       Members.push_back(MemberInfo(
@@ -394,33 +399,33 @@ void CGRecordLowering::accumulateFields() {
               : getStorageType(*Field),
           *Field));
       ++Field;
-    } else {
-      ++Field;
     }
   }
 }
 
-void
+// Create members for bitfields. Field is a bitfield, and FieldEnd is the end
+// iterator of the record. Return the first non-bitfield encountered.
+RecordDecl::field_iterator
 CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                       RecordDecl::field_iterator FieldEnd) {
-  // Run stores the first element of the current run of bitfields.  FieldEnd is
-  // used as a special value to note that we don't have a current run.  A
-  // bitfield run is a contiguous collection of bitfields that can be stored in
-  // the same storage block.  Zero-sized bitfields and bitfields that would
-  // cross an alignment boundary break a run and start a new one.
-  RecordDecl::field_iterator Run = FieldEnd;
-  // Tail is the offset of the first bit off the end of the current run.  It's
-  // used to determine if the ASTRecordLayout is treating these two bitfields as
-  // contiguous.  StartBitOffset is offset of the beginning of the Run.
-  uint64_t StartBitOffset, Tail = 0;
   if (isDiscreteBitFieldABI()) {
-    for (; Field != FieldEnd; ++Field) {
-      uint64_t BitOffset = getFieldBitOffset(*Field);
+    // Run stores the first element of the current run of bitfields. FieldEnd is
+    // used as a special value to note that we don't have a current run. A
+    // bitfield run is a contiguous collection of bitfields that can be stored
+    // in the same storage block. Zero-sized bitfields and bitfields that would
+    // cross an alignment boundary break a run and start a new one.
+    RecordDecl::field_iterator Run = FieldEnd;
+    // Tail is the offset of the first bit off the end of the current run. It's
+    // used to determine if the ASTRecordLayout is treating these two bitfields
+    // as contiguous. StartBitOffset is offset of the beginning of the Run.
+    uint64_t StartBitOffset, Tail = 0;
+    for (; Field != FieldEnd && Field->isBitField(); ++Field) {
       // Zero-width bitfields end runs.
       if (Field->isZeroLengthBitField(Context)) {
         Run = FieldEnd;
         continue;
       }
+      uint64_t BitOffset = getFieldBitOffset(*Field);
       llvm::Type *Type =
           Types.ConvertTypeForMem(Field->getType(), /*ForBitField=*/true);
       // If we don't have a run yet, or don't live within the previous run's
@@ -439,82 +444,248 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
       Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                    MemberInfo::Field, nullptr, *Field));
     }
-    return;
+    return Field;
   }
 
-  // Check if OffsetInRecord (the size in bits of the current run) is better
-  // as a single field run. When OffsetInRecord has legal integer width, and
-  // its bitfield offset is naturally aligned, it is better to make the
-  // bitfield a separate storage component so as it can be accessed directly
-  // with lower cost.
-  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
-                                      uint64_t StartBitOffset) {
-    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
-      return false;
-    if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
-        !DataLayout.fitsInLegalInteger(OffsetInRecord))
-      return false;
-    // Make sure StartBitOffset is naturally aligned if it is treated as an
-    // IType integer.
-    if (StartBitOffset %
-            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
-        0)
-      return false;
-    return true;
-  };
+  // The SysV ABI can overlap bitfield storage units with both other bitfield
+  // storage units /and/ other non-bitfield data members. Accessing a sequence
+  // of bitfields mustn't interfere with adjacent non-bitfields -- they're
+  // permitted to be accessed in separate threads for instance.
+
+  // We split runs of bit-fields into a sequence of "access units". When we emit
+  // a load or store of a bit-field, we'll load/store the entire containing
+  // access unit. As mentioned, the standard requires that these loads and
+  // stores must not interfere with accesses to other memory locations, and it
+  // defines the bit-field's memory location as the current run of
+  // non-zero-width bit-fields. So an access unit must never overlap with
+  // non-bit-field storage or cross a zero-width bit-field. Otherwise, we're
+  // free to draw the lines as we see fit.
+
+  // Drawing these lines well can be complicated. LLVM generally can't modify a
+  // program to access memory that it didn't before, so using very narrow access
+  // units can prevent the compiler from using optimal access patterns. For
+  // example, suppose a run of bit-fields occupies four bytes in a struct. If we
+  // split that into four 1-byte access units, then a sequence of assignments
+  // that doesn't touch all four bytes may have to be emitted with multiple
+  // 8-bit stores instead of a single 32-bit store. On the other hand, if we use
+  // very wide access units, we may find ourselves emitting accesses to
+  // bit-fields we didn't really need to touch, just because LLVM was unable to
+  // clean up after us.
+
+  // It is desirable to have access units be aligned powers of 2 no larger than
+  // a register. (On non-strict alignment ISAs, the alignment requirement can be
+  // dropped.) A three byte access unit will be accessed using 2-byte and 1-byte
+  // accesses and bit manipulation. If no bitfield straddles across the two
+  // separate accesses, it is better to have separate 2-byte and 1-byte access
+  // units, as then LLVM will not generate unnecessary memory accesses, or bit
+  // manipulation. Similarly, on a strict-alignment architecture, it is better
+  // to keep access-units naturally aligned, to avoid similar bit
+  // manipulation synthesizing larger unaligned accesses.
+
+  // Bitfields that share parts of a single byte are, of necessity, placed in
+  // the same access unit. That unit will encompass a consecutive run where
+  // adjacent bitfields share parts of a byte. (The first bitfield of such an
+  // access unit will start at the beginning of a byte.)
+
+  // We then try and accumulate adjacent access units when the combined unit is
+  // naturally sized, no larger than a register, and (on a strict alignment
+  // ISA), naturally aligned. Note that this requires lookahead to one or more
+  // subsequent access units. For instance, consider a 2-byte access-unit
+  // followed by 2 1-byte units. We can merge that into a 4-byte access-unit,
+  // but we would not want to merge a 2-byte followed by a single 1-byte (and no
+  // available tail padding). We keep track of the best access unit seen so far,
+  // and use that when we determine we cannot accumulate any more. Then we start
+  // again at the bitfield following that best one.
+
+  // The accumulation is also prevented when:
+  // *) it would cross a character-aligned zero-width bitfield, or
+  // *) fine-grained bitfield access option is in effect.
+
+  CharUnits RegSize =
+      bitsToCharUnits(Context.getTargetInfo().getRegisterWidth());
+  unsigned CharBits = Context.getCharWidth();
+
+  // Data about the start of the span we're accumulating to create an access
+  // unit from. Begin is the first bitfield of the span. If Begin is FieldEnd,
+  // we've not got a current span. The span starts at the BeginOffset character
+  // boundary. BitSizeSinceBegin is the size (in bits) of the span -- this might
+  // include padding when we've advanced to a subsequent bitfield run.
+  RecordDecl::field_iterator Begin = FieldEnd;
+  CharUnits BeginOffset;
+  uint64_t BitSizeSinceBegin;
+
+  // The (non-inclusive) end of the largest acceptable access unit we've found
+  // since Begin. If this is Begin, we're gathering the initial set of bitfields
+  // of a new span. BestEndOffset is the end of that acceptable access unit --
+  // it might extend beyond the last character of the bitfield run, using
+  // available padding characters.
+  RecordDecl::field_iterator BestEnd = Begin;
+  CharUnits BestEndOffset;
 
-  // The start field is better as a single field run.
-  bool StartFieldAsSingleRun = false;
   for (;;) {
-    // Check to see if we need to start a new run.
-    if (Run == FieldEnd) {
-      // If we're out of fields, return.
-      if (Field == FieldEnd)
+    // AtAlignedBoundary is true iff Field is the (potential) start of a new
+    // span (or the end of the bitfields). When true, LimitOffset is the
+    // character offset of that span and Barrier indicates whether the new
+    // span cannot be merged into the current one.
+    bool AtAlignedBoundary = false;
+    bool Barrier = false;
+
+    if (Field != FieldEnd && Field->isBitField()) {
+      uint64_t BitOffset = getFieldBitOffset(*Field);
+      if (Begin == FieldEnd) {
+        // Beginning a new span.
+        Begin = Field;
+        BestEnd = Begin;
+
+        assert((BitOffset % CharBits) == 0 && "Not at start of char");
+        BeginOffset = bitsToCharUnits(BitOffset);
+        BitSizeSinceBegin = 0;
+      } else if ((BitOffset % CharBits) != 0) {
+        // Bitfield occupies the same character as previous bitfield, it must be
+        // part of the same span. This can include zero-length bitfields, should
+        // the target not align them to character boundaries. Such non-alignment
+        // is at variance with the standards, which require zero-length
+        // bitfields be a barrier between access units. But of course we can't
+        // achieve that in the middle of a character.
+        assert(BitOffset == Context.toBits(BeginOffset) + BitSizeSinceBegin &&
+               "Concatenating non-contiguous bitfields");
+      } else {
+        // Bitfield potentially begins a new span. This includes zero-length
+        // bitfields on non-aligning targets that lie at character boundaries
+        // (those are barriers to merging).
+        if (Field->isZeroLengthBitField(Context))
+          Barrier = true;
+        AtAlignedBoundary = true;
+      }
+    } else {
+      // We've reached the end of the bitfield run. Either we're done, or this
+      // is a barrier for the current span.
+      if (Begin == FieldEnd)
         break;
-      // Any non-zero-length bitfield can start a new run.
-      if (!Field->isZeroLengthBitField(Context)) {
-        Run = Field;
-        StartBitOffset = getFieldBitOffset(*Field);
-        Tail = StartBitOffset + Field->getBitWidthValue(Context);
-        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
-                                                         StartBitOffset);
+
+      Barrier = true;
+      AtAlignedBoundary = true;
+    }
+
+    // InstallBest indicates whether we should create an access unit for the
+    // current best span: fields [Begin, BestEnd) occupying characters
+    // [BeginOffset, BestEndOffset).
+    bool InstallBest = false;
+    if (AtAlignedBoundary) {
+      // Field is the start of a new span or the end of the bitfields. The
+      // just-seen span now extends to BitSizeSinceBegin.
+
+      // Determine if we can accumulate that just-seen span into the current
+      // accumulation.
+      CharUnits AccessSize = bitsToCharUnits(BitSizeSinceBegin + CharBits - 1);
+      if (BestEnd == Begin) {
+        // This is the initial run at the start of a new span. By definition,
+        // this is the best seen so far.
+        BestEnd = Field;
+        BestEndOffset = BeginOffset + AccessSize;
+        if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
+          // Fine-grained access, so no merging of spans.
+          InstallBest = true;
+        else if (!BitSizeSinceBegin)
+          // A zero-sized initial span -- this will install nothing and reset
+          // for another.
+          InstallBest = true;
+      } else if (AccessSize > RegSize)
+        // Accumulating the just-seen span would create a multi-register access
+        // unit, which would increase register pressure.
+        InstallBest = true;
+
+      if (!InstallBest) {
+        // Determine if accumulating the just-seen span will create an expensive
+        // access unit or not.
+        llvm::Type *Type = getIntNType(Context.toBits(AccessSize));
+        if (!Context.getTargetInfo().hasCheapUnalignedBitFieldAccess()) {
+          // Unaligned accesses are expensive. Only accumulate if the new unit
+          // is naturally aligned. Otherwise install the best we have, which is
+          // either the initial access unit (can't do better), or a naturally
+          // aligned accumulation (since we would have already installed it if
+          // it wasn't naturally aligned).
+          CharUnits Align = getAlignment(Type);
+          if (Align > Layout.getAlignment())
+            // The alignment required is greater than the containing structure
+            // itself.
+            InstallBest = true;
+          else if (!BeginOffset.isMultipleOf(Align))
+            // The access unit is not at a naturally aligned offset within the
+            // structure.
+            InstallBest = true;
+        }
+
+        if (!InstallBest) {
+          // Find the next used storage offset to determine what the limit of
+          // the current span is. That's either the offset of the next field
+          // with storage (which might be Field itself) or the end of the
+          // non-reusable tail padding.
+          CharUnits LimitOffset;
+          for (auto Probe = Field; Probe != FieldEnd; ++Probe)
+            if (!Probe->isZeroSize(Context)) {
+              // A member with storage sets the limit.
+              assert((getFieldBitOffset(*Probe) % CharBits) == 0 &&
+                     "Next storage is not byte-aligned");
+              LimitOffset = bitsToCharUnits(getFieldBitOffset(*Probe));
+              goto FoundLimit;
+            }
+          // We reached the end of the fields.  We can't necessarily use tail
+          // padding in C++ structs, so the NonVirtual size is what we must
+          // use there.
+          LimitOffset = RD ? Layout.getNonVirtualSize() : Layout.getDataSize();
+        FoundLimit:;
+
+          CharUnits TypeSize = getSize(Type);
+          if (BeginOffset + TypeSize <= LimitOffset) {
+            // There is space before LimitOffset to create a naturally-sized
+            // access unit.
+            BestEndOffset = BeginOffset + TypeSize;
+            BestEnd = Field;
+          }
+
+          if (Barrier)
+            // The next field is a barrier that we cannot merge across.
+            InstallBest = true;
+          else
+            // Otherwise, we're not installing. Update the bit size
+            // of the current span to go all the way to LimitOffset, which is
+            // the (aligned) offset of next bitfield to consider.
+            BitSizeSinceBegin = Context.toBits(LimitOffset - BeginOffset);
+        }
       }
-      ++Field;
-      continue;
     }
 
-    // If the start field of a new run is better as a single run, or
-    // if current field (or consecutive fields) is better as a single run, or
-    // if current field has zero width bitfield and either
-    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
-    // true, or
-    // if the offset of current field is inconsistent with the offset of
-    // previous field plus its offset,
-    // skip the block below and go ahead to emit the storage.
-    // Otherwise, try to add bitfields to the run.
-    if (!StartFieldAsSingleRun && Field != FieldEnd &&
-        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
-        (!Field->isZeroLengthBitField(Context) ||
-         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
-          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
-        Tail == getFieldBitOffset(*Field)) {
-      Tail += Field->getBitWidthValue(Context);
+    if (InstallBest) {
+      assert((Field == FieldEnd || !Field->isBitField() ||
+              (getFieldBitOffset(*Field) % CharBits) == 0) &&
+             "Installing but not at an aligned bitfield or limit");
+      CharUnits AccessSize = BestEndOffset - BeginOffset;
+      if (!AccessSize.isZero()) {
+        // Add the storage member for the access unit to the record. The
+        // bitfields get the offset of their storage but come afterward and
+        // remain there after a stable sort.
+        llvm::Type *Type = getIntNType(Context.toBits(AccessSize));
+        Members.push_back(StorageInfo(BeginOffset, Type));
+        for (; Begin != BestEnd; ++Begin)
+          if (!Begin->isZeroLengthBitField(Context))
+            Members.push_back(
+                MemberInfo(BeginOffset, MemberInfo::Field, nullptr, *Begin));
+      }
+      // Reset to start a new span.
+      Field = BestEnd;
+      Begin = FieldEnd;
+    } else {
+      assert(Field != FieldEnd && Field->isBitField() &&
+             "Accumulating past end of bitfields");
+      assert(!Barrier && "Accumulating across barrier");
+      // Accumulate this bitfield into the current (potential) span.
+      BitSizeSinceBegin += Field->getBitWidthValue(Context);
       ++Field;
-      continue;
     }
-
-    // We've hit a break-point in the run and need to emit a storage field.
-    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
-    // Add the storage member to the record and set the bitfield info for all of
-    // the bitfields in the run.  Bitfields get the offset of their storage but
-    // come afterward and remain there after a stable sort.
-    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
-    for (; Run != Field; ++Run)
-      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
-                                   MemberInfo::Field, nullptr, *Run));
-    Run = FieldEnd;
-    StartFieldAsSingleRun = false;
   }
+
+  return Field;
 }
 
 void CGRecordLowering::accumulateBases() {

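As a hypothetical illustration of the span merging described in the new
accumulateBitFields comments above (these structs are not part of the
committed tests, and the exact access units chosen depend on the target's
register width and unaligned-access cost):

  /* A 2-byte span followed by two 1-byte spans: the three spans can be
     accumulated into one naturally aligned 4-byte access unit.  */
  struct merge_ok {
    unsigned a : 16;
    unsigned b : 8;
    unsigned c : 8;
  };

  /* A 2-byte span followed by a single 1-byte span with no reusable tail
     padding (the char member occupies the next byte): better kept as
     separate 2-byte and 1-byte access units, since a combined unit would be
     either an awkward 3-byte access or a 4-byte access overlapping the
     non-bit-field member.  */
  struct keep_split {
    unsigned short d : 16;
    unsigned char e : 8;
    char x;
  };
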
diff --git a/clang/test/CodeGen/aapcs-bitfield-access-unit.c b/clang/test/CodeGen/aapcs-bitfield-access-unit.c
index ff28397c529007..e95dba1c5f50cf 100644
--- a/clang/test/CodeGen/aapcs-bitfield-access-unit.c
+++ b/clang/test/CodeGen/aapcs-bitfield-access-unit.c
@@ -29,10 +29,10 @@ struct st2 {
   short c : 7;
 } st2;
 // LAYOUT-LABEL: LLVMType:%struct.st2 =
-// LAYOUT-SAME: type { i16, i8 }
+// LAYOUT-SAME: type { i32 }
 // LAYOUT: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: ]>
 
 struct st3 {
@@ -60,10 +60,10 @@ struct st5 {
   volatile char c : 5;
 } st5;
 // LAYOUT-LABEL: LLVMType:%struct.st5 =
-// LAYOUT-SAME: type { i16, i8 }
+// LAYOUT-SAME: type { i32 }
 // LAYOUT: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:5 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:5 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: ]>
 
 struct st6 {
@@ -141,7 +141,7 @@ struct st12{
   int f : 16;
 } st12;
 // LAYOUT-LABEL: LLVMType:%struct.st12 =
-// LAYOUT-SAME: type { i24 }
+// LAYOUT-SAME: type { i32 }
 // LAYOUT: BitFields:[
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:0
@@ -152,10 +152,10 @@ struct st13 {
   int b : 32;
 } __attribute__((packed)) st13;
 // LAYOUT-LABEL: LLVMType:%struct.st13 =
-// LAYOUT-SAME: type { [5 x i8] }
+// LAYOUT-SAME: type <{ i8, i32 }>
 // LAYOUT: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:32 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:32 IsSigned:1 StorageSize:32 StorageOffset:1
 // LAYOUT-NEXT: ]>
 
 struct st14 {
@@ -183,12 +183,12 @@ struct st16 {
   int d : 16;
 } st16;
 // LAYOUT-LABEL: LLVMType:%struct.st16 =
-// LAYOUT-SAME: type { i48, i48 }
+// LAYOUT-SAME: type { i32, i16, i32, i16 }
 // LAYOUT: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:32 IsSigned:1 StorageSize:64 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:32 IsSigned:1 StorageSize:64 StorageOffset:8
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:8
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:32 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:32 IsSigned:1 StorageSize:32 StorageOffset:8
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:12
 // LAYOUT-NEXT: ]>
 
 struct st17 {
@@ -196,10 +196,10 @@ int b : 32;
 char c : 8;
 } __attribute__((packed)) st17;
 // LAYOUT-LABEL: LLVMType:%struct.st17 =
-// LAYOUT-SAME: type { [5 x i8] }
+// LAYOUT-SAME: type <{ i32, i8 }>
 // LAYOUT: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:32 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:32 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:4
 // LAYOUT-NEXT: ]>
 
 struct zero_bitfield {
@@ -221,7 +221,7 @@ struct zero_bitfield_ok {
   int b : 24;
 } st19;
 // LAYOUT-LABEL: LLVMType:%struct.zero_bitfield_ok =
-// LAYOUT-SAME: type { i16, i24 }
+// LAYOUT-SAME: type { i16, i32 }
 // LAYOUT: BitFields:[
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:0

diff --git a/clang/test/CodeGen/aapcs-bitfield.c b/clang/test/CodeGen/aapcs-bitfield.c
index 152ee26e7a3ea9..0df250d4ebc53e 100644
--- a/clang/test/CodeGen/aapcs-bitfield.c
+++ b/clang/test/CodeGen/aapcs-bitfield.c
@@ -299,77 +299,73 @@ struct st2 {
 
 // LE-LABEL: @st2_check_load(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
-// LE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// LE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LE-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 9
+// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// LE-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // LE-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // LE-NEXT:    ret i32 [[CONV]]
 //
 // BE-LABEL: @st2_check_load(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
-// BE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// BE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BE-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// BE-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // BE-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // BE-NEXT:    ret i32 [[CONV]]
 //
 // LENUMLOADS-LABEL: @st2_check_load(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 9
+// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // LENUMLOADS-NEXT:    ret i32 [[CONV]]
 //
 // BENUMLOADS-LABEL: @st2_check_load(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // BENUMLOADS-NEXT:    ret i32 [[CONV]]
 //
 // LEWIDTH-LABEL: @st2_check_load(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 9
+// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // LEWIDTH-NEXT:    ret i32 [[CONV]]
 //
 // BEWIDTH-LABEL: @st2_check_load(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // BEWIDTH-NEXT:    ret i32 [[CONV]]
 //
 // LEWIDTHNUM-LABEL: @st2_check_load(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 9
+// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // LEWIDTHNUM-NEXT:    ret i32 [[CONV]]
 //
 // BEWIDTHNUM-LABEL: @st2_check_load(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // BEWIDTHNUM-NEXT:    ret i32 [[CONV]]
 //
@@ -379,74 +375,66 @@ int st2_check_load(struct st2 *m) {
 
 // LE-LABEL: @st2_check_store(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
-// LE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LE-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// LE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -8323073
+// LE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LE-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @st2_check_store(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
-// BE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// BE-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// BE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65025
+// BE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 512
+// BE-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @st2_check_store(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LENUMLOADS-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -8323073
+// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LENUMLOADS-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @st2_check_store(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// BENUMLOADS-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65025
+// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 512
+// BENUMLOADS-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @st2_check_store(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTH-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -8323073
+// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LEWIDTH-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @st2_check_store(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// BEWIDTH-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65025
+// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 512
+// BEWIDTH-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @st2_check_store(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTHNUM-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -8323073
+// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LEWIDTHNUM-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @st2_check_store(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// BEWIDTHNUM-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65025
+// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 512
+// BEWIDTHNUM-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // BEWIDTHNUM-NEXT:    ret void
 //
 void st2_check_store(struct st2 *m) {
@@ -636,8 +624,8 @@ struct st4 {
 //
 // LEWIDTH-LABEL: @st4_check_load(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 2
 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -645,8 +633,8 @@ struct st4 {
 //
 // BEWIDTH-LABEL: @st4_check_load(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -654,8 +642,8 @@ struct st4 {
 //
 // LEWIDTHNUM-LABEL: @st4_check_load(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 2
 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -663,8 +651,8 @@ struct st4 {
 //
 // BEWIDTHNUM-LABEL: @st4_check_load(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -708,38 +696,38 @@ int st4_check_load(struct st4 *m) {
 //
 // LEWIDTH-LABEL: @st4_check_store(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -63
 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// LEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
+// LEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 1
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @st4_check_store(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -125
 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 4
-// BEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
+// BEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 1
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @st4_check_store(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -63
 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// LEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 1
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @st4_check_store(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -125
 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 4
-// BEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 1
 // BEWIDTHNUM-NEXT:    ret void
 //
 void st4_check_store(struct st4 *m) {
@@ -821,42 +809,44 @@ struct st5 {
 
 // LE-LABEL: @st5_check_load(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
-// LE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LE-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 11
+// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// LE-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// LE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
 // LE-NEXT:    ret i32 [[CONV]]
 //
 // BE-LABEL: @st5_check_load(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
-// BE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BE-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// BE-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// BE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
 // BE-NEXT:    ret i32 [[CONV]]
 //
 // LENUMLOADS-LABEL: @st5_check_load(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
-// LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 11
+// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
 // LENUMLOADS-NEXT:    ret i32 [[CONV]]
 //
 // BENUMLOADS-LABEL: @st5_check_load(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
-// BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
 // BENUMLOADS-NEXT:    ret i32 [[CONV]]
 //
 // LEWIDTH-LABEL: @st5_check_load(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -864,16 +854,16 @@ struct st5 {
 //
 // BEWIDTH-LABEL: @st5_check_load(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
 // BEWIDTH-NEXT:    ret i32 [[CONV]]
 //
 // LEWIDTHNUM-LABEL: @st5_check_load(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -881,8 +871,8 @@ struct st5 {
 //
 // BEWIDTHNUM-LABEL: @st5_check_load(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
 // BEWIDTHNUM-NEXT:    ret i32 [[CONV]]
@@ -893,74 +883,70 @@ int st5_check_load(struct st5 *m) {
 
 // LE-LABEL: @st5_check_store(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
-// LE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LE-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -2031617
+// LE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LE-NEXT:    store volatile i32 [[BF_SET]], ptr [[M]], align 4
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @st5_check_store(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
-// BE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BE-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -63489
+// BE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 2048
+// BE-NEXT:    store volatile i32 [[BF_SET]], ptr [[M]], align 4
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @st5_check_store(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LENUMLOADS-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -2031617
+// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LENUMLOADS-NEXT:    store volatile i32 [[BF_SET]], ptr [[M]], align 4
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @st5_check_store(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BENUMLOADS-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -63489
+// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 2048
+// BENUMLOADS-NEXT:    store volatile i32 [[BF_SET]], ptr [[M]], align 4
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @st5_check_store(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @st5_check_store(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @st5_check_store(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @st5_check_store(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
 // BEWIDTHNUM-NEXT:    ret void
 //
 void st5_check_store(struct st5 *m) {
@@ -980,8 +966,8 @@ struct st6 {
 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
 // LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// LE-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// LE-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// LE-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // LE-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -997,8 +983,8 @@ struct st6 {
 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
 // BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// BE-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// BE-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// BE-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // BE-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1014,8 +1000,8 @@ struct st6 {
 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1031,8 +1017,8 @@ struct st6 {
 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1048,8 +1034,8 @@ struct st6 {
 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
 // LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // LEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // LEWIDTH-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1065,8 +1051,8 @@ struct st6 {
 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
 // BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // BEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // BEWIDTH-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1082,8 +1068,8 @@ struct st6 {
 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
 // LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // LEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // LEWIDTHNUM-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1099,8 +1085,8 @@ struct st6 {
 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
 // BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // BEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // BEWIDTHNUM-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1704,9 +1690,9 @@ void store_st9(volatile struct st9 *m) {
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
 // LE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// LE-NEXT:    store volatile i8 [[TMP1]], ptr [[M]], align 4
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// LE-NEXT:    store volatile i8 [[TMP0]], ptr [[M]], align 4
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_st9(
@@ -1714,9 +1700,9 @@ void store_st9(volatile struct st9 *m) {
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
 // BE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// BE-NEXT:    store volatile i8 [[TMP1]], ptr [[M]], align 4
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// BE-NEXT:    store volatile i8 [[TMP0]], ptr [[M]], align 4
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_st9(
@@ -1724,10 +1710,10 @@ void store_st9(volatile struct st9 *m) {
 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[M]], align 4
-// LENUMLOADS-NEXT:    store volatile i8 [[TMP1]], ptr [[M]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LENUMLOADS-NEXT:    store volatile i8 [[TMP0]], ptr [[M]], align 4
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_st9(
@@ -1735,10 +1721,10 @@ void store_st9(volatile struct st9 *m) {
 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[M]], align 4
-// BENUMLOADS-NEXT:    store volatile i8 [[TMP1]], ptr [[M]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BENUMLOADS-NEXT:    store volatile i8 [[TMP0]], ptr [[M]], align 4
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_st9(
@@ -1949,9 +1935,9 @@ void store_st10(volatile struct st10 *m) {
 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
 // LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
+// LE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // LE-NEXT:    [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 1
 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -511
 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
@@ -1968,9 +1954,9 @@ void store_st10(volatile struct st10 *m) {
 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
 // BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
+// BE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // BE-NEXT:    [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 7
 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -32641
 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
@@ -1987,9 +1973,9 @@ void store_st10(volatile struct st10 *m) {
 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
+// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 1
 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -511
 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
@@ -2006,9 +1992,9 @@ void store_st10(volatile struct st10 *m) {
 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
+// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 7
 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -32641
 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
@@ -2767,146 +2753,70 @@ struct st13 {
 
 // LE-LABEL: @increment_b_st13(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// LE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LE-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_b_st13(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// BE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BE-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_b_st13(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 1
+// LENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_b_st13(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 1
+// BENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_b_st13(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LEWIDTH-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_b_st13(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BEWIDTH-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_b_st13(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LEWIDTHNUM-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 1
+// LEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_b_st13(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BEWIDTHNUM-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 1
+// BEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_b_st13(volatile struct st13 *s) {
@@ -2990,9 +2900,9 @@ struct st15 {
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // LE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // LE-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
-// LE-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// LE-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
+// LE-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_a_st15(
@@ -3000,9 +2910,9 @@ struct st15 {
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // BE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // BE-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
-// BE-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// BE-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
+// BE-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_a_st15(
@@ -3010,10 +2920,10 @@ struct st15 {
 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
-// LENUMLOADS-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// LENUMLOADS-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_a_st15(
@@ -3021,10 +2931,10 @@ struct st15 {
 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
-// BENUMLOADS-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// BENUMLOADS-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_a_st15(
@@ -3032,9 +2942,9 @@ struct st15 {
 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // LEWIDTH-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
-// LEWIDTH-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
+// LEWIDTH-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_a_st15(
@@ -3042,9 +2952,9 @@ struct st15 {
 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // BEWIDTH-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
-// BEWIDTH-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
+// BEWIDTH-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_a_st15(
@@ -3052,10 +2962,10 @@ struct st15 {
 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
-// LEWIDTHNUM-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// LEWIDTHNUM-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_a_st15(
@@ -3063,10 +2973,10 @@ struct st15 {
 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
-// BEWIDTHNUM-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// BEWIDTHNUM-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_a_st15(volatile struct st15 *s) {
@@ -3082,146 +2992,58 @@ struct st16 {
 
 // LE-LABEL: @increment_a_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_a_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_a_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_a_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_a_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_a_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTH-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_a_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_a_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_a_st16(struct st16 *s) {
@@ -3230,154 +3052,90 @@ void increment_a_st16(struct st16 *s) {
 
 // LE-LABEL: @increment_b_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LE-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LE-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_b_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BE-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BE-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_b_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_b_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_b_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LEWIDTH-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LEWIDTH-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_b_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BEWIDTH-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BEWIDTH-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_b_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LEWIDTHNUM-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_b_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BEWIDTHNUM-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_b_st16(struct st16 *s) {
@@ -3386,154 +3144,66 @@ void increment_b_st16(struct st16 *s) {
 
 // LE-LABEL: @increment_c_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_c_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_c_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_c_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_c_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_c_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTH-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_c_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_c_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_c_st16(struct st16 *s) {
@@ -3542,162 +3212,90 @@ void increment_c_st16(struct st16 *s) {
 
 // LE-LABEL: @increment_d_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LE-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LE-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_d_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BE-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BE-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_d_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_d_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_d_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LEWIDTH-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LEWIDTH-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_d_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BEWIDTH-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BEWIDTH-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_d_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LEWIDTHNUM-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_d_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BEWIDTHNUM-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_d_st16(struct st16 *s) {
@@ -3706,74 +3304,32 @@ void increment_d_st16(struct st16 *s) {
 
 // LE-LABEL: @increment_v_a_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
+// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 4
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_v_a_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
+// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 4
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_v_a_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 4
+// LENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 4
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_v_a_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 4
+// BENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 4
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_v_a_st16(
@@ -3812,140 +3368,110 @@ void increment_v_a_st16(volatile struct st16 *s) {
 
 // LE-LABEL: @increment_v_b_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[B]], align 4
+// LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LE-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT:    store volatile i16 [[TMP0]], ptr [[B]], align 4
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_v_b_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[B]], align 4
+// BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BE-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT:    store volatile i16 [[TMP0]], ptr [[B]], align 4
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_v_b_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[B]], align 4
+// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[B]], align 4
+// LENUMLOADS-NEXT:    store volatile i16 [[TMP0]], ptr [[B]], align 4
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_v_b_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[B]], align 4
+// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[B]], align 4
+// BENUMLOADS-NEXT:    store volatile i16 [[TMP0]], ptr [[B]], align 4
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_v_b_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_v_b_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_v_b_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_v_b_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // BEWIDTHNUM-NEXT:    ret void
@@ -3956,112 +3482,70 @@ void increment_v_b_st16(volatile struct st16 *s) {
 
 // LE-LABEL: @increment_v_c_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT:    store volatile i64 [[BF_SET]], ptr [[C]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
+// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_v_c_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT:    store volatile i64 [[BF_SET]], ptr [[C]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
+// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_v_c_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[C]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[C]], align 4
+// LENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_v_c_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[C]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[C]], align 4
+// BENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_v_c_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
-// LEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_v_c_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
-// BEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_v_c_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
-// LEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[C]], align 4
+// LEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_v_c_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
-// BEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[C]], align 4
+// BEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_v_c_st16(volatile struct st16 *s) {
@@ -4070,144 +3554,110 @@ void increment_v_c_st16(volatile struct st16 *s) {
 
 // LE-LABEL: @increment_v_d_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[D]], align 4
+// LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LE-NEXT:    store volatile i64 [[BF_SET]], ptr [[D]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT:    store volatile i16 [[TMP0]], ptr [[D]], align 4
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_v_d_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[D]], align 4
+// BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BE-NEXT:    store volatile i64 [[BF_SET]], ptr [[D]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT:    store volatile i16 [[TMP0]], ptr [[D]], align 4
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_v_d_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[D]], align 4
+// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[D]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[D]], align 4
+// LENUMLOADS-NEXT:    store volatile i16 [[TMP0]], ptr [[D]], align 4
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_v_d_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[D]], align 4
+// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[D]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[D]], align 4
+// BENUMLOADS-NEXT:    store volatile i16 [[TMP0]], ptr [[D]], align 4
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_v_d_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_v_d_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_v_d_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_v_d_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // BEWIDTHNUM-NEXT:    ret void
@@ -4224,146 +3674,62 @@ char c : 8;
 
 // LE-LABEL: @increment_v_b_st17(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// LE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_v_b_st17(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// BE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_v_b_st17(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 1
+// LENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_v_b_st17(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 1
+// BENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_v_b_st17(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_v_b_st17(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_v_b_st17(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 1
+// LEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_v_b_st17(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 1
+// BEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_v_b_st17(volatile struct st17 *s) {
@@ -4372,108 +3738,70 @@ void increment_v_b_st17(volatile struct st17 *s) {
 
 // LE-LABEL: @increment_v_c_st17(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 32
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
-// LE-NEXT:    [[INC:%.*]] = add i8 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i8 [[INC]] to i40
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 4294967295
-// LE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LE-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
+// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
+// LE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// LE-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_v_c_st17(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 32
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
-// BE-NEXT:    [[INC:%.*]] = add i8 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i8 [[INC]] to i40
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -256
-// BE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BE-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
+// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
+// BE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// BE-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_v_c_st17(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i8 [[INC]] to i40
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
+// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
+// LENUMLOADS-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_v_c_st17(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 32
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i8 [[INC]] to i40
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -256
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
+// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
+// BENUMLOADS-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_v_c_st17(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
 // LEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
-// LEWIDTH-NEXT:    store volatile i8 [[INC]], ptr [[TMP1]], align 1
+// LEWIDTH-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_v_c_st17(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
 // BEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
-// BEWIDTH-NEXT:    store volatile i8 [[INC]], ptr [[TMP1]], align 1
+// BEWIDTH-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_v_c_st17(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP1]], align 1
-// LEWIDTHNUM-NEXT:    store volatile i8 [[INC]], ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
+// LEWIDTHNUM-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_v_c_st17(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP1]], align 1
-// BEWIDTHNUM-NEXT:    store volatile i8 [[INC]], ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
+// BEWIDTHNUM-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_v_c_st17(volatile struct st17 *s) {
@@ -4493,9 +3821,9 @@ struct zero_bitfield {
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // LE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// LE-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// LE-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_a_zero_bitfield(
@@ -4503,9 +3831,9 @@ struct zero_bitfield {
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // BE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// BE-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// BE-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_a_zero_bitfield(
@@ -4513,10 +3841,10 @@ struct zero_bitfield {
 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
-// LENUMLOADS-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LENUMLOADS-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_a_zero_bitfield(
@@ -4524,10 +3852,10 @@ struct zero_bitfield {
 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
-// BENUMLOADS-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BENUMLOADS-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_a_zero_bitfield(
@@ -4535,9 +3863,9 @@ struct zero_bitfield {
 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// LEWIDTH-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// LEWIDTH-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_a_zero_bitfield(
@@ -4545,9 +3873,9 @@ struct zero_bitfield {
 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// BEWIDTH-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// BEWIDTH-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_a_zero_bitfield(
@@ -4555,10 +3883,10 @@ struct zero_bitfield {
 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
-// LEWIDTHNUM-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTHNUM-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_a_zero_bitfield(
@@ -4566,10 +3894,10 @@ struct zero_bitfield {
 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
-// BEWIDTHNUM-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTHNUM-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_a_zero_bitfield(volatile struct zero_bitfield *s) {
@@ -4692,9 +4020,9 @@ struct zero_bitfield_ok {
 // LE-NEXT:    [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
 // LE-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
 // LE-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
-// LE-NEXT:    [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
+// LE-NEXT:    [[TMP0:%.*]] = zext i8 [[CONV4]] to i16
 // LE-NEXT:    [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
+// LE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // LE-NEXT:    [[BF_SHL6:%.*]] = shl i16 [[BF_VALUE]], 8
 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], 255
 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL6]]
@@ -4716,9 +4044,9 @@ struct zero_bitfield_ok {
 // BE-NEXT:    [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
 // BE-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
 // BE-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
-// BE-NEXT:    [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
+// BE-NEXT:    [[TMP0:%.*]] = zext i8 [[CONV4]] to i16
 // BE-NEXT:    [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
+// BE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], -256
 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_VALUE]]
 // BE-NEXT:    store volatile i16 [[BF_SET]], ptr [[S]], align 4
@@ -4739,9 +4067,9 @@ struct zero_bitfield_ok {
 // LENUMLOADS-NEXT:    [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
 // LENUMLOADS-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
 // LENUMLOADS-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
-// LENUMLOADS-NEXT:    [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = zext i8 [[CONV4]] to i16
 // LENUMLOADS-NEXT:    [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
+// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // LENUMLOADS-NEXT:    [[BF_SHL6:%.*]] = shl i16 [[BF_VALUE]], 8
 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], 255
 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL6]]
@@ -4763,9 +4091,9 @@ struct zero_bitfield_ok {
 // BENUMLOADS-NEXT:    [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
 // BENUMLOADS-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
 // BENUMLOADS-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
-// BENUMLOADS-NEXT:    [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = zext i8 [[CONV4]] to i16
 // BENUMLOADS-NEXT:    [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
+// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], -256
 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_VALUE]]
 // BENUMLOADS-NEXT:    store volatile i16 [[BF_SET]], ptr [[S]], align 4
@@ -4780,12 +4108,12 @@ struct zero_bitfield_ok {
 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 8
 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // LEWIDTH-NEXT:    [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
 // LEWIDTH-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
 // LEWIDTH-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
-// LEWIDTH-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
+// LEWIDTH-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP0]], align 1
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_a_zero_bitfield_ok(
@@ -4793,12 +4121,12 @@ struct zero_bitfield_ok {
 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 8
 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // BEWIDTH-NEXT:    [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
 // BEWIDTH-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
 // BEWIDTH-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
-// BEWIDTH-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
+// BEWIDTH-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP0]], align 1
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_a_zero_bitfield_ok(
@@ -4807,13 +4135,13 @@ struct zero_bitfield_ok {
 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 8
 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // LEWIDTHNUM-NEXT:    [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
 // LEWIDTHNUM-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
 // LEWIDTHNUM-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
-// LEWIDTHNUM-NEXT:    [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP2]], align 1
-// LEWIDTHNUM-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP0]], align 1
+// LEWIDTHNUM-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP0]], align 1
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_a_zero_bitfield_ok(
@@ -4821,13 +4149,13 @@ struct zero_bitfield_ok {
 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 8
 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // BEWIDTHNUM-NEXT:    [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
 // BEWIDTHNUM-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
 // BEWIDTHNUM-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
-// BEWIDTHNUM-NEXT:    [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP2]], align 1
-// BEWIDTHNUM-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP0]], align 1
+// BEWIDTHNUM-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP0]], align 1
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_a_zero_bitfield_ok(volatile struct zero_bitfield_ok *s) {

diff --git a/clang/test/CodeGen/arm-bitfield-alignment.c b/clang/test/CodeGen/arm-bitfield-alignment.c
index d3a3d19a41e33e..5d0967ec70346c 100644
--- a/clang/test/CodeGen/arm-bitfield-alignment.c
+++ b/clang/test/CodeGen/arm-bitfield-alignment.c
@@ -1,7 +1,7 @@
-// RUN: %clang_cc1 -triple arm-none-eabi -fdump-record-layouts-simple -ffreestanding -emit-llvm -o %t %s | FileCheck %s -check-prefix=LAYOUT
-// RUN: FileCheck %s -check-prefix=IR <%t
-// RUN: %clang_cc1 -triple aarch64 -fdump-record-layouts-simple -ffreestanding -emit-llvm -o %t %s | FileCheck %s -check-prefix=LAYOUT
-// RUN: FileCheck %s -check-prefix=IR <%t
+// RUN: %clang_cc1 -triple arm-none-eabi -fdump-record-layouts-simple -ffreestanding -emit-llvm -o %t %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT-32
+// RUN: FileCheck %s -check-prefixes=IR,IR-32 <%t
+// RUN: %clang_cc1 -triple aarch64 -fdump-record-layouts-simple -ffreestanding -emit-llvm -o %t %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT-64
+// RUN: FileCheck %s -check-prefixes=IR,IR-64 <%t
 
 extern struct T {
   int b0 : 8;
@@ -14,12 +14,17 @@ int func(void) {
 }
 
 // IR: @g = external global %struct.T, align 4
-// IR: %{{.*}} = load i64, ptr @g, align 4
+// IR-32: %{{.*}} = load i32, ptr @g, align 4
+// IR-64: %{{.*}} = load i64, ptr @g, align 4
 
 // LAYOUT-LABEL: LLVMType:%struct.T =
-// LAYOUT-SAME: type { i40 }
+// LAYOUT-32-SAME: type { i32, i8 }
+// LAYOUT-64-SAME: type { i64 }
 // LAYOUT: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:1 StorageSize:64 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:32 Size:1 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-32-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-32-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-32-NEXT: <CGBitFieldInfo Offset:0 Size:1 IsSigned:1 StorageSize:8 StorageOffset:4
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:32 Size:1 IsSigned:1 StorageSize:64 StorageOffset:0
 // LAYOUT-NEXT: ]>

diff --git a/clang/test/CodeGen/arm64-be-bitfield.c b/clang/test/CodeGen/arm64-be-bitfield.c
index 28ca3be348379d..57e20b5b62b9ca 100644
--- a/clang/test/CodeGen/arm64-be-bitfield.c
+++ b/clang/test/CodeGen/arm64-be-bitfield.c
@@ -7,8 +7,10 @@ struct bt3 { signed b2:10; signed b3:10; } b16;
 signed callee_b0f(struct bt3 bp11) {
 // IR: callee_b0f(i64 [[ARG:%.*]])
 // IR: [[BP11:%.*]] = alloca %struct.bt3, align 4
-// IR: store i64 [[ARG]], ptr [[PTR:%.*]], align 8
-// IR: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[BP11]], ptr align 8 [[PTR]], i64 4
+// IR: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.bt3, ptr [[BP11]], i32 0, i32 0
+// IR: [[COERCE_HIGHBITS:%.*]] = lshr i64 [[ARG]], 32
+// IR: [[COERCE_VAL_II:%.*]] = trunc i64 [[COERCE_HIGHBITS]] to i32
+// IR: store i32 [[COERCE_VAL_II]], ptr [[COERCE_DIVE]], align 4
 // IR: [[BF_LOAD:%.*]] = load i32, ptr [[BP11]], align 4
 // IR: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 22
 // IR: ret i32 [[BF_ASHR]]
@@ -16,7 +18,7 @@ signed callee_b0f(struct bt3 bp11) {
 }
 
 // LAYOUT-LABEL: LLVMType:%struct.bt3 =
-// LAYOUT-SAME: type { i24 }
+// LAYOUT-SAME: type { i32 }
 // LAYOUT: BitFields:[
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:22 Size:10 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:12 Size:10 IsSigned:1 StorageSize:32 StorageOffset:0

diff --git a/clang/test/CodeGen/bitfield-2.c b/clang/test/CodeGen/bitfield-2.c
index 3e0b30c7a17d80..8688ba6390ddbe 100644
--- a/clang/test/CodeGen/bitfield-2.c
+++ b/clang/test/CodeGen/bitfield-2.c
@@ -271,11 +271,11 @@ _Bool test_6(void) {
 // CHECK-RECORD: *** Dumping IRgen Record Layout
 // CHECK-RECORD: Record: RecordDecl{{.*}}s7
 // CHECK-RECORD: Layout: <CGRecordLayout
-// CHECK-RECORD:   LLVMType:%struct.s7 = type { i32, i32, i32, i8, i32, [12 x i8] }
+// CHECK-RECORD:   LLVMType:%struct.s7 = type <{ i32, i32, i32, i64, [12 x i8] }>
 // CHECK-RECORD:   IsZeroInitializable:1
 // CHECK-RECORD:   BitFields:[
-// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:12
-// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:29 IsSigned:1 StorageSize:32 StorageOffset:16
+// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:64 StorageOffset:12
+// CHECK-RECORD:     <CGBitFieldInfo Offset:32 Size:29 IsSigned:1 StorageSize:64 StorageOffset:12
 
 struct __attribute__((aligned(16))) s7 {
   int a, b, c;

diff --git a/clang/test/CodeGen/bitfield-access-pad.c b/clang/test/CodeGen/bitfield-access-pad.c
index 20b81d887167ec..edda7b7798d057 100644
--- a/clang/test/CodeGen/bitfield-access-pad.c
+++ b/clang/test/CodeGen/bitfield-access-pad.c
@@ -37,6 +37,7 @@
 // RUN: %clang_cc1 -triple=hexagon-elf -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-STRICT-NT %s
 // RUN: %clang_cc1 -triple=mips-elf -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-STRICT-NT %s
 
+
 struct P1 {
   unsigned a :8;
   char :0;
@@ -45,23 +46,21 @@ struct P1 {
 // CHECK-LABEL: LLVMType:%struct.P1 =
 // LAYOUT-T-SAME: type { i8, i8, [2 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, i8 }
-// LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
-// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
-// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
@@ -77,22 +76,18 @@ struct P2 {
 // CHECK-LABEL: LLVMType:%struct.P2 =
 // LAYOUT-T-SAME: type { i8, i8, i8, i8 }
 // LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
-// LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
@@ -111,24 +106,18 @@ struct P3 {
 // CHECK-LABEL: LLVMType:%struct.P3 =
 // LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
-// LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
@@ -146,22 +135,18 @@ struct P4 {
 // CHECK-LABEL: LLVMType:%struct.P4 =
 // LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
-// LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
@@ -178,20 +163,18 @@ struct P5 {
 // CHECK-LABEL: LLVMType:%struct.P5 =
 // LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
-// LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
@@ -210,24 +193,18 @@ struct P6 {
 // CHECK-LABEL: LLVMType:%struct.P6 =
 // LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
-// LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
@@ -244,20 +221,18 @@ struct P7 {
 // CHECK-LABEL: LLVMType:%struct.P7 =
 // LAYOUT-T-SAME: type { i8, i8, i8, i8 }
 // LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
-// LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
@@ -276,20 +251,18 @@ struct __attribute__ ((aligned (2))) P7_align {
 // CHECK-LABEL: LLVMType:%struct.P7_align =
 // LAYOUT-T-SAME: type { i8, i8, i8, i8 }
 // LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
-// LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
@@ -314,11 +287,9 @@ struct P8 {
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:2
 
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
@@ -344,11 +315,9 @@ struct P9 {
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
@@ -365,11 +334,11 @@ struct __attribute__((aligned(4))) P10 {
   char : 0;
 } p10;
 // CHECK-LABEL: LLVMType:%struct.P10 =
-// LAYOUT-T-SAME: type { i24 }
-// LAYOUT-ARM64-T-SAME: type { i24 }
-// LAYOUT-NT-SAME: type { i24 }
-// LAYOUT-STRICT-NT-SAME: type { i24 }
-// LAYOUT-DWN32-SAME: type { i24 }
+// LAYOUT-T-SAME: type { i32 }
+// LAYOUT-ARM64-T-SAME: type { i32 }
+// LAYOUT-NT-SAME: type { i32 }
+// LAYOUT-STRICT-NT-SAME: type { i32 }
+// LAYOUT-DWN32-SAME: type { i32 }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
@@ -378,12 +347,10 @@ struct __attribute__((aligned(4))) P10 {
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:32 StorageOffset:0
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:32 StorageOffset:0
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
@@ -401,11 +368,11 @@ struct __attribute__((aligned(4))) P11 {
   char : 0; // at a char boundary
 } p11;
 // CHECK-LABEL: LLVMType:%struct.P11 =
-// LAYOUT-T-SAME: type { i24 }
-// LAYOUT-ARM64-T-SAME: type { i24 }
-// LAYOUT-NT-SAME: type { i24 }
-// LAYOUT-STRICT-NT-SAME: type { i24 }
-// LAYOUT-DWN32-SAME: type { i24 }
+// LAYOUT-T-SAME: type { i32 }
+// LAYOUT-ARM64-T-SAME: type { i32 }
+// LAYOUT-NT-SAME: type { i32 }
+// LAYOUT-STRICT-NT-SAME: type { i32 }
+// LAYOUT-DWN32-SAME: type { i32 }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
@@ -414,12 +381,10 @@ struct __attribute__((aligned(4))) P11 {
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:32 StorageOffset:0
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:32 StorageOffset:0
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0

diff  --git a/clang/test/CodeGen/bitfield-access-unit.c b/clang/test/CodeGen/bitfield-access-unit.c
index cf13fcf6c85b1e..1aed2e7202fc65 100644
--- a/clang/test/CodeGen/bitfield-access-unit.c
+++ b/clang/test/CodeGen/bitfield-access-unit.c
@@ -62,12 +62,12 @@ struct A {
   char b : 7;
 } a;
 // CHECK-LABEL: LLVMType:%struct.A =
-// LAYOUT-FLEX-SAME: type { i8, i8 }
+// LAYOUT-FLEX-SAME: type { i16 }
 // LAYOUT-STRICT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i16 }
 // CHECK: BitFields:[
-// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:1
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
 
 // LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
 // LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:1
@@ -82,11 +82,11 @@ struct __attribute__((aligned(2))) B {
   char b : 7;
 } b;
 // CHECK-LABEL: LLVMType:%struct.B =
-// LAYOUT-SAME: type { i8, i8 }
+// LAYOUT-SAME: type { i16 }
 // LAYOUT-DWN32-SAME: type { i16 }
 // CHECK: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:1
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
 
 // LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
@@ -100,12 +100,12 @@ struct C {
   char b : 7;
 } c;
 // CHECK-LABEL: LLVMType:%struct.C =
-// LAYOUT-FLEX-SAME: type { i32, i8, i8, i8 }
+// LAYOUT-FLEX-SAME: type <{ i32, i8, i16, i8 }>
 // LAYOUT-STRICT-SAME: type { i32, i8, i8, i8 }
 // LAYOUT-DWN32-SAME: type <{ i32, i8, i16, i8 }>
 // CHECK: BitFields:[
-// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:5
-// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:5
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:5
 
 // LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:5
 // LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
@@ -123,21 +123,20 @@ struct __attribute__((packed)) D {
 } d;
 // CHECK-LABEL: LLVMType:%struct.D =
 // LAYOUT-FLEX-SAME: type <{ i32, i16, i8 }>
-// LAYOUT-STRICT-SAME: type <{ i32, i16, i8 }>
+// LAYOUT-STRICT-SAME: type <{ i32, i8, i8, i8 }>
 // LAYOUT-DWN32-FLEX-SAME: type <{ i32, i16, i8 }>
-// LAYOUT-DWN32-STRICT-SAME: type <{ i32, i16, i8 }>
+// LAYOUT-DWN32-STRICT-SAME: type <{ i32, i8, i8, i8 }>
 // CHECK: BitFields:[
 // LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
 // LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
 
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:4
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:5
 
 // LAYOUT-DWN32-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
 // LAYOUT-DWN32-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
-
-// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
-// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:4
+// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:5
 // CHECK-NEXT: ]>
 
 struct E {
@@ -146,22 +145,21 @@ struct E {
   unsigned c : 12;
 } e;
 // CHECK-LABEL: LLVMType:%struct.E =
-// LAYOUT-FLEX64-SAME: type { i8, i16, i16, [2 x i8] }
-// LAYOUT-FLEX32-SAME: type { i8, i16, i16, [2 x i8] }
-// LAYOUT-STRICT-SAME: type { i8, i16, i16, [2 x i8] }
+// LAYOUT-FLEX64-SAME: type { i64 }
+// LAYOUT-FLEX32-SAME: type { i32, i16 }
+// LAYOUT-STRICT-SAME: type { i32, i16 }
 // LAYOUT-DWN32-SAME: type { i32 }
 // CHECK: BitFields:[
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:64 StorageOffset:0
 
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
-
-// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
 
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
 
 // LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
@@ -176,30 +174,30 @@ struct F {
   signed char d : 7;
 } f;
 // CHECK-LABEL: LLVMType:%struct.F =
-// LAYOUT-FLEX64-SAME: type { i8, i16, i16, i8 }
-// LAYOUT-FLEX32-SAME: type { i8, i16, i16, i8 }
-// LAYOUT-STRICT-SAME: type { i8, i16, i16, i8 }
-// LAYOUT-DWN32-SAME: type { [5 x i8] }
+// LAYOUT-FLEX64-SAME: type { i64 }
+// LAYOUT-FLEX32-SAME: type { i32, i32 }
+// LAYOUT-STRICT-SAME: type { i32, i32 }
+// LAYOUT-DWN32-SAME: type <{ i32, i8 }>
 // CHECK: BitFields:[
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
 
-// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
-// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
-// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:4
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:4
 
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:4
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:4
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:40 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:4
 // CHECK-NEXT: ]>
 
 struct G {
@@ -210,18 +208,18 @@ struct G {
   signed char e;
 } g;
 // CHECK-LABEL: LLVMType:%struct.G =
-// LAYOUT-SAME: type { i8, i16, i16, i8, i8 }
-// LAYOUT-DWN32-SAME: type { [5 x i8], i8 }
+// LAYOUT-SAME: type { i32, i16, i8, i8 }
+// LAYOUT-DWN32-SAME: type <{ i32, i8, i8 }>
 // CHECK: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:40 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:4
 // CHECK-NEXT: ]>
 
 #if _LP64
@@ -257,24 +255,24 @@ struct B64 {
   signed char e; // not a bitfield
 } b64;
 // CHECK-64-LABEL: LLVMType:%struct.B64 =
-// LAYOUT-64-FLEX-SAME: type { [7 x i8], i8 }
-// LAYOUT-64-STRICT-SAME: type { [7 x i8], i8 }
-// LAYOUT-64-DWN-SAME: type { [7 x i8], i8 }
+// LAYOUT-64-FLEX-SAME: type <{ i16, i8, i32, i8 }>
+// LAYOUT-64-STRICT-SAME: type <{ i16, i8, i16, i16, i8 }>
+// LAYOUT-64-DWN-SAME: type <{ i16, i8, i32, i8 }>
 // CHECK-64: BitFields:[
-// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-
-// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:3
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:3
+
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:3
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:5
+
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:3
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:3
 // CHECK-64-NEXT: ]>
 
 struct C64 {
@@ -285,20 +283,20 @@ struct C64 {
   signed char e : 7;
 } c64;
 // CHECK-64-LABEL: LLVMType:%struct.C64 =
-// LAYOUT-64-SAME: type { i16, [5 x i8], i8 }
-// LAYOUT-64-DWN-SAME: type { i16, [5 x i8], i8 }
+// LAYOUT-64-SAME: type { i64 }
+// LAYOUT-64-DWN-SAME: type { i64 }
 // CHECK-64: BitFields:[
-// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:40 StorageOffset:2
-// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:40 StorageOffset:2
-// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:40 StorageOffset:2
-// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:7
-
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:40 StorageOffset:2
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:40 StorageOffset:2
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:40 StorageOffset:2
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:7
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
+
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
 // CHECK-64-NEXT: ]>
 
 #endif

diff  --git a/clang/test/CodeGen/debug-info-bitfield-0-struct.c b/clang/test/CodeGen/debug-info-bitfield-0-struct.c
index 0535b626771429..9fadf898e34661 100644
--- a/clang/test/CodeGen/debug-info-bitfield-0-struct.c
+++ b/clang/test/CodeGen/debug-info-bitfield-0-struct.c
@@ -101,8 +101,10 @@ struct None_B {
   int y : 4;
 };
 
-struct None_C {
-  // BOTH-DAG: ![[NONE_C:[0-9]+]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "None_C", file: !{{[0-9]+}}, line: {{[0-9]+}}, size: 32, elements: ![[NONE_C_ELEMENTS:[0-9]+]])
+// AMDGCN does not do unaligned access cheaply, so the bitfield access units
+// would remain single bytes, without the aligned attribute
+struct __attribute__((aligned(4))) None_C {
+  // BOTH-DAG: ![[NONE_C:[0-9]+]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "None_C", file: !{{[0-9]+}}, line: {{[0-9]+}}, size: 32, align: 32, elements: ![[NONE_C_ELEMENTS:[0-9]+]])
   // BOTH-DAG: ![[NONE_C_ELEMENTS]] = !{![[NONE_C_X:[0-9]+]], ![[NONE_C_Y:[0-9]+]], ![[NONE_C_A:[0-9]+]], ![[NONE_C_B:[0-9]+]]}
   // BOTH-DAG: ![[NONE_C_X]] = !DIDerivedType(tag: DW_TAG_member, name: "x", scope: ![[NONE_C]], file: !{{[0-9]+}}, line: {{[0-9]+}}, baseType: !{{[0-9]+}}, size: 8, flags: DIFlagBitField, extraData: i64 0)
   // BOTH-DAG: ![[NONE_C_Y]] = !DIDerivedType(tag: DW_TAG_member, name: "y", scope: ![[NONE_C]], file: !{{[0-9]+}}, line: {{[0-9]+}}, baseType: !{{[0-9]+}}, size: 8, offset: 8, flags: DIFlagBitField, extraData: i64 0)

diff  --git a/clang/test/CodeGen/no-bitfield-type-align.c b/clang/test/CodeGen/no-bitfield-type-align.c
index f4698421ea1a72..1861c6886a35b3 100644
--- a/clang/test/CodeGen/no-bitfield-type-align.c
+++ b/clang/test/CodeGen/no-bitfield-type-align.c
@@ -12,7 +12,6 @@ struct S {
 // LAYOUT-SAME: type { i32 }
 // LAYOUT: BitFields:[
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:15 IsSigned:0 StorageSize:32 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:15 Size:0 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:15 Size:15 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: ]>
 

diff  --git a/clang/test/CodeGen/struct-x86-darwin.c b/clang/test/CodeGen/struct-x86-darwin.c
index 350666a5168632..e79ecefb880dfb 100644
--- a/clang/test/CodeGen/struct-x86-darwin.c
+++ b/clang/test/CodeGen/struct-x86-darwin.c
@@ -38,10 +38,10 @@ struct STestB6 {int a:1; char b; int c:13; } stb6;
 // CHECK-NEXT: ]>
 
 // CHECK-LABEL: LLVMType:%struct.STestB2 =
-// CHECK-SAME: type { i8, i8, i8 }
+// CHECK-SAME: type <{ i8, i16 }>
 // CHECK: BitFields:[
-// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:1
-// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:4 IsSigned:1 StorageSize:8 StorageOffset:2
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:16 StorageOffset:1
+// CHECK-NEXT: <CGBitFieldInfo Offset:8 Size:4 IsSigned:1 StorageSize:16 StorageOffset:1
 // CHECK-NEXT: ]>
 
 // CHECK-LABEL: LLVMType:%struct.STestB3 =

diff  --git a/clang/test/CodeGen/tbaa-struct.cpp b/clang/test/CodeGen/tbaa-struct.cpp
index 9b4b7415142d93..ca076ce5aa2732 100644
--- a/clang/test/CodeGen/tbaa-struct.cpp
+++ b/clang/test/CodeGen/tbaa-struct.cpp
@@ -197,7 +197,7 @@ void copy12(UnionMember2 *a1, UnionMember2 *a2) {
 // CHECK-OLD: [[TS6]] = !{i64 0, i64 2, [[TAG_CHAR]], i64 2, i64 1, [[TAG_CHAR]], i64 8, i64 8, [[TAG_DOUBLE:!.+]]}
 // CHECK-OLD: [[TAG_DOUBLE]] = !{[[DOUBLE:!.+]], [[DOUBLE]], i64 0}
 // CHECK-OLD  [[DOUBLE]] = !{!"double", [[CHAR]], i64 0}
-// CHECK-OLD: [[TS7]] = !{i64 0, i64 1, [[TAG_CHAR]], i64 1, i64 1, [[TAG_CHAR]], i64 2, i64 1, [[TAG_CHAR]], i64 3, i64 1, [[TAG_CHAR]], i64 4, i64 1, [[TAG_CHAR]], i64 8, i64 8, [[TAG_DOUBLE]], i64 16, i64 1, [[TAG_CHAR]]}
+// CHECK-OLD: [[TS7]] = !{i64 0, i64 1, [[TAG_CHAR]], i64 1, i64 1, [[TAG_CHAR]], i64 2, i64 1, [[TAG_CHAR]], i64 3, i64 2, [[TAG_CHAR]], i64 8, i64 8, [[TAG_DOUBLE]], i64 16, i64 1, [[TAG_CHAR]]}
 // CHECK-OLD: [[TS8]] = !{i64 0, i64 4, [[TAG_CHAR]], i64 8, i64 8, [[TAG_DOUBLE]]}
 // CHECK-OLD: [[TS9]] = !{i64 0, i64 8, [[TAG_CHAR]], i64 8, i64 4, [[TAG_INT]]}
 // CHECK-OLD: [[TS10]] = !{i64 0, i64 4, [[TAG_INT]], i64 8, i64 8, [[TAG_CHAR]]}

diff  --git a/clang/test/CodeGenCXX/bitfield-access-empty.cpp b/clang/test/CodeGenCXX/bitfield-access-empty.cpp
index 194d2c9def6996..c5e6f55ffa6964 100644
--- a/clang/test/CodeGenCXX/bitfield-access-empty.cpp
+++ b/clang/test/CodeGenCXX/bitfield-access-empty.cpp
@@ -120,8 +120,8 @@ struct P6 {
   unsigned c;
 } p6;
 // CHECK-LABEL: LLVMType:%struct.P6 =
-// LAYOUT-SAME: type { i24, i32 }
-// LAYOUT-DWN32-SAME: type { i24, i32 }
+// LAYOUT-SAME: type { i32, i32 }
+// LAYOUT-DWN32-SAME: type { i32, i32 }
 // CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P6 =
 // CHECK: BitFields:[
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:32 StorageOffset:0
@@ -138,13 +138,13 @@ struct P7 {
   unsigned c;
 } p7;
 // CHECK-LABEL: LLVMType:%struct.P7 =
-// LAYOUT-SAME: type { [3 x i8], %struct.Empty, i32 }
-// LAYOUT-DWN32-SAME: type { [3 x i8], %struct.Empty, i32 }
+// LAYOUT-SAME: type { i16, i8, %struct.Empty, i32 }
+// LAYOUT-DWN32-SAME: type { i16, i8, %struct.Empty, i32 }
 // CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P7 =
 // CHECK: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:24 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:24 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
 // CHECK-NEXT: ]>

diff  --git a/clang/test/CodeGenCXX/bitfield-access-tail.cpp b/clang/test/CodeGenCXX/bitfield-access-tail.cpp
index dad0ab226282c2..68716fdf3b1daa 100644
--- a/clang/test/CodeGenCXX/bitfield-access-tail.cpp
+++ b/clang/test/CodeGenCXX/bitfield-access-tail.cpp
@@ -48,15 +48,15 @@ struct Pod {
   int b : 8;
 } P;
 // CHECK-LABEL: LLVMType:%struct.Pod =
-// LAYOUT-SAME: type { i24 }
-// LAYOUT-DWN32-SAME: type { [3 x i8] }
+// LAYOUT-SAME: type { i32 }
+// LAYOUT-DWN32-SAME: type <{ i16, i8 }>
 // CHECK-NEXT: NonVirtualBaseLLVMType:%struct.Pod =
 // CHECK: BitFields:[
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:32 StorageOffset:0
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 // CHECK-NEXT: ]>
 
 // No tail padding
@@ -65,15 +65,15 @@ struct __attribute__((packed)) PPod {
   int b : 8;
 } PP;
 // CHECK-LABEL: LLVMType:%struct.PPod =
-// LAYOUT-SAME: type { [3 x i8] }
-// LAYOUT-DWN32-SAME: type { [3 x i8] }
+// LAYOUT-SAME: type <{ i16, i8 }>
+// LAYOUT-DWN32-SAME: type <{ i16, i8 }>
 // CHECK-NEXT: NonVirtualBaseLLVMType:%struct.PPod =
 // CHECK: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 // CHECK-NEXT: ]>
 
 // Cannot use tail padding
@@ -83,17 +83,17 @@ struct NonPod {
   int b : 8;
 } NP;
 // CHECK-LABEL: LLVMType:%struct.NonPod =
-// LAYOUT-SAME: type { [3 x i8], i8 }
-// LAYOUT-DWN32-SAME: type { [3 x i8] }
+// LAYOUT-SAME: type <{ i16, i8, i8 }>
+// LAYOUT-DWN32-SAME: type <{ i16, i8 }>
 // CHECK-NEXT: NonVirtualBaseLLVMType:%struct.
-// LAYOUT-SAME: NonPod.base = type { [3 x i8] }
-// LAYOUT-DWN32-SAME: NonPod = type { [3 x i8] }
+// LAYOUT-SAME: NonPod.base = type <{ i16, i8 }>
+// LAYOUT-DWN32-SAME: NonPod = type <{ i16, i8 }>
 // CHECK: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 // CHECK-NEXT: ]>
 
 // No tail padding
@@ -103,13 +103,13 @@ struct __attribute__((packed)) PNonPod {
   int b : 8;
 } PNP;
 // CHECK-LABEL: LLVMType:%struct.PNonPod =
-// LAYOUT-SAME: type { [3 x i8] }
-// LAYOUT-DWN32-SAME: type { [3 x i8] }
+// LAYOUT-SAME: type <{ i16, i8 }>
+// LAYOUT-DWN32-SAME: type <{ i16, i8 }>
 // CHECK-NEXT: NonVirtualBaseLLVMType:%struct.PNonPod =
 // CHECK: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 // CHECK-NEXT: ]>

diff  --git a/clang/test/CodeGenCXX/bitfield-ir.cpp b/clang/test/CodeGenCXX/bitfield-ir.cpp
index fa53f4d8900ef0..76c144072da686 100644
--- a/clang/test/CodeGenCXX/bitfield-ir.cpp
+++ b/clang/test/CodeGenCXX/bitfield-ir.cpp
@@ -23,12 +23,9 @@ struct Int {
 // CHECK-LABEL: define dso_local void @_Z1AP4Tail
 // CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i24, ptr [[P]], align 4
-// CHECK-NEXT:    [[NARROW:%.*]] = add i24 [[BF_LOAD]], 1
-// CHECK-NEXT:    [[BF_VALUE:%.*]] = and i24 [[NARROW]], 65535
-// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i24 [[BF_LOAD]], -65536
-// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i24 [[BF_VALUE]], [[BF_CLEAR]]
-// CHECK-NEXT:    store i24 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[P]], align 4
+// CHECK-NEXT:    [[INC:%.*]] = add i16 [[BF_LOAD]], 1
+// CHECK-NEXT:    store i16 [[INC]], ptr [[P]], align 4
 // CHECK-NEXT:    ret void
 //
 void A (Tail *p) {
@@ -38,12 +35,10 @@ void A (Tail *p) {
 // CHECK-LABEL: define dso_local void @_Z1BP4Tail
 // CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i24, ptr [[P]], align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = and i24 [[BF_LOAD]], -65536
-// CHECK-NEXT:    [[BF_SHL:%.*]] = add i24 [[TMP0]], 65536
-// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i24 [[BF_LOAD]], 65535
-// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i24 [[BF_SHL]], [[BF_CLEAR]]
-// CHECK-NEXT:    store i24 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 2
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[B]], align 2
+// CHECK-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// CHECK-NEXT:    store i8 [[INC]], ptr [[B]], align 2
 // CHECK-NEXT:    ret void
 //
 void B (Tail *p) {
@@ -53,12 +48,9 @@ void B (Tail *p) {
 // CHECK-LABEL: define dso_local void @_Z1AP4Char
 // CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i24, ptr [[P]], align 4
-// CHECK-NEXT:    [[NARROW:%.*]] = add i24 [[BF_LOAD]], 1
-// CHECK-NEXT:    [[BF_VALUE:%.*]] = and i24 [[NARROW]], 65535
-// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i24 [[BF_LOAD]], -65536
-// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i24 [[BF_VALUE]], [[BF_CLEAR]]
-// CHECK-NEXT:    store i24 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[P]], align 4
+// CHECK-NEXT:    [[INC:%.*]] = add i16 [[BF_LOAD]], 1
+// CHECK-NEXT:    store i16 [[INC]], ptr [[P]], align 4
 // CHECK-NEXT:    ret void
 //
 void A (Char *p) {
@@ -68,12 +60,10 @@ void A (Char *p) {
 // CHECK-LABEL: define dso_local void @_Z1BP4Char
 // CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i24, ptr [[P]], align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = and i24 [[BF_LOAD]], -65536
-// CHECK-NEXT:    [[BF_SHL:%.*]] = add i24 [[TMP0]], 65536
-// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i24 [[BF_LOAD]], 65535
-// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i24 [[BF_SHL]], [[BF_CLEAR]]
-// CHECK-NEXT:    store i24 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 2
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[B]], align 2
+// CHECK-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// CHECK-NEXT:    store i8 [[INC]], ptr [[B]], align 2
 // CHECK-NEXT:    ret void
 //
 void B (Char *p) {

diff  --git a/clang/test/CodeGenCXX/bitfield.cpp b/clang/test/CodeGenCXX/bitfield.cpp
index ddc3f9345622c3..7545e02840e6b2 100644
--- a/clang/test/CodeGenCXX/bitfield.cpp
+++ b/clang/test/CodeGenCXX/bitfield.cpp
@@ -224,7 +224,7 @@ namespace N2 {
     void *p;
   };
 // LAYOUT-LABEL: LLVMType:%"struct.N2::S" =
-// LAYOUT-SAME: type { i24, ptr }
+// LAYOUT-SAME: type { i32, ptr }
 // LAYOUT: BitFields:[
 // LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
@@ -268,7 +268,7 @@ namespace N3 {
     unsigned b : 24;
   };
 // LAYOUT-LABEL: LLVMType:%"struct.N3::S" =
-// LAYOUT-SAME: type { i24 }
+// LAYOUT-SAME: type { i32 }
 // LAYOUT: BitFields:[
 // LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
@@ -378,7 +378,7 @@ namespace N5 {
 // LAYOUT-NEXT: ]>
 
 // LAYOUT-LABEL: LLVMType:%"struct.N5::U::Y" =
-// LAYOUT-SAME: type { i24 }
+// LAYOUT-SAME: type { i32 }
 // LAYOUT-NEXT: NonVirtualBaseLLVMType:%"struct.N5::U::Y" =
 // LAYOUT: BitFields:[
 // LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0

diff  --git a/clang/test/OpenMP/atomic_capture_codegen.cpp b/clang/test/OpenMP/atomic_capture_codegen.cpp
index 08d1f21f8e0bda..eba7906d8eb8a7 100644
--- a/clang/test/OpenMP/atomic_capture_codegen.cpp
+++ b/clang/test/OpenMP/atomic_capture_codegen.cpp
@@ -811,7 +811,7 @@ int main(void) {
 #pragma omp atomic relaxed capture
   iv = bfx4.a = bfx4.a * ldv;
 // CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -831,7 +831,7 @@ int main(void) {
 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
 // CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
 // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
 // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
@@ -870,7 +870,7 @@ int main(void) {
 #pragma omp atomic capture release
   {bfx4.b /= ldv; iv = bfx4.b;}
 // CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) acquire, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) acquire, align 1
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -890,7 +890,7 @@ int main(void) {
 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
 // CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
 // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] acquire acquire, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] acquire acquire, align 1
 // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]

diff  --git a/clang/test/OpenMP/atomic_read_codegen.c b/clang/test/OpenMP/atomic_read_codegen.c
index 0a68c8e2c35a56..4294c145fa1c97 100644
--- a/clang/test/OpenMP/atomic_read_codegen.c
+++ b/clang/test/OpenMP/atomic_read_codegen.c
@@ -292,7 +292,7 @@ int main(void) {
 // CHECK: store x86_fp80
 #pragma omp atomic read
   ldv = bfx4.a;
-// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @bfx4_packed, i64 2) monotonic, align 1
+// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @bfx4_packed, i32 0, i32 1) monotonic, align 1
 // CHECK: store i8 [[LD]], ptr [[LDTEMP:%.+]]
 // CHECK: [[LD:%.+]] = load i8, ptr [[LDTEMP]]
 // CHECK: [[SHL:%.+]] = shl i8 [[LD]], 7
@@ -309,7 +309,7 @@ int main(void) {
 // CHECK: store x86_fp80
 #pragma omp atomic read relaxed
   ldv = bfx4.b;
-// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @bfx4_packed, i64 2) acquire, align 1
+// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @bfx4_packed, i32 0, i32 1) acquire, align 1
 // CHECK: store i8 [[LD]], ptr [[LDTEMP:%.+]]
 // CHECK: [[LD:%.+]] = load i8, ptr [[LDTEMP]]
 // CHECK: [[ASHR:%.+]] = ashr i8 [[LD]], 1

diff  --git a/clang/test/OpenMP/atomic_update_codegen.cpp b/clang/test/OpenMP/atomic_update_codegen.cpp
index 31160b41764698..ce0765118922a1 100644
--- a/clang/test/OpenMP/atomic_update_codegen.cpp
+++ b/clang/test/OpenMP/atomic_update_codegen.cpp
@@ -737,7 +737,7 @@ int main(void) {
 #pragma omp atomic
   bfx4.a = bfx4.a * ldv;
 // CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -757,7 +757,7 @@ int main(void) {
 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
 // CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
 // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
 // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
@@ -792,7 +792,7 @@ int main(void) {
 #pragma omp atomic
   bfx4.b /= ldv;
 // CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -812,7 +812,7 @@ int main(void) {
 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
 // CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
 // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
 // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]

diff  --git a/clang/test/OpenMP/atomic_write_codegen.c b/clang/test/OpenMP/atomic_write_codegen.c
index afe8737d30b065..493b01cbac76c4 100644
--- a/clang/test/OpenMP/atomic_write_codegen.c
+++ b/clang/test/OpenMP/atomic_write_codegen.c
@@ -413,7 +413,7 @@ int main(void) {
   bfx4.a = ldv;
 // CHECK: load x86_fp80, ptr @{{.+}}
 // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -423,7 +423,7 @@ int main(void) {
 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
 // CHECK: store i8 %{{.+}}, ptr [[LDTEMP:%.+]]
 // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[LDTEMP]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
 // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
@@ -451,7 +451,7 @@ int main(void) {
   bfx4.b = ldv;
 // CHECK: load x86_fp80, ptr @{{.+}}
 // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -462,7 +462,7 @@ int main(void) {
 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
 // CHECK: store i8 %{{.+}}, ptr [[LDTEMP:%.+]]
 // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[LDTEMP]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
 // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]


        

