[clang] [clang] Better bitfield access units (PR #65742)

Nathan Sidwell via cfe-commits cfe-commits at lists.llvm.org
Mon Mar 18 09:46:57 PDT 2024


https://github.com/urnathan updated https://github.com/llvm/llvm-project/pull/65742

>From 9137b00d44c24b45549c0c6b894fa6b511ca18d9 Mon Sep 17 00:00:00 2001
From: Nathan Sidwell <nathan at acm.org>
Date: Tue, 5 Sep 2023 11:08:25 -0400
Subject: [PATCH 1/8] [clang][NFC] Bitfield access unit tests

These new tests verify clang's choice of types to access
bitfields. This is in preparation for the next patch, which changes
that algorithm. With these tests already in place it'll be easier to
see what has changed.
---
 .../test/CodeGen/aapcs-bitfield-access-unit.c | 266 +++++++++++
 clang/test/CodeGen/arm-bitfield-alignment.c   |  18 +-
 clang/test/CodeGen/arm64-be-bitfield.c        |  16 +-
 clang/test/CodeGen/bitfield-access-pad.c      | 431 ++++++++++++++++++
 clang/test/CodeGen/bitfield-access-unit.c     | 304 ++++++++++++
 clang/test/CodeGen/no-bitfield-type-align.c   |  11 +-
 clang/test/CodeGen/struct-x86-darwin.c        |  69 ++-
 .../test/CodeGenCXX/bitfield-access-empty.cpp | 150 ++++++
 .../test/CodeGenCXX/bitfield-access-tail.cpp  | 115 +++++
 clang/test/CodeGenCXX/bitfield-ir.cpp         | 111 +++++
 clang/test/CodeGenCXX/bitfield.cpp            | 101 +++-
 11 files changed, 1569 insertions(+), 23 deletions(-)
 create mode 100644 clang/test/CodeGen/aapcs-bitfield-access-unit.c
 create mode 100644 clang/test/CodeGen/bitfield-access-pad.c
 create mode 100644 clang/test/CodeGen/bitfield-access-unit.c
 create mode 100644 clang/test/CodeGenCXX/bitfield-access-empty.cpp
 create mode 100644 clang/test/CodeGenCXX/bitfield-access-tail.cpp
 create mode 100644 clang/test/CodeGenCXX/bitfield-ir.cpp

diff --git a/clang/test/CodeGen/aapcs-bitfield-access-unit.c b/clang/test/CodeGen/aapcs-bitfield-access-unit.c
new file mode 100644
index 00000000000000..a285c73c2a35f8
--- /dev/null
+++ b/clang/test/CodeGen/aapcs-bitfield-access-unit.c
@@ -0,0 +1,266 @@
+// RUN: %clang_cc1 -triple armv8-none-linux-eabi -fno-aapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT_LE
+// RUN: %clang_cc1 -triple armebv8-none-linux-eabi -fno-aapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT_BE
+
+// RUN: %clang_cc1 -triple armv8-none-linux-eabi -faapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT_LE
+// RUN: %clang_cc1 -triple armebv8-none-linux-eabi -faapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT_BE
+
+struct st0 {
+  short c : 7;
+} st0;
+// LAYOUT-LABEL: LLVMType:%struct.st0 =
+// LAYOUT-SAME: type { i8, i8 }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:1 Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st1 {
+  int a : 10;
+  short c : 6;
+} st1;
+// LAYOUT-LABEL: LLVMType:%struct.st1 =
+// LAYOUT-SAME: type { i16, [2 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:10 Size:6 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:6 Size:10 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:6 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st2 {
+  int a : 10;
+  short c : 7;
+} st2;
+// LAYOUT-LABEL: LLVMType:%struct.st2 =
+// LAYOUT-SAME: type { i16, i8 }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:7 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:6 Size:10 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:1 Size:7 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT-NEXT: ]>
+
+struct st3 {
+  volatile short c : 7;
+} st3;
+// LAYOUT-LABEL: LLVMType:%struct.st3 =
+// LAYOUT-SAME: type { i8, i8 }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:1 Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st4 {
+  int b : 9;
+  volatile char c : 5;
+} st4;
+// LAYOUT-LABEL: LLVMType:%struct.st4 =
+// LAYOUT-SAME: type { i16, [2 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:9 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:9 Size:5 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:7 Size:9 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:2 Size:5 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st5 {
+  int a : 12;
+  volatile char c : 5;
+} st5;
+// LAYOUT-LABEL: LLVMType:%struct.st5 =
+// LAYOUT-SAME: type { i16, i8 }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:12 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:4 Size:12 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:3 Size:5 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT-NEXT: ]>
+
+struct st6 {
+  int a : 12;
+  char b;
+  int c : 5;
+} st6;
+// LAYOUT-LABEL: LLVMType:%struct.st6 =
+// LAYOUT-SAME: type { i16, i8, i8 }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:12 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:3
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:4 Size:12 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:3 Size:5 IsSigned:1 StorageSize:8 StorageOffset:3
+// LAYOUT-NEXT: ]>
+
+struct st7a {
+  char a;
+  int b : 5;
+} st7a;
+// LAYOUT-LABEL: LLVMType:%struct.st7a =
+// LAYOUT-SAME: type { i8, i8, [2 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:1
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:3 Size:5 IsSigned:1 StorageSize:8 StorageOffset:1
+// LAYOUT-NEXT: ]>
+
+struct st7b {
+  char x;
+  volatile struct st7a y;
+} st7b;
+// LAYOUT-LABEL: LLVMType:%struct.st7b =
+// LAYOUT-SAME: type { i8, [3 x i8], %struct.st7a }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: ]>
+
+struct st8 {
+  unsigned f : 16;
+} st8;
+// LAYOUT-LABEL: LLVMType:%struct.st8 =
+// LAYOUT-SAME: type { i16, [2 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st9{
+  int f : 8;
+} st9;
+// LAYOUT-LABEL: LLVMType:%struct.st9 =
+// LAYOUT-SAME: type { i8, [3 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st10{
+  int e : 1;
+  int f : 8;
+} st10;
+// LAYOUT-LABEL: LLVMType:%struct.st10 =
+// LAYOUT-SAME: type { i16, [2 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:1 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:1 Size:8 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:15 Size:1 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:7 Size:8 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st11{
+  char e;
+  int f : 16;
+} st11;
+// LAYOUT-LABEL: LLVMType:%struct.st11 =
+// LAYOUT-SAME: type <{ i8, i16, i8 }>
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:16 IsSigned:1 StorageSize:16 StorageOffset:1
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:16 IsSigned:1 StorageSize:16 StorageOffset:1
+// LAYOUT-NEXT: ]>
+
+struct st12{
+  int e : 8;
+  int f : 16;
+} st12;
+// LAYOUT-LABEL: LLVMType:%struct.st12 =
+// LAYOUT-SAME: type { i24 }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:8 Size:16 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:24 Size:8 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:8 Size:16 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st13 {
+  char a : 8;
+  int b : 32;
+} __attribute__((packed)) st13;
+// LAYOUT-LABEL: LLVMType:%struct.st13 =
+// LAYOUT-SAME: type { [5 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:8 Size:32 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:32 Size:8 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st14 {
+  char a : 8;
+} __attribute__((packed)) st14;
+// LAYOUT-LABEL: LLVMType:%struct.st14 =
+// LAYOUT-SAME: type { i8 }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st15 {
+  short a : 8;
+} __attribute__((packed)) st15;
+// LAYOUT-LABEL: LLVMType:%struct.st15 =
+// LAYOUT-SAME: type { i8 }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st16 {
+  int a : 32;
+  int b : 16;
+  int c : 32;
+  int d : 16;
+} st16;
+// LAYOUT-LABEL: LLVMType:%struct.st16 =
+// LAYOUT-SAME: type { i48, i48 }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:32 Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:64 StorageOffset:8
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:32 Size:16 IsSigned:1 StorageSize:64 StorageOffset:8
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:32 Size:32 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:16 Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:32 Size:32 IsSigned:1 StorageSize:64 StorageOffset:8
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:16 Size:16 IsSigned:1 StorageSize:64 StorageOffset:8
+// LAYOUT-NEXT: ]>
+
+struct st17 {
+int b : 32;
+char c : 8;
+} __attribute__((packed)) st17;
+// LAYOUT-LABEL: LLVMType:%struct.st17 =
+// LAYOUT-SAME: type { [5 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:32 Size:8 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:8 Size:32 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct zero_bitfield {
+  int a : 8;
+  char : 0;
+  int b : 8;
+} st18;
+// LAYOUT-LABEL: LLVMType:%struct.zero_bitfield =
+// LAYOUT-SAME: type { i8, i8, [2 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:1
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:1
+// LAYOUT-NEXT: ]>
+
+struct zero_bitfield_ok {
+  short a : 8;
+  char a1 : 8;
+  long : 0;
+  int b : 24;
+} st19;
+// LAYOUT-LABEL: LLVMType:%struct.zero_bitfield_ok =
+// LAYOUT-SAME: type { i16, i24 }
+// LAYOUT: BitFields:[
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:8 Size:8 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:1 StorageSize:32 StorageOffset:4
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:8 Size:8 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:1 StorageSize:32 StorageOffset:4
+// LAYOUT-NEXT: ]>
+
+
diff --git a/clang/test/CodeGen/arm-bitfield-alignment.c b/clang/test/CodeGen/arm-bitfield-alignment.c
index e34789face5584..d3a3d19a41e33e 100644
--- a/clang/test/CodeGen/arm-bitfield-alignment.c
+++ b/clang/test/CodeGen/arm-bitfield-alignment.c
@@ -1,5 +1,7 @@
-// RUN: %clang_cc1 -triple arm-none-eabi -ffreestanding -emit-llvm -o - %s | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -ffreestanding -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple arm-none-eabi -fdump-record-layouts-simple -ffreestanding -emit-llvm -o %t %s | FileCheck %s -check-prefix=LAYOUT
+// RUN: FileCheck %s -check-prefix=IR <%t
+// RUN: %clang_cc1 -triple aarch64 -fdump-record-layouts-simple -ffreestanding -emit-llvm -o %t %s | FileCheck %s -check-prefix=LAYOUT
+// RUN: FileCheck %s -check-prefix=IR <%t
 
 extern struct T {
   int b0 : 8;
@@ -11,5 +13,13 @@ int func(void) {
   return g.b1;
 }
 
-// CHECK: @g = external global %struct.T, align 4
-// CHECK: %{{.*}} = load i64, ptr @g, align 4
+// IR: @g = external global %struct.T, align 4
+// IR: %{{.*}} = load i64, ptr @g, align 4
+
+// LAYOUT-LABEL: LLVMType:%struct.T =
+// LAYOUT-SAME: type { i40 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:32 Size:1 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-NEXT: ]>
diff --git a/clang/test/CodeGen/arm64-be-bitfield.c b/clang/test/CodeGen/arm64-be-bitfield.c
index 58c31853929847..28ca3be348379d 100644
--- a/clang/test/CodeGen/arm64-be-bitfield.c
+++ b/clang/test/CodeGen/arm64-be-bitfield.c
@@ -1,11 +1,23 @@
-// RUN:  %clang_cc1 -triple aarch64_be-linux-gnu -ffreestanding -emit-llvm -O0 -o - %s | FileCheck --check-prefix IR %s
+// RUN:  %clang_cc1 -triple aarch64_be-linux-gnu -ffreestanding -emit-llvm -O0 -o %t -fdump-record-layouts-simple %s | FileCheck %s --check-prefix=LAYOUT
+// RUN: FileCheck %s --check-prefix=IR <%t
 
 struct bt3 { signed b2:10; signed b3:10; } b16;
 
 // Get the high 32-bits and then shift appropriately for big-endian.
 signed callee_b0f(struct bt3 bp11) {
 // IR: callee_b0f(i64 [[ARG:%.*]])
+// IR: [[BP11:%.*]] = alloca %struct.bt3, align 4
 // IR: store i64 [[ARG]], ptr [[PTR:%.*]], align 8
-// IR: call void @llvm.memcpy.p0.p0.i64(ptr {{.*}}, ptr align 8 [[PTR]], i64 4
+// IR: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[BP11]], ptr align 8 [[PTR]], i64 4
+// IR: [[BF_LOAD:%.*]] = load i32, ptr [[BP11]], align 4
+// IR: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 22
+// IR: ret i32 [[BF_ASHR]]
   return bp11.b2;
 }
+
+// LAYOUT-LABEL: LLVMType:%struct.bt3 =
+// LAYOUT-SAME: type { i24 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:22 Size:10 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:12 Size:10 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
diff --git a/clang/test/CodeGen/bitfield-access-pad.c b/clang/test/CodeGen/bitfield-access-pad.c
new file mode 100644
index 00000000000000..20b81d887167ec
--- /dev/null
+++ b/clang/test/CodeGen/bitfield-access-pad.c
@@ -0,0 +1,431 @@
+// Tests for bitfield access with zero-length bitfield padding
+
+// Configs that have cheap unaligned access
+// Little Endian
+// RUN: %clang_cc1 -triple=aarch64-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=aarch64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=arm-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-DWN32 %s
+// RUN: %clang_cc1 -triple=arm-none-eabi %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=i686-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=x86_64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+
+// Big Endian
+// RUN: %clang_cc1 -triple=powerpc-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=powerpc64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+
+// Configs that have expensive unaligned access
+// Little Endian
+// RUN: %clang_cc1 -triple=hexagon-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=le64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+
+// Big endian
+// RUN: %clang_cc1 -triple=m68k-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=mips-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+
+// And now a few with -fno-bitfield-type-align. Precisely how this behaves is
+// ABI-dependent.
+// Cheap unaligned
+// RUN: %clang_cc1 -triple=aarch64-apple-darwin -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-NT %s
+// RUN: %clang_cc1 -triple=aarch64-linux-gnu -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-ARM64-T %s
+// RUN: %clang_cc1 -triple=arm-apple-darwin -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-DWN32 %s
+// RUN: %clang_cc1 -triple=i686-linux-gnu -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-NT %s
+// RUN: %clang_cc1 -triple=x86_64-linux-gnu -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-NT %s
+// RUN: %clang_cc1 -triple=powerpc-linux-gnu -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-NT %s
+// RUN: %clang_cc1 -triple=powerpc64-linux-gnu -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-NT %s
+
+// Expensive unaligned
+// RUN: %clang_cc1 -triple=hexagon-elf -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-STRICT-NT %s
+// RUN: %clang_cc1 -triple=mips-elf -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-STRICT-NT %s
+
+struct P1 {
+  unsigned a :8;
+  char :0;
+  unsigned b :8;
+} p1;
+// CHECK-LABEL: LLVMType:%struct.P1 =
+// LAYOUT-T-SAME: type { i8, i8, [2 x i8] }
+// LAYOUT-ARM64-T-SAME: type { i8, i8 }
+// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+// This will often be align(1) with -fno-bitfield-type-align
+struct P2 {
+  unsigned a :8;
+  char :0;
+  short :0;
+  unsigned b :8;
+} p2;
+// CHECK-LABEL: LLVMType:%struct.P2 =
+// LAYOUT-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P3 {
+  unsigned a :8;
+  char :0;
+  short :0;
+  unsigned :0;
+  unsigned b :8;
+} p3;
+// CHECK-LABEL: LLVMType:%struct.P3 =
+// LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P4 {
+  unsigned a :8;
+  short :0;
+  unsigned :0;
+  unsigned b :8;
+} p4;
+// CHECK-LABEL: LLVMType:%struct.P4 =
+// LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P5 {
+  unsigned a :8;
+  unsigned :0;
+  unsigned b :8;
+} p5;
+// CHECK-LABEL: LLVMType:%struct.P5 =
+// LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P6 {
+  unsigned a :8;
+  unsigned :0;
+  short :0;
+  char :0;
+  unsigned b :8;
+} p6;
+// CHECK-LABEL: LLVMType:%struct.P6 =
+// LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P7 {
+  unsigned a : 8;
+  short : 0;
+  unsigned char b : 8;
+} p7;
+// CHECK-LABEL: LLVMType:%struct.P7 =
+// LAYOUT-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+// And with forced alignment for !useZeroLengthBitfieldAlignment machines (eg
+// hexagon)
+struct __attribute__ ((aligned (2))) P7_align {
+  unsigned a : 8;
+  short : 0;
+  unsigned char b : 8;
+} p7_align;
+// CHECK-LABEL: LLVMType:%struct.P7_align =
+// LAYOUT-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P8 {
+  unsigned a : 7;
+  short : 0;
+  unsigned char b : 7;
+} p8;
+// CHECK-LABEL: LLVMType:%struct.P8 =
+// LAYOUT-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P9 {
+  unsigned a : 7;
+  char : 0;
+  unsigned short b : 7;
+} p9;
+// CHECK-LABEL: LLVMType:%struct.P9 =
+// LAYOUT-T-SAME: type { i8, i8, [2 x i8] }
+// LAYOUT-ARM64-T-SAME: type { i8, i8 }
+// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct __attribute__((aligned(4))) P10 {
+  unsigned a : 7;
+  unsigned short b : 7;
+  unsigned c : 7;
+  char : 0;
+} p10;
+// CHECK-LABEL: LLVMType:%struct.P10 =
+// LAYOUT-T-SAME: type { i24 }
+// LAYOUT-ARM64-T-SAME: type { i24 }
+// LAYOUT-NT-SAME: type { i24 }
+// LAYOUT-STRICT-NT-SAME: type { i24 }
+// LAYOUT-DWN32-SAME: type { i24 }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:32 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:32 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// CHECK-NEXT: ]>
+
+struct __attribute__((aligned(4))) P11 {
+  unsigned a : 7;
+  unsigned short b : 7;
+  unsigned c : 10;
+  char : 0; // at a char boundary
+} p11;
+// CHECK-LABEL: LLVMType:%struct.P11 =
+// LAYOUT-T-SAME: type { i24 }
+// LAYOUT-ARM64-T-SAME: type { i24 }
+// LAYOUT-NT-SAME: type { i24 }
+// LAYOUT-STRICT-NT-SAME: type { i24 }
+// LAYOUT-DWN32-SAME: type { i24 }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:32 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:32 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
+// CHECK-NEXT: ]>
diff --git a/clang/test/CodeGen/bitfield-access-unit.c b/clang/test/CodeGen/bitfield-access-unit.c
new file mode 100644
index 00000000000000..cf13fcf6c85b1e
--- /dev/null
+++ b/clang/test/CodeGen/bitfield-access-unit.c
@@ -0,0 +1,304 @@
+// Check arches with 32bit ints. (Not you, AVR & MSP430)
+
+// Configs that have cheap unaligned access
+
+// 64-bit Little Endian
+// RUN: %clang_cc1 -triple=aarch64-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64,CHECK-64,LAYOUT-64-DWN %s
+// RUN: %clang_cc1 -triple=aarch64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64,CHECK-64,LAYOUT-64,LAYOUT-64-FLEX %s
+// RUN: %clang_cc1 -triple=loongarch64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64,CHECK-64,LAYOUT-64,LAYOUT-64-FLEX %s
+// RUN: %clang_cc1 -triple=ve-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64 %s
+// RUN: %clang_cc1 -triple=wasm64 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64 %s
+// RUN: %clang_cc1 -triple=x86_64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64,CHECK-64,LAYOUT-64,LAYOUT-64-FLEX %s
+
+// 64-bit Big Endian
+// RUN: %clang_cc1 -triple=powerpc64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64,CHECK-64,LAYOUT-64,LAYOUT-64-FLEX %s
+// RUN: %clang_cc1 -triple=systemz %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64,CHECK-64,LAYOUT-64,LAYOUT-64-FLEX %s
+
+// 32-bit Little Endian
+// RUN: %clang_cc1 -triple=arm-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-DWN32,LAYOUT-DWN32-FLEX %s
+// RUN: %clang_cc1 -triple=arm-none-eabi %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX32 %s
+// RUN: %clang_cc1 -triple=i686-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX32 %s
+// RUN: %clang_cc1 -triple=powerpcle-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX32 %s
+// RUN: %clang_cc1 -triple=wasm32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX32 %s
+
+// 32-bit Big Endian
+// RUN: %clang_cc1 -triple=powerpc-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX32 %s
+
+// Configs that have expensive unaligned access
+// 64-bit Little Endian
+// RUN: %clang_cc1 -triple=aarch64-linux-gnu %s -target-feature +strict-align -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT,CHECK-64,LAYOUT-64,LAYOUT-64-STRICT %s
+// RUN: %clang_cc1 -triple=amdgcn-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT,CHECK-64,LAYOUT-64,LAYOUT-64-STRICT %s
+// RUN: %clang_cc1 -triple=loongarch64-elf -target-feature -ual %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT,CHECK-64,LAYOUT-64,LAYOUT-64-STRICT %s
+// RUN: %clang_cc1 -triple=riscv64 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT,CHECK-64,LAYOUT-64,LAYOUT-64-STRICT %s
+
+// 64-bit Big Endian
+// RUN: %clang_cc1 -triple=mips64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT,CHECK-64,LAYOUT-64,LAYOUT-64-STRICT %s
+
+// 32-bit Little Endian
+// RUN: %clang_cc1 -triple=arc-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=arm-apple-darwin %s -target-feature +strict-align -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-DWN32,LAYOUT-DWN32-STRICT %s
+// RUN: %clang_cc1 -triple=arm-none-eabi %s -target-feature +strict-align -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=bpf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=csky %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=hexagon-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=loongarch32-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=nvptx-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=riscv32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=spir-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=xcore-none-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+
+// 32-bit Big Endian
+// RUN: %clang_cc1 -triple=lanai-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=mips-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=sparc-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=tce-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+
+// Both le64-elf and m68k-elf are strict alignment ISAs with 4-byte aligned
+// 64-bit or 2-byte aligned 32-bit integer types. This is more complex to describe here.
+
+// If unaligned access is expensive don't stick these together.
+struct A {
+  char a : 7;
+  char b : 7;
+} a;
+// CHECK-LABEL: LLVMType:%struct.A =
+// LAYOUT-FLEX-SAME: type { i8, i8 }
+// LAYOUT-STRICT-SAME: type { i8, i8 }
+// LAYOUT-DWN32-SAME: type { i16 }
+// CHECK: BitFields:[
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:1
+
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:1
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// CHECK-NEXT: ]>
+
+// But do here.
+struct __attribute__((aligned(2))) B {
+  char a : 7;
+  char b : 7;
+} b;
+// CHECK-LABEL: LLVMType:%struct.B =
+// LAYOUT-SAME: type { i8, i8 }
+// LAYOUT-DWN32-SAME: type { i16 }
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:1
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// CHECK-NEXT: ]>
+
+// Not here -- poor alignment within struct
+struct C {
+  int f1;
+  char f2;
+  char a : 7;
+  char b : 7;
+} c;
+// CHECK-LABEL: LLVMType:%struct.C =
+// LAYOUT-FLEX-SAME: type { i32, i8, i8, i8 }
+// LAYOUT-STRICT-SAME: type { i32, i8, i8, i8 }
+// LAYOUT-DWN32-SAME: type <{ i32, i8, i16, i8 }>
+// CHECK: BitFields:[
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:5
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:5
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:5
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:5
+// CHECK-NEXT: ]>
+
+// Not here, we're packed
+struct __attribute__((packed)) D {
+  int f1;
+  int a : 8;
+  int b : 8;
+  char _;
+} d;
+// CHECK-LABEL: LLVMType:%struct.D =
+// LAYOUT-FLEX-SAME: type <{ i32, i16, i8 }>
+// LAYOUT-STRICT-SAME: type <{ i32, i16, i8 }>
+// LAYOUT-DWN32-FLEX-SAME: type <{ i32, i16, i8 }>
+// LAYOUT-DWN32-STRICT-SAME: type <{ i32, i16, i8 }>
+// CHECK: BitFields:[
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+
+// LAYOUT-DWN32-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT-DWN32-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+
+// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct E {
+  char a : 7;
+  short b : 13;
+  unsigned c : 12;
+} e;
+// CHECK-LABEL: LLVMType:%struct.E =
+// LAYOUT-FLEX64-SAME: type { i8, i16, i16, [2 x i8] }
+// LAYOUT-FLEX32-SAME: type { i8, i16, i16, [2 x i8] }
+// LAYOUT-STRICT-SAME: type { i8, i16, i16, [2 x i8] }
+// LAYOUT-DWN32-SAME: type { i32 }
+// CHECK: BitFields:[
+
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
+
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
+
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:0
+// CHECK-NEXT: ]>
+
+struct F {
+  char a : 7;
+  short b : 13;
+  unsigned c : 12;
+  signed char d : 7;
+} f;
+// CHECK-LABEL: LLVMType:%struct.F =
+// LAYOUT-FLEX64-SAME: type { i8, i16, i16, i8 }
+// LAYOUT-FLEX32-SAME: type { i8, i16, i16, i8 }
+// LAYOUT-STRICT-SAME: type { i8, i16, i16, i8 }
+// LAYOUT-DWN32-SAME: type { [5 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:40 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:40 StorageOffset:0
+// CHECK-NEXT: ]>
+
+struct G {
+  char a : 7;
+  short b : 13;
+  unsigned c : 12;
+  signed char d : 7;
+  signed char e;
+} g;
+// CHECK-LABEL: LLVMType:%struct.G =
+// LAYOUT-SAME: type { i8, i16, i16, i8, i8 }
+// LAYOUT-DWN32-SAME: type { [5 x i8], i8 }
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:40 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:40 StorageOffset:0
+// CHECK-NEXT: ]>
+
+#if _LP64
+struct A64 {
+  int a : 16;
+  short b : 8;
+  long c : 16;
+  int d : 16;
+  signed char e : 8;
+} a64;
+// CHECK-64-LABEL: LLVMType:%struct.A64 =
+// LAYOUT-64-SAME: type { i64 }
+// LAYOUT-64-DWN-SAME: type { i64 }
+// CHECK-64: BitFields:[
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// CHECK-64-NEXT: ]>
+
+struct B64 {
+  int a : 16;
+  short b : 8;
+  long c : 16;
+  int d : 16;
+  signed char e; // not a bitfield
+} b64;
+// CHECK-64-LABEL: LLVMType:%struct.B64 =
+// LAYOUT-64-FLEX-SAME: type { [7 x i8], i8 }
+// LAYOUT-64-STRICT-SAME: type { [7 x i8], i8 }
+// LAYOUT-64-DWN-SAME: type { [7 x i8], i8 }
+// CHECK-64: BitFields:[
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:56 StorageOffset:0
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
+
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:56 StorageOffset:0
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
+
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:56 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
+// CHECK-64-NEXT: ]>
+
+struct C64 {
+  int a : 15;
+  short b : 8;
+  long c : 16;
+  int d : 15;
+  signed char e : 7;
+} c64;
+// CHECK-64-LABEL: LLVMType:%struct.C64 =
+// LAYOUT-64-SAME: type { i16, [5 x i8], i8 }
+// LAYOUT-64-DWN-SAME: type { i16, [5 x i8], i8 }
+// CHECK-64: BitFields:[
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:40 StorageOffset:2
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:40 StorageOffset:2
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:40 StorageOffset:2
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:7
+
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:40 StorageOffset:2
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:40 StorageOffset:2
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:40 StorageOffset:2
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:7
+// CHECK-64-NEXT: ]>
+
+#endif
diff --git a/clang/test/CodeGen/no-bitfield-type-align.c b/clang/test/CodeGen/no-bitfield-type-align.c
index 53ed5e9ad8f854..f4698421ea1a72 100644
--- a/clang/test/CodeGen/no-bitfield-type-align.c
+++ b/clang/test/CodeGen/no-bitfield-type-align.c
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -triple x86_64-apple-darwin -fno-bitfield-type-align -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin -fno-bitfield-type-align -fdump-record-layouts-simple -emit-llvm -o %t %s | FileCheck %s -check-prefix=LAYOUT
+// RUN: FileCheck %s <%t
 
 struct S {
   unsigned short:   0;
@@ -7,6 +8,14 @@ struct S {
   unsigned short  f2:15;
 };
 
+// LAYOUT-LABEL: LLVMType:%struct.S =
+// LAYOUT-SAME: type { i32 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:15 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:15 Size:0 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:15 Size:15 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
 // CHECK: define{{.*}} void @test_zero_width_bitfield(ptr noundef %[[A:.*]])
 // CHECK: %[[BF_LOAD:.*]] = load i32, ptr %[[V1:.*]], align 1
 // CHECK: %[[BF_CLEAR:.*]] = and i32 %[[BF_LOAD]], 32767
diff --git a/clang/test/CodeGen/struct-x86-darwin.c b/clang/test/CodeGen/struct-x86-darwin.c
index 5191441cabaf04..350666a5168632 100644
--- a/clang/test/CodeGen/struct-x86-darwin.c
+++ b/clang/test/CodeGen/struct-x86-darwin.c
@@ -1,25 +1,70 @@
-// RUN: %clang_cc1 %s -emit-llvm -triple=i686-apple-darwin9 -o - | FileCheck %s
-// CHECK: STest1 = type { i32, [4 x i16], double }
-// CHECK: STest2 = type { i16, i16, i32, i32 }
-// CHECK: STest3 = type { i8, i16, i32 }
-// CHECK: STestB1 = type { i8, i8 }
-// CHECK: STestB2 = type { i8, i8, i8 }
-// CHECK: STestB3 = type { i8, i8 }
-// CHECK: STestB4 = type { i8, i8, i8, i8 }
-// CHECK: STestB5 = type { i8, i16, i8 }
-// CHECK: STestB6 = type { i8, i8, i16 }
+// RUN: %clang_cc1 %s -emit-llvm -o /dev/null -triple=i686-apple-darwin9 -fdump-record-layouts-simple | FileCheck %s
+
 // Test struct layout for x86-darwin target
 
 struct STest1 {int x; short y[4]; double z; } st1;
 struct STest2 {short a,b; int c,d; } st2;
 struct STest3 {char a; short b; int c; } st3;
 
-// Bitfields 
+// Bitfields
 struct STestB1 {char a; char b:2; } stb1;
 struct STestB2 {char a; char b:5; char c:4; } stb2;
 struct STestB3 {char a; char b:2; } stb3;
 struct STestB4 {char a; short b:2; char c; } stb4;
 struct STestB5 {char a; short b:10; char c; } stb5;
-struct STestB6 {int a:1; char b; int c:13 } stb6;
+struct STestB6 {int a:1; char b; int c:13; } stb6;
 
 // Packed struct STestP1 {char a; short b; int c; } __attribute__((__packed__)) stp1;
+
+// CHECK-LABEL: LLVMType:%struct.STest1 =
+// CHECK-SAME: type { i32, [4 x i16], double }
+// CHECK: BitFields:[
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STest2 =
+// CHECK-SAME: type { i16, i16, i32, i32 }
+// CHECK: BitFields:[
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STest3 =
+// CHECK-SAME: type { i8, i16, i32 }
+// CHECK: BitFields:[
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STestB1 =
+// CHECK-SAME: type { i8, i8 }
+// CHECK: BitFields:[
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:2 IsSigned:1 StorageSize:8 StorageOffset:1
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STestB2 =
+// CHECK-SAME: type { i8, i8, i8 }
+// CHECK: BitFields:[
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:1
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:4 IsSigned:1 StorageSize:8 StorageOffset:2
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STestB3 =
+// CHECK-SAME: type { i8, i8 }
+// CHECK: BitFields:[
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:2 IsSigned:1 StorageSize:8 StorageOffset:1
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STestB4 =
+// CHECK-SAME: type { i8, i8, i8, i8 }
+// CHECK: BitFields:[
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:2 IsSigned:1 StorageSize:8 StorageOffset:1
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STestB5 =
+// CHECK-SAME: type { i8, i16, i8 }
+// CHECK: BitFields:[
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:16 StorageOffset:2
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STestB6 =
+// CHECK-SAME: type { i8, i8, i16 }
+// CHECK: BitFields:[
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:1 IsSigned:1 StorageSize:8 StorageOffset:0
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// CHECK-NEXT: ]>
diff --git a/clang/test/CodeGenCXX/bitfield-access-empty.cpp b/clang/test/CodeGenCXX/bitfield-access-empty.cpp
new file mode 100644
index 00000000000000..194d2c9def6996
--- /dev/null
+++ b/clang/test/CodeGenCXX/bitfield-access-empty.cpp
@@ -0,0 +1,150 @@
+// Check if we can merge bitfields across empty members
+
+// Configs that have cheap unaligned access
+// Little Endian
+// RUN: %clang_cc1 -triple=aarch64-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=aarch64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=arm-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-DWN32 %s
+// RUN: %clang_cc1 -triple=arm-none-eabi %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=i686-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=loongarch64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=powerpcle-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=ve-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=wasm32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=wasm64 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=x86_64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Big Endian
+// RUN: %clang_cc1 -triple=powerpc-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=powerpc64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=systemz %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Configs that have expensive unaligned access
+// Little Endian
+// RUN: %clang_cc1 -triple=amdgcn-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=arc-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=bpf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=csky %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=hexagon-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=le64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=loongarch32-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=nvptx-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=riscv32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=riscv64 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=spir-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=xcore-none-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Big endian
+// RUN: %clang_cc1 -triple=lanai-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=m68k-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=mips-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=mips64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=sparc-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=tce-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+struct Empty {};
+
+struct P1 {
+  unsigned a : 16;
+  [[no_unique_address]] Empty e;
+  unsigned b : 16;
+} p1;
+// CHECK-LABEL: LLVMType:%struct.P1 =
+// LAYOUT-SAME: type { i16, i16 }
+// LAYOUT-DWN32-SAME: type { i16, i16 }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P1 =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:2
+// CHECK-NEXT: ]>
+
+struct P2 {
+  unsigned a : 15;
+  [[no_unique_address]] Empty e;
+  unsigned b : 15;
+} p2;
+// CHECK-LABEL: LLVMType:%struct.P2 =
+// LAYOUT-SAME: type { i16, i16 }
+// LAYOUT-DWN32-SAME: type { i16, i16 }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P2 =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:0 StorageSize:16 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:0 StorageSize:16 StorageOffset:2
+// CHECK-NEXT: ]>
+
+struct P3 {
+  unsigned a : 16;
+  Empty e;
+  unsigned b : 16;
+} p3;
+// CHECK-LABEL: LLVMType:%struct.P3 =
+// LAYOUT-SAME: type { i16, %struct.Empty, i16, [2 x i8] }
+// LAYOUT-DWN32-SAME: type <{ i16, %struct.Empty, i16 }>
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P3 =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:4
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:3
+// CHECK-NEXT: ]>
+
+struct P4 {
+  unsigned : 0;
+} p4;
+// CHECK-LABEL: LLVMType:%struct.P4 =
+// LAYOUT-SAME: type { {{.+}} }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P4 =
+// CHECK: BitFields:[
+// CHECK-NEXT: ]>
+
+struct P5 {
+  ~P5();
+  unsigned : 0;
+} p5;
+// CHECK-LABEL: LLVMType:%struct.P5 =
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P5.base = type {}
+// CHECK: BitFields:[
+// CHECK-NEXT: ]>
+
+struct P6 {
+  unsigned a : 16;
+  unsigned b : 8;
+  [[no_unique_address]] Empty e;
+  unsigned c;
+} p6;
+// CHECK-LABEL: LLVMType:%struct.P6 =
+// LAYOUT-SAME: type { i24, i32 }
+// LAYOUT-DWN32-SAME: type { i24, i32 }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P6 =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:32 StorageOffset:0
+// CHECK-NEXT: ]>
+
+struct P7 {
+  unsigned a : 16;
+  unsigned b : 8;
+  Empty e;
+  unsigned c;
+} p7;
+// CHECK-LABEL: LLVMType:%struct.P7 =
+// LAYOUT-SAME: type { [3 x i8], %struct.Empty, i32 }
+// LAYOUT-DWN32-SAME: type { [3 x i8], %struct.Empty, i32 }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P7 =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:24 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:24 StorageOffset:0
+// CHECK-NEXT: ]>
diff --git a/clang/test/CodeGenCXX/bitfield-access-tail.cpp b/clang/test/CodeGenCXX/bitfield-access-tail.cpp
new file mode 100644
index 00000000000000..dad0ab226282c2
--- /dev/null
+++ b/clang/test/CodeGenCXX/bitfield-access-tail.cpp
@@ -0,0 +1,115 @@
+// Check we use tail padding if it is known to be safe
+
+// Configs that have cheap unaligned access
+// Little Endian
+// RUN: %clang_cc1 -triple=aarch64-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=aarch64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=arm-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-DWN32 %s
+// RUN: %clang_cc1 -triple=arm-none-eabi %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=i686-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=loongarch64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=powerpcle-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=ve-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=wasm32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=wasm64 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=x86_64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Big Endian
+// RUN: %clang_cc1 -triple=powerpc-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=powerpc64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=systemz %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Configs that have expensive unaligned access
+// Little Endian
+// RUN: %clang_cc1 -triple=amdgcn-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=arc-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=bpf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=csky %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=hexagon-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=le64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=loongarch32-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=nvptx-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=riscv32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=riscv64 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=spir-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=xcore-none-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Big endian
+// RUN: %clang_cc1 -triple=lanai-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=m68k-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=mips-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=mips64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=sparc-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=tce-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Can use tail padding
+struct Pod {
+  int a : 16;
+  int b : 8;
+} P;
+// CHECK-LABEL: LLVMType:%struct.Pod =
+// LAYOUT-SAME: type { i24 }
+// LAYOUT-DWN32-SAME: type { [3 x i8] }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.Pod =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:32 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// CHECK-NEXT: ]>
+
+// No tail padding
+struct __attribute__((packed)) PPod {
+  int a : 16;
+  int b : 8;
+} PP;
+// CHECK-LABEL: LLVMType:%struct.PPod =
+// LAYOUT-SAME: type { [3 x i8] }
+// LAYOUT-DWN32-SAME: type { [3 x i8] }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.PPod =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// CHECK-NEXT: ]>
+
+// Cannot use tail padding
+struct NonPod {
+  ~NonPod();
+  int a : 16;
+  int b : 8;
+} NP;
+// CHECK-LABEL: LLVMType:%struct.NonPod =
+// LAYOUT-SAME: type { [3 x i8], i8 }
+// LAYOUT-DWN32-SAME: type { [3 x i8] }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.
+// LAYOUT-SAME: NonPod.base = type { [3 x i8] }
+// LAYOUT-DWN32-SAME: NonPod = type { [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// CHECK-NEXT: ]>
+
+// No tail padding
+struct __attribute__((packed)) PNonPod {
+  ~PNonPod();
+  int a : 16;
+  int b : 8;
+} PNP;
+// CHECK-LABEL: LLVMType:%struct.PNonPod =
+// LAYOUT-SAME: type { [3 x i8] }
+// LAYOUT-DWN32-SAME: type { [3 x i8] }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.PNonPod =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// CHECK-NEXT: ]>
diff --git a/clang/test/CodeGenCXX/bitfield-ir.cpp b/clang/test/CodeGenCXX/bitfield-ir.cpp
new file mode 100644
index 00000000000000..fa53f4d8900ef0
--- /dev/null
+++ b/clang/test/CodeGenCXX/bitfield-ir.cpp
@@ -0,0 +1,111 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -O2 -emit-llvm -o - %s | FileCheck %s
+
+struct Tail {
+  ~Tail();
+  int a : 16;
+  int b : 8;
+};
+
+struct Char {
+  int a : 16;
+  int b : 8;
+  char c;
+};
+
+struct Int {
+  int a : 16;
+  int b : 8;
+  int c;
+};
+
+
+// CHECK-LABEL: define dso_local void @_Z1AP4Tail
+// CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i24, ptr [[P]], align 4
+// CHECK-NEXT:    [[NARROW:%.*]] = add i24 [[BF_LOAD]], 1
+// CHECK-NEXT:    [[BF_VALUE:%.*]] = and i24 [[NARROW]], 65535
+// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i24 [[BF_LOAD]], -65536
+// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i24 [[BF_VALUE]], [[BF_CLEAR]]
+// CHECK-NEXT:    store i24 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    ret void
+//
+void A (Tail *p) {
+  p->a++;
+}
+
+// CHECK-LABEL: define dso_local void @_Z1BP4Tail
+// CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i24, ptr [[P]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = and i24 [[BF_LOAD]], -65536
+// CHECK-NEXT:    [[BF_SHL:%.*]] = add i24 [[TMP0]], 65536
+// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i24 [[BF_LOAD]], 65535
+// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i24 [[BF_SHL]], [[BF_CLEAR]]
+// CHECK-NEXT:    store i24 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    ret void
+//
+void B (Tail *p) {
+  p->b++;
+}
+
+// CHECK-LABEL: define dso_local void @_Z1AP4Char
+// CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i24, ptr [[P]], align 4
+// CHECK-NEXT:    [[NARROW:%.*]] = add i24 [[BF_LOAD]], 1
+// CHECK-NEXT:    [[BF_VALUE:%.*]] = and i24 [[NARROW]], 65535
+// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i24 [[BF_LOAD]], -65536
+// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i24 [[BF_VALUE]], [[BF_CLEAR]]
+// CHECK-NEXT:    store i24 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    ret void
+//
+void A (Char *p) {
+  p->a++;
+}
+
+// CHECK-LABEL: define dso_local void @_Z1BP4Char
+// CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i24, ptr [[P]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = and i24 [[BF_LOAD]], -65536
+// CHECK-NEXT:    [[BF_SHL:%.*]] = add i24 [[TMP0]], 65536
+// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i24 [[BF_LOAD]], 65535
+// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i24 [[BF_SHL]], [[BF_CLEAR]]
+// CHECK-NEXT:    store i24 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    ret void
+//
+void B (Char *p) {
+  p->b++;
+}
+
+// CHECK-LABEL: define dso_local void @_Z1AP3Int
+// CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[P]], align 4
+// CHECK-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
+// CHECK-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
+// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65536
+// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i32 [[BF_VALUE]], [[BF_CLEAR]]
+// CHECK-NEXT:    store i32 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    ret void
+//
+void A (Int *p) {
+  p->a++;
+}
+
+// CHECK-LABEL: define dso_local void @_Z1BP3Int
+// CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[P]], align 4
+// CHECK-NEXT:    [[BF_VALUE:%.*]] = add i32 [[BF_LOAD]], 65536
+// CHECK-NEXT:    [[BF_SHL2:%.*]] = and i32 [[BF_VALUE]], 16711680
+// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16711681
+// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i32 [[BF_SHL2]], [[BF_CLEAR]]
+// CHECK-NEXT:    store i32 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    ret void
+//
+void B (Int *p) {
+  p->b++;
+}
diff --git a/clang/test/CodeGenCXX/bitfield.cpp b/clang/test/CodeGenCXX/bitfield.cpp
index a478eb44915e7a..ddc3f9345622c3 100644
--- a/clang/test/CodeGenCXX/bitfield.cpp
+++ b/clang/test/CodeGenCXX/bitfield.cpp
@@ -1,7 +1,9 @@
-// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s \
-// RUN:   | FileCheck -check-prefix=CHECK-X86-64 %s
-// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -emit-llvm -o - %s \
-// RUN:   | FileCheck -check-prefix=CHECK-PPC64 %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fdump-record-layouts-simple \
+// RUN:   -emit-llvm -o %t %s | FileCheck -check-prefixes=LAYOUT,LAYOUT-X86-64 %s
+// RUN: FileCheck -check-prefix=CHECK-X86-64 %s <%t
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -fdump-record-layouts-simple\
+// RUN:  -emit-llvm -o %t %s | FileCheck -check-prefixes=LAYOUT,LAYOUT-PPC64 %s
+// RUN: FileCheck -check-prefix=CHECK-PPC64 %s <%t
 //
 // Tests for bitfield access patterns in C++ with special attention to
 // conformance to C++11 memory model requirements.
@@ -19,6 +21,27 @@ namespace N0 {
     unsigned b70 : 6;
     unsigned b71 : 2;
   };
+// LAYOUT-LABEL: LLVMType:%"struct.N0::S" =
+// LAYOUT-SAME: type { i64 }
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:14 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:14 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:16 Size:6 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:22 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:24 Size:30 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:54 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:56 Size:6 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:62 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:50 Size:14 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:48 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:42 Size:6 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:40 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:10 Size:30 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:8 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:2 Size:6 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
   unsigned read00(S* s) {
     // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N06read00
     // CHECK-X86-64:   %[[val:.*]]   = load i64, ptr %{{.*}}
@@ -149,6 +172,13 @@ namespace N1 {
     unsigned b : 1;
     char c;
   };
+// LAYOUT-LABEL: LLVMType:%"struct.N1::S" =
+// LAYOUT-SAME: type { i8, i8, i8, i8 }
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:1 IsSigned:0 StorageSize:8 StorageOffset:1
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:7 Size:1 IsSigned:0 StorageSize:8 StorageOffset:1
+// LAYOUT-NEXT: ]>
+
   unsigned read(S* s) {
     // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N14read
     // CHECK-X86-64:   %[[ptr:.*]] = getelementptr inbounds %{{.*}}, ptr %{{.*}}, i32 0, i32 1
@@ -193,6 +223,13 @@ namespace N2 {
     unsigned b : 24;
     void *p;
   };
+// LAYOUT-LABEL: LLVMType:%"struct.N2::S" =
+// LAYOUT-SAME: type { i24, ptr }
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
   unsigned read(S* s) {
     // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N24read
     // CHECK-X86-64:   %[[val:.*]] = load i32, ptr %{{.*}}
@@ -230,6 +267,13 @@ namespace N3 {
   struct S {
     unsigned b : 24;
   };
+// LAYOUT-LABEL: LLVMType:%"struct.N3::S" =
+// LAYOUT-SAME: type { i24 }
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
   unsigned read(S* s) {
     // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N34read
     // CHECK-X86-64:   %[[val:.*]] = load i32, ptr %{{.*}}
@@ -276,6 +320,14 @@ namespace N4 {
     char c;
   };
 #endif
+// LAYOUT-LABEL: LLVMType:%"struct.N4::Base" =
+// LAYOUT-SAME: type <{ ptr, [3 x i8], [5 x i8] }>
+// LAYOUT-NEXT: NonVirtualBaseLLVMType:%"struct.N4::Base.base" = type <{ ptr, [3 x i8] }>
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:8
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:8
+// LAYOUT-NEXT: ]>
+
   unsigned read(Base* s) {
     // FIXME: We should widen this load as long as the function isn't being
     // instrumented by ThreadSanitizer.
@@ -317,6 +369,22 @@ namespace N5 {
     struct X { unsigned b : 24; char c; } x;
     struct Y { unsigned b : 24; } y;
   };
+// LAYOUT-LABEL: LLVMType:%"struct.N5::U::X" =
+// LAYOUT-SAME: type { [3 x i8], i8 }
+// LAYOUT-NEXT:  NonVirtualBaseLLVMType:%"struct.N5::U::X" =
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+// LAYOUT-LABEL: LLVMType:%"struct.N5::U::Y" =
+// LAYOUT-SAME: type { i24 }
+// LAYOUT-NEXT: NonVirtualBaseLLVMType:%"struct.N5::U::Y" =
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
   unsigned read(U* u) {
     // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N54read
     // CHECK-X86-64:   %[[val:.*]] = load i32, ptr %{{.*}}
@@ -360,6 +428,15 @@ namespace N6 {
     unsigned char : 0;
     unsigned char b2 : 8;
   };
+// LAYOUT-LABEL: LLVMType:%"struct.N6::S" =
+// LAYOUT-SAME: type { [3 x i8], i8 }
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:0 StorageSize:8 StorageOffset:3
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:0 StorageSize:8 StorageOffset:3
+// LAYOUT-NEXT: ]>
+
   unsigned read(S* s) {
     // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N64read
     // CHECK-X86-64:   %[[val1:.*]] = load i24, ptr %{{.*}}
@@ -416,6 +493,22 @@ namespace N7 {
     char c;
   };
 #endif
+// LAYOUT-LABEL: LLVMType:%"struct.N7::B1" =
+// LAYOUT-SAME: type <{ ptr, [3 x i8], [5 x i8] }>
+// LAYOUT-NEXT:  NonVirtualBaseLLVMType:%"struct.N7::B1.base" = type <{ ptr, [3 x i8] }>
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:8
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:8
+// LAYOUT-NEXT: ]>
+
+// LAYOUT-LABEL: LLVMType:%"struct.N7::B2" =
+// LAYOUT-SAME: type <{ ptr, [3 x i8], [5 x i8], %"struct.N7::B1.base", [5 x i8] }>
+// LAYOUT-NEXT: NonVirtualBaseLLVMType:%"struct.N7::B2.base" = type <{ ptr, [3 x i8] }>
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:8
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:8
+// LAYOUT-NEXT: ]>
+
   unsigned read(B2* s) {
     // FIXME: We should widen this load as long as the function isn't being
     // instrumented by ThreadSanitizer.

>From deb94c63147013b9e80843c4e1824a6ca34e5730 Mon Sep 17 00:00:00 2001
From: Nathan Sidwell <nathan at acm.org>
Date: Fri, 8 Sep 2023 08:26:09 -0400
Subject: [PATCH 2/8] [clang] TargetInfo hook for unaligned bitfields

---
 clang/include/clang/Basic/TargetInfo.h | 15 +++++++++++++++
 clang/lib/Basic/TargetInfo.cpp         |  1 +
 clang/lib/Basic/Targets/AArch64.cpp    |  7 +++++--
 clang/lib/Basic/Targets/AArch64.h      |  1 -
 clang/lib/Basic/Targets/ARM.cpp        |  6 +++---
 clang/lib/Basic/Targets/ARM.h          |  2 --
 clang/lib/Basic/Targets/LoongArch.cpp  |  2 ++
 clang/lib/Basic/Targets/LoongArch.h    |  1 +
 clang/lib/Basic/Targets/Mips.h         |  2 ++
 clang/lib/Basic/Targets/PPC.h          |  1 +
 clang/lib/Basic/Targets/SystemZ.h      |  1 +
 clang/lib/Basic/Targets/VE.h           |  1 +
 clang/lib/Basic/Targets/WebAssembly.h  |  1 +
 clang/lib/Basic/Targets/X86.h          |  1 +
 14 files changed, 34 insertions(+), 8 deletions(-)

diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index 374595edd2ce4a..e1ef7454f01669 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -267,6 +267,9 @@ class TargetInfo : public TransferrableTargetInfo,
   LLVM_PREFERRED_TYPE(bool)
   unsigned AllowAMDGPUUnsafeFPAtomics : 1;
 
+  LLVM_PREFERRED_TYPE(bool)
+  unsigned HasUnalignedAccess : 1;
+
   unsigned ARMCDECoprocMask : 8;
 
   unsigned MaxOpenCLWorkGroupSize;
@@ -859,6 +862,18 @@ class TargetInfo : public TransferrableTargetInfo,
     return PointerWidth;
   }
 
+  /// Return true iff unaligned accesses are a single instruction (rather than
+  /// a synthesized sequence).
+  bool hasUnalignedAccess() const { return HasUnalignedAccess; }
+
+  /// Return true iff unaligned accesses are cheap. This affects placement and
+  /// size of bitfield loads/stores. (Not the ABI-mandated placement of
+  /// the bitfields themselves.)
+  bool hasCheapUnalignedBitFieldAccess() const {
+    // Simply forward to the unaligned access getter.
+    return hasUnalignedAccess();
+  }
+
   /// \brief Returns the default value of the __USER_LABEL_PREFIX__ macro,
   /// which is the prefix given to user symbols by default.
   ///
diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp
index 5d9055174c089a..f96956f31d50dc 100644
--- a/clang/lib/Basic/TargetInfo.cpp
+++ b/clang/lib/Basic/TargetInfo.cpp
@@ -157,6 +157,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
   HasAArch64SVETypes = false;
   HasRISCVVTypes = false;
   AllowAMDGPUUnsafeFPAtomics = false;
+  HasUnalignedAccess = false;
   ARMCDECoprocMask = 0;
 
   // Default to no types using fpret.
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index fa8b5a869594b3..8d499a770a600c 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -187,6 +187,8 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
   assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
   UseZeroLengthBitfieldAlignment = true;
 
+  HasUnalignedAccess = true;
+
   // AArch64 targets default to using the ARM C++ ABI.
   TheCXXABI.set(TargetCXXABI::GenericAArch64);
 
@@ -485,7 +487,7 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
   if (HasPAuthLR)
     Builder.defineMacro("__ARM_FEATURE_PAUTH_LR", "1");
 
-  if (HasUnaligned)
+  if (HasUnalignedAccess)
     Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
 
   if ((FPU & NeonMode) && HasFullFP16)
@@ -909,7 +911,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
       HasSM4 = true;
     }
     if (Feature == "+strict-align")
-      HasUnaligned = false;
+      HasUnalignedAccess = false;
+
     // All predecessor archs are added but select the latest one for ArchKind.
     if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
       ArchInfo = &llvm::AArch64::ARMV8A;
diff --git a/clang/lib/Basic/Targets/AArch64.h b/clang/lib/Basic/Targets/AArch64.h
index 2dd6b2181e87df..50e04ed3692859 100644
--- a/clang/lib/Basic/Targets/AArch64.h
+++ b/clang/lib/Basic/Targets/AArch64.h
@@ -38,7 +38,6 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
   bool HasSHA2 = false;
   bool HasSHA3 = false;
   bool HasSM4 = false;
-  bool HasUnaligned = true;
   bool HasFullFP16 = false;
   bool HasDotProd = false;
   bool HasFP16FML = false;
diff --git a/clang/lib/Basic/Targets/ARM.cpp b/clang/lib/Basic/Targets/ARM.cpp
index 55b71557452fa0..877799c66ec4f2 100644
--- a/clang/lib/Basic/Targets/ARM.cpp
+++ b/clang/lib/Basic/Targets/ARM.cpp
@@ -509,7 +509,7 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
   SHA2 = 0;
   AES = 0;
   DSP = 0;
-  Unaligned = 1;
+  HasUnalignedAccess = true;
   SoftFloat = false;
   // Note that SoftFloatABI is initialized in our constructor.
   HWDiv = 0;
@@ -576,7 +576,7 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
         return false;
       }
     } else if (Feature == "+strict-align") {
-      Unaligned = 0;
+      HasUnalignedAccess = false;
     } else if (Feature == "+fp16") {
       HW_FP |= HW_FP_HP;
     } else if (Feature == "+fullfp16") {
@@ -785,7 +785,7 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
     Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + CPUProfile + "'");
 
   // ACLE 6.4.3 Unaligned access supported in hardware
-  if (Unaligned)
+  if (HasUnalignedAccess)
     Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
 
   // ACLE 6.4.4 LDREX/STREX
diff --git a/clang/lib/Basic/Targets/ARM.h b/clang/lib/Basic/Targets/ARM.h
index 71322a094f5edb..e69adbe754739f 100644
--- a/clang/lib/Basic/Targets/ARM.h
+++ b/clang/lib/Basic/Targets/ARM.h
@@ -88,8 +88,6 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
   LLVM_PREFERRED_TYPE(bool)
   unsigned DSP : 1;
   LLVM_PREFERRED_TYPE(bool)
-  unsigned Unaligned : 1;
-  LLVM_PREFERRED_TYPE(bool)
   unsigned DotProd : 1;
   LLVM_PREFERRED_TYPE(bool)
   unsigned HasMatMul : 1;
diff --git a/clang/lib/Basic/Targets/LoongArch.cpp b/clang/lib/Basic/Targets/LoongArch.cpp
index 88537989a05129..280bd1d8033cc6 100644
--- a/clang/lib/Basic/Targets/LoongArch.cpp
+++ b/clang/lib/Basic/Targets/LoongArch.cpp
@@ -285,6 +285,8 @@ bool LoongArchTargetInfo::handleTargetFeatures(
       HasFeatureLSX = true;
     else if (Feature == "+lasx")
       HasFeatureLASX = true;
+    else if (Feature == "-ual")
+      HasUnalignedAccess = false;
   }
   return true;
 }
diff --git a/clang/lib/Basic/Targets/LoongArch.h b/clang/lib/Basic/Targets/LoongArch.h
index 3313102492cb8d..68572843f2d748 100644
--- a/clang/lib/Basic/Targets/LoongArch.h
+++ b/clang/lib/Basic/Targets/LoongArch.h
@@ -132,6 +132,7 @@ class LLVM_LIBRARY_VISIBILITY LoongArch64TargetInfo
       : LoongArchTargetInfo(Triple, Opts) {
     LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
     IntMaxType = Int64Type = SignedLong;
+    HasUnalignedAccess = true;
     resetDataLayout("e-m:e-p:64:64-i64:64-i128:128-n64-S128");
     // TODO: select appropriate ABI.
     setABI("lp64d");
diff --git a/clang/lib/Basic/Targets/Mips.h b/clang/lib/Basic/Targets/Mips.h
index 23d4e1b598fa1e..c9dcf434c93b0b 100644
--- a/clang/lib/Basic/Targets/Mips.h
+++ b/clang/lib/Basic/Targets/Mips.h
@@ -328,6 +328,8 @@ class LLVM_LIBRARY_VISIBILITY MipsTargetInfo : public TargetInfo {
         IsMips16 = true;
       else if (Feature == "+micromips")
         IsMicromips = true;
+      else if (Feature == "+mips32r6" || Feature == "+mips64r6")
+        HasUnalignedAccess = true;
       else if (Feature == "+dsp")
         DspRev = std::max(DspRev, DSP1);
       else if (Feature == "+dspr2")
diff --git a/clang/lib/Basic/Targets/PPC.h b/clang/lib/Basic/Targets/PPC.h
index 70683916a8b04f..fa2f442e25846d 100644
--- a/clang/lib/Basic/Targets/PPC.h
+++ b/clang/lib/Basic/Targets/PPC.h
@@ -92,6 +92,7 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
     LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble();
     HasStrictFP = true;
     HasIbm128 = true;
+    HasUnalignedAccess = true;
   }
 
   // Set the language option for altivec based on our value.
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index 3e08b27972fa39..8e302acd51b8ad 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -47,6 +47,7 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
     LongDoubleFormat = &llvm::APFloat::IEEEquad();
     DefaultAlignForAttributeAligned = 64;
     MinGlobalAlign = 16;
+    HasUnalignedAccess = true;
     if (Triple.isOSzOS()) {
       TLSSupported = false;
       // All vector types are default aligned on an 8-byte boundary, even if the
diff --git a/clang/lib/Basic/Targets/VE.h b/clang/lib/Basic/Targets/VE.h
index ea9a092cad8090..7e8fdf6096ef23 100644
--- a/clang/lib/Basic/Targets/VE.h
+++ b/clang/lib/Basic/Targets/VE.h
@@ -40,6 +40,7 @@ class LLVM_LIBRARY_VISIBILITY VETargetInfo : public TargetInfo {
     Int64Type = SignedLong;
     RegParmMax = 8;
     MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+    HasUnalignedAccess = true;
 
     WCharType = UnsignedInt;
     WIntType = UnsignedInt;
diff --git a/clang/lib/Basic/Targets/WebAssembly.h b/clang/lib/Basic/Targets/WebAssembly.h
index 83b1711f9fdf6a..5568aa28eaefa7 100644
--- a/clang/lib/Basic/Targets/WebAssembly.h
+++ b/clang/lib/Basic/Targets/WebAssembly.h
@@ -84,6 +84,7 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
     SizeType = UnsignedLong;
     PtrDiffType = SignedLong;
     IntPtrType = SignedLong;
+    HasUnalignedAccess = true;
   }
 
   StringRef getABI() const override;
diff --git a/clang/lib/Basic/Targets/X86.h b/clang/lib/Basic/Targets/X86.h
index d2232c7d5275ab..c14e4d5f433d82 100644
--- a/clang/lib/Basic/Targets/X86.h
+++ b/clang/lib/Basic/Targets/X86.h
@@ -188,6 +188,7 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
     LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
     AddrSpaceMap = &X86AddrSpaceMap;
     HasStrictFP = true;
+    HasUnalignedAccess = true;
 
     bool IsWinCOFF =
         getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF();

>From 417cfebf9e266aa3fd6a45c1984047ba4282c824 Mon Sep 17 00:00:00 2001
From: Nathan Sidwell <nathan at acm.org>
Date: Sat, 16 Mar 2024 07:01:21 -0400
Subject: [PATCH 3/8] [clang] Better SysV bitfield access units

---
 clang/lib/CodeGen/CGRecordLayoutBuilder.cpp   |  352 +++-
 .../test/CodeGen/aapcs-bitfield-access-unit.c |   60 +-
 clang/test/CodeGen/aapcs-bitfield.c           | 1806 ++++++-----------
 clang/test/CodeGen/arm-bitfield-alignment.c   |   23 +-
 clang/test/CodeGen/arm64-be-bitfield.c        |    8 +-
 clang/test/CodeGen/bitfield-2.c               |    6 +-
 clang/test/CodeGen/bitfield-access-pad.c      |   97 +-
 clang/test/CodeGen/bitfield-access-unit.c     |  168 +-
 .../CodeGen/debug-info-bitfield-0-struct.c    |    6 +-
 clang/test/CodeGen/no-bitfield-type-align.c   |    1 -
 clang/test/CodeGen/struct-x86-darwin.c        |    6 +-
 clang/test/CodeGen/tbaa-struct.cpp            |    2 +-
 .../test/CodeGenCXX/bitfield-access-empty.cpp |   16 +-
 .../test/CodeGenCXX/bitfield-access-tail.cpp  |   48 +-
 clang/test/CodeGenCXX/bitfield-ir.cpp         |   38 +-
 clang/test/CodeGenCXX/bitfield.cpp            |    6 +-
 clang/test/OpenMP/atomic_capture_codegen.cpp  |    8 +-
 clang/test/OpenMP/atomic_read_codegen.c       |    4 +-
 clang/test/OpenMP/atomic_update_codegen.cpp   |    8 +-
 clang/test/OpenMP/atomic_write_codegen.c      |    8 +-
 20 files changed, 1064 insertions(+), 1607 deletions(-)

diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 7822903b89ce47..d6f9e29b42d10c 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -47,8 +47,10 @@ namespace {
 ///   [i8 x 3] instead of i24.  The function clipTailPadding does this.
 ///   C++ examples that require clipping:
 ///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
-///   struct A { int a : 24; }; // a must be clipped because a struct like B
-//    could exist: struct B : A { char b; }; // b goes at offset 3
+///   struct A { int a : 24; ~A(); }; // a must be clipped because:
+///   struct B : A { char b; }; // b goes at offset 3
+/// * The allocation of bitfield access units is described in more detail in
+///   CGRecordLowering::accumulateBitFields.
 /// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
 ///   fields.  The existing asserts suggest that LLVM assumes that *every* field
 ///   has an underlying storage type.  Therefore empty structures containing
@@ -91,6 +93,10 @@ struct CGRecordLowering {
     // MemberInfos are sorted so we define a < operator.
     bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
   };
+  // BitFieldAccessUnit is a helper structure for accumulateBitFields. It
+  // represents a set of bitfields in the same load/store unit.
+  class BitFieldAccessUnit;
+
   // The constructor.
   CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
   // Short helper routines.
@@ -184,8 +190,13 @@ struct CGRecordLowering {
   void lower(bool NonVirtualBaseType);
   void lowerUnion(bool isNoUniqueAddress);
   void accumulateFields();
-  void accumulateBitFields(RecordDecl::field_iterator Field,
-                           RecordDecl::field_iterator FieldEnd);
+  RecordDecl::field_iterator
+  accumulateBitFields(RecordDecl::field_iterator Field,
+                      RecordDecl::field_iterator FieldEnd);
+  BitFieldAccessUnit
+  gatherBitFieldAccessUnit(RecordDecl::field_iterator Field,
+                           RecordDecl::field_iterator FieldEnd) const;
+  void installBitFieldAccessUnit(const BitFieldAccessUnit &);
   void computeVolatileBitfields();
   void accumulateBases();
   void accumulateVPtrs();
@@ -378,13 +389,15 @@ void CGRecordLowering::lowerUnion(bool isNoUniqueAddress) {
 void CGRecordLowering::accumulateFields() {
   for (RecordDecl::field_iterator Field = D->field_begin(),
                                   FieldEnd = D->field_end();
-    Field != FieldEnd;) {
+       Field != FieldEnd;) {
     if (Field->isBitField()) {
-      RecordDecl::field_iterator Start = Field;
-      // Iterate to gather the list of bitfields.
-      for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
-      accumulateBitFields(Start, Field);
-    } else if (!Field->isZeroSize(Context)) {
+      Field = accumulateBitFields(Field, FieldEnd);
+      assert((Field == FieldEnd || !Field->isBitField()) &&
+             "Failed to accumulate all the bitfields");
+    } else if (Field->isZeroSize(Context)) {
+      // Empty fields have no storage.
+      ++Field;
+    } else {
       // Use base subobject layout for the potentially-overlapping field,
       // as it is done in RecordLayoutBuilder
       Members.push_back(MemberInfo(
@@ -394,33 +407,141 @@ void CGRecordLowering::accumulateFields() {
               : getStorageType(*Field),
           *Field));
       ++Field;
-    } else {
-      ++Field;
     }
   }
 }
 
-void
+// A run of bitfields assigned to the same access unit -- the size of memory
+// loads & stores.
+class CGRecordLowering::BitFieldAccessUnit {
+  RecordDecl::field_iterator Begin; // Field at start of this access unit.
+  RecordDecl::field_iterator End;   // Field just after this access unit.
+
+  CharUnits StartOffset; // Starting offset in the containing record.
+  CharUnits EndOffset;   // Finish offset (exclusive) in the containing record.
+
+  bool IsBarrier;        // Is a barrier between access units.
+  bool ContainsVolatile; // This access unit contains a volatile bitfield.
+
+public:
+  BitFieldAccessUnit(RecordDecl::field_iterator B, RecordDecl::field_iterator E,
+                     CharUnits SO, CharUnits EO, bool Barrier = false,
+                     bool Volatile = false)
+      : Begin(B), End(E), StartOffset(SO), EndOffset(EO), IsBarrier(Barrier),
+        ContainsVolatile(Volatile) {}
+
+  // Re-set the end of this unit if there is space before Probe starts.
+  void enlargeIfSpace(const BitFieldAccessUnit &Probe, CharUnits Offset) {
+    if (Probe.getStartOffset() >= Offset) {
+      End = Probe.begin();
+      EndOffset = Offset;
+    }
+  }
+
+public:
+  RecordDecl::field_iterator begin() const { return Begin; }
+  RecordDecl::field_iterator end() const { return End; }
+
+public:
+  // Accessors
+  CharUnits getSize() const { return EndOffset - StartOffset; }
+  CharUnits getStartOffset() const { return StartOffset; }
+  CharUnits getEndOffset() const { return EndOffset; }
+
+  // Predicates
+  bool isBarrier() const { return IsBarrier; }
+  bool hasVolatile() const { return ContainsVolatile; }
+  bool isEnd() const { return begin() == end(); }
+};
+
+CGRecordLowering::BitFieldAccessUnit CGRecordLowering::gatherBitFieldAccessUnit(
+    RecordDecl::field_iterator Field,
+    RecordDecl::field_iterator FieldEnd) const {
+  if (Field == FieldEnd || !Field->isBitField()) {
+    // Skip over any empty fields to find the next used offset.
+    auto Probe = Field;
+    while (Probe != FieldEnd && Probe->isZeroSize(Context))
+      ++Probe;
+    // We can't necessarily use tail padding in C++ structs, so the NonVirtual
+    // size is what we must use there.
+    CharUnits Limit = Probe != FieldEnd
+                          ? bitsToCharUnits(getFieldBitOffset(*Probe))
+                      : RD ? Layout.getNonVirtualSize()
+                           : Layout.getDataSize();
+    return BitFieldAccessUnit(Field, Field, Limit, Limit, true);
+  }
+
+  auto Begin = Field;
+  uint64_t StartBit = getFieldBitOffset(*Field);
+  uint64_t BitSize = Field->getBitWidthValue(Context);
+  unsigned CharBits = Context.getCharWidth();
+  bool Volatile = Field->getType().isVolatileQualified();
+
+  assert(!(StartBit % CharBits) && "Not at start of char");
+
+  // Gather bitfields until we have one starting at a byte boundary.
+  for (++Field; Field != FieldEnd && Field->isBitField(); ++Field) {
+    uint64_t BitOffset = getFieldBitOffset(*Field);
+    if (!(BitOffset % CharBits))
+      // This BitField starts at a byte boundary. It belongs in the next access
+      // unit (initially).
+      break;
+    assert(BitOffset == StartBit + BitSize &&
+           "Concatenating non-contiguous bitfields");
+    BitSize += Field->getBitWidthValue(Context);
+    if (Field->getType().isVolatileQualified())
+      Volatile = true;
+  }
+
+  // A zero-sized access unit is only a barrier under some conditions.
+  bool Barrier =
+      !BitSize && (Context.getTargetInfo().useZeroLengthBitfieldAlignment() ||
+                   Context.getTargetInfo().useBitFieldTypeAlignment());
+
+  return BitFieldAccessUnit(Begin, Field, bitsToCharUnits(StartBit),
+                            bitsToCharUnits(StartBit + BitSize + CharBits - 1),
+                            Barrier, Volatile);
+}
+
+// Create the containing access unit and install the bitfields.
+void CGRecordLowering::installBitFieldAccessUnit(const BitFieldAccessUnit &U) {
+  if (U.getSize().isZero())
+    return;
+
+  // Add the storage member for the access unit to the record. The
+  // bitfields get the offset of their storage but come afterward and remain
+  // there after a stable sort.
+  llvm::Type *Type = getIntNType(Context.toBits(U.getSize()));
+  Members.push_back(StorageInfo(U.getStartOffset(), Type));
+  for (auto F : U)
+    if (!F->isZeroLengthBitField(Context))
+      Members.push_back(
+          MemberInfo(U.getStartOffset(), MemberInfo::Field, nullptr, F));
+}
+
+// Create members for bitfields. Field is a bitfield, and FieldEnd is the end
+// iterator of the record. Return the first non-bitfield encountered.
+RecordDecl::field_iterator
 CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                       RecordDecl::field_iterator FieldEnd) {
-  // Run stores the first element of the current run of bitfields.  FieldEnd is
-  // used as a special value to note that we don't have a current run.  A
-  // bitfield run is a contiguous collection of bitfields that can be stored in
-  // the same storage block.  Zero-sized bitfields and bitfields that would
-  // cross an alignment boundary break a run and start a new one.
-  RecordDecl::field_iterator Run = FieldEnd;
-  // Tail is the offset of the first bit off the end of the current run.  It's
-  // used to determine if the ASTRecordLayout is treating these two bitfields as
-  // contiguous.  StartBitOffset is offset of the beginning of the Run.
-  uint64_t StartBitOffset, Tail = 0;
   if (isDiscreteBitFieldABI()) {
-    for (; Field != FieldEnd; ++Field) {
-      uint64_t BitOffset = getFieldBitOffset(*Field);
+    // Run stores the first element of the current run of bitfields. FieldEnd is
+    // used as a special value to note that we don't have a current run. A
+    // bitfield run is a contiguous collection of bitfields that can be stored
+    // in the same storage block. Zero-sized bitfields and bitfields that would
+    // cross an alignment boundary break a run and start a new one.
+    RecordDecl::field_iterator Run = FieldEnd;
+    // Tail is the offset of the first bit off the end of the current run. It's
+    // used to determine if the ASTRecordLayout is treating these two bitfields
+    // as contiguous. StartBitOffset is the offset of the beginning of the Run.
+    uint64_t StartBitOffset, Tail = 0;
+    for (; Field != FieldEnd && Field->isBitField(); ++Field) {
       // Zero-width bitfields end runs.
       if (Field->isZeroLengthBitField(Context)) {
         Run = FieldEnd;
         continue;
       }
+      uint64_t BitOffset = getFieldBitOffset(*Field);
       llvm::Type *Type =
           Types.ConvertTypeForMem(Field->getType(), /*ForBitField=*/true);
       // If we don't have a run yet, or don't live within the previous run's
@@ -439,82 +560,127 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
       Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                    MemberInfo::Field, nullptr, *Field));
     }
-    return;
+    return Field;
   }
 
-  // Check if OffsetInRecord (the size in bits of the current run) is better
-  // as a single field run. When OffsetInRecord has legal integer width, and
-  // its bitfield offset is naturally aligned, it is better to make the
-  // bitfield a separate storage component so as it can be accessed directly
-  // with lower cost.
-  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
-                                      uint64_t StartBitOffset) {
-    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
-      return false;
-    if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
-        !DataLayout.fitsInLegalInteger(OffsetInRecord))
-      return false;
-    // Make sure StartBitOffset is naturally aligned if it is treated as an
-    // IType integer.
-    if (StartBitOffset %
-            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
-        0)
-      return false;
-    return true;
+  // The SysV ABI can overlap bitfield storage units with both other bitfield
+  // storage units /and/ other non-bitfield data members. Accessing a sequence
+  // of bitfields mustn't interfere with adjacent non-bitfields -- they're
+  // permitted to be accessed in separate threads, for instance.
+
+  // We split runs of bit-fields into a sequence of "access units". When we emit
+  // a load or store of a bit-field, we'll load/store the entire containing
+  // access unit. As mentioned, the standard requires that these loads and
+  // stores must not interfere with accesses to other memory locations, and it
+  // defines the bit-field's memory location as the current run of
+  // non-zero-width bit-fields. So an access unit must never overlap with
+  // non-bit-field storage or cross a zero-width bit-field. Otherwise, we're
+  // free to draw the lines as we see fit.
+
+  // Drawing these lines well can be complicated. LLVM generally can't modify a
+  // program to access memory that it didn't before, so using very narrow access
+  // units can prevent the compiler from using optimal access patterns. For
+  // example, suppose a run of bit-fields occupies four bytes in a struct. If we
+  // split that into four 1-byte access units, then a sequence of assignments
+  // that doesn't touch all four bytes may have to be emitted with multiple
+  // 8-bit stores instead of a single 32-bit store. On the other hand, if we use
+  // very wide access units, we may find ourselves emitting accesses to
+  // bit-fields we didn't really need to touch, just because LLVM was unable to
+  // clean up after us.
+
+  // It is desirable to have access units be aligned powers of 2 no larger than
+  // a register. (On non-strict alignment ISAs, the alignment requirement can be
+  // dropped.) A three byte access unit will be accessed using 2-byte and 1-byte
+  // accesses and bit manipulation. If no bitfield straddles across the two
+  // separate accesses, it is better to have separate 2-byte and 1-byte access
+  // units, as then LLVM will not generate unnecessary memory accesses, or bit
+  // manipulation. Similarly, on a strict-alignment architecture, it is better
+  // to keep access-units naturally aligned, to avoid similar bit
+  // manipulation synthesizing larger unaligned accesses.
+
+  // We do this in two phases, processing a sequential run of bitfield
+  // declarations.
+
+  // a) Bitfields that share parts of a single byte are, of necessity, placed in
+  // the same access unit. That unit will encompass a consecutive
+  // run where adjacent bitfields share parts of a byte. (The first bitfield of
+  // such an access unit will start at the beginning of a byte.)
+
+  // b) Accumulate adjacent access units when the combined unit is naturally
+  // sized, no larger than a register, and on a strict alignment ISA,
+  // aligned. Note that this requires lookahead to one or more subsequent access
+  // units. For instance, consider a 2-byte access-unit followed by 2 1-byte
+  // units. We can merge that into a 4-byte access-unit, but we would not want
+  // to merge a 2-byte followed by a single 1-byte (and no available tail
+  // padding).
+
+  // This accumulation is prevented when:
+  // *) it would cross a zero-width bitfield (ABI-dependent), or
+  // *) one of the candidate access units contains a volatile bitfield, or
+  // *) fine-grained bitfield access option is in effect.
+
+  CharUnits RegSize =
+      bitsToCharUnits(Context.getTargetInfo().getRegisterWidth());
+  // The natural size of an access of Size, unless poorly aligned, in which case
+  // CharUnits::Zero is returned.
+  auto NaturalSize = [&](CharUnits StartOffset, CharUnits Size) {
+    if (Size.isZero())
+      return Size;
+    llvm::Type *Type = getIntNType(Context.toBits(Size));
+    if (!Context.getTargetInfo().hasCheapUnalignedBitFieldAccess()) {
+      // This alignment is that of the storage used -- for instance
+      // (usually) 4 bytes for a 24-bit type.
+      CharUnits Align = getAlignment(Type);
+      if (Align > Layout.getAlignment() || !StartOffset.isMultipleOf(Align))
+        return CharUnits::Zero();
+    }
+    // This is the storage-size of Type -- for instance a 24-bit type will
+    // require 4 bytes.
+    return getSize(Type);
   };
 
-  // The start field is better as a single field run.
-  bool StartFieldAsSingleRun = false;
-  for (;;) {
-    // Check to see if we need to start a new run.
-    if (Run == FieldEnd) {
-      // If we're out of fields, return.
-      if (Field == FieldEnd)
-        break;
-      // Any non-zero-length bitfield can start a new run.
-      if (!Field->isZeroLengthBitField(Context)) {
-        Run = Field;
-        StartBitOffset = getFieldBitOffset(*Field);
-        Tail = StartBitOffset + Field->getBitWidthValue(Context);
-        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
-                                                         StartBitOffset);
+  BitFieldAccessUnit Current = gatherBitFieldAccessUnit(Field, FieldEnd);
+  do {
+    BitFieldAccessUnit Probe =
+        gatherBitFieldAccessUnit(Current.end(), FieldEnd);
+    if (!Current.getSize().isZero() &&
+        !Types.getCodeGenOpts().FineGrainedBitfieldAccesses) {
+      CharUnits Size = NaturalSize(Current.getStartOffset(), Current.getSize());
+      if (!Size.isZero()) {
+        CharUnits EndOffset = Current.getStartOffset() + Size;
+
+        Current.enlargeIfSpace(Probe, EndOffset);
+
+        if (!Current.hasVolatile())
+          // Look for beneficial merging with subsequent access units.
+          while (!Probe.isBarrier() && !Probe.hasVolatile()) {
+            CharUnits NewSize = Probe.getEndOffset() - Current.getStartOffset();
+            if (NewSize > Size) {
+              if (NewSize > RegSize)
+                break;
+
+              Size = NaturalSize(Current.getStartOffset(), NewSize);
+              if (Size.isZero())
+                break;
+              assert(Size <= RegSize && "Unexpectedly large storage");
+              EndOffset = Current.getStartOffset() + Size;
+            }
+            Probe = gatherBitFieldAccessUnit(Probe.end(), FieldEnd);
+            Current.enlargeIfSpace(Probe, EndOffset);
+          }
       }
-      ++Field;
-      continue;
     }
 
-    // If the start field of a new run is better as a single run, or
-    // if current field (or consecutive fields) is better as a single run, or
-    // if current field has zero width bitfield and either
-    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
-    // true, or
-    // if the offset of current field is inconsistent with the offset of
-    // previous field plus its offset,
-    // skip the block below and go ahead to emit the storage.
-    // Otherwise, try to add bitfields to the run.
-    if (!StartFieldAsSingleRun && Field != FieldEnd &&
-        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
-        (!Field->isZeroLengthBitField(Context) ||
-         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
-          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
-        Tail == getFieldBitOffset(*Field)) {
-      Tail += Field->getBitWidthValue(Context);
-      ++Field;
-      continue;
-    }
+    installBitFieldAccessUnit(Current);
 
-    // We've hit a break-point in the run and need to emit a storage field.
-    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
-    // Add the storage member to the record and set the bitfield info for all of
-    // the bitfields in the run.  Bitfields get the offset of their storage but
-    // come afterward and remain there after a stable sort.
-    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
-    for (; Run != Field; ++Run)
-      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
-                                   MemberInfo::Field, nullptr, *Run));
-    Run = FieldEnd;
-    StartFieldAsSingleRun = false;
-  }
+    // If Probe is the next access unit, we can use that, otherwise (we scanned
+    // ahead and found nothing good), we have to recompute the next access unit.
+    Current = Current.end() == Probe.begin()
+                  ? Probe
+                  : gatherBitFieldAccessUnit(Current.end(), FieldEnd);
+  } while (!Current.isEnd());
+
+  return Current.begin();
 }
 
 void CGRecordLowering::accumulateBases() {
diff --git a/clang/test/CodeGen/aapcs-bitfield-access-unit.c b/clang/test/CodeGen/aapcs-bitfield-access-unit.c
index a285c73c2a35f8..963610eed6bae1 100644
--- a/clang/test/CodeGen/aapcs-bitfield-access-unit.c
+++ b/clang/test/CodeGen/aapcs-bitfield-access-unit.c
@@ -1,8 +1,8 @@
-// RUN: %clang_cc1 -triple armv8-none-linux-eabi -fno-aapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT_LE
-// RUN: %clang_cc1 -triple armebv8-none-linux-eabi -fno-aapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT_BE
+// RUN: %clang_cc1 -triple armv8-none-linux-eabi   -fno-aapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT_LE
+// RUN: %clang_cc1 -triple armebv8-none-linux-eabi   -fno-aapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT_BE
 
-// RUN: %clang_cc1 -triple armv8-none-linux-eabi -faapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT_LE
-// RUN: %clang_cc1 -triple armebv8-none-linux-eabi -faapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT_BE
+// RUN: %clang_cc1 -triple armv8-none-linux-eabi   -faapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT_LE
+// RUN: %clang_cc1 -triple armebv8-none-linux-eabi   -faapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT_BE
 
 struct st0 {
   short c : 7;
@@ -32,12 +32,12 @@ struct st2 {
   short c : 7;
 } st2;
 // LAYOUT-LABEL: LLVMType:%struct.st2 =
-// LAYOUT-SAME: type { i16, i8 }
+// LAYOUT-SAME: type { i32 }
 // LAYOUT: BitFields:[
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:7 IsSigned:1 StorageSize:8 StorageOffset:2
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:6 Size:10 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:1 Size:7 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:16 Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:22 Size:10 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:9 Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: ]>
 
 struct st3 {
@@ -159,7 +159,7 @@ struct st12{
   int f : 16;
 } st12;
 // LAYOUT-LABEL: LLVMType:%struct.st12 =
-// LAYOUT-SAME: type { i24 }
+// LAYOUT-SAME: type { i32 }
 // LAYOUT: BitFields:[
 // LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:8 Size:16 IsSigned:1 StorageSize:32 StorageOffset:0
@@ -172,12 +172,12 @@ struct st13 {
   int b : 32;
 } __attribute__((packed)) st13;
 // LAYOUT-LABEL: LLVMType:%struct.st13 =
-// LAYOUT-SAME: type { [5 x i8] }
+// LAYOUT-SAME: type <{ i8, i32 }>
 // LAYOUT: BitFields:[
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:8 Size:32 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:32 Size:8 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:32 StorageOffset:1
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:32 StorageOffset:1
 // LAYOUT-NEXT: ]>
 
 struct st14 {
@@ -207,16 +207,16 @@ struct st16 {
   int d : 16;
 } st16;
 // LAYOUT-LABEL: LLVMType:%struct.st16 =
-// LAYOUT-SAME: type { i48, i48 }
+// LAYOUT-SAME: type { i32, i16, i32, i16 }
 // LAYOUT: BitFields:[
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:64 StorageOffset:0
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:32 Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:64 StorageOffset:8
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:32 Size:16 IsSigned:1 StorageSize:64 StorageOffset:8
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:32 Size:32 IsSigned:1 StorageSize:64 StorageOffset:0
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:16 Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:32 Size:32 IsSigned:1 StorageSize:64 StorageOffset:8
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:16 Size:16 IsSigned:1 StorageSize:64 StorageOffset:8
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:16 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:32 StorageOffset:8
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:16 IsSigned:1 StorageSize:16 StorageOffset:12
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:16 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:32 StorageOffset:8
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:16 IsSigned:1 StorageSize:16 StorageOffset:12
 // LAYOUT-NEXT: ]>
 
 struct st17 {
@@ -224,12 +224,12 @@ int b : 32;
 char c : 8;
 } __attribute__((packed)) st17;
 // LAYOUT-LABEL: LLVMType:%struct.st17 =
-// LAYOUT-SAME: type { [5 x i8] }
+// LAYOUT-SAME: type <{ i32, i8 }>
 // LAYOUT: BitFields:[
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:32 Size:8 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:8 Size:32 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:4
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:32 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:8 StorageOffset:4
 // LAYOUT-NEXT: ]>
 
 struct zero_bitfield {
@@ -253,7 +253,7 @@ struct zero_bitfield_ok {
   int b : 24;
 } st19;
 // LAYOUT-LABEL: LLVMType:%struct.zero_bitfield_ok =
-// LAYOUT-SAME: type { i16, i24 }
+// LAYOUT-SAME: type { i16, i32 }
 // LAYOUT: BitFields:[
 // LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:8 Size:8 IsSigned:1 StorageSize:16 StorageOffset:0
diff --git a/clang/test/CodeGen/aapcs-bitfield.c b/clang/test/CodeGen/aapcs-bitfield.c
index 152ee26e7a3ea9..a5d2c2cfd8507c 100644
--- a/clang/test/CodeGen/aapcs-bitfield.c
+++ b/clang/test/CodeGen/aapcs-bitfield.c
@@ -299,77 +299,73 @@ struct st2 {
 
 // LE-LABEL: @st2_check_load(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
-// LE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// LE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LE-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 9
+// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// LE-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // LE-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // LE-NEXT:    ret i32 [[CONV]]
 //
 // BE-LABEL: @st2_check_load(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
-// BE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// BE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BE-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// BE-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // BE-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // BE-NEXT:    ret i32 [[CONV]]
 //
 // LENUMLOADS-LABEL: @st2_check_load(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 9
+// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // LENUMLOADS-NEXT:    ret i32 [[CONV]]
 //
 // BENUMLOADS-LABEL: @st2_check_load(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // BENUMLOADS-NEXT:    ret i32 [[CONV]]
 //
 // LEWIDTH-LABEL: @st2_check_load(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 9
+// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // LEWIDTH-NEXT:    ret i32 [[CONV]]
 //
 // BEWIDTH-LABEL: @st2_check_load(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // BEWIDTH-NEXT:    ret i32 [[CONV]]
 //
 // LEWIDTHNUM-LABEL: @st2_check_load(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 9
+// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // LEWIDTHNUM-NEXT:    ret i32 [[CONV]]
 //
 // BEWIDTHNUM-LABEL: @st2_check_load(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
 // BEWIDTHNUM-NEXT:    ret i32 [[CONV]]
 //
@@ -379,74 +375,66 @@ int st2_check_load(struct st2 *m) {
 
 // LE-LABEL: @st2_check_store(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
-// LE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LE-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// LE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -8323073
+// LE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LE-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @st2_check_store(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
-// BE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// BE-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// BE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65025
+// BE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 512
+// BE-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @st2_check_store(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LENUMLOADS-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -8323073
+// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LENUMLOADS-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @st2_check_store(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// BENUMLOADS-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65025
+// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 512
+// BENUMLOADS-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @st2_check_store(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTH-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -8323073
+// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LEWIDTH-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @st2_check_store(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// BEWIDTH-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65025
+// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 512
+// BEWIDTH-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @st2_check_store(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTHNUM-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -8323073
+// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LEWIDTHNUM-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @st2_check_store(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// BEWIDTHNUM-NEXT:    store i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65025
+// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 512
+// BEWIDTHNUM-NEXT:    store i32 [[BF_SET]], ptr [[M]], align 4
 // BEWIDTHNUM-NEXT:    ret void
 //
 void st2_check_store(struct st2 *m) {
@@ -636,8 +624,8 @@ struct st4 {
 //
 // LEWIDTH-LABEL: @st4_check_load(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 2
 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -645,8 +633,8 @@ struct st4 {
 //
 // BEWIDTH-LABEL: @st4_check_load(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -654,8 +642,8 @@ struct st4 {
 //
 // LEWIDTHNUM-LABEL: @st4_check_load(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 2
 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -663,8 +651,8 @@ struct st4 {
 //
 // BEWIDTHNUM-LABEL: @st4_check_load(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -708,38 +696,38 @@ int st4_check_load(struct st4 *m) {
 //
 // LEWIDTH-LABEL: @st4_check_store(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -63
 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// LEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
+// LEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 1
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @st4_check_store(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -125
 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 4
-// BEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
+// BEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 1
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @st4_check_store(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -63
 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// LEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 1
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @st4_check_store(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -125
 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 4
-// BEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 1
 // BEWIDTHNUM-NEXT:    ret void
 //
 void st4_check_store(struct st4 *m) {
@@ -980,8 +968,8 @@ struct st6 {
 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
 // LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// LE-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// LE-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// LE-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // LE-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -997,8 +985,8 @@ struct st6 {
 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
 // BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// BE-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// BE-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// BE-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // BE-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1014,8 +1002,8 @@ struct st6 {
 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1031,8 +1019,8 @@ struct st6 {
 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1048,8 +1036,8 @@ struct st6 {
 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
 // LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // LEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // LEWIDTH-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1065,8 +1053,8 @@ struct st6 {
 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
 // BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // BEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // BEWIDTH-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1082,8 +1070,8 @@ struct st6 {
 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
 // LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // LEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // LEWIDTHNUM-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1099,8 +1087,8 @@ struct st6 {
 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
 // BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // BEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
 // BEWIDTHNUM-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
 // BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1704,9 +1692,9 @@ void store_st9(volatile struct st9 *m) {
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
 // LE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// LE-NEXT:    store volatile i8 [[TMP1]], ptr [[M]], align 4
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// LE-NEXT:    store volatile i8 [[TMP0]], ptr [[M]], align 4
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_st9(
@@ -1714,9 +1702,9 @@ void store_st9(volatile struct st9 *m) {
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
 // BE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// BE-NEXT:    store volatile i8 [[TMP1]], ptr [[M]], align 4
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// BE-NEXT:    store volatile i8 [[TMP0]], ptr [[M]], align 4
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_st9(
@@ -1724,10 +1712,10 @@ void store_st9(volatile struct st9 *m) {
 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[M]], align 4
-// LENUMLOADS-NEXT:    store volatile i8 [[TMP1]], ptr [[M]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LENUMLOADS-NEXT:    store volatile i8 [[TMP0]], ptr [[M]], align 4
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_st9(
@@ -1735,10 +1723,10 @@ void store_st9(volatile struct st9 *m) {
 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[M]], align 4
-// BENUMLOADS-NEXT:    store volatile i8 [[TMP1]], ptr [[M]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BENUMLOADS-NEXT:    store volatile i8 [[TMP0]], ptr [[M]], align 4
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_st9(
@@ -1949,9 +1937,9 @@ void store_st10(volatile struct st10 *m) {
 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
 // LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
+// LE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // LE-NEXT:    [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 1
 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -511
 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
@@ -1968,9 +1956,9 @@ void store_st10(volatile struct st10 *m) {
 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
 // BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
+// BE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // BE-NEXT:    [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 7
 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -32641
 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
@@ -1987,9 +1975,9 @@ void store_st10(volatile struct st10 *m) {
 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
+// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 1
 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -511
 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
@@ -2006,9 +1994,9 @@ void store_st10(volatile struct st10 *m) {
 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
+// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 7
 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -32641
 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
@@ -2767,146 +2755,70 @@ struct st13 {
 
 // LE-LABEL: @increment_b_st13(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// LE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LE-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_b_st13(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// BE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BE-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_b_st13(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 1
+// LENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_b_st13(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 1
+// BENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_b_st13(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LEWIDTH-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_b_st13(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BEWIDTH-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_b_st13(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LEWIDTHNUM-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 1
+// LEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_b_st13(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BEWIDTHNUM-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 1
+// BEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[B]], align 1
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_b_st13(volatile struct st13 *s) {
@@ -2990,9 +2902,9 @@ struct st15 {
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // LE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // LE-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
-// LE-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// LE-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
+// LE-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_a_st15(
@@ -3000,9 +2912,9 @@ struct st15 {
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // BE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // BE-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
-// BE-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// BE-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
+// BE-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_a_st15(
@@ -3010,10 +2922,10 @@ struct st15 {
 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
-// LENUMLOADS-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// LENUMLOADS-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_a_st15(
@@ -3021,10 +2933,10 @@ struct st15 {
 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
-// BENUMLOADS-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// BENUMLOADS-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_a_st15(
@@ -3032,9 +2944,9 @@ struct st15 {
 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // LEWIDTH-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
-// LEWIDTH-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
+// LEWIDTH-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_a_st15(
@@ -3042,9 +2954,9 @@ struct st15 {
 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // BEWIDTH-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
-// BEWIDTH-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
+// BEWIDTH-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_a_st15(
@@ -3052,10 +2964,10 @@ struct st15 {
 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
-// LEWIDTHNUM-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// LEWIDTHNUM-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_a_st15(
@@ -3063,10 +2975,10 @@ struct st15 {
 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
 // BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = trunc i16 [[INC]] to i8
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i16 [[INC]] to i8
 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
-// BEWIDTHNUM-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// BEWIDTHNUM-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 1
+// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_a_st15(volatile struct st15 *s) {
@@ -3082,146 +2994,58 @@ struct st16 {
 
 // LE-LABEL: @increment_a_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_a_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_a_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_a_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_a_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_a_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTH-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_a_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_a_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT:    store i32 [[INC]], ptr [[S]], align 4
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_a_st16(struct st16 *s) {
@@ -3230,154 +3054,90 @@ void increment_a_st16(struct st16 *s) {
 
 // LE-LABEL: @increment_b_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LE-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LE-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_b_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BE-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BE-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_b_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_b_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_b_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LEWIDTH-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LEWIDTH-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_b_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BEWIDTH-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BEWIDTH-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_b_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LEWIDTHNUM-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_b_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[S]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BEWIDTHNUM-NEXT:    store i16 [[TMP0]], ptr [[B]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_b_st16(struct st16 *s) {
@@ -3386,154 +3146,66 @@ void increment_b_st16(struct st16 *s) {
 
 // LE-LABEL: @increment_c_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_c_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BE-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_c_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_c_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_c_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_c_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTH-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_c_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_c_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[C]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT:    store i32 [[INC]], ptr [[C]], align 4
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_c_st16(struct st16 *s) {
@@ -3542,162 +3214,90 @@ void increment_c_st16(struct st16 *s) {
 
 // LE-LABEL: @increment_d_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LE-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LE-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_d_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BE-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BE-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_d_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_d_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BENUMLOADS-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_d_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LEWIDTH-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LEWIDTH-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_d_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BEWIDTH-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BEWIDTH-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BEWIDTH-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_d_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LEWIDTHNUM-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_d_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BEWIDTHNUM-NEXT:    store i64 [[BF_SET]], ptr [[D]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BEWIDTHNUM-NEXT:    store i16 [[TMP0]], ptr [[D]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_d_st16(struct st16 *s) {
@@ -3706,74 +3306,32 @@ void increment_d_st16(struct st16 *s) {
 
 // LE-LABEL: @increment_v_a_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
+// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 4
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_v_a_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
+// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 4
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_v_a_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 4
+// LENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 4
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_v_a_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 4
+// BENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 4
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_v_a_st16(
@@ -3812,140 +3370,110 @@ void increment_v_a_st16(volatile struct st16 *s) {
 
 // LE-LABEL: @increment_v_b_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[B]], align 4
+// LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LE-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT:    store volatile i16 [[TMP0]], ptr [[B]], align 4
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_v_b_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[B]], align 4
+// BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BE-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT:    store volatile i16 [[TMP0]], ptr [[B]], align 4
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_v_b_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[B]], align 4
+// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[B]], align 4
+// LENUMLOADS-NEXT:    store volatile i16 [[TMP0]], ptr [[B]], align 4
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_v_b_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[B]], align 4
+// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[B]], align 4
+// BENUMLOADS-NEXT:    store volatile i16 [[TMP0]], ptr [[B]], align 4
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_v_b_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_v_b_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_v_b_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_v_b_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // BEWIDTHNUM-NEXT:    ret void
@@ -3956,112 +3484,70 @@ void increment_v_b_st16(volatile struct st16 *s) {
 
 // LE-LABEL: @increment_v_c_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT:    store volatile i64 [[BF_SET]], ptr [[C]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
+// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_v_c_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT:    store volatile i64 [[BF_SET]], ptr [[C]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
+// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_v_c_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[C]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[C]], align 4
+// LENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_v_c_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[C]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[C]], align 4
+// BENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_v_c_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
-// LEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_v_c_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
-// BEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_v_c_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
-// LEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[C]], align 4
+// LEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_v_c_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
-// BEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[C]], align 4
+// BEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[C]], align 4
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_v_c_st16(volatile struct st16 *s) {
@@ -4070,144 +3556,110 @@ void increment_v_c_st16(volatile struct st16 *s) {
 
 // LE-LABEL: @increment_v_d_st16(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[D]], align 4
+// LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LE-NEXT:    store volatile i64 [[BF_SET]], ptr [[D]], align 4
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT:    store volatile i16 [[TMP0]], ptr [[D]], align 4
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_v_d_st16(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[D]], align 4
+// BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BE-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BE-NEXT:    store volatile i64 [[BF_SET]], ptr [[D]], align 4
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT:    store volatile i16 [[TMP0]], ptr [[D]], align 4
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_v_d_st16(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[D]], align 4
+// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[D]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[D]], align 4
+// LENUMLOADS-NEXT:    store volatile i16 [[TMP0]], ptr [[D]], align 4
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_v_d_st16(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[D]], align 4
+// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], ptr [[D]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, ptr [[D]], align 4
+// BENUMLOADS-NEXT:    store volatile i16 [[TMP0]], ptr [[D]], align 4
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_v_d_st16(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_v_d_st16(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_v_d_st16(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_v_d_st16(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
 // BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
 // BEWIDTHNUM-NEXT:    ret void
@@ -4224,146 +3676,62 @@ char c : 8;
 
 // LE-LABEL: @increment_v_b_st17(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// LE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_v_b_st17(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// BE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_v_b_st17(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 1
+// LENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_v_b_st17(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 1
+// BENUMLOADS-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_v_b_st17(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_v_b_st17(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BEWIDTH-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTH-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTH-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_v_b_st17(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 1
+// LEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_v_b_st17(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 1
+// BEWIDTHNUM-NEXT:    store volatile i32 [[INC]], ptr [[S]], align 1
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_v_b_st17(volatile struct st17 *s) {
@@ -4372,108 +3740,70 @@ void increment_v_b_st17(volatile struct st17 *s) {
 
 // LE-LABEL: @increment_v_c_st17(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 32
-// LE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
-// LE-NEXT:    [[INC:%.*]] = add i8 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = zext i8 [[INC]] to i40
-// LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 4294967295
-// LE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LE-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// LE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
+// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
+// LE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// LE-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_v_c_st17(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BE-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 32
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 32
-// BE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
-// BE-NEXT:    [[INC:%.*]] = add i8 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = zext i8 [[INC]] to i40
-// BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -256
-// BE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BE-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BE-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// BE-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
+// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
+// BE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// BE-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_v_c_st17(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
-// LENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i8 [[INC]] to i40
-// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 4294967295
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
+// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
+// LENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
+// LENUMLOADS-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_v_c_st17(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 32
-// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
-// BENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i8 [[INC]] to i40
-// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -256
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BENUMLOADS-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
+// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
+// BENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
+// BENUMLOADS-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_v_c_st17(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
 // LEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
-// LEWIDTH-NEXT:    store volatile i8 [[INC]], ptr [[TMP1]], align 1
+// LEWIDTH-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_v_c_st17(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
 // BEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
-// BEWIDTH-NEXT:    store volatile i8 [[INC]], ptr [[TMP1]], align 1
+// BEWIDTH-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_v_c_st17(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP1]], align 1
-// LEWIDTHNUM-NEXT:    store volatile i8 [[INC]], ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
+// LEWIDTHNUM-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_v_c_st17(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP1]], align 1
-// BEWIDTHNUM-NEXT:    store volatile i8 [[INC]], ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
+// BEWIDTHNUM-NEXT:    store volatile i8 [[INC]], ptr [[C]], align 1
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_v_c_st17(volatile struct st17 *s) {
@@ -4493,9 +3823,9 @@ struct zero_bitfield {
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // LE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// LE-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// LE-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// LE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @increment_a_zero_bitfield(
@@ -4503,9 +3833,9 @@ struct zero_bitfield {
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // BE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// BE-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BE-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// BE-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// BE-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @increment_a_zero_bitfield(
@@ -4513,10 +3843,10 @@ struct zero_bitfield {
 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
-// LENUMLOADS-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LENUMLOADS-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// LENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @increment_a_zero_bitfield(
@@ -4524,10 +3854,10 @@ struct zero_bitfield {
 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
-// BENUMLOADS-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BENUMLOADS-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// BENUMLOADS-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @increment_a_zero_bitfield(
@@ -4535,9 +3865,9 @@ struct zero_bitfield {
 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// LEWIDTH-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// LEWIDTH-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// LEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_a_zero_bitfield(
@@ -4545,9 +3875,9 @@ struct zero_bitfield {
 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// BEWIDTH-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// BEWIDTH-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// BEWIDTH-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_a_zero_bitfield(
@@ -4555,10 +3885,10 @@ struct zero_bitfield {
 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
-// LEWIDTHNUM-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTHNUM-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// LEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_a_zero_bitfield(
@@ -4566,10 +3896,10 @@ struct zero_bitfield {
 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = trunc i32 [[INC]] to i8
 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
-// BEWIDTHNUM-NEXT:    store volatile i8 [[TMP1]], ptr [[S]], align 4
-// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTHNUM-NEXT:    store volatile i8 [[TMP0]], ptr [[S]], align 4
+// BEWIDTHNUM-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_a_zero_bitfield(volatile struct zero_bitfield *s) {
@@ -4692,9 +4022,9 @@ struct zero_bitfield_ok {
 // LE-NEXT:    [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
 // LE-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
 // LE-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
-// LE-NEXT:    [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
+// LE-NEXT:    [[TMP0:%.*]] = zext i8 [[CONV4]] to i16
 // LE-NEXT:    [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
-// LE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
+// LE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // LE-NEXT:    [[BF_SHL6:%.*]] = shl i16 [[BF_VALUE]], 8
 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], 255
 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL6]]
@@ -4716,9 +4046,9 @@ struct zero_bitfield_ok {
 // BE-NEXT:    [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
 // BE-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
 // BE-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
-// BE-NEXT:    [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
+// BE-NEXT:    [[TMP0:%.*]] = zext i8 [[CONV4]] to i16
 // BE-NEXT:    [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
-// BE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
+// BE-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], -256
 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_VALUE]]
 // BE-NEXT:    store volatile i16 [[BF_SET]], ptr [[S]], align 4
@@ -4739,9 +4069,9 @@ struct zero_bitfield_ok {
 // LENUMLOADS-NEXT:    [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
 // LENUMLOADS-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
 // LENUMLOADS-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
-// LENUMLOADS-NEXT:    [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
+// LENUMLOADS-NEXT:    [[TMP0:%.*]] = zext i8 [[CONV4]] to i16
 // LENUMLOADS-NEXT:    [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
-// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
+// LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // LENUMLOADS-NEXT:    [[BF_SHL6:%.*]] = shl i16 [[BF_VALUE]], 8
 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], 255
 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL6]]
@@ -4763,9 +4093,9 @@ struct zero_bitfield_ok {
 // BENUMLOADS-NEXT:    [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
 // BENUMLOADS-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
 // BENUMLOADS-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
-// BENUMLOADS-NEXT:    [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
+// BENUMLOADS-NEXT:    [[TMP0:%.*]] = zext i8 [[CONV4]] to i16
 // BENUMLOADS-NEXT:    [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
-// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
+// BENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], -256
 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_VALUE]]
 // BENUMLOADS-NEXT:    store volatile i16 [[BF_SET]], ptr [[S]], align 4
@@ -4780,12 +4110,12 @@ struct zero_bitfield_ok {
 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 8
 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
+// LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // LEWIDTH-NEXT:    [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
 // LEWIDTH-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
 // LEWIDTH-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
-// LEWIDTH-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
+// LEWIDTH-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP0]], align 1
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @increment_a_zero_bitfield_ok(
@@ -4793,12 +4123,12 @@ struct zero_bitfield_ok {
 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 8
 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
+// BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // BEWIDTH-NEXT:    [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
 // BEWIDTH-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
 // BEWIDTH-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
-// BEWIDTH-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
+// BEWIDTH-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP0]], align 1
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @increment_a_zero_bitfield_ok(
@@ -4807,13 +4137,13 @@ struct zero_bitfield_ok {
 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 8
 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // LEWIDTHNUM-NEXT:    [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
 // LEWIDTHNUM-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
 // LEWIDTHNUM-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
-// LEWIDTHNUM-NEXT:    [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP2]], align 1
-// LEWIDTHNUM-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
+// LEWIDTHNUM-NEXT:    [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP0]], align 1
+// LEWIDTHNUM-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP0]], align 1
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @increment_a_zero_bitfield_ok(
@@ -4821,13 +4151,13 @@ struct zero_bitfield_ok {
 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 8
 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP0]], align 1
 // BEWIDTHNUM-NEXT:    [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
 // BEWIDTHNUM-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
 // BEWIDTHNUM-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
-// BEWIDTHNUM-NEXT:    [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP2]], align 1
-// BEWIDTHNUM-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
+// BEWIDTHNUM-NEXT:    [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP0]], align 1
+// BEWIDTHNUM-NEXT:    store volatile i8 [[CONV3]], ptr [[TMP0]], align 1
 // BEWIDTHNUM-NEXT:    ret void
 //
 void increment_a_zero_bitfield_ok(volatile struct zero_bitfield_ok *s) {
diff --git a/clang/test/CodeGen/arm-bitfield-alignment.c b/clang/test/CodeGen/arm-bitfield-alignment.c
index d3a3d19a41e33e..5d0967ec70346c 100644
--- a/clang/test/CodeGen/arm-bitfield-alignment.c
+++ b/clang/test/CodeGen/arm-bitfield-alignment.c
@@ -1,7 +1,7 @@
-// RUN: %clang_cc1 -triple arm-none-eabi -fdump-record-layouts-simple -ffreestanding -emit-llvm -o %t %s | FileCheck %s -check-prefix=LAYOUT
-// RUN: FileCheck %s -check-prefix=IR <%t
-// RUN: %clang_cc1 -triple aarch64 -fdump-record-layouts-simple -ffreestanding -emit-llvm -o %t %s | FileCheck %s -check-prefix=LAYOUT
-// RUN: FileCheck %s -check-prefix=IR <%t
+// RUN: %clang_cc1 -triple arm-none-eabi -fdump-record-layouts-simple -ffreestanding -emit-llvm -o %t %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT-32
+// RUN: FileCheck %s -check-prefixes=IR,IR-32 <%t
+// RUN: %clang_cc1 -triple aarch64 -fdump-record-layouts-simple -ffreestanding -emit-llvm -o %t %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT-64
+// RUN: FileCheck %s -check-prefixes=IR,IR-64 <%t
 
 extern struct T {
   int b0 : 8;
@@ -14,12 +14,17 @@ int func(void) {
 }
 
 // IR: @g = external global %struct.T, align 4
-// IR: %{{.*}} = load i64, ptr @g, align 4
+// IR-32: %{{.*}} = load i32, ptr @g, align 4
+// IR-64: %{{.*}} = load i64, ptr @g, align 4
 
 // LAYOUT-LABEL: LLVMType:%struct.T =
-// LAYOUT-SAME: type { i40 }
+// LAYOUT-32-SAME: type { i32, i8 }
+// LAYOUT-64-SAME: type { i64 }
 // LAYOUT: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:1 StorageSize:64 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:32 Size:1 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-32-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-32-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-32-NEXT: <CGBitFieldInfo Offset:0 Size:1 IsSigned:1 StorageSize:8 StorageOffset:4
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:32 Size:1 IsSigned:1 StorageSize:64 StorageOffset:0
 // LAYOUT-NEXT: ]>
diff --git a/clang/test/CodeGen/arm64-be-bitfield.c b/clang/test/CodeGen/arm64-be-bitfield.c
index 28ca3be348379d..57e20b5b62b9ca 100644
--- a/clang/test/CodeGen/arm64-be-bitfield.c
+++ b/clang/test/CodeGen/arm64-be-bitfield.c
@@ -7,8 +7,10 @@ struct bt3 { signed b2:10; signed b3:10; } b16;
 signed callee_b0f(struct bt3 bp11) {
 // IR: callee_b0f(i64 [[ARG:%.*]])
 // IR: [[BP11:%.*]] = alloca %struct.bt3, align 4
-// IR: store i64 [[ARG]], ptr [[PTR:%.*]], align 8
-// IR: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[BP11]], ptr align 8 [[PTR]], i64 4
+// IR: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.bt3, ptr [[BP11]], i32 0, i32 0
+// IR: [[COERCE_HIGHBITS:%.*]] = lshr i64 [[ARG]], 32
+// IR: [[COERCE_VAL_II:%.*]] = trunc i64 [[COERCE_HIGHBITS]] to i32
+// IR: store i32 [[COERCE_VAL_II]], ptr [[COERCE_DIVE]], align 4
 // IR: [[BF_LOAD:%.*]] = load i32, ptr [[BP11]], align 4
 // IR: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 22
 // IR: ret i32 [[BF_ASHR]]
@@ -16,7 +18,7 @@ signed callee_b0f(struct bt3 bp11) {
 }
 
 // LAYOUT-LABEL: LLVMType:%struct.bt3 =
-// LAYOUT-SAME: type { i24 }
+// LAYOUT-SAME: type { i32 }
 // LAYOUT: BitFields:[
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:22 Size:10 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:12 Size:10 IsSigned:1 StorageSize:32 StorageOffset:0
diff --git a/clang/test/CodeGen/bitfield-2.c b/clang/test/CodeGen/bitfield-2.c
index 3e0b30c7a17d80..8688ba6390ddbe 100644
--- a/clang/test/CodeGen/bitfield-2.c
+++ b/clang/test/CodeGen/bitfield-2.c
@@ -271,11 +271,11 @@ _Bool test_6(void) {
 // CHECK-RECORD: *** Dumping IRgen Record Layout
 // CHECK-RECORD: Record: RecordDecl{{.*}}s7
 // CHECK-RECORD: Layout: <CGRecordLayout
-// CHECK-RECORD:   LLVMType:%struct.s7 = type { i32, i32, i32, i8, i32, [12 x i8] }
+// CHECK-RECORD:   LLVMType:%struct.s7 = type <{ i32, i32, i32, i64, [12 x i8] }>
 // CHECK-RECORD:   IsZeroInitializable:1
 // CHECK-RECORD:   BitFields:[
-// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:12
-// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:29 IsSigned:1 StorageSize:32 StorageOffset:16
+// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:64 StorageOffset:12
+// CHECK-RECORD:     <CGBitFieldInfo Offset:32 Size:29 IsSigned:1 StorageSize:64 StorageOffset:12
 
 struct __attribute__((aligned(16))) s7 {
   int a, b, c;
diff --git a/clang/test/CodeGen/bitfield-access-pad.c b/clang/test/CodeGen/bitfield-access-pad.c
index 20b81d887167ec..caf940438fd855 100644
--- a/clang/test/CodeGen/bitfield-access-pad.c
+++ b/clang/test/CodeGen/bitfield-access-pad.c
@@ -37,6 +37,7 @@
 // RUN: %clang_cc1 -triple=hexagon-elf -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-STRICT-NT %s
 // RUN: %clang_cc1 -triple=mips-elf -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-STRICT-NT %s
 
+
 struct P1 {
   unsigned a :8;
   char :0;
@@ -46,22 +47,20 @@ struct P1 {
 // LAYOUT-T-SAME: type { i8, i8, [2 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, i8 }
 // LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
-// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
-// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
-
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
@@ -78,21 +77,17 @@ struct P2 {
 // LAYOUT-T-SAME: type { i8, i8, i8, i8 }
 // LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
 // LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
 
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
@@ -112,23 +107,17 @@ struct P3 {
 // LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
 
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
@@ -147,21 +136,17 @@ struct P4 {
 // LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
 
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
@@ -179,19 +164,17 @@ struct P5 {
 // LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
 
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
@@ -211,23 +194,17 @@ struct P6 {
 // LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
 
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
@@ -245,19 +222,17 @@ struct P7 {
 // LAYOUT-T-SAME: type { i8, i8, i8, i8 }
 // LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
 // LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
 
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
@@ -284,11 +259,9 @@ struct __attribute__ ((aligned (2))) P7_align {
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
 
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
@@ -314,11 +287,9 @@ struct P8 {
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:2
 
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
@@ -344,11 +315,9 @@ struct P9 {
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
@@ -367,9 +336,9 @@ struct __attribute__((aligned(4))) P10 {
 // CHECK-LABEL: LLVMType:%struct.P10 =
 // LAYOUT-T-SAME: type { i24 }
 // LAYOUT-ARM64-T-SAME: type { i24 }
-// LAYOUT-NT-SAME: type { i24 }
-// LAYOUT-STRICT-NT-SAME: type { i24 }
-// LAYOUT-DWN32-SAME: type { i24 }
+// LAYOUT-NT-SAME: type { i32 }
+// LAYOUT-STRICT-NT-SAME: type { i32 }
+// LAYOUT-DWN32-SAME: type { i32 }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
@@ -378,12 +347,10 @@ struct __attribute__((aligned(4))) P10 {
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:32 StorageOffset:0
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:32 StorageOffset:0
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
@@ -403,9 +370,9 @@ struct __attribute__((aligned(4))) P11 {
 // CHECK-LABEL: LLVMType:%struct.P11 =
 // LAYOUT-T-SAME: type { i24 }
 // LAYOUT-ARM64-T-SAME: type { i24 }
-// LAYOUT-NT-SAME: type { i24 }
-// LAYOUT-STRICT-NT-SAME: type { i24 }
-// LAYOUT-DWN32-SAME: type { i24 }
+// LAYOUT-NT-SAME: type { i32 }
+// LAYOUT-STRICT-NT-SAME: type { i32 }
+// LAYOUT-DWN32-SAME: type { i32 }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
@@ -414,12 +381,10 @@ struct __attribute__((aligned(4))) P11 {
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:32 StorageOffset:0
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:0 IsSigned:1 StorageSize:32 StorageOffset:0
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
diff --git a/clang/test/CodeGen/bitfield-access-unit.c b/clang/test/CodeGen/bitfield-access-unit.c
index cf13fcf6c85b1e..1aed2e7202fc65 100644
--- a/clang/test/CodeGen/bitfield-access-unit.c
+++ b/clang/test/CodeGen/bitfield-access-unit.c
@@ -62,12 +62,12 @@ struct A {
   char b : 7;
 } a;
 // CHECK-LABEL: LLVMType:%struct.A =
-// LAYOUT-FLEX-SAME: type { i8, i8 }
+// LAYOUT-FLEX-SAME: type { i16 }
 // LAYOUT-STRICT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i16 }
 // CHECK: BitFields:[
-// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:1
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
 
 // LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
 // LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:1
@@ -82,11 +82,11 @@ struct __attribute__((aligned(2))) B {
   char b : 7;
 } b;
 // CHECK-LABEL: LLVMType:%struct.B =
-// LAYOUT-SAME: type { i8, i8 }
+// LAYOUT-SAME: type { i16 }
 // LAYOUT-DWN32-SAME: type { i16 }
 // CHECK: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:1
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
 
 // LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
 // LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
@@ -100,12 +100,12 @@ struct C {
   char b : 7;
 } c;
 // CHECK-LABEL: LLVMType:%struct.C =
-// LAYOUT-FLEX-SAME: type { i32, i8, i8, i8 }
+// LAYOUT-FLEX-SAME: type <{ i32, i8, i16, i8 }>
 // LAYOUT-STRICT-SAME: type { i32, i8, i8, i8 }
 // LAYOUT-DWN32-SAME: type <{ i32, i8, i16, i8 }>
 // CHECK: BitFields:[
-// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:5
-// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:5
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:5
 
 // LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:5
 // LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
@@ -123,21 +123,20 @@ struct __attribute__((packed)) D {
 } d;
 // CHECK-LABEL: LLVMType:%struct.D =
 // LAYOUT-FLEX-SAME: type <{ i32, i16, i8 }>
-// LAYOUT-STRICT-SAME: type <{ i32, i16, i8 }>
+// LAYOUT-STRICT-SAME: type <{ i32, i8, i8, i8 }>
 // LAYOUT-DWN32-FLEX-SAME: type <{ i32, i16, i8 }>
-// LAYOUT-DWN32-STRICT-SAME: type <{ i32, i16, i8 }>
+// LAYOUT-DWN32-STRICT-SAME: type <{ i32, i8, i8, i8 }>
 // CHECK: BitFields:[
 // LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
 // LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
 
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:4
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:5
 
 // LAYOUT-DWN32-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
 // LAYOUT-DWN32-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
-
-// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
-// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:4
+// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:5
 // CHECK-NEXT: ]>
 
 struct E {
@@ -146,22 +145,21 @@ struct E {
   unsigned c : 12;
 } e;
 // CHECK-LABEL: LLVMType:%struct.E =
-// LAYOUT-FLEX64-SAME: type { i8, i16, i16, [2 x i8] }
-// LAYOUT-FLEX32-SAME: type { i8, i16, i16, [2 x i8] }
-// LAYOUT-STRICT-SAME: type { i8, i16, i16, [2 x i8] }
+// LAYOUT-FLEX64-SAME: type { i64 }
+// LAYOUT-FLEX32-SAME: type { i32, i16 }
+// LAYOUT-STRICT-SAME: type { i32, i16 }
 // LAYOUT-DWN32-SAME: type { i32 }
 // CHECK: BitFields:[
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:64 StorageOffset:0
 
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
-
-// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
 
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
 
 // LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
@@ -176,30 +174,30 @@ struct F {
   signed char d : 7;
 } f;
 // CHECK-LABEL: LLVMType:%struct.F =
-// LAYOUT-FLEX64-SAME: type { i8, i16, i16, i8 }
-// LAYOUT-FLEX32-SAME: type { i8, i16, i16, i8 }
-// LAYOUT-STRICT-SAME: type { i8, i16, i16, i8 }
-// LAYOUT-DWN32-SAME: type { [5 x i8] }
+// LAYOUT-FLEX64-SAME: type { i64 }
+// LAYOUT-FLEX32-SAME: type { i32, i32 }
+// LAYOUT-STRICT-SAME: type { i32, i32 }
+// LAYOUT-DWN32-SAME: type <{ i32, i8 }>
 // CHECK: BitFields:[
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
-// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
 
-// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
-// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
-// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:4
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:4
 
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
-// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:4
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:4
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:40 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:4
 // CHECK-NEXT: ]>
 
 struct G {
@@ -210,18 +208,18 @@ struct G {
   signed char e;
 } g;
 // CHECK-LABEL: LLVMType:%struct.G =
-// LAYOUT-SAME: type { i8, i16, i16, i8, i8 }
-// LAYOUT-DWN32-SAME: type { [5 x i8], i8 }
+// LAYOUT-SAME: type { i32, i16, i8, i8 }
+// LAYOUT-DWN32-SAME: type <{ i32, i8, i8 }>
 // CHECK: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:40 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:40 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:40 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:4
 // CHECK-NEXT: ]>
 
 #if _LP64
@@ -257,24 +255,24 @@ struct B64 {
   signed char e; // not a bitfield
 } b64;
 // CHECK-64-LABEL: LLVMType:%struct.B64 =
-// LAYOUT-64-FLEX-SAME: type { [7 x i8], i8 }
-// LAYOUT-64-STRICT-SAME: type { [7 x i8], i8 }
-// LAYOUT-64-DWN-SAME: type { [7 x i8], i8 }
+// LAYOUT-64-FLEX-SAME: type <{ i16, i8, i32, i8 }>
+// LAYOUT-64-STRICT-SAME: type <{ i16, i8, i16, i16, i8 }>
+// LAYOUT-64-DWN-SAME: type <{ i16, i8, i32, i8 }>
 // CHECK-64: BitFields:[
-// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-
-// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:56 StorageOffset:0
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:3
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:3
+
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:3
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:5
+
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:3
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:3
 // CHECK-64-NEXT: ]>
 
 struct C64 {
@@ -285,20 +283,20 @@ struct C64 {
   signed char e : 7;
 } c64;
 // CHECK-64-LABEL: LLVMType:%struct.C64 =
-// LAYOUT-64-SAME: type { i16, [5 x i8], i8 }
-// LAYOUT-64-DWN-SAME: type { i16, [5 x i8], i8 }
+// LAYOUT-64-SAME: type {  i64 }
+// LAYOUT-64-DWN-SAME: type {  i64 }
 // CHECK-64: BitFields:[
-// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:40 StorageOffset:2
-// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:40 StorageOffset:2
-// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:40 StorageOffset:2
-// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:7
-
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:40 StorageOffset:2
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:40 StorageOffset:2
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:40 StorageOffset:2
-// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:7
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
+
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
 // CHECK-64-NEXT: ]>
 
 #endif
diff --git a/clang/test/CodeGen/debug-info-bitfield-0-struct.c b/clang/test/CodeGen/debug-info-bitfield-0-struct.c
index 0535b626771429..9fadf898e34661 100644
--- a/clang/test/CodeGen/debug-info-bitfield-0-struct.c
+++ b/clang/test/CodeGen/debug-info-bitfield-0-struct.c
@@ -101,8 +101,10 @@ struct None_B {
   int y : 4;
 };
 
-struct None_C {
-  // BOTH-DAG: ![[NONE_C:[0-9]+]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "None_C", file: !{{[0-9]+}}, line: {{[0-9]+}}, size: 32, elements: ![[NONE_C_ELEMENTS:[0-9]+]])
+// AMDGCN does not do unaligned access cheaply, so the bitfield access units
+// would remain single bytes, without the aligned attribute
+struct __attribute__((aligned(4))) None_C {
+  // BOTH-DAG: ![[NONE_C:[0-9]+]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "None_C", file: !{{[0-9]+}}, line: {{[0-9]+}}, size: 32, align: 32, elements: ![[NONE_C_ELEMENTS:[0-9]+]])
   // BOTH-DAG: ![[NONE_C_ELEMENTS]] = !{![[NONE_C_X:[0-9]+]], ![[NONE_C_Y:[0-9]+]], ![[NONE_C_A:[0-9]+]], ![[NONE_C_B:[0-9]+]]}
   // BOTH-DAG: ![[NONE_C_X]] = !DIDerivedType(tag: DW_TAG_member, name: "x", scope: ![[NONE_C]], file: !{{[0-9]+}}, line: {{[0-9]+}}, baseType: !{{[0-9]+}}, size: 8, flags: DIFlagBitField, extraData: i64 0)
   // BOTH-DAG: ![[NONE_C_Y]] = !DIDerivedType(tag: DW_TAG_member, name: "y", scope: ![[NONE_C]], file: !{{[0-9]+}}, line: {{[0-9]+}}, baseType: !{{[0-9]+}}, size: 8, offset: 8, flags: DIFlagBitField, extraData: i64 0)
diff --git a/clang/test/CodeGen/no-bitfield-type-align.c b/clang/test/CodeGen/no-bitfield-type-align.c
index f4698421ea1a72..1861c6886a35b3 100644
--- a/clang/test/CodeGen/no-bitfield-type-align.c
+++ b/clang/test/CodeGen/no-bitfield-type-align.c
@@ -12,7 +12,6 @@ struct S {
 // LAYOUT-SAME: type { i32 }
 // LAYOUT: BitFields:[
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:15 IsSigned:0 StorageSize:32 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:15 Size:0 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:15 Size:15 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: ]>
 
diff --git a/clang/test/CodeGen/struct-x86-darwin.c b/clang/test/CodeGen/struct-x86-darwin.c
index 350666a5168632..e79ecefb880dfb 100644
--- a/clang/test/CodeGen/struct-x86-darwin.c
+++ b/clang/test/CodeGen/struct-x86-darwin.c
@@ -38,10 +38,10 @@ struct STestB6 {int a:1; char b; int c:13; } stb6;
 // CHECK-NEXT: ]>
 
 // CHECK-LABEL: LLVMType:%struct.STestB2 =
-// CHECK-SAME: type { i8, i8, i8 }
+// CHECK-SAME: type <{ i8, i16 }>
 // CHECK: BitFields:[
-// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:1
-// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:4 IsSigned:1 StorageSize:8 StorageOffset:2
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:16 StorageOffset:1
+// CHECK-NEXT: <CGBitFieldInfo Offset:8 Size:4 IsSigned:1 StorageSize:16 StorageOffset:1
 // CHECK-NEXT: ]>
 
 // CHECK-LABEL: LLVMType:%struct.STestB3 =
diff --git a/clang/test/CodeGen/tbaa-struct.cpp b/clang/test/CodeGen/tbaa-struct.cpp
index 9b4b7415142d93..ca076ce5aa2732 100644
--- a/clang/test/CodeGen/tbaa-struct.cpp
+++ b/clang/test/CodeGen/tbaa-struct.cpp
@@ -197,7 +197,7 @@ void copy12(UnionMember2 *a1, UnionMember2 *a2) {
 // CHECK-OLD: [[TS6]] = !{i64 0, i64 2, [[TAG_CHAR]], i64 2, i64 1, [[TAG_CHAR]], i64 8, i64 8, [[TAG_DOUBLE:!.+]]}
 // CHECK-OLD: [[TAG_DOUBLE]] = !{[[DOUBLE:!.+]], [[DOUBLE]], i64 0}
 // CHECK-OLD  [[DOUBLE]] = !{!"double", [[CHAR]], i64 0}
-// CHECK-OLD: [[TS7]] = !{i64 0, i64 1, [[TAG_CHAR]], i64 1, i64 1, [[TAG_CHAR]], i64 2, i64 1, [[TAG_CHAR]], i64 3, i64 1, [[TAG_CHAR]], i64 4, i64 1, [[TAG_CHAR]], i64 8, i64 8, [[TAG_DOUBLE]], i64 16, i64 1, [[TAG_CHAR]]}
+// CHECK-OLD: [[TS7]] = !{i64 0, i64 1, [[TAG_CHAR]], i64 1, i64 1, [[TAG_CHAR]], i64 2, i64 1, [[TAG_CHAR]], i64 3, i64 2, [[TAG_CHAR]], i64 8, i64 8, [[TAG_DOUBLE]], i64 16, i64 1, [[TAG_CHAR]]}
 // CHECK-OLD: [[TS8]] = !{i64 0, i64 4, [[TAG_CHAR]], i64 8, i64 8, [[TAG_DOUBLE]]}
 // CHECK-OLD: [[TS9]] = !{i64 0, i64 8, [[TAG_CHAR]], i64 8, i64 4, [[TAG_INT]]}
 // CHECK-OLD: [[TS10]] = !{i64 0, i64 4, [[TAG_INT]], i64 8, i64 8, [[TAG_CHAR]]}
diff --git a/clang/test/CodeGenCXX/bitfield-access-empty.cpp b/clang/test/CodeGenCXX/bitfield-access-empty.cpp
index 194d2c9def6996..c5e6f55ffa6964 100644
--- a/clang/test/CodeGenCXX/bitfield-access-empty.cpp
+++ b/clang/test/CodeGenCXX/bitfield-access-empty.cpp
@@ -120,8 +120,8 @@ struct P6 {
   unsigned c;
 } p6;
 // CHECK-LABEL: LLVMType:%struct.P6 =
-// LAYOUT-SAME: type { i24, i32 }
-// LAYOUT-DWN32-SAME: type { i24, i32 }
+// LAYOUT-SAME: type { i32, i32 }
+// LAYOUT-DWN32-SAME: type { i32, i32 }
 // CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P6 =
 // CHECK: BitFields:[
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:32 StorageOffset:0
@@ -138,13 +138,13 @@ struct P7 {
   unsigned c;
 } p7;
 // CHECK-LABEL: LLVMType:%struct.P7 =
-// LAYOUT-SAME: type { [3 x i8], %struct.Empty, i32 }
-// LAYOUT-DWN32-SAME: type { [3 x i8], %struct.Empty, i32 }
+// LAYOUT-SAME: type { i16, i8, %struct.Empty, i32 }
+// LAYOUT-DWN32-SAME: type { i16, i8, %struct.Empty, i32 }
 // CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P7 =
 // CHECK: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:24 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:24 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
 // CHECK-NEXT: ]>
diff --git a/clang/test/CodeGenCXX/bitfield-access-tail.cpp b/clang/test/CodeGenCXX/bitfield-access-tail.cpp
index dad0ab226282c2..68716fdf3b1daa 100644
--- a/clang/test/CodeGenCXX/bitfield-access-tail.cpp
+++ b/clang/test/CodeGenCXX/bitfield-access-tail.cpp
@@ -48,15 +48,15 @@ struct Pod {
   int b : 8;
 } P;
 // CHECK-LABEL: LLVMType:%struct.Pod =
-// LAYOUT-SAME: type { i24 }
-// LAYOUT-DWN32-SAME: type { [3 x i8] }
+// LAYOUT-SAME: type { i32 }
+// LAYOUT-DWN32-SAME: type <{ i16, i8 }>
 // CHECK-NEXT: NonVirtualBaseLLVMType:%struct.Pod =
 // CHECK: BitFields:[
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:32 StorageOffset:0
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 // CHECK-NEXT: ]>
 
 // No tail padding
@@ -65,15 +65,15 @@ struct __attribute__((packed)) PPod {
   int b : 8;
 } PP;
 // CHECK-LABEL: LLVMType:%struct.PPod =
-// LAYOUT-SAME: type { [3 x i8] }
-// LAYOUT-DWN32-SAME: type { [3 x i8] }
+// LAYOUT-SAME: type <{ i16, i8 }>
+// LAYOUT-DWN32-SAME: type <{ i16, i8 }>
 // CHECK-NEXT: NonVirtualBaseLLVMType:%struct.PPod =
 // CHECK: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 // CHECK-NEXT: ]>
 
 // Cannot use tail padding
@@ -83,17 +83,17 @@ struct NonPod {
   int b : 8;
 } NP;
 // CHECK-LABEL: LLVMType:%struct.NonPod =
-// LAYOUT-SAME: type { [3 x i8], i8 }
-// LAYOUT-DWN32-SAME: type { [3 x i8] }
+// LAYOUT-SAME: type <{ i16, i8, i8 }>
+// LAYOUT-DWN32-SAME: type <{ i16, i8 }>
 // CHECK-NEXT: NonVirtualBaseLLVMType:%struct.
-// LAYOUT-SAME: NonPod.base = type { [3 x i8] }
-// LAYOUT-DWN32-SAME: NonPod = type { [3 x i8] }
+// LAYOUT-SAME: NonPod.base = type <{ i16, i8 }>
+// LAYOUT-DWN32-SAME: NonPod = type <{ i16, i8 }>
 // CHECK: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 // CHECK-NEXT: ]>
 
 // No tail padding
@@ -103,13 +103,13 @@ struct __attribute__((packed)) PNonPod {
   int b : 8;
 } PNP;
 // CHECK-LABEL: LLVMType:%struct.PNonPod =
-// LAYOUT-SAME: type { [3 x i8] }
-// LAYOUT-DWN32-SAME: type { [3 x i8] }
+// LAYOUT-SAME: type <{ i16, i8 }>
+// LAYOUT-DWN32-SAME: type <{ i16, i8 }>
 // CHECK-NEXT: NonVirtualBaseLLVMType:%struct.PNonPod =
 // CHECK: BitFields:[
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:24 StorageOffset:0
-// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:24 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
 // CHECK-NEXT: ]>
diff --git a/clang/test/CodeGenCXX/bitfield-ir.cpp b/clang/test/CodeGenCXX/bitfield-ir.cpp
index fa53f4d8900ef0..76c144072da686 100644
--- a/clang/test/CodeGenCXX/bitfield-ir.cpp
+++ b/clang/test/CodeGenCXX/bitfield-ir.cpp
@@ -23,12 +23,9 @@ struct Int {
 // CHECK-LABEL: define dso_local void @_Z1AP4Tail
 // CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i24, ptr [[P]], align 4
-// CHECK-NEXT:    [[NARROW:%.*]] = add i24 [[BF_LOAD]], 1
-// CHECK-NEXT:    [[BF_VALUE:%.*]] = and i24 [[NARROW]], 65535
-// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i24 [[BF_LOAD]], -65536
-// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i24 [[BF_VALUE]], [[BF_CLEAR]]
-// CHECK-NEXT:    store i24 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[P]], align 4
+// CHECK-NEXT:    [[INC:%.*]] = add i16 [[BF_LOAD]], 1
+// CHECK-NEXT:    store i16 [[INC]], ptr [[P]], align 4
 // CHECK-NEXT:    ret void
 //
 void A (Tail *p) {
@@ -38,12 +35,10 @@ void A (Tail *p) {
 // CHECK-LABEL: define dso_local void @_Z1BP4Tail
 // CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i24, ptr [[P]], align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = and i24 [[BF_LOAD]], -65536
-// CHECK-NEXT:    [[BF_SHL:%.*]] = add i24 [[TMP0]], 65536
-// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i24 [[BF_LOAD]], 65535
-// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i24 [[BF_SHL]], [[BF_CLEAR]]
-// CHECK-NEXT:    store i24 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 2
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[B]], align 2
+// CHECK-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// CHECK-NEXT:    store i8 [[INC]], ptr [[B]], align 2
 // CHECK-NEXT:    ret void
 //
 void B (Tail *p) {
@@ -53,12 +48,9 @@ void B (Tail *p) {
 // CHECK-LABEL: define dso_local void @_Z1AP4Char
 // CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i24, ptr [[P]], align 4
-// CHECK-NEXT:    [[NARROW:%.*]] = add i24 [[BF_LOAD]], 1
-// CHECK-NEXT:    [[BF_VALUE:%.*]] = and i24 [[NARROW]], 65535
-// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i24 [[BF_LOAD]], -65536
-// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i24 [[BF_VALUE]], [[BF_CLEAR]]
-// CHECK-NEXT:    store i24 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i16, ptr [[P]], align 4
+// CHECK-NEXT:    [[INC:%.*]] = add i16 [[BF_LOAD]], 1
+// CHECK-NEXT:    store i16 [[INC]], ptr [[P]], align 4
 // CHECK-NEXT:    ret void
 //
 void A (Char *p) {
@@ -68,12 +60,10 @@ void A (Char *p) {
 // CHECK-LABEL: define dso_local void @_Z1BP4Char
 // CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i24, ptr [[P]], align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = and i24 [[BF_LOAD]], -65536
-// CHECK-NEXT:    [[BF_SHL:%.*]] = add i24 [[TMP0]], 65536
-// CHECK-NEXT:    [[BF_CLEAR:%.*]] = and i24 [[BF_LOAD]], 65535
-// CHECK-NEXT:    [[BF_SET:%.*]] = or disjoint i24 [[BF_SHL]], [[BF_CLEAR]]
-// CHECK-NEXT:    store i24 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 2
+// CHECK-NEXT:    [[BF_LOAD:%.*]] = load i8, ptr [[B]], align 2
+// CHECK-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// CHECK-NEXT:    store i8 [[INC]], ptr [[B]], align 2
 // CHECK-NEXT:    ret void
 //
 void B (Char *p) {
diff --git a/clang/test/CodeGenCXX/bitfield.cpp b/clang/test/CodeGenCXX/bitfield.cpp
index ddc3f9345622c3..7545e02840e6b2 100644
--- a/clang/test/CodeGenCXX/bitfield.cpp
+++ b/clang/test/CodeGenCXX/bitfield.cpp
@@ -224,7 +224,7 @@ namespace N2 {
     void *p;
   };
 // LAYOUT-LABEL: LLVMType:%"struct.N2::S" =
-// LAYOUT-SAME: type { i24, ptr }
+// LAYOUT-SAME: type { i32, ptr }
 // LAYOUT: BitFields:[
 // LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
@@ -268,7 +268,7 @@ namespace N3 {
     unsigned b : 24;
   };
 // LAYOUT-LABEL: LLVMType:%"struct.N3::S" =
-// LAYOUT-SAME: type { i24 }
+// LAYOUT-SAME: type { i32 }
 // LAYOUT: BitFields:[
 // LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
 // LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
@@ -378,7 +378,7 @@ namespace N5 {
 // LAYOUT-NEXT: ]>
 
 // LAYOUT-LABEL: LLVMType:%"struct.N5::U::Y" =
-// LAYOUT-SAME: type { i24 }
+// LAYOUT-SAME: type { i32 }
 // LAYOUT-NEXT: NonVirtualBaseLLVMType:%"struct.N5::U::Y" =
 // LAYOUT: BitFields:[
 // LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
diff --git a/clang/test/OpenMP/atomic_capture_codegen.cpp b/clang/test/OpenMP/atomic_capture_codegen.cpp
index 08d1f21f8e0bda..eba7906d8eb8a7 100644
--- a/clang/test/OpenMP/atomic_capture_codegen.cpp
+++ b/clang/test/OpenMP/atomic_capture_codegen.cpp
@@ -811,7 +811,7 @@ int main(void) {
 #pragma omp atomic relaxed capture
   iv = bfx4.a = bfx4.a * ldv;
 // CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -831,7 +831,7 @@ int main(void) {
 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
 // CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
 // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
 // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
@@ -870,7 +870,7 @@ int main(void) {
 #pragma omp atomic capture release
   {bfx4.b /= ldv; iv = bfx4.b;}
 // CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) acquire, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) acquire, align 1
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -890,7 +890,7 @@ int main(void) {
 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
 // CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
 // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] acquire acquire, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] acquire acquire, align 1
 // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
diff --git a/clang/test/OpenMP/atomic_read_codegen.c b/clang/test/OpenMP/atomic_read_codegen.c
index 0a68c8e2c35a56..4294c145fa1c97 100644
--- a/clang/test/OpenMP/atomic_read_codegen.c
+++ b/clang/test/OpenMP/atomic_read_codegen.c
@@ -292,7 +292,7 @@ int main(void) {
 // CHECK: store x86_fp80
 #pragma omp atomic read
   ldv = bfx4.a;
-// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @bfx4_packed, i64 2) monotonic, align 1
+// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @bfx4_packed, i32 0, i32 1) monotonic, align 1
 // CHECK: store i8 [[LD]], ptr [[LDTEMP:%.+]]
 // CHECK: [[LD:%.+]] = load i8, ptr [[LDTEMP]]
 // CHECK: [[SHL:%.+]] = shl i8 [[LD]], 7
@@ -309,7 +309,7 @@ int main(void) {
 // CHECK: store x86_fp80
 #pragma omp atomic read relaxed
   ldv = bfx4.b;
-// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @bfx4_packed, i64 2) acquire, align 1
+// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @bfx4_packed, i32 0, i32 1) acquire, align 1
 // CHECK: store i8 [[LD]], ptr [[LDTEMP:%.+]]
 // CHECK: [[LD:%.+]] = load i8, ptr [[LDTEMP]]
 // CHECK: [[ASHR:%.+]] = ashr i8 [[LD]], 1
diff --git a/clang/test/OpenMP/atomic_update_codegen.cpp b/clang/test/OpenMP/atomic_update_codegen.cpp
index 31160b41764698..ce0765118922a1 100644
--- a/clang/test/OpenMP/atomic_update_codegen.cpp
+++ b/clang/test/OpenMP/atomic_update_codegen.cpp
@@ -737,7 +737,7 @@ int main(void) {
 #pragma omp atomic
   bfx4.a = bfx4.a * ldv;
 // CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -757,7 +757,7 @@ int main(void) {
 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
 // CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
 // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
 // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
@@ -792,7 +792,7 @@ int main(void) {
 #pragma omp atomic
   bfx4.b /= ldv;
 // CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -812,7 +812,7 @@ int main(void) {
 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
 // CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
 // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
 // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
diff --git a/clang/test/OpenMP/atomic_write_codegen.c b/clang/test/OpenMP/atomic_write_codegen.c
index afe8737d30b065..493b01cbac76c4 100644
--- a/clang/test/OpenMP/atomic_write_codegen.c
+++ b/clang/test/OpenMP/atomic_write_codegen.c
@@ -413,7 +413,7 @@ int main(void) {
   bfx4.a = ldv;
 // CHECK: load x86_fp80, ptr @{{.+}}
 // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -423,7 +423,7 @@ int main(void) {
 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
 // CHECK: store i8 %{{.+}}, ptr [[LDTEMP:%.+]]
 // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[LDTEMP]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
 // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
@@ -451,7 +451,7 @@ int main(void) {
   bfx4.b = ldv;
 // CHECK: load x86_fp80, ptr @{{.+}}
 // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -462,7 +462,7 @@ int main(void) {
 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
 // CHECK: store i8 %{{.+}}, ptr [[LDTEMP:%.+]]
 // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[LDTEMP]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
 // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]

>From a10cbbe4dd7658dfe6c31845a4196518fdf24c10 Mon Sep 17 00:00:00 2001
From: Nathan Sidwell <nathan at acm.org>
Date: Tue, 12 Mar 2024 13:31:54 -0400
Subject: [PATCH 4/8] Refactor accumulation and remove BitFieldAccessUnit class

---
 clang/lib/CodeGen/CGRecordLayoutBuilder.cpp | 289 ++++++++------------
 1 file changed, 120 insertions(+), 169 deletions(-)

diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index d6f9e29b42d10c..d3ae22e17cfbb1 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -93,10 +93,6 @@ struct CGRecordLowering {
     // MemberInfos are sorted so we define a < operator.
     bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
   };
-  // BitFieldAccessUnit is helper structure for accumulateBitFields. It
-  // represents a set of bitfields in the same load/store unit.
-  class BitFieldAccessUnit;
-
   // The constructor.
   CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
   // Short helper routines.
@@ -193,10 +189,6 @@ struct CGRecordLowering {
   RecordDecl::field_iterator
   accumulateBitFields(RecordDecl::field_iterator Field,
                       RecordDecl::field_iterator FieldEnd);
-  BitFieldAccessUnit
-  gatherBitFieldAccessUnit(RecordDecl::field_iterator Field,
-                           RecordDecl::field_iterator FieldEnd) const;
-  void installBitFieldAccessUnit(const BitFieldAccessUnit &);
   void computeVolatileBitfields();
   void accumulateBases();
   void accumulateVPtrs();
@@ -411,114 +403,6 @@ void CGRecordLowering::accumulateFields() {
   }
 }
 
-// A run of bitfields assigned to the same access unit -- the size of memory
-// loads & stores.
-class CGRecordLowering::BitFieldAccessUnit {
-  RecordDecl::field_iterator Begin; // Field at start of this access unit.
-  RecordDecl::field_iterator End;   // Field just after this access unit.
-
-  CharUnits StartOffset; // Starting offset in the containing record.
-  CharUnits EndOffset;   // Finish offset (exclusive) in the containing record.
-
-  bool IsBarrier;        // Is a barrier between access units.
-  bool ContainsVolatile; // This access unit contains a volatile bitfield.
-
-public:
-  BitFieldAccessUnit(RecordDecl::field_iterator B, RecordDecl::field_iterator E,
-                     CharUnits SO, CharUnits EO, bool Barrier = false,
-                     bool Volatile = false)
-      : Begin(B), End(E), StartOffset(SO), EndOffset(EO), IsBarrier(Barrier),
-        ContainsVolatile(Volatile) {}
-
-  // Re-set the end of this unit if there is space before Probe starts.
-  void enlargeIfSpace(const BitFieldAccessUnit &Probe, CharUnits Offset) {
-    if (Probe.getStartOffset() >= Offset) {
-      End = Probe.begin();
-      EndOffset = Offset;
-    }
-  }
-
-public:
-  RecordDecl::field_iterator begin() const { return Begin; }
-  RecordDecl::field_iterator end() const { return End; }
-
-public:
-  // Accessors
-  CharUnits getSize() const { return EndOffset - StartOffset; }
-  CharUnits getStartOffset() const { return StartOffset; }
-  CharUnits getEndOffset() const { return EndOffset; }
-
-  // Predicates
-  bool isBarrier() const { return IsBarrier; }
-  bool hasVolatile() const { return ContainsVolatile; }
-  bool isEnd() const { return begin() == end(); }
-};
-
-CGRecordLowering::BitFieldAccessUnit CGRecordLowering::gatherBitFieldAccessUnit(
-    RecordDecl::field_iterator Field,
-    RecordDecl::field_iterator FieldEnd) const {
-  if (Field == FieldEnd || !Field->isBitField()) {
-    // Skip over any empty fields to find the next used offset.
-    auto Probe = Field;
-    while (Probe != FieldEnd && Probe->isZeroSize(Context))
-      ++Probe;
-    // We can't necessarily use tail padding in C++ structs, so the NonVirtual
-    // size is what we must use there.
-    CharUnits Limit = Probe != FieldEnd
-                          ? bitsToCharUnits(getFieldBitOffset(*Probe))
-                      : RD ? Layout.getNonVirtualSize()
-                           : Layout.getDataSize();
-    return BitFieldAccessUnit(Field, Field, Limit, Limit, true);
-  }
-
-  auto Begin = Field;
-  uint64_t StartBit = getFieldBitOffset(*Field);
-  uint64_t BitSize = Field->getBitWidthValue(Context);
-  unsigned CharBits = Context.getCharWidth();
-  bool Volatile = Field->getType().isVolatileQualified();
-
-  assert(!(StartBit % CharBits) && "Not at start of char");
-
-  // Gather bitfields until we have one starting at a byte boundary.
-  for (++Field; Field != FieldEnd && Field->isBitField(); ++Field) {
-    uint64_t BitOffset = getFieldBitOffset(*Field);
-    if (!(BitOffset % CharBits))
-      // This BitField starts at a byte boundary. It belongs in the next access
-      // unit (initially).
-      break;
-    assert(BitOffset == StartBit + BitSize &&
-           "Concatenating non-contiguous bitfields");
-    BitSize += Field->getBitWidthValue(Context);
-    if (Field->getType().isVolatileQualified())
-      Volatile = true;
-  }
-
-  // A zero-sized access unit is only a barrier under some conditions.
-  bool Barrier =
-      !BitSize && (Context.getTargetInfo().useZeroLengthBitfieldAlignment() ||
-                   Context.getTargetInfo().useBitFieldTypeAlignment());
-
-  return BitFieldAccessUnit(Begin, Field, bitsToCharUnits(StartBit),
-                            bitsToCharUnits(StartBit + BitSize + CharBits - 1),
-                            Barrier, Volatile);
-}
-
-// Create the containing access unit and install the bitfields.
-void CGRecordLowering::installBitFieldAccessUnit(const BitFieldAccessUnit &U) {
-  if (U.getSize().isZero())
-    return;
-
-  // Add the storage member for the access unit to the record. The
-  // bitfields get the offset of their storage but come afterward and remain
-  // there after a stable sort.
-  llvm::Type *Type = getIntNType(Context.toBits(U.getSize()));
-  Members.push_back(StorageInfo(U.getStartOffset(), Type));
-  for (auto F : U)
-    if (!F->isZeroLengthBitField(Context))
-      Members.push_back(
-          MemberInfo(U.getStartOffset(), MemberInfo::Field, nullptr, F));
-}
-
 // Create members for bitfields. Field is a bitfield, and FieldEnd is the end
 // iterator of the record. Return the first non-bitfield encountered.
 RecordDecl::field_iterator
@@ -621,66 +505,133 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
 
   CharUnits RegSize =
       bitsToCharUnits(Context.getTargetInfo().getRegisterWidth());
-  // The natural size of an access of Size, unless poorly aligned, in which case
-  // CharUnits::Zero is returned.
-  auto NaturalSize = [&](CharUnits StartOffset, CharUnits Size) {
-    if (Size.isZero())
-      return Size;
-    llvm::Type *Type = getIntNType(Context.toBits(Size));
-    if (!Context.getTargetInfo().hasCheapUnalignedBitFieldAccess()) {
-      // This alignment is that of the storage used -- for instance
-      // (usually) 4 bytes for a 24-bit type.
-      CharUnits Align = getAlignment(Type);
-      if (Align > Layout.getAlignment() || !StartOffset.isMultipleOf(Align))
-        return CharUnits::Zero();
+  unsigned CharBits = Context.getCharWidth();
+
+  RecordDecl::field_iterator Begin = FieldEnd;
+  CharUnits StartOffset;
+  uint64_t BitSize;
+  CharUnits BestEndOffset;
+  RecordDecl::field_iterator BestEnd = Begin;
+  bool Volatile;
+
+  for (;;) {
+    CharUnits Limit;
+    bool Barrier;
+    bool Install = false;
+
+    if (Field != FieldEnd && Field->isBitField()) {
+      uint64_t BitOffset = getFieldBitOffset(*Field);
+      if (Begin == FieldEnd) {
+        // Beginning a new access unit.
+        Begin = Field;
+        BestEnd = Begin;
+
+        assert(!(BitOffset % CharBits) && "Not at start of char");
+        StartOffset = bitsToCharUnits(BitOffset);
+        BitSize = 0;
+        Volatile = false;
+      } else if (BitOffset % CharBits) {
+        // Bitfield occupies the same char as previous.
+        assert(BitOffset == Context.toBits(StartOffset) + BitSize &&
+               "Concatenating non-contiguous bitfields");
+      } else {
+        // Bitfield begins a new access unit.
+        Limit = bitsToCharUnits(BitOffset);
+        Barrier = false;
+        if (Field->isZeroLengthBitField(Context) &&
+            (Context.getTargetInfo().useZeroLengthBitfieldAlignment() ||
+             Context.getTargetInfo().useBitFieldTypeAlignment()))
+          Barrier = true;
+        Install = true;
+      }
+    } else if (Begin == FieldEnd) {
+      // Completed the bitfields.
+      break;
+    } else {
+      // End of the bitfield span, with active access unit.
+      auto Probe = Field;
+      while (Probe != FieldEnd && Probe->isZeroSize(Context))
+        ++Probe;
+      // We can't necessarily use tail padding in C++ structs, so the NonVirtual
+      // size is what we must use there.
+      Limit = Probe != FieldEnd ? bitsToCharUnits(getFieldBitOffset(*Probe))
+              : RD              ? Layout.getNonVirtualSize()
+                                : Layout.getDataSize();
+      Barrier = true;
+      Install = true;
     }
-    // This is the storage-size of Type -- for instance a 24-bit type will
-    // require 4 bytes.
-    return getSize(Type);
-  };
 
-  BitFieldAccessUnit Current = gatherBitFieldAccessUnit(Field, FieldEnd);
-  do {
-    BitFieldAccessUnit Probe =
-        gatherBitFieldAccessUnit(Current.end(), FieldEnd);
-    if (!Current.getSize().isZero() &&
-        !Types.getCodeGenOpts().FineGrainedBitfieldAccesses) {
-      CharUnits Size = NaturalSize(Current.getStartOffset(), Current.getSize());
-      if (!Size.isZero()) {
-        CharUnits EndOffset = Current.getStartOffset() + Size;
-
-        Current.enlargeIfSpace(Probe, EndOffset);
-
-        if (!Current.hasVolatile())
-          // Look for beneficial merging with subsequent access units.
-          while (!Probe.isBarrier() && !Probe.hasVolatile()) {
-            CharUnits NewSize = Probe.getEndOffset() - Current.getStartOffset();
-            if (NewSize > Size) {
-              if (NewSize > RegSize)
-                break;
-
-              Size = NaturalSize(Current.getStartOffset(), NewSize);
-              if (Size.isZero())
-                break;
-              assert(Size <= RegSize && "Unexpectedly large storage");
-              EndOffset = Current.getStartOffset() + Size;
-            }
-            Probe = gatherBitFieldAccessUnit(Probe.end(), FieldEnd);
-            Current.enlargeIfSpace(Probe, EndOffset);
+    if (Install) {
+      // Found the start of a new access unit. Determine if that completes the
+      // current one, or potentially extends it.
+      Install = false;
+      CharUnits Size = bitsToCharUnits(BitSize + CharBits - 1);
+      if (BestEnd == Begin) {
+        // This is the initial access unit.
+        BestEnd = Field;
+        BestEndOffset = StartOffset + Size;
+        if (!BitSize || Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
+          // A barrier, or we're fine grained.
+          Install = true;
+      } else if (Size > RegSize || Volatile)
+        // Too big to accumulate, or just-seen access unit contains a volatile.
+        Install = true;
+
+      if (!Install) {
+        llvm::Type *Type = getIntNType(Context.toBits(Size));
+        if (!Context.getTargetInfo().hasCheapUnalignedBitFieldAccess()) {
+          // This alignment is that of the storage used -- for instance
+          // (usually) 4 bytes for a 24-bit type.
+          CharUnits Align = getAlignment(Type);
+          if (Align > Layout.getAlignment() || !StartOffset.isMultipleOf(Align))
+            // Not naturally aligned.
+            Install = true;
+        }
+
+        if (!Install) {
+          Size = getSize(Type);
+          if (StartOffset + Size <= Limit) {
+            // The next unit starts later, extend the current access unit to
+            // include the just-gathered access unit.
+            BestEndOffset = StartOffset + Size;
+            BestEnd = Field;
           }
+
+          if (Volatile || Barrier)
+            // Contained a volatile, or next access unit is a barrier.
+            Install = true;
+          else
+            BitSize = Context.toBits(Limit - StartOffset);
+        }
       }
     }
 
-    installBitFieldAccessUnit(Current);
-
-    // If Probe is the next access unit, we can use that, otherwise (we scanned
-    // ahead and found nothing good), we have to recompute the next access unit.
-    Current = Current.end() == Probe.begin()
-                  ? Probe
-                  : gatherBitFieldAccessUnit(Current.end(), FieldEnd);
-  } while (!Current.isEnd());
+    if (Install) {
+      CharUnits Size = BestEndOffset - StartOffset;
+      if (!Size.isZero()) {
+        // Add the storage member for the access unit to the record. The
+        // bitfields get the offset of their storage but come afterward and
+        // remain there after a stable sort.
+        llvm::Type *Type = getIntNType(Context.toBits(Size));
+        Members.push_back(StorageInfo(StartOffset, Type));
+        for (; Begin != BestEnd; ++Begin)
+          if (!Begin->isZeroLengthBitField(Context))
+            Members.push_back(
+                MemberInfo(StartOffset, MemberInfo::Field, nullptr, *Begin));
+      }
+      // Reset to start a new Access Unit.
+      Field = BestEnd;
+      Begin = FieldEnd;
+    } else {
+      // Accumulate this bitfield into the (potentially) current access unit.
+      BitSize += Field->getBitWidthValue(Context);
+      if (Field->getType().isVolatileQualified())
+        Volatile = true;
+      ++Field;
+    }
+  }
 
-  return Current.begin();
+  return Field;
 }
 
 void CGRecordLowering::accumulateBases() {

>From 1f9b452c05e3715783624c7be7ebba17a365f285 Mon Sep 17 00:00:00 2001
From: Nathan Sidwell <nathan at acm.org>
Date: Fri, 15 Mar 2024 11:39:47 -0400
Subject: [PATCH 5/8] Update comments, varnames and asserts.

---
 clang/lib/CodeGen/CGRecordLayoutBuilder.cpp | 221 +++++++++++++-------
 1 file changed, 141 insertions(+), 80 deletions(-)

diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index d3ae22e17cfbb1..bd022979956a48 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -482,23 +482,22 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
   // to keep access-units naturally aligned, to avoid similar bit
   // manipulation synthesizing larger unaligned accesses.
 
-  // We do this in two phases, processing a sequential run of bitfield
-  // declarations.
-
-  // a) Bitfields that share parts of a single byte are, of necessity, placed in
-  // the same access unit. That unit will encompass a consecutive
-  // run where adjacent bitfields share parts of a byte. (The first bitfield of
-  // such an access unit will start at the beginning of a byte.)
-
-  // b) Accumulate adjacent access units when the combined unit is naturally
-  // sized, no larger than a register, and on a strict alignment ISA,
-  // aligned. Note that this requires lookahead to one or more subsequent access
-  // units. For instance, consider a 2-byte access-unit followed by 2 1-byte
-  // units. We can merge that into a 4-byte access-unit, but we would not want
-  // to merge a 2-byte followed by a single 1-byte (and no available tail
-  // padding).
-
-  // This accumulation is prevented when:
+  // Bitfields that share parts of a single byte are, of necessity, placed in
+  // the same access unit. That unit will encompass a consecutive run where
+  // adjacent bitfields share parts of a byte. (The first bitfield of such an
+  // access unit will start at the beginning of a byte.)
+
+  // We then try and accumulate adjacent access units when the combined unit is
+  // naturally sized, no larger than a register, and (on a strict alignment
+  // ISA), naturally aligned. Note that this requires lookahead to one or more
+  // subsequent access units. For instance, consider a 2-byte access-unit
+  // followed by 2 1-byte units. We can merge that into a 4-byte access-unit,
+  // but we would not want to merge a 2-byte followed by a single 1-byte (and no
+  // available tail padding). We keep track of the best access unit seen so far,
+  // and use that when we determine we cannot accumulate any more. Then we start
+  // again at the bitfield following that best one.
+
+  // The accumulation is also prevented when:
   // *) it would cross a zero-width bitfield (ABI-dependent), or
   // *) one of the candidate access units contains a volatile bitfield, or
   // *) fine-grained bitfield access option is in effect.
@@ -507,124 +506,186 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
       bitsToCharUnits(Context.getTargetInfo().getRegisterWidth());
   unsigned CharBits = Context.getCharWidth();
 
+  // Data about the start of the span we're accumulating to create an access
+  // unit from. Begin is the first bitfield of the span. If Begin is FieldEnd,
+  // we've not got a current span. The span starts at the BeginOffset character
+  // boundary. BitSizeSinceBegin is the size (in bits) of the span -- this might
+  // include padding when we've advanced to a subsequent bitfield run.
   RecordDecl::field_iterator Begin = FieldEnd;
-  CharUnits StartOffset;
-  uint64_t BitSize;
-  CharUnits BestEndOffset;
+  CharUnits BeginOffset;
+  uint64_t BitSizeSinceBegin;
+
+  // The (non-inclusive) end of the largest acceptable access unit we've found
+  // since Begin. If this is Begin, we're gathering the initial set of bitfields
+  // of a new span. BestEndOffset is the end of that acceptable access unit --
+  // it might extend beyond the last character of the bitfield run, using
+  // available padding characters.
   RecordDecl::field_iterator BestEnd = Begin;
-  bool Volatile;
+  CharUnits BestEndOffset;
+
+  bool Volatile; // True iff the initial span or post-BestEnd span contains a
+                 // volatile bitfield. We do not want to merge spans containing
+                 // a volatile bitfield.
 
   for (;;) {
-    CharUnits Limit;
-    bool Barrier;
-    bool Install = false;
+    // AtAlignedBoundary is true iff Field is the (potential) start of a new
+    // span (or the end of the bitfields). When true, LimitOffset is the
+    // character offset of that span and Barrier indicates whether the that new
+    // span cannot be merged into the current one.
+    bool AtAlignedBoundary = false;
+    CharUnits LimitOffset;
+    bool Barrier = false;
 
     if (Field != FieldEnd && Field->isBitField()) {
       uint64_t BitOffset = getFieldBitOffset(*Field);
       if (Begin == FieldEnd) {
-        // Beginning a new access unit.
+        // Beginning a new span.
         Begin = Field;
         BestEnd = Begin;
 
-        assert(!(BitOffset % CharBits) && "Not at start of char");
-        StartOffset = bitsToCharUnits(BitOffset);
-        BitSize = 0;
+        assert((BitOffset % CharBits) == 0 && "Not at start of char");
+        BeginOffset = bitsToCharUnits(BitOffset);
+        BitSizeSinceBegin = 0;
         Volatile = false;
-      } else if (BitOffset % CharBits) {
-        // Bitfield occupies the same char as previous.
-        assert(BitOffset == Context.toBits(StartOffset) + BitSize &&
+      } else if ((BitOffset % CharBits) != 0) {
+        // Bitfield occupies the same char as previous, it must be part of the
+        // same span.
+        assert(BitOffset == Context.toBits(BeginOffset) + BitSizeSinceBegin &&
                "Concatenating non-contiguous bitfields");
       } else {
-        // Bitfield begins a new access unit.
-        Limit = bitsToCharUnits(BitOffset);
-        Barrier = false;
+        // Bitfield could begin a new span.
+        LimitOffset = bitsToCharUnits(BitOffset);
         if (Field->isZeroLengthBitField(Context) &&
             (Context.getTargetInfo().useZeroLengthBitfieldAlignment() ||
              Context.getTargetInfo().useBitFieldTypeAlignment()))
           Barrier = true;
-        Install = true;
+        AtAlignedBoundary = true;
       }
     } else if (Begin == FieldEnd) {
       // Completed the bitfields.
       break;
     } else {
-      // End of the bitfield span, with active access unit.
+      // We've reached the end of the bitfield run while accumulating a span.
+      // Determine the limit of that span: either the offset of the next field,
+      // or, if we're at the end of the record, the end of its non-reusable tail
+      // padding. (I.e. treat the next unusable char as the start of an
+      // unmergeable span.)
       auto Probe = Field;
       while (Probe != FieldEnd && Probe->isZeroSize(Context))
         ++Probe;
       // We can't necessarily use tail padding in C++ structs, so the NonVirtual
       // size is what we must use there.
-      Limit = Probe != FieldEnd ? bitsToCharUnits(getFieldBitOffset(*Probe))
-              : RD              ? Layout.getNonVirtualSize()
-                                : Layout.getDataSize();
+      LimitOffset = Probe != FieldEnd
+                        ? bitsToCharUnits(getFieldBitOffset(*Probe))
+                    : RD ? Layout.getNonVirtualSize()
+                         : Layout.getDataSize();
       Barrier = true;
-      Install = true;
+      AtAlignedBoundary = true;
     }
 
-    if (Install) {
-      // Found the start of a new access unit. Determine if that completes the
-      // current one, or potentially extends it.
-      Install = false;
-      CharUnits Size = bitsToCharUnits(BitSize + CharBits - 1);
+    // InstallBest indicates whether we should create an access unit for the
+    // current best span: fields [Begin, BestEnd) occupying characters
+    // [BeginOffset, BestEndOffset).
+    bool InstallBest = false;
+    if (AtAlignedBoundary) {
+      // Field is the start of a new span. The just-seen span is now extended to
+      // BitSizeSinceBegin. Determine if we can accumulate that just-seen span
+      // into the current accumulation.
+      CharUnits AccessSize = bitsToCharUnits(BitSizeSinceBegin + CharBits - 1);
       if (BestEnd == Begin) {
-        // This is the initial access unit.
+        // This is the initial run at the start of a new span. By definition,
+        // this is the best seen so far.
         BestEnd = Field;
-        BestEndOffset = StartOffset + Size;
-        if (!BitSize || Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
-          // A barrier, or we're fine grained.
-          Install = true;
-      } else if (Size > RegSize || Volatile)
-        // Too big to accumulate, or just-seen access unit contains a volatile.
-        Install = true;
-
-      if (!Install) {
-        llvm::Type *Type = getIntNType(Context.toBits(Size));
+        BestEndOffset = BeginOffset + AccessSize;
+        if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
+          // Fine-grained access, so no merging of spans.
+          InstallBest = true;
+        else if (!BitSizeSinceBegin)
+          // A zero-sized initial span -- this will install nothing and reset
+          // for another.
+          InstallBest = true;
+      } else if (Volatile) {
+        // We've encountered a volatile bitfield in the just-seen non-initial
+        // span. It should not be merged into the current accumulation.
+        InstallBest = true;
+      } else if (AccessSize > RegSize)
+        // Accumulating the just-seen span would create a multi-register access
+        // unit, which would increase register pressure.
+        InstallBest = true;
+
+      if (!InstallBest) {
+        // Determine if accumulating the just-seen span will create an expensive
+        // access-unit or not.
+        llvm::Type *Type = getIntNType(Context.toBits(AccessSize));
         if (!Context.getTargetInfo().hasCheapUnalignedBitFieldAccess()) {
-          // This alignment is that of the storage used -- for instance
-          // (usually) 4 bytes for a 24-bit type.
+          // Unaligned accesses are expensive. Only accumulate if the new unit
+          // is naturally aligned. Otherwise install the best we have, which is
+          // either the initial access unit (can't do better), or a naturally
+          // aligned subsequent accumulation.
           CharUnits Align = getAlignment(Type);
-          if (Align > Layout.getAlignment() || !StartOffset.isMultipleOf(Align))
-            // Not naturally aligned.
-            Install = true;
+          if (Align > Layout.getAlignment())
+            // The alignment required is greater than the containing structure
+            // itself.
+            InstallBest = true;
+          else if (!BeginOffset.isMultipleOf(Align))
+            // The access unit is not at a naturally aligned offset within the
+            // structure.
+            InstallBest = true;
         }
 
-        if (!Install) {
-          Size = getSize(Type);
-          if (StartOffset + Size <= Limit) {
-            // The next unit starts later, extend the current access unit to
-            // include the just-gathered access unit.
-            BestEndOffset = StartOffset + Size;
+        if (!InstallBest) {
+          CharUnits TypeSize = getSize(Type);
+          if (BeginOffset + TypeSize <= LimitOffset) {
+            // There is padding before LimitOffset that we can extend into,
+            // creating a naturally-sized access unit.
+            BestEndOffset = BeginOffset + TypeSize;
             BestEnd = Field;
           }
 
-          if (Volatile || Barrier)
-            // Contained a volatile, or next access unit is a barrier.
-            Install = true;
+          if (Barrier)
+            // The next field is a barrier that we cannot merge across.
+            InstallBest = true;
+          else if (Volatile)
+            // The initial span contains a volatile bitfield, do not merge any
+            // subsequent spans. (We can only get here for the initial span, any
+            // subsequent potential volatile span will have already bailed
+            // above.)
+            InstallBest = true;
           else
-            BitSize = Context.toBits(Limit - StartOffset);
+            // LimitOffset is the offset of the (aligned) next bitfield in this
+            // case.
+            BitSizeSinceBegin = Context.toBits(LimitOffset - BeginOffset);
         }
       }
     }
 
-    if (Install) {
-      CharUnits Size = BestEndOffset - StartOffset;
-      if (!Size.isZero()) {
+    if (InstallBest) {
+      assert((Field == FieldEnd || !Field->isBitField() ||
+              (getFieldBitOffset(*Field) % CharBits) == 0) &&
+             "Installing but not at an aligned bitfield or limit");
+      CharUnits AccessSize = BestEndOffset - BeginOffset;
+      if (!AccessSize.isZero()) {
         // Add the storage member for the access unit to the record. The
         // bitfields get the offset of their storage but come afterward and
         // remain there after a stable sort.
-        llvm::Type *Type = getIntNType(Context.toBits(Size));
-        Members.push_back(StorageInfo(StartOffset, Type));
+        llvm::Type *Type = getIntNType(Context.toBits(AccessSize));
+        Members.push_back(StorageInfo(BeginOffset, Type));
         for (; Begin != BestEnd; ++Begin)
           if (!Begin->isZeroLengthBitField(Context))
             Members.push_back(
-                MemberInfo(StartOffset, MemberInfo::Field, nullptr, *Begin));
+                MemberInfo(BeginOffset, MemberInfo::Field, nullptr, *Begin));
       }
-      // Reset to start a new Access Unit.
+      // Reset to start a new span.
       Field = BestEnd;
       Begin = FieldEnd;
     } else {
-      // Accumulate this bitfield into the (potentially) current access unit.
-      BitSize += Field->getBitWidthValue(Context);
+      assert(Field != FieldEnd && Field->isBitField() &&
+             "Accumulating past end of bitfields");
+      assert(((getFieldBitOffset(*Field) % CharBits) != 0 ||
+              (!Volatile && !Barrier)) &&
+             "Accumulating across volatile or barrier");
+      // Accumulate this bitfield into the current (potential) span.
+      BitSizeSinceBegin += Field->getBitWidthValue(Context);
       if (Field->getType().isVolatileQualified())
         Volatile = true;
       ++Field;

>From a4bbfed1bfe5a354edf995689c02920d532d49f7 Mon Sep 17 00:00:00 2001
From: Nathan Sidwell <nathan at acm.org>
Date: Mon, 18 Mar 2024 07:57:37 -0400
Subject: [PATCH 6/8] Remove volatile bitfield separation heuristic

---
 clang/lib/CodeGen/CGRecordLayoutBuilder.cpp   |  20 +--
 .../test/CodeGen/aapcs-bitfield-access-unit.c |  10 +-
 clang/test/CodeGen/aapcs-bitfield.c           | 114 +++++++++---------
 3 files changed, 62 insertions(+), 82 deletions(-)

diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index bd022979956a48..7fda745fc144e3 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -499,7 +499,6 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
 
   // The accumulation is also prevented when:
   // *) it would cross a zero-width bitfield (ABI-dependent), or
-  // *) one of the candidate access units contains a volatile bitfield, or
   // *) fine-grained bitfield access option is in effect.
 
   CharUnits RegSize =
@@ -523,10 +522,6 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
   RecordDecl::field_iterator BestEnd = Begin;
   CharUnits BestEndOffset;
 
-  bool Volatile; // True iff the initial span or post-BestEnd span contains a
-                 // volatile bitfield. We do not want to merge spans containing
-                 // a volatile bitfield.
-
   for (;;) {
     // AtAlignedBoundary is true iff Field is the (potential) start of a new
     // span (or the end of the bitfields). When true, LimitOffset is the
@@ -546,7 +541,6 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
         assert((BitOffset % CharBits) == 0 && "Not at start of char");
         BeginOffset = bitsToCharUnits(BitOffset);
         BitSizeSinceBegin = 0;
-        Volatile = false;
       } else if ((BitOffset % CharBits) != 0) {
         // Bitfield occupies the same char as previous, it must be part of the
         // same span.
@@ -604,10 +598,6 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
           // A zero-sized initial span -- this will install nothing and reset
           // for another.
           InstallBest = true;
-      } else if (Volatile) {
-        // We've encountered a volatile bitfield in the just-seen non-initial
-        // span. It should not be merged into the current accumulation.
-        InstallBest = true;
       } else if (AccessSize > RegSize)
         // Accumulating the just-seen span would create a multi-register access
         // unit, which would increase register pressure.
@@ -645,12 +635,6 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
           if (Barrier)
             // The next field is a barrier that we cannot merge-across.
             InstallBest = true;
-          else if (Volatile)
-            // The initial span contains a volatile bitfield, do not merge any
-            // subsequent spans. (We can only get here for the initial span, any
-            // subsequent potential volatile span will have already bailed
-            // above.)
-            InstallBest = true;
           else
             // LimitOffset is the offset of the (aligned) next bitfield in this
             // case.
@@ -682,12 +666,10 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
       assert(Field != FieldEnd && Field->isBitField() &&
              "Accumulating past end of bitfields");
       assert(((getFieldBitOffset(*Field) % CharBits) != 0 ||
-              (!Volatile && !Barrier)) &&
+              !Barrier) &&
              "Accumulating across volatile or barrier");
       // Accumulate this bitfield into the current (potential) span.
       BitSizeSinceBegin += Field->getBitWidthValue(Context);
-      if (Field->getType().isVolatileQualified())
-        Volatile = true;
       ++Field;
     }
   }
diff --git a/clang/test/CodeGen/aapcs-bitfield-access-unit.c b/clang/test/CodeGen/aapcs-bitfield-access-unit.c
index 963610eed6bae1..d4028a0ddf85eb 100644
--- a/clang/test/CodeGen/aapcs-bitfield-access-unit.c
+++ b/clang/test/CodeGen/aapcs-bitfield-access-unit.c
@@ -68,12 +68,12 @@ struct st5 {
   volatile char c : 5;
 } st5;
 // LAYOUT-LABEL: LLVMType:%struct.st5 =
-// LAYOUT-SAME: type { i16, i8 }
+// LAYOUT-SAME: type { i32 }
 // LAYOUT: BitFields:[
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:12 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:2
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:4 Size:12 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:3 Size:5 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:12 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:16 Size:5 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:20 Size:12 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:11 Size:5 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: ]>
 
 struct st6 {
diff --git a/clang/test/CodeGen/aapcs-bitfield.c b/clang/test/CodeGen/aapcs-bitfield.c
index a5d2c2cfd8507c..0df250d4ebc53e 100644
--- a/clang/test/CodeGen/aapcs-bitfield.c
+++ b/clang/test/CodeGen/aapcs-bitfield.c
@@ -809,42 +809,44 @@ struct st5 {
 
 // LE-LABEL: @st5_check_load(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LE-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
-// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
-// LE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LE-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 11
+// LE-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// LE-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// LE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
 // LE-NEXT:    ret i32 [[CONV]]
 //
 // BE-LABEL: @st5_check_load(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
-// BE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BE-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BE-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// BE-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// BE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
 // BE-NEXT:    ret i32 [[CONV]]
 //
 // LENUMLOADS-LABEL: @st5_check_load(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
-// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
-// LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 11
+// LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
 // LENUMLOADS-NEXT:    ret i32 [[CONV]]
 //
 // BENUMLOADS-LABEL: @st5_check_load(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
-// BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
 // BENUMLOADS-NEXT:    ret i32 [[CONV]]
 //
 // LEWIDTH-LABEL: @st5_check_load(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -852,16 +854,16 @@ struct st5 {
 //
 // BEWIDTH-LABEL: @st5_check_load(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
 // BEWIDTH-NEXT:    ret i32 [[CONV]]
 //
 // LEWIDTHNUM-LABEL: @st5_check_load(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -869,8 +871,8 @@ struct st5 {
 //
 // BEWIDTHNUM-LABEL: @st5_check_load(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
 // BEWIDTHNUM-NEXT:    ret i32 [[CONV]]
@@ -881,74 +883,70 @@ int st5_check_load(struct st5 *m) {
 
 // LE-LABEL: @st5_check_store(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
-// LE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LE-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -2031617
+// LE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LE-NEXT:    store volatile i32 [[BF_SET]], ptr [[M]], align 4
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @st5_check_store(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
-// BE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BE-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -63489
+// BE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 2048
+// BE-NEXT:    store volatile i32 [[BF_SET]], ptr [[M]], align 4
 // BE-NEXT:    ret void
 //
 // LENUMLOADS-LABEL: @st5_check_store(
 // LENUMLOADS-NEXT:  entry:
-// LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
-// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LENUMLOADS-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -2031617
+// LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LENUMLOADS-NEXT:    store volatile i32 [[BF_SET]], ptr [[M]], align 4
 // LENUMLOADS-NEXT:    ret void
 //
 // BENUMLOADS-LABEL: @st5_check_store(
 // BENUMLOADS-NEXT:  entry:
-// BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
-// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BENUMLOADS-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -63489
+// BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 2048
+// BENUMLOADS-NEXT:    store volatile i32 [[BF_SET]], ptr [[M]], align 4
 // BENUMLOADS-NEXT:    ret void
 //
 // LEWIDTH-LABEL: @st5_check_store(
 // LEWIDTH-NEXT:  entry:
-// LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
 // LEWIDTH-NEXT:    ret void
 //
 // BEWIDTH-LABEL: @st5_check_store(
 // BEWIDTH-NEXT:  entry:
-// BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTH-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
 // BEWIDTH-NEXT:    ret void
 //
 // LEWIDTHNUM-LABEL: @st5_check_store(
 // LEWIDTHNUM-NEXT:  entry:
-// LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
 // LEWIDTHNUM-NEXT:    ret void
 //
 // BEWIDTHNUM-LABEL: @st5_check_store(
 // BEWIDTHNUM-NEXT:  entry:
-// BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
 // BEWIDTHNUM-NEXT:    ret void
 //
 void st5_check_store(struct st5 *m) {

>From 59cb2e682c3eb52f87408bcb913ee36b37dd4e1e Mon Sep 17 00:00:00 2001
From: Nathan Sidwell <nathan at acm.org>
Date: Mon, 18 Mar 2024 09:04:03 -0400
Subject: [PATCH 7/8] Always barrier

---
 clang/lib/CodeGen/CGRecordLayoutBuilder.cpp | 18 ++++---
 clang/test/CodeGen/bitfield-access-pad.c    | 58 ++++++++++-----------
 2 files changed, 40 insertions(+), 36 deletions(-)

diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 7fda745fc144e3..595caa3e0a3499 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -498,7 +498,7 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
   // again at the bitfield following that best one.
 
   // The accumulation is also prevented when:
-  // *) it would cross a zero-width bitfield (ABI-dependent), or
+  // *) it would cross a character-aligned zero-width bitfield, or
   // *) fine-grained bitfield access option is in effect.
 
   CharUnits RegSize =
@@ -542,16 +542,20 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
         BeginOffset = bitsToCharUnits(BitOffset);
         BitSizeSinceBegin = 0;
       } else if ((BitOffset % CharBits) != 0) {
-        // Bitfield occupies the same char as previous, it must be part of the
-        // same span.
+        // Bitfield occupies the same character as previous bitfield, it must be
+        // part of the same span. This can include zero-length bitfields, should
+        // the target not align them to character boundaries. Such non-alignment
+        // is at variance with the C++ std that requires zero-length bitfields
+        // be a barrier between access units. But of course we can't achieve
+        // that in the middle of a character.
         assert(BitOffset == Context.toBits(BeginOffset) + BitSizeSinceBegin &&
                "Concatenating non-contiguous bitfields");
       } else {
-        // Bitfield could begin a new span.
+        // Bitfield potentially begins a new span. This includes zero-length
+        // bitfields on non-aligning targets that lie at character boundaries
+        // (those are barriers to merging).
         LimitOffset = bitsToCharUnits(BitOffset);
-        if (Field->isZeroLengthBitField(Context) &&
-            (Context.getTargetInfo().useZeroLengthBitfieldAlignment() ||
-             Context.getTargetInfo().useBitFieldTypeAlignment()))
+        if (Field->isZeroLengthBitField(Context))
           Barrier = true;
         AtAlignedBoundary = true;
       }
diff --git a/clang/test/CodeGen/bitfield-access-pad.c b/clang/test/CodeGen/bitfield-access-pad.c
index caf940438fd855..34d8f674a56acc 100644
--- a/clang/test/CodeGen/bitfield-access-pad.c
+++ b/clang/test/CodeGen/bitfield-access-pad.c
@@ -46,15 +46,15 @@ struct P1 {
 // CHECK-LABEL: LLVMType:%struct.P1 =
 // LAYOUT-T-SAME: type { i8, i8, [2 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, i8 }
-// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
 // LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
@@ -76,15 +76,15 @@ struct P2 {
 // CHECK-LABEL: LLVMType:%struct.P2 =
 // LAYOUT-T-SAME: type { i8, i8, i8, i8 }
 // LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
-// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
 // LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
@@ -106,15 +106,15 @@ struct P3 {
 // CHECK-LABEL: LLVMType:%struct.P3 =
 // LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
-// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
 // LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
@@ -135,15 +135,15 @@ struct P4 {
 // CHECK-LABEL: LLVMType:%struct.P4 =
 // LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
-// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
 // LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
@@ -163,15 +163,15 @@ struct P5 {
 // CHECK-LABEL: LLVMType:%struct.P5 =
 // LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
-// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
 // LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
@@ -193,15 +193,15 @@ struct P6 {
 // CHECK-LABEL: LLVMType:%struct.P6 =
 // LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
-// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
 // LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
@@ -221,15 +221,15 @@ struct P7 {
 // CHECK-LABEL: LLVMType:%struct.P7 =
 // LAYOUT-T-SAME: type { i8, i8, i8, i8 }
 // LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
-// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
 // LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
@@ -251,18 +251,18 @@ struct __attribute__ ((aligned (2))) P7_align {
 // CHECK-LABEL: LLVMType:%struct.P7_align =
 // LAYOUT-T-SAME: type { i8, i8, i8, i8 }
 // LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
-// LAYOUT-NT-SAME: type { i16 }
-// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
 // LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
 
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
-// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
 
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
 // LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
@@ -370,8 +370,8 @@ struct __attribute__((aligned(4))) P11 {
 // CHECK-LABEL: LLVMType:%struct.P11 =
 // LAYOUT-T-SAME: type { i24 }
 // LAYOUT-ARM64-T-SAME: type { i24 }
-// LAYOUT-NT-SAME: type { i32 }
-// LAYOUT-STRICT-NT-SAME: type { i32 }
+// LAYOUT-NT-SAME: type { i24 }
+// LAYOUT-STRICT-NT-SAME: type { i24 }
 // LAYOUT-DWN32-SAME: type { i32 }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0

>From 416c79a53141b539f3b385f9f538ef15522a05d2 Mon Sep 17 00:00:00 2001
From: Nathan Sidwell <nathan at acm.org>
Date: Mon, 18 Mar 2024 11:43:48 -0400
Subject: [PATCH 8/8] Look for next limit after barriers

---
 clang/lib/CodeGen/CGRecordLayoutBuilder.cpp | 45 +++++++++++++--------
 clang/test/CodeGen/bitfield-access-pad.c    | 12 +++---
 2 files changed, 34 insertions(+), 23 deletions(-)

diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 595caa3e0a3499..b665393ec85c89 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -554,30 +554,41 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
         // Bitfield potentially begins a new span. This includes zero-length
         // bitfields on non-aligning targets that lie at character boundaries
         // (those are barriers to merging).
-        LimitOffset = bitsToCharUnits(BitOffset);
         if (Field->isZeroLengthBitField(Context))
+          // This is a barrier, but it might be followed by padding we could
+          // use. Hence handle exactly as if we reached the end of the bitfield
+          // run.
           Barrier = true;
-        AtAlignedBoundary = true;
+        else {
+          LimitOffset = bitsToCharUnits(BitOffset);
+          AtAlignedBoundary = true;
+        }
       }
-    } else if (Begin == FieldEnd) {
-      // Completed the bitfields.
-      break;
     } else {
-      // We've reached the end of the bitfield run while accumulating a span.
-      // Determine the limit of that span: either the offset of the next field,
-      // or if we're at the end of the record the end of its non-reuseable tail
-      // padding. (I.e. treat the next unusable char as the start of an
-      // unmergeable span.)
+      // We've reached the end of the bitfield run. Either we're done, or this
+      // is a barrier for the current span.
+      if (Begin == FieldEnd)
+        break;
+
+      Barrier = true;
+    }
+
+    if (Barrier) {
+      // We're at a barrier, find the next used storage to determine what the
+      // limit of the current span is. That's either the offset of the next
+      // field with storage or the end of the non-reusable tail padding.
       auto Probe = Field;
       while (Probe != FieldEnd && Probe->isZeroSize(Context))
         ++Probe;
-      // We can't necessarily use tail padding in C++ structs, so the NonVirtual
-      // size is what we must use there.
-      LimitOffset = Probe != FieldEnd
-                        ? bitsToCharUnits(getFieldBitOffset(*Probe))
-                    : RD ? Layout.getNonVirtualSize()
-                         : Layout.getDataSize();
-      Barrier = true;
+      if (Probe != FieldEnd) {
+        assert((getFieldBitOffset(*Field) % CharBits) == 0 &&
+               "Next storage is not byte-aligned");
+        LimitOffset = bitsToCharUnits(getFieldBitOffset(*Probe));
+      } else {
+        // We can't necessarily use tail padding in C++ structs, so the
+        // NonVirtual size is what we must use there.
+        LimitOffset = RD ? Layout.getNonVirtualSize() : Layout.getDataSize();
+      }
       AtAlignedBoundary = true;
     }
 
diff --git a/clang/test/CodeGen/bitfield-access-pad.c b/clang/test/CodeGen/bitfield-access-pad.c
index 34d8f674a56acc..edda7b7798d057 100644
--- a/clang/test/CodeGen/bitfield-access-pad.c
+++ b/clang/test/CodeGen/bitfield-access-pad.c
@@ -334,8 +334,8 @@ struct __attribute__((aligned(4))) P10 {
   char : 0;
 } p10;
 // CHECK-LABEL: LLVMType:%struct.P10 =
-// LAYOUT-T-SAME: type { i24 }
-// LAYOUT-ARM64-T-SAME: type { i24 }
+// LAYOUT-T-SAME: type { i32 }
+// LAYOUT-ARM64-T-SAME: type { i32 }
 // LAYOUT-NT-SAME: type { i32 }
 // LAYOUT-STRICT-NT-SAME: type { i32 }
 // LAYOUT-DWN32-SAME: type { i32 }
@@ -368,10 +368,10 @@ struct __attribute__((aligned(4))) P11 {
   char : 0; // at a char boundary
 } p11;
 // CHECK-LABEL: LLVMType:%struct.P11 =
-// LAYOUT-T-SAME: type { i24 }
-// LAYOUT-ARM64-T-SAME: type { i24 }
-// LAYOUT-NT-SAME: type { i24 }
-// LAYOUT-STRICT-NT-SAME: type { i24 }
+// LAYOUT-T-SAME: type { i32 }
+// LAYOUT-ARM64-T-SAME: type { i32 }
+// LAYOUT-NT-SAME: type { i32 }
+// LAYOUT-STRICT-NT-SAME: type { i32 }
 // LAYOUT-DWN32-SAME: type { i32 }
 // CHECK: BitFields:[
 // LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0



More information about the cfe-commits mailing list