[clang] [CIR] Implemented get/set for volatile bitfields (PR #151875)

via cfe-commits cfe-commits at lists.llvm.org
Sun Aug 3 10:32:31 PDT 2025


llvmbot wrote:


@llvm/pr-subscribers-clang

Author: None (Andres-Salamanca)

<details>
<summary>Changes</summary>

This PR adds support for loading and storing volatile bit-field members according to the AAPCS specification.

> A volatile bit-field must always be accessed using an access width appropriate to the type of its container, except when any of the following are true:
>
> * The bit-field container overlaps with a zero-length bit-field.
> * The bit-field container overlaps with a non-bit-field member.

For example, if a bit-field is declared as `int`, the load/store must use a 32-bit access, even if the field itself is only 3 bits wide.
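The sketch below is illustrative only and not part of this patch; the struct and function names are hypothetical. It shows the full-width rule and the non-bit-field-member exception in plain C:

```c
// Hypothetical example of the AAPCS volatile bit-field access rule.
typedef struct {
  // Container type is unsigned int and nothing overlaps it, so a volatile
  // access to x or y must use a full 32-bit load/store.
  volatile unsigned int x : 3;
  volatile unsigned int y : 5;
} no_overlap;

typedef struct {
  // The container of x overlaps the non-bit-field member c, so a narrower
  // access is permitted.
  volatile int x : 24;
  char c;
} with_overlap;

unsigned read_y(no_overlap *p) {
  // Under AAPCS this is expected to lower to a 32-bit volatile load of the
  // container followed by a shift and mask that extract y.
  return p->y;
}
```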

---
Full diff: https://github.com/llvm/llvm-project/pull/151875.diff


4 Files Affected:

- (modified) clang/lib/CIR/CodeGen/CIRGenBuilder.h (+23-7) 
- (modified) clang/lib/CIR/CodeGen/CIRGenExpr.cpp (+16-9) 
- (modified) clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp (+3-2) 
- (modified) clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c (+225-13) 


``````````diff
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index 73c9fb924f682..ff8e12190c972 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -410,21 +410,37 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
   mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType,
                                 Address dstAddr, mlir::Type storageType,
                                 mlir::Value src, const CIRGenBitFieldInfo &info,
-                                bool isLvalueVolatile) {
+                                bool isLvalueVolatile, bool useVolatile) {
+    unsigned offset = useVolatile ? info.volatileOffset : info.offset;
+
+    // If using AAPCS and the field is volatile, store using the width of the
+    // declared field type.
+    storageType =
+        useVolatile ? cir::IntType::get(storageType.getContext(),
+                                        info.volatileStorageSize, info.isSigned)
+                    : storageType;
     return create<cir::SetBitfieldOp>(
         loc, resultType, dstAddr.getPointer(), storageType, src, info.name,
-        info.size, info.offset, info.isSigned, isLvalueVolatile,
+        info.size, offset, info.isSigned, isLvalueVolatile,
         dstAddr.getAlignment().getAsAlign().value());
   }
 
   mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType,
                                 Address addr, mlir::Type storageType,
                                 const CIRGenBitFieldInfo &info,
-                                bool isLvalueVolatile) {
-    return create<cir::GetBitfieldOp>(
-        loc, resultType, addr.getPointer(), storageType, info.name, info.size,
-        info.offset, info.isSigned, isLvalueVolatile,
-        addr.getAlignment().getAsAlign().value());
+                                bool isLvalueVolatile, bool useVolatile) {
+    unsigned offset = useVolatile ? info.volatileOffset : info.offset;
+
+    // If using AAPCS and the field is volatile, load using the width of the
+    // declared field type.
+    storageType =
+        useVolatile ? cir::IntType::get(storageType.getContext(),
+                                        info.volatileStorageSize, info.isSigned)
+                    : storageType;
+    return create<cir::GetBitfieldOp>(loc, resultType, addr.getPointer(),
+                                      storageType, info.name, info.size, offset,
+                                      info.isSigned, isLvalueVolatile,
+                                      addr.getAlignment().getAsAlign().value());
   }
 };
 
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index cd37a2bd276bc..574a46715c2ee 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -322,22 +322,27 @@ void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
   assert(!cir::MissingFeatures::opTBAA());
 }
 
+/// Helper method to check if the underlying ABI is AAPCS
+static bool isAAPCS(const TargetInfo &targetInfo) {
+  return targetInfo.getABI().starts_with("aapcs");
+}
+
 mlir::Value CIRGenFunction::emitStoreThroughBitfieldLValue(RValue src,
                                                            LValue dst) {
 
-  assert(!cir::MissingFeatures::armComputeVolatileBitfields());
-
   const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
   mlir::Type resLTy = convertTypeForMem(dst.getType());
   Address ptr = dst.getBitFieldAddress();
 
-  assert(!cir::MissingFeatures::armComputeVolatileBitfields());
+  bool useVolatile = cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
+                     dst.isVolatileQualified() &&
+                     info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget());
 
   mlir::Value dstAddr = dst.getAddress().getPointer();
 
   return builder.createSetBitfield(dstAddr.getLoc(), resLTy, ptr,
                                    ptr.getElementType(), src.getValue(), info,
-                                   dst.isVolatileQualified());
+                                   dst.isVolatileQualified(), useVolatile);
 }
 
 RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
@@ -347,10 +352,12 @@ RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
   mlir::Type resLTy = convertType(lv.getType());
   Address ptr = lv.getBitFieldAddress();
 
-  assert(!cir::MissingFeatures::armComputeVolatileBitfields());
+  bool useVolatile = lv.isVolatileQualified() && info.volatileOffset != 0 &&
+                     isAAPCS(cgm.getTarget());
 
-  mlir::Value field = builder.createGetBitfield(
-      getLoc(loc), resLTy, ptr, ptr.getElementType(), info, lv.isVolatile());
+  mlir::Value field =
+      builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(),
+                                info, lv.isVolatile(), useVolatile);
   assert(!cir::MissingFeatures::opLoadEmitScalarRangeCheck() && "NYI");
   return RValue::get(field);
 }
@@ -375,10 +382,10 @@ LValue CIRGenFunction::emitLValueForBitField(LValue base,
   const CIRGenRecordLayout &layout =
       cgm.getTypes().getCIRGenRecordLayout(field->getParent());
   const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);
-  assert(!cir::MissingFeatures::armComputeVolatileBitfields());
+
   assert(!cir::MissingFeatures::preservedAccessIndexRegion());
-  unsigned idx = layout.getCIRFieldNo(field);
 
+  unsigned idx = layout.getCIRFieldNo(field);
   Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);
 
   mlir::Location loc = getLoc(field->getLocation());
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
index ecf31a7024987..1764967329969 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
@@ -847,8 +847,9 @@ void CIRRecordLowering::computeVolatileBitfields() {
 
       const CharUnits fEnd =
           fOffset +
-          astContext.toCharUnitsFromBits(astContext.toBits(
-              getSizeInBits(cirGenTypes.convertTypeForMem(f->getType())))) -
+          astContext.toCharUnitsFromBits(
+              getSizeInBits(cirGenTypes.convertTypeForMem(f->getType()))
+                  .getQuantity()) -
           CharUnits::One();
       // If no overlap, continue.
       if (end < fOffset || fEnd < storageOffset)
diff --git a/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c b/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c
index 3643cf257933e..00378f725d76a 100644
--- a/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c
+++ b/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c
@@ -1,8 +1,13 @@
 // RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -emit-cir -fdump-record-layouts %s -o %t.cir 1> %t.cirlayout
 // RUN: FileCheck --input-file=%t.cirlayout %s --check-prefix=CIR-LAYOUT
+// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR
+
+// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
 
 // RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -emit-llvm -fdump-record-layouts %s -o %t.ll 1> %t.ogcglayout
 // RUN: FileCheck --input-file=%t.ogcglayout %s --check-prefix=OGCG-LAYOUT
+// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG
 
 typedef struct  {
     unsigned int a : 9;
@@ -53,21 +58,228 @@ typedef struct{
 
 typedef struct{
     volatile unsigned int a : 3;
-    unsigned int z: 2;
-    volatile unsigned int b : 5;
+    unsigned int z;
+    volatile unsigned long b : 16;
 } st4;
 
 // CIR-LAYOUT: BitFields:[
-// CIR-LAYOUT-NEXT:   <CIRBitFieldInfo name:a offset:0 size:3 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:0 volatileStorageSize:32 volatileStorageOffset:0>
-// CIR-LAYOUT-NEXT:   <CIRBitFieldInfo name:z offset:3 size:2 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:3 volatileStorageSize:32 volatileStorageOffset:0>
-// CIR-LAYOUT-NEXT:   <CIRBitFieldInfo name:b offset:5 size:5 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:5 volatileStorageSize:32 volatileStorageOffset:0>
+// CIR-LAYOUT-NEXT:   <CIRBitFieldInfo name:a offset:0 size:3 isSigned:0 storageSize:8 storageOffset:0 volatileOffset:0 volatileStorageSize:32 volatileStorageOffset:0>
+// CIR-LAYOUT-NEXT:   <CIRBitFieldInfo name:b offset:0 size:16 isSigned:0 storageSize:16 storageOffset:8 volatileOffset:0 volatileStorageSize:64 volatileStorageOffset:1>
 
 // OGCG-LAYOUT: BitFields:[
-// OGCG-LAYOUT-NEXT:   <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:0 VolatileStorageSize:32 VolatileStorageOffset:0>
-// OGCG-LAYOUT-NEXT:   <CGBitFieldInfo Offset:3 Size:2 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:3 VolatileStorageSize:32 VolatileStorageOffset:0>
-// OGCG-LAYOUT-NEXT:   <CGBitFieldInfo Offset:5 Size:5 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:5 VolatileStorageSize:32 VolatileStorageOffset:0>
-
-st1 s1;
-st2 s2;
-st3 s3;
-st4 s4;
+// OGCG-LAYOUT-NEXT:   <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0 VolatileOffset:0 VolatileStorageSize:32 VolatileStorageOffset:0>
+// OGCG-LAYOUT-NEXT:   <CGBitFieldInfo Offset:0 Size:16 IsSigned:0 StorageSize:16 StorageOffset:8 VolatileOffset:0 VolatileStorageSize:64 VolatileStorageOffset:1>
+
+
+void def() {
+  st1 s1;
+  st2 s2;
+  st3 s3;
+  st4 s4;
+}
+
+int check_load(st1 *s1) {
+  return s1->b;
+}
+
+// CIR:  cir.func dso_local @check_load
+// CIR:    [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st1>>, !cir.ptr<!rec_st1>
+// CIR:    [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "b"} : !cir.ptr<!rec_st1> -> !cir.ptr<!u16i>
+// CIR:    [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_b, [[MEMBER]] {is_volatile} : !cir.ptr<!u16i>) -> !u32i
+// CIR:    [[CAST:%.*]] = cir.cast(integral, [[BITFI]] : !u32i), !s32i
+// CIR:    cir.store [[CAST]], [[RETVAL:%.*]] : !s32i, !cir.ptr<!s32i>
+// CIR:    [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR:    cir.return [[RET]] : !s32i
+
+// LLVM:define dso_local i32 @check_load
+// LLVM:  [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM:  [[MEMBER:%.*]] = getelementptr %struct.st1, ptr [[LOAD]], i32 0, i32 0
+// LLVM:  [[LOADVOL:%.*]] = load volatile i32, ptr [[MEMBER]], align 4
+// LLVM:  [[LSHR:%.*]] = lshr i32 [[LOADVOL]], 9
+// LLVM:  [[CLEAR:%.*]] = and i32 [[LSHR]], 1
+// LLVM:  store i32 [[CLEAR]], ptr [[RETVAL:%.*]], align 4
+// LLVM:  [[RET:%.*]] = load i32, ptr [[RETVAL]], align 4
+// LLVM:  ret i32 [[RET]]
+
+// OGCG: define dso_local i32 @check_load
+// OGCG:   [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG:   [[LOADVOL:%.*]] = load volatile i32, ptr [[LOAD]], align 4
+// OGCG:   [[LSHR:%.*]] = lshr i32 [[LOADVOL]], 9
+// OGCG:   [[CLEAR:%.*]] = and i32 [[LSHR]], 1
+// OGCG:   ret i32 [[CLEAR]]
+
+// This volatile bit-field container overlaps with a zero-length bit-field,
+// so it may be accessed with a width narrower than its container type.
+int check_load_exception(st3 *s3) {
+  return s3->b;
+}
+
+// CIR:  cir.func dso_local @check_load_exception
+// CIR:    [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st3>>, !cir.ptr<!rec_st3>
+// CIR:    [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr<!rec_st3> -> !cir.ptr<!u8i>
+// CIR:    [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_b1, [[MEMBER]] {is_volatile} : !cir.ptr<!u8i>) -> !u32i
+// CIR:    [[CAST:%.*]] = cir.cast(integral, [[BITFI]] : !u32i), !s32i
+// CIR:    cir.store [[CAST]], [[RETVAL:%.*]] : !s32i, !cir.ptr<!s32i>
+// CIR:    [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR:    cir.return [[RET]] : !s32i
+
+// LLVM:define dso_local i32 @check_load_exception
+// LLVM:  [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM:  [[MEMBER:%.*]] = getelementptr %struct.st3, ptr [[LOAD]], i32 0, i32 2
+// LLVM:  [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4
+// LLVM:  [[CLEAR:%.*]] = and i8 [[LOADVOL]], 31
+// LLVM:  [[CAST:%.*]] = zext i8 [[CLEAR]] to i32
+// LLVM:  store i32 [[CAST]], ptr [[RETVAL:%.*]], align 4
+// LLVM:  [[RET:%.*]] = load i32, ptr [[RETVAL]], align 4
+// LLVM:  ret i32 [[RET]]
+
+// OGCG: define dso_local i32 @check_load_exception
+// OGCG:   [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG:   [[MEMBER:%.*]] = getelementptr inbounds nuw %struct.st3, ptr [[LOAD]], i32 0, i32 2
+// OGCG:   [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4
+// OGCG:   [[CLEAR:%.*]] = and i8 [[LOADVOL]], 31
+// OGCG:   [[CAST:%.*]] = zext i8 [[CLEAR]] to i32
+// OGCG:   ret i32 [[CAST]]
+
+typedef struct {
+    volatile int a : 24;
+    char b;
+    volatile int c : 30;
+} clip;
+
+int clip_load_exception2(clip *c) {
+  return c->a;
+}
+
+// CIR:  cir.func dso_local @clip_load_exception2
+// CIR:    [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_clip>>, !cir.ptr<!rec_clip>
+// CIR:    [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "a"} : !cir.ptr<!rec_clip> -> !cir.ptr<!cir.array<!u8i x 3>>
+// CIR:    [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_a1, [[MEMBER]] {is_volatile} : !cir.ptr<!cir.array<!u8i x 3>>) -> !s32i
+// CIR:    cir.store [[BITFI]], [[RETVAL:%.*]] : !s32i, !cir.ptr<!s32i>
+// CIR:    [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR:    cir.return [[RET]] : !s32i
+
+// LLVM:define dso_local i32 @clip_load_exception2
+// LLVM:  [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM:  [[MEMBER:%.*]] = getelementptr %struct.clip, ptr [[LOAD]], i32 0, i32 0
+// LLVM:  [[LOADVOL:%.*]] = load volatile i24, ptr [[MEMBER]], align 4
+// LLVM:  [[CAST:%.*]] = sext i24 [[LOADVOL]] to i32
+// LLVM:  store i32 [[CAST]], ptr [[RETVAL:%.*]], align 4
+// LLVM:  [[RET:%.*]] = load i32, ptr [[RETVAL]], align 4
+// LLVM:  ret i32 [[RET]]
+
+// OGCG: define dso_local i32 @clip_load_exception2
+// OGCG:   [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG:   [[LOADVOL:%.*]] = load volatile i24, ptr [[LOAD]], align 4
+// OGCG:   [[CAST:%.*]] = sext i24 [[LOADVOL]] to i32
+// OGCG:   ret i32 [[CAST]]
+
+void check_store(st2 *s2) {
+  s2->a = 1;
+}
+
+// CIR:  cir.func dso_local @check_store
+// CIR:    [[CONST:%.*]] = cir.const #cir.int<1> : !s32i
+// CIR:    [[CAST:%.*]] = cir.cast(integral, [[CONST]] : !s32i), !s16i
+// CIR:    [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st2>>, !cir.ptr<!rec_st2>
+// CIR:    [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "a"} : !cir.ptr<!rec_st2> -> !cir.ptr<!u32i>
+// CIR:    [[SETBF:%.*]] = cir.set_bitfield align(8) (#bfi_a, [[MEMBER]] : !cir.ptr<!u32i>, [[CAST]] : !s16i) {is_volatile} -> !s16i
+// CIR:    cir.return
+
+// LLVM:define dso_local void @check_store
+// LLVM:  [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM:  [[MEMBER:%.*]] = getelementptr %struct.st2, ptr [[LOAD]], i32 0, i32 0
+// LLVM:  [[LOADVOL:%.*]] = load volatile i16, ptr [[MEMBER]], align 8
+// LLVM:  [[CLEAR:%.*]] = and i16 [[LOADVOL]], -8
+// LLVM:  [[SET:%.*]] = or i16 [[CLEAR]], 1
+// LLVM:  store volatile i16 [[SET]], ptr [[MEMBER]], align 8
+// LLVM:  ret void
+
+// OGCG: define dso_local void @check_store
+// OGCG:   [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG:   [[LOADVOL:%.*]] = load volatile i16, ptr [[LOAD]], align 8
+// OGCG:   [[CLEAR:%.*]] = and i16 [[LOADVOL]], -8
+// OGCG:   [[SET:%.*]] = or i16 [[CLEAR]], 1
+// OGCG:   store volatile i16 [[SET]], ptr [[LOAD]], align 8
+// OGCG:   ret void
+
+// This volatile bit-field container overlaps with a zero-length bit-field,
+// so it may be accessed with a width narrower than its container type.
+void check_store_exception(st3 *s3) {
+  s3->b = 2;
+}
+
+// CIR:  cir.func dso_local @check_store_exception
+// CIR:    [[CONST:%.*]] = cir.const #cir.int<2> : !s32i
+// CIR:    [[CAST:%.*]] = cir.cast(integral, [[CONST]] : !s32i), !u32i
+// CIR:    [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st3>>, !cir.ptr<!rec_st3>
+// CIR:    [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr<!rec_st3> -> !cir.ptr<!u8i>
+// CIR:    [[SETBF:%.*]] = cir.set_bitfield align(4) (#bfi_b1, [[MEMBER]] : !cir.ptr<!u8i>, [[CAST]] : !u32i) {is_volatile} -> !u32i
+// CIR:    cir.return
+
+// LLVM:define dso_local void @check_store_exception
+// LLVM:  [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM:  [[MEMBER:%.*]] = getelementptr %struct.st3, ptr [[LOAD]], i32 0, i32 2
+// LLVM:  [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4
+// LLVM:  [[CLEAR:%.*]] = and i8 [[LOADVOL]], -32
+// LLVM:  [[SET:%.*]] = or i8 [[CLEAR]], 2
+// LLVM:  store volatile i8 [[SET]], ptr [[MEMBER]], align 4
+// LLVM:  ret void
+
+// OGCG: define dso_local void @check_store_exception
+// OGCG:   [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG:   [[MEMBER:%.*]] = getelementptr inbounds nuw %struct.st3, ptr [[LOAD]], i32 0, i32 2
+// OGCG:   [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4
+// OGCG:   [[CLEAR:%.*]] = and i8 [[LOADVOL]], -32
+// OGCG:   [[SET:%.*]] = or i8 [[CLEAR]], 2
+// OGCG:   store volatile i8 [[SET]], ptr [[MEMBER]], align 4
+// OGCG:   ret void
+
+void clip_store_exception2(clip *c) {
+  c->a = 3;
+}
+
+// CIR:  cir.func dso_local @clip_store_exception2
+// CIR:    [[CONST:%.*]] = cir.const #cir.int<3> : !s32i
+// CIR:    [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_clip>>, !cir.ptr<!rec_clip>
+// CIR:    [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "a"} : !cir.ptr<!rec_clip> -> !cir.ptr<!cir.array<!u8i x 3>>
+// CIR:    [[SETBF:%.*]] = cir.set_bitfield align(4) (#bfi_a1, [[MEMBER]] : !cir.ptr<!cir.array<!u8i x 3>>, [[CONST]] : !s32i) {is_volatile} -> !s32i
+// CIR:    cir.return
+
+// LLVM:define dso_local void @clip_store_exception2
+// LLVM:  [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM:  [[MEMBER:%.*]] = getelementptr %struct.clip, ptr [[LOAD]], i32 0, i32 0
+// LLVM:  store volatile i24 3, ptr [[MEMBER]], align 4
+// LLVM:  ret void
+
+// OGCG: define dso_local void @clip_store_exception2
+// OGCG:   [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG:   store volatile i24 3, ptr [[LOAD]], align 4
+// OGCG:   ret void
+
+void check_store_second_member(st4 *s4) {
+  s4->b = 1;
+}
+
+// CIR:  cir.func dso_local @check_store_second_member
+// CIR:    [[ONE:%.*]] = cir.const #cir.int<1> : !s32i
+// CIR:    [[CAST:%.*]] = cir.cast(integral, [[ONE]] : !s32i), !u64i
+// CIR:    [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st4>>, !cir.ptr<!rec_st4>
+// CIR:    [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr<!rec_st4> -> !cir.ptr<!u16i>
+// CIR:    cir.set_bitfield align(8) (#bfi_b2, [[MEMBER]] : !cir.ptr<!u16i>, [[CAST]] : !u64i) {is_volatile} -> !u64i
+
+// LLVM: define dso_local void @check_store_second_member
+// LLVM:   [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM:   [[MEMBER:%.*]] = getelementptr %struct.st4, ptr [[LOAD]], i32 0, i32 2
+// LLVM:   [[VAL:%.*]] = load volatile i64, ptr [[MEMBER]], align 8
+// LLVM:   [[CLEAR:%.*]] = and i64 [[VAL]], -65536
+// LLVM:   [[SET:%.*]] = or i64 [[CLEAR]], 1
+// LLVM:   store volatile i64 [[SET]], ptr [[MEMBER]], align 8
+
+// OGCG: define dso_local void @check_store_second_member
+// OGCG:   [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG:   [[MEMBER:%.*]] = getelementptr inbounds i64, ptr [[LOAD]], i64 1
+// OGCG:   [[LOADBF:%.*]] = load volatile i64, ptr [[MEMBER]], align 8
+// OGCG:   [[CLR:%.*]] = and i64 [[LOADBF]], -65536
+// OGCG:   [[SET:%.*]] = or i64 [[CLR]], 1
+// OGCG:   store volatile i64 [[SET]], ptr [[MEMBER]], align 8

``````````

</details>


https://github.com/llvm/llvm-project/pull/151875

