[clang] 78921cd - [CIR] Upstream ArraySubscriptExpr for fixed size array (#134536)
Amr Hesham via cfe-commits
cfe-commits at lists.llvm.org
Thu Apr 10 15:05:26 PDT 2025
Author: Amr Hesham
Date: 2025-04-11T00:05:22+02:00
New Revision: 78921cd884c37ba18ded34b6d32a548c069d6a84
URL: https://github.com/llvm/llvm-project/commit/78921cd884c37ba18ded34b6d32a548c069d6a84
DIFF: https://github.com/llvm/llvm-project/commit/78921cd884c37ba18ded34b6d32a548c069d6a84.diff
LOG: [CIR] Upstream ArraySubscriptExpr for fixed size array (#134536)
This change adds support for ArraySubscriptExpr on fixed-size arrays (ArrayType).
Issue #130197
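As a rough illustration (not part of the commit itself), the kind of C++ the new path handles is a subscript on a fixed-size array with either a constant or a variable index, mirroring the updated tests below; variable names here are illustrative only:

    void example() {
      int arr[10];      // fixed-size array, emitted as !cir.array<!s32i x 10>
      int idx = 3;
      int a = arr[0];   // constant index: array_to_ptrdecay + cir.ptr_stride, then load
      int b = arr[idx]; // variable index: the loaded index value is used as the stride
    }

Subscripts whose base is a plain pointer, a VLA, an ObjC object type, or a vector still take errorNYI paths in this patch.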
Added:
clang/lib/CIR/CodeGen/CIRGenBuilder.cpp
Modified:
clang/include/clang/CIR/MissingFeatures.h
clang/lib/CIR/CodeGen/CIRGenBuilder.h
clang/lib/CIR/CodeGen/CIRGenExpr.cpp
clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
clang/lib/CIR/CodeGen/CIRGenFunction.cpp
clang/lib/CIR/CodeGen/CIRGenFunction.h
clang/lib/CIR/CodeGen/CIRGenValue.h
clang/lib/CIR/CodeGen/CMakeLists.txt
clang/test/CIR/CodeGen/array.cpp
clang/test/CIR/Lowering/array.cpp
Removed:
################################################################################
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index bacb7879a5527..c39590421b647 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -141,6 +141,8 @@ struct MissingFeatures {
static bool mangledNames() { return false; }
static bool setDLLStorageClass() { return false; }
static bool openMP() { return false; }
+ static bool emitCheckedInBoundsGEP() { return false; }
+ static bool preservedAccessIndexRegion() { return false; }
// Missing types
static bool dataMemberType() { return false; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp
new file mode 100644
index 0000000000000..2f1940e656172
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp
@@ -0,0 +1,40 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenBuilder.h"
+
+using namespace clang::CIRGen;
+
+mlir::Value CIRGenBuilderTy::maybeBuildArrayDecay(mlir::Location loc,
+ mlir::Value arrayPtr,
+ mlir::Type eltTy) {
+ const auto arrayPtrTy = mlir::cast<cir::PointerType>(arrayPtr.getType());
+ const auto arrayTy = mlir::dyn_cast<cir::ArrayType>(arrayPtrTy.getPointee());
+
+ if (arrayTy) {
+ const cir::PointerType flatPtrTy = getPointerTo(arrayTy.getEltType());
+ return create<cir::CastOp>(loc, flatPtrTy, cir::CastKind::array_to_ptrdecay,
+ arrayPtr);
+ }
+
+ assert(arrayPtrTy.getPointee() == eltTy &&
+ "flat pointee type must match original array element type");
+ return arrayPtr;
+}
+
+mlir::Value CIRGenBuilderTy::getArrayElement(mlir::Location arrayLocBegin,
+ mlir::Location arrayLocEnd,
+ mlir::Value arrayPtr,
+ mlir::Type eltTy, mlir::Value idx,
+ bool shouldDecay) {
+ mlir::Value basePtr = arrayPtr;
+ if (shouldDecay)
+ basePtr = maybeBuildArrayDecay(arrayLocBegin, arrayPtr, eltTy);
+ const mlir::Type flatPtrTy = basePtr.getType();
+ return create<cir::PtrStrideOp>(arrayLocEnd, flatPtrTy, basePtr, idx);
+}
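As a rough usage sketch of these two builder helpers (this mirrors the emitArraySubscriptPtr helper added to CIRGenExpr.cpp later in this patch; `builder`, `beginLoc`, `endLoc`, `arrayPtr`, `eltTy`, and `idx` are assumed to be in scope and the names are illustrative):

    // `arrayPtr` has type !cir.ptr<!cir.array<!s32i x 10>>, `eltTy` is !s32i,
    // and `idx` is a scalar index value.
    mlir::Value elemPtr = builder.getArrayElement(beginLoc, endLoc, arrayPtr,
                                                  eltTy, idx, /*shouldDecay=*/true);
    // With shouldDecay = true this produces, as checked in the updated tests:
    //   %p = cir.cast(array_to_ptrdecay, %arr : !cir.ptr<!cir.array<!s32i x 10>>), !cir.ptr<!s32i>
    //   %e = cir.ptr_stride(%p : !cir.ptr<!s32i>, %idx : !s32i), !cir.ptr<!s32i>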
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index 61a747254b3d0..0d8568742e960 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -198,6 +198,19 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
return create<cir::BinOp>(loc, cir::BinOpKind::Div, lhs, rhs);
}
+
+ /// Create a cir.ptr_stride operation to get access to an array element.
+ /// \p idx is the index of the element to access; \p shouldDecay is true if
+ /// the result should decay to a pointer to the element type.
+ mlir::Value getArrayElement(mlir::Location arrayLocBegin,
+ mlir::Location arrayLocEnd, mlir::Value arrayPtr,
+ mlir::Type eltTy, mlir::Value idx,
+ bool shouldDecay);
+
+ /// Returns a decayed pointer to the first element of the array
+ /// pointed to by \p arrayPtr.
+ mlir::Value maybeBuildArrayDecay(mlir::Location loc, mlir::Value arrayPtr,
+ mlir::Type eltTy);
};
} // namespace clang::CIRGen
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 4c20170d75131..b38ed4d0a14e8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -12,6 +12,7 @@
#include "Address.h"
#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
#include "CIRGenValue.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "clang/AST/Attr.h"
@@ -430,6 +431,143 @@ LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) {
llvm_unreachable("Unknown unary operator kind!");
}
+/// If the specified expr is a simple decay from an array to pointer,
+/// return the array subexpression.
+/// FIXME: this could be abstracted into a common AST helper.
+static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
+ // If this isn't just an array->pointer decay, bail out.
+ const auto *castExpr = dyn_cast<CastExpr>(e);
+ if (!castExpr || castExpr->getCastKind() != CK_ArrayToPointerDecay)
+ return nullptr;
+
+ // If this is a decay from a variable-width array, bail out.
+ const Expr *subExpr = castExpr->getSubExpr();
+ if (subExpr->getType()->isVariableArrayType())
+ return nullptr;
+
+ return subExpr;
+}
+
+static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
+ // TODO(cir): should we consider using MLIR's IndexType instead of IntegerAttr?
+ if (auto constantOp = dyn_cast<cir::ConstantOp>(idx.getDefiningOp()))
+ return mlir::dyn_cast<cir::IntAttr>(constantOp.getValue());
+ return {};
+}
+
+static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
+ CharUnits eltSize) {
+ // If we have a constant index, we can use the exact offset of the
+ // element we're accessing.
+ const cir::IntAttr constantIdx = getConstantIndexOrNull(idx);
+ if (constantIdx) {
+ const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
+ return arrayAlign.alignmentAtOffset(offset);
+ }
+ // Otherwise, use the worst-case alignment for any element.
+ return arrayAlign.alignmentOfArrayElement(eltSize);
+}
+
+static QualType getFixedSizeElementType(const ASTContext &astContext,
+ const VariableArrayType *vla) {
+ QualType eltType;
+ do {
+ eltType = vla->getElementType();
+ } while ((vla = astContext.getAsVariableArrayType(eltType)));
+ return eltType;
+}
+
+static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf,
+ mlir::Location beginLoc,
+ mlir::Location endLoc, mlir::Value ptr,
+ mlir::Type eltTy, mlir::Value idx,
+ bool shouldDecay) {
+ CIRGenModule &cgm = cgf.getCIRGenModule();
+ // TODO(cir): LLVM codegen emits an in-bounds GEP check here; is there
+ // anything that would help track this later in CIR?
+ assert(!cir::MissingFeatures::emitCheckedInBoundsGEP());
+ return cgm.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx,
+ shouldDecay);
+}
+
+static Address emitArraySubscriptPtr(CIRGenFunction &cgf,
+ mlir::Location beginLoc,
+ mlir::Location endLoc, Address addr,
+ QualType eltType, mlir::Value idx,
+ mlir::Location loc, bool shouldDecay) {
+
+ // Determine the element size of the statically-sized base. This is
+ // the thing that the indices are expressed in terms of.
+ if (const VariableArrayType *vla =
+ cgf.getContext().getAsVariableArrayType(eltType)) {
+ eltType = getFixedSizeElementType(cgf.getContext(), vla);
+ }
+
+ // We can use that to compute the best alignment of the element.
+ const CharUnits eltSize = cgf.getContext().getTypeSizeInChars(eltType);
+ const CharUnits eltAlign =
+ getArrayElementAlign(addr.getAlignment(), idx, eltSize);
+
+ assert(!cir::MissingFeatures::preservedAccessIndexRegion());
+ const mlir::Value eltPtr =
+ emitArraySubscriptPtr(cgf, beginLoc, endLoc, addr.getPointer(),
+ addr.getElementType(), idx, shouldDecay);
+ const mlir::Type elementType = cgf.convertTypeForMem(eltType);
+ return Address(eltPtr, elementType, eltAlign);
+}
+
+LValue
+CIRGenFunction::emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e) {
+ if (e->getBase()->getType()->isVectorType() &&
+ !isa<ExtVectorElementExpr>(e->getBase())) {
+ cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: VectorType");
+ return LValue::makeAddr(Address::invalid(), e->getType(), LValueBaseInfo());
+ }
+
+ if (isa<ExtVectorElementExpr>(e->getBase())) {
+ cgm.errorNYI(e->getSourceRange(),
+ "emitArraySubscriptExpr: ExtVectorElementExpr");
+ return LValue::makeAddr(Address::invalid(), e->getType(), LValueBaseInfo());
+ }
+
+ if (getContext().getAsVariableArrayType(e->getType())) {
+ cgm.errorNYI(e->getSourceRange(),
+ "emitArraySubscriptExpr: VariableArrayType");
+ return LValue::makeAddr(Address::invalid(), e->getType(), LValueBaseInfo());
+ }
+
+ if (e->getType()->getAs<ObjCObjectType>()) {
+ cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjCObjectType");
+ return LValue::makeAddr(Address::invalid(), e->getType(), LValueBaseInfo());
+ }
+
+ // The index must always be an integer, which is not an aggregate. Emit it
+ // in lexical order (this complexity is, sadly, required by C++17).
+ assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
+ "index was neither LHS nor RHS");
+ const mlir::Value idx = emitScalarExpr(e->getIdx());
+ if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
+ LValue arrayLV;
+ if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
+ arrayLV = emitArraySubscriptExpr(ase);
+ else
+ arrayLV = emitLValue(array);
+
+ // Propagate the alignment from the array itself to the result.
+ const Address addr = emitArraySubscriptPtr(
+ *this, cgm.getLoc(array->getBeginLoc()), cgm.getLoc(array->getEndLoc()),
+ arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
+ /*shouldDecay=*/true);
+
+ return LValue::makeAddr(addr, e->getType(), LValueBaseInfo());
+ }
+
+ // The base must be a pointer; emit it with an estimate of its alignment.
+ cgm.errorNYI(e->getSourceRange(),
+ "emitArraySubscriptExpr: The base must be a pointer");
+ return {};
+}
+
LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) {
// Comma expressions just emit their LHS then their RHS as an l-value.
if (e->getOpcode() == BO_Comma) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 94b5d955715ae..c2b4110a772a0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -158,6 +158,16 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
mlir::Value VisitCastExpr(CastExpr *e);
mlir::Value VisitCallExpr(const CallExpr *e);
+ mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
+ if (e->getBase()->getType()->isVectorType()) {
+ assert(!cir::MissingFeatures::scalableVectors());
+ cgf.getCIRGenModule().errorNYI("VisitArraySubscriptExpr: VectorType");
+ return {};
+ }
+ // Just load the lvalue formed by the subscript expression.
+ return emitLoadOfLValue(e);
+ }
+
mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *e) {
return VisitCastExpr(e);
}
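For the scalar emitter above: a subscript whose base has vector type is reported as NYI, while an ordinary array subscript simply reuses the l-value path and loads the element. A hedged example, assuming the usual vector_size extension (the typedef and function are illustrative only):

    typedef int v4i __attribute__((vector_size(16)));

    int scalar_uses(v4i v, int i) {
      int arr[4] = {0, 1, 2, 3};
      int x = arr[i]; // ordinary subscript: emitLoadOfLValue on the subscript l-value
      int y = v[i];   // vector-typed base: errorNYI("VisitArraySubscriptExpr: VectorType") for now
      return x + y;
    }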
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index b123fc3bb1117..9dace721e7417 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -509,6 +509,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
std::string("l-value not implemented for '") +
e->getStmtClassName() + "'");
return LValue();
+ case Expr::ArraySubscriptExprClass:
+ return emitArraySubscriptExpr(cast<ArraySubscriptExpr>(e));
case Expr::UnaryOperatorClass:
return emitUnaryOpLValue(cast<UnaryOperator>(e));
case Expr::BinaryOperatorClass:
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 311aca8cd3f6f..17aa68492f983 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -434,6 +434,8 @@ class CIRGenFunction : public CIRGenTypeCache {
/// should be returned.
RValue emitAnyExpr(const clang::Expr *e);
+ LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e);
+
AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d);
/// Emit code and set up symbol table for a variable declaration with auto,
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index 1b702daae4b4c..d4d6f5a44622e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -23,6 +23,8 @@
#include "mlir/IR/Value.h"
+#include "clang/CIR/MissingFeatures.h"
+
namespace clang::CIRGen {
/// This trivial value class is used to represent the result of an
diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt
index 16534d9a72587..dc18f7f2af160 100644
--- a/clang/lib/CIR/CodeGen/CMakeLists.txt
+++ b/clang/lib/CIR/CodeGen/CMakeLists.txt
@@ -8,6 +8,7 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
add_clang_library(clangCIR
CIRGenerator.cpp
+ CIRGenBuilder.cpp
CIRGenCall.cpp
CIRGenDecl.cpp
CIRGenDeclOpenACC.cpp
diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp
index 0d28ebc66f83c..431164c797f76 100644
--- a/clang/test/CIR/CodeGen/array.cpp
+++ b/clang/test/CIR/CodeGen/array.cpp
@@ -1,141 +1,368 @@
-// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - 2>&1 | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
int a[10];
-// CHECK: cir.global external @a = #cir.zero : !cir.array<!s32i x 10>
+// CIR: cir.global external @a = #cir.zero : !cir.array<!s32i x 10>
+
+// LLVM: @a = dso_local global [10 x i32] zeroinitializer
+
+// OGCG: @a = global [10 x i32] zeroinitializer
int aa[10][5];
-// CHECK: cir.global external @aa = #cir.zero : !cir.array<!cir.array<!s32i x 5> x 10>
+// CIR: cir.global external @aa = #cir.zero : !cir.array<!cir.array<!s32i x 5> x 10>
+
+// LLVM: @aa = dso_local global [10 x [5 x i32]] zeroinitializer
+
+// OGCG: @aa = global [10 x [5 x i32]] zeroinitializer
extern int b[10];
-// CHECK: cir.global external @b = #cir.zero : !cir.array<!s32i x 10>
+// CIR: cir.global external @b = #cir.zero : !cir.array<!s32i x 10>
+
+// LLVM: @b = dso_local global [10 x i32] zeroinitializer
extern int bb[10][5];
-// CHECK: cir.global external @bb = #cir.zero : !cir.array<!cir.array<!s32i x 5> x 10>
+// CIR: cir.global external @bb = #cir.zero : !cir.array<!cir.array<!s32i x 5> x 10>
+
+// LLVM: @bb = dso_local global [10 x [5 x i32]] zeroinitializer
int c[10] = {};
-// CHECK: cir.global external @c = #cir.zero : !cir.array<!s32i x 10>
+// CIR: cir.global external @c = #cir.zero : !cir.array<!s32i x 10>
+
+// LLVM: @c = dso_local global [10 x i32] zeroinitializer
+
+// OGCG: @c = global [10 x i32] zeroinitializer
int d[3] = {1, 2, 3};
-// CHECK: cir.global external @d = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array<!s32i x 3>
+// CIR: cir.global external @d = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array<!s32i x 3>
+
+// LLVM: @d = dso_local global [3 x i32] [i32 1, i32 2, i32 3]
+
+// OGCG: @d = global [3 x i32] [i32 1, i32 2, i32 3]
int dd[3][2] = {{1, 2}, {3, 4}, {5, 6}};
-// CHECK: cir.global external @dd = #cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array<!s32i x 2>, #cir.const_array<[#cir.int<3> : !s32i, #cir.int<4> : !s32i]> : !cir.array<!s32i x 2>, #cir.const_array<[#cir.int<5> : !s32i, #cir.int<6> : !s32i]> : !cir.array<!s32i x 2>]> : !cir.array<!cir.array<!s32i x 2> x 3>
+// CIR: cir.global external @dd = #cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array<!s32i x 2>, #cir.const_array<[#cir.int<3> : !s32i, #cir.int<4> : !s32i]> : !cir.array<!s32i x 2>, #cir.const_array<[#cir.int<5> : !s32i, #cir.int<6> : !s32i]> : !cir.array<!s32i x 2>]> : !cir.array<!cir.array<!s32i x 2> x 3>
+
+// LLVM: @dd = dso_local global [3 x [2 x i32]] [
+// LLVM: [2 x i32] [i32 1, i32 2], [2 x i32]
+// LLVM: [i32 3, i32 4], [2 x i32] [i32 5, i32 6]]
+
+// OGCG: @dd = global [3 x [2 x i32]] [
+// OGCG: [2 x i32] [i32 1, i32 2], [2 x i32]
+// OGCG: [i32 3, i32 4], [2 x i32] [i32 5, i32 6]]
int e[10] = {1, 2};
-// CHECK: cir.global external @e = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i], trailing_zeros> : !cir.array<!s32i x 10>
+// CIR: cir.global external @e = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i], trailing_zeros> : !cir.array<!s32i x 10>
+
+// LLVM: @e = dso_local global [10 x i32] [i32 1, i32 2, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0]
int f[5] = {1, 2};
-// CHECK: cir.global external @f = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<0> : !s32i, #cir.int<0> : !s32i, #cir.int<0> : !s32i]> : !cir.array<!s32i x 5>
+// CIR: cir.global external @f = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<0> : !s32i, #cir.int<0> : !s32i, #cir.int<0> : !s32i]> : !cir.array<!s32i x 5>
+
+// LLVM: @f = dso_local global [5 x i32] [i32 1, i32 2, i32 0, i32 0, i32 0]
+
+// OGCG: @f = global [5 x i32] [i32 1, i32 2, i32 0, i32 0, i32 0]
+
+// OGCG: @[[FUN2_ARR:.*]] = private unnamed_addr constant [2 x i32] [i32 5, i32 0], align 4
+// OGCG: @[[FUN3_ARR:.*]] = private unnamed_addr constant [2 x i32] [i32 5, i32 6], align 4
+// OGCG: @[[FUN4_ARR:.*]] = private unnamed_addr constant [2 x [1 x i32]] [
+// OGCG: [1 x i32] [i32 5], [1 x i32] [i32 6]], align 4
+// OGCG: @[[FUN5_ARR:.*]] = private unnamed_addr constant [2 x [1 x i32]] [
+// OGCG: [1 x i32] [i32 5], [1 x i32] zeroinitializer], align 4
void func() {
int arr[10];
-
- // CHECK: %[[ARR:.*]] = cir.alloca !cir.array<!s32i x 10>, !cir.ptr<!cir.array<!s32i x 10>>, ["arr"]
+ int e = arr[0];
+ int e2 = arr[1];
}
+// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!s32i x 10>, !cir.ptr<!cir.array<!s32i x 10>>, ["arr"]
+// CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init]
+// CIR: %[[INIT_2:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e2", init]
+// CIR: %[[IDX:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!s32i x 10>>), !cir.ptr<!s32i>
+// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!s32i>, %[[IDX]] : !s32i), !cir.ptr<!s32i>
+// CIR: %[[TMP:.*]] = cir.load %[[ELE_PTR]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store %[[TMP]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[IDX:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!s32i x 10>>), !cir.ptr<!s32i>
+// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!s32i>, %[[IDX]] : !s32i), !cir.ptr<!s32i>
+// CIR: %[[TMP:.*]] = cir.load %[[ELE_PTR]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store %[[TMP]], %[[INIT_2]] : !s32i, !cir.ptr<!s32i>
+
+// LLVM: define void @func()
+// LLVM-NEXT: %[[ARR_ALLOCA:.*]] = alloca [10 x i32], i64 1, align 16
+// LLVM-NEXT: %[[INIT:.*]] = alloca i32, i64 1, align 4
+// LLVM-NEXT: %[[INIT_2:.*]] = alloca i32, i64 1, align 4
+// LLVM-NEXT: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR_ALLOCA]], i32 0
+// LLVM-NEXT: %[[ELE_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 0
+// LLVM-NEXT: %[[TMP:.*]] = load i32, ptr %[[ELE_PTR]], align 4
+// LLVM-NEXT: store i32 %[[TMP]], ptr %[[INIT]], align 4
+// LLVM-NEXT: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR_ALLOCA]], i32 0
+// LLVM-NEXT: %[[ELE_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 1
+// LLVM-NEXT: %[[TMP:.*]] = load i32, ptr %[[ELE_PTR]], align 4
+
+// OGCG: %arr = alloca [10 x i32], align 16
+// OGCG: %e = alloca i32, align 4
+// OGCG: %e2 = alloca i32, align 4
+// OGCG: %arrayidx = getelementptr inbounds [10 x i32], ptr %arr, i64 0, i64 0
+// OGCG: %0 = load i32, ptr %arrayidx, align 16
+// OGCG: store i32 %0, ptr %e, align 4
+// OGCG: %arrayidx1 = getelementptr inbounds [10 x i32], ptr %arr, i64 0, i64 1
+// OGCG: %1 = load i32, ptr %arrayidx1, align 4
+// OGCG: store i32 %1, ptr %e2, align 4
+
void func2() {
int arr[2] = {5};
-
- // CHECK: %[[ARR2:.*]] = cir.alloca !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>>, ["arr", init]
- // CHECK: %[[ELE_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp", init]
- // CHECK: %[[ARR_2_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR2]] : !cir.ptr<!cir.array<!s32i x 2>>), !cir.ptr<!s32i>
- // CHECK: %[[V1:.*]] = cir.const #cir.int<5> : !s32i
- // CHECK: cir.store %[[V1]], %[[ARR_2_PTR]] : !s32i, !cir.ptr<!s32i>
- // CHECK: %[[OFFSET_0:.*]] = cir.const #cir.int<1> : !s64i
- // CHECK: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_2_PTR]] : !cir.ptr<!s32i>, %[[OFFSET_0]] : !s64i), !cir.ptr<!s32i>
- // CHECK: cir.store %[[ELE_PTR]], %[[ELE_ALLOCA]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
- // CHECK: %[[LOAD_1:.*]] = cir.load %[[ELE_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
- // CHECK: %[[V2:.*]] = cir.const #cir.int<0> : !s32i
- // CHECK: cir.store %[[V2]], %[[LOAD_1]] : !s32i, !cir.ptr<!s32i>
- // CHECK: %[[OFFSET_1:.*]] = cir.const #cir.int<1> : !s64i
- // CHECK: %[[ELE_1_PTR:.*]] = cir.ptr_stride(%[[LOAD_1]] : !cir.ptr<!s32i>, %[[OFFSET_1]] : !s64i), !cir.ptr<!s32i>
- // CHECK: cir.store %[[ELE_1_PTR]], %[[ELE_ALLOCA]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
}
+// CIR: %[[ARR2:.*]] = cir.alloca !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>>, ["arr", init]
+// CIR: %[[ELE_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp", init]
+// CIR: %[[ARR_2_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR2]] : !cir.ptr<!cir.array<!s32i x 2>>), !cir.ptr<!s32i>
+// CIR: %[[V1:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: cir.store %[[V1]], %[[ARR_2_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[OFFSET_0:.*]] = cir.const #cir.int<1> : !s64i
+// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_2_PTR]] : !cir.ptr<!s32i>, %[[OFFSET_0]] : !s64i), !cir.ptr<!s32i>
+// CIR: cir.store %[[ELE_PTR]], %[[ELE_ALLOCA]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR: %[[LOAD_1:.*]] = cir.load %[[ELE_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR: %[[V2:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store %[[V2]], %[[LOAD_1]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[OFFSET_1:.*]] = cir.const #cir.int<1> : !s64i
+// CIR: %[[ELE_1_PTR:.*]] = cir.ptr_stride(%[[LOAD_1]] : !cir.ptr<!s32i>, %[[OFFSET_1]] : !s64i), !cir.ptr<!s32i>
+// CIR: cir.store %[[ELE_1_PTR]], %[[ELE_ALLOCA]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+
+// LLVM: define void @func2()
+// LLVM: %[[ARR_ALLOCA:.*]] = alloca [2 x i32], i64 1, align 4
+// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8
+// LLVM: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR_ALLOCA]], i32 0
+// LLVM: store i32 5, ptr %[[ARR_PTR]], align 4
+// LLVM: %[[ELE_1_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 1
+// LLVM: store ptr %[[ELE_1_PTR]], ptr %[[TMP]], align 8
+// LLVM: %[[TMP2:.*]] = load ptr, ptr %[[TMP]], align 8
+// LLVM: store i32 0, ptr %[[TMP2]], align 4
+// LLVM: %[[ELE_1:.*]] = getelementptr i32, ptr %[[TMP2]], i64 1
+// LLVM: store ptr %[[ELE_1]], ptr %[[TMP]], align 8
+
+// OGCG: %arr = alloca [2 x i32], align 4
+// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %arr, ptr align 4 @[[FUN2_ARR]], i64 8, i1 false)
+
void func3() {
int arr[2] = {5, 6};
- // CHECK: %[[ARR3:.*]] = cir.alloca !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>>, ["arr", init]
- // CHECK: %[[ARR_3_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR3]] : !cir.ptr<!cir.array<!s32i x 2>>), !cir.ptr<!s32i>
- // CHECK: %[[V0:.*]] = cir.const #cir.int<5> : !s32i
- // CHECK: cir.store %[[V0]], %[[ARR_3_PTR]] : !s32i, !cir.ptr<!s32i>
- // CHECK: %[[OFFSET_0:.*]] = cir.const #cir.int<1> : !s64i
- // CHECK: %[[ELE_1_PTR:.*]] = cir.ptr_stride(%[[ARR_3_PTR]] : !cir.ptr<!s32i>, %[[OFFSET_0]] : !s64i), !cir.ptr<!s32i>
- // CHECK: %[[V1:.*]] = cir.const #cir.int<6> : !s32i
- // CHECK: cir.store %[[V1]], %[[ELE_1_PTR]] : !s32i, !cir.ptr<!s32i>
+ int idx = 1;
+ int e = arr[idx];
}
+// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>>, ["arr", init]
+// CIR: %[[IDX:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["idx", init]
+// CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init]
+// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!s32i x 2>>), !cir.ptr<!s32i>
+// CIR: %[[V0:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: cir.store %[[V0]], %[[ARR_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[OFFSET_0:.*]] = cir.const #cir.int<1> : !s64i
+// CIR: %[[ELE_1_PTR:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!s32i>, %[[OFFSET_0]] : !s64i), !cir.ptr<!s32i>
+// CIR: %[[V1:.*]] = cir.const #cir.int<6> : !s32i
+// CIR: cir.store %[[V1]], %[[ELE_1_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[IDX_V:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: cir.store %[[IDX_V]], %[[IDX]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP_IDX:.*]] = cir.load %[[IDX]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!s32i x 2>>), !cir.ptr<!s32i>
+// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!s32i>, %[[TMP_IDX]] : !s32i), !cir.ptr<!s32i>
+// CIR: %[[ELE_TMP:.*]] = cir.load %[[ELE_PTR]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store %[[ELE_TMP]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
+
+// LLVM: define void @func3()
+// LLVM: %[[ARR_ALLOCA:.*]] = alloca [2 x i32], i64 1, align 4
+// LLVM: %[[IDX:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR_ALLOCA]], i32 0
+// LLVM: store i32 5, ptr %[[ARR_PTR]], align 4
+// LLVM: %[[ELE_1_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 1
+// LLVM: store i32 6, ptr %[[ELE_1_PTR]], align 4
+// LLVM: store i32 1, ptr %[[IDX]], align 4
+// LLVM: %[[TMP1:.*]] = load i32, ptr %[[IDX]], align 4
+// LLVM: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR_ALLOCA]], i32 0
+// LLVM: %[[IDX_I64:.*]] = sext i32 %[[TMP1]] to i64
+// LLVM: %[[ELE:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 %[[IDX_I64]]
+// LLVM: %[[TMP2:.*]] = load i32, ptr %[[ELE]], align 4
+// LLVM: store i32 %[[TMP2]], ptr %[[INIT]], align 4
+
+// OGCG: %arr = alloca [2 x i32], align 4
+// OGCG: %idx = alloca i32, align 4
+// OGCG: %e = alloca i32, align 4
+// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %arr, ptr align 4 @[[FUN3_ARR]], i64 8, i1 false)
+// OGCG: store i32 1, ptr %idx, align 4
+// OGCG: %0 = load i32, ptr %idx, align 4
+// OGCG: %idxprom = sext i32 %0 to i64
+// OGCG: %arrayidx = getelementptr inbounds [2 x i32], ptr %arr, i64 0, i64 %idxprom
+// OGCG: %1 = load i32, ptr %arrayidx, align 4
+// OGCG: store i32 %1, ptr %e, align 4
+
void func4() {
int arr[2][1] = {{5}, {6}};
-
- // CHECK: %[[ARR:.*]] = cir.alloca !cir.array<!cir.array<!s32i x 1> x 2>, !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>, ["arr", init]
- // CHECK: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>), !cir.ptr<!cir.array<!s32i x 1>>
- // CHECK: %[[ARR_0_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>), !cir.ptr<!s32i>
- // CHECK: %[[V_0_0:.*]] = cir.const #cir.int<5> : !s32i
- // CHECK: cir.store %[[V_0_0]], %[[ARR_0_PTR]] : !s32i, !cir.ptr<!s32i>
- // CHECK: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i
- // CHECK: %[[ARR_1:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, %[[OFFSET]] : !s64i), !cir.ptr<!cir.array<!s32i x 1>>
- // CHECK: %[[ARR_1_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_1]] : !cir.ptr<!cir.array<!s32i x 1>>), !cir.ptr<!s32i>
- // CHECK: %[[V_1_0:.*]] = cir.const #cir.int<6> : !s32i
- // CHECK: cir.store %[[V_1_0]], %[[ARR_1_PTR]] : !s32i, !cir.ptr<!s32i>
+ int e = arr[1][0];
}
+// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!cir.array<!s32i x 1> x 2>, !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>, ["arr", init]
+// CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init]
+// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>), !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: %[[ARR_0_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>), !cir.ptr<!s32i>
+// CIR: %[[V_0_0:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: cir.store %[[V_0_0]], %[[ARR_0_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i
+// CIR: %[[ARR_1:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, %[[OFFSET]] : !s64i), !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: %[[ARR_1_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_1]] : !cir.ptr<!cir.array<!s32i x 1>>), !cir.ptr<!s32i>
+// CIR: %[[V_1_0:.*]] = cir.const #cir.int<6> : !s32i
+// CIR: cir.store %[[V_1_0]], %[[ARR_1_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[IDX:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: %[[IDX_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>), !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: %[[ARR_1:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, %[[IDX_1]] : !s32i), !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: %[[ARR_1_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_1]] : !cir.ptr<!cir.array<!s32i x 1>>), !cir.ptr<!s32i>
+// CIR: %[[ELE_0:.*]] = cir.ptr_stride(%[[ARR_1_PTR]] : !cir.ptr<!s32i>, %[[IDX]] : !s32i), !cir.ptr<!s32i>
+// CIR: %[[TMP:.*]] = cir.load %[[ELE_0]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store %[[TMP]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
+
+// LLVM: define void @func4()
+// LLVM: %[[ARR_ALLOCA:.*]] = alloca [2 x [1 x i32]], i64 1, align 4
+// LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_ALLOCA]], i32 0
+// LLVM: %[[ARR_0_0:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i32 0
+// LLVM: store i32 5, ptr %[[ARR_0_0]], align 4
+// LLVM: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1
+// LLVM: %[[ARR_1_0:.*]] = getelementptr i32, ptr %[[ARR_1]], i32 0
+// LLVM: store i32 6, ptr %[[ARR_1_0]], align 4
+// LLVM: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_ALLOCA]], i32 0
+// LLVM: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1
+// LLVM: %[[ARR_1_0:.*]] = getelementptr i32, ptr %[[ARR_1]], i32 0
+// LLVM: %[[ELE_PTR:.*]] = getelementptr i32, ptr %[[ARR_1_0]], i64 0
+// LLVM: %[[TMP:.*]] = load i32, ptr %[[ELE_PTR]], align 4
+// LLVM: store i32 %[[TMP]], ptr %[[INIT]], align 4
+
+// OGCG: %arr = alloca [2 x [1 x i32]], align 4
+// OGCG: %e = alloca i32, align 4
+// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %arr, ptr align 4 @[[FUN4_ARR]], i64 8, i1 false)
+// OGCG: %arrayidx = getelementptr inbounds [2 x [1 x i32]], ptr %arr, i64 0, i64 1
+// OGCG: %arrayidx1 = getelementptr inbounds [1 x i32], ptr %arrayidx, i64 0, i64 0
+// OGCG: %0 = load i32, ptr %arrayidx1, align 4
+// OGCG: store i32 %0, ptr %e, align 4
+
void func5() {
int arr[2][1] = {{5}};
-
- // CHECK: %[[ARR:.*]] = cir.alloca !cir.array<!cir.array<!s32i x 1> x 2>, !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>, ["arr", init]
- // CHECK: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>, ["arrayinit.temp", init]
- // CHECK: %[[ARR_0:.*]] = cir.cast(array_to_ptrdecay, %0 : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>), !cir.ptr<!cir.array<!s32i x 1>>
- // CHECK: %[[ARR_0_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_0]] : !cir.ptr<!cir.array<!s32i x 1>>), !cir.ptr<!s32i>
- // CHECK: %[[V_0_0:.*]] = cir.const #cir.int<5> : !s32i
- // CHECK: cir.store %[[V_0_0]], %[[ARR_0_PTR]] : !s32i, !cir.ptr<!s32i>
- // CHECK: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i
- // CHECK: %6 = cir.ptr_stride(%[[ARR_0]] : !cir.ptr<!cir.array<!s32i x 1>>, %[[OFFSET]] : !s64i), !cir.ptr<!cir.array<!s32i x 1>>
- // CHECK: cir.store %6, %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>
- // CHECK: %7 = cir.load %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>, !cir.ptr<!cir.array<!s32i x 1>>
- // CHECK: %8 = cir.const #cir.zero : !cir.array<!s32i x 1>
- // CHECK: cir.store %8, %7 : !cir.array<!s32i x 1>, !cir.ptr<!cir.array<!s32i x 1>>
- // CHECK: %[[OFFSET_1:.*]] = cir.const #cir.int<1> : !s64i
- // CHECK: %10 = cir.ptr_stride(%7 : !cir.ptr<!cir.array<!s32i x 1>>, %[[OFFSET_1]] : !s64i), !cir.ptr<!cir.array<!s32i x 1>>
- // CHECK: cir.store %10, %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>
}
+// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!cir.array<!s32i x 1> x 2>, !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>, ["arr", init]
+// CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>, ["arrayinit.temp", init]
+// CIR: %[[ARR_0:.*]] = cir.cast(array_to_ptrdecay, %0 : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>), !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: %[[ARR_0_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_0]] : !cir.ptr<!cir.array<!s32i x 1>>), !cir.ptr<!s32i>
+// CIR: %[[V_0_0:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: cir.store %[[V_0_0]], %[[ARR_0_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i
+// CIR: %6 = cir.ptr_stride(%[[ARR_0]] : !cir.ptr<!cir.array<!s32i x 1>>, %[[OFFSET]] : !s64i), !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: cir.store %6, %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>
+// CIR: %7 = cir.load %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>, !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: %8 = cir.const #cir.zero : !cir.array<!s32i x 1>
+// CIR: cir.store %8, %7 : !cir.array<!s32i x 1>, !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: %[[OFFSET_1:.*]] = cir.const #cir.int<1> : !s64i
+// CIR: %10 = cir.ptr_stride(%7 : !cir.ptr<!cir.array<!s32i x 1>>, %[[OFFSET_1]] : !s64i), !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: cir.store %10, %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>
+
+// LLVM: define void @func5()
+// LLVM: %[[ARR_ALLOCA:.*]] = alloca [2 x [1 x i32]], i64 1, align 4
+// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8
+// LLVM: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_ALLOCA]], i32 0
+// LLVM: %[[ARR_0:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i32 0
+// LLVM: store i32 5, ptr %[[ARR_0]], align 4
+// LLVM: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1
+// LLVM: store ptr %[[ARR_1]], ptr %[[TMP]], align 8
+// LLVM: %[[ARR_1_VAL:.*]] = load ptr, ptr %[[TMP]], align 8
+// LLVM: store [1 x i32] zeroinitializer, ptr %[[ARR_1_VAL]], align 4
+// LLVM: %[[ARR_1_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_1_VAL]], i64 1
+// LLVM: store ptr %[[ARR_1_PTR]], ptr %[[TMP]], align 8
+
+// OGCG: %arr = alloca [2 x [1 x i32]], align 4
+// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %arr, ptr align 4 @[[FUN5_ARR]], i64 8, i1 false)
+
void func6() {
int x = 4;
int arr[2] = { x, 5 };
-
- // CHECK: %[[VAR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init]
- // CHECK: %[[ARR:.*]] = cir.alloca !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>>, ["arr", init]
- // CHECK: %[[V:.*]] = cir.const #cir.int<4> : !s32i
- // CHECK: cir.store %[[V]], %[[VAR]] : !s32i, !cir.ptr<!s32i>
- // CHECK: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!s32i x 2>>), !cir.ptr<!s32i>
- // CHECK: %[[TMP:.*]] = cir.load %[[VAR]] : !cir.ptr<!s32i>, !s32i
- // CHECK: cir.store %[[TMP]], %[[ARR_PTR]] : !s32i, !cir.ptr<!s32i>
- // CHECK: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i
- // CHECK: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!s32i>, %[[OFFSET]] : !s64i), !cir.ptr<!s32i>
- // CHECK: %[[V1:.*]] = cir.const #cir.int<5> : !s32i
- // CHECK: cir.store %[[V1]], %[[ELE_PTR]] : !s32i, !cir.ptr<!s32i>
}
+// CIR: %[[VAR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init]
+// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>>, ["arr", init]
+// CIR: %[[V:.*]] = cir.const #cir.int<4> : !s32i
+// CIR: cir.store %[[V]], %[[VAR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!s32i x 2>>), !cir.ptr<!s32i>
+// CIR: %[[TMP:.*]] = cir.load %[[VAR]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store %[[TMP]], %[[ARR_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i
+// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!s32i>, %[[OFFSET]] : !s64i), !cir.ptr<!s32i>
+// CIR: %[[V1:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: cir.store %[[V1]], %[[ELE_PTR]] : !s32i, !cir.ptr<!s32i>
+
+// LLVM: define void @func6()
+// LLVM: %[[VAR:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[ARR:.*]] = alloca [2 x i32], i64 1, align 4
+// LLVM: store i32 4, ptr %[[VAR]], align 4
+// LLVM: %[[ELE_0:.*]] = getelementptr i32, ptr %[[ARR]], i32 0
+// LLVM: %[[TMP:.*]] = load i32, ptr %[[VAR]], align 4
+// LLVM: store i32 %[[TMP]], ptr %[[ELE_0]], align 4
+// LLVM: %[[ELE_1:.*]] = getelementptr i32, ptr %[[ELE_0]], i64 1
+// LLVM: store i32 5, ptr %[[ELE_1]], align 4
+
+// OGCG: %x = alloca i32, align 4
+// OGCG: %arr = alloca [2 x i32], align 4
+// OGCG: store i32 4, ptr %x, align 4
+// OGCG: %0 = load i32, ptr %x, align 4
+// OGCG: store i32 %0, ptr %arr, align 4
+// OGCG: %arrayinit.element = getelementptr inbounds i32, ptr %arr, i64 1
+// OGCG: store i32 5, ptr %arrayinit.element, align 4
+
void func7() {
int* arr[1] = {};
-
- // CHECK: %[[ARR:.*]] = cir.alloca !cir.array<!cir.ptr<!s32i> x 1>, !cir.ptr<!cir.array<!cir.ptr<!s32i> x 1>>, ["arr", init]
- // CHECK: %[[ARR_TMP:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, ["arrayinit.temp", init]
- // CHECK: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 1>>), !cir.ptr<!cir.ptr<!s32i>>
- // CHECK: cir.store %[[ARR_PTR]], %[[ARR_TMP]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
- // CHECK: %[[TMP:.*]] = cir.load %[[ARR_TMP]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!s32i>>
- // CHECK: %[[NULL_PTR:.*]] = cir.const #cir.ptr<null> : !cir.ptr<!s32i>
- // CHECK: cir.store %[[NULL_PTR]], %[[TMP]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
- // CHECK: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i
- // CHECK: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[TMP]] : !cir.ptr<!cir.ptr<!s32i>>, %[[OFFSET]] : !s64i), !cir.ptr<!cir.ptr<!s32i>>
- // CHECK: cir.store %[[ELE_PTR]], %[[ARR_TMP]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
}
+// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!cir.ptr<!s32i> x 1>, !cir.ptr<!cir.array<!cir.ptr<!s32i> x 1>>, ["arr", init]
+// CIR: %[[ARR_TMP:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, ["arrayinit.temp", init]
+// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 1>>), !cir.ptr<!cir.ptr<!s32i>>
+// CIR: cir.store %[[ARR_PTR]], %[[ARR_TMP]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CIR: %[[TMP:.*]] = cir.load %[[ARR_TMP]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR: %[[NULL_PTR:.*]] = cir.const #cir.ptr<null> : !cir.ptr<!s32i>
+// CIR: cir.store %[[NULL_PTR]], %[[TMP]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i
+// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[TMP]] : !cir.ptr<!cir.ptr<!s32i>>, %[[OFFSET]] : !s64i), !cir.ptr<!cir.ptr<!s32i>>
+// CIR: cir.store %[[ELE_PTR]], %[[ARR_TMP]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+
+// LLVM: define void @func7()
+// LLVM: %[[ARR:.*]] = alloca [1 x ptr], i64 1, align 8
+// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8
+// LLVM: %[[ELE_PTR:.*]] = getelementptr ptr, ptr %[[ARR]], i32 0
+// LLVM: store ptr %[[ELE_PTR]], ptr %[[ALLOCA]], align 8
+// LLVM: %[[TMP:.*]] = load ptr, ptr %[[ALLOCA]], align 8
+// LLVM: store ptr null, ptr %[[TMP]], align 8
+// LLVM: %[[ELE:.*]] = getelementptr ptr, ptr %[[TMP]], i64 1
+// LLVM: store ptr %[[ELE]], ptr %[[ALLOCA]], align 8
+
+// OGCG: %[[ARR:.*]] = alloca [1 x ptr], align 8
+// OGCG: call void @llvm.memset.p0.i64(ptr align 8 %[[ARR]], i8 0, i64 8, i1 false)
+
void func8(int p[10]) {}
-// CHECK: cir.func @func8(%arg0: !cir.ptr<!s32i>
-// CHECK: cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["p", init]
+// CIR: cir.func @func8(%arg0: !cir.ptr<!s32i>
+// CIR: cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["p", init]
+
+// LLVM: define void @func8(ptr {{%.*}})
+// LLVM-NEXT: alloca ptr, i64 1, align 8
+
+// OGCG: alloca ptr, align 8
void func9(int pp[10][5]) {}
-// CHECK: cir.func @func9(%arg0: !cir.ptr<!cir.array<!s32i x 5>>
-// CHECK: cir.alloca !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>
+// CIR: cir.func @func9(%arg0: !cir.ptr<!cir.array<!s32i x 5>>
+// CIR: cir.alloca !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>
+
+// LLVM: define void @func9(ptr {{%.*}})
+// LLVM-NEXT: alloca ptr, i64 1, align 8
+
+// OGCG: alloca ptr, align 8
diff --git a/clang/test/CIR/Lowering/array.cpp b/clang/test/CIR/Lowering/array.cpp
index e1c977eb43141..036a7b4f2d613 100644
--- a/clang/test/CIR/Lowering/array.cpp
+++ b/clang/test/CIR/Lowering/array.cpp
@@ -31,12 +31,24 @@ int f[5] = {1, 2};
void func() {
int arr[10];
+ int e = arr[0];
+ int e2 = arr[1];
}
// CHECK: define void @func()
-// CHECK-NEXT: alloca [10 x i32], i64 1, align 16
+// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = alloca [10 x i32], i64 1, align 16
+// CHECK-NEXT: %[[INIT:.*]] = alloca i32, i64 1, align 4
+// CHECK-NEXT: %[[INIT_2:.*]] = alloca i32, i64 1, align 4
+// CHECK-NEXT: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR_ALLOCA]], i32 0
+// CHECK-NEXT: %[[ELE_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 0
+// CHECK-NEXT: %[[TMP:.*]] = load i32, ptr %[[ELE_PTR]], align 4
+// CHECK-NEXT: store i32 %[[TMP]], ptr %[[INIT]], align 4
+// CHECK-NEXT: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR_ALLOCA]], i32 0
+// CHECK-NEXT: %[[ELE_PTR:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i64 1
+// CHECK-NEXT: %[[TMP:.*]] = load i32, ptr %[[ELE_PTR]], align 4
+// CHECK-NEXT: store i32 %[[TMP]], ptr %[[INIT_2]], align 4
void func2() {
- int arr2[2] = {5};
+ int arr[2] = {5};
}
// CHECK: define void @func2()
// CHECK: %[[ARR_ALLOCA:.*]] = alloca [2 x i32], i64 1, align 4
@@ -61,19 +73,27 @@ void func3() {
// CHECK: store i32 6, ptr %[[ELE_1_PTR]], align 4
void func4() {
- int arr4[2][1] = {{5}, {6}};
+ int arr[2][1] = {{5}, {6}};
+ int e = arr[1][0];
}
// CHECK: define void @func4()
// CHECK: %[[ARR_ALLOCA:.*]] = alloca [2 x [1 x i32]], i64 1, align 4
-// CHECK: %[[ARR_0:.*]] = getelementptr [1 x i32], ptr %[[ARR_ALLOCA]], i32 0
-// CHECK: %[[ARR_0_ELE_0:.*]] = getelementptr i32, ptr %[[ARR_0]], i32 0
-// CHECK: store i32 5, ptr %[[ARR_0_ELE_0]], align 4
-// CHECK: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %2, i64 1
-// CHECK: %[[ARR_0_ELE_0:.*]] = getelementptr i32, ptr %[[ARR_1]], i32 0
-// CHECK: store i32 6, ptr %[[ARR_0_ELE_0]], align 4
+// CHECK: %[[INIT:.*]] = alloca i32, i64 1, align 4
+// CHECK: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_ALLOCA]], i32 0
+// CHECK: %[[ARR_0_0:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i32 0
+// CHECK: store i32 5, ptr %[[ARR_0_0]], align 4
+// CHECK: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1
+// CHECK: %[[ARR_1_0:.*]] = getelementptr i32, ptr %[[ARR_1]], i32 0
+// CHECK: store i32 6, ptr %[[ARR_1_0]], align 4
+// CHECK: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR_ALLOCA]], i32 0
+// CHECK: %[[ARR_1:.*]] = getelementptr [1 x i32], ptr %[[ARR_PTR]], i64 1
+// CHECK: %[[ARR_1_0:.*]] = getelementptr i32, ptr %[[ARR_1]], i32 0
+// CHECK: %[[ELE_PTR:.*]] = getelementptr i32, ptr %[[ARR_1_0]], i64 0
+// CHECK: %[[TMP:.*]] = load i32, ptr %[[ELE_PTR]], align 4
+// CHECK: store i32 %[[TMP]], ptr %[[INIT]], align 4
void func5() {
- int arr5[2][1] = {{5}};
+ int arr[2][1] = {{5}};
}
// CHECK: define void @func5()
// CHECK: %[[ARR_ALLOCA:.*]] = alloca [2 x [1 x i32]], i64 1, align 4