[flang-commits] [flang] bd7eff1 - [mlir][flang] Make use of the new `GEPArg` builder of GEP Op to simplify code
Markus Böck via flang-commits
flang-commits at lists.llvm.org
Mon Aug 1 08:56:30 PDT 2022
Author: Markus Böck
Date: 2022-08-01T17:22:55+02:00
New Revision: bd7eff1f2a7462ffbebc6beb8c7a3fecb1c39350
URL: https://github.com/llvm/llvm-project/commit/bd7eff1f2a7462ffbebc6beb8c7a3fecb1c39350
DIFF: https://github.com/llvm/llvm-project/commit/bd7eff1f2a7462ffbebc6beb8c7a3fecb1c39350.diff
LOG: [mlir][flang] Make use of the new `GEPArg` builder of GEP Op to simplify code
This is the follow-up to https://reviews.llvm.org/D130730, which goes through upstream code and removes the creation of constant value ops in favour of passing constant indices to GEP directly. This leads to less and more readable code, as well as more compact IR.
Differential Revision: https://reviews.llvm.org/D130731
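For readers unfamiliar with the new builder, here is a minimal sketch (not part of the patch itself) of the pattern this commit applies throughout: constant GEP indices are passed as plain integers via mlir::LLVM::GEPArg instead of being materialized as llvm.mlir.constant ops. The helper name genFieldAddr and its parameters are made up for illustration; the GEPOp/GEPArg builder calls mirror the ones in CodeGen.cpp below.

// Illustrative sketch only; genFieldAddr and fieldPos are hypothetical names.
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Transforms/DialectConversion.h"

static mlir::Value genFieldAddr(mlir::Location loc, mlir::Type resultTy,
                                mlir::Value base, int32_t fieldPos,
                                mlir::ConversionPatternRewriter &rewriter) {
  auto ptrTy = mlir::LLVM::LLVMPointerType::get(resultTy);
  // Previously this required two llvm.mlir.constant ops and a ValueRange.
  // Now the constant indices 0 and fieldPos are passed directly as GEPArg;
  // only truly dynamic indices still need to be mlir::Values.
  return rewriter.create<mlir::LLVM::GEPOp>(
      loc, ptrTy, base, llvm::ArrayRef<mlir::LLVM::GEPArg>{0, fieldPos});
}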
Added:
Modified:
flang/lib/Optimizer/CodeGen/CodeGen.cpp
flang/test/Fir/alloc.fir
flang/test/Fir/boxproc.fir
flang/test/Fir/convert-to-llvm.fir
flang/test/Fir/embox.fir
flang/test/Fir/field-index.fir
flang/test/Fir/loop10.fir
flang/test/Fir/rebox-susbtring.fir
flang/test/Lower/complex-part.f90
mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp
mlir/lib/Conversion/LLVMCommon/Pattern.cpp
mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-hip.mlir
mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-opencl.mlir
mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
mlir/test/Target/LLVMIR/Import/incorrect-constant-caching.ll
Removed:
################################################################################
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index e1e59cf6428d8..94ced070adcae 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -141,12 +141,9 @@ class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
mlir::Type resultTy,
mlir::ConversionPatternRewriter &rewriter,
unsigned boxValue) const {
- mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
- mlir::LLVM::ConstantOp cValuePos =
- genConstantOffset(loc, rewriter, boxValue);
auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
auto p = rewriter.create<mlir::LLVM::GEPOp>(
- loc, pty, box, mlir::ValueRange{c0, cValuePos});
+ loc, pty, box, llvm::ArrayRef<mlir::LLVM::GEPArg>{0, boxValue});
return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
}
@@ -156,26 +153,21 @@ class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys,
mlir::Value box, mlir::Value dim,
mlir::ConversionPatternRewriter &rewriter) const {
- mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
- mlir::LLVM::ConstantOp cDims =
- genConstantOffset(loc, rewriter, kDimsPosInBox);
mlir::LLVM::LoadOp l0 =
- loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
+ loadFromOffset(loc, box, 0, kDimsPosInBox, dim, 0, retTys[0], rewriter);
mlir::LLVM::LoadOp l1 =
- loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
+ loadFromOffset(loc, box, 0, kDimsPosInBox, dim, 1, retTys[1], rewriter);
mlir::LLVM::LoadOp l2 =
- loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
+ loadFromOffset(loc, box, 0, kDimsPosInBox, dim, 2, retTys[2], rewriter);
return {l0.getResult(), l1.getResult(), l2.getResult()};
}
mlir::LLVM::LoadOp
- loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
- mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
- mlir::Type ty,
+ loadFromOffset(mlir::Location loc, mlir::Value a, int32_t c0, int32_t cDims,
+ mlir::Value dim, int off, mlir::Type ty,
mlir::ConversionPatternRewriter &rewriter) const {
auto pty = mlir::LLVM::LLVMPointerType::get(ty);
- mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
- mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
+ mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, off);
return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
}
@@ -183,33 +175,25 @@ class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
mlir::ConversionPatternRewriter &rewriter) const {
auto idxTy = lowerTy().indexType();
- auto c0 = genConstantOffset(loc, rewriter, 0);
- auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
- return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
- rewriter);
+ return loadFromOffset(loc, box, 0, kDimsPosInBox, dimValue, kDimStridePos,
+ idxTy, rewriter);
}
/// Read base address from a fir.box. Returned address has type ty.
mlir::Value
loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
mlir::ConversionPatternRewriter &rewriter) const {
- mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
- mlir::LLVM::ConstantOp cAddr =
- genConstantOffset(loc, rewriter, kAddrPosInBox);
auto pty = mlir::LLVM::LLVMPointerType::get(ty);
- mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr);
+ mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, 0, kAddrPosInBox);
return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
}
mlir::Value
loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
mlir::ConversionPatternRewriter &rewriter) const {
- mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
- mlir::LLVM::ConstantOp cElemLen =
- genConstantOffset(loc, rewriter, kElemLenPosInBox);
auto pty = mlir::LLVM::LLVMPointerType::get(ty);
- mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen);
+ mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, 0, kElemLenPosInBox);
return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
}
@@ -262,7 +246,7 @@ class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
mlir::ConversionPatternRewriter &rewriter,
mlir::Value base, ARGS... args) const {
- llvm::SmallVector<mlir::Value> cv = {args...};
+ llvm::SmallVector<mlir::LLVM::GEPArg> cv = {args...};
return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
}
@@ -1016,9 +1000,8 @@ static mlir::Value
computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
mlir::ConversionPatternRewriter &rewriter) {
auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
- mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
- llvm::SmallVector<mlir::Value> args = {one};
- auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args);
+ auto gep = rewriter.create<mlir::LLVM::GEPOp>(
+ loc, ptrTy, nullPtr, llvm::ArrayRef<mlir::LLVM::GEPArg>{1});
return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep);
}
@@ -1255,10 +1238,8 @@ struct EmboxCommonConversion : public FIROpConversion<OP> {
auto ptrTy = mlir::LLVM::LLVMPointerType::get(
this->lowerTy().convertType(boxEleTy));
auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
- auto one =
- genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1);
- auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr,
- mlir::ValueRange{one});
+ auto gep = rewriter.create<mlir::LLVM::GEPOp>(
+ loc, ptrTy, nullPtr, llvm::ArrayRef<mlir::LLVM::GEPArg>{1});
auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>(
loc, this->lowerTy().indexType(), gep);
return {eleSize,
@@ -1414,7 +1395,7 @@ struct EmboxCommonConversion : public FIROpConversion<OP> {
mlir::ValueRange cstInteriorIndices,
mlir::ValueRange componentIndices,
llvm::Optional<mlir::Value> substringOffset) const {
- llvm::SmallVector<mlir::Value> gepArgs{outerOffset};
+ llvm::SmallVector<mlir::LLVM::GEPArg> gepArgs{outerOffset};
mlir::Type resultTy =
base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType();
// Fortran is column major, llvm GEP is row major: reverse the indices here.
@@ -1454,12 +1435,12 @@ struct EmboxCommonConversion : public FIROpConversion<OP> {
if (gepArgs.size() != 1)
fir::emitFatalError(loc,
"corrupted substring GEP in fir.embox/fir.rebox");
- mlir::Type outterOffsetTy = gepArgs[0].getType();
+ mlir::Type outterOffsetTy = gepArgs[0].get<mlir::Value>().getType();
mlir::Value cast =
this->integerCast(loc, rewriter, outterOffsetTy, *substringOffset);
- gepArgs[0] = rewriter.create<mlir::LLVM::AddOp>(loc, outterOffsetTy,
- gepArgs[0], cast);
+ gepArgs[0] = rewriter.create<mlir::LLVM::AddOp>(
+ loc, outterOffsetTy, gepArgs[0].get<mlir::Value>(), cast);
}
}
resultTy = mlir::LLVM::LLVMPointerType::get(resultTy);
@@ -2157,8 +2138,7 @@ struct XArrayCoorOpConversion
auto sliceOps = coor.slice().begin();
mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
mlir::Value prevExt = one;
- mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
- mlir::Value offset = zero;
+ mlir::Value offset = genConstantIndex(loc, idxTy, rewriter, 0);
const bool isShifted = !coor.shift().empty();
const bool isSliced = !coor.slice().empty();
const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>();
@@ -2219,7 +2199,7 @@ struct XArrayCoorOpConversion
loadBaseAddrFromBox(loc, baseTy, operands[0], rewriter);
mlir::Type voidPtrTy = getVoidPtrType();
base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
- llvm::SmallVector<mlir::Value> args{offset};
+ llvm::SmallVector<mlir::LLVM::GEPArg> args{offset};
auto addr =
rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args);
if (coor.subcomponent().empty()) {
@@ -2228,7 +2208,7 @@ struct XArrayCoorOpConversion
}
auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr);
args.clear();
- args.push_back(zero);
+ args.push_back(0);
if (!coor.lenParams().empty()) {
// If type parameters are present, then we don't want to use a GEPOp
// as below, as the LLVM struct type cannot be statically defined.
@@ -2276,7 +2256,7 @@ struct XArrayCoorOpConversion
auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy);
base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, operands[0]);
}
- llvm::SmallVector<mlir::Value> args = {offset};
+ llvm::SmallVector<mlir::LLVM::GEPArg> args = {offset};
for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
args.push_back(operands[i]);
rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args);
@@ -2307,10 +2287,7 @@ struct CoordinateOpConversion
// Complex type - basically, extract the real or imaginary part
if (fir::isa_complex(objectTy)) {
- mlir::LLVM::ConstantOp c0 =
- genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
- llvm::SmallVector<mlir::Value> offs = {c0, operands[1]};
- mlir::Value gep = genGEP(loc, ty, rewriter, base, offs);
+ mlir::Value gep = genGEP(loc, ty, rewriter, base, 0, operands[1]);
rewriter.replaceOp(coor, gep);
return mlir::success();
}
@@ -2429,8 +2406,6 @@ struct CoordinateOpConversion
// coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
// 2.4. TODO: Either document or disable any other case that the following
// implementation might convert.
- mlir::LLVM::ConstantOp c0 =
- genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
mlir::Value resultAddr =
loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()),
boxBaseAddr, rewriter);
@@ -2458,9 +2433,9 @@ struct CoordinateOpConversion
}
auto voidPtrBase =
rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr);
- llvm::SmallVector<mlir::Value> args = {off};
- resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy,
- voidPtrBase, args);
+ resultAddr = rewriter.create<mlir::LLVM::GEPOp>(
+ loc, voidPtrTy, voidPtrBase,
+ llvm::ArrayRef<mlir::LLVM::GEPArg>{off});
i += arrTy.getDimension() - 1;
cpnTy = arrTy.getEleTy();
} else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) {
@@ -2469,12 +2444,11 @@ struct CoordinateOpConversion
mlir::Value nxtOpnd = operands[i];
auto memObj =
rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr);
- llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd};
cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd));
auto llvmCurrentObjTy = lowerTy().convertType(cpnTy);
auto gep = rewriter.create<mlir::LLVM::GEPOp>(
loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj,
- args);
+ llvm::ArrayRef<mlir::LLVM::GEPArg>{0, nxtOpnd});
resultAddr =
rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep);
} else {
@@ -2529,11 +2503,9 @@ struct CoordinateOpConversion
loc, "fir.coordinate_of with a dynamic element size is unsupported");
if (hasKnownShape || columnIsDeferred) {
- llvm::SmallVector<mlir::Value> offs;
+ llvm::SmallVector<mlir::LLVM::GEPArg> offs;
if (hasKnownShape && hasSubdimension) {
- mlir::LLVM::ConstantOp c0 =
- genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
- offs.push_back(c0);
+ offs.push_back(0);
}
llvm::Optional<int> dims;
llvm::SmallVector<mlir::Value> arrIdx;
diff --git a/flang/test/Fir/alloc.fir b/flang/test/Fir/alloc.fir
index 48cbd1ddebbf8..668550a65c725 100644
--- a/flang/test/Fir/alloc.fir
+++ b/flang/test/Fir/alloc.fir
@@ -48,14 +48,14 @@ func.func @alloca_scalar_char_kind() -> !fir.ref<!fir.char<2,10>> {
}
// CHECK-LABEL: define ptr @allocmem_scalar_char(
-// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([10 x i8], ptr null, i64 1) to i64))
+// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([10 x i8], ptr null, i32 1) to i64))
func.func @allocmem_scalar_char() -> !fir.heap<!fir.char<1,10>> {
%1 = fir.allocmem !fir.char<1,10>
return %1 : !fir.heap<!fir.char<1,10>>
}
// CHECK-LABEL: define ptr @allocmem_scalar_char_kind(
-// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([10 x i16], ptr null, i64 1) to i64))
+// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([10 x i16], ptr null, i32 1) to i64))
func.func @allocmem_scalar_char_kind() -> !fir.heap<!fir.char<2,10>> {
%1 = fir.allocmem !fir.char<2,10>
return %1 : !fir.heap<!fir.char<2,10>>
@@ -131,14 +131,14 @@ func.func @alloca_array_of_dynchar(%l: i32) -> !fir.ref<!fir.array<3x3x!fir.char
}
// CHECK-LABEL: define ptr @allocmem_array_of_nonchar(
-// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([3 x [3 x i32]], ptr null, i64 1) to i64))
+// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([3 x [3 x i32]], ptr null, i32 1) to i64))
func.func @allocmem_array_of_nonchar() -> !fir.heap<!fir.array<3x3xi32>> {
%1 = fir.allocmem !fir.array<3x3xi32>
return %1 : !fir.heap<!fir.array<3x3xi32>>
}
// CHECK-LABEL: define ptr @allocmem_array_of_char(
-// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([3 x [3 x [10 x i8]]], ptr null, i64 1) to i64))
+// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([3 x [3 x [10 x i8]]], ptr null, i32 1) to i64))
func.func @allocmem_array_of_char() -> !fir.heap<!fir.array<3x3x!fir.char<1,10>>> {
%1 = fir.allocmem !fir.array<3x3x!fir.char<1,10>>
return %1 : !fir.heap<!fir.array<3x3x!fir.char<1,10>>>
@@ -175,7 +175,7 @@ func.func @alloca_dynarray_of_nonchar2(%e: index) -> !fir.ref<!fir.array<?x?xi32
// CHECK-LABEL: define ptr @allocmem_dynarray_of_nonchar(
// CHECK-SAME: i64 %[[extent:.*]])
-// CHECK: %[[prod1:.*]] = mul i64 ptrtoint (ptr getelementptr ([3 x i32], ptr null, i64 1) to i64), %[[extent]]
+// CHECK: %[[prod1:.*]] = mul i64 ptrtoint (ptr getelementptr ([3 x i32], ptr null, i32 1) to i64), %[[extent]]
// CHECK: call ptr @malloc(i64 %[[prod1]])
func.func @allocmem_dynarray_of_nonchar(%e: index) -> !fir.heap<!fir.array<3x?xi32>> {
%1 = fir.allocmem !fir.array<3x?xi32>, %e
@@ -213,7 +213,7 @@ func.func @alloca_dynarray_of_char2(%e : index) -> !fir.ref<!fir.array<?x?x!fir.
// CHECK-LABEL: define ptr @allocmem_dynarray_of_char(
// CHECK-SAME: i64 %[[extent:.*]])
-// CHECK: %[[prod1:.*]] = mul i64 ptrtoint (ptr getelementptr ([3 x [10 x i16]], ptr null, i64 1) to i64), %[[extent]]
+// CHECK: %[[prod1:.*]] = mul i64 ptrtoint (ptr getelementptr ([3 x [10 x i16]], ptr null, i32 1) to i64), %[[extent]]
// CHECK: call ptr @malloc(i64 %[[prod1]])
func.func @allocmem_dynarray_of_char(%e : index) -> !fir.heap<!fir.array<3x?x!fir.char<2,10>>> {
%1 = fir.allocmem !fir.array<3x?x!fir.char<2,10>>, %e
@@ -222,7 +222,7 @@ func.func @allocmem_dynarray_of_char(%e : index) -> !fir.heap<!fir.array<3x?x!fi
// CHECK-LABEL: define ptr @allocmem_dynarray_of_char2(
// CHECK-SAME: i64 %[[extent:.*]])
-// CHECK: %[[prod1:.*]] = mul i64 ptrtoint (ptr getelementptr ([10 x i16], ptr null, i64 1) to i64), %[[extent]]
+// CHECK: %[[prod1:.*]] = mul i64 ptrtoint (ptr getelementptr ([10 x i16], ptr null, i32 1) to i64), %[[extent]]
// CHECK: %[[prod2:.*]] = mul i64 %[[prod1]], %[[extent]]
// CHECK: call ptr @malloc(i64 %[[prod2]])
func.func @allocmem_dynarray_of_char2(%e : index) -> !fir.heap<!fir.array<?x?x!fir.char<2,10>>> {
@@ -316,7 +316,7 @@ func.func @allocmem_array_with_holes_nonchar(%0 : index, %1 : index) -> !fir.hea
// CHECK-LABEL: define ptr @allocmem_array_with_holes_char(
// CHECK-SAME: i64 %[[e:.*]])
-// CHECK: %[[mul:.*]] = mul i64 mul (i64 ptrtoint (ptr getelementptr ([3 x [10 x i16]], ptr null, i64 1) to i64), i64 4), %[[e]]
+// CHECK: %[[mul:.*]] = mul i64 mul (i64 ptrtoint (ptr getelementptr ([3 x [10 x i16]], ptr null, i32 1) to i64), i64 4), %[[e]]
// CHECK: call ptr @malloc(i64 %[[mul]])
func.func @allocmem_array_with_holes_char(%e: index) -> !fir.heap<!fir.array<3x?x4x!fir.char<2,10>>> {
%1 = fir.allocmem !fir.array<3x?x4x!fir.char<2,10>>, %e
diff --git a/flang/test/Fir/boxproc.fir b/flang/test/Fir/boxproc.fir
index 30c293e182505..c4e6e0ffd5d04 100644
--- a/flang/test/Fir/boxproc.fir
+++ b/flang/test/Fir/boxproc.fir
@@ -3,7 +3,7 @@
// CHECK-LABEL: define void @_QPtest_proc_dummy()
// CHECK: %[[VAL_0:.*]] = alloca i32, i64 1, align 4
// CHECK: %[[VAL_1:.*]] = alloca { ptr }, i64 1, align 8
-// CHECK: %[[VAL_2:.*]] = getelementptr { ptr }, ptr %[[VAL_1]], i64 0, i32 0
+// CHECK: %[[VAL_2:.*]] = getelementptr { ptr }, ptr %[[VAL_1]], i32 0, i32 0
// CHECK: store ptr %[[VAL_0]], ptr %[[VAL_2]], align 8
// CHECK: store i32 1, ptr %[[VAL_0]], align 4
// CHECK: %[[VAL_3:.*]] = alloca [32 x i8], i64 1, align 1
@@ -64,7 +64,7 @@ func.func @_QPtest_proc_dummy_other(%arg0: !fir.boxproc<() -> ()>) {
// CHECK: %[[VAL_0:.*]] = alloca [40 x i8], i64 1, align 1
// CHECK: %[[VAL_1:.*]] = alloca [10 x i8], i64 1, align 1
// CHECK: %[[VAL_2:.*]] = alloca { { ptr, i64 } }, i64 1, align 8
-// CHECK: %[[VAL_3:.*]] = getelementptr { { ptr, i64 } }, ptr %[[VAL_2]], i64 0, i32 0
+// CHECK: %[[VAL_3:.*]] = getelementptr { { ptr, i64 } }, ptr %[[VAL_2]], i32 0, i32 0
// CHECK: %[[VAL_5:.*]] = insertvalue { ptr, i64 } undef, ptr %[[VAL_1]], 0
// CHECK: %[[VAL_6:.*]] = insertvalue { ptr, i64 } %[[VAL_5]], i64 10, 1
// CHECK: store { ptr, i64 } %[[VAL_6]], ptr %[[VAL_3]], align 8
@@ -73,7 +73,7 @@ func.func @_QPtest_proc_dummy_other(%arg0: !fir.boxproc<() -> ()>) {
// CHECK: %[[VAL_11:.*]] = phi
// CHECK: %[[VAL_13:.*]] = phi
// CHECK: %[[VAL_15:.*]] = icmp sgt i64 %[[VAL_13]], 0
-// CHECK: %[[VAL_18:.*]] = getelementptr [10 x [1 x i8]], ptr %[[VAL_1]], i64 0, i64 %[[VAL_11]]
+// CHECK: %[[VAL_18:.*]] = getelementptr [10 x [1 x i8]], ptr %[[VAL_1]], i32 0, i64 %[[VAL_11]]
// CHECK: store [1 x i8] c" ", ptr %[[VAL_18]], align 1
// CHECK: %[[VAL_20:.*]] = alloca [32 x i8], i64 1, align 1
// CHECK: call void @llvm.init.trampoline(ptr %[[VAL_20]], ptr @_QFtest_proc_dummy_charPgen_message, ptr %[[VAL_2]])
@@ -89,7 +89,7 @@ func.func @_QPtest_proc_dummy_other(%arg0: !fir.boxproc<() -> ()>) {
// CHECK-LABEL: define { ptr, i64 } @_QFtest_proc_dummy_charPgen_message(ptr
// CHECK-SAME: %[[VAL_0:.*]], i64 %[[VAL_1:.*]], ptr nest %[[VAL_2:.*]])
-// CHECK: %[[VAL_3:.*]] = getelementptr { { ptr, i64 } }, ptr %[[VAL_2]], i64 0, i32 0
+// CHECK: %[[VAL_3:.*]] = getelementptr { { ptr, i64 } }, ptr %[[VAL_2]], i32 0, i32 0
// CHECK: %[[VAL_4:.*]] = load { ptr, i64 }, ptr %[[VAL_3]], align 8
// CHECK: %[[VAL_5:.*]] = extractvalue { ptr, i64 } %[[VAL_4]], 0
// CHECK: %[[VAL_6:.*]] = extractvalue { ptr, i64 } %[[VAL_4]], 1
diff --git a/flang/test/Fir/convert-to-llvm.fir b/flang/test/Fir/convert-to-llvm.fir
index 3d37f62bfa164..00ecfae4e3f4e 100644
--- a/flang/test/Fir/convert-to-llvm.fir
+++ b/flang/test/Fir/convert-to-llvm.fir
@@ -216,7 +216,7 @@ func.func @test_alloc_and_freemem_several() {
// CHECK-LABEL: llvm.func @test_alloc_and_freemem_several() {
// CHECK: [[NULL:%.*]] = llvm.mlir.null : !llvm.ptr<array<100 x f32>>
-// CHECK: [[PTR:%.*]] = llvm.getelementptr [[NULL]][{{.*}}] : (!llvm.ptr<array<100 x f32>>, i64) -> !llvm.ptr<array<100 x f32>>
+// CHECK: [[PTR:%.*]] = llvm.getelementptr [[NULL]][{{.*}}] : (!llvm.ptr<array<100 x f32>>) -> !llvm.ptr<array<100 x f32>>
// CHECK: [[N:%.*]] = llvm.ptrtoint [[PTR]] : !llvm.ptr<array<100 x f32>> to i64
// CHECK: [[MALLOC:%.*]] = llvm.call @malloc([[N]])
// CHECK: [[B1:%.*]] = llvm.bitcast [[MALLOC]] : !llvm.ptr<i8> to !llvm.ptr<array<100 x f32>>
@@ -883,8 +883,7 @@ func.func @extract_rank(%arg0: !fir.box<!fir.array<*:f64>>) -> i32 {
// CHECK-LABEL: llvm.func @extract_rank(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> i32
-// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 3] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr<i32>
+// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 3] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr<i32>
// CHECK: %[[RANK:.*]] = llvm.load %[[GEP]] : !llvm.ptr<i32>
// CHECK: llvm.return %[[RANK]] : i32
@@ -899,8 +898,7 @@ func.func @extract_addr(%arg0: !fir.box<!fir.array<*:f64>>) -> !fir.ref<f64> {
// CHECK-LABEL: llvm.func @extract_addr(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr<f64>
-// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 0] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr<ptr<f64>>
+// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr<ptr<f64>>
// CHECK: %[[ADDR:.*]] = llvm.load %[[GEP]] : !llvm.ptr<ptr<f64>>
// CHECK: llvm.return %[[ADDR]] : !llvm.ptr<f64>
@@ -910,24 +908,20 @@ func.func @extract_addr(%arg0: !fir.box<!fir.array<*:f64>>) -> !fir.ref<f64> {
func.func @extract_dims(%arg0: !fir.box<!fir.array<*:f64>>) -> index {
%c1 = arith.constant 0 : i32
- %cast = fir.convert %arg0 : (!fir.box<!fir.array<*:f64>>) -> !fir.box<!fir.array<?xf64>>
+ %cast = fir.convert %arg0 : (!fir.box<!fir.array<*:f64>>) -> !fir.box<!fir.array<?xf64>>
%0:3 = fir.box_dims %cast, %c1 : (!fir.box<!fir.array<?xf64>>, i32) -> (index, index, index)
return %0 : index
}
// CHECK-LABEL: llvm.func @extract_dims(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> i64
-// CHECK: %[[C0_1:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[CAST:.*]] = llvm.bitcast %[[ARG0]] : !llvm.ptr<struct<(ptr<f64>, i64, i32, i8, i8, i8, i8)>> to !llvm.ptr<struct<(ptr<f64>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>
// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[C0_2:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[CAST]][%[[C0]], 7, %[[C0_1]], %[[C0_2]]] : (!llvm.ptr<struct<(ptr<f64>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>, i32, i32, i32) -> !llvm.ptr<i64>
+// CHECK: %[[CAST:.*]] = llvm.bitcast %[[ARG0]] : !llvm.ptr<struct<(ptr<f64>, i64, i32, i8, i8, i8, i8)>> to !llvm.ptr<struct<(ptr<f64>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>
+// CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[CAST]][0, 7, %[[C0]], 0] : (!llvm.ptr<struct<(ptr<f64>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>, i32) -> !llvm.ptr<i64>
// CHECK: %[[LOAD0:.*]] = llvm.load %[[GEP0]] : !llvm.ptr<i64>
-// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i32) : i32
-// CHECK: %[[GEP1:.*]] = llvm.getelementptr %[[CAST]][%[[C0]], 7, %[[C0_1]], %[[C1]]] : (!llvm.ptr<struct<(ptr<f64>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>, i32, i32, i32) -> !llvm.ptr<i64>
+// CHECK: %[[GEP1:.*]] = llvm.getelementptr %[[CAST]][0, 7, %[[C0]], 1] : (!llvm.ptr<struct<(ptr<f64>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>, i32) -> !llvm.ptr<i64>
// CHECK: %[[LOAD1:.*]] = llvm.load %[[GEP1]] : !llvm.ptr<i64>
-// CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : i32) : i32
-// CHECK: %[[GEP2:.*]] = llvm.getelementptr %[[CAST]][%[[C0]], 7, %[[C0_1]], %[[C2]]] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32, i32, i32) -> !llvm.ptr<i64>
+// CHECK: %[[GEP2:.*]] = llvm.getelementptr %[[CAST]][0, 7, %[[C0]], 2] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr<i64>
// CHECK: %[[LOAD2:.*]] = llvm.load %[[GEP2]] : !llvm.ptr<i64>
// CHECK: llvm.return %[[LOAD0]] : i64
@@ -942,8 +936,7 @@ func.func @extract_elesize(%arg0: !fir.box<f32>) -> i32 {
// CHECK-LABEL: llvm.func @extract_elesize(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<f32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> i32
-// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 1] : (!llvm.ptr<struct<(ptr<f32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr<i32>
+// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 1] : (!llvm.ptr<struct<(ptr<f32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr<i32>
// CHECK: %[[ELE_SIZE:.*]] = llvm.load %[[GEP]] : !llvm.ptr<i32>
// CHECK: llvm.return %[[ELE_SIZE]] : i32
@@ -959,8 +952,7 @@ func.func @box_isarray(%arg0: !fir.box<!fir.array<*:f64>>) -> i1 {
// CHECK-LABEL: llvm.func @box_isarray(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> i1
-// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 3] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr<i32>
+// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 3] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr<i32>
// CHECK: %[[RANK:.*]] = llvm.load %[[GEP]] : !llvm.ptr<i32>
// CHECK: %[[C0_ISARRAY:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[IS_ARRAY:.*]] = llvm.icmp "ne" %[[RANK]], %[[C0_ISARRAY]] : i32
@@ -979,8 +971,7 @@ func.func @box_isalloc(%arg0: !fir.box<!fir.array<*:f64>>) -> i1 {
// CHECK-LABEL: llvm.func @box_isalloc(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> i1
-// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 5] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr<i32>
+// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 5] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr<i32>
// CHECK: %[[ATTR:.*]] = llvm.load %[[GEP]] : !llvm.ptr<i32>
// CHECK: %[[ATTR_ISALLOC:.*]] = llvm.mlir.constant(2 : i32) : i32
// CHECK: %[[AND:.*]] = llvm.and %[[ATTR]], %[[ATTR_ISALLOC]] : i32
@@ -1001,8 +992,7 @@ func.func @box_isptr(%arg0: !fir.box<!fir.array<*:f64>>) -> i1 {
// CHECK-LABEL: llvm.func @box_isptr(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> i1
-// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 5] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr<i32>
+// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 5] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr<i32>
// CHECK: %[[ATTR:.*]] = llvm.load %[[GEP]] : !llvm.ptr<i32>
// CHECK: %[[ATTR_ISALLOC:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[AND:.*]] = llvm.and %[[ATTR]], %[[ATTR_ISALLOC]] : i32
@@ -1489,8 +1479,7 @@ func.func @box_tdesc(%arg0: !fir.box<f64>) {
// CHECK-LABEL: llvm.func @box_tdesc(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) {
-// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 4] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr<i8>
+// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 4] : (!llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr<i8>
// CHECK: %[[LOAD:.*]] = llvm.load %[[GEP]] : !llvm.ptr<i{{.*}}>
// CHECK: %{{.*}} = llvm.inttoptr %[[LOAD]] : i{{.*}} to !llvm.ptr<i{{.*}}>
@@ -1653,9 +1642,9 @@ func.func @embox1(%arg0: !fir.ref<!fir.type<_QMtest_dinitTtseq{i:i32}>>) {
// CHECK: %{{.*}} = llvm.insertvalue %[[TYPE_CODE_I8]], %{{.*}}[4 : i32] : !llvm.struct<(ptr<struct<"_QMtest_dinitTtseq", (i32)>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr<i{{.*}}>, array<1 x i{{.*}}>)>
// CHECK: %[[F18ADDENDUM:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[F18ADDENDUM_I8:.*]] = llvm.trunc %[[F18ADDENDUM]] : i32 to i8
-// CHECK: %{{.*}} = llvm.insertvalue %[[F18ADDENDUM_I8]], %18[6 : i32] : !llvm.struct<(ptr<struct<"_QMtest_dinitTtseq", (i32)>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr<i{{.*}}>, array<1 x i{{.*}}>)>
+// CHECK: %{{.*}} = llvm.insertvalue %[[F18ADDENDUM_I8]], %17[6 : i32] : !llvm.struct<(ptr<struct<"_QMtest_dinitTtseq", (i32)>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr<i{{.*}}>, array<1 x i{{.*}}>)>
// CHECK: %[[TDESC:.*]] = llvm.mlir.addressof @_QMtest_dinitE.dt.tseq : !llvm.ptr<i8>
-// CHECK: %[[TDESC_CAST:.*]] = llvm.bitcast %22 : !llvm.ptr<i8> to !llvm.ptr<i8>
+// CHECK: %[[TDESC_CAST:.*]] = llvm.bitcast %21 : !llvm.ptr<i8> to !llvm.ptr<i8>
// CHECK: %{{.*}} = llvm.insertvalue %[[TDESC_CAST]], %{{.*}}[7 : i32] : !llvm.struct<(ptr<struct<"_QMtest_dinitTtseq", (i32)>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr<i{{.*}}>, array<1 x i{{.*}}>)>
// -----
@@ -1797,7 +1786,7 @@ func.func @xembox1(%arg0: !fir.ref<!fir.array<?x!fir.char<1, 10>>>) {
// integer::n,sh1,sh2
// double precision::arr(sh1:n,sh2:n)
// call xb(arr(2:n,4:n))
-// end subroutine
+// end subroutine
// ```
// N is the upperbound, sh1 and sh2 are the shifts or lowerbounds
@@ -1815,7 +1804,7 @@ func.func @_QPsb(%N: index, %sh1: index, %sh2: index) {
%box = fircg.ext_embox %arr(%n1, %n2) origin %sh1, %sh2[%c2, %N, %c1, %c4, %N, %c1] : (!fir.ref<!fir.array<?x?xf64>>, index, index, index, index, index, index, index, index, index, index) -> !fir.box<!fir.array<?x?xf64>>
fir.call @_QPxb(%box) : (!fir.box<!fir.array<?x?xf64>>) -> ()
return
-}
+}
func.func private @_QPxb(!fir.box<!fir.array<?x?xf64>>)
// CHECK-LABEL: llvm.func @_QPsb(
@@ -1932,10 +1921,9 @@ func.func private @_QPtest_dt_callee(%arg0: !fir.box<!fir.array<?xi32>>)
// CHECK: %[[BOX6:.*]] = llvm.insertvalue %[[F18ADDENDUM_I8]], %[[BOX5]][6 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.sext %[[ELEM_LEN]] : i32 to i64
+// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.sext %[[ELEM_LEN]] : i32 to i64
// CHECK: %[[ELE_TYPE:.*]] = llvm.mlir.null : !llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>>
-// CHECK: %[[C1_0:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: %[[GEP_DTYPE_SIZE:.*]] = llvm.getelementptr %[[ELE_TYPE]][%[[C1_0]]] : (!llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>>, i64) -> !llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>>
+// CHECK: %[[GEP_DTYPE_SIZE:.*]] = llvm.getelementptr %[[ELE_TYPE]][1] : (!llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>>) -> !llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>>
// CHECK: %[[PTRTOINT_DTYPE_SIZE:.*]] = llvm.ptrtoint %[[GEP_DTYPE_SIZE]] : !llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>> to i64
// CHECK: %[[ADJUSTED_OFFSET:.*]] = llvm.sub %3, %30 : i64
// CHECK: %[[EXT_SUB:.*]] = llvm.sub %[[C10]], %[[C1]] : i64
@@ -2034,15 +2022,12 @@ func.func @ext_array_coor3(%arg0: !fir.box<!fir.array<?xi32>>) {
// CHECK: %[[C0_1:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[IDX:.*]] = llvm.sub %[[C0]], %[[C1]] : i64
// CHECK: %[[DIFF0:.*]] = llvm.mul %[[IDX]], %[[C1]] : i64
-// CHECK: %[[C0_2:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[DIMOFFSET:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %[[STRIDPOS:.*]] = llvm.mlir.constant(2 : i32) : i32
-// CHECK: %[[GEPSTRIDE:.*]] = llvm.getelementptr %[[ARG0]][%[[C0_2]], 7, %[[DIMOFFSET]], %[[STRIDPOS]]] : (!llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr<i64>
+// CHECK: %[[GEPSTRIDE:.*]] = llvm.getelementptr %[[ARG0]][0, 7, %[[DIMOFFSET]], 2] : (!llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i64) -> !llvm.ptr<i64>
// CHECK: %[[LOADEDSTRIDE:.*]] = llvm.load %[[GEPSTRIDE]] : !llvm.ptr<i64>
// CHECK: %[[SC:.*]] = llvm.mul %[[DIFF0]], %[[LOADEDSTRIDE]] : i64
// CHECK: %[[OFFSET:.*]] = llvm.add %[[SC]], %[[C0_1]] : i64
-// CHECK: %[[C0_3:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[GEPADDR:.*]] = llvm.getelementptr %[[ARG0]][%[[C0_3]], 0] : (!llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32) -> !llvm.ptr<ptr<i32>>
+// CHECK: %[[GEPADDR:.*]] = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>) -> !llvm.ptr<ptr<i32>>
// CHECK: %[[LOADEDADDR:.*]] = llvm.load %[[GEPADDR]] : !llvm.ptr<ptr<i32>>
// CHECK: %[[LOADEDADDRBITCAST:.*]] = llvm.bitcast %[[LOADEDADDR]] : !llvm.ptr<i32> to !llvm.ptr<i8>
// CHECK: %[[GEPADDROFFSET:.*]] = llvm.getelementptr %[[LOADEDADDRBITCAST]][%[[OFFSET]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
@@ -2212,17 +2197,12 @@ func.func @test_rebox_1(%arg0: !fir.box<!fir.array<?x?xf32>>) {
//CHECK: %[[ADDENDUM_I8:.*]] = llvm.trunc %[[ADDENDUM]] : i32 to i8
//CHECK: %[[RBOX_TMP6:.*]] = llvm.insertvalue %[[ADDENDUM_I8]], %[[RBOX_TMP5]][6 : i32] : !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
//CHECK: %[[DIM1:.*]] = llvm.mlir.constant(0 : i64) : i64
-//CHECK: %[[GEP_ZERO_1:.*]] = llvm.mlir.constant(0 : i32) : i32
-//CHECK: %[[LB1_IDX:.*]] = llvm.mlir.constant(2 : i32) : i32
-//CHECK: %[[DIM1_STRIDE_REF:.*]] = llvm.getelementptr %[[ARG0]][%[[GEP_ZERO_1]], 7, %[[DIM1]], %[[LB1_IDX]]] : (!llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr<i64>
+//CHECK: %[[DIM1_STRIDE_REF:.*]] = llvm.getelementptr %[[ARG0]][0, 7, %[[DIM1]], 2] : (!llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i64) -> !llvm.ptr<i64>
//CHECK: %[[DIM1_STRIDE:.*]] = llvm.load %[[DIM1_STRIDE_REF]] : !llvm.ptr<i64>
//CHECK: %[[DIM2:.*]] = llvm.mlir.constant(1 : i64) : i64
-//CHECK: %[[GEP_ZERO_2:.*]] = llvm.mlir.constant(0 : i32) : i32
-//CHECK: %[[STRIDE2_IDX:.*]] = llvm.mlir.constant(2 : i32) : i32
-//CHECK: %[[DIM2_STRIDE_REF:.*]] = llvm.getelementptr %[[ARG0]][%[[GEP_ZERO_2]], 7, %[[DIM2]], %[[STRIDE2_IDX]]] : (!llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr<i64>
+//CHECK: %[[DIM2_STRIDE_REF:.*]] = llvm.getelementptr %[[ARG0]][0, 7, %[[DIM2]], 2] : (!llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i64) -> !llvm.ptr<i64>
//CHECK: %[[DIM2_STRIDE:.*]] = llvm.load %[[DIM2_STRIDE_REF]] : !llvm.ptr<i64>
-//CHECK: %[[ZERO_1:.*]] = llvm.mlir.constant(0 : i32) : i32
-//CHECK: %[[SOURCE_ARRAY_PTR:.*]] = llvm.getelementptr %[[ARG0]][%[[ZERO_1]], 0] : (!llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i32) -> !llvm.ptr<ptr<f32>>
+//CHECK: %[[SOURCE_ARRAY_PTR:.*]] = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>) -> !llvm.ptr<ptr<f32>>
//CHECK: %[[SOURCE_ARRAY:.*]] = llvm.load %[[SOURCE_ARRAY_PTR]] : !llvm.ptr<ptr<f32>>
//CHECK: %[[ZERO_ELEMS:.*]] = llvm.mlir.constant(0 : i64) : i64
//CHECK: %[[SOURCE_ARRAY_I8PTR:.*]] = llvm.bitcast %[[SOURCE_ARRAY]] : !llvm.ptr<f32> to !llvm.ptr<i8>
@@ -2285,12 +2265,9 @@ func.func @foo(%arg0: !fir.box<!fir.array<?x!fir.type<t{i:i32,c:!fir.char<1,10>}
//CHECK: %[[ADDENDUM_I8:.*]] = llvm.trunc %[[ADDENDUM]] : i32 to i8
//CHECK: %[[RBOX_TMP6:.*]] = llvm.insertvalue %[[ADDENDUM_I8]], %[[RBOX_TMP5]][6 : i32] : !llvm.struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
//CHECK: %[[DIM1:.*]] = llvm.mlir.constant(0 : i64) : i64
-//CHECK: %[[ZERO_3:.*]] = llvm.mlir.constant(0 : i32) : i32
-//CHECK: %[[STRIDE_IDX:.*]] = llvm.mlir.constant(2 : i32) : i32
-//CHECK: %[[SRC_STRIDE_PTR:.*]] = llvm.getelementptr %[[ARG0]][%[[ZERO_3]], 7, %[[DIM1]], %[[STRIDE_IDX]]] : (!llvm.ptr<struct<(ptr<struct<"t", (i32, array<10 x i8>)>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>>, i32, i64, i32) -> !llvm.ptr<i64>
+//CHECK: %[[SRC_STRIDE_PTR:.*]] = llvm.getelementptr %[[ARG0]][0, 7, %[[DIM1]], 2] : (!llvm.ptr<struct<(ptr<struct<"t", (i32, array<10 x i8>)>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>>, i64) -> !llvm.ptr<i64>
//CHECK: %[[SRC_STRIDE:.*]] = llvm.load %[[SRC_STRIDE_PTR]] : !llvm.ptr<i64>
-//CHECK: %[[ZERO_4:.*]] = llvm.mlir.constant(0 : i32) : i32
-//CHECK: %[[SRC_ARRAY_PTR:.*]] = llvm.getelementptr %[[ARG0]][%[[ZERO_4]], 0] : (!llvm.ptr<struct<(ptr<struct<"t", (i32, array<10 x i8>)>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>>, i32) -> !llvm.ptr<ptr<struct<"t", (i32, array<10 x i8>)>>>
+//CHECK: %[[SRC_ARRAY_PTR:.*]] = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr<struct<(ptr<struct<"t", (i32, array<10 x i8>)>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>>) -> !llvm.ptr<ptr<struct<"t", (i32, array<10 x i8>)>>>
//CHECK: %[[SRC_ARRAY:.*]] = llvm.load %[[SRC_ARRAY_PTR]] : !llvm.ptr<ptr<struct<"t", (i32, array<10 x i8>)>>>
//CHECK: %[[ZERO_6:.*]] = llvm.mlir.constant(0 : i64) : i64
//CHECK: %[[SRC_CAST:.*]] = llvm.bitcast %[[SRC_ARRAY]] : !llvm.ptr<struct<"t", (i32, array<10 x i8>)>> to !llvm.ptr<struct<"t", (i32, array<10 x i8>)>>
@@ -2330,8 +2307,7 @@ func.func @coordinate_ref_complex(%arg0: !fir.ref<!fir.complex<16>>) {
}
// CHECK-LABEL: llvm.func @coordinate_ref_complex
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr<struct<(f128, f128)>>
-// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %{{.*}} = llvm.getelementptr %[[ARG0]][%[[C0]], 0] : (!llvm.ptr<struct<(f128, f128)>>, i64) -> !llvm.ptr<f32>
+// CHECK: %{{.*}} = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr<struct<(f128, f128)>>) -> !llvm.ptr<f32>
// CHECK-NEXT: llvm.return
// -----
@@ -2344,8 +2320,7 @@ func.func @coordinate_box_complex(%arg0: !fir.box<!fir.complex<16>>) {
}
// CHECK-LABEL: llvm.func @coordinate_box_complex
// CHECK-SAME: %[[BOX:.*]]: !llvm.ptr<struct<(ptr<struct<(f128, f128)>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>
-// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %{{.*}} = llvm.getelementptr %[[BOX]][%[[C0]], 0] : (!llvm.ptr<struct<(ptr<struct<(f128, f128)>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i64) -> !llvm.ptr<f32>
+// CHECK: %{{.*}} = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr<struct<(ptr<struct<(f128, f128)>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr<f32>
// CHECK-NEXT: llvm.return
// -----
@@ -2362,13 +2337,11 @@ func.func @coordinate_box_derived_1(%arg0: !fir.box<!fir.type<derived_1{field_1:
// CHECK-LABEL: llvm.func @coordinate_box_derived_1
// CHECK-SAME: %[[BOX:.*]]: !llvm.ptr<struct<(ptr<struct<"derived_1", (i32, i32)>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr<i8>, array<1 x i64>)>>)
// CHECK: %[[COORDINATE:.*]] = llvm.mlir.constant(1 : i32) : i32
-// CHECK: %[[C0_3:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %[[C0_1:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[DERIVED_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[C0_1]], 0] : (!llvm.ptr<struct<(ptr<struct<"derived_1", (i32, i32)>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr<i8>, array<1 x i64>)>>, i32) -> !llvm.ptr<ptr<struct<"derived_1", (i32, i32)>>>
+// CHECK: %[[DERIVED_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr<struct<(ptr<struct<"derived_1", (i32, i32)>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr<i8>, array<1 x i64>)>>) -> !llvm.ptr<ptr<struct<"derived_1", (i32, i32)>>>
// CHECK: %[[DERIVED_VAL:.*]] = llvm.load %[[DERIVED_ADDR]] : !llvm.ptr<ptr<struct<"derived_1", (i32, i32)>>>
// CHECK: %[[DERIVED_CAST:.*]] = llvm.bitcast %[[DERIVED_VAL]] : !llvm.ptr<struct<"derived_1", (i32, i32)>> to !llvm.ptr<struct<"derived_1", (i32, i32)>>
-// CHECK: %[[SUBOBJECT_ADDR:.*]] = llvm.getelementptr %[[DERIVED_CAST]][%[[C0_3]], 1] : (!llvm.ptr<struct<"derived_1", (i32, i32)>>, i64) -> !llvm.ptr<i32>
-// CHECK: %[[CAST_TO_I8_PTR:.*]] = llvm.bitcast %7 : !llvm.ptr<i32> to !llvm.ptr<i8>
+// CHECK: %[[SUBOBJECT_ADDR:.*]] = llvm.getelementptr %[[DERIVED_CAST]][0, 1] : (!llvm.ptr<struct<"derived_1", (i32, i32)>>) -> !llvm.ptr<i32>
+// CHECK: %[[CAST_TO_I8_PTR:.*]] = llvm.bitcast %[[SUBOBJECT_ADDR]] : !llvm.ptr<i32> to !llvm.ptr<i8>
// CHECK: %{{.*}} = llvm.bitcast %[[CAST_TO_I8_PTR]] : !llvm.ptr<i8> to !llvm.ptr<i32>
// CHECK-NEXT: llvm.return
@@ -2384,15 +2357,13 @@ func.func @coordinate_box_derived_2(%arg0: !fir.box<!fir.type<derived_2{field_1:
// CHECK-SAME: (%[[BOX:.*]]: !llvm.ptr<struct<(ptr<struct<"derived_2", (struct<"another_derived", (i32, f32)>, i32)>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr<i8>, array<1 x i64>)>>)
// CHECK-NEXT: %[[C0_0:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK-NEXT: %[[C1:.*]] = llvm.mlir.constant(1 : i32) : i32
-// CHECK-NEXT: %[[C0_3:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK-NEXT: %[[C0_1:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[DERIVED_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[C0_1]], 0] : (!llvm.ptr<struct<(ptr<struct<"derived_2", (struct<"another_derived", (i32, f32)>, i32)>>, i{{.*}}, i{{.*}}32, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr<i8>, array<1 x i64>)>>, i32) -> !llvm.ptr<ptr<struct<"derived_2", (struct<"another_derived", (i32, f32)>, i32)>>>
+// CHECK: %[[DERIVED_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr<struct<(ptr<struct<"derived_2", (struct<"another_derived", (i32, f32)>, i32)>>, i{{.*}}, i{{.*}}32, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr<i8>, array<1 x i64>)>>) -> !llvm.ptr<ptr<struct<"derived_2", (struct<"another_derived", (i32, f32)>, i32)>>>
// CHECK-NEXT: %[[DERIVED_VAL:.*]] = llvm.load %[[DERIVED_ADDR]] : !llvm.ptr<ptr<struct<"derived_2", (struct<"another_derived", (i32, f32)>, i32)>>>
// CHECK-NEXT: %[[DERIVED_CAST_I8_PTR:.*]] = llvm.bitcast %[[DERIVED_VAL]] : !llvm.ptr<struct<"derived_2", (struct<"another_derived", (i32, f32)>, i32)>> to !llvm.ptr<struct<"derived_2", (struct<"another_derived", (i32, f32)>, i32)>>
-// CHECK-NEXT: %[[ANOTHER_DERIVED_ADDR:.*]] = llvm.getelementptr %[[DERIVED_CAST_I8_PTR]][%[[C0_3]], 0] : (!llvm.ptr<struct<"derived_2", (struct<"another_derived", (i32, f32)>, i32)>>, i64) -> !llvm.ptr<struct<"another_derived", (i32, f32)>>
+// CHECK-NEXT: %[[ANOTHER_DERIVED_ADDR:.*]] = llvm.getelementptr %[[DERIVED_CAST_I8_PTR]][0, 0] : (!llvm.ptr<struct<"derived_2", (struct<"another_derived", (i32, f32)>, i32)>>) -> !llvm.ptr<struct<"another_derived", (i32, f32)>>
// CHECK-NEXT: %[[ANOTHER_DERIVED_ADDR_AS_VOID_PTR:.*]] = llvm.bitcast %[[ANOTHER_DERIVED_ADDR]] : !llvm.ptr<struct<"another_derived", (i32, f32)>> to !llvm.ptr<i8>
// CHECK-NEXT: %[[ANOTHER_DERIVED_RECAST:.*]] = llvm.bitcast %[[ANOTHER_DERIVED_ADDR_AS_VOID_PTR]] : !llvm.ptr<i8> to !llvm.ptr<struct<"another_derived", (i32, f32)>>
-// CHECK-NEXT: %[[SUBOBJECT_ADDR:.*]] = llvm.getelementptr %[[ANOTHER_DERIVED_RECAST]][%[[C0_3]], 1] : (!llvm.ptr<struct<"another_derived", (i32, f32)>>, i64) -> !llvm.ptr<f32>
+// CHECK-NEXT: %[[SUBOBJECT_ADDR:.*]] = llvm.getelementptr %[[ANOTHER_DERIVED_RECAST]][0, 1] : (!llvm.ptr<struct<"another_derived", (i32, f32)>>) -> !llvm.ptr<f32>
// CHECK-NEXT: %[[SUBOBJECT_AS_VOID_PTR:.*]] = llvm.bitcast %[[SUBOBJECT_ADDR]] : !llvm.ptr<f32> to !llvm.ptr<i8>
// CHECK-NEXT: %{{.*}} = llvm.bitcast %[[SUBOBJECT_AS_VOID_PTR]] : !llvm.ptr<i8> to !llvm.ptr<i32>
// CHECK-NEXT: llvm.return
@@ -2412,21 +2383,13 @@ func.func @coordinate_box_array_1d(%arg0: !fir.box<!fir.array<10 x f32>>, %arg1:
// CHECK-LABEL: llvm.func @coordinate_box_array_1d
// CHECK-SAME: %[[BOX:.*]]: !llvm.ptr<struct<(ptr<array<10 x f32>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>
// CHECK-SAME: %[[COORDINATE:.*]]: i64
-// CHECK-NEXT: %{{.*}} = llvm.mlir.constant(0 : i64) : i64
// There's only one box here. Its index is `0`. Generate it.
-// CHECK-NEXT: %[[BOX_IDX:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX]], 0] : (!llvm.ptr<struct<(ptr<array<10 x f32>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32) -> !llvm.ptr<ptr<array<10 x f32>>>
+// CHECK: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr<struct<(ptr<array<10 x f32>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>) -> !llvm.ptr<ptr<array<10 x f32>>>
// CHECK-NEXT: %[[ARRAY_OBJECT:.*]] = llvm.load %[[ARRAY_ADDR]] : !llvm.ptr<ptr<array<10 x f32>>>
// CHECK-NEXT: %[[OFFSET_INIT:.*]] = llvm.mlir.constant(0 : i64) : i64
-// Same as [[BOX_IDX]], just recreated.
-// CHECK-NEXT: %[[BOX_IDX_1:.*]] = llvm.mlir.constant(0 : i32) : i32
-// Index of the array that contains the CFI_dim_t objects
-// CHECK-NEXT: %[[CFI_DIM_IDX:.*]] = llvm.mlir.constant(7 : i32) : i32
// Index of the 1st CFI_dim_t object (corresonds the the 1st dimension)
// CHECK-NEXT: %[[DIM_1_IDX:.*]] = llvm.mlir.constant(0 : i64) : i64
-// Index of the memory stride within a CFI_dim_t object
-// CHECK-NEXT: %[[DIM_1_MEM_STRIDE:.*]] = llvm.mlir.constant(2 : i32) : i32
-// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX_1]], 7, %[[DIM_1_IDX]], %[[DIM_1_MEM_STRIDE]]] : (!llvm.ptr<struct<(ptr<array<10 x f32>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr<i64>
+// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 7, %[[DIM_1_IDX]], 2] : (!llvm.ptr<struct<(ptr<array<10 x f32>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i64) -> !llvm.ptr<i64>
// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_VAL:.*]] = llvm.load %[[DIM_1_MEM_STRIDE_ADDR]] : !llvm.ptr<i64>
// CHECK-NEXT: %[[BYTE_OFFSET:.*]] = llvm.mul %[[COORDINATE]], %[[DIM_1_MEM_STRIDE_VAL]] : i64
// CHECK-NEXT: %[[SUBOJECT_OFFSET:.*]] = llvm.add %[[BYTE_OFFSET]], %[[OFFSET_INIT]] : i64
@@ -2443,22 +2406,12 @@ func.func @coordinate_of_box_dynamic_array_1d(%arg0: !fir.box<!fir.array<? x f32
// CHECK-LABEL: llvm.func @coordinate_of_box_dynamic_array_1d
// CHECK-SAME: %[[BOX:.*]]: !llvm.ptr<struct<(ptr<f32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>
// CHECK-SAME: %[[COORDINATE:.*]]: i64
-// CHECK-NEXT: %{{.*}} = llvm.mlir.constant(0 : i64) : i64
-// There's only one box here. Its index is `0`. Generate it.
-// CHECK-NEXT: %[[BOX_IDX:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK-NEXT: %[[BOX_1ST_ELEM_IDX:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK-NEXT: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX]], 0] : (!llvm.ptr<struct<(ptr<f32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32) -> !llvm.ptr<ptr<f32>>
+// CHECK-NEXT: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr<struct<(ptr<f32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>) -> !llvm.ptr<ptr<f32>>
// CHECK-NEXT: %[[ARRAY_OBJECT:.*]] = llvm.load %[[ARRAY_ADDR]] : !llvm.ptr<ptr<f32>>
// CHECK-NEXT: %[[OFFSET_INIT:.*]] = llvm.mlir.constant(0 : i64) : i64
-// Same as [[BOX_IDX]], just recreated.
-// CHECK-NEXT: %[[BOX_IDX_1:.*]] = llvm.mlir.constant(0 : i32) : i32
-// Index of the array that contains the CFI_dim_t objects
-// CHECK-NEXT: %[[CFI_DIM_IDX:.*]] = llvm.mlir.constant(7 : i32) : i32
// Index of the 1st CFI_dim_t object (corresonds the the 1st dimension)
// CHECK-NEXT: %[[DIM_1_IDX:.*]] = llvm.mlir.constant(0 : i64) : i64
-// Index of the memory stride within a CFI_dim_t object
-// CHECK-NEXT: %[[DIM_1_MEM_STRIDE:.*]] = llvm.mlir.constant(2 : i32) : i32
-// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX_1]], 7, %[[DIM_1_IDX]], %[[DIM_1_MEM_STRIDE]]] : (!llvm.ptr<struct<(ptr<f32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr<i64>
+// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 7, %[[DIM_1_IDX]], 2] : (!llvm.ptr<struct<(ptr<f32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i64) -> !llvm.ptr<i64>
// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_VAL:.*]] = llvm.load %[[DIM_1_MEM_STRIDE_ADDR]] : !llvm.ptr<i64>
// CHECK-NEXT: %[[BYTE_OFFSET:.*]] = llvm.mul %[[COORDINATE]], %[[DIM_1_MEM_STRIDE_VAL]] : i64
// CHECK-NEXT: %[[SUBOJECT_OFFSET:.*]] = llvm.add %[[BYTE_OFFSET]], %[[OFFSET_INIT]] : i64
@@ -2477,34 +2430,18 @@ func.func @coordinate_box_array_2d(%arg0: !fir.box<!fir.array<10 x 10 x f32>>, %
// CHECK-LABEL: llvm.func @coordinate_box_array_2d
// CHECK-SAME: %[[BOX:.*]]: !llvm.ptr<struct<(ptr<array<10 x array<10 x f32>>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>
// CHECK-SAME: %[[COORDINATE_1:.*]]: i64, %[[COORDINATE_2:.*]]: i64)
-// CHECK-NEXT: %{{.*}} = llvm.mlir.constant(0 : i64) : i64
-// There's only one box here. Its index is `0`. Generate it.
-// CHECK-NEXT: %[[BOX_IDX:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK-NEXT: %[[BOX_1ST_ELEM_IDX:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK-NEXT: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX]], 0] : (!llvm.ptr<struct<(ptr<array<10 x array<10 x f32>>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>, i32) -> !llvm.ptr<ptr<array<10 x array<10 x f32>>>>
+// CHECK-NEXT: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr<struct<(ptr<array<10 x array<10 x f32>>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>) -> !llvm.ptr<ptr<array<10 x array<10 x f32>>>>
// CHECK-NEXT: %[[ARRAY_OBJECT:.*]] = llvm.load %[[ARRAY_ADDR]] : !llvm.ptr<ptr<array<10 x array<10 x f32>>>>
// CHECK-NEXT: %[[OFFSET_INIT:.*]] = llvm.mlir.constant(0 : i64) : i64
-// Same as [[BOX_IDX]], just recreated.
-// CHECK-NEXT: %[[BOX_IDX_1:.*]] = llvm.mlir.constant(0 : i32) : i32
-// Index of the array that contains the CFI_dim_t objects
-// CHECK-NEXT: %[[CFI_DIM_IDX:.*]] = llvm.mlir.constant(7 : i32) : i32
// Index of the 1st CFI_dim_t object (corresonds the the 1st dimension)
// CHECK-NEXT: %[[DIM_1_IDX:.*]] = llvm.mlir.constant(0 : i64) : i64
-// Index of the memory stride within a CFI_dim_t object
-// CHECK-NEXT: %[[DIM_1_MEM_STRIDE:.*]] = llvm.mlir.constant(2 : i32) : i32
-// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX_1]], 7, %[[DIM_1_IDX]], %[[DIM_1_MEM_STRIDE]]] : (!llvm.ptr<struct<(ptr<array<10 x array<10 x f32>>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr<i64>
+// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 7, %[[DIM_1_IDX]], 2] : (!llvm.ptr<struct<(ptr<array<10 x array<10 x f32>>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>, i64) -> !llvm.ptr<i64>
// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_VAL:.*]] = llvm.load %[[DIM_1_MEM_STRIDE_ADDR]] : !llvm.ptr<i64>
// CHECK-NEXT: %[[BYTE_OFFSET_1:.*]] = llvm.mul %[[COORDINATE_1]], %[[DIM_1_MEM_STRIDE_VAL]] : i64
// CHECK-NEXT: %[[SUBOBJECT_OFFSET_1:.*]] = llvm.add %[[BYTE_OFFSET]], %[[OFFSET_INIT]] : i64
-// Same as [[BOX_IDX]], just recreated.
-// CHECK-NEXT: %[[BOX_IDX_2:.*]] = llvm.mlir.constant(0 : i32) : i32
-// Index of the array that contains the CFI_dim_t objects (same as CFI_DIM_IDX, just recreated)
-// CHECK-NEXT: %[[CFI_DIM_IDX_1:.*]] = llvm.mlir.constant(7 : i32) : i32
// Index of the 1st CFI_dim_t object (corresonds the the 2nd dimension)
// CHECK-NEXT: %[[DIM_2_IDX:.*]] = llvm.mlir.constant(1 : i64) : i64
-// Index of the memory stride within a CFI_dim_t object
-// CHECK-NEXT: %[[DIM_2_MEM_STRIDE:.*]] = llvm.mlir.constant(2 : i32) : i32
-// CHECK-NEXT: %[[DIM_2_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX_2]], 7, %[[DIM_2_IDX]], %[[DIM_2_MEM_STRIDE]]] : (!llvm.ptr<struct<(ptr<array<10 x array<10 x f32>>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr<i64>
+// CHECK-NEXT: %[[DIM_2_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 7, %[[DIM_2_IDX]], 2] : (!llvm.ptr<struct<(ptr<array<10 x array<10 x f32>>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>, i64) -> !llvm.ptr<i64>
// CHECK-NEXT: %[[DIM_2_MEM_STRIDE_VAL:.*]] = llvm.load %[[DIM_2_MEM_STRIDE_ADDR]] : !llvm.ptr<i64>
// CHECK-NEXT: %[[BYTE_OFFSET_2:.*]] = llvm.mul %[[COORDINATE_2]], %[[DIM_2_MEM_STRIDE_VAL]] : i64
// CHECK-NEXT: %[[SUBOBJECT_OFFSET_2:.*]] = llvm.add %[[BYTE_OFFSET_2]], %[[SUBOBJECT_OFFSET_1]] : i64
@@ -2526,23 +2463,18 @@ func.func @coordinate_box_derived_inside_array(%arg0: !fir.box<!fir.array<10 x !
// CHECK-LABEL: llvm.func @coordinate_box_derived_inside_array(
// CHECK-SAME: %[[BOX:.*]]: !llvm.ptr<struct<(ptr<array<10 x struct<"derived_3", (f32, f32)>>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>>,
// CHECK-SAME: %[[COORDINATE_1:.*]]: i64) {
-// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %[[VAL_4:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[VAL_5:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK: %[[VAL_6:.*]] = llvm.getelementptr %[[BOX]]{{\[}}%[[VAL_4]], 0] : (!llvm.ptr<struct<(ptr<array<10 x struct<"derived_3", (f32, f32)>>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>>, i32) -> !llvm.ptr<ptr<array<10 x struct<"derived_3", (f32, f32)>>>>
+// CHECK: %[[VAL_6:.*]] = llvm.getelementptr %[[BOX]]{{\[}}0, 0] : (!llvm.ptr<struct<(ptr<array<10 x struct<"derived_3", (f32, f32)>>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>>) -> !llvm.ptr<ptr<array<10 x struct<"derived_3", (f32, f32)>>>>
// CHECK: %[[ARRAY:.*]] = llvm.load %[[VAL_6]] : !llvm.ptr<ptr<array<10 x struct<"derived_3", (f32, f32)>>>>
// CHECK: %[[VAL_8:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %[[VAL_9:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[DIM_IDX:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %[[DIM_MEM_STRIDE:.*]] = llvm.mlir.constant(2 : i32) : i32
-// CHECK: %[[VAL_13:.*]] = llvm.getelementptr %[[BOX]][%[[VAL_9]], 7, %[[DIM_IDX]], %[[DIM_MEM_STRIDE]]] : (!llvm.ptr<struct<(ptr<array<10 x struct<"derived_3", (f32, f32)>>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>>, i32, i64, i32) -> !llvm.ptr<i64>
+// CHECK: %[[VAL_13:.*]] = llvm.getelementptr %[[BOX]][0, 7, %[[DIM_IDX]], 2] : (!llvm.ptr<struct<(ptr<array<10 x struct<"derived_3", (f32, f32)>>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>>, i64) -> !llvm.ptr<i64>
// CHECK: %[[VAL_14:.*]] = llvm.load %[[VAL_13]] : !llvm.ptr<i64>
// CHECK: %[[VAL_15:.*]] = llvm.mul %[[COORDINATE_1]], %[[VAL_14]] : i64
// CHECK: %[[OFFSET:.*]] = llvm.add %[[VAL_15]], %[[VAL_8]] : i64
// CHECK: %[[VAL_17:.*]] = llvm.bitcast %[[ARRAY]] : !llvm.ptr<array<10 x struct<"derived_3", (f32, f32)>>> to !llvm.ptr<i8>
// CHECK: %[[VAL_18:.*]] = llvm.getelementptr %[[VAL_17]][%[[OFFSET]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
// CHECK: %[[DERIVED:.*]] = llvm.bitcast %[[VAL_18]] : !llvm.ptr<i8> to !llvm.ptr<struct<"derived_3", (f32, f32)>>
-// CHECK: %[[VAL_20:.*]] = llvm.getelementptr %[[DERIVED]][%[[VAL_3]], 1] : (!llvm.ptr<struct<"derived_3", (f32, f32)>>, i64) -> !llvm.ptr<f32>
+// CHECK: %[[VAL_20:.*]] = llvm.getelementptr %[[DERIVED]][0, 1] : (!llvm.ptr<struct<"derived_3", (f32, f32)>>) -> !llvm.ptr<f32>
// CHECK: %[[VAL_21:.*]] = llvm.bitcast %[[VAL_20]] : !llvm.ptr<f32> to !llvm.ptr<i8>
// CHECK: %[[VAL_22:.*]] = llvm.bitcast %[[VAL_21]] : !llvm.ptr<i8> to !llvm.ptr<f32>
// CHECK: llvm.return
@@ -2572,8 +2504,7 @@ func.func @coordinate_array_known_size_1d(%arg0: !fir.ref<!fir.array<10 x i32>>,
// CHECK-LABEL: llvm.func @coordinate_array_known_size_1d(
// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr<array<10 x i32>>,
// CHECK-SAME: %[[VAL_1:.*]]: i64) {
-// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}%[[VAL_2]], %[[VAL_1]]] : (!llvm.ptr<array<10 x i32>>, i64, i64) -> !llvm.ptr<i32>
+// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, %[[VAL_1]]] : (!llvm.ptr<array<10 x i32>>, i64) -> !llvm.ptr<i32>
// CHECK: llvm.return
// CHECK: }
@@ -2587,8 +2518,7 @@ func.func @coordinate_array_known_size_2d_get_i32(%arg0: !fir.ref<!fir.array<10
// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr<array<10 x array<10 x i32>>>,
// CHECK-SAME: %[[VAL_1:.*]]: i64,
// CHECK-SAME: %[[VAL_2:.*]]: i64) {
-// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %[[VAL_4:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_2]], %[[VAL_1]]] : (!llvm.ptr<array<10 x array<10 x i32>>>, i64, i64, i64) -> !llvm.ptr<i32>
+// CHECK: %[[VAL_4:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, %[[VAL_2]], %[[VAL_1]]] : (!llvm.ptr<array<10 x array<10 x i32>>>, i64, i64) -> !llvm.ptr<i32>
// CHECK: llvm.return
// CHECK: }
@@ -2601,8 +2531,7 @@ func.func @coordinate_array_known_size_2d_get_array(%arg0: !fir.ref<!fir.array<1
// CHECK-LABEL: llvm.func @coordinate_array_known_size_2d_get_array(
// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr<array<10 x array<10 x i32>>>,
// CHECK-SAME: %[[VAL_1:.*]]: i64) {
-// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]][%[[VAL_2]], %[[VAL_1]]] : (!llvm.ptr<array<10 x array<10 x i32>>>, i64, i64) -> !llvm.ptr<array<10 x i32>>
+// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]][0, %[[VAL_1]]] : (!llvm.ptr<array<10 x array<10 x i32>>>, i64) -> !llvm.ptr<array<10 x i32>>
// CHECK: llvm.return
// CHECK: }
@@ -2616,8 +2545,7 @@ func.func @coordinate_ref_derived(%arg0: !fir.ref<!fir.type<dervied_4{field_1:i3
}
// CHECK-LABEL: llvm.func @coordinate_ref_derived(
// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr<struct<"dervied_4", (i32, i32)>>) {
-// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}%[[VAL_2]], 1] : (!llvm.ptr<struct<"dervied_4", (i32, i32)>>, i64) -> !llvm.ptr<i32>
+// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, 1] : (!llvm.ptr<struct<"dervied_4", (i32, i32)>>) -> !llvm.ptr<i32>
// CHECK: llvm.return
// CHECK: }
@@ -2631,8 +2559,7 @@ func.func @coordinate_ref_derived_nested(%arg0: !fir.ref<!fir.type<derived_5{fie
}
// CHECK-LABEL: llvm.func @coordinate_ref_derived_nested(
// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr<struct<"derived_5", (struct<"nested_derived", (i32, f32)>, i32)>>) {
-// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %[[VAL_4:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}%[[VAL_3]], 0, 1] : (!llvm.ptr<struct<"derived_5", (struct<"nested_derived", (i32, f32)>, i32)>>, i64) -> !llvm.ptr<i32>
+// CHECK: %[[VAL_4:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, 0, 1] : (!llvm.ptr<struct<"derived_5", (struct<"nested_derived", (i32, f32)>, i32)>>) -> !llvm.ptr<i32>
// CHECK: llvm.return
// CHECK: }
@@ -2661,8 +2588,7 @@ func.func @test_coordinate_of_tuple(%tup : !fir.ref<tuple<!fir.ref<i32>>>) {
}
// CHECK-LABEL: llvm.func @test_coordinate_of_tuple(
// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr<struct<(ptr<i32>)>>) {
-// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}%[[VAL_2]], 0] : (!llvm.ptr<struct<(ptr<i32>)>>, i64) -> !llvm.ptr<i32>
+// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, 0] : (!llvm.ptr<struct<(ptr<i32>)>>) -> !llvm.ptr<i32>
// CHECK: llvm.return
// CHECK: }
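A minimal sketch of the builder call that produces the leaner CHECK lines above. The helper and its signature are illustrative, not code from this commit; the field positions 0, 7 and 2 are the descriptor layout the test checks, and `dim` stays a runtime value.

    // Minimal sketch, not code from this commit: load the byte stride of
    // dimension `dim` from a descriptor. Constant indices (0, 7, 2) are passed
    // as plain integers and the runtime index `dim` as an SSA value, all in
    // one list, so no llvm.mlir.constant ops are created for the structural
    // indices.
    #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
    #include "mlir/IR/Builders.h"

    static mlir::Value loadDimStride(mlir::OpBuilder &builder,
                                     mlir::Location loc, mlir::Value box,
                                     mlir::Value dim, mlir::Type i64Ty) {
      auto i64PtrTy = mlir::LLVM::LLVMPointerType::get(i64Ty);
      auto gep = builder.create<mlir::LLVM::GEPOp>(
          loc, i64PtrTy, box,
          llvm::ArrayRef<mlir::LLVM::GEPArg>{0, 7, dim, 2});
      return builder.create<mlir::LLVM::LoadOp>(loc, i64Ty, gep);
    }

The result is a single llvm.getelementptr with the constants folded into its index list, which is exactly what the rewritten CHECK lines expect.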
diff --git a/flang/test/Fir/embox.fir b/flang/test/Fir/embox.fir
index 79aded8b5f71a..02eba454c47ac 100644
--- a/flang/test/Fir/embox.fir
+++ b/flang/test/Fir/embox.fir
@@ -42,7 +42,7 @@ func.func @_QPtest_dt_slice() {
// CHECK: %[[a5:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }
// CHECK-SAME: { ptr undef, i64 4, i32 20180515, i8 1, i8 9, i8 0, i8 0, [1 x [3 x i64]]
// CHECK-SAME: [i64 1, i64 5, i64 mul
-// CHECK-SAME: (i64 ptrtoint (ptr getelementptr (%_QFtest_dt_sliceTt, ptr null, i64 1) to i64), i64 2)]] }
+// CHECK-SAME: (i64 ptrtoint (ptr getelementptr (%_QFtest_dt_sliceTt, ptr null, i32 1) to i64), i64 2)]] }
// CHECK-SAME: , ptr %[[a4]], 0
// CHECK: store { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } %[[a5]], ptr %[[a1]], align 8,
diff --git a/flang/test/Fir/field-index.fir b/flang/test/Fir/field-index.fir
index cbbb5ff8f641e..8c9391f3901ec 100644
--- a/flang/test/Fir/field-index.fir
+++ b/flang/test/Fir/field-index.fir
@@ -10,7 +10,7 @@
// CHECK-SAME: (ptr %[[arg0:.*]])
func.func @simple_field(%arg0: !fir.ref<!fir.type<a{x:f32,i:i32}>>) -> i32 {
%1 = fir.field_index i, !fir.type<a{x:f32,i:i32}>
- // CHECK: %[[GEP:.*]] = getelementptr %a, ptr %[[arg0]], i64 0, i32 1
+ // CHECK: %[[GEP:.*]] = getelementptr %a, ptr %[[arg0]], i32 0, i32 1
%2 = fir.coordinate_of %arg0, %1 : (!fir.ref<!fir.type<a{x:f32,i:i32}>>, !fir.field) -> !fir.ref<i32>
// CHECK: load i32, ptr %[[GEP]]
%3 = fir.load %2 : !fir.ref<i32>
@@ -22,7 +22,7 @@ func.func @simple_field(%arg0: !fir.ref<!fir.type<a{x:f32,i:i32}>>) -> i32 {
func.func @derived_field(%arg0: !fir.ref<!fir.type<c{x:f32,some_b:!fir.type<b{x:f32,i:i32}>}>>) -> i32 {
%1 = fir.field_index some_b, !fir.type<c{x:f32,some_b:!fir.type<b{x:f32,i:i32}>}>
%2 = fir.field_index i, !fir.type<b{x:f32,i:i32}>
- // CHECK: %[[GEP:.*]] = getelementptr %c, ptr %[[arg0]], i64 0, i32 1, i32 1
+ // CHECK: %[[GEP:.*]] = getelementptr %c, ptr %[[arg0]], i32 0, i32 1, i32 1
%3 = fir.coordinate_of %arg0, %1, %2 : (!fir.ref<!fir.type<c{x:f32,some_b:!fir.type<b{x:f32,i:i32}>}>>, !fir.field, !fir.field) -> !fir.ref<i32>
// CHECK: load i32, ptr %[[GEP]]
%4 = fir.load %3 : !fir.ref<i32>
diff --git a/flang/test/Fir/loop10.fir b/flang/test/Fir/loop10.fir
index e1c908bb10671..c0eb723826b1a 100644
--- a/flang/test/Fir/loop10.fir
+++ b/flang/test/Fir/loop10.fir
@@ -13,7 +13,7 @@ func.func @x(%addr : !fir.ref<!fir.array<10x10xi32>>) -> index {
// CHECK-DAG: %[[COL:.*]] = phi i64 {{.*}} [ 11,
// CHECK: icmp sgt i64 %[[COL]], 0
fir.do_loop %jv = %c0 to %c10 step %c1 {
- // CHECK: getelementptr {{.*}} %[[ADDR]], i64 0, i64 %[[R]], i64 %[[C]]
+ // CHECK: getelementptr {{.*}} %[[ADDR]], i32 0, i64 %[[R]], i64 %[[C]]
%ptr = fir.coordinate_of %addr, %jv, %iv : (!fir.ref<!fir.array<10x10xi32>>, index, index) -> !fir.ref<i32>
%c22 = arith.constant 22 : i32
// CHECK: store i32 22,
diff --git a/flang/test/Fir/rebox-susbtring.fir b/flang/test/Fir/rebox-susbtring.fir
index 05c04877b73e5..a92e686162873 100644
--- a/flang/test/Fir/rebox-susbtring.fir
+++ b/flang/test/Fir/rebox-susbtring.fir
@@ -19,10 +19,9 @@ func.func @char_section(%arg0: !fir.box<!fir.array<?x!fir.char<1,20>>>) {
// Only test the computation of the base address offset computation accounting for the substring
// CHECK: %[[VAL_4:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: %[[VAL_7:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[VAL_30:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %[[VAL_37:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}%[[VAL_7]], 0] : (!llvm.ptr<[[char20_descriptor_t]]>)>>, i32) -> !llvm.ptr<ptr<array<20 x i8>>>
+// CHECK: %[[VAL_37:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, 0] : (!llvm.ptr<[[char20_descriptor_t]]>)>>) -> !llvm.ptr<ptr<array<20 x i8>>>
// CHECK: %[[VAL_38:.*]] = llvm.load %[[VAL_37]] : !llvm.ptr<ptr<array<20 x i8>>>
// CHECK: %[[VAL_39:.*]] = llvm.bitcast %[[VAL_38]] : !llvm.ptr<array<20 x i8>> to !llvm.ptr<array<20 x i8>>
// CHECK: %[[VAL_40:.*]] = llvm.getelementptr %[[VAL_39]]{{\[}}%[[VAL_30]], %[[VAL_4]]] : (!llvm.ptr<array<20 x i8>>, i64, i64) -> !llvm.ptr<i8>
@@ -52,11 +51,9 @@ func.func @foo(%arg0: !fir.box<!fir.array<?x!fir.type<t{i:i32,c:!fir.char<1,10>}
// Only test the computation of the base address offset computation accounting for the substring of the component
// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(1 : i32) : i32
-// CHECK: %[[VAL_4:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: %[[VAL_17:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[VAL_21:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK: %[[VAL_30:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}%[[VAL_17]], 0] : (!llvm.ptr<[[struct_t_descriptor:.*]]>, i32) -> !llvm.ptr<ptr<[[struct_t]]>>
+// CHECK: %[[VAL_30:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, 0] : (!llvm.ptr<[[struct_t_descriptor:.*]]>) -> !llvm.ptr<ptr<[[struct_t]]>>
// CHECK: %[[VAL_31:.*]] = llvm.load %[[VAL_30]] : !llvm.ptr<ptr<[[struct_t]]>>
// CHECK: %[[VAL_32:.*]] = llvm.bitcast %[[VAL_31]] : !llvm.ptr<[[struct_t]]> to !llvm.ptr<[[struct_t]]>
// CHECK: %[[VAL_33:.*]] = llvm.getelementptr %[[VAL_32]]{{\[}}%[[VAL_21]], 1, %[[VAL_4]]] : (!llvm.ptr<[[struct_t]]>, i64, i64) -> !llvm.ptr<i8>
diff --git a/flang/test/Lower/complex-part.f90 b/flang/test/Lower/complex-part.f90
index 896f17d91f171..48f58a6fe28db 100644
--- a/flang/test/Lower/complex-part.f90
+++ b/flang/test/Lower/complex-part.f90
@@ -8,5 +8,5 @@
! Verify that the offset in the struct does not regress from i32.
! CHECK-LABEL: define void @_QQmain()
-! CHECK: getelementptr { float, float }, ptr %{{[0-9]+}}, i64 0, i32 0
+! CHECK: getelementptr { float, float }, ptr %{{[0-9]+}}, i32 0, i32 0
diff --git a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
index 159a726cd9196..b923b8cec1e24 100644
--- a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
+++ b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
@@ -567,10 +567,8 @@ class RuntimeCreateOpLowering : public OpConversionPattern<RuntimeCreateOp> {
// %Size = getelementptr %T* null, int 1
// %SizeI = ptrtoint %T* %Size to i64
auto nullPtr = rewriter.create<LLVM::NullOp>(loc, storagePtrType);
- auto one = rewriter.create<LLVM::ConstantOp>(
- loc, i64, rewriter.getI64IntegerAttr(1));
auto gep = rewriter.create<LLVM::GEPOp>(loc, storagePtrType, nullPtr,
- one.getResult());
+ ArrayRef<LLVM::GEPArg>{1});
return rewriter.create<LLVM::PtrToIntOp>(loc, i64, gep);
};
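The hunk above is the sizeof-through-GEP idiom; with the index folded into the GEP, it reduces to three ops. A sketch under the same caveat (the free-standing helper and its names are illustrative):

    // Sketch: byte size of the pointee of `storagePtrType`, computed as
    // ptrtoint(gep null[1]). The index 1 is a constant GEP argument, so no
    // separate LLVM::ConstantOp is built. Same includes as the earlier sketch.
    static mlir::Value pointeeSizeInBytes(mlir::OpBuilder &builder,
                                          mlir::Location loc,
                                          mlir::Type storagePtrType,
                                          mlir::Type i64Ty) {
      auto nullPtr = builder.create<mlir::LLVM::NullOp>(loc, storagePtrType);
      auto gep = builder.create<mlir::LLVM::GEPOp>(
          loc, storagePtrType, nullPtr,
          llvm::ArrayRef<mlir::LLVM::GEPArg>{1});
      return builder.create<mlir::LLVM::PtrToIntOp>(loc, i64Ty, gep);
    }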
diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
index 85d1a5234b8f2..e115d9cd71da8 100644
--- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
+++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
@@ -82,12 +82,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
// Rewrite workgroup memory attributions to addresses of global buffers.
rewriter.setInsertionPointToStart(&gpuFuncOp.front());
unsigned numProperArguments = gpuFuncOp.getNumArguments();
- auto i32Type = IntegerType::get(rewriter.getContext(), 32);
- Value zero = nullptr;
- if (!workgroupBuffers.empty())
- zero = rewriter.create<LLVM::ConstantOp>(loc, i32Type,
- rewriter.getI32IntegerAttr(0));
for (const auto &en : llvm::enumerate(workgroupBuffers)) {
LLVM::GlobalOp global = en.value();
Value address = rewriter.create<LLVM::AddressOfOp>(loc, global);
@@ -95,7 +90,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
global.getType().cast<LLVM::LLVMArrayType>().getElementType();
Value memory = rewriter.create<LLVM::GEPOp>(
loc, LLVM::LLVMPointerType::get(elementType, global.getAddrSpace()),
- address, ArrayRef<Value>{zero, zero});
+ address, ArrayRef<LLVM::GEPArg>{0, 0});
// Build a memref descriptor pointing to the buffer to plug with the
// existing memref infrastructure. This may use more registers than
@@ -170,7 +165,6 @@ LogicalResult GPUPrintfOpToHIPLowering::matchAndRewrite(
mlir::Type llvmI8 = typeConverter->convertType(rewriter.getI8Type());
mlir::Type i8Ptr = LLVM::LLVMPointerType::get(llvmI8);
- mlir::Type llvmIndex = typeConverter->convertType(rewriter.getIndexType());
mlir::Type llvmI32 = typeConverter->convertType(rewriter.getI32Type());
mlir::Type llvmI64 = typeConverter->convertType(rewriter.getI64Type());
// Note: this is the GPUModule op, not the ModuleOp that surrounds it
@@ -226,10 +220,8 @@ LogicalResult GPUPrintfOpToHIPLowering::matchAndRewrite(
// Get a pointer to the format string's first element and pass it to printf()
Value globalPtr = rewriter.create<LLVM::AddressOfOp>(loc, global);
- Value zero = rewriter.create<LLVM::ConstantOp>(
- loc, llvmIndex, rewriter.getIntegerAttr(llvmIndex, 0));
Value stringStart = rewriter.create<LLVM::GEPOp>(
- loc, i8Ptr, globalPtr, mlir::ValueRange({zero, zero}));
+ loc, i8Ptr, globalPtr, ArrayRef<LLVM::GEPArg>{0, 0});
Value stringLen = rewriter.create<LLVM::ConstantOp>(
loc, llvmI64, rewriter.getI64IntegerAttr(formatStringSize));
@@ -289,7 +281,6 @@ LogicalResult GPUPrintfOpToLLVMCallLowering::matchAndRewrite(
mlir::Type llvmI8 = typeConverter->convertType(rewriter.getIntegerType(8));
mlir::Type i8Ptr = LLVM::LLVMPointerType::get(llvmI8, addressSpace);
- mlir::Type llvmIndex = typeConverter->convertType(rewriter.getIndexType());
// Note: this is the GPUModule op, not the ModuleOp that surrounds it
// This ensures that global constants and declarations are placed within
@@ -325,10 +316,8 @@ LogicalResult GPUPrintfOpToLLVMCallLowering::matchAndRewrite(
// Get a pointer to the format string's first element
Value globalPtr = rewriter.create<LLVM::AddressOfOp>(loc, global);
- Value zero = rewriter.create<LLVM::ConstantOp>(
- loc, llvmIndex, rewriter.getIntegerAttr(llvmIndex, 0));
Value stringStart = rewriter.create<LLVM::GEPOp>(
- loc, i8Ptr, globalPtr, mlir::ValueRange({zero, zero}));
+ loc, i8Ptr, globalPtr, ArrayRef<LLVM::GEPArg>{0, 0});
// Construct arguments and function call
auto argsRange = adaptor.args();
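Both printf lowerings above now take the address of the format-string global and index straight to its first character with two constant indices; the hand-built `zero` constants and the now-unused index type go away with them. A sketch, with the helper shape being illustrative:

    // Sketch: pointer to the first character of a global char array.
    // Both GEP indices are compile-time constants carried on the op itself.
    static mlir::Value formatStringStart(mlir::OpBuilder &builder,
                                         mlir::Location loc,
                                         mlir::LLVM::GlobalOp global,
                                         mlir::Type i8PtrTy) {
      mlir::Value globalPtr =
          builder.create<mlir::LLVM::AddressOfOp>(loc, global);
      return builder.create<mlir::LLVM::GEPOp>(
          loc, i8PtrTy, globalPtr, llvm::ArrayRef<mlir::LLVM::GEPArg>{0, 0});
    }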
diff --git a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
index 529efab558928..aaee8211ffd85 100644
--- a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
+++ b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
@@ -653,17 +653,14 @@ Value ConvertLaunchFuncOpToGpuRuntimeCallPattern::generateParamsArray(
loc, llvmInt32Type, builder.getI32IntegerAttr(numArguments));
auto arrayPtr = builder.create<LLVM::AllocaOp>(loc, llvmPointerPointerType,
arraySize, /*alignment=*/0);
- auto zero = builder.create<LLVM::ConstantOp>(loc, llvmInt32Type,
- builder.getI32IntegerAttr(0));
for (const auto &en : llvm::enumerate(arguments)) {
- auto index = builder.create<LLVM::ConstantOp>(
- loc, llvmInt32Type, builder.getI32IntegerAttr(en.index()));
auto fieldPtr = builder.create<LLVM::GEPOp>(
loc, LLVM::LLVMPointerType::get(argumentTypes[en.index()]), structPtr,
- ArrayRef<Value>{zero, index.getResult()});
+ ArrayRef<LLVM::GEPArg>{0, en.index()});
builder.create<LLVM::StoreOp>(loc, en.value(), fieldPtr);
- auto elementPtr = builder.create<LLVM::GEPOp>(loc, llvmPointerPointerType,
- arrayPtr, index.getResult());
+ auto elementPtr =
+ builder.create<LLVM::GEPOp>(loc, llvmPointerPointerType, arrayPtr,
+ ArrayRef<LLVM::GEPArg>{en.index()});
auto casted =
builder.create<LLVM::BitcastOp>(loc, llvmPointerType, fieldPtr);
builder.create<LLVM::StoreOp>(loc, casted, elementPtr);
@@ -811,8 +808,8 @@ LogicalResult ConvertMemcpyOpToGpuRuntimeCallPattern::matchAndRewrite(
Type elementPtrType = getElementPtrType(memRefType);
Value nullPtr = rewriter.create<LLVM::NullOp>(loc, elementPtrType);
- Value gepPtr = rewriter.create<LLVM::GEPOp>(loc, elementPtrType, nullPtr,
- ArrayRef<Value>{numElements});
+ Value gepPtr =
+ rewriter.create<LLVM::GEPOp>(loc, elementPtrType, nullPtr, numElements);
auto sizeBytes =
rewriter.create<LLVM::PtrToIntOp>(loc, getIndexType(), gepPtr);
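For readability, here is the generateParamsArray loop as it reads after this change, reassembled from the interleaved hunk above (nothing here goes beyond the patched code; `builder`, `structPtr`, `arrayPtr`, `argumentTypes` and the pointer types are the surrounding locals):

    // The host-side loop index doubles as the constant struct-field index for
    // each iteration; no per-argument LLVM::ConstantOp is created any more.
    for (const auto &en : llvm::enumerate(arguments)) {
      auto fieldPtr = builder.create<mlir::LLVM::GEPOp>(
          loc, mlir::LLVM::LLVMPointerType::get(argumentTypes[en.index()]),
          structPtr, llvm::ArrayRef<mlir::LLVM::GEPArg>{0, en.index()});
      builder.create<mlir::LLVM::StoreOp>(loc, en.value(), fieldPtr);
      auto elementPtr = builder.create<mlir::LLVM::GEPOp>(
          loc, llvmPointerPointerType, arrayPtr,
          llvm::ArrayRef<mlir::LLVM::GEPArg>{en.index()});
      auto casted =
          builder.create<mlir::LLVM::BitcastOp>(loc, llvmPointerType, fieldPtr);
      builder.create<mlir::LLVM::StoreOp>(loc, casted, elementPtr);
    }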
diff --git a/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp b/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp
index c9b25f738a009..df21c4281035d 100644
--- a/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp
+++ b/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp
@@ -138,7 +138,6 @@ Value MemRefDescriptor::size(OpBuilder &builder, Location loc, Value pos,
auto arrayPtrTy = LLVM::LLVMPointerType::get(arrayTy);
// Copy size values to stack-allocated memory.
- auto zero = createIndexAttrConstant(builder, loc, indexType, 0);
auto one = createIndexAttrConstant(builder, loc, indexType, 1);
auto sizes = builder.create<LLVM::ExtractValueOp>(
loc, arrayTy, value,
@@ -149,7 +148,7 @@ Value MemRefDescriptor::size(OpBuilder &builder, Location loc, Value pos,
// Load an return size value of interest.
auto resultPtr = builder.create<LLVM::GEPOp>(loc, indexPtrTy, sizesPtr,
- ValueRange({zero, pos}));
+ ArrayRef<LLVM::GEPArg>{0, pos});
return builder.create<LLVM::LoadOp>(loc, resultPtr);
}
@@ -402,10 +401,8 @@ Value UnrankedMemRefDescriptor::alignedPtr(OpBuilder &builder, Location loc,
Value elementPtrPtr =
builder.create<LLVM::BitcastOp>(loc, elemPtrPtrType, memRefDescPtr);
- Value one =
- createIndexAttrConstant(builder, loc, typeConverter.getIndexType(), 1);
Value alignedGep = builder.create<LLVM::GEPOp>(
- loc, elemPtrPtrType, elementPtrPtr, ValueRange({one}));
+ loc, elemPtrPtrType, elementPtrPtr, ArrayRef<LLVM::GEPArg>{1});
return builder.create<LLVM::LoadOp>(loc, alignedGep);
}
@@ -417,10 +414,8 @@ void UnrankedMemRefDescriptor::setAlignedPtr(OpBuilder &builder, Location loc,
Value elementPtrPtr =
builder.create<LLVM::BitcastOp>(loc, elemPtrPtrType, memRefDescPtr);
- Value one =
- createIndexAttrConstant(builder, loc, typeConverter.getIndexType(), 1);
Value alignedGep = builder.create<LLVM::GEPOp>(
- loc, elemPtrPtrType, elementPtrPtr, ValueRange({one}));
+ loc, elemPtrPtrType, elementPtrPtr, ArrayRef<LLVM::GEPArg>{1});
builder.create<LLVM::StoreOp>(loc, alignedPtr, alignedGep);
}
@@ -431,10 +426,8 @@ Value UnrankedMemRefDescriptor::offset(OpBuilder &builder, Location loc,
Value elementPtrPtr =
builder.create<LLVM::BitcastOp>(loc, elemPtrPtrType, memRefDescPtr);
- Value two =
- createIndexAttrConstant(builder, loc, typeConverter.getIndexType(), 2);
Value offsetGep = builder.create<LLVM::GEPOp>(
- loc, elemPtrPtrType, elementPtrPtr, ValueRange({two}));
+ loc, elemPtrPtrType, elementPtrPtr, ArrayRef<LLVM::GEPArg>{2});
offsetGep = builder.create<LLVM::BitcastOp>(
loc, LLVM::LLVMPointerType::get(typeConverter.getIndexType()), offsetGep);
return builder.create<LLVM::LoadOp>(loc, offsetGep);
@@ -447,10 +440,8 @@ void UnrankedMemRefDescriptor::setOffset(OpBuilder &builder, Location loc,
Value elementPtrPtr =
builder.create<LLVM::BitcastOp>(loc, elemPtrPtrType, memRefDescPtr);
- Value two =
- createIndexAttrConstant(builder, loc, typeConverter.getIndexType(), 2);
Value offsetGep = builder.create<LLVM::GEPOp>(
- loc, elemPtrPtrType, elementPtrPtr, ValueRange({two}));
+ loc, elemPtrPtrType, elementPtrPtr, ArrayRef<LLVM::GEPArg>{2});
offsetGep = builder.create<LLVM::BitcastOp>(
loc, LLVM::LLVMPointerType::get(typeConverter.getIndexType()), offsetGep);
builder.create<LLVM::StoreOp>(loc, offset, offsetGep);
@@ -467,21 +458,16 @@ Value UnrankedMemRefDescriptor::sizeBasePtr(
Value structPtr =
builder.create<LLVM::BitcastOp>(loc, structPtrTy, memRefDescPtr);
- Type int32Type = typeConverter.convertType(builder.getI32Type());
- Value zero =
- createIndexAttrConstant(builder, loc, typeConverter.getIndexType(), 0);
- Value three = builder.create<LLVM::ConstantOp>(loc, int32Type,
- builder.getI32IntegerAttr(3));
return builder.create<LLVM::GEPOp>(loc, LLVM::LLVMPointerType::get(indexTy),
- structPtr, ValueRange({zero, three}));
+ structPtr, ArrayRef<LLVM::GEPArg>{0, 3});
}
Value UnrankedMemRefDescriptor::size(OpBuilder &builder, Location loc,
LLVMTypeConverter &typeConverter,
Value sizeBasePtr, Value index) {
Type indexPtrTy = LLVM::LLVMPointerType::get(typeConverter.getIndexType());
- Value sizeStoreGep = builder.create<LLVM::GEPOp>(loc, indexPtrTy, sizeBasePtr,
- ValueRange({index}));
+ Value sizeStoreGep =
+ builder.create<LLVM::GEPOp>(loc, indexPtrTy, sizeBasePtr, index);
return builder.create<LLVM::LoadOp>(loc, sizeStoreGep);
}
@@ -490,8 +476,8 @@ void UnrankedMemRefDescriptor::setSize(OpBuilder &builder, Location loc,
Value sizeBasePtr, Value index,
Value size) {
Type indexPtrTy = LLVM::LLVMPointerType::get(typeConverter.getIndexType());
- Value sizeStoreGep = builder.create<LLVM::GEPOp>(loc, indexPtrTy, sizeBasePtr,
- ValueRange({index}));
+ Value sizeStoreGep =
+ builder.create<LLVM::GEPOp>(loc, indexPtrTy, sizeBasePtr, index);
builder.create<LLVM::StoreOp>(loc, size, sizeStoreGep);
}
@@ -499,8 +485,7 @@ Value UnrankedMemRefDescriptor::strideBasePtr(OpBuilder &builder, Location loc,
LLVMTypeConverter &typeConverter,
Value sizeBasePtr, Value rank) {
Type indexPtrTy = LLVM::LLVMPointerType::get(typeConverter.getIndexType());
- return builder.create<LLVM::GEPOp>(loc, indexPtrTy, sizeBasePtr,
- ValueRange({rank}));
+ return builder.create<LLVM::GEPOp>(loc, indexPtrTy, sizeBasePtr, rank);
}
Value UnrankedMemRefDescriptor::stride(OpBuilder &builder, Location loc,
@@ -508,8 +493,8 @@ Value UnrankedMemRefDescriptor::stride(OpBuilder &builder, Location loc,
Value strideBasePtr, Value index,
Value stride) {
Type indexPtrTy = LLVM::LLVMPointerType::get(typeConverter.getIndexType());
- Value strideStoreGep = builder.create<LLVM::GEPOp>(
- loc, indexPtrTy, strideBasePtr, ValueRange({index}));
+ Value strideStoreGep =
+ builder.create<LLVM::GEPOp>(loc, indexPtrTy, strideBasePtr, index);
return builder.create<LLVM::LoadOp>(loc, strideStoreGep);
}
@@ -518,7 +503,7 @@ void UnrankedMemRefDescriptor::setStride(OpBuilder &builder, Location loc,
Value strideBasePtr, Value index,
Value stride) {
Type indexPtrTy = LLVM::LLVMPointerType::get(typeConverter.getIndexType());
- Value strideStoreGep = builder.create<LLVM::GEPOp>(
- loc, indexPtrTy, strideBasePtr, ValueRange({index}));
+ Value strideStoreGep =
+ builder.create<LLVM::GEPOp>(loc, indexPtrTy, strideBasePtr, index);
builder.create<LLVM::StoreOp>(loc, stride, strideStoreGep);
}
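The unranked-descriptor helpers above show the remaining flavor of the new builder: a single dynamic index can be passed as a bare Value, with no ValueRange wrapper. A sketch (the helper name is illustrative; it reads one entry of an index array in memory):

    // Sketch: read entry `idx` of an array of index-typed values at `basePtr`,
    // passing the dynamic index directly to the GEP builder.
    static mlir::Value loadIndexEntry(mlir::OpBuilder &builder,
                                      mlir::Location loc, mlir::Type indexTy,
                                      mlir::Value basePtr, mlir::Value idx) {
      auto indexPtrTy = mlir::LLVM::LLVMPointerType::get(indexTy);
      mlir::Value gep =
          builder.create<mlir::LLVM::GEPOp>(loc, indexPtrTy, basePtr, idx);
      return builder.create<mlir::LLVM::LoadOp>(loc, gep);
    }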
diff --git a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
index 7c99402cc62c7..b6288f5c0717b 100644
--- a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
+++ b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
@@ -163,8 +163,8 @@ void ConvertToLLVMPattern::getMemRefDescriptorSizes(
// Buffer size in bytes.
Type elementPtrType = getElementPtrType(memRefType);
Value nullPtr = rewriter.create<LLVM::NullOp>(loc, elementPtrType);
- Value gepPtr = rewriter.create<LLVM::GEPOp>(loc, elementPtrType, nullPtr,
- ArrayRef<Value>{runningStride});
+ Value gepPtr =
+ rewriter.create<LLVM::GEPOp>(loc, elementPtrType, nullPtr, runningStride);
sizeBytes = rewriter.create<LLVM::PtrToIntOp>(loc, getIndexType(), gepPtr);
}
@@ -178,9 +178,8 @@ Value ConvertToLLVMPattern::getSizeInBytes(
auto convertedPtrType =
LLVM::LLVMPointerType::get(typeConverter->convertType(type));
auto nullPtr = rewriter.create<LLVM::NullOp>(loc, convertedPtrType);
- auto gep = rewriter.create<LLVM::GEPOp>(
- loc, convertedPtrType, nullPtr,
- ArrayRef<Value>{createIndexConstant(rewriter, loc, 1)});
+ auto gep = rewriter.create<LLVM::GEPOp>(loc, convertedPtrType, nullPtr,
+ ArrayRef<LLVM::GEPArg>{1});
return rewriter.create<LLVM::PtrToIntOp>(loc, getIndexType(), gep);
}
diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
index 884931770fdee..18747df79e47b 100644
--- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
+++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
@@ -389,19 +389,15 @@ struct DimOpLowering : public ConvertOpToLLVMPattern<memref::DimOp> {
// Get pointer to offset field of memref<element_type> descriptor.
Type indexPtrTy = LLVM::LLVMPointerType::get(
getTypeConverter()->getIndexType(), addressSpace);
- Value two = rewriter.create<LLVM::ConstantOp>(
- loc, typeConverter->convertType(rewriter.getI32Type()),
- rewriter.getI32IntegerAttr(2));
Value offsetPtr = rewriter.create<LLVM::GEPOp>(
- loc, indexPtrTy, scalarMemRefDescPtr,
- ValueRange({createIndexConstant(rewriter, loc, 0), two}));
+ loc, indexPtrTy, scalarMemRefDescPtr, ArrayRef<LLVM::GEPArg>{0, 2});
// The size value that we have to extract can be obtained using GEPop with
// `dimOp.index() + 1` index argument.
Value idxPlusOne = rewriter.create<LLVM::AddOp>(
loc, createIndexConstant(rewriter, loc, 1), adaptor.getIndex());
- Value sizePtr = rewriter.create<LLVM::GEPOp>(loc, indexPtrTy, offsetPtr,
- ValueRange({idxPlusOne}));
+ Value sizePtr =
+ rewriter.create<LLVM::GEPOp>(loc, indexPtrTy, offsetPtr, idxPlusOne);
return rewriter.create<LLVM::LoadOp>(loc, sizePtr);
}
@@ -664,11 +660,9 @@ struct GetGlobalMemrefOpLowering : public AllocLikeOpLLVMLowering {
Type elementType = typeConverter->convertType(type.getElementType());
Type elementPtrType = LLVM::LLVMPointerType::get(elementType, memSpace);
- SmallVector<Value> operands;
- operands.insert(operands.end(), type.getRank() + 1,
- createIndexConstant(rewriter, loc, 0));
- auto gep =
- rewriter.create<LLVM::GEPOp>(loc, elementPtrType, addressOf, operands);
+ auto gep = rewriter.create<LLVM::GEPOp>(
+ loc, elementPtrType, addressOf,
+ SmallVector<LLVM::GEPArg>(type.getRank() + 1, 0));
// We do not expect the memref obtained using `memref.get_global` to be
// ever deallocated. Set the allocated pointer to be known bad value to
@@ -1286,8 +1280,8 @@ struct MemRefReshapeOpLowering
// Copy size from shape to descriptor.
Type llvmIndexPtrType = LLVM::LLVMPointerType::get(indexType);
- Value sizeLoadGep = rewriter.create<LLVM::GEPOp>(
- loc, llvmIndexPtrType, shapeOperandPtr, ValueRange{indexArg});
+ Value sizeLoadGep = rewriter.create<LLVM::GEPOp>(loc, llvmIndexPtrType,
+ shapeOperandPtr, indexArg);
Value size = rewriter.create<LLVM::LoadOp>(loc, sizeLoadGep);
UnrankedMemRefDescriptor::setSize(rewriter, loc, *getTypeConverter(),
targetSizesBase, indexArg, size);
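One more convenience, visible in the GetGlobalMemrefOpLowering hunk above: when every index is the constant zero, the whole index list can be built in one expression instead of materializing rank+1 zero constants. The lines below restate that hunk (assuming `rewriter`, `loc`, `addressOf`, `elementPtrType` and the ranked MemRefType `type` from the surrounding code; llvm/ADT/SmallVector.h provides SmallVector):

    // gep addressOf[0, 0, ..., 0] with rank+1 constant zero indices.
    auto gep = rewriter.create<mlir::LLVM::GEPOp>(
        loc, elementPtrType, addressOf,
        llvm::SmallVector<mlir::LLVM::GEPArg>(type.getRank() + 1, 0));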
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index 33fe8902b977b..8ff803f0fd7d5 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -2981,12 +2981,9 @@ Value mlir::LLVM::createGlobalString(Location loc, OpBuilder &builder,
// Get the pointer to the first character in the global string.
Value globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
- Value cst0 = builder.create<LLVM::ConstantOp>(
- loc, IntegerType::get(ctx, 64),
- builder.getIntegerAttr(builder.getIndexType(), 0));
return builder.create<LLVM::GEPOp>(
loc, LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8)), globalPtr,
- ValueRange{cst0, cst0});
+ ArrayRef<GEPArg>{0, 0});
}
bool mlir::LLVM::satisfiesLLVMModule(Operation *op) {
diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
index 8784397fa1f5c..5929ab0f0062d 100644
--- a/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
+++ b/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
@@ -10,8 +10,7 @@ func.func @create_token() {
// CHECK-LABEL: @create_value
func.func @create_value() {
// CHECK: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr<f32>
- // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i64) : i64
- // CHECK: %[[OFFSET:.*]] = llvm.getelementptr %[[NULL]][%[[ONE]]]
+ // CHECK: %[[OFFSET:.*]] = llvm.getelementptr %[[NULL]][1]
// CHECK: %[[SIZE:.*]] = llvm.ptrtoint %[[OFFSET]]
// CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue(%[[SIZE]])
%0 = async.runtime.create : !async.value<f32>
diff --git a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
index b11d15712e83e..0ee55981b47a2 100644
--- a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
@@ -32,8 +32,7 @@ module attributes {gpu.container_module} {
// CHECK-DAG: [[C256:%.*]] = llvm.mlir.constant(256 : i32) : i32
// CHECK-DAG: [[C8:%.*]] = llvm.mlir.constant(8 : index) : i64
// CHECK: [[ADDRESSOF:%.*]] = llvm.mlir.addressof @[[GLOBAL]]
- // CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : index)
- // CHECK: [[BINARY:%.*]] = llvm.getelementptr [[ADDRESSOF]]{{\[}}[[C0]], [[C0]]]
+ // CHECK: [[BINARY:%.*]] = llvm.getelementptr [[ADDRESSOF]]{{\[}}0, 0]
// CHECK-SAME: -> !llvm.ptr<i8>
// CHECK: [[MODULE:%.*]] = llvm.call @mgpuModuleLoad([[BINARY]])
diff --git a/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir b/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
index 194bc91661e77..9c94e9c14dea2 100644
--- a/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
+++ b/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
@@ -67,14 +67,12 @@ gpu.module @kernel {
// ROCDL-SAME: {
gpu.func @workgroup(%arg0: f32) workgroup(%arg1: memref<4xf32, 3>) {
// Get the address of the first element in the global array.
- // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
// NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<4 x f32>, 3>
- // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
+ // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][0, 0]
// NVVM-SAME: !llvm.ptr<f32, 3>
- // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
// ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<4 x f32>, 3>
- // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
+ // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][0, 0]
// ROCDL-SAME: !llvm.ptr<f32, 3>
// Populate the memref descriptor.
@@ -130,14 +128,12 @@ gpu.module @kernel {
// ROCDL-LABEL: llvm.func @workgroup3d
gpu.func @workgroup3d(%arg0: f32) workgroup(%arg1: memref<4x2x6xf32, 3>) {
// Get the address of the first element in the global array.
- // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
// NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<48 x f32>, 3>
- // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
+ // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][0, 0]
// NVVM-SAME: !llvm.ptr<f32, 3>
- // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
// ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<48 x f32>, 3>
- // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
+ // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][0, 0]
// ROCDL-SAME: !llvm.ptr<f32, 3>
// Populate the memref descriptor.
diff --git a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-hip.mlir b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-hip.mlir
index b2efa495fede4..43618a736bfaf 100644
--- a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-hip.mlir
+++ b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-hip.mlir
@@ -12,8 +12,7 @@ gpu.module @test_module {
// CHECK: %[[CST0:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK-NEXT: %[[DESC0:.*]] = llvm.call @__ockl_printf_begin(%0) : (i64) -> i64
// CHECK-NEXT: %[[FORMATSTR:.*]] = llvm.mlir.addressof @[[$PRINT_GLOBAL0]] : !llvm.ptr<array<14 x i8>>
- // CHECK-NEXT: %[[CST1:.*]] = llvm.mlir.constant(0 : i64) : i64
- // CHECK-NEXT: %[[FORMATSTART:.*]] = llvm.getelementptr %[[FORMATSTR]][%[[CST1]], %[[CST1]]] : (!llvm.ptr<array<14 x i8>>, i64, i64) -> !llvm.ptr<i8>
+ // CHECK-NEXT: %[[FORMATSTART:.*]] = llvm.getelementptr %[[FORMATSTR]][0, 0] : (!llvm.ptr<array<14 x i8>>) -> !llvm.ptr<i8>
// CHECK-NEXT: %[[FORMATLEN:.*]] = llvm.mlir.constant(14 : i64) : i64
// CHECK-NEXT: %[[ISLAST:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK-NEXT: %[[ISNTLAST:.*]] = llvm.mlir.constant(0 : i32) : i32
@@ -29,8 +28,7 @@ gpu.module @test_module {
// CHECK: %[[CST0:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK-NEXT: %[[DESC0:.*]] = llvm.call @__ockl_printf_begin(%0) : (i64) -> i64
// CHECK-NEXT: %[[FORMATSTR:.*]] = llvm.mlir.addressof @[[$PRINT_GLOBAL1]] : !llvm.ptr<array<11 x i8>>
- // CHECK-NEXT: %[[CST1:.*]] = llvm.mlir.constant(0 : i64) : i64
- // CHECK-NEXT: %[[FORMATSTART:.*]] = llvm.getelementptr %[[FORMATSTR]][%[[CST1]], %[[CST1]]] : (!llvm.ptr<array<11 x i8>>, i64, i64) -> !llvm.ptr<i8>
+ // CHECK-NEXT: %[[FORMATSTART:.*]] = llvm.getelementptr %[[FORMATSTR]][0, 0] : (!llvm.ptr<array<11 x i8>>) -> !llvm.ptr<i8>
// CHECK-NEXT: %[[FORMATLEN:.*]] = llvm.mlir.constant(11 : i64) : i64
// CHECK-NEXT: %[[ISLAST:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK-NEXT: %[[ISNTLAST:.*]] = llvm.mlir.constant(0 : i32) : i32
diff --git a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-opencl.mlir b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-opencl.mlir
index 8e5af9dff5a31..987768a350e80 100644
--- a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-opencl.mlir
+++ b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-opencl.mlir
@@ -7,8 +7,7 @@ gpu.module @test_module {
// CHECK: (%[[ARG0:.*]]: i32)
gpu.func @test_printf(%arg0: i32) {
// CHECK: %[[IMM0:.*]] = llvm.mlir.addressof @[[$PRINT_GLOBAL]] : !llvm.ptr<array<11 x i8>, 4>
- // CHECK-NEXT: %[[IMM1:.*]] = llvm.mlir.constant(0 : i64) : i64
- // CHECK-NEXT: %[[IMM2:.*]] = llvm.getelementptr %[[IMM0]][%[[IMM1]], %[[IMM1]]] : (!llvm.ptr<array<11 x i8>, 4>, i64, i64) -> !llvm.ptr<i8, 4>
+ // CHECK-NEXT: %[[IMM2:.*]] = llvm.getelementptr %[[IMM0]][0, 0] : (!llvm.ptr<array<11 x i8>, 4>) -> !llvm.ptr<i8, 4>
// CHECK-NEXT: %{{.*}} = llvm.call @printf(%[[IMM2]], %[[ARG0]]) : (!llvm.ptr<i8, 4>, i32) -> i32
gpu.printf "Hello: %d\n" %arg0 : i32
gpu.return
diff --git a/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir b/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
index 639d89976baf4..74749e7221a96 100644
--- a/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
@@ -387,12 +387,11 @@ func.func @mixed_memref_dim(%mixed : memref<42x?x?x13x?xf32>) {
// CHECK: %{{.*}}, %[[IDXarg:.*]]: index
func.func @memref_dim_with_dyn_index(%arg : memref<3x?xf32>, %idx : index) -> index {
// CHECK-DAG: %[[IDX:.*]] = builtin.unrealized_conversion_cast %[[IDXarg]]
- // CHECK-DAG: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK-DAG: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK-DAG: %[[SIZES:.*]] = llvm.extractvalue %{{.*}}[3] : ![[DESCR_TY:.*]]
// CHECK-DAG: %[[SIZES_PTR:.*]] = llvm.alloca %[[C1]] x !llvm.array<2 x i64> : (i64) -> !llvm.ptr<array<2 x i64>>
// CHECK-DAG: llvm.store %[[SIZES]], %[[SIZES_PTR]] : !llvm.ptr<array<2 x i64>>
- // CHECK-DAG: %[[RESULT_PTR:.*]] = llvm.getelementptr %[[SIZES_PTR]][%[[C0]], %[[IDX]]] : (!llvm.ptr<array<2 x i64>>, i64, i64) -> !llvm.ptr<i64>
+ // CHECK-DAG: %[[RESULT_PTR:.*]] = llvm.getelementptr %[[SIZES_PTR]][0, %[[IDX]]] : (!llvm.ptr<array<2 x i64>>, i64) -> !llvm.ptr<i64>
// CHECK-DAG: %[[RESULT:.*]] = llvm.load %[[RESULT_PTR]] : !llvm.ptr<i64>
%result = memref.dim %arg, %idx : memref<3x?xf32>
return %result : index
@@ -454,9 +453,8 @@ func.func @memref_reinterpret_cast_unranked_to_dynamic_shape(%offset: index,
// CHECK: [[BASE_PTR_PTR:%.*]] = llvm.bitcast [[DESCRIPTOR]] : !llvm.ptr<i8> to !llvm.ptr<ptr<f32>>
// CHECK: [[BASE_PTR:%.*]] = llvm.load [[BASE_PTR_PTR]] : !llvm.ptr<ptr<f32>>
// CHECK: [[BASE_PTR_PTR_:%.*]] = llvm.bitcast [[DESCRIPTOR]] : !llvm.ptr<i8> to !llvm.ptr<ptr<f32>>
-// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : i64
-// CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}[[C1]]]
-// CHECK-SAME: : (!llvm.ptr<ptr<f32>>, i64) -> !llvm.ptr<ptr<f32>>
+// CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}1]
+// CHECK-SAME: : (!llvm.ptr<ptr<f32>>) -> !llvm.ptr<ptr<f32>>
// CHECK: [[ALIGNED_PTR:%.*]] = llvm.load [[ALIGNED_PTR_PTR]] : !llvm.ptr<ptr<f32>>
// CHECK: [[OUT_1:%.*]] = llvm.insertvalue [[BASE_PTR]], [[OUT_0]][0] : [[TY]]
// CHECK: [[OUT_2:%.*]] = llvm.insertvalue [[ALIGNED_PTR]], [[OUT_1]][1] : [[TY]]
@@ -498,20 +496,17 @@ func.func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
// CHECK-SAME: !llvm.ptr<i8> to !llvm.ptr<ptr<f32>>
// CHECK: llvm.store [[ALLOC_PTR]], [[BASE_PTR_PTR]] : !llvm.ptr<ptr<f32>>
// CHECK: [[BASE_PTR_PTR_:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] : !llvm.ptr<i8> to !llvm.ptr<ptr<f32>>
-// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : i64
-// CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}[[C1]]]
+// CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}1]
// CHECK: llvm.store [[ALIGN_PTR]], [[ALIGNED_PTR_PTR]] : !llvm.ptr<ptr<f32>>
// CHECK: [[BASE_PTR_PTR__:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] : !llvm.ptr<i8> to !llvm.ptr<ptr<f32>>
-// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : index) : i64
-// CHECK: [[OFFSET_PTR_:%.*]] = llvm.getelementptr [[BASE_PTR_PTR__]]{{\[}}[[C2]]]
+// CHECK: [[OFFSET_PTR_:%.*]] = llvm.getelementptr [[BASE_PTR_PTR__]]{{\[}}2]
// CHECK: [[OFFSET_PTR:%.*]] = llvm.bitcast [[OFFSET_PTR_]]
// CHECK: llvm.store [[OFFSET]], [[OFFSET_PTR]] : !llvm.ptr<i64>
// Iterate over shape operand in reverse order and set sizes and strides.
// CHECK: [[STRUCT_PTR:%.*]] = llvm.bitcast [[UNDERLYING_DESC]]
// CHECK-SAME: !llvm.ptr<i8> to !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, i64)>>
-// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : index) : i64
-// CHECK: [[SIZES_PTR:%.*]] = llvm.getelementptr [[STRUCT_PTR]]{{\[}}[[C0]], 3]
+// CHECK: [[SIZES_PTR:%.*]] = llvm.getelementptr [[STRUCT_PTR]]{{\[}}0, 3]
// CHECK: [[STRIDES_PTR:%.*]] = llvm.getelementptr [[SIZES_PTR]]{{\[}}[[RANK]]]
// CHECK: [[SHAPE_IN_PTR:%.*]] = llvm.extractvalue [[SHAPE]][1] : [[SHAPE_TY]]
// CHECK: [[C1_:%.*]] = llvm.mlir.constant(1 : index) : i64
diff --git a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
index 0822852d5c99e..0a50960f7903b 100644
--- a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
@@ -570,11 +570,9 @@ func.func @dim_of_unranked(%unranked: memref<*xi32>) -> index {
// CHECK: %[[ZERO_D_DESC:.*]] = llvm.bitcast %[[RANKED_DESC]]
// CHECK-SAME: : !llvm.ptr<i8> to !llvm.ptr<struct<(ptr<i32>, ptr<i32>, i64)>>
-// CHECK: %[[C0_:.*]] = llvm.mlir.constant(0 : index) : i64
-
// CHECK: %[[OFFSET_PTR:.*]] = llvm.getelementptr %[[ZERO_D_DESC]]{{\[}}
-// CHECK-SAME: %[[C0_]], 2] : (!llvm.ptr<struct<(ptr<i32>, ptr<i32>,
-// CHECK-SAME: i64)>>, i64) -> !llvm.ptr<i64>
+// CHECK-SAME: 0, 2] : (!llvm.ptr<struct<(ptr<i32>, ptr<i32>,
+// CHECK-SAME: i64)>>) -> !llvm.ptr<i64>
// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[INDEX_INC:.*]] = llvm.add %[[C1]], %{{.*}} : i64
@@ -636,8 +634,7 @@ func.func @get_gv0_memref() {
// CHECK: %[[DIM:.*]] = llvm.mlir.constant(2 : index) : i64
// CHECK: %[[STRIDE:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv0 : !llvm.ptr<array<2 x f32>>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]]] : (!llvm.ptr<array<2 x f32>>, i64, i64) -> !llvm.ptr<f32>
+ // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][0, 0] : (!llvm.ptr<array<2 x f32>>) -> !llvm.ptr<f32>
// CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
// CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr<f32>
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
@@ -657,8 +654,7 @@ func.func @get_gv2_memref() {
// CHECK: %[[DIM1:.*]] = llvm.mlir.constant(3 : index) : i64
// CHECK: %[[STRIDE1:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv2 : !llvm.ptr<array<2 x array<3 x f32>>>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]], %[[ZERO]]] : (!llvm.ptr<array<2 x array<3 x f32>>>, i64, i64, i64) -> !llvm.ptr<f32>
+ // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][0, 0, 0] : (!llvm.ptr<array<2 x array<3 x f32>>>) -> !llvm.ptr<f32>
// CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
// CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr<f32>
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -682,8 +678,7 @@ memref.global @gv3 : memref<f32> = dense<1.0>
// CHECK-LABEL: func @get_gv3_memref
func.func @get_gv3_memref() {
// CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv3 : !llvm.ptr<f32>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+ // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][0] : (!llvm.ptr<f32>) -> !llvm.ptr<f32>
// CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
// CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr<f32>
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
@@ -1029,8 +1024,7 @@ func.func @memref_copy_ranked() {
// CHECK: [[EXTRACT0:%.*]] = llvm.extractvalue {{%.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[MUL:%.*]] = llvm.mul [[ONE]], [[EXTRACT0]] : i64
// CHECK: [[NULL:%.*]] = llvm.mlir.null : !llvm.ptr<f32>
- // CHECK: [[ONE2:%.*]] = llvm.mlir.constant(1 : index) : i64
- // CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][[[ONE2]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+ // CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][1] : (!llvm.ptr<f32>) -> !llvm.ptr<f32>
// CHECK: [[PTRTOINT:%.*]] = llvm.ptrtoint [[GEP]] : !llvm.ptr<f32> to i64
// CHECK: [[SIZE:%.*]] = llvm.mul [[MUL]], [[PTRTOINT]] : i64
// CHECK: [[EXTRACT1P:%.*]] = llvm.extractvalue {{%.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
@@ -1058,8 +1052,7 @@ func.func @memref_copy_contiguous(%in: memref<16x2xi32>, %offset: index) {
// CHECK: [[EXTRACT1:%.*]] = llvm.extractvalue {{%.*}}[3, 1] : !llvm.struct<(ptr<i32>, ptr<i32>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: [[MUL2:%.*]] = llvm.mul [[MUL1]], [[EXTRACT1]] : i64
// CHECK: [[NULL:%.*]] = llvm.mlir.null : !llvm.ptr<i32>
- // CHECK: [[ONE2:%.*]] = llvm.mlir.constant(1 : index) : i64
- // CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][[[ONE2]]] : (!llvm.ptr<i32>, i64) -> !llvm.ptr<i32>
+ // CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][1] : (!llvm.ptr<i32>) -> !llvm.ptr<i32>
// CHECK: [[PTRTOINT:%.*]] = llvm.ptrtoint [[GEP]] : !llvm.ptr<i32> to i64
// CHECK: [[SIZE:%.*]] = llvm.mul [[MUL2]], [[PTRTOINT]] : i64
// CHECK: [[EXTRACT1P:%.*]] = llvm.extractvalue {{%.*}}[1] : !llvm.struct<(ptr<i32>, ptr<i32>, i64, array<2 x i64>, array<2 x i64>)>
diff --git a/mlir/test/Target/LLVMIR/Import/incorrect-constant-caching.ll b/mlir/test/Target/LLVMIR/Import/incorrect-constant-caching.ll
index 411a8adf98df4..a4add0ea95414 100644
--- a/mlir/test/Target/LLVMIR/Import/incorrect-constant-caching.ll
+++ b/mlir/test/Target/LLVMIR/Import/incorrect-constant-caching.ll
@@ -8,16 +8,12 @@
; only wrote minimum level of checks.
%my_struct = type {i32, i8*}
-; CHECK: llvm.mlir.constant(0 : i32) : i32
-; CHECK: llvm.mlir.constant(0 : i32) : i32
; CHECK: llvm.mlir.addressof @str1 : !llvm.ptr<array<5 x i8>>
; CHECK: llvm.getelementptr
; CHECK: llvm.mlir.constant(7 : i32) : i32
; CHECK: llvm.mlir.undef : !llvm.struct<"my_struct", (i32, ptr<i8>)>
; CHECK: llvm.insertvalue
; CHECK: llvm.insertvalue
-; CHECK: llvm.mlir.constant(0 : i32) : i32
-; CHECK: llvm.mlir.constant(0 : i32) : i32
; CHECK: llvm.mlir.addressof @str0 : !llvm.ptr<array<5 x i8>>
; CHECK: llvm.getelementptr
; CHECK: llvm.mlir.constant(8 : i32) : i32