[llvm-branch-commits] [mlir] 2230bf9 - [mlir] replace LLVMIntegerType with built-in integer type
Alex Zinenko via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Thu Jan 7 10:52:44 PST 2021
Author: Alex Zinenko
Date: 2021-01-07T19:48:31+01:00
New Revision: 2230bf99c712a1e9cb3a9f423998aa8727b389fc
URL: https://github.com/llvm/llvm-project/commit/2230bf99c712a1e9cb3a9f423998aa8727b389fc
DIFF: https://github.com/llvm/llvm-project/commit/2230bf99c712a1e9cb3a9f423998aa8727b389fc.diff
LOG: [mlir] replace LLVMIntegerType with built-in integer type
The LLVM dialect type system has been closed until now, i.e. did not support
types from other dialects inside containers. While this has had obvious
benefits of deriving from a common base class, it has led to some simple types
being almost identical with the built-in types, namely integer and floating
point types. This in turn has led to a lot of larger-scale complexity: simple
types must still be converted, numerous operations that correspond to LLVM IR
intrinsics are replicated to produce versions operating on either LLVM dialect
or built-in types leading to quasi-duplicate dialects, lowering to the LLVM
dialect is essentially required to be one-shot because of type conversion, etc.
In this light, it is reasonable to trade off some local complexity in the
internal implementation of LLVM dialect types for removing larger-scale system
complexity. Previous commits to the LLVM dialect type system have adapted the
API to support types from other dialects.
Replace LLVMIntegerType with the built-in IntegerType plus additional checks
that such types are signless (these are isolated in a utility function that
replaced `isa<LLVMType>` and in the parser). Temporarily keep the possibility
to parse `!llvm.i32` as a synonym for `i32`, but add a deprecation notice.
Reviewed By: mehdi_amini, silvas, antiagainst
Differential Revision: https://reviews.llvm.org/D94178
Added:
Modified:
mlir/docs/ConversionToLLVMDialect.md
mlir/docs/Dialects/LLVM.md
mlir/docs/LLVMDialectMemRefConvention.md
mlir/docs/SPIRVToLLVMDialectConversion.md
mlir/docs/Tutorials/Toy/Ch-6.md
mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
mlir/integration_test/Dialect/LLVMIR/CPU/X86/test-inline-asm.mlir
mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir
mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
mlir/lib/Conversion/GPUCommon/ConvertLaunchFuncToRuntimeCalls.cpp
mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h
mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h
mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
mlir/lib/Dialect/LLVMIR/IR/TypeDetail.h
mlir/lib/ExecutionEngine/JitRunner.cpp
mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
mlir/lib/Target/LLVMIR/TypeTranslation.cpp
mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir
mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir
mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir
mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir
mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir
mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir
mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir
mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir
mlir/test/Conversion/StandardToLLVM/calling-convention.mlir
mlir/test/Conversion/StandardToLLVM/convert-argattrs.mlir
mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir
mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
mlir/test/Dialect/GPU/outlining.mlir
mlir/test/Dialect/LLVMIR/dialect-cast.mlir
mlir/test/Dialect/LLVMIR/func.mlir
mlir/test/Dialect/LLVMIR/global.mlir
mlir/test/Dialect/LLVMIR/invalid.mlir
mlir/test/Dialect/LLVMIR/legalize-for-export.mlir
mlir/test/Dialect/LLVMIR/nvvm.mlir
mlir/test/Dialect/LLVMIR/rocdl.mlir
mlir/test/Dialect/LLVMIR/roundtrip.mlir
mlir/test/Dialect/LLVMIR/terminator.mlir
mlir/test/Dialect/LLVMIR/types-invalid.mlir
mlir/test/Dialect/LLVMIR/types.mlir
mlir/test/Dialect/Linalg/llvm.mlir
mlir/test/Dialect/OpenMP/ops.mlir
mlir/test/Dialect/SPIRV/IR/types.mlir
mlir/test/Target/arm-sve.mlir
mlir/test/Target/avx512.mlir
mlir/test/Target/import.ll
mlir/test/Target/llvmir-intrinsics.mlir
mlir/test/Target/llvmir-types.mlir
mlir/test/Target/llvmir.mlir
mlir/test/Target/nvvmir.mlir
mlir/test/Target/openmp-llvm.mlir
mlir/test/Target/rocdl.mlir
mlir/test/Transforms/test-convert-call-op.mlir
mlir/test/lib/Transforms/TestConvertCallOp.cpp
mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir
mlir/test/mlir-cpu-runner/simple.mlir
Removed:
################################################################################
diff --git a/mlir/docs/ConversionToLLVMDialect.md b/mlir/docs/ConversionToLLVMDialect.md
index 55b62ff70376..db84e9c5be74 100644
--- a/mlir/docs/ConversionToLLVMDialect.md
+++ b/mlir/docs/ConversionToLLVMDialect.md
@@ -35,7 +35,7 @@ following conversions are currently implemented:
Index type is converted to an LLVM dialect integer type with bitwidth equal to
the bitwidth of the pointer size as specified by the
[data layout](Dialects/LLVM.md#data-layout-and-triple) of the closest module.
-For example, on x86-64 CPUs it converts to `!llvm.i64`. This behavior can be
+For example, on x86-64 CPUs it converts to `i64`. This behavior can be
overridden by the type converter configuration, which is often exposed as a pass
option by conversion passes.
diff --git a/mlir/docs/Dialects/LLVM.md b/mlir/docs/Dialects/LLVM.md
index 0f5b69104775..666f693c453b 100644
--- a/mlir/docs/Dialects/LLVM.md
+++ b/mlir/docs/Dialects/LLVM.md
@@ -66,11 +66,11 @@ For example:
```mlir
^bb1:
- %0 = llvm.addi %arg0, %cst : !llvm.i32
- llvm.br ^bb2[%0: !llvm.i32]
+ %0 = llvm.addi %arg0, %cst : i32
+ llvm.br ^bb2[%0: i32]
// If the control flow comes from ^bb1, %arg1 == %0.
-^bb2(%arg1: !llvm.i32)
+^bb2(%arg1: i32)
// ...
```
@@ -91,9 +91,9 @@ control flow to the same block with
different arguments. For example:
```mlir
^bb1:
- llvm.cond_br %cond, ^bb2[%0: !llvm.i32], ^bb2[%1: !llvm.i32]
+ llvm.cond_br %cond, ^bb2[%0: i32], ^bb2[%1: i32]
-^bb2(%arg0: !llvm.i32):
+^bb2(%arg0: i32):
// ...
```
@@ -124,7 +124,7 @@ Examples:
%2 = llvm.mlir.null : !llvm.ptr<func<void ()>>
// Constant 42 as i32.
-%3 = llvm.mlir.constant(42 : i32) : !llvm.i32
+%3 = llvm.mlir.constant(42 : i32) : i32
// Splat dense vector constant.
%3 = llvm.mlir.constant(dense<1.0> : vector<4xf32>) : !llvm.vec<4 x float>
diff --git a/mlir/docs/LLVMDialectMemRefConvention.md b/mlir/docs/LLVMDialectMemRefConvention.md
index 94ca718bd744..b52db054225f 100644
--- a/mlir/docs/LLVMDialectMemRefConvention.md
+++ b/mlir/docs/LLVMDialectMemRefConvention.md
@@ -31,7 +31,7 @@ func @bar() {
// is transformed into
-llvm.func @foo(%arg0: !llvm.i32, %arg1: !llvm.i64) -> !llvm.struct<(i32, i64)> {
+llvm.func @foo(%arg0: i32, %arg1: i64) -> !llvm.struct<(i32, i64)> {
// insert the vales into a structure
%0 = llvm.mlir.undef : !llvm.struct<(i32, i64)>
%1 = llvm.insertvalue %arg0, %0[0] : !llvm.struct<(i32, i64)>
@@ -41,18 +41,18 @@ llvm.func @foo(%arg0: !llvm.i32, %arg1: !llvm.i64) -> !llvm.struct<(i32, i64)> {
llvm.return %2 : !llvm.struct<(i32, i64)>
}
llvm.func @bar() {
- %0 = llvm.mlir.constant(42 : i32) : !llvm.i32
- %1 = llvm.mlir.constant(17) : !llvm.i64
+ %0 = llvm.mlir.constant(42 : i32) : i32
+ %1 = llvm.mlir.constant(17) : i64
// call and extract the values from the structure
%2 = llvm.call @bar(%0, %1)
- : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i64)>
+ : (i32, i32) -> !llvm.struct<(i32, i64)>
%3 = llvm.extractvalue %2[0] : !llvm.struct<(i32, i64)>
%4 = llvm.extractvalue %2[1] : !llvm.struct<(i32, i64)>
// use as before
- "use_i32"(%3) : (!llvm.i32) -> ()
- "use_i64"(%4) : (!llvm.i64) -> ()
+ "use_i32"(%3) : (i32) -> ()
+ "use_i64"(%4) : (i64) -> ()
}
```
@@ -87,9 +87,9 @@ func @foo(%arg0: memref<?xf32>) -> () {
llvm.func @foo(%arg0: !llvm.ptr<float>, // Allocated pointer.
%arg1: !llvm.ptr<float>, // Aligned pointer.
- %arg2: !llvm.i64, // Offset.
- %arg3: !llvm.i64, // Size in dim 0.
- %arg4: !llvm.i64) { // Stride in dim 0.
+ %arg2: i64, // Offset.
+ %arg3: i64, // Size in dim 0.
+ %arg4: i64) { // Stride in dim 0.
// Populate memref descriptor structure.
%0 = llvm.mlir.undef :
%1 = llvm.insertvalue %arg0, %0[0] : !llvm.memref_1d
@@ -153,7 +153,7 @@ llvm.func @foo(%arg0: memref<*xf32>) -> () {
// Gets converted to the following.
-llvm.func @foo(%arg0: !llvm.i64 // Rank.
+llvm.func @foo(%arg0: i64 // Rank.
%arg1: !llvm.ptr<i8>) { // Type-erased pointer to descriptor.
// Pack the unranked memref descriptor.
%0 = llvm.mlir.undef : !llvm.struct<(i64, ptr<i8>)>
@@ -182,7 +182,7 @@ llvm.func @bar() {
%2 = llvm.extractvalue %0[1] : !llvm.struct<(i64, ptr<i8>)>
// Pass individual values to the callee.
- llvm.call @foo(%1, %2) : (!llvm.i64, !llvm.ptr<i8>)
+ llvm.call @foo(%1, %2) : (i64, !llvm.ptr<i8>)
llvm.return
}
```
@@ -269,8 +269,8 @@ func @qux(%arg0: memref<?x?xf32>)
// Function with unpacked arguments.
llvm.func @qux(%arg0: !llvm.ptr<float>, %arg1: !llvm.ptr<float>,
- %arg2: !llvm.i64, %arg3: !llvm.i64, %arg4: !llvm.i64,
- %arg5: !llvm.i64, %arg6: !llvm.i64) {
+ %arg2: i64, %arg3: i64, %arg4: i64,
+ %arg5: i64, %arg6: i64) {
// Populate memref descriptor (as per calling convention).
%0 = llvm.mlir.undef : !llvm.memref_2d
%1 = llvm.insertvalue %arg0, %0[0] : !llvm.memref_2d
@@ -282,9 +282,9 @@ llvm.func @qux(%arg0: !llvm.ptr<float>, %arg1: !llvm.ptr<float>,
%7 = llvm.insertvalue %arg6, %6[4, 1] : !llvm.memref_2d
// Store the descriptor in a stack-allocated space.
- %8 = llvm.mlir.constant(1 : index) : !llvm.i64
+ %8 = llvm.mlir.constant(1 : index) : i64
%9 = llvm.alloca %8 x !llvm.memref_2d
- : (!llvm.i64) -> !llvm.ptr<struct<(ptr<float>, ptr<float>, i64,
+ : (i64) -> !llvm.ptr<struct<(ptr<float>, ptr<float>, i64,
array<2xi64>, array<2xi64>)>>
llvm.store %7, %9 : !llvm.ptr<struct<(ptr<float>, ptr<float>, i64,
array<2xi64>, array<2xi64>)>>
@@ -317,8 +317,8 @@ func @foo(%arg0: memref<?x?xf32>) {
// Function with unpacked arguments.
llvm.func @foo(%arg0: !llvm.ptr<float>, %arg1: !llvm.ptr<float>,
- %arg2: !llvm.i64, %arg3: !llvm.i64, %arg4: !llvm.i64,
- %arg5: !llvm.i64, %arg6: !llvm.i64) {
+ %arg2: i64, %arg3: i64, %arg4: i64,
+ %arg5: i64, %arg6: i64) {
llvm.return
}
@@ -336,8 +336,8 @@ llvm.func @_mlir_ciface_foo(%arg0: !llvm.memref_2d_ptr) {
%6 = llvm.extractvalue %0[4, 0] : !llvm.memref_2d
%7 = llvm.extractvalue %0[4, 1] : !llvm.memref_2d
llvm.call @foo(%1, %2, %3, %4, %5, %6, %7)
- : (!llvm.ptr<float>, !llvm.ptr<float>, !llvm.i64, !llvm.i64, !llvm.i64,
- !llvm.i64, !llvm.i64) -> ()
+ : (!llvm.ptr<float>, !llvm.ptr<float>, i64, i64, i64,
+ i64, i64) -> ()
llvm.return
}
```
@@ -397,27 +397,27 @@ is transformed into the equivalent of the following code:
// dynamic, extract the stride value from the descriptor.
%stride1 = llvm.extractvalue[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64,
array<4xi64>, array<4xi64>)>
-%addr1 = muli %stride1, %1 : !llvm.i64
+%addr1 = muli %stride1, %1 : i64
// When the stride or, in absence of explicit strides, the trailing sizes are
// known statically, this value is used as a constant. The natural value of
// strides is the product of all sizes following the current dimension.
-%stride2 = llvm.mlir.constant(32 : index) : !llvm.i64
-%addr2 = muli %stride2, %2 : !llvm.i64
-%addr3 = addi %addr1, %addr2 : !llvm.i64
+%stride2 = llvm.mlir.constant(32 : index) : i64
+%addr2 = muli %stride2, %2 : i64
+%addr3 = addi %addr1, %addr2 : i64
-%stride3 = llvm.mlir.constant(8 : index) : !llvm.i64
-%addr4 = muli %stride3, %3 : !llvm.i64
-%addr5 = addi %addr3, %addr4 : !llvm.i64
+%stride3 = llvm.mlir.constant(8 : index) : i64
+%addr4 = muli %stride3, %3 : i64
+%addr5 = addi %addr3, %addr4 : i64
// Multiplication with the known unit stride can be omitted.
-%addr6 = addi %addr5, %4 : !llvm.i64
+%addr6 = addi %addr5, %4 : i64
// If the linear offset is known to be zero, it can also be omitted. If it is
// dynamic, it is extracted from the descriptor.
%offset = llvm.extractvalue[2] : !llvm.struct<(ptr<float>, ptr<float>, i64,
array<4xi64>, array<4xi64>)>
-%addr7 = addi %addr6, %offset : !llvm.i64
+%addr7 = addi %addr6, %offset : i64
// All accesses are based on the aligned pointer.
%aligned = llvm.extractvalue[1] : !llvm.struct<(ptr<float>, ptr<float>, i64,
diff --git a/mlir/docs/SPIRVToLLVMDialectConversion.md b/mlir/docs/SPIRVToLLVMDialectConversion.md
index bdae08c1e230..494dcda7d8ab 100644
--- a/mlir/docs/SPIRVToLLVMDialectConversion.md
+++ b/mlir/docs/SPIRVToLLVMDialectConversion.md
@@ -159,8 +159,8 @@ SPIR-V Dialect op | LLVM Dialect intrinsic
`spv.Not` is modelled with a `xor` operation with a mask with all bits set.
```mlir
- %mask = llvm.mlir.constant(-1 : i32) : !llvm.i32
-%0 = spv.Not %op : i32 => %0 = llvm.xor %op, %mask : !llvm.i32
+ %mask = llvm.mlir.constant(-1 : i32) : i32
+%0 = spv.Not %op : i32 => %0 = llvm.xor %op, %mask : i32
```
#### Bitfield ops
@@ -189,10 +189,10 @@ to note:
```mlir
// Broadcasting offset
%offset0 = llvm.mlir.undef : !llvm.vec<2 x i8>
- %zero = llvm.mlir.constant(0 : i32) : !llvm.i32
- %offset1 = llvm.insertelement %offset, %offset0[%zero : !llvm.i32] : !llvm.vec<2 x i8>
- %one = llvm.mlir.constant(1 : i32) : !llvm.i32
- %vec_offset = llvm.insertelement %offset, %offset1[%one : !llvm.i32] : !llvm.vec<2 x i8>
+ %zero = llvm.mlir.constant(0 : i32) : i32
+ %offset1 = llvm.insertelement %offset, %offset0[%zero : i32] : !llvm.vec<2 x i8>
+ %one = llvm.mlir.constant(1 : i32) : i32
+ %vec_offset = llvm.insertelement %offset, %offset1[%one : i32] : !llvm.vec<2 x i8>
// Broadcasting count
// ...
@@ -209,10 +209,10 @@ to note:
```
Also, note that if the bitwidth of `offset` or `count` is greater than the
- bitwidth of `base`, truncation is still permitted. This is because the ops have a
- defined behaviour with `offset` and `count` being less than the size of
- `base`. It creates a natural upper bound on what values `offset` and `count`
- can take, which is 64. This can be expressed in less than 8 bits.
+ bitwidth of `base`, truncation is still permitted. This is because the ops
+ have a defined behaviour with `offset` and `count` being less than the size
+ of `base`. It creates a natural upper bound on what values `offset` and
+ `count` can take, which is 64. This can be expressed in less than 8 bits.
Now, having these two cases in mind, we can proceed with conversion for the ops
and their operands.
@@ -227,18 +227,18 @@ would be to create a mask with bits set outside
```mlir
// Create mask
-// %minus_one = llvm.mlir.constant(-1 : i32) : !llvm.i32
-// %t0 = llvm.shl %minus_one, %count : !llvm.i32
-// %t1 = llvm.xor %t0, %minus_one : !llvm.i32
-// %t2 = llvm.shl %t1, %offset : !llvm.i32
-// %mask = llvm.xor %t2, %minus_one : !llvm.i32
+// %minus_one = llvm.mlir.constant(-1 : i32) : i32
+// %t0 = llvm.shl %minus_one, %count : i32
+// %t1 = llvm.xor %t0, %minus_one : i32
+// %t2 = llvm.shl %t1, %offset : i32
+// %mask = llvm.xor %t2, %minus_one : i32
// Extract unchanged bits from the Base
-// %new_base = llvm.and %base, %mask : !llvm.i32
+// %new_base = llvm.and %base, %mask : i32
// Insert new bits
-// %sh_insert = llvm.shl %insert, %offset : !llvm.i32
-// %res = llvm.or %new_base, %sh_insert : !llvm.i32
+// %sh_insert = llvm.shl %insert, %offset : i32
+// %res = llvm.or %new_base, %sh_insert : i32
%res = spv.BitFieldInsert %base, %insert, %offset, %count : i32, i32, i32
```
@@ -251,14 +251,14 @@ sign bit.
```mlir
// Calculate the amount to shift left.
-// %size = llvm.mlir.constant(32 : i32) : !llvm.i32
-// %t0 = llvm.add %count, %offset : !llvm.i32
-// %t1 = llvm.sub %size, %t0 : !llvm.i32
+// %size = llvm.mlir.constant(32 : i32) : i32
+// %t0 = llvm.add %count, %offset : i32
+// %t1 = llvm.sub %size, %t0 : i32
// Shift left and then right to extract the bits
-// %sh_left = llvm.shl %base, %t1 : !llvm.i32
-// %t2 = llvm.add %offset, %t1 : !llvm.i32
-// %res = llvm.ashr %sh_left, %t2 : !llvm.i32
+// %sh_left = llvm.shl %base, %t1 : i32
+// %t2 = llvm.add %offset, %t1 : i32
+// %res = llvm.ashr %sh_left, %t2 : i32
%res = spv.BitFieldSExtract %base, %offset, %count : i32, i32, i32
```
@@ -270,13 +270,13 @@ and the mask is applied.
```mlir
// Create a mask
-// %minus_one = llvm.mlir.constant(-1 : i32) : !llvm.i32
-// %t0 = llvm.shl %minus_one, %count : !llvm.i32
-// mask = llvm.xor %t0, %minus_one : !llvm.i32
+// %minus_one = llvm.mlir.constant(-1 : i32) : i32
+// %t0 = llvm.shl %minus_one, %count : i32
+// mask = llvm.xor %t0, %minus_one : i32
// Shift Base and apply mask
-// %sh_base = llvm.lshr %base, %offset : !llvm.i32
-// %res = llvm.and %sh_base, %mask : !llvm.i32
+// %sh_base = llvm.lshr %base, %offset : i32
+// %res = llvm.and %sh_base, %mask : i32
%res = spv.BitFieldUExtract %base, %offset, %count : i32, i32, i32
```
@@ -371,34 +371,34 @@ non-vector | `spv.CompositeInsert` | `llvm.insertvalue`
First of all, it is important to note that there is no direct representation of
entry points in LLVM. At the moment, we use the following approach:
-* `spv.EntryPoint` is simply removed.
+* `spv.EntryPoint` is simply removed.
-* In contrast, `spv.ExecutionMode` may contain important information about the
- entry point. For example, `LocalSize` provides information about the
- work-group size that can be reused.
+* In contrast, `spv.ExecutionMode` may contain important information about the
+ entry point. For example, `LocalSize` provides information about the
+ work-group size that can be reused.
- In order to preserve this information, `spv.ExecutionMode` is converted to
- a struct global variable that stores the execution mode id and any variables
- associated with it. In C, the struct has the structure shown below.
+ In order to preserve this information, `spv.ExecutionMode` is converted to a
+ struct global variable that stores the execution mode id and any variables
+ associated with it. In C, the struct has the structure shown below.
- ```C
- // No values are associated // There are values that are associated
- // with this entry point. // with this entry point.
- struct { struct {
- int32_t executionMode; int32_t executionMode;
- }; int32_t values[];
- };
- ```
+ ```C
+ // No values are associated // There are values that are associated
+ // with this entry point. // with this entry point.
+ struct { struct {
+ int32_t executionMode; int32_t executionMode;
+ }; int32_t values[];
+ };
+ ```
- ```mlir
- // spv.ExecutionMode @empty "ContractionOff"
- llvm.mlir.global external constant @{{.*}}() : !llvm.struct<(i32)> {
- %0 = llvm.mlir.undef : !llvm.struct<(i32)>
- %1 = llvm.mlir.constant(31 : i32) : !llvm.i32
- %ret = llvm.insertvalue %1, %0[0 : i32] : !llvm.struct<(i32)>
- llvm.return %ret : !llvm.struct<(i32)>
- }
- ```
+ ```mlir
+ // spv.ExecutionMode @empty "ContractionOff"
+ llvm.mlir.global external constant @{{.*}}() : !llvm.struct<(i32)> {
+ %0 = llvm.mlir.undef : !llvm.struct<(i32)>
+ %1 = llvm.mlir.constant(31 : i32) : i32
+ %ret = llvm.insertvalue %1, %0[0 : i32] : !llvm.struct<(i32)>
+ llvm.return %ret : !llvm.struct<(i32)>
+ }
+ ```
### Logical ops
@@ -417,8 +417,8 @@ SPIR-V Dialect op | LLVM Dialect op
modelled with `xor` operation with a mask with all bits set.
```mlir
- %mask = llvm.mlir.constant(-1 : i1) : !llvm.i1
-%0 = spv.LogicalNot %op : i1 => %0 = llvm.xor %op, %mask : !llvm.i1
+ %mask = llvm.mlir.constant(-1 : i1) : i1
+%0 = spv.LogicalNot %op : i1 => %0 = llvm.xor %op, %mask : i1
```
### Memory ops
@@ -441,8 +441,8 @@ order to go through the pointer.
// Corresponding LLVM dialect code
%i = ...
%var = ...
-%0 = llvm.mlir.constant(0 : i32) : !llvm.i32
-%el = llvm.getelementptr %var[%0, %i, %i] : (!llvm.ptr<struct<packed (float, array<4 x float>)>>, !llvm.i32, !llvm.i32, !llvm.i32)
+%0 = llvm.mlir.constant(0 : i32) : i32
+%el = llvm.getelementptr %var[%0, %i, %i] : (!llvm.ptr<struct<packed (float, array<4 x float>)>>, i32, i32, i32)
```
#### `spv.Load` and `spv.Store`
@@ -538,13 +538,13 @@ Also, at the moment initialization is only possible via `spv.constant`.
```mlir
// Conversion of VariableOp without initialization
- %size = llvm.mlir.constant(1 : i32) : !llvm.i32
-%res = spv.Variable : !spv.ptr<vector<3xf32>, Function> => %res = llvm.alloca %size x !llvm.vec<3 x float> : (!llvm.i32) -> !llvm.ptr<vec<3 x float>>
+ %size = llvm.mlir.constant(1 : i32) : i32
+%res = spv.Variable : !spv.ptr<vector<3xf32>, Function> => %res = llvm.alloca %size x !llvm.vec<3 x float> : (i32) -> !llvm.ptr<vec<3 x float>>
// Conversion of VariableOp with initialization
- %c = llvm.mlir.constant(0 : i64) : !llvm.i64
-%c = spv.constant 0 : i64 %size = llvm.mlir.constant(1 : i32) : !llvm.i32
-%res = spv.Variable init(%c) : !spv.ptr<i64, Function> => %res = llvm.alloca %[[SIZE]] x !llvm.i64 : (!llvm.i32) -> !llvm.ptr<i64>
+ %c = llvm.mlir.constant(0 : i64) : i64
+%c = spv.constant 0 : i64 %size = llvm.mlir.constant(1 : i32) : i32
+%res = spv.Variable init(%c) : !spv.ptr<i64, Function> => %res = llvm.alloca %[[SIZE]] x i64 : (i32) -> !llvm.ptr<i64>
llvm.store %c, %res : !llvm.ptr<i64>
```
@@ -582,11 +582,11 @@ bitwidth. This leads to the following conversions:
```mlir
// Shift without extension
-%res0 = spv.ShiftRightArithmetic %0, %2 : i32, i32 => %res0 = llvm.ashr %0, %2 : !llvm.i32
+%res0 = spv.ShiftRightArithmetic %0, %2 : i32, i32 => %res0 = llvm.ashr %0, %2 : i32
// Shift with extension
- %ext = llvm.sext %1 : !llvm.i16 to !llvm.i32
-%res1 = spv.ShiftRightArithmetic %0, %1 : i32, i16 => %res1 = llvm.ashr %0, %ext: !llvm.i32
+ %ext = llvm.sext %1 : i16 to i32
+%res1 = spv.ShiftRightArithmetic %0, %1 : i32, i16 => %res1 = llvm.ashr %0, %ext: i32
```
### `spv.constant`
@@ -612,7 +612,7 @@ to handle it case-by-case, given that the purpose of the conversion is not to
cover all possible corner cases.
```mlir
-// %0 = llvm.mlir.constant(0 : i8) : !llvm.i8
+// %0 = llvm.mlir.constant(0 : i8) : i8
%0 = spv.constant 0 : i8
// %1 = llvm.mlir.constant(dense<[2, 3, 4]> : vector<3xi32>) : !llvm.vec<3 x i32>
@@ -677,11 +677,11 @@ blocks being reachable. Moreover, selection and loop control attributes (such as
```mlir
// Conversion of selection
-%cond = spv.constant true %cond = llvm.mlir.constant(true) : !llvm.i1
+%cond = spv.constant true %cond = llvm.mlir.constant(true) : i1
spv.selection {
spv.BranchConditional %cond, ^true, ^false llvm.cond_br %cond, ^true, ^false
-^true: ^true:
+^true: ^true:
// True block code // True block code
spv.Branch ^merge => llvm.br ^merge
@@ -692,13 +692,13 @@ spv.selection {
^merge: ^merge:
spv.mlir.merge llvm.br ^continue
}
-// Remaining code ^continue:
+// Remaining code ^continue:
// Remaining code
```
```mlir
// Conversion of loop
-%cond = spv.constant true %cond = llvm.mlir.constant(true) : !llvm.i1
+%cond = spv.constant true %cond = llvm.mlir.constant(true) : i1
spv.loop {
spv.Branch ^header llvm.br ^header
diff --git a/mlir/docs/Tutorials/Toy/Ch-6.md b/mlir/docs/Tutorials/Toy/Ch-6.md
index c2211412e5c4..43bbb56714d1 100644
--- a/mlir/docs/Tutorials/Toy/Ch-6.md
+++ b/mlir/docs/Tutorials/Toy/Ch-6.md
@@ -127,8 +127,8 @@ We can now lower down to the LLVM dialect, which produces the following code:
```mlir
llvm.func @free(!llvm<"i8*">)
-llvm.func @printf(!llvm<"i8*">, ...) -> !llvm.i32
-llvm.func @malloc(!llvm.i64) -> !llvm<"i8*">
+llvm.func @printf(!llvm<"i8*">, ...) -> i32
+llvm.func @malloc(i64) -> !llvm<"i8*">
llvm.func @main() {
%0 = llvm.mlir.constant(1.000000e+00 : f64) : !llvm.double
%1 = llvm.mlir.constant(2.000000e+00 : f64) : !llvm.double
@@ -137,18 +137,18 @@ llvm.func @main() {
^bb16:
%221 = llvm.extractvalue %25[0 : index] : !llvm<"{ double*, i64, [2 x i64], [2 x i64] }">
- %222 = llvm.mlir.constant(0 : index) : !llvm.i64
- %223 = llvm.mlir.constant(2 : index) : !llvm.i64
- %224 = llvm.mul %214, %223 : !llvm.i64
- %225 = llvm.add %222, %224 : !llvm.i64
- %226 = llvm.mlir.constant(1 : index) : !llvm.i64
- %227 = llvm.mul %219, %226 : !llvm.i64
- %228 = llvm.add %225, %227 : !llvm.i64
- %229 = llvm.getelementptr %221[%228] : (!llvm<"double*">, !llvm.i64) -> !llvm<"double*">
+ %222 = llvm.mlir.constant(0 : index) : i64
+ %223 = llvm.mlir.constant(2 : index) : i64
+ %224 = llvm.mul %214, %223 : i64
+ %225 = llvm.add %222, %224 : i64
+ %226 = llvm.mlir.constant(1 : index) : i64
+ %227 = llvm.mul %219, %226 : i64
+ %228 = llvm.add %225, %227 : i64
+ %229 = llvm.getelementptr %221[%228] : (!llvm<"double*">, i64) -> !llvm<"double*">
%230 = llvm.load %229 : !llvm<"double*">
- %231 = llvm.call @printf(%207, %230) : (!llvm<"i8*">, !llvm.double) -> !llvm.i32
- %232 = llvm.add %219, %218 : !llvm.i64
- llvm.br ^bb15(%232 : !llvm.i64)
+ %231 = llvm.call @printf(%207, %230) : (!llvm<"i8*">, !llvm.double) -> i32
+ %232 = llvm.add %219, %218 : i64
+ llvm.br ^bb15(%232 : i64)
...
diff --git a/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp b/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
index 6fbf29f4128d..3883ce2ed0c8 100644
--- a/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
@@ -111,9 +111,8 @@ class PrintOpLowering : public ConversionPattern {
// Create a function declaration for printf, the signature is:
// * `i32 (i8*, ...)`
- auto llvmI32Ty = LLVM::LLVMIntegerType::get(context, 32);
- auto llvmI8PtrTy =
- LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(context, 8));
+ auto llvmI32Ty = IntegerType::get(context, 32);
+ auto llvmI8PtrTy = LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
auto llvmFnType = LLVM::LLVMFunctionType::get(llvmI32Ty, llvmI8PtrTy,
/*isVarArg=*/true);
@@ -135,7 +134,7 @@ class PrintOpLowering : public ConversionPattern {
OpBuilder::InsertionGuard insertGuard(builder);
builder.setInsertionPointToStart(module.getBody());
auto type = LLVM::LLVMArrayType::get(
- LLVM::LLVMIntegerType::get(builder.getContext(), 8), value.size());
+ IntegerType::get(builder.getContext(), 8), value.size());
global = builder.create<LLVM::GlobalOp>(loc, type, /*isConstant=*/true,
LLVM::Linkage::Internal, name,
builder.getStringAttr(value));
@@ -144,12 +143,11 @@ class PrintOpLowering : public ConversionPattern {
// Get the pointer to the first character in the global string.
Value globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
Value cst0 = builder.create<LLVM::ConstantOp>(
- loc, LLVM::LLVMIntegerType::get(builder.getContext(), 64),
+ loc, IntegerType::get(builder.getContext(), 64),
builder.getIntegerAttr(builder.getIndexType(), 0));
return builder.create<LLVM::GEPOp>(
loc,
- LLVM::LLVMPointerType::get(
- LLVM::LLVMIntegerType::get(builder.getContext(), 8)),
+ LLVM::LLVMPointerType::get(IntegerType::get(builder.getContext(), 8)),
globalPtr, ArrayRef<Value>({cst0, cst0}));
}
};
diff --git a/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp b/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
index 6fbf29f4128d..3883ce2ed0c8 100644
--- a/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
+++ b/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
@@ -111,9 +111,8 @@ class PrintOpLowering : public ConversionPattern {
// Create a function declaration for printf, the signature is:
// * `i32 (i8*, ...)`
- auto llvmI32Ty = LLVM::LLVMIntegerType::get(context, 32);
- auto llvmI8PtrTy =
- LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(context, 8));
+ auto llvmI32Ty = IntegerType::get(context, 32);
+ auto llvmI8PtrTy = LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
auto llvmFnType = LLVM::LLVMFunctionType::get(llvmI32Ty, llvmI8PtrTy,
/*isVarArg=*/true);
@@ -135,7 +134,7 @@ class PrintOpLowering : public ConversionPattern {
OpBuilder::InsertionGuard insertGuard(builder);
builder.setInsertionPointToStart(module.getBody());
auto type = LLVM::LLVMArrayType::get(
- LLVM::LLVMIntegerType::get(builder.getContext(), 8), value.size());
+ IntegerType::get(builder.getContext(), 8), value.size());
global = builder.create<LLVM::GlobalOp>(loc, type, /*isConstant=*/true,
LLVM::Linkage::Internal, name,
builder.getStringAttr(value));
@@ -144,12 +143,11 @@ class PrintOpLowering : public ConversionPattern {
// Get the pointer to the first character in the global string.
Value globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
Value cst0 = builder.create<LLVM::ConstantOp>(
- loc, LLVM::LLVMIntegerType::get(builder.getContext(), 64),
+ loc, IntegerType::get(builder.getContext(), 64),
builder.getIntegerAttr(builder.getIndexType(), 0));
return builder.create<LLVM::GEPOp>(
loc,
- LLVM::LLVMPointerType::get(
- LLVM::LLVMIntegerType::get(builder.getContext(), 8)),
+ LLVM::LLVMPointerType::get(IntegerType::get(builder.getContext(), 8)),
globalPtr, ArrayRef<Value>({cst0, cst0}));
}
};
diff --git a/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h b/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
index 686a8ca01d9e..a48acb93c382 100644
--- a/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
+++ b/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
@@ -180,9 +180,9 @@ class LLVMTypeConverter : public TypeConverter {
/// For example, memref<?x?xf32> is converted to the following list:
/// - `!llvm<"float*">` (allocated pointer),
/// - `!llvm<"float*">` (aligned pointer),
- /// - `!llvm.i64` (offset),
- /// - `!llvm.i64`, `!llvm.i64` (sizes),
- /// - `!llvm.i64`, `!llvm.i64` (strides).
+ /// - `i64` (offset),
+ /// - `i64`, `i64` (sizes),
+ /// - `i64`, `i64` (strides).
/// These types can be recomposed to a memref descriptor struct.
SmallVector<Type, 5> getMemRefDescriptorFields(MemRefType type,
bool unpackAggregates);
@@ -193,7 +193,7 @@ class LLVMTypeConverter : public TypeConverter {
/// - an integer rank, followed by
/// - a pointer to the memref descriptor struct.
/// For example, memref<*xf32> is converted to the following list:
- /// !llvm.i64 (rank)
+ /// i64 (rank)
/// !llvm<"i8*"> (type-erased pointer).
/// These types can be recomposed to a unranked memref descriptor struct.
SmallVector<Type, 2> getUnrankedMemRefDescriptorFields();
@@ -523,15 +523,15 @@ class ConvertToLLVMPattern : public ConversionPattern {
/// strides and buffer size from these sizes.
///
/// For example, memref<4x?xf32> emits:
- /// `sizes[0]` = llvm.mlir.constant(4 : index) : !llvm.i64
+ /// `sizes[0]` = llvm.mlir.constant(4 : index) : i64
/// `sizes[1]` = `dynamicSizes[0]`
- /// `strides[1]` = llvm.mlir.constant(1 : index) : !llvm.i64
+ /// `strides[1]` = llvm.mlir.constant(1 : index) : i64
/// `strides[0]` = `sizes[0]`
- /// %size = llvm.mul `sizes[0]`, `sizes[1]` : !llvm.i64
+ /// %size = llvm.mul `sizes[0]`, `sizes[1]` : i64
/// %nullptr = llvm.mlir.null : !llvm.ptr<float>
/// %gep = llvm.getelementptr %nullptr[%size]
- /// : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
- /// `sizeBytes` = llvm.ptrtoint %gep : !llvm.ptr<float> to !llvm.i64
+ /// : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+ /// `sizeBytes` = llvm.ptrtoint %gep : !llvm.ptr<float> to i64
void getMemRefDescriptorSizes(Location loc, MemRefType memRefType,
ArrayRef<Value> dynamicSizes,
ConversionPatternRewriter &rewriter,
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
index 05092c430617..a2b807e1697e 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
@@ -54,18 +54,18 @@ def LLVM_Type : DialectType<LLVM_Dialect,
// Type constraint accepting LLVM integer types.
def LLVM_AnyInteger : Type<
- CPred<"$_self.isa<::mlir::LLVM::LLVMIntegerType>()">,
+ CPred<"$_self.isa<::mlir::IntegerType>()">,
"LLVM integer type">;
// Type constraints accepting LLVM integer type of a specific width.
class LLVM_IntBase<int width> :
Type<And<[
LLVM_AnyInteger.predicate,
- CPred<"$_self.cast<::mlir::LLVM::LLVMIntegerType>().getBitWidth() == "
+ CPred<"$_self.cast<::mlir::IntegerType>().getWidth() == "
# width>]>,
"LLVM " # width # "-bit integer type">,
BuildableType<
- "::mlir::LLVM::LLVMIntegerType::get($_builder.getContext(), "
+ "::mlir::IntegerType::get($_builder.getContext(), "
# width # ")">;
def LLVM_i1 : LLVM_IntBase<1>;
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index 53c42540aa48..a6dcf79318df 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -184,7 +184,7 @@ def LLVM_ICmpOp : LLVM_Op<"icmp", [NoSideEffect]> {
let builders = [
OpBuilderDAG<(ins "ICmpPredicate":$predicate, "Value":$lhs, "Value":$rhs),
[{
- build($_builder, $_state, LLVMIntegerType::get(lhs.getType().getContext(), 1),
+ build($_builder, $_state, IntegerType::get(lhs.getType().getContext(), 1),
$_builder.getI64IntegerAttr(static_cast<int64_t>(predicate)), lhs, rhs);
}]>];
let parser = [{ return parseCmpOp<ICmpPredicate>(parser, result); }];
@@ -235,7 +235,7 @@ def LLVM_FCmpOp : LLVM_Op<"fcmp", [
OpBuilderDAG<(ins "FCmpPredicate":$predicate, "Value":$lhs, "Value":$rhs,
CArg<"FastmathFlags", "{}">:$fmf),
[{
- build($_builder, $_state, LLVMIntegerType::get(lhs.getType().getContext(), 1),
+ build($_builder, $_state, IntegerType::get(lhs.getType().getContext(), 1),
$_builder.getI64IntegerAttr(static_cast<int64_t>(predicate)), lhs, rhs,
::mlir::LLVM::FMFAttr::get(fmf, $_builder.getContext()));
}]>];
@@ -791,7 +791,7 @@ def LLVM_AddressOfOp : LLVM_Op<"mlir.addressof"> {
}
// Define the global.
- llvm.mlir.global @const(42 : i32) : !llvm.i32
+ llvm.mlir.global @const(42 : i32) : i32
```
}];
@@ -862,9 +862,9 @@ def LLVM_GlobalOp : LLVM_Op<"mlir.global",
// i32* getelementptr (i32* @g2, i32 2)
llvm.mlir.global constant @int_gep() : !llvm.ptr<i32> {
%0 = llvm.mlir.addressof @g2 : !llvm.ptr<i32>
- %1 = llvm.mlir.constant(2 : i32) : !llvm.i32
+ %1 = llvm.mlir.constant(2 : i32) : i32
%2 = llvm.getelementptr %0[%1]
- : (!llvm.ptr<i32>, !llvm.i32) -> !llvm.ptr<i32>
+ : (!llvm.ptr<i32>, i32) -> !llvm.ptr<i32>
// The initializer region must end with `llvm.return`.
llvm.return %2 : !llvm.ptr<i32>
}
@@ -880,7 +880,7 @@ def LLVM_GlobalOp : LLVM_Op<"mlir.global",
```mlir
// Global values use @-identifiers.
- llvm.mlir.global constant @cst(42 : i32) : !llvm.i32
+ llvm.mlir.global constant @cst(42 : i32) : i32
// Non-constant values must also be initialized.
llvm.mlir.global @variable(32.0 : f32) : !llvm.float
@@ -895,9 +895,9 @@ def LLVM_GlobalOp : LLVM_Op<"mlir.global",
// A complex initializer is constructed with an initializer region.
llvm.mlir.global constant @int_gep() : !llvm.ptr<i32> {
%0 = llvm.mlir.addressof @g2 : !llvm.ptr<i32>
- %1 = llvm.mlir.constant(2 : i32) : !llvm.i32
+ %1 = llvm.mlir.constant(2 : i32) : i32
%2 = llvm.getelementptr %0[%1]
- : (!llvm.ptr<i32>, !llvm.i32) -> !llvm.ptr<i32>
+ : (!llvm.ptr<i32>, i32) -> !llvm.ptr<i32>
llvm.return %2 : !llvm.ptr<i32>
}
```
@@ -911,7 +911,7 @@ def LLVM_GlobalOp : LLVM_Op<"mlir.global",
```mlir
// A constant with internal linkage will not participate in linking.
- llvm.mlir.global internal constant @cst(42 : i32) : !llvm.i32
+ llvm.mlir.global internal constant @cst(42 : i32) : i32
// By default, "external" linkage is assumed and the global participates in
// symbol resolution at link-time.
@@ -969,13 +969,13 @@ def LLVM_LLVMFuncOp : LLVM_Op<"func",
```mlir
// The type of @bar is !llvm<"i64 (i64)">
- llvm.func @bar(%arg0: !llvm.i64) -> !llvm.i64 {
- llvm.return %arg0 : !llvm.i64
+ llvm.func @bar(%arg0: i64) -> i64 {
+ llvm.return %arg0 : i64
}
// Type type of @foo is !llvm<"void (i64)">
// !llvm.void type is omitted
- llvm.func @foo(%arg0: !llvm.i64) {
+ llvm.func @foo(%arg0: i64) {
llvm.return
}
@@ -1102,10 +1102,10 @@ def LLVM_ConstantOp
```mlir
// Integer constant, internal i32 is mandatory
- %0 = llvm.mlir.constant(42 : i32) : !llvm.i32
+ %0 = llvm.mlir.constant(42 : i32) : i32
// It's okay to omit i64.
- %1 = llvm.mlir.constant(42) : !llvm.i64
+ %1 = llvm.mlir.constant(42) : i64
// Floating point constant.
%2 = llvm.mlir.constant(42.0 : f32) : !llvm.float
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
index 6f78118a5b00..4c0bee780e76 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
@@ -31,7 +31,6 @@ class LLVMDialect;
namespace detail {
struct LLVMFunctionTypeStorage;
-struct LLVMIntegerTypeStorage;
struct LLVMPointerTypeStorage;
struct LLVMStructTypeStorage;
struct LLVMTypeAndSizeStorage;
@@ -155,30 +154,6 @@ class LLVMFunctionType
bool);
};
-//===----------------------------------------------------------------------===//
-// LLVMIntegerType.
-//===----------------------------------------------------------------------===//
-
-/// LLVM dialect signless integer type parameterized by bitwidth.
-class LLVMIntegerType : public Type::TypeBase<LLVMIntegerType, Type,
- detail::LLVMIntegerTypeStorage> {
-public:
- /// Inherit base constructor.
- using Base::Base;
-
- /// Gets or creates an instance of the integer of the specified `bitwidth` in
- /// the given context.
- static LLVMIntegerType get(MLIRContext *ctx, unsigned bitwidth);
- static LLVMIntegerType getChecked(Location loc, unsigned bitwidth);
-
- /// Returns the bitwidth of this integer type.
- unsigned getBitWidth();
-
- /// Verifies that the type about to be constructed is well-formed.
- static LogicalResult verifyConstructionInvariants(Location loc,
- unsigned bitwidth);
-};
-
//===----------------------------------------------------------------------===//
// LLVMPointerType.
//===----------------------------------------------------------------------===//
@@ -412,30 +387,7 @@ void printType(Type type, DialectAsmPrinter &printer);
//===----------------------------------------------------------------------===//
/// Returns `true` if the given type is compatible with the LLVM dialect.
-inline bool isCompatibleType(Type type) {
- // clang-format off
- return type.isa<
- LLVMArrayType,
- LLVMBFloatType,
- LLVMDoubleType,
- LLVMFP128Type,
- LLVMFloatType,
- LLVMFunctionType,
- LLVMHalfType,
- LLVMIntegerType,
- LLVMLabelType,
- LLVMMetadataType,
- LLVMPPCFP128Type,
- LLVMPointerType,
- LLVMStructType,
- LLVMTokenType,
- LLVMVectorType,
- LLVMVoidType,
- LLVMX86FP80Type,
- LLVMX86MMXType
- >();
- // clang-format on
-}
+bool isCompatibleType(Type type);
inline bool isCompatibleFloatingPointType(Type type) {
return type.isa<LLVMHalfType, LLVMBFloatType, LLVMFloatType, LLVMDoubleType,
@@ -443,7 +395,7 @@ inline bool isCompatibleFloatingPointType(Type type) {
}
/// Returns the size of the given primitive LLVM dialect-compatible type
-/// (including vectors) in bits, for example, the size of !llvm.i16 is 16 and
+/// (including vectors) in bits, for example, the size of i16 is 16 and
/// the size of !llvm.vec<4 x i16> is 64. Returns 0 for non-primitive
/// (aggregates such as struct) or types that don't have a size (such as void).
llvm::TypeSize getPrimitiveTypeSizeInBits(Type type);
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 3c73cdf64eb7..b94cb5ce86b6 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -111,9 +111,9 @@ def NVVM_ShflBflyOp :
return success();
auto type = getType().dyn_cast<LLVM::LLVMStructType>();
auto elementType = (type && type.getBody().size() == 2)
- ? type.getBody()[1].dyn_cast<LLVM::LLVMIntegerType>()
+ ? type.getBody()[1].dyn_cast<IntegerType>()
: nullptr;
- if (!elementType || elementType.getBitWidth() != 1)
+ if (!elementType || elementType.getWidth() != 1)
return emitError("expected return type to be a two-element struct with "
"i1 as the second element");
return success();
diff --git a/mlir/integration_test/Dialect/LLVMIR/CPU/X86/test-inline-asm.mlir b/mlir/integration_test/Dialect/LLVMIR/CPU/X86/test-inline-asm.mlir
index a4c0efb7beed..2b237f341e1f 100644
--- a/mlir/integration_test/Dialect/LLVMIR/CPU/X86/test-inline-asm.mlir
+++ b/mlir/integration_test/Dialect/LLVMIR/CPU/X86/test-inline-asm.mlir
@@ -3,14 +3,14 @@
// RUN: FileCheck %s
module {
- llvm.func @printI64(!llvm.i64)
+ llvm.func @printI64(i64)
llvm.func @entry() {
- %c2 = llvm.mlir.constant(-42: i64) :!llvm.i64
+ %c2 = llvm.mlir.constant(-42: i64) :i64
%val = llvm.inline_asm "xor $0, $0", "=r,r" %c2 :
- (!llvm.i64) -> !llvm.i64
+ (i64) -> i64
// CHECK: 0
- llvm.call @printI64(%val) : (!llvm.i64) -> ()
+ llvm.call @printI64(%val) : (i64) -> ()
llvm.return
}
}
diff --git a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
index dbbcecfa3b61..3aa556a832a0 100644
--- a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
+++ b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
@@ -13,16 +13,16 @@ module {
%2 = llvm.mlir.constant(3.000000e+00 : f32) : !llvm.float
%3 = llvm.mlir.constant(4.000000e+00 : f32) : !llvm.float
%4 = llvm.mlir.undef : !llvm.vec<4 x float>
- %5 = llvm.mlir.constant(0 : index) : !llvm.i64
- %6 = llvm.insertelement %0, %4[%5 : !llvm.i64] : !llvm.vec<4 x float>
+ %5 = llvm.mlir.constant(0 : index) : i64
+ %6 = llvm.insertelement %0, %4[%5 : i64] : !llvm.vec<4 x float>
%7 = llvm.shufflevector %6, %4 [0 : i32, 0 : i32, 0 : i32, 0 : i32]
: !llvm.vec<4 x float>, !llvm.vec<4 x float>
- %8 = llvm.mlir.constant(1 : i64) : !llvm.i64
- %9 = llvm.insertelement %1, %7[%8 : !llvm.i64] : !llvm.vec<4 x float>
- %10 = llvm.mlir.constant(2 : i64) : !llvm.i64
- %11 = llvm.insertelement %2, %9[%10 : !llvm.i64] : !llvm.vec<4 x float>
- %12 = llvm.mlir.constant(3 : i64) : !llvm.i64
- %v = llvm.insertelement %3, %11[%12 : !llvm.i64] : !llvm.vec<4 x float>
+ %8 = llvm.mlir.constant(1 : i64) : i64
+ %9 = llvm.insertelement %1, %7[%8 : i64] : !llvm.vec<4 x float>
+ %10 = llvm.mlir.constant(2 : i64) : i64
+ %11 = llvm.insertelement %2, %9[%10 : i64] : !llvm.vec<4 x float>
+ %12 = llvm.mlir.constant(3 : i64) : i64
+ %v = llvm.insertelement %3, %11[%12 : i64] : !llvm.vec<4 x float>
%max = "llvm.intr.vector.reduce.fmax"(%v)
: (!llvm.vec<4 x float>) -> !llvm.float
diff --git a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir
index 7d8c9736a348..181e2e3ce0fc 100644
--- a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir
+++ b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir
@@ -5,76 +5,76 @@
// End-to-end test of all int reduction intrinsics (not exhaustive unit tests).
module {
llvm.func @printNewline()
- llvm.func @printI64(!llvm.i64)
+ llvm.func @printI64(i64)
llvm.func @entry() {
// Setup (1,2,3,4).
- %0 = llvm.mlir.constant(1 : i64) : !llvm.i64
- %1 = llvm.mlir.constant(2 : i64) : !llvm.i64
- %2 = llvm.mlir.constant(3 : i64) : !llvm.i64
- %3 = llvm.mlir.constant(4 : i64) : !llvm.i64
+ %0 = llvm.mlir.constant(1 : i64) : i64
+ %1 = llvm.mlir.constant(2 : i64) : i64
+ %2 = llvm.mlir.constant(3 : i64) : i64
+ %3 = llvm.mlir.constant(4 : i64) : i64
%4 = llvm.mlir.undef : !llvm.vec<4 x i64>
- %5 = llvm.mlir.constant(0 : index) : !llvm.i64
- %6 = llvm.insertelement %0, %4[%5 : !llvm.i64] : !llvm.vec<4 x i64>
+ %5 = llvm.mlir.constant(0 : index) : i64
+ %6 = llvm.insertelement %0, %4[%5 : i64] : !llvm.vec<4 x i64>
%7 = llvm.shufflevector %6, %4 [0 : i64, 0 : i64, 0 : i64, 0 : i64]
: !llvm.vec<4 x i64>, !llvm.vec<4 x i64>
- %8 = llvm.mlir.constant(1 : i64) : !llvm.i64
- %9 = llvm.insertelement %1, %7[%8 : !llvm.i64] : !llvm.vec<4 x i64>
- %10 = llvm.mlir.constant(2 : i64) : !llvm.i64
- %11 = llvm.insertelement %2, %9[%10 : !llvm.i64] : !llvm.vec<4 x i64>
- %12 = llvm.mlir.constant(3 : i64) : !llvm.i64
- %v = llvm.insertelement %3, %11[%12 : !llvm.i64] : !llvm.vec<4 x i64>
+ %8 = llvm.mlir.constant(1 : i64) : i64
+ %9 = llvm.insertelement %1, %7[%8 : i64] : !llvm.vec<4 x i64>
+ %10 = llvm.mlir.constant(2 : i64) : i64
+ %11 = llvm.insertelement %2, %9[%10 : i64] : !llvm.vec<4 x i64>
+ %12 = llvm.mlir.constant(3 : i64) : i64
+ %v = llvm.insertelement %3, %11[%12 : i64] : !llvm.vec<4 x i64>
%add = "llvm.intr.vector.reduce.add"(%v)
- : (!llvm.vec<4 x i64>) -> !llvm.i64
- llvm.call @printI64(%add) : (!llvm.i64) -> ()
+ : (!llvm.vec<4 x i64>) -> i64
+ llvm.call @printI64(%add) : (i64) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 10
%and = "llvm.intr.vector.reduce.and"(%v)
- : (!llvm.vec<4 x i64>) -> !llvm.i64
- llvm.call @printI64(%and) : (!llvm.i64) -> ()
+ : (!llvm.vec<4 x i64>) -> i64
+ llvm.call @printI64(%and) : (i64) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 0
%mul = "llvm.intr.vector.reduce.mul"(%v)
- : (!llvm.vec<4 x i64>) -> !llvm.i64
- llvm.call @printI64(%mul) : (!llvm.i64) -> ()
+ : (!llvm.vec<4 x i64>) -> i64
+ llvm.call @printI64(%mul) : (i64) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 24
%or = "llvm.intr.vector.reduce.or"(%v)
- : (!llvm.vec<4 x i64>) -> !llvm.i64
- llvm.call @printI64(%or) : (!llvm.i64) -> ()
+ : (!llvm.vec<4 x i64>) -> i64
+ llvm.call @printI64(%or) : (i64) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 7
%smax = "llvm.intr.vector.reduce.smax"(%v)
- : (!llvm.vec<4 x i64>) -> !llvm.i64
- llvm.call @printI64(%smax) : (!llvm.i64) -> ()
+ : (!llvm.vec<4 x i64>) -> i64
+ llvm.call @printI64(%smax) : (i64) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 4
%smin = "llvm.intr.vector.reduce.smin"(%v)
- : (!llvm.vec<4 x i64>) -> !llvm.i64
- llvm.call @printI64(%smin) : (!llvm.i64) -> ()
+ : (!llvm.vec<4 x i64>) -> i64
+ llvm.call @printI64(%smin) : (i64) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 1
%umax = "llvm.intr.vector.reduce.umax"(%v)
- : (!llvm.vec<4 x i64>) -> !llvm.i64
- llvm.call @printI64(%umax) : (!llvm.i64) -> ()
+ : (!llvm.vec<4 x i64>) -> i64
+ llvm.call @printI64(%umax) : (i64) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 4
%umin = "llvm.intr.vector.reduce.umin"(%v)
- : (!llvm.vec<4 x i64>) -> !llvm.i64
- llvm.call @printI64(%umin) : (!llvm.i64) -> ()
+ : (!llvm.vec<4 x i64>) -> i64
+ llvm.call @printI64(%umin) : (i64) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 1
%xor = "llvm.intr.vector.reduce.xor"(%v)
- : (!llvm.vec<4 x i64>) -> !llvm.i64
- llvm.call @printI64(%xor) : (!llvm.i64) -> ()
+ : (!llvm.vec<4 x i64>) -> i64
+ llvm.call @printI64(%xor) : (i64) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 4
diff --git a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
index 93854c3cc05c..43537fe62217 100644
--- a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
+++ b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
@@ -66,7 +66,7 @@ namespace {
struct AsyncAPI {
// All async types are lowered to opaque i8* LLVM pointers at runtime.
static LLVM::LLVMPointerType opaquePointerType(MLIRContext *ctx) {
- return LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(ctx, 8));
+ return LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8));
}
static FunctionType addOrDropRefFunctionType(MLIRContext *ctx) {
@@ -222,10 +222,10 @@ static void addCoroutineIntrinsicsDeclarations(ModuleOp module) {
auto token = LLVMTokenType::get(ctx);
auto voidTy = LLVMVoidType::get(ctx);
- auto i8 = LLVMIntegerType::get(ctx, 8);
- auto i1 = LLVMIntegerType::get(ctx, 1);
- auto i32 = LLVMIntegerType::get(ctx, 32);
- auto i64 = LLVMIntegerType::get(ctx, 64);
+ auto i8 = IntegerType::get(ctx, 8);
+ auto i1 = IntegerType::get(ctx, 1);
+ auto i32 = IntegerType::get(ctx, 32);
+ auto i64 = IntegerType::get(ctx, 64);
auto i8Ptr = LLVMPointerType::get(i8);
addLLVMFuncDecl(module, builder, kCoroId, token, {i32, i8Ptr, i8Ptr, i8Ptr});
@@ -254,8 +254,8 @@ static void addCRuntimeDeclarations(ModuleOp module) {
module.getBody()->getTerminator());
auto voidTy = LLVMVoidType::get(ctx);
- auto i64 = LLVMIntegerType::get(ctx, 64);
- auto i8Ptr = LLVMPointerType::get(LLVMIntegerType::get(ctx, 8));
+ auto i64 = IntegerType::get(ctx, 64);
+ auto i8Ptr = LLVMPointerType::get(IntegerType::get(ctx, 8));
addLLVMFuncDecl(module, builder, kMalloc, i8Ptr, {i64});
addLLVMFuncDecl(module, builder, kFree, voidTy, {i8Ptr});
@@ -280,7 +280,7 @@ static void addResumeFunction(ModuleOp module) {
return;
auto voidTy = LLVM::LLVMVoidType::get(ctx);
- auto i8Ptr = LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(ctx, 8));
+ auto i8Ptr = LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8));
auto resumeOp = moduleBuilder.create<LLVM::LLVMFuncOp>(
loc, kResume, LLVM::LLVMFunctionType::get(voidTy, {i8Ptr}));
@@ -361,10 +361,10 @@ static CoroMachinery setupCoroMachinery(FuncOp func) {
MLIRContext *ctx = func.getContext();
auto token = LLVM::LLVMTokenType::get(ctx);
- auto i1 = LLVM::LLVMIntegerType::get(ctx, 1);
- auto i32 = LLVM::LLVMIntegerType::get(ctx, 32);
- auto i64 = LLVM::LLVMIntegerType::get(ctx, 64);
- auto i8Ptr = LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(ctx, 8));
+ auto i1 = IntegerType::get(ctx, 1);
+ auto i32 = IntegerType::get(ctx, 32);
+ auto i64 = IntegerType::get(ctx, 64);
+ auto i8Ptr = LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8));
Block *entryBlock = func.addEntryBlock();
Location loc = func.getBody().getLoc();
@@ -393,11 +393,7 @@ static CoroMachinery setupCoroMachinery(FuncOp func) {
builder.getI32IntegerAttr(1));
auto gep = builder.create<LLVM::GEPOp>(loc, storagePtrType, nullPtr,
one.getResult());
- auto size = builder.create<LLVM::PtrToIntOp>(loc, i32, gep);
-
- // Cast to std type because runtime API defined using std types.
- return builder.create<LLVM::DialectCastOp>(loc, builder.getI32Type(),
- size.getResult());
+ return builder.create<LLVM::PtrToIntOp>(loc, i32, gep);
};
// We use the `async.value` type as a return type although it does not match
@@ -529,8 +525,8 @@ static void addSuspensionPoint(CoroMachinery coro, Value coroState,
OpBuilder &builder) {
Location loc = op->getLoc();
MLIRContext *ctx = op->getContext();
- auto i1 = LLVM::LLVMIntegerType::get(ctx, 1);
- auto i8 = LLVM::LLVMIntegerType::get(ctx, 8);
+ auto i1 = IntegerType::get(ctx, 1);
+ auto i8 = IntegerType::get(ctx, 8);
// Add a coroutine suspension in place of original `op` in the split block.
OpBuilder::InsertionGuard guard(builder);
diff --git a/mlir/lib/Conversion/GPUCommon/ConvertLaunchFuncToRuntimeCalls.cpp b/mlir/lib/Conversion/GPUCommon/ConvertLaunchFuncToRuntimeCalls.cpp
index cd16df12bfae..8c89b6d20099 100644
--- a/mlir/lib/Conversion/GPUCommon/ConvertLaunchFuncToRuntimeCalls.cpp
+++ b/mlir/lib/Conversion/GPUCommon/ConvertLaunchFuncToRuntimeCalls.cpp
@@ -75,12 +75,12 @@ class ConvertOpToGpuRuntimeCallPattern : public ConvertOpToLLVMPattern<OpTy> {
Type llvmVoidType = LLVM::LLVMVoidType::get(context);
Type llvmPointerType =
- LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(context, 8));
+ LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
Type llvmPointerPointerType = LLVM::LLVMPointerType::get(llvmPointerType);
- Type llvmInt8Type = LLVM::LLVMIntegerType::get(context, 8);
- Type llvmInt32Type = LLVM::LLVMIntegerType::get(context, 32);
- Type llvmInt64Type = LLVM::LLVMIntegerType::get(context, 64);
- Type llvmIntPtrType = LLVM::LLVMIntegerType::get(
+ Type llvmInt8Type = IntegerType::get(context, 8);
+ Type llvmInt32Type = IntegerType::get(context, 32);
+ Type llvmInt64Type = IntegerType::get(context, 64);
+ Type llvmIntPtrType = IntegerType::get(
context, this->getTypeConverter()->getPointerBitwidth(0));
FunctionCallBuilder moduleLoadCallBuilder = {
@@ -716,10 +716,10 @@ mlir::createGpuToLLVMConversionPass(StringRef gpuBinaryAnnotation) {
void mlir::populateGpuToLLVMConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns,
StringRef gpuBinaryAnnotation) {
- converter.addConversion([context = &converter.getContext()](
- gpu::AsyncTokenType type) -> Type {
- return LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(context, 8));
- });
+ converter.addConversion(
+ [context = &converter.getContext()](gpu::AsyncTokenType type) -> Type {
+ return LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
+ });
patterns.insert<ConvertAllocOpToGpuRuntimeCallPattern,
ConvertDeallocOpToGpuRuntimeCallPattern,
ConvertHostRegisterOpToGpuRuntimeCallPattern,
diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h
index 37c4469676c6..95215d0f4a6e 100644
--- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h
+++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h
@@ -85,7 +85,7 @@ struct GPUFuncOpLowering : ConvertOpToLLVMPattern<gpu::GPUFuncOp> {
// Rewrite workgroup memory attributions to addresses of global buffers.
rewriter.setInsertionPointToStart(&gpuFuncOp.front());
unsigned numProperArguments = gpuFuncOp.getNumArguments();
- auto i32Type = LLVM::LLVMIntegerType::get(rewriter.getContext(), 32);
+ auto i32Type = IntegerType::get(rewriter.getContext(), 32);
Value zero = nullptr;
if (!workgroupBuffers.empty())
@@ -114,7 +114,7 @@ struct GPUFuncOpLowering : ConvertOpToLLVMPattern<gpu::GPUFuncOp> {
// Rewrite private memory attributions to alloca'ed buffers.
unsigned numWorkgroupAttributions =
gpuFuncOp.getNumWorkgroupAttributions();
- auto int64Ty = LLVM::LLVMIntegerType::get(rewriter.getContext(), 64);
+ auto int64Ty = IntegerType::get(rewriter.getContext(), 64);
for (auto en : llvm::enumerate(gpuFuncOp.getPrivateAttributions())) {
Value attribution = en.value();
auto type = attribution.getType().cast<MemRefType>();
diff --git a/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h b/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h
index 0a1e76b99dbe..53a92e9c17eb 100644
--- a/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h
+++ b/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h
@@ -48,16 +48,13 @@ struct GPUIndexIntrinsicOpLowering : public ConvertOpToLLVMPattern<Op> {
Value newOp;
switch (dimensionToIndex(op)) {
case X:
- newOp =
- rewriter.create<XOp>(loc, LLVM::LLVMIntegerType::get(context, 32));
+ newOp = rewriter.create<XOp>(loc, IntegerType::get(context, 32));
break;
case Y:
- newOp =
- rewriter.create<YOp>(loc, LLVM::LLVMIntegerType::get(context, 32));
+ newOp = rewriter.create<YOp>(loc, IntegerType::get(context, 32));
break;
case Z:
- newOp =
- rewriter.create<ZOp>(loc, LLVM::LLVMIntegerType::get(context, 32));
+ newOp = rewriter.create<ZOp>(loc, IntegerType::get(context, 32));
break;
default:
return failure();
@@ -65,10 +62,10 @@ struct GPUIndexIntrinsicOpLowering : public ConvertOpToLLVMPattern<Op> {
if (indexBitwidth > 32) {
newOp = rewriter.create<LLVM::SExtOp>(
- loc, LLVM::LLVMIntegerType::get(context, indexBitwidth), newOp);
+ loc, IntegerType::get(context, indexBitwidth), newOp);
} else if (indexBitwidth < 32) {
newOp = rewriter.create<LLVM::TruncOp>(
- loc, LLVM::LLVMIntegerType::get(context, indexBitwidth), newOp);
+ loc, IntegerType::get(context, indexBitwidth), newOp);
}
rewriter.replaceOp(op, {newOp});
diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
index 09877bfd19b3..a968f9289dac 100644
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -40,10 +40,10 @@ struct GPUShuffleOpLowering : public ConvertOpToLLVMPattern<gpu::ShuffleOp> {
/// which threads participate in the shuffle) and a maskAndClamp (specifying
/// the highest lane which participates in the shuffle).
///
- /// %one = llvm.constant(1 : i32) : !llvm.i32
- /// %shl = llvm.shl %one, %width : !llvm.i32
- /// %active_mask = llvm.sub %shl, %one : !llvm.i32
- /// %mask_and_clamp = llvm.sub %width, %one : !llvm.i32
+ /// %one = llvm.constant(1 : i32) : i32
+ /// %shl = llvm.shl %one, %width : i32
+ /// %active_mask = llvm.sub %shl, %one : i32
+ /// %mask_and_clamp = llvm.sub %width, %one : i32
/// %shfl = nvvm.shfl.sync.bfly %active_mask, %value, %offset,
/// %mask_and_clamp : !llvm<"{ float, i1 }">
/// %shfl_value = llvm.extractvalue %shfl[0 : index] :
@@ -57,8 +57,8 @@ struct GPUShuffleOpLowering : public ConvertOpToLLVMPattern<gpu::ShuffleOp> {
gpu::ShuffleOpAdaptor adaptor(operands);
auto valueTy = adaptor.value().getType();
- auto int32Type = LLVM::LLVMIntegerType::get(rewriter.getContext(), 32);
- auto predTy = LLVM::LLVMIntegerType::get(rewriter.getContext(), 1);
+ auto int32Type = IntegerType::get(rewriter.getContext(), 32);
+ auto predTy = IntegerType::get(rewriter.getContext(), 1);
auto resultTy = LLVM::LLVMStructType::getLiteral(rewriter.getContext(),
{valueTy, predTy});
diff --git a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
index 6c88e5774239..eb5cf94b4751 100644
--- a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
+++ b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
@@ -59,10 +59,10 @@ class VulkanLaunchFuncToVulkanCallsPass
void initializeCachedTypes() {
llvmFloatType = LLVM::LLVMFloatType::get(&getContext());
llvmVoidType = LLVM::LLVMVoidType::get(&getContext());
- llvmPointerType = LLVM::LLVMPointerType::get(
- LLVM::LLVMIntegerType::get(&getContext(), 8));
- llvmInt32Type = LLVM::LLVMIntegerType::get(&getContext(), 32);
- llvmInt64Type = LLVM::LLVMIntegerType::get(&getContext(), 64);
+ llvmPointerType =
+ LLVM::LLVMPointerType::get(IntegerType::get(&getContext(), 8));
+ llvmInt32Type = IntegerType::get(&getContext(), 32);
+ llvmInt64Type = IntegerType::get(&getContext(), 64);
}
Type getMemRefType(uint32_t rank, Type elemenType) {
@@ -136,12 +136,12 @@ class VulkanLaunchFuncToVulkanCallsPass
return "Float";
if (type.isa<LLVM::LLVMHalfType>())
return "Half";
- if (auto intType = type.dyn_cast<LLVM::LLVMIntegerType>()) {
- if (intType.getBitWidth() == 32)
+ if (auto intType = type.dyn_cast<IntegerType>()) {
+ if (intType.getWidth() == 32)
return "Int32";
- if (intType.getBitWidth() == 16)
+ if (intType.getWidth() == 16)
return "Int16";
- if (intType.getBitWidth() == 8)
+ if (intType.getWidth() == 8)
return "Int8";
}
@@ -242,8 +242,7 @@ void VulkanLaunchFuncToVulkanCallsPass::createBindMemRefCalls(
// Special case for fp16 type. Since it is not a supported type in C we use
// int16_t and bitcast the descriptor.
if (type.isa<LLVM::LLVMHalfType>()) {
- auto memRefTy =
- getMemRefType(rank, LLVM::LLVMIntegerType::get(&getContext(), 16));
+ auto memRefTy = getMemRefType(rank, IntegerType::get(&getContext(), 16));
ptrToMemRefDescriptor = builder.create<LLVM::BitcastOp>(
loc, LLVM::LLVMPointerType::get(memRefTy), ptrToMemRefDescriptor);
}
@@ -325,15 +324,15 @@ void VulkanLaunchFuncToVulkanCallsPass::declareVulkanFunctions(Location loc) {
for (unsigned i = 1; i <= 3; i++) {
SmallVector<Type, 5> types{LLVM::LLVMFloatType::get(&getContext()),
- LLVM::LLVMIntegerType::get(&getContext(), 32),
- LLVM::LLVMIntegerType::get(&getContext(), 16),
- LLVM::LLVMIntegerType::get(&getContext(), 8),
+ IntegerType::get(&getContext(), 32),
+ IntegerType::get(&getContext(), 16),
+ IntegerType::get(&getContext(), 8),
LLVM::LLVMHalfType::get(&getContext())};
for (auto type : types) {
std::string fnName = "bindMemRef" + std::to_string(i) + "D" +
std::string(stringifyType(type));
if (type.isa<LLVM::LLVMHalfType>())
- type = LLVM::LLVMIntegerType::get(&getContext(), 16);
+ type = IntegerType::get(&getContext(), 16);
if (!module.lookupSymbol(fnName)) {
auto fnType = LLVM::LLVMFunctionType::get(
getVoidType(),
diff --git a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
index b94f92f37ee6..0978e8cf756a 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
@@ -60,7 +60,7 @@ static unsigned calculateGlobalIndex(spirv::GlobalVariableOp op) {
static void copy(Location loc, Value dst, Value src, Value size,
OpBuilder &builder) {
MLIRContext *context = builder.getContext();
- auto llvmI1Type = LLVM::LLVMIntegerType::get(context, 1);
+ auto llvmI1Type = IntegerType::get(context, 1);
Value isVolatile = builder.create<LLVM::ConstantOp>(
loc, llvmI1Type, builder.getBoolAttr(false));
builder.create<LLVM::MemcpyOp>(loc, dst, src, size, isVolatile);
diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
index 78927fbcd457..f60ba96e3a20 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
@@ -68,8 +68,8 @@ static unsigned getBitWidth(Type type) {
static unsigned getLLVMTypeBitWidth(Type type) {
auto vectorType = type.dyn_cast<LLVM::LLVMVectorType>();
return (vectorType ? vectorType.getElementType() : type)
- .cast<LLVM::LLVMIntegerType>()
- .getBitWidth();
+ .cast<IntegerType>()
+ .getWidth();
}
/// Creates `IntegerAttribute` with all bits set for given type
@@ -213,7 +213,7 @@ static Type convertStructTypePacked(spirv::StructType type,
static Value createI32ConstantOf(Location loc, PatternRewriter &rewriter,
unsigned value) {
return rewriter.create<LLVM::ConstantOp>(
- loc, LLVM::LLVMIntegerType::get(rewriter.getContext(), 32),
+ loc, IntegerType::get(rewriter.getContext(), 32),
rewriter.getIntegerAttr(rewriter.getI32Type(), value));
}
@@ -661,7 +661,7 @@ class ExecutionModePattern
// int32_t executionMode;
// int32_t values[]; // optional values
// };
- auto llvmI32Type = LLVM::LLVMIntegerType::get(context, 32);
+ auto llvmI32Type = IntegerType::get(context, 32);
SmallVector<Type, 2> fields;
fields.push_back(llvmI32Type);
ArrayAttr values = op.values();
diff --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
index 51f34661bece..e73bc669cd50 100644
--- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -174,7 +174,7 @@ MLIRContext &LLVMTypeConverter::getContext() {
}
Type LLVMTypeConverter::getIndexType() {
- return LLVM::LLVMIntegerType::get(&getContext(), getIndexTypeBitwidth());
+ return IntegerType::get(&getContext(), getIndexTypeBitwidth());
}
unsigned LLVMTypeConverter::getPointerBitwidth(unsigned addressSpace) {
@@ -186,7 +186,7 @@ Type LLVMTypeConverter::convertIndexType(IndexType type) {
}
Type LLVMTypeConverter::convertIntegerType(IntegerType type) {
- return LLVM::LLVMIntegerType::get(&getContext(), type.getWidth());
+ return IntegerType::get(&getContext(), type.getWidth());
}
Type LLVMTypeConverter::convertFloatType(FloatType type) {
@@ -361,8 +361,8 @@ static constexpr unsigned kPtrInUnrankedMemRefDescriptor = 1;
/// stack allocated (alloca) copy of a MemRef descriptor that got casted to
/// be unranked.
SmallVector<Type, 2> LLVMTypeConverter::getUnrankedMemRefDescriptorFields() {
- return {getIndexType(), LLVM::LLVMPointerType::get(
- LLVM::LLVMIntegerType::get(&getContext(), 8))};
+ return {getIndexType(),
+ LLVM::LLVMPointerType::get(IntegerType::get(&getContext(), 8))};
}
Type LLVMTypeConverter::convertUnrankedMemRefType(UnrankedMemRefType type) {
@@ -1021,9 +1021,8 @@ Type ConvertToLLVMPattern::getIndexType() const {
}
Type ConvertToLLVMPattern::getIntPtrType(unsigned addressSpace) const {
- return LLVM::LLVMIntegerType::get(
- &getTypeConverter()->getContext(),
- getTypeConverter()->getPointerBitwidth(addressSpace));
+ return IntegerType::get(&getTypeConverter()->getContext(),
+ getTypeConverter()->getPointerBitwidth(addressSpace));
}
Type ConvertToLLVMPattern::getVoidType() const {
@@ -1032,7 +1031,7 @@ Type ConvertToLLVMPattern::getVoidType() const {
Type ConvertToLLVMPattern::getVoidPtrType() const {
return LLVM::LLVMPointerType::get(
- LLVM::LLVMIntegerType::get(&getTypeConverter()->getContext(), 8));
+ IntegerType::get(&getTypeConverter()->getContext(), 8));
}
Value ConvertToLLVMPattern::createIndexConstant(
@@ -2197,9 +2196,8 @@ static LogicalResult copyUnrankedDescriptors(OpBuilder &builder, Location loc,
// Get frequently used types.
MLIRContext *context = builder.getContext();
auto voidType = LLVM::LLVMVoidType::get(context);
- Type voidPtrType =
- LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(context, 8));
- auto i1Type = LLVM::LLVMIntegerType::get(context, 1);
+ Type voidPtrType = LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
+ auto i1Type = IntegerType::get(context, 1);
Type indexType = typeConverter.getIndexType();
// Find the malloc and free, or declare them if necessary.
@@ -2838,7 +2836,7 @@ struct MemRefReshapeOpLowering
Value zeroIndex = createIndexConstant(rewriter, loc, 0);
Value pred = rewriter.create<LLVM::ICmpOp>(
- loc, LLVM::LLVMIntegerType::get(rewriter.getContext(), 1),
+ loc, IntegerType::get(rewriter.getContext(), 1),
LLVM::ICmpPredicate::sge, indexArg, zeroIndex);
Block *bodyBlock =
@@ -3107,10 +3105,10 @@ struct IndexCastOpLowering : public ConvertOpToLLVMPattern<IndexCastOp> {
auto targetType =
typeConverter->convertType(indexCastOp.getResult().getType())
- .cast<LLVM::LLVMIntegerType>();
- auto sourceType = transformed.in().getType().cast<LLVM::LLVMIntegerType>();
- unsigned targetBits = targetType.getBitWidth();
- unsigned sourceBits = sourceType.getBitWidth();
+ .cast<IntegerType>();
+ auto sourceType = transformed.in().getType().cast<IntegerType>();
+ unsigned targetBits = targetType.getWidth();
+ unsigned sourceBits = sourceType.getWidth();
if (targetBits == sourceBits)
rewriter.replaceOp(indexCastOp, transformed.in());
@@ -3870,7 +3868,7 @@ struct GenericAtomicRMWOpLowering
// Append the cmpxchg op to the end of the loop block.
auto successOrdering = LLVM::AtomicOrdering::acq_rel;
auto failureOrdering = LLVM::AtomicOrdering::monotonic;
- auto boolType = LLVM::LLVMIntegerType::get(rewriter.getContext(), 1);
+ auto boolType = IntegerType::get(rewriter.getContext(), 1);
auto pairType = LLVM::LLVMStructType::getLiteral(rewriter.getContext(),
{valueType, boolType});
auto cmpxchg = rewriter.create<LLVM::AtomicCmpXchgOp>(
@@ -4054,7 +4052,7 @@ Type LLVMTypeConverter::packFunctionResults(ArrayRef<Type> types) {
Value LLVMTypeConverter::promoteOneMemRefDescriptor(Location loc, Value operand,
OpBuilder &builder) {
auto *context = builder.getContext();
- auto int64Ty = LLVM::LLVMIntegerType::get(builder.getContext(), 64);
+ auto int64Ty = IntegerType::get(builder.getContext(), 64);
auto indexType = IndexType::get(context);
// Alloca with proper alignment. We do not expect optimizations of this
// alloca op and so we omit allocating at the entry block.
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 535cdcb7dfd7..5b66a31eaeaf 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -747,7 +747,7 @@ class VectorExtractOpConversion
// Remaining extraction of element from 1-D LLVM vector
auto position = positionAttrs.back().cast<IntegerAttr>();
- auto i64Type = LLVM::LLVMIntegerType::get(rewriter.getContext(), 64);
+ auto i64Type = IntegerType::get(rewriter.getContext(), 64);
auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
extracted =
rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
@@ -855,7 +855,7 @@ class VectorInsertOpConversion
}
// Insertion of an element into a 1-D LLVM vector.
- auto i64Type = LLVM::LLVMIntegerType::get(rewriter.getContext(), 64);
+ auto i64Type = IntegerType::get(rewriter.getContext(), 64);
auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
Value inserted = rewriter.create<LLVM::InsertElementOp>(
loc, typeConverter->convertType(oneDVectorType), extracted,
@@ -1121,7 +1121,7 @@ class VectorTypeCastOpConversion
}))
return failure();
- auto int64Ty = LLVM::LLVMIntegerType::get(rewriter.getContext(), 64);
+ auto int64Ty = IntegerType::get(rewriter.getContext(), 64);
// Create descriptor.
auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
@@ -1360,11 +1360,11 @@ class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
switch (conversion) {
case PrintConversion::ZeroExt64:
value = rewriter.create<ZeroExtendIOp>(
- loc, value, LLVM::LLVMIntegerType::get(rewriter.getContext(), 64));
+ loc, value, IntegerType::get(rewriter.getContext(), 64));
break;
case PrintConversion::SignExt64:
value = rewriter.create<SignExtendIOp>(
- loc, value, LLVM::LLVMIntegerType::get(rewriter.getContext(), 64));
+ loc, value, IntegerType::get(rewriter.getContext(), 64));
break;
case PrintConversion::None:
break;
@@ -1414,12 +1414,10 @@ class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
// Helpers for method names.
Operation *getPrintI64(Operation *op) const {
- return getPrint(op, "printI64",
- LLVM::LLVMIntegerType::get(op->getContext(), 64));
+ return getPrint(op, "printI64", IntegerType::get(op->getContext(), 64));
}
Operation *getPrintU64(Operation *op) const {
- return getPrint(op, "printU64",
- LLVM::LLVMIntegerType::get(op->getContext(), 64));
+ return getPrint(op, "printU64", IntegerType::get(op->getContext(), 64));
}
Operation *getPrintFloat(Operation *op) const {
return getPrint(op, "printF32", LLVM::LLVMFloatType::get(op->getContext()));
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index cc57b1803f26..2e8f2bf16461 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -146,7 +146,7 @@ static ParseResult parseCmpOp(OpAsmParser &parser, OperationState &result) {
// The result type is either i1 or a vector type <? x i1> if the inputs are
// vectors.
- Type resultType = LLVMIntegerType::get(builder.getContext(), 1);
+ Type resultType = IntegerType::get(builder.getContext(), 1);
if (!isCompatibleType(type))
return parser.emitError(trailingTypeLoc,
"expected LLVM dialect-compatible type");
@@ -1254,7 +1254,7 @@ static void printGlobalOp(OpAsmPrinter &p, GlobalOp op) {
// TODO: make the size depend on data layout rather than on the conversion
// pass option, and pull that information here.
static LogicalResult verifyCastWithIndex(Type llvmType) {
- return success(llvmType.isa<LLVMIntegerType>());
+ return success(llvmType.isa<IntegerType>());
}
/// Checks if `llvmType` is dialect cast-compatible with built-in `type` and
@@ -1294,19 +1294,6 @@ static LogicalResult verifyCast(DialectCastOp op, Type llvmType, Type type) {
"invalid cast between f64 and a type other than !llvm.double");
}
- // Singless integers are compatible with LLVM integer of the same bitwidth.
- if (type.isSignlessInteger()) {
- auto llvmInt = llvmType.dyn_cast<LLVMIntegerType>();
- if (!llvmInt)
- return op->emitOpError(
- "invalid cast between integer and non-integer type");
- if (llvmInt.getBitWidth() == type.getIntOrFloatBitWidth())
- return success();
-
- return op->emitOpError(
- "invalid cast between integers with mismatching bitwidth");
- }
-
// Vectors are compatible if they are 1D non-scalable, and their element types
// are compatible.
if (auto vectorType = type.dyn_cast<VectorType>()) {
@@ -1413,9 +1400,8 @@ static LogicalResult verifyCast(DialectCastOp op, Type llvmType, Type type) {
auto ptrType = structType.getBody()[1].dyn_cast<LLVMPointerType>();
auto ptrElementType =
- ptrType ? ptrType.getElementType().dyn_cast<LLVMIntegerType>()
- : nullptr;
- if (!ptrElementType || ptrElementType.getBitWidth() != 8)
+ ptrType ? ptrType.getElementType().dyn_cast<IntegerType>() : nullptr;
+ if (!ptrElementType || ptrElementType.getWidth() != 8)
return op->emitOpError("expected second element of a memref descriptor "
"to be an !llvm.ptr<i8>");
@@ -1515,8 +1501,8 @@ static ParseResult parseGlobalOp(OpAsmParser &parser, OperationState &result) {
if (types.empty()) {
if (auto strAttr = value.dyn_cast_or_null<StringAttr>()) {
MLIRContext *context = parser.getBuilder().getContext();
- auto arrayType = LLVM::LLVMArrayType::get(
- LLVM::LLVMIntegerType::get(context, 8), strAttr.getValue().size());
+ auto arrayType = LLVM::LLVMArrayType::get(IntegerType::get(context, 8),
+ strAttr.getValue().size());
types.push_back(arrayType);
} else {
return parser.emitError(parser.getNameLoc(),
@@ -1543,9 +1529,9 @@ static LogicalResult verify(GlobalOp op) {
if (auto strAttr = op.getValueOrNull().dyn_cast_or_null<StringAttr>()) {
auto type = op.getType().dyn_cast<LLVMArrayType>();
- LLVMIntegerType elementType =
- type ? type.getElementType().dyn_cast<LLVMIntegerType>() : nullptr;
- if (!elementType || elementType.getBitWidth() != 8 ||
+ IntegerType elementType =
+ type ? type.getElementType().dyn_cast<IntegerType>() : nullptr;
+ if (!elementType || elementType.getWidth() != 8 ||
type.getNumElements() != strAttr.getValue().size())
return op.emitOpError(
"requires an i8 array type of the length equal to that of the string "
@@ -1957,16 +1943,16 @@ static LogicalResult verify(AtomicRMWOp op) {
if (!mlir::LLVM::isCompatibleFloatingPointType(valType))
return op.emitOpError("expected LLVM IR floating point type");
} else if (op.bin_op() == AtomicBinOp::xchg) {
- auto intType = valType.dyn_cast<LLVMIntegerType>();
- unsigned intBitWidth = intType ? intType.getBitWidth() : 0;
+ auto intType = valType.dyn_cast<IntegerType>();
+ unsigned intBitWidth = intType ? intType.getWidth() : 0;
if (intBitWidth != 8 && intBitWidth != 16 && intBitWidth != 32 &&
intBitWidth != 64 && !valType.isa<LLVMBFloatType>() &&
!valType.isa<LLVMHalfType>() && !valType.isa<LLVMFloatType>() &&
!valType.isa<LLVMDoubleType>())
return op.emitOpError("unexpected LLVM IR type for 'xchg' bin_op");
} else {
- auto intType = valType.dyn_cast<LLVMIntegerType>();
- unsigned intBitWidth = intType ? intType.getBitWidth() : 0;
+ auto intType = valType.dyn_cast<IntegerType>();
+ unsigned intBitWidth = intType ? intType.getWidth() : 0;
if (intBitWidth != 8 && intBitWidth != 16 && intBitWidth != 32 &&
intBitWidth != 64)
return op.emitOpError("expected LLVM IR integer type");
@@ -2007,7 +1993,7 @@ static ParseResult parseAtomicCmpXchgOp(OpAsmParser &parser,
parser.resolveOperand(val, type, result.operands))
return failure();
- auto boolType = LLVMIntegerType::get(builder.getContext(), 1);
+ auto boolType = IntegerType::get(builder.getContext(), 1);
auto resultType =
LLVMStructType::getLiteral(builder.getContext(), {type, boolType});
result.addTypes(resultType);
@@ -2024,8 +2010,8 @@ static LogicalResult verify(AtomicCmpXchgOp op) {
if (cmpType != ptrType.getElementType() || cmpType != valType)
return op.emitOpError("expected LLVM IR element type for operand #0 to "
"match type for all other operands");
- auto intType = valType.dyn_cast<LLVMIntegerType>();
- unsigned intBitWidth = intType ? intType.getBitWidth() : 0;
+ auto intType = valType.dyn_cast<IntegerType>();
+ unsigned intBitWidth = intType ? intType.getWidth() : 0;
if (!valType.isa<LLVMPointerType>() && intBitWidth != 8 &&
intBitWidth != 16 && intBitWidth != 32 && intBitWidth != 64 &&
!valType.isa<LLVMBFloatType>() && !valType.isa<LLVMHalfType>() &&
@@ -2102,7 +2088,6 @@ void LLVMDialect::initialize() {
LLVMLabelType,
LLVMMetadataType,
LLVMFunctionType,
- LLVMIntegerType,
LLVMPointerType,
LLVMFixedVectorType,
LLVMScalableVectorType,
@@ -2199,8 +2184,7 @@ Value mlir::LLVM::createGlobalString(Location loc, OpBuilder &builder,
// Create the global at the entry of the module.
OpBuilder moduleBuilder(module.getBodyRegion());
MLIRContext *ctx = builder.getContext();
- auto type = LLVM::LLVMArrayType::get(LLVM::LLVMIntegerType::get(ctx, 8),
- value.size());
+ auto type = LLVM::LLVMArrayType::get(IntegerType::get(ctx, 8), value.size());
auto global = moduleBuilder.create<LLVM::GlobalOp>(
loc, type, /*isConstant=*/true, linkage, name,
builder.getStringAttr(value));
@@ -2208,10 +2192,10 @@ Value mlir::LLVM::createGlobalString(Location loc, OpBuilder &builder,
// Get the pointer to the first character in the global string.
Value globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
Value cst0 = builder.create<LLVM::ConstantOp>(
- loc, LLVM::LLVMIntegerType::get(ctx, 64),
+ loc, IntegerType::get(ctx, 64),
builder.getIntegerAttr(builder.getIndexType(), 0));
return builder.create<LLVM::GEPOp>(
- loc, LLVM::LLVMPointerType::get(LLVMIntegerType::get(ctx, 8)), globalPtr,
+ loc, LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8)), globalPtr,
ValueRange{cst0, cst0});
}
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
index 08c00befcf18..6f5ea1d813a1 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
@@ -24,7 +24,7 @@ using namespace mlir::LLVM;
/// internal functions to avoid getting a verbose `!llvm` prefix. Otherwise
/// prints it as usual.
static void dispatchPrint(DialectAsmPrinter &printer, Type type) {
- if (isCompatibleType(type))
+ if (isCompatibleType(type) && !type.isa<IntegerType>())
return mlir::LLVM::detail::printType(type, printer);
printer.printType(type);
}
@@ -45,7 +45,6 @@ static StringRef getTypeKeyword(Type type) {
.Case<LLVMLabelType>([&](Type) { return "label"; })
.Case<LLVMMetadataType>([&](Type) { return "metadata"; })
.Case<LLVMFunctionType>([&](Type) { return "func"; })
- .Case<LLVMIntegerType>([&](Type) { return "i"; })
.Case<LLVMPointerType>([&](Type) { return "ptr"; })
.Case<LLVMVectorType>([&](Type) { return "vec"; })
.Case<LLVMArrayType>([&](Type) { return "array"; })
@@ -147,11 +146,6 @@ void mlir::LLVM::detail::printType(Type type, DialectAsmPrinter &printer) {
printer << getTypeKeyword(type);
- if (auto intType = type.dyn_cast<LLVMIntegerType>()) {
- printer << intType.getBitWidth();
- return;
- }
-
if (auto ptrType = type.dyn_cast<LLVMPointerType>()) {
printer << '<';
dispatchPrint(printer, ptrType.getElementType());
@@ -416,26 +410,30 @@ static LLVMStructType parseStructType(DialectAsmParser &parser) {
/// will try to parse any type in full form (including types with the `!llvm`
/// prefix), and on failure fall back to parsing the short-hand version of the
/// LLVM dialect types without the `!llvm` prefix.
-static Type dispatchParse(DialectAsmParser &parser) {
- Type type;
+static Type dispatchParse(DialectAsmParser &parser, bool allowAny = true) {
llvm::SMLoc keyLoc = parser.getCurrentLocation();
- Location loc = parser.getEncodedSourceLoc(keyLoc);
- OptionalParseResult parseResult = parser.parseOptionalType(type);
- if (parseResult.hasValue()) {
- if (failed(*parseResult))
- return Type();
-
- // Special case for integers (i[1-9][0-9]*) that are literals rather than
- // keywords for the parser, so they are not caught by the main dispatch
- // below. Try parsing it a built-in integer type instead.
- auto intType = type.dyn_cast<IntegerType>();
- if (!intType || !intType.isSignless())
- return type;
-
- return LLVMIntegerType::getChecked(loc, intType.getWidth());
+
+ // Try parsing any MLIR type.
+ Type type;
+ OptionalParseResult result = parser.parseOptionalType(type);
+ if (result.hasValue()) {
+ if (failed(result.getValue()))
+ return nullptr;
+ // TODO: integer types are temporarily allowed for compatibility with the
+ // deprecated !llvm.i[0-9]+ syntax.
+ if (!allowAny) {
+ auto intType = type.dyn_cast<IntegerType>();
+ if (!intType || !intType.isSignless()) {
+ parser.emitError(keyLoc) << "unexpected type, expected keyword";
+ return nullptr;
+ }
+ Location loc = parser.getEncodedSourceLoc(keyLoc);
+ emitWarning(loc) << "deprecated syntax, drop '!llvm.' for integers";
+ }
+ return type;
}
- // Dispatch to concrete functions.
+ // If no type found, fallback to the shorthand form.
StringRef key;
if (failed(parser.parseKeyword(&key)))
return Type();
@@ -474,11 +472,11 @@ static ParseResult dispatchParse(DialectAsmParser &parser, Type &type) {
/// Parses one of the LLVM dialect types.
Type mlir::LLVM::detail::parseType(DialectAsmParser &parser) {
llvm::SMLoc loc = parser.getCurrentLocation();
- Type type = dispatchParse(parser);
+ Type type = dispatchParse(parser, /*allowAny=*/false);
if (!type)
return type;
if (!isCompatibleType(type)) {
- parser.emitError(loc) << "unexpected type, expected i* or keyword";
+ parser.emitError(loc) << "unexpected type, expected keyword";
return nullptr;
}
return type;
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
index c982abf8ad72..d6a037f363b6 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
@@ -15,6 +15,7 @@
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/DialectImplementation.h"
#include "mlir/IR/TypeSupport.h"
@@ -110,28 +111,6 @@ LLVMFunctionType::verifyConstructionInvariants(Location loc, Type result,
return success();
}
-//===----------------------------------------------------------------------===//
-// Integer type.
-//===----------------------------------------------------------------------===//
-
-LLVMIntegerType LLVMIntegerType::get(MLIRContext *ctx, unsigned bitwidth) {
- return Base::get(ctx, bitwidth);
-}
-
-LLVMIntegerType LLVMIntegerType::getChecked(Location loc, unsigned bitwidth) {
- return Base::getChecked(loc, bitwidth);
-}
-
-unsigned LLVMIntegerType::getBitWidth() { return getImpl()->bitwidth; }
-
-LogicalResult LLVMIntegerType::verifyConstructionInvariants(Location loc,
- unsigned bitwidth) {
- constexpr int maxSupportedBitwidth = (1 << 24);
- if (bitwidth >= maxSupportedBitwidth)
- return emitError(loc, "integer type too wide");
- return success();
-}
-
//===----------------------------------------------------------------------===//
// Pointer type.
//===----------------------------------------------------------------------===//
@@ -258,7 +237,9 @@ LogicalResult LLVMStructType::verifyConstructionInvariants(Location loc,
//===----------------------------------------------------------------------===//
bool LLVMVectorType::isValidElementType(Type type) {
- return type.isa<LLVMIntegerType, LLVMPointerType>() ||
+ if (auto intType = type.dyn_cast<IntegerType>())
+ return intType.isSignless();
+ return type.isa<LLVMPointerType>() ||
mlir::LLVM::isCompatibleFloatingPointType(type);
}
@@ -330,6 +311,34 @@ unsigned LLVMScalableVectorType::getMinNumElements() {
// Utility functions.
//===----------------------------------------------------------------------===//
+bool mlir::LLVM::isCompatibleType(Type type) {
+ // Only signless integers are compatible.
+ if (auto intType = type.dyn_cast<IntegerType>())
+ return intType.isSignless();
+
+ // clang-format off
+ return type.isa<
+ LLVMArrayType,
+ LLVMBFloatType,
+ LLVMDoubleType,
+ LLVMFP128Type,
+ LLVMFloatType,
+ LLVMFunctionType,
+ LLVMHalfType,
+ LLVMLabelType,
+ LLVMMetadataType,
+ LLVMPPCFP128Type,
+ LLVMPointerType,
+ LLVMStructType,
+ LLVMTokenType,
+ LLVMVectorType,
+ LLVMVoidType,
+ LLVMX86FP80Type,
+ LLVMX86MMXType
+ >();
+ // clang-format on
+}
+
llvm::TypeSize mlir::LLVM::getPrimitiveTypeSizeInBits(Type type) {
assert(isCompatibleType(type) &&
"expected a type compatible with the LLVM dialect");
@@ -340,8 +349,8 @@ llvm::TypeSize mlir::LLVM::getPrimitiveTypeSizeInBits(Type type) {
.Case<LLVMFloatType>([](Type) { return llvm::TypeSize::Fixed(32); })
.Case<LLVMDoubleType, LLVMX86MMXType>(
[](Type) { return llvm::TypeSize::Fixed(64); })
- .Case<LLVMIntegerType>([](LLVMIntegerType intTy) {
- return llvm::TypeSize::Fixed(intTy.getBitWidth());
+ .Case<IntegerType>([](IntegerType intTy) {
+ return llvm::TypeSize::Fixed(intTy.getWidth());
})
.Case<LLVMX86FP80Type>([](Type) { return llvm::TypeSize::Fixed(80); })
.Case<LLVMPPCFP128Type, LLVMFP128Type>(
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index f8d6518b23aa..b451623e628a 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -62,8 +62,7 @@ static ParseResult parseNVVMShflSyncBflyOp(OpAsmParser &parser,
break;
}
- auto int32Ty =
- LLVM::LLVMIntegerType::get(parser.getBuilder().getContext(), 32);
+ auto int32Ty = IntegerType::get(parser.getBuilder().getContext(), 32);
return parser.resolveOperands(ops, {int32Ty, resultType, int32Ty, int32Ty},
parser.getNameLoc(), result.operands);
}
@@ -72,8 +71,8 @@ static ParseResult parseNVVMShflSyncBflyOp(OpAsmParser &parser,
static ParseResult parseNVVMVoteBallotOp(OpAsmParser &parser,
OperationState &result) {
MLIRContext *context = parser.getBuilder().getContext();
- auto int32Ty = LLVM::LLVMIntegerType::get(context, 32);
- auto int1Ty = LLVM::LLVMIntegerType::get(context, 1);
+ auto int32Ty = IntegerType::get(context, 32);
+ auto int1Ty = IntegerType::get(context, 1);
SmallVector<OpAsmParser::OperandType, 8> ops;
Type type;
diff --git a/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
index f50c49f03a07..da1b73ac565a 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
@@ -46,8 +46,8 @@ static ParseResult parseROCDLMubufLoadOp(OpAsmParser &parser,
return failure();
MLIRContext *context = parser.getBuilder().getContext();
- auto int32Ty = LLVM::LLVMIntegerType::get(context, 32);
- auto int1Ty = LLVM::LLVMIntegerType::get(context, 1);
+ auto int32Ty = IntegerType::get(context, 32);
+ auto int1Ty = IntegerType::get(context, 1);
auto i32x4Ty = LLVM::LLVMFixedVectorType::get(int32Ty, 4);
return parser.resolveOperands(ops,
{i32x4Ty, int32Ty, int32Ty, int1Ty, int1Ty},
@@ -65,8 +65,8 @@ static ParseResult parseROCDLMubufStoreOp(OpAsmParser &parser,
return failure();
MLIRContext *context = parser.getBuilder().getContext();
- auto int32Ty = LLVM::LLVMIntegerType::get(context, 32);
- auto int1Ty = LLVM::LLVMIntegerType::get(context, 1);
+ auto int32Ty = IntegerType::get(context, 32);
+ auto int1Ty = IntegerType::get(context, 1);
auto i32x4Ty = LLVM::LLVMFixedVectorType::get(int32Ty, 4);
if (parser.resolveOperands(ops,
diff --git a/mlir/lib/Dialect/LLVMIR/IR/TypeDetail.h b/mlir/lib/Dialect/LLVMIR/IR/TypeDetail.h
index 30386da0c10e..cf8e46842662 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/TypeDetail.h
+++ b/mlir/lib/Dialect/LLVMIR/IR/TypeDetail.h
@@ -373,27 +373,6 @@ struct LLVMFunctionTypeStorage : public TypeStorage {
ArrayRef<Type> argumentTypes;
};
-//===----------------------------------------------------------------------===//
-// LLVMIntegerTypeStorage.
-//===----------------------------------------------------------------------===//
-
-/// Storage type for LLVM dialect integer types. These are uniqued by bitwidth.
-struct LLVMIntegerTypeStorage : public TypeStorage {
- using KeyTy = unsigned;
-
- LLVMIntegerTypeStorage(unsigned width) : bitwidth(width) {}
-
- static LLVMIntegerTypeStorage *construct(TypeStorageAllocator &allocator,
- const KeyTy &key) {
- return new (allocator.allocate<LLVMIntegerTypeStorage>())
- LLVMIntegerTypeStorage(key);
- }
-
- bool operator==(const KeyTy &key) const { return key == bitwidth; }
-
- unsigned bitwidth;
-};
-
//===----------------------------------------------------------------------===//
// LLVMPointerTypeStorage.
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/ExecutionEngine/JitRunner.cpp b/mlir/lib/ExecutionEngine/JitRunner.cpp
index bfdae2b4588d..c7548b0d8a85 100644
--- a/mlir/lib/ExecutionEngine/JitRunner.cpp
+++ b/mlir/lib/ExecutionEngine/JitRunner.cpp
@@ -199,8 +199,8 @@ Error checkCompatibleReturnType<int32_t>(LLVM::LLVMFuncOp mainFunction) {
auto resultType = mainFunction.getType()
.cast<LLVM::LLVMFunctionType>()
.getReturnType()
- .dyn_cast<LLVM::LLVMIntegerType>();
- if (!resultType || resultType.getBitWidth() != 32)
+ .dyn_cast<IntegerType>();
+ if (!resultType || resultType.getWidth() != 32)
return make_string_error("only single llvm.i32 function result supported");
return Error::success();
}
@@ -209,8 +209,8 @@ Error checkCompatibleReturnType<int64_t>(LLVM::LLVMFuncOp mainFunction) {
auto resultType = mainFunction.getType()
.cast<LLVM::LLVMFunctionType>()
.getReturnType()
- .dyn_cast<LLVM::LLVMIntegerType>();
- if (!resultType || resultType.getBitWidth() != 64)
+ .dyn_cast<IntegerType>();
+ if (!resultType || resultType.getWidth() != 64)
return make_string_error("only single llvm.i64 function result supported");
return Error::success();
}
diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
index 89e7decc8152..0159d2a57c7d 100644
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -172,8 +172,8 @@ Type Importer::getStdTypeForAttr(Type type) {
if (!type)
return nullptr;
- if (auto intType = type.dyn_cast<LLVMIntegerType>())
- return b.getIntegerType(intType.getBitWidth());
+ if (auto intType = type.dyn_cast<IntegerType>())
+ return intType;
if (type.isa<LLVMFloatType>())
return b.getF32Type();
@@ -244,7 +244,7 @@ Attribute Importer::getConstantAsAttr(llvm::Constant *value) {
if (auto *c = dyn_cast<llvm::ConstantFP>(value)) {
if (c->getType()->isDoubleTy())
return b.getFloatAttr(FloatType::getF64(context), c->getValueAPF());
- else if (c->getType()->isFloatingPointTy())
+ if (c->getType()->isFloatingPointTy())
return b.getFloatAttr(FloatType::getF32(context), c->getValueAPF());
}
if (auto *f = dyn_cast<llvm::Function>(value))
@@ -261,7 +261,7 @@ Attribute Importer::getConstantAsAttr(llvm::Constant *value) {
if (!attrType)
return nullptr;
- if (type.isa<LLVMIntegerType>()) {
+ if (type.isa<IntegerType>()) {
SmallVector<APInt, 8> values;
values.reserve(cd->getNumElements());
for (unsigned i = 0, e = cd->getNumElements(); i < e; ++i)
diff --git a/mlir/lib/Target/LLVMIR/TypeTranslation.cpp b/mlir/lib/Target/LLVMIR/TypeTranslation.cpp
index ecde56cc78f6..2c024dd2489e 100644
--- a/mlir/lib/Target/LLVMIR/TypeTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/TypeTranslation.cpp
@@ -8,6 +8,7 @@
#include "mlir/Target/LLVMIR/TypeTranslation.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "llvm/ADT/TypeSwitch.h"
@@ -71,10 +72,9 @@ class TypeToLLVMIRTranslatorImpl {
.Case([this](LLVM::LLVMMetadataType) {
return llvm::Type::getMetadataTy(context);
})
- .Case<LLVM::LLVMArrayType, LLVM::LLVMIntegerType,
- LLVM::LLVMFunctionType, LLVM::LLVMPointerType,
- LLVM::LLVMStructType, LLVM::LLVMFixedVectorType,
- LLVM::LLVMScalableVectorType>(
+ .Case<LLVM::LLVMArrayType, IntegerType, LLVM::LLVMFunctionType,
+ LLVM::LLVMPointerType, LLVM::LLVMStructType,
+ LLVM::LLVMFixedVectorType, LLVM::LLVMScalableVectorType>(
[this](auto type) { return this->translate(type); })
.Default([](Type t) -> llvm::Type * {
llvm_unreachable("unknown LLVM dialect type");
@@ -101,8 +101,8 @@ class TypeToLLVMIRTranslatorImpl {
}
/// Translates the given integer type.
- llvm::Type *translate(LLVM::LLVMIntegerType type) {
- return llvm::IntegerType::get(context, type.getBitWidth());
+ llvm::Type *translate(IntegerType type) {
+ return llvm::IntegerType::get(context, type.getWidth());
}
/// Translates the given pointer type.
@@ -253,7 +253,7 @@ class TypeFromLLVMIRTranslatorImpl {
/// Translates the given integer type.
Type translate(llvm::IntegerType *type) {
- return LLVM::LLVMIntegerType::get(&context, type->getBitWidth());
+ return IntegerType::get(&context, type->getBitWidth());
}
/// Translates the given pointer type.
diff --git a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
index 3fcf3a00ab9a..14db10856393 100644
--- a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
@@ -10,9 +10,9 @@ module attributes {gpu.container_module} {
gpu.module @kernel_module attributes {
nvvm.cubin = "CUBIN", rocdl.hsaco = "HSACO"
} {
- llvm.func @kernel(%arg0: !llvm.i32, %arg1: !llvm.ptr<float>,
- %arg2: !llvm.ptr<float>, %arg3: !llvm.i64, %arg4: !llvm.i64,
- %arg5: !llvm.i64) attributes {gpu.kernel} {
+ llvm.func @kernel(%arg0: i32, %arg1: !llvm.ptr<float>,
+ %arg2: !llvm.ptr<float>, %arg3: i64, %arg4: i64,
+ %arg5: i64) attributes {gpu.kernel} {
llvm.return
}
}
@@ -27,7 +27,7 @@ module attributes {gpu.container_module} {
return
}
- // CHECK: [[C8:%.*]] = llvm.mlir.constant(8 : index) : !llvm.i64
+ // CHECK: [[C8:%.*]] = llvm.mlir.constant(8 : index) : i64
// CHECK: [[ADDRESSOF:%.*]] = llvm.mlir.addressof @[[GLOBAL]]
// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : index)
// CHECK: [[BINARY:%.*]] = llvm.getelementptr [[ADDRESSOF]]{{\[}}[[C0]], [[C0]]]
@@ -39,7 +39,7 @@ module attributes {gpu.container_module} {
// CHECK: [[C0_I32:%.*]] = llvm.mlir.constant(0 : i32)
// CHECK: [[STREAM:%.*]] = llvm.call @mgpuStreamCreate
- // CHECK: [[NUM_PARAMS:%.*]] = llvm.mlir.constant(6 : i32) : !llvm.i32
+ // CHECK: [[NUM_PARAMS:%.*]] = llvm.mlir.constant(6 : i32) : i32
// CHECK-NEXT: [[PARAMS:%.*]] = llvm.alloca [[NUM_PARAMS]] x !llvm.ptr<i8>
// CHECK: [[EXTRA_PARAMS:%.*]] = llvm.mlir.null : !llvm.ptr<ptr<i8>>
diff --git a/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir b/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
index a2764d424205..215a39edfddb 100644
--- a/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
+++ b/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
@@ -5,31 +5,31 @@ gpu.module @kernel {
// NVVM-LABEL: llvm.func @private
gpu.func @private(%arg0: f32) private(%arg1: memref<4xf32, 5>) {
// Allocate private memory inside the function.
- // NVVM: %[[size:.*]] = llvm.mlir.constant(4 : i64) : !llvm.i64
- // NVVM: %[[raw:.*]] = llvm.alloca %[[size]] x !llvm.float : (!llvm.i64) -> !llvm.ptr<float>
+ // NVVM: %[[size:.*]] = llvm.mlir.constant(4 : i64) : i64
+ // NVVM: %[[raw:.*]] = llvm.alloca %[[size]] x !llvm.float : (i64) -> !llvm.ptr<float>
- // ROCDL: %[[size:.*]] = llvm.mlir.constant(4 : i64) : !llvm.i64
- // ROCDL: %[[raw:.*]] = llvm.alloca %[[size]] x !llvm.float : (!llvm.i64) -> !llvm.ptr<float, 5>
+ // ROCDL: %[[size:.*]] = llvm.mlir.constant(4 : i64) : i64
+ // ROCDL: %[[raw:.*]] = llvm.alloca %[[size]] x !llvm.float : (i64) -> !llvm.ptr<float, 5>
// Populate the memref descriptor.
// NVVM: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
// NVVM: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0]
// NVVM: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1]
- // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
// NVVM: %[[descr4:.*]] = llvm.insertvalue %[[c0]], %[[descr3]][2]
- // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64
+ // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64
// NVVM: %[[descr5:.*]] = llvm.insertvalue %[[c4]], %[[descr4]][3, 0]
- // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+ // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
// NVVM: %[[descr6:.*]] = llvm.insertvalue %[[c1]], %[[descr5]][4, 0]
// ROCDL: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float, 5>, ptr<float, 5>, i64, array<1 x i64>, array<1 x i64>)>
// ROCDL: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0]
// ROCDL: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1]
- // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
// ROCDL: %[[descr4:.*]] = llvm.insertvalue %[[c0]], %[[descr3]][2]
- // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64
+ // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64
// ROCDL: %[[descr5:.*]] = llvm.insertvalue %[[c4]], %[[descr4]][3, 0]
- // ROCDL: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+ // ROCDL: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
// ROCDL: %[[descr6:.*]] = llvm.insertvalue %[[c1]], %[[descr5]][4, 0]
// "Store" lowering should work just as any other memref, only check that
@@ -67,12 +67,12 @@ gpu.module @kernel {
// ROCDL-SAME: {
gpu.func @workgroup(%arg0: f32) workgroup(%arg1: memref<4xf32, 3>) {
// Get the address of the first element in the global array.
- // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+ // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
// NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<4 x float>, 3>
// NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
// NVVM-SAME: !llvm.ptr<float, 3>
- // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+ // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
// ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<4 x float>, 3>
// ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
// ROCDL-SAME: !llvm.ptr<float, 3>
@@ -81,21 +81,21 @@ gpu.module @kernel {
// NVVM: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<1 x i64>, array<1 x i64>)>
// NVVM: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0]
// NVVM: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1]
- // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
// NVVM: %[[descr4:.*]] = llvm.insertvalue %[[c0]], %[[descr3]][2]
- // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64
+ // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64
// NVVM: %[[descr5:.*]] = llvm.insertvalue %[[c4]], %[[descr4]][3, 0]
- // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+ // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
// NVVM: %[[descr6:.*]] = llvm.insertvalue %[[c1]], %[[descr5]][4, 0]
// ROCDL: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<1 x i64>, array<1 x i64>)>
// ROCDL: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0]
// ROCDL: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1]
- // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
// ROCDL: %[[descr4:.*]] = llvm.insertvalue %[[c0]], %[[descr3]][2]
- // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64
+ // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64
// ROCDL: %[[descr5:.*]] = llvm.insertvalue %[[c4]], %[[descr4]][3, 0]
- // ROCDL: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+ // ROCDL: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
// ROCDL: %[[descr6:.*]] = llvm.insertvalue %[[c1]], %[[descr5]][4, 0]
// "Store" lowering should work just as any other memref, only check that
@@ -130,12 +130,12 @@ gpu.module @kernel {
// ROCDL-LABEL: llvm.func @workgroup3d
gpu.func @workgroup3d(%arg0: f32) workgroup(%arg1: memref<4x2x6xf32, 3>) {
// Get the address of the first element in the global array.
- // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+ // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
// NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<48 x float>, 3>
// NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
// NVVM-SAME: !llvm.ptr<float, 3>
- // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+ // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
// ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<48 x float>, 3>
// ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
// ROCDL-SAME: !llvm.ptr<float, 3>
@@ -144,37 +144,37 @@ gpu.module @kernel {
// NVVM: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<3 x i64>, array<3 x i64>)>
// NVVM: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0]
// NVVM: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1]
- // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
// NVVM: %[[descr4:.*]] = llvm.insertvalue %[[c0]], %[[descr3]][2]
- // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64
+ // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64
// NVVM: %[[descr5:.*]] = llvm.insertvalue %[[c4]], %[[descr4]][3, 0]
- // NVVM: %[[c12:.*]] = llvm.mlir.constant(12 : index) : !llvm.i64
+ // NVVM: %[[c12:.*]] = llvm.mlir.constant(12 : index) : i64
// NVVM: %[[descr6:.*]] = llvm.insertvalue %[[c12]], %[[descr5]][4, 0]
- // NVVM: %[[c2:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
+ // NVVM: %[[c2:.*]] = llvm.mlir.constant(2 : index) : i64
// NVVM: %[[descr7:.*]] = llvm.insertvalue %[[c2]], %[[descr6]][3, 1]
- // NVVM: %[[c6:.*]] = llvm.mlir.constant(6 : index) : !llvm.i64
+ // NVVM: %[[c6:.*]] = llvm.mlir.constant(6 : index) : i64
// NVVM: %[[descr8:.*]] = llvm.insertvalue %[[c6]], %[[descr7]][4, 1]
- // NVVM: %[[c6:.*]] = llvm.mlir.constant(6 : index) : !llvm.i64
+ // NVVM: %[[c6:.*]] = llvm.mlir.constant(6 : index) : i64
// NVVM: %[[descr9:.*]] = llvm.insertvalue %[[c6]], %[[descr8]][3, 2]
- // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+ // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
// NVVM: %[[descr10:.*]] = llvm.insertvalue %[[c1]], %[[descr9]][4, 2]
// ROCDL: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<3 x i64>, array<3 x i64>)>
// ROCDL: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0]
// ROCDL: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1]
- // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
// ROCDL: %[[descr4:.*]] = llvm.insertvalue %[[c0]], %[[descr3]][2]
- // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64
+ // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64
// ROCDL: %[[descr5:.*]] = llvm.insertvalue %[[c4]], %[[descr4]][3, 0]
- // ROCDL: %[[c12:.*]] = llvm.mlir.constant(12 : index) : !llvm.i64
+ // ROCDL: %[[c12:.*]] = llvm.mlir.constant(12 : index) : i64
// ROCDL: %[[descr6:.*]] = llvm.insertvalue %[[c12]], %[[descr5]][4, 0]
- // ROCDL: %[[c2:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
+ // ROCDL: %[[c2:.*]] = llvm.mlir.constant(2 : index) : i64
// ROCDL: %[[descr7:.*]] = llvm.insertvalue %[[c2]], %[[descr6]][3, 1]
- // ROCDL: %[[c6:.*]] = llvm.mlir.constant(6 : index) : !llvm.i64
+ // ROCDL: %[[c6:.*]] = llvm.mlir.constant(6 : index) : i64
// ROCDL: %[[descr8:.*]] = llvm.insertvalue %[[c6]], %[[descr7]][4, 1]
- // ROCDL: %[[c6:.*]] = llvm.mlir.constant(6 : index) : !llvm.i64
+ // ROCDL: %[[c6:.*]] = llvm.mlir.constant(6 : index) : i64
// ROCDL: %[[descr9:.*]] = llvm.insertvalue %[[c6]], %[[descr8]][3, 2]
- // ROCDL: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+ // ROCDL: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
// ROCDL: %[[descr10:.*]] = llvm.insertvalue %[[c1]], %[[descr9]][4, 2]
%c0 = constant 0 : index
@@ -212,14 +212,14 @@ gpu.module @kernel {
// Private buffers.
// NVVM: %[[c3:.*]] = llvm.mlir.constant(3 : i64)
- // NVVM: llvm.alloca %[[c3]] x !llvm.float : (!llvm.i64) -> !llvm.ptr<float>
+ // NVVM: llvm.alloca %[[c3]] x !llvm.float : (i64) -> !llvm.ptr<float>
// NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : i64)
- // NVVM: llvm.alloca %[[c4]] x !llvm.float : (!llvm.i64) -> !llvm.ptr<float>
+ // NVVM: llvm.alloca %[[c4]] x !llvm.float : (i64) -> !llvm.ptr<float>
// ROCDL: %[[c3:.*]] = llvm.mlir.constant(3 : i64)
- // ROCDL: llvm.alloca %[[c3]] x !llvm.float : (!llvm.i64) -> !llvm.ptr<float, 5>
+ // ROCDL: llvm.alloca %[[c3]] x !llvm.float : (i64) -> !llvm.ptr<float, 5>
// ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : i64)
- // ROCDL: llvm.alloca %[[c4]] x !llvm.float : (!llvm.i64) -> !llvm.ptr<float, 5>
+ // ROCDL: llvm.alloca %[[c4]] x !llvm.float : (i64) -> !llvm.ptr<float, 5>
%c0 = constant 0 : index
store %arg0, %arg1[%c0] : memref<1xf32, 3>
diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
index 21be3b6297e2..3a48f2e759af 100644
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
@@ -7,46 +7,46 @@ gpu.module @test_module {
func @gpu_index_ops()
-> (index, index, index, index, index, index,
index, index, index, index, index, index) {
- // CHECK32-NOT: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK32-NOT: = llvm.sext %{{.*}} : i32 to i64
- // CHECK: = nvvm.read.ptx.sreg.tid.x : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: = nvvm.read.ptx.sreg.tid.x : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%tIdX = "gpu.thread_id"() {dimension = "x"} : () -> (index)
- // CHECK: = nvvm.read.ptx.sreg.tid.y : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: = nvvm.read.ptx.sreg.tid.y : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%tIdY = "gpu.thread_id"() {dimension = "y"} : () -> (index)
- // CHECK: = nvvm.read.ptx.sreg.tid.z : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: = nvvm.read.ptx.sreg.tid.z : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%tIdZ = "gpu.thread_id"() {dimension = "z"} : () -> (index)
- // CHECK: = nvvm.read.ptx.sreg.ntid.x : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: = nvvm.read.ptx.sreg.ntid.x : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%bDimX = "gpu.block_dim"() {dimension = "x"} : () -> (index)
- // CHECK: = nvvm.read.ptx.sreg.ntid.y : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: = nvvm.read.ptx.sreg.ntid.y : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%bDimY = "gpu.block_dim"() {dimension = "y"} : () -> (index)
- // CHECK: = nvvm.read.ptx.sreg.ntid.z : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: = nvvm.read.ptx.sreg.ntid.z : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%bDimZ = "gpu.block_dim"() {dimension = "z"} : () -> (index)
- // CHECK: = nvvm.read.ptx.sreg.ctaid.x : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: = nvvm.read.ptx.sreg.ctaid.x : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%bIdX = "gpu.block_id"() {dimension = "x"} : () -> (index)
- // CHECK: = nvvm.read.ptx.sreg.ctaid.y : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: = nvvm.read.ptx.sreg.ctaid.y : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%bIdY = "gpu.block_id"() {dimension = "y"} : () -> (index)
- // CHECK: = nvvm.read.ptx.sreg.ctaid.z : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: = nvvm.read.ptx.sreg.ctaid.z : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%bIdZ = "gpu.block_id"() {dimension = "z"} : () -> (index)
- // CHECK: = nvvm.read.ptx.sreg.nctaid.x : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: = nvvm.read.ptx.sreg.nctaid.x : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%gDimX = "gpu.grid_dim"() {dimension = "x"} : () -> (index)
- // CHECK: = nvvm.read.ptx.sreg.nctaid.y : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: = nvvm.read.ptx.sreg.nctaid.y : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%gDimY = "gpu.grid_dim"() {dimension = "y"} : () -> (index)
- // CHECK: = nvvm.read.ptx.sreg.nctaid.z : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: = nvvm.read.ptx.sreg.nctaid.z : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%gDimZ = "gpu.grid_dim"() {dimension = "z"} : () -> (index)
std.return %tIdX, %tIdY, %tIdZ, %bDimX, %bDimY, %bDimZ,
@@ -62,11 +62,11 @@ gpu.module @test_module {
// CHECK-LABEL: func @gpu_index_comp
// CHECK32-LABEL: func @gpu_index_comp
func @gpu_index_comp(%idx : index) -> index {
- // CHECK: = llvm.add %{{.*}}, %{{.*}} : !llvm.i64
- // CHECK32: = llvm.add %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: = llvm.add %{{.*}}, %{{.*}} : i64
+ // CHECK32: = llvm.add %{{.*}}, %{{.*}} : i32
%0 = addi %idx, %idx : index
- // CHECK: llvm.return %{{.*}} : !llvm.i64
- // CHECK32: llvm.return %{{.*}} : !llvm.i32
+ // CHECK: llvm.return %{{.*}} : i64
+ // CHECK32: llvm.return %{{.*}} : i32
std.return %0 : index
}
}
@@ -112,14 +112,14 @@ gpu.module @test_module {
func @gpu_shuffle() -> (f32) {
// CHECK: %[[#VALUE:]] = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
%arg0 = constant 1.0 : f32
- // CHECK: %[[#OFFSET:]] = llvm.mlir.constant(4 : i32) : !llvm.i32
+ // CHECK: %[[#OFFSET:]] = llvm.mlir.constant(4 : i32) : i32
%arg1 = constant 4 : i32
- // CHECK: %[[#WIDTH:]] = llvm.mlir.constant(23 : i32) : !llvm.i32
+ // CHECK: %[[#WIDTH:]] = llvm.mlir.constant(23 : i32) : i32
%arg2 = constant 23 : i32
- // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: %[[#SHL:]] = llvm.shl %[[#ONE]], %[[#WIDTH]] : !llvm.i32
- // CHECK: %[[#MASK:]] = llvm.sub %[[#SHL]], %[[#ONE]] : !llvm.i32
- // CHECK: %[[#CLAMP:]] = llvm.sub %[[#WIDTH]], %[[#ONE]] : !llvm.i32
+ // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[#SHL:]] = llvm.shl %[[#ONE]], %[[#WIDTH]] : i32
+ // CHECK: %[[#MASK:]] = llvm.sub %[[#SHL]], %[[#ONE]] : i32
+ // CHECK: %[[#CLAMP:]] = llvm.sub %[[#WIDTH]], %[[#ONE]] : i32
// CHECK: %[[#SHFL:]] = nvvm.shfl.sync.bfly %[[#MASK]], %[[#VALUE]], %[[#OFFSET]], %[[#CLAMP]] : !llvm.struct<(float, i1)>
// CHECK: llvm.extractvalue %[[#SHFL]][0 : index] : !llvm.struct<(float, i1)>
// CHECK: llvm.extractvalue %[[#SHFL]][1 : index] : !llvm.struct<(float, i1)>
diff --git a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
index dc23d436cab5..a65bef80e363 100644
--- a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
+++ b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
@@ -7,46 +7,46 @@ gpu.module @test_module {
func @gpu_index_ops()
-> (index, index, index, index, index, index,
index, index, index, index, index, index) {
- // CHECK32-NOT: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK32-NOT: = llvm.sext %{{.*}} : i32 to i64
- // CHECK: rocdl.workitem.id.x : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: rocdl.workitem.id.x : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%tIdX = "gpu.thread_id"() {dimension = "x"} : () -> (index)
- // CHECK: rocdl.workitem.id.y : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: rocdl.workitem.id.y : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%tIdY = "gpu.thread_id"() {dimension = "y"} : () -> (index)
- // CHECK: rocdl.workitem.id.z : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: rocdl.workitem.id.z : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%tIdZ = "gpu.thread_id"() {dimension = "z"} : () -> (index)
- // CHECK: rocdl.workgroup.dim.x : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: rocdl.workgroup.dim.x : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%bDimX = "gpu.block_dim"() {dimension = "x"} : () -> (index)
- // CHECK: rocdl.workgroup.dim.y : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: rocdl.workgroup.dim.y : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%bDimY = "gpu.block_dim"() {dimension = "y"} : () -> (index)
- // CHECK: rocdl.workgroup.dim.z : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: rocdl.workgroup.dim.z : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%bDimZ = "gpu.block_dim"() {dimension = "z"} : () -> (index)
- // CHECK: rocdl.workgroup.id.x : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: rocdl.workgroup.id.x : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%bIdX = "gpu.block_id"() {dimension = "x"} : () -> (index)
- // CHECK: rocdl.workgroup.id.y : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: rocdl.workgroup.id.y : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%bIdY = "gpu.block_id"() {dimension = "y"} : () -> (index)
- // CHECK: rocdl.workgroup.id.z : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: rocdl.workgroup.id.z : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%bIdZ = "gpu.block_id"() {dimension = "z"} : () -> (index)
- // CHECK: rocdl.grid.dim.x : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: rocdl.grid.dim.x : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%gDimX = "gpu.grid_dim"() {dimension = "x"} : () -> (index)
- // CHECK: rocdl.grid.dim.y : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: rocdl.grid.dim.y : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%gDimY = "gpu.grid_dim"() {dimension = "y"} : () -> (index)
- // CHECK: rocdl.grid.dim.z : !llvm.i32
- // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: rocdl.grid.dim.z : i32
+ // CHECK: = llvm.sext %{{.*}} : i32 to i64
%gDimZ = "gpu.grid_dim"() {dimension = "z"} : () -> (index)
std.return %tIdX, %tIdY, %tIdZ, %bDimX, %bDimY, %bDimZ,
@@ -62,11 +62,11 @@ gpu.module @test_module {
// CHECK-LABEL: func @gpu_index_comp
// CHECK32-LABEL: func @gpu_index_comp
func @gpu_index_comp(%idx : index) -> index {
- // CHECK: = llvm.add %{{.*}}, %{{.*}} : !llvm.i64
- // CHECK32: = llvm.add %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: = llvm.add %{{.*}}, %{{.*}} : i64
+ // CHECK32: = llvm.add %{{.*}}, %{{.*}} : i32
%0 = addi %idx, %idx : index
- // CHECK: llvm.return %{{.*}} : !llvm.i64
- // CHECK32: llvm.return %{{.*}} : !llvm.i32
+ // CHECK: llvm.return %{{.*}} : i64
+ // CHECK32: llvm.return %{{.*}} : i32
std.return %0 : index
}
}
diff --git a/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir b/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
index 507925bc891b..e66770265938 100644
--- a/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
+++ b/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
@@ -6,58 +6,58 @@
// CHECK: %[[addressof_SPIRV_BIN:.*]] = llvm.mlir.addressof @SPIRV_BIN
// CHECK: %[[SPIRV_BIN_ptr:.*]] = llvm.getelementptr %[[addressof_SPIRV_BIN]]
// CHECK: %[[SPIRV_BIN_size:.*]] = llvm.mlir.constant
-// CHECK: llvm.call @bindMemRef1DFloat(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, !llvm.i32, !llvm.i32, !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>) -> !llvm.void
-// CHECK: llvm.call @setBinaryShader(%[[Vulkan_Runtime_ptr]], %[[SPIRV_BIN_ptr]], %[[SPIRV_BIN_size]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.i32) -> !llvm.void
+// CHECK: llvm.call @bindMemRef1DFloat(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, i32, i32, !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>) -> !llvm.void
+// CHECK: llvm.call @setBinaryShader(%[[Vulkan_Runtime_ptr]], %[[SPIRV_BIN_ptr]], %[[SPIRV_BIN_size]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32) -> !llvm.void
// CHECK: %[[addressof_entry_point:.*]] = llvm.mlir.addressof @kernel_spv_entry_point_name
// CHECK: %[[entry_point_ptr:.*]] = llvm.getelementptr %[[addressof_entry_point]]
// CHECK: llvm.call @setEntryPoint(%[[Vulkan_Runtime_ptr]], %[[entry_point_ptr]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>) -> !llvm.void
-// CHECK: llvm.call @setNumWorkGroups(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, !llvm.i64, !llvm.i64, !llvm.i64) -> !llvm.void
+// CHECK: llvm.call @setNumWorkGroups(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, i64, i64, i64) -> !llvm.void
// CHECK: llvm.call @runOnVulkan(%[[Vulkan_Runtime_ptr]]) : (!llvm.ptr<i8>) -> !llvm.void
// CHECK: llvm.call @deinitVulkan(%[[Vulkan_Runtime_ptr]]) : (!llvm.ptr<i8>) -> !llvm.void
-// CHECK: llvm.func @bindMemRef1DHalf(!llvm.ptr<i8>, !llvm.i32, !llvm.i32, !llvm.ptr<struct<(ptr<i16>, ptr<i16>, i64, array<1 x i64>, array<1 x i64>)>>)
+// CHECK: llvm.func @bindMemRef1DHalf(!llvm.ptr<i8>, i32, i32, !llvm.ptr<struct<(ptr<i16>, ptr<i16>, i64, array<1 x i64>, array<1 x i64>)>>)
module attributes {gpu.container_module} {
- llvm.func @malloc(!llvm.i64) -> !llvm.ptr<i8>
+ llvm.func @malloc(i64) -> !llvm.ptr<i8>
llvm.func @foo() {
- %0 = llvm.mlir.constant(12 : index) : !llvm.i64
+ %0 = llvm.mlir.constant(12 : index) : i64
%1 = llvm.mlir.null : !llvm.ptr<float>
- %2 = llvm.mlir.constant(1 : index) : !llvm.i64
- %3 = llvm.getelementptr %1[%2] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
- %4 = llvm.ptrtoint %3 : !llvm.ptr<float> to !llvm.i64
- %5 = llvm.mul %0, %4 : !llvm.i64
- %6 = llvm.call @malloc(%5) : (!llvm.i64) -> !llvm.ptr<i8>
+ %2 = llvm.mlir.constant(1 : index) : i64
+ %3 = llvm.getelementptr %1[%2] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+ %4 = llvm.ptrtoint %3 : !llvm.ptr<float> to i64
+ %5 = llvm.mul %0, %4 : i64
+ %6 = llvm.call @malloc(%5) : (i64) -> !llvm.ptr<i8>
%7 = llvm.bitcast %6 : !llvm.ptr<i8> to !llvm.ptr<float>
%8 = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
%9 = llvm.insertvalue %7, %8[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
%10 = llvm.insertvalue %7, %9[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
- %11 = llvm.mlir.constant(0 : index) : !llvm.i64
+ %11 = llvm.mlir.constant(0 : index) : i64
%12 = llvm.insertvalue %11, %10[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
- %13 = llvm.mlir.constant(1 : index) : !llvm.i64
+ %13 = llvm.mlir.constant(1 : index) : i64
%14 = llvm.insertvalue %0, %12[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
%15 = llvm.insertvalue %13, %14[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
- %16 = llvm.mlir.constant(1 : index) : !llvm.i64
+ %16 = llvm.mlir.constant(1 : index) : i64
%17 = llvm.extractvalue %15[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
%18 = llvm.extractvalue %15[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
%19 = llvm.extractvalue %15[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
%20 = llvm.extractvalue %15[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
%21 = llvm.extractvalue %15[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
llvm.call @vulkanLaunch(%16, %16, %16, %17, %18, %19, %20, %21) {spirv_blob = "\03\02#\07\00", spirv_entry_point = "kernel"}
- : (!llvm.i64, !llvm.i64, !llvm.i64, !llvm.ptr<float>, !llvm.ptr<float>, !llvm.i64, !llvm.i64, !llvm.i64) -> ()
+ : (i64, i64, i64, !llvm.ptr<float>, !llvm.ptr<float>, i64, i64, i64) -> ()
llvm.return
}
- llvm.func @vulkanLaunch(%arg0: !llvm.i64, %arg1: !llvm.i64, %arg2: !llvm.i64, %arg6: !llvm.ptr<float>, %arg7: !llvm.ptr<float>, %arg8: !llvm.i64, %arg9: !llvm.i64, %arg10: !llvm.i64) {
+ llvm.func @vulkanLaunch(%arg0: i64, %arg1: i64, %arg2: i64, %arg6: !llvm.ptr<float>, %arg7: !llvm.ptr<float>, %arg8: i64, %arg9: i64, %arg10: i64) {
%0 = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
%1 = llvm.insertvalue %arg6, %0[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
%2 = llvm.insertvalue %arg7, %1[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
%3 = llvm.insertvalue %arg8, %2[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
%4 = llvm.insertvalue %arg9, %3[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
%5 = llvm.insertvalue %arg10, %4[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
- %6 = llvm.mlir.constant(1 : index) : !llvm.i64
- %7 = llvm.alloca %6 x !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)> : (!llvm.i64) -> !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>
+ %6 = llvm.mlir.constant(1 : index) : i64
+ %7 = llvm.alloca %6 x !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)> : (i64) -> !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>
llvm.store %5, %7 : !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>
- llvm.call @_mlir_ciface_vulkanLaunch(%arg0, %arg1, %arg2, %7) : (!llvm.i64, !llvm.i64, !llvm.i64, !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>) -> ()
+ llvm.call @_mlir_ciface_vulkanLaunch(%arg0, %arg1, %arg2, %7) : (i64, i64, i64, !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>) -> ()
llvm.return
}
- llvm.func @_mlir_ciface_vulkanLaunch(!llvm.i64, !llvm.i64, !llvm.i64, !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>)
+ llvm.func @_mlir_ciface_vulkanLaunch(i64, i64, i64, !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>)
}
diff --git a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
index 62ea39f078b2..2c75f99650ab 100644
--- a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
@@ -6,17 +6,17 @@ func @branch_loop() {
%end = constant 0 : index
// CHECK: omp.parallel
omp.parallel {
- // CHECK-NEXT: llvm.br ^[[BB1:.*]](%{{[0-9]+}}, %{{[0-9]+}} : !llvm.i64, !llvm.i64
+ // CHECK-NEXT: llvm.br ^[[BB1:.*]](%{{[0-9]+}}, %{{[0-9]+}} : i64, i64
br ^bb1(%start, %end : index, index)
- // CHECK-NEXT: ^[[BB1]](%[[ARG1:[0-9]+]]: !llvm.i64, %[[ARG2:[0-9]+]]: !llvm.i64):{{.*}}
+ // CHECK-NEXT: ^[[BB1]](%[[ARG1:[0-9]+]]: i64, %[[ARG2:[0-9]+]]: i64):{{.*}}
^bb1(%0: index, %1: index):
- // CHECK-NEXT: %[[CMP:[0-9]+]] = llvm.icmp "slt" %[[ARG1]], %[[ARG2]] : !llvm.i64
+ // CHECK-NEXT: %[[CMP:[0-9]+]] = llvm.icmp "slt" %[[ARG1]], %[[ARG2]] : i64
%2 = cmpi "slt", %0, %1 : index
- // CHECK-NEXT: llvm.cond_br %[[CMP]], ^[[BB2:.*]](%{{[0-9]+}}, %{{[0-9]+}} : !llvm.i64, !llvm.i64), ^[[BB3:.*]]
+ // CHECK-NEXT: llvm.cond_br %[[CMP]], ^[[BB2:.*]](%{{[0-9]+}}, %{{[0-9]+}} : i64, i64), ^[[BB3:.*]]
cond_br %2, ^bb2(%end, %end : index, index), ^bb3
- // CHECK-NEXT: ^[[BB2]](%[[ARG3:[0-9]+]]: !llvm.i64, %[[ARG4:[0-9]+]]: !llvm.i64):
+ // CHECK-NEXT: ^[[BB2]](%[[ARG3:[0-9]+]]: i64, %[[ARG4:[0-9]+]]: i64):
^bb2(%3: index, %4: index):
- // CHECK-NEXT: llvm.br ^[[BB1]](%[[ARG3]], %[[ARG4]] : !llvm.i64, !llvm.i64)
+ // CHECK-NEXT: llvm.br ^[[BB1]](%[[ARG3]], %[[ARG4]] : i64, i64)
br ^bb1(%3, %4 : index, index)
// CHECK-NEXT: ^[[BB3]]:
^bb3:
@@ -30,16 +30,16 @@ func @branch_loop() {
}
// CHECK-LABEL: @wsloop
-// CHECK: (%[[ARG0:.*]]: !llvm.i64, %[[ARG1:.*]]: !llvm.i64, %[[ARG2:.*]]: !llvm.i64, %[[ARG3:.*]]: !llvm.i64, %[[ARG4:.*]]: !llvm.i64, %[[ARG5:.*]]: !llvm.i64)
+// CHECK: (%[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: i64, %[[ARG3:.*]]: i64, %[[ARG4:.*]]: i64, %[[ARG5:.*]]: i64)
func @wsloop(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) {
// CHECK: omp.parallel
omp.parallel {
// CHECK: omp.wsloop
// CHECK: (%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]], %[[ARG5]])
"omp.wsloop"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5) ( {
- // CHECK: ^{{.*}}(%[[ARG6:.*]]: !llvm.i64, %[[ARG7:.*]]: !llvm.i64):
+ // CHECK: ^{{.*}}(%[[ARG6:.*]]: i64, %[[ARG7:.*]]: i64):
^bb0(%arg6: index, %arg7: index): // no predecessors
- // CHECK: "test.payload"(%[[ARG6]], %[[ARG7]]) : (!llvm.i64, !llvm.i64) -> ()
+ // CHECK: "test.payload"(%[[ARG6]], %[[ARG7]]) : (i64, i64) -> ()
"test.payload"(%arg6, %arg7) : (index, index) -> ()
omp.yield
}) {operand_segment_sizes = dense<[2, 2, 2, 0, 0, 0, 0, 0, 0]> : vector<9xi32>} : (index, index, index, index, index, index) -> ()
diff --git a/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
index ab202e5b0622..f88e901443df 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
@@ -6,7 +6,7 @@
// CHECK-LABEL: @iadd_scalar
spv.func @iadd_scalar(%arg0: i32, %arg1: i32) "None" {
- // CHECK: llvm.add %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.add %{{.*}}, %{{.*}} : i32
%0 = spv.IAdd %arg0, %arg1 : i32
spv.Return
}
@@ -24,7 +24,7 @@ spv.func @iadd_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) "None" {
// CHECK-LABEL: @isub_scalar
spv.func @isub_scalar(%arg0: i8, %arg1: i8) "None" {
- // CHECK: llvm.sub %{{.*}}, %{{.*}} : !llvm.i8
+ // CHECK: llvm.sub %{{.*}}, %{{.*}} : i8
%0 = spv.ISub %arg0, %arg1 : i8
spv.Return
}
@@ -42,7 +42,7 @@ spv.func @isub_vector(%arg0: vector<2xi16>, %arg1: vector<2xi16>) "None" {
// CHECK-LABEL: @imul_scalar
spv.func @imul_scalar(%arg0: i32, %arg1: i32) "None" {
- // CHECK: llvm.mul %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.mul %{{.*}}, %{{.*}} : i32
%0 = spv.IMul %arg0, %arg1 : i32
spv.Return
}
@@ -168,7 +168,7 @@ spv.func @fneg_vector(%arg: vector<2xf32>) "None" {
// CHECK-LABEL: @udiv_scalar
spv.func @udiv_scalar(%arg0: i32, %arg1: i32) "None" {
- // CHECK: llvm.udiv %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.udiv %{{.*}}, %{{.*}} : i32
%0 = spv.UDiv %arg0, %arg1 : i32
spv.Return
}
@@ -186,7 +186,7 @@ spv.func @udiv_vector(%arg0: vector<3xi64>, %arg1: vector<3xi64>) "None" {
// CHECK-LABEL: @umod_scalar
spv.func @umod_scalar(%arg0: i32, %arg1: i32) "None" {
- // CHECK: llvm.urem %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.urem %{{.*}}, %{{.*}} : i32
%0 = spv.UMod %arg0, %arg1 : i32
spv.Return
}
@@ -204,7 +204,7 @@ spv.func @umod_vector(%arg0: vector<3xi64>, %arg1: vector<3xi64>) "None" {
// CHECK-LABEL: @sdiv_scalar
spv.func @sdiv_scalar(%arg0: i16, %arg1: i16) "None" {
- // CHECK: llvm.sdiv %{{.*}}, %{{.*}} : !llvm.i16
+ // CHECK: llvm.sdiv %{{.*}}, %{{.*}} : i16
%0 = spv.SDiv %arg0, %arg1 : i16
spv.Return
}
@@ -222,7 +222,7 @@ spv.func @sdiv_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
// CHECK-LABEL: @srem_scalar
spv.func @srem_scalar(%arg0: i32, %arg1: i32) "None" {
- // CHECK: llvm.srem %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.srem %{{.*}}, %{{.*}} : i32
%0 = spv.SRem %arg0, %arg1 : i32
spv.Return
}
diff --git a/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
index 4538b736fdf4..db1ac3a6d4d0 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
@@ -6,7 +6,7 @@
// CHECK-LABEL: @bitcount_scalar
spv.func @bitcount_scalar(%arg0: i16) "None" {
- // CHECK: "llvm.intr.ctpop"(%{{.*}}) : (!llvm.i16) -> !llvm.i16
+ // CHECK: "llvm.intr.ctpop"(%{{.*}}) : (i16) -> i16
%0 = spv.BitCount %arg0: i16
spv.Return
}
@@ -24,7 +24,7 @@ spv.func @bitcount_vector(%arg0: vector<3xi32>) "None" {
// CHECK-LABEL: @bitreverse_scalar
spv.func @bitreverse_scalar(%arg0: i64) "None" {
- // CHECK: "llvm.intr.bitreverse"(%{{.*}}) : (!llvm.i64) -> !llvm.i64
+ // CHECK: "llvm.intr.bitreverse"(%{{.*}}) : (i64) -> i64
%0 = spv.BitReverse %arg0: i64
spv.Return
}
@@ -41,67 +41,67 @@ spv.func @bitreverse_vector(%arg0: vector<4xi32>) "None" {
//===----------------------------------------------------------------------===//
// CHECK-LABEL: @bitfield_insert_scalar_same_bit_width
-// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[INSERT:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32
+// CHECK-SAME: %[[BASE:.*]]: i32, %[[INSERT:.*]]: i32, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32
spv.func @bitfield_insert_scalar_same_bit_width(%base: i32, %insert: i32, %offset: i32, %count: i32) "None" {
- // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32
- // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i32
- // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32
- // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[OFFSET]] : !llvm.i32
- // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm.i32
- // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm.i32
- // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[OFFSET]] : !llvm.i32
- // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm.i32
+ // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : i32
+ // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : i32
+ // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i32
+ // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[OFFSET]] : i32
+ // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : i32
+ // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : i32
+ // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[OFFSET]] : i32
+ // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : i32
%0 = spv.BitFieldInsert %base, %insert, %offset, %count : i32, i32, i32
spv.Return
}
// CHECK-LABEL: @bitfield_insert_scalar_smaller_bit_width
-// CHECK-SAME: %[[BASE:.*]]: !llvm.i64, %[[INSERT:.*]]: !llvm.i64, %[[OFFSET:.*]]: !llvm.i8, %[[COUNT:.*]]: !llvm.i8
+// CHECK-SAME: %[[BASE:.*]]: i64, %[[INSERT:.*]]: i64, %[[OFFSET:.*]]: i8, %[[COUNT:.*]]: i8
spv.func @bitfield_insert_scalar_smaller_bit_width(%base: i64, %insert: i64, %offset: i8, %count: i8) "None" {
- // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : !llvm.i8 to !llvm.i64
- // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i64
- // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i64) : !llvm.i64
- // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[EXT_COUNT]] : !llvm.i64
- // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i64
- // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[EXT_OFFSET]] : !llvm.i64
- // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm.i64
- // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm.i64
- // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[EXT_OFFSET]] : !llvm.i64
- // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm.i64
+ // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : i8 to i64
+ // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : i8 to i64
+ // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i64) : i64
+ // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[EXT_COUNT]] : i64
+ // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i64
+ // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[EXT_OFFSET]] : i64
+ // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : i64
+ // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : i64
+ // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[EXT_OFFSET]] : i64
+ // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : i64
%0 = spv.BitFieldInsert %base, %insert, %offset, %count : i64, i8, i8
spv.Return
}
// CHECK-LABEL: @bitfield_insert_scalar_greater_bit_width
-// CHECK-SAME: %[[BASE:.*]]: !llvm.i16, %[[INSERT:.*]]: !llvm.i16, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i64
+// CHECK-SAME: %[[BASE:.*]]: i16, %[[INSERT:.*]]: i16, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i64
spv.func @bitfield_insert_scalar_greater_bit_width(%base: i16, %insert: i16, %offset: i32, %count: i64) "None" {
- // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i32 to !llvm.i16
- // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : !llvm.i64 to !llvm.i16
- // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i16) : !llvm.i16
- // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[TRUNC_COUNT]] : !llvm.i16
- // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i16
- // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[TRUNC_OFFSET]] : !llvm.i16
- // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm.i16
- // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm.i16
- // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[TRUNC_OFFSET]] : !llvm.i16
- // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm.i16
+ // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : i32 to i16
+ // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : i64 to i16
+ // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i16) : i16
+ // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[TRUNC_COUNT]] : i16
+ // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i16
+ // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[TRUNC_OFFSET]] : i16
+ // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : i16
+ // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : i16
+ // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[TRUNC_OFFSET]] : i16
+ // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : i16
%0 = spv.BitFieldInsert %base, %insert, %offset, %count : i16, i32, i64
spv.Return
}
// CHECK-LABEL: @bitfield_insert_vector
-// CHECK-SAME: %[[BASE:.*]]: !llvm.vec<2 x i32>, %[[INSERT:.*]]: !llvm.vec<2 x i32>, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32
+// CHECK-SAME: %[[BASE:.*]]: !llvm.vec<2 x i32>, %[[INSERT:.*]]: !llvm.vec<2 x i32>, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32
spv.func @bitfield_insert_vector(%base: vector<2xi32>, %insert: vector<2xi32>, %offset: i32, %count: i32) "None" {
// CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm.vec<2 x i32>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
- // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm.vec<2 x i32>
- // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm.vec<2 x i32>
+ // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
+ // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : i32] : !llvm.vec<2 x i32>
+ // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : i32] : !llvm.vec<2 x i32>
// CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm.vec<2 x i32>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
- // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm.vec<2 x i32>
- // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm.vec<2 x i32>
+ // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
+ // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : i32] : !llvm.vec<2 x i32>
+ // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : i32] : !llvm.vec<2 x i32>
// CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(dense<-1> : vector<2xi32>) : !llvm.vec<2 x i32>
// CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT_V2]] : !llvm.vec<2 x i32>
// CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.vec<2 x i32>
@@ -119,61 +119,61 @@ spv.func @bitfield_insert_vector(%base: vector<2xi32>, %insert: vector<2xi32>, %
//===----------------------------------------------------------------------===//
// CHECK-LABEL: @bitfield_sextract_scalar_same_bit_width
-// CHECK-SAME: %[[BASE:.*]]: !llvm.i64, %[[OFFSET:.*]]: !llvm.i64, %[[COUNT:.*]]: !llvm.i64
+// CHECK-SAME: %[[BASE:.*]]: i64, %[[OFFSET:.*]]: i64, %[[COUNT:.*]]: i64
spv.func @bitfield_sextract_scalar_same_bit_width(%base: i64, %offset: i64, %count: i64) "None" {
- // CHECK: %[[SIZE:.]] = llvm.mlir.constant(64 : i64) : !llvm.i64
- // CHECK: %[[T0:.*]] = llvm.add %[[COUNT]], %[[OFFSET]] : !llvm.i64
- // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i64
- // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i64
- // CHECK: %[[T2:.*]] = llvm.add %[[OFFSET]], %[[T1]] : !llvm.i64
- // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i64
+ // CHECK: %[[SIZE:.]] = llvm.mlir.constant(64 : i64) : i64
+ // CHECK: %[[T0:.*]] = llvm.add %[[COUNT]], %[[OFFSET]] : i64
+ // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : i64
+ // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : i64
+ // CHECK: %[[T2:.*]] = llvm.add %[[OFFSET]], %[[T1]] : i64
+ // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : i64
%0 = spv.BitFieldSExtract %base, %offset, %count : i64, i64, i64
spv.Return
}
// CHECK-LABEL: @bitfield_sextract_scalar_smaller_bit_width
-// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i8, %[[COUNT:.*]]: !llvm.i8
+// CHECK-SAME: %[[BASE:.*]]: i32, %[[OFFSET:.*]]: i8, %[[COUNT:.*]]: i8
spv.func @bitfield_sextract_scalar_smaller_bit_width(%base: i32, %offset: i8, %count: i8) "None" {
- // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : !llvm.i8 to !llvm.i32
- // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i32
- // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : !llvm.i32
- // CHECK: %[[T0:.*]] = llvm.add %[[EXT_COUNT]], %[[EXT_OFFSET]] : !llvm.i32
- // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i32
- // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i32
- // CHECK: %[[T2:.*]] = llvm.add %[[EXT_OFFSET]], %[[T1]] : !llvm.i32
- // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i32
+ // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : i8 to i32
+ // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : i8 to i32
+ // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : i32
+ // CHECK: %[[T0:.*]] = llvm.add %[[EXT_COUNT]], %[[EXT_OFFSET]] : i32
+ // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : i32
+ // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : i32
+ // CHECK: %[[T2:.*]] = llvm.add %[[EXT_OFFSET]], %[[T1]] : i32
+ // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : i32
%0 = spv.BitFieldSExtract %base, %offset, %count : i32, i8, i8
spv.Return
}
// CHECK-LABEL: @bitfield_sextract_scalar_greater_bit_width
-// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i64, %[[COUNT:.*]]: !llvm.i64
+// CHECK-SAME: %[[BASE:.*]]: i32, %[[OFFSET:.*]]: i64, %[[COUNT:.*]]: i64
spv.func @bitfield_sextract_scalar_greater_bit_width(%base: i32, %offset: i64, %count: i64) "None" {
- // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i64 to !llvm.i32
- // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : !llvm.i64 to !llvm.i32
- // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : !llvm.i32
- // CHECK: %[[T0:.*]] = llvm.add %[[TRUNC_COUNT]], %[[TRUNC_OFFSET]] : !llvm.i32
- // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i32
- // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i32
- // CHECK: %[[T2:.*]] = llvm.add %[[TRUNC_OFFSET]], %[[T1]] : !llvm.i32
- // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i32
+ // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : i64 to i32
+ // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : i64 to i32
+ // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : i32
+ // CHECK: %[[T0:.*]] = llvm.add %[[TRUNC_COUNT]], %[[TRUNC_OFFSET]] : i32
+ // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : i32
+ // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : i32
+ // CHECK: %[[T2:.*]] = llvm.add %[[TRUNC_OFFSET]], %[[T1]] : i32
+ // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : i32
%0 = spv.BitFieldSExtract %base, %offset, %count : i32, i64, i64
spv.Return
}
// CHECK-LABEL: @bitfield_sextract_vector
-// CHECK-SAME: %[[BASE:.*]]: !llvm.vec<2 x i32>, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32
+// CHECK-SAME: %[[BASE:.*]]: !llvm.vec<2 x i32>, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32
spv.func @bitfield_sextract_vector(%base: vector<2xi32>, %offset: i32, %count: i32) "None" {
// CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm.vec<2 x i32>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
- // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm.vec<2 x i32>
- // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm.vec<2 x i32>
+ // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
+ // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : i32] : !llvm.vec<2 x i32>
+ // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : i32] : !llvm.vec<2 x i32>
// CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm.vec<2 x i32>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
- // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm.vec<2 x i32>
- // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm.vec<2 x i32>
+ // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
+ // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : i32] : !llvm.vec<2 x i32>
+ // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : i32] : !llvm.vec<2 x i32>
// CHECK: %[[SIZE:.*]] = llvm.mlir.constant(dense<32> : vector<2xi32>) : !llvm.vec<2 x i32>
// CHECK: %[[T0:.*]] = llvm.add %[[COUNT_V2]], %[[OFFSET_V2]] : !llvm.vec<2 x i32>
// CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.vec<2 x i32>
@@ -189,57 +189,57 @@ spv.func @bitfield_sextract_vector(%base: vector<2xi32>, %offset: i32, %count: i
//===----------------------------------------------------------------------===//
// CHECK-LABEL: @bitfield_uextract_scalar_same_bit_width
-// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32
+// CHECK-SAME: %[[BASE:.*]]: i32, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32
spv.func @bitfield_uextract_scalar_same_bit_width(%base: i32, %offset: i32, %count: i32) "None" {
- // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32
- // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i32
- // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32
- // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[OFFSET]] : !llvm.i32
- // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i32
+ // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : i32
+ // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : i32
+ // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i32
+ // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[OFFSET]] : i32
+ // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : i32
%0 = spv.BitFieldUExtract %base, %offset, %count : i32, i32, i32
spv.Return
}
// CHECK-LABEL: @bitfield_uextract_scalar_smaller_bit_width
-// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i16, %[[COUNT:.*]]: !llvm.i8
+// CHECK-SAME: %[[BASE:.*]]: i32, %[[OFFSET:.*]]: i16, %[[COUNT:.*]]: i8
spv.func @bitfield_uextract_scalar_smaller_bit_width(%base: i32, %offset: i16, %count: i8) "None" {
- // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : !llvm.i16 to !llvm.i32
- // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i32
- // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32
- // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[EXT_COUNT]] : !llvm.i32
- // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32
- // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[EXT_OFFSET]] : !llvm.i32
- // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i32
+ // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : i16 to i32
+ // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : i8 to i32
+ // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : i32
+ // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[EXT_COUNT]] : i32
+ // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i32
+ // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[EXT_OFFSET]] : i32
+ // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : i32
%0 = spv.BitFieldUExtract %base, %offset, %count : i32, i16, i8
spv.Return
}
// CHECK-LABEL: @bitfield_uextract_scalar_greater_bit_width
-// CHECK-SAME: %[[BASE:.*]]: !llvm.i8, %[[OFFSET:.*]]: !llvm.i16, %[[COUNT:.*]]: !llvm.i8
+// CHECK-SAME: %[[BASE:.*]]: i8, %[[OFFSET:.*]]: i16, %[[COUNT:.*]]: i8
spv.func @bitfield_uextract_scalar_greater_bit_width(%base: i8, %offset: i16, %count: i8) "None" {
- // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i16 to !llvm.i8
- // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i8) : !llvm.i8
- // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i8
- // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i8
- // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[TRUNC_OFFSET]] : !llvm.i8
- // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i8
+ // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : i16 to i8
+ // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i8) : i8
+ // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : i8
+ // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i8
+ // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[TRUNC_OFFSET]] : i8
+ // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : i8
%0 = spv.BitFieldUExtract %base, %offset, %count : i8, i16, i8
spv.Return
}
// CHECK-LABEL: @bitfield_uextract_vector
-// CHECK-SAME: %[[BASE:.*]]: !llvm.vec<2 x i32>, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32
+// CHECK-SAME: %[[BASE:.*]]: !llvm.vec<2 x i32>, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32
spv.func @bitfield_uextract_vector(%base: vector<2xi32>, %offset: i32, %count: i32) "None" {
// CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm.vec<2 x i32>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
- // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm.vec<2 x i32>
- // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm.vec<2 x i32>
+ // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
+ // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : i32] : !llvm.vec<2 x i32>
+ // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : i32] : !llvm.vec<2 x i32>
// CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm.vec<2 x i32>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
- // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm.vec<2 x i32>
- // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm.vec<2 x i32>
+ // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
+ // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : i32] : !llvm.vec<2 x i32>
+ // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : i32] : !llvm.vec<2 x i32>
// CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(dense<-1> : vector<2xi32>) : !llvm.vec<2 x i32>
// CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT_V2]] : !llvm.vec<2 x i32>
// CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.vec<2 x i32>
@@ -255,7 +255,7 @@ spv.func @bitfield_uextract_vector(%base: vector<2xi32>, %offset: i32, %count: i
// CHECK-LABEL: @bitwise_and_scalar
spv.func @bitwise_and_scalar(%arg0: i32, %arg1: i32) "None" {
- // CHECK: llvm.and %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.and %{{.*}}, %{{.*}} : i32
%0 = spv.BitwiseAnd %arg0, %arg1 : i32
spv.Return
}
@@ -273,7 +273,7 @@ spv.func @bitwise_and_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) "None"
// CHECK-LABEL: @bitwise_or_scalar
spv.func @bitwise_or_scalar(%arg0: i64, %arg1: i64) "None" {
- // CHECK: llvm.or %{{.*}}, %{{.*}} : !llvm.i64
+ // CHECK: llvm.or %{{.*}}, %{{.*}} : i64
%0 = spv.BitwiseOr %arg0, %arg1 : i64
spv.Return
}
@@ -291,7 +291,7 @@ spv.func @bitwise_or_vector(%arg0: vector<3xi8>, %arg1: vector<3xi8>) "None" {
// CHECK-LABEL: @bitwise_xor_scalar
spv.func @bitwise_xor_scalar(%arg0: i32, %arg1: i32) "None" {
- // CHECK: llvm.xor %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.xor %{{.*}}, %{{.*}} : i32
%0 = spv.BitwiseXor %arg0, %arg1 : i32
spv.Return
}
@@ -309,8 +309,8 @@ spv.func @bitwise_xor_vector(%arg0: vector<2xi16>, %arg1: vector<2xi16>) "None"
// CHECK-LABEL: @not_scalar
spv.func @not_scalar(%arg0: i32) "None" {
- // CHECK: %[[CONST:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32
- // CHECK: llvm.xor %{{.*}}, %[[CONST]] : !llvm.i32
+ // CHECK: %[[CONST:.*]] = llvm.mlir.constant(-1 : i32) : i32
+ // CHECK: llvm.xor %{{.*}}, %[[CONST]] : i32
%0 = spv.Not %arg0 : i32
spv.Return
}
diff --git a/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
index a2c8b3ffccb2..f303c3b8bc9e 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
@@ -6,7 +6,7 @@
// CHECK-LABEL: @bitcast_float_to_integer_scalar
spv.func @bitcast_float_to_integer_scalar(%arg0 : f32) "None" {
- // CHECK: llvm.bitcast {{.*}} : !llvm.float to !llvm.i32
+ // CHECK: llvm.bitcast {{.*}} : !llvm.float to i32
%0 = spv.Bitcast %arg0: f32 to i32
spv.Return
}
@@ -20,7 +20,7 @@ spv.func @bitcast_float_to_integer_vector(%arg0 : vector<3xf32>) "None" {
// CHECK-LABEL: @bitcast_vector_to_scalar
spv.func @bitcast_vector_to_scalar(%arg0 : vector<2xf32>) "None" {
- // CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm.vec<2 x float> to !llvm.i64
+ // CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm.vec<2 x float> to i64
%0 = spv.Bitcast %arg0: vector<2xf32> to i64
spv.Return
}
@@ -52,7 +52,7 @@ spv.func @bitcast_pointer(%arg0: !spv.ptr<f32, Function>) "None" {
// CHECK-LABEL: @convert_float_to_signed_scalar
spv.func @convert_float_to_signed_scalar(%arg0: f32) "None" {
- // CHECK: llvm.fptosi %{{.*}} : !llvm.float to !llvm.i32
+ // CHECK: llvm.fptosi %{{.*}} : !llvm.float to i32
%0 = spv.ConvertFToS %arg0: f32 to i32
spv.Return
}
@@ -70,7 +70,7 @@ spv.func @convert_float_to_signed_vector(%arg0: vector<2xf32>) "None" {
// CHECK-LABEL: @convert_float_to_unsigned_scalar
spv.func @convert_float_to_unsigned_scalar(%arg0: f32) "None" {
- // CHECK: llvm.fptoui %{{.*}} : !llvm.float to !llvm.i32
+ // CHECK: llvm.fptoui %{{.*}} : !llvm.float to i32
%0 = spv.ConvertFToU %arg0: f32 to i32
spv.Return
}
@@ -88,7 +88,7 @@ spv.func @convert_float_to_unsigned_vector(%arg0: vector<2xf32>) "None" {
// CHECK-LABEL: @convert_signed_to_float_scalar
spv.func @convert_signed_to_float_scalar(%arg0: i32) "None" {
- // CHECK: llvm.sitofp %{{.*}} : !llvm.i32 to !llvm.float
+ // CHECK: llvm.sitofp %{{.*}} : i32 to !llvm.float
%0 = spv.ConvertSToF %arg0: i32 to f32
spv.Return
}
@@ -106,7 +106,7 @@ spv.func @convert_signed_to_float_vector(%arg0: vector<3xi32>) "None" {
// CHECK-LABEL: @convert_unsigned_to_float_scalar
spv.func @convert_unsigned_to_float_scalar(%arg0: i32) "None" {
- // CHECK: llvm.uitofp %{{.*}} : !llvm.i32 to !llvm.float
+ // CHECK: llvm.uitofp %{{.*}} : i32 to !llvm.float
%0 = spv.ConvertUToF %arg0: i32 to f32
spv.Return
}
@@ -148,10 +148,10 @@ spv.func @fconvert_vector(%arg0: vector<2xf32>, %arg1: vector<2xf64>) "None" {
// CHECK-LABEL: @sconvert_scalar
spv.func @sconvert_scalar(%arg0: i32, %arg1: i64) "None" {
- // CHECK: llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: llvm.sext %{{.*}} : i32 to i64
%0 = spv.SConvert %arg0: i32 to i64
- // CHECK: llvm.trunc %{{.*}} : !llvm.i64 to !llvm.i32
+ // CHECK: llvm.trunc %{{.*}} : i64 to i32
%1 = spv.SConvert %arg1: i64 to i32
spv.Return
}
@@ -172,10 +172,10 @@ spv.func @sconvert_vector(%arg0: vector<3xi32>, %arg1: vector<3xi64>) "None" {
// CHECK-LABEL: @uconvert_scalar
spv.func @uconvert_scalar(%arg0: i32, %arg1: i64) "None" {
- // CHECK: llvm.zext %{{.*}} : !llvm.i32 to !llvm.i64
+ // CHECK: llvm.zext %{{.*}} : i32 to i64
%0 = spv.UConvert %arg0: i32 to i64
- // CHECK: llvm.trunc %{{.*}} : !llvm.i64 to !llvm.i32
+ // CHECK: llvm.trunc %{{.*}} : i64 to i32
%1 = spv.UConvert %arg1: i64 to i32
spv.Return
}
diff --git a/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
index 106edc0d421c..ef50c05346ed 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
@@ -6,7 +6,7 @@
// CHECK-LABEL: @i_equal_scalar
spv.func @i_equal_scalar(%arg0: i32, %arg1: i32) "None" {
- // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : i32
%0 = spv.IEqual %arg0, %arg1 : i32
spv.Return
}
@@ -24,7 +24,7 @@ spv.func @i_equal_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) "None" {
// CHECK-LABEL: @i_not_equal_scalar
spv.func @i_not_equal_scalar(%arg0: i64, %arg1: i64) "None" {
- // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : !llvm.i64
+ // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : i64
%0 = spv.INotEqual %arg0, %arg1 : i64
spv.Return
}
@@ -42,7 +42,7 @@ spv.func @i_not_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None"
// CHECK-LABEL: @s_greater_than_equal_scalar
spv.func @s_greater_than_equal_scalar(%arg0: i64, %arg1: i64) "None" {
- // CHECK: llvm.icmp "sge" %{{.*}}, %{{.*}} : !llvm.i64
+ // CHECK: llvm.icmp "sge" %{{.*}}, %{{.*}} : i64
%0 = spv.SGreaterThanEqual %arg0, %arg1 : i64
spv.Return
}
@@ -60,7 +60,7 @@ spv.func @s_greater_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>
// CHECK-LABEL: @s_greater_than_scalar
spv.func @s_greater_than_scalar(%arg0: i64, %arg1: i64) "None" {
- // CHECK: llvm.icmp "sgt" %{{.*}}, %{{.*}} : !llvm.i64
+ // CHECK: llvm.icmp "sgt" %{{.*}}, %{{.*}} : i64
%0 = spv.SGreaterThan %arg0, %arg1 : i64
spv.Return
}
@@ -78,7 +78,7 @@ spv.func @s_greater_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "Non
// CHECK-LABEL: @s_less_than_equal_scalar
spv.func @s_less_than_equal_scalar(%arg0: i64, %arg1: i64) "None" {
- // CHECK: llvm.icmp "sle" %{{.*}}, %{{.*}} : !llvm.i64
+ // CHECK: llvm.icmp "sle" %{{.*}}, %{{.*}} : i64
%0 = spv.SLessThanEqual %arg0, %arg1 : i64
spv.Return
}
@@ -96,7 +96,7 @@ spv.func @s_less_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "
// CHECK-LABEL: @s_less_than_scalar
spv.func @s_less_than_scalar(%arg0: i64, %arg1: i64) "None" {
- // CHECK: llvm.icmp "slt" %{{.*}}, %{{.*}} : !llvm.i64
+ // CHECK: llvm.icmp "slt" %{{.*}}, %{{.*}} : i64
%0 = spv.SLessThan %arg0, %arg1 : i64
spv.Return
}
@@ -114,7 +114,7 @@ spv.func @s_less_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None"
// CHECK-LABEL: @u_greater_than_equal_scalar
spv.func @u_greater_than_equal_scalar(%arg0: i64, %arg1: i64) "None" {
- // CHECK: llvm.icmp "uge" %{{.*}}, %{{.*}} : !llvm.i64
+ // CHECK: llvm.icmp "uge" %{{.*}}, %{{.*}} : i64
%0 = spv.UGreaterThanEqual %arg0, %arg1 : i64
spv.Return
}
@@ -132,7 +132,7 @@ spv.func @u_greater_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>
// CHECK-LABEL: @u_greater_than_scalar
spv.func @u_greater_than_scalar(%arg0: i64, %arg1: i64) "None" {
- // CHECK: llvm.icmp "ugt" %{{.*}}, %{{.*}} : !llvm.i64
+ // CHECK: llvm.icmp "ugt" %{{.*}}, %{{.*}} : i64
%0 = spv.UGreaterThan %arg0, %arg1 : i64
spv.Return
}
@@ -150,7 +150,7 @@ spv.func @u_greater_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "Non
// CHECK-LABEL: @u_less_than_equal_scalar
spv.func @u_less_than_equal_scalar(%arg0: i64, %arg1: i64) "None" {
- // CHECK: llvm.icmp "ule" %{{.*}}, %{{.*}} : !llvm.i64
+ // CHECK: llvm.icmp "ule" %{{.*}}, %{{.*}} : i64
%0 = spv.ULessThanEqual %arg0, %arg1 : i64
spv.Return
}
@@ -168,7 +168,7 @@ spv.func @u_less_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "
// CHECK-LABEL: @u_less_than_scalar
spv.func @u_less_than_scalar(%arg0: i64, %arg1: i64) "None" {
- // CHECK: llvm.icmp "ult" %{{.*}}, %{{.*}} : !llvm.i64
+ // CHECK: llvm.icmp "ult" %{{.*}}, %{{.*}} : i64
%0 = spv.ULessThan %arg0, %arg1 : i64
spv.Return
}
diff --git a/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir
index 52038f9f3c19..a95956ef89b3 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir
@@ -6,9 +6,9 @@
// CHECK-LABEL: @bool_constant_scalar
spv.func @bool_constant_scalar() "None" {
- // CHECK: llvm.mlir.constant(true) : !llvm.i1
+ // CHECK: llvm.mlir.constant(true) : i1
%0 = spv.constant true
- // CHECK: llvm.mlir.constant(false) : !llvm.i1
+ // CHECK: llvm.mlir.constant(false) : i1
%1 = spv.constant false
spv.Return
}
@@ -24,11 +24,11 @@ spv.func @bool_constant_vector() "None" {
// CHECK-LABEL: @integer_constant_scalar
spv.func @integer_constant_scalar() "None" {
- // CHECK: llvm.mlir.constant(0 : i8) : !llvm.i8
+ // CHECK: llvm.mlir.constant(0 : i8) : i8
%0 = spv.constant 0 : i8
- // CHECK: llvm.mlir.constant(-5 : i64) : !llvm.i64
+ // CHECK: llvm.mlir.constant(-5 : i64) : i64
%1 = spv.constant -5 : si64
- // CHECK: llvm.mlir.constant(10 : i16) : !llvm.i16
+ // CHECK: llvm.mlir.constant(10 : i16) : i16
%2 = spv.constant 10 : ui16
spv.Return
}
diff --git a/mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir
index 75a06748c3ed..4211b47d6e66 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir
@@ -16,9 +16,9 @@ spv.module Logical GLSL450 {
spv.func @branch_with_arguments() -> () "None" {
%0 = spv.constant 0 : i32
%1 = spv.constant true
- // CHECK: llvm.br ^bb1(%{{.*}}, %{{.*}} : !llvm.i32, !llvm.i1)
+ // CHECK: llvm.br ^bb1(%{{.*}}, %{{.*}} : i32, i1)
spv.Branch ^label(%0, %1: i32, i1)
- // CHECK: ^bb1(%{{.*}}: !llvm.i32, %{{.*}}: !llvm.i1)
+ // CHECK: ^bb1(%{{.*}}: i32, %{{.*}}: i1)
^label(%arg0: i32, %arg1: i1):
spv.Return
}
@@ -32,7 +32,7 @@ spv.module Logical GLSL450 {
spv.module Logical GLSL450 {
spv.func @cond_branch_without_arguments() -> () "None" {
- // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : !llvm.i1
+ // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : i1
%cond = spv.constant true
    // CHECK: llvm.cond_br %[[COND]], ^bb1, ^bb2
spv.BranchConditional %cond, ^true, ^false
@@ -45,16 +45,16 @@ spv.module Logical GLSL450 {
}
spv.func @cond_branch_with_arguments_nested() -> () "None" {
- // CHECK: %[[COND1:.*]] = llvm.mlir.constant(true) : !llvm.i1
+ // CHECK: %[[COND1:.*]] = llvm.mlir.constant(true) : i1
%cond = spv.constant true
%0 = spv.constant 0 : i32
- // CHECK: %[[COND2:.*]] = llvm.mlir.constant(false) : !llvm.i1
+ // CHECK: %[[COND2:.*]] = llvm.mlir.constant(false) : i1
%false = spv.constant false
- // CHECK: llvm.cond_br %[[COND1]], ^bb1(%{{.*}}, %[[COND2]] : !llvm.i32, !llvm.i1), ^bb2
+ // CHECK: llvm.cond_br %[[COND1]], ^bb1(%{{.*}}, %[[COND2]] : i32, i1), ^bb2
spv.BranchConditional %cond, ^outer_true(%0, %false: i32, i1), ^outer_false
- // CHECK: ^bb1(%{{.*}}: !llvm.i32, %[[COND:.*]]: !llvm.i1):
+ // CHECK: ^bb1(%{{.*}}: i32, %[[COND:.*]]: i1):
^outer_true(%arg0: i32, %arg1: i1):
- // CHECK: llvm.cond_br %[[COND]], ^bb3, ^bb4(%{{.*}}, %{{.*}} : !llvm.i32, !llvm.i32)
+ // CHECK: llvm.cond_br %[[COND]], ^bb3, ^bb4(%{{.*}}, %{{.*}} : i32, i32)
spv.BranchConditional %arg1, ^inner_true, ^inner_false(%arg0, %arg0: i32, i32)
// CHECK: ^bb2:
^outer_false:
@@ -62,7 +62,7 @@ spv.module Logical GLSL450 {
// CHECK: ^bb3:
^inner_true:
spv.Return
- // CHECK: ^bb4(%{{.*}}: !llvm.i32, %{{.*}}: !llvm.i32):
+ // CHECK: ^bb4(%{{.*}}: i32, %{{.*}}: i32):
^inner_false(%arg3: i32, %arg4: i32):
spv.Return
}
@@ -90,7 +90,7 @@ spv.module Logical GLSL450 {
spv.func @infinite_loop(%count : i32) -> () "None" {
// CHECK: llvm.br ^[[BB1:.*]]
// CHECK: ^[[BB1]]:
- // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : !llvm.i1
+ // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : i1
// CHECK: llvm.cond_br %[[COND]], ^[[BB2:.*]], ^[[BB4:.*]]
// CHECK: ^[[BB2]]:
// CHECK: llvm.br ^[[BB3:.*]]
@@ -144,7 +144,7 @@ spv.module Logical GLSL450 {
}
spv.func @selection_with_true_block_only() -> () "None" {
- // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : !llvm.i1
+ // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : i1
%cond = spv.constant true
// CHECK: llvm.cond_br %[[COND]], ^bb1, ^bb2
spv.selection {
@@ -164,7 +164,7 @@ spv.module Logical GLSL450 {
}
spv.func @selection_with_both_true_and_false_block() -> () "None" {
- // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : !llvm.i1
+ // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : i1
%cond = spv.constant true
// CHECK: llvm.cond_br %[[COND]], ^bb1, ^bb2
spv.selection {
@@ -188,14 +188,14 @@ spv.module Logical GLSL450 {
}
spv.func @selection_with_early_return(%arg0: i1) -> i32 "None" {
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+ // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
%0 = spv.constant 0 : i32
- // CHECK: llvm.cond_br %{{.*}}, ^bb1(%[[ZERO]] : !llvm.i32), ^bb2
+ // CHECK: llvm.cond_br %{{.*}}, ^bb1(%[[ZERO]] : i32), ^bb2
spv.selection {
spv.BranchConditional %arg0, ^true(%0 : i32), ^merge
- // CHECK: ^bb1(%[[ARG:.*]]: !llvm.i32):
+ // CHECK: ^bb1(%[[ARG:.*]]: i32):
^true(%arg1: i32):
- // CHECK: llvm.return %[[ARG]] : !llvm.i32
+ // CHECK: llvm.return %[[ARG]] : i32
spv.ReturnValue %arg1 : i32
// CHECK: ^bb2:
^merge:
diff --git a/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir
index 04bc25c3f76f..981674e7c16f 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir
@@ -16,7 +16,7 @@ spv.func @return() "None" {
// CHECK-LABEL: @return_value
spv.func @return_value(%arg: i32) -> i32 "None" {
- // CHECK: llvm.return %{{.*}} : !llvm.i32
+ // CHECK: llvm.return %{{.*}} : i32
spv.ReturnValue %arg : i32
}
@@ -49,7 +49,7 @@ spv.func @const() "Const" {
spv.Return
}
-// CHECK-LABEL: llvm.func @scalar_types(%arg0: !llvm.i32, %arg1: !llvm.i1, %arg2: !llvm.double, %arg3: !llvm.float)
+// CHECK-LABEL: llvm.func @scalar_types(%arg0: i32, %arg1: i1, %arg2: !llvm.double, %arg3: !llvm.float)
spv.func @scalar_types(%arg0: i32, %arg1: i1, %arg2: f64, %arg3: f32) "None" {
spv.Return
}
@@ -65,11 +65,11 @@ spv.func @vector_types(%arg0: vector<2xi64>, %arg1: vector<2xi64>) -> vector<2xi
//===----------------------------------------------------------------------===//
// CHECK-LABEL: llvm.func @function_calls
-// CHECK-SAME: %[[ARG0:.*]]: !llvm.i32, %[[ARG1:.*]]: !llvm.i1, %[[ARG2:.*]]: !llvm.double, %[[ARG3:.*]]: !llvm.vec<2 x i64>, %[[ARG4:.*]]: !llvm.vec<2 x float>
+// CHECK-SAME: %[[ARG0:.*]]: i32, %[[ARG1:.*]]: i1, %[[ARG2:.*]]: !llvm.double, %[[ARG3:.*]]: !llvm.vec<2 x i64>, %[[ARG4:.*]]: !llvm.vec<2 x float>
spv.func @function_calls(%arg0: i32, %arg1: i1, %arg2: f64, %arg3: vector<2xi64>, %arg4: vector<2xf32>) "None" {
// CHECK: llvm.call @void_1() : () -> ()
// CHECK: llvm.call @void_2(%[[ARG3]]) : (!llvm.vec<2 x i64>) -> ()
- // CHECK: llvm.call @value_scalar(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!llvm.i32, !llvm.i1, !llvm.double) -> !llvm.i32
+ // CHECK: llvm.call @value_scalar(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (i32, i1, !llvm.double) -> i32
// CHECK: llvm.call @value_vector(%[[ARG3]], %[[ARG4]]) : (!llvm.vec<2 x i64>, !llvm.vec<2 x float>) -> !llvm.vec<2 x float>
spv.FunctionCall @void_1() : () -> ()
spv.FunctionCall @void_2(%arg3) : (vector<2xi64>) -> ()
diff --git a/mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir
index c096fc0308ff..c69416aaeed8 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir
@@ -123,7 +123,7 @@ spv.func @sin(%arg0: f32, %arg1: vector<3xf16>) "None" {
// CHECK-LABEL: @smax
spv.func @smax(%arg0: i16, %arg1: vector<3xi32>) "None" {
- // CHECK: "llvm.intr.smax"(%{{.*}}, %{{.*}}) : (!llvm.i16, !llvm.i16) -> !llvm.i16
+ // CHECK: "llvm.intr.smax"(%{{.*}}, %{{.*}}) : (i16, i16) -> i16
%0 = spv.GLSL.SMax %arg0, %arg0 : i16
// CHECK: "llvm.intr.smax"(%{{.*}}, %{{.*}}) : (!llvm.vec<3 x i32>, !llvm.vec<3 x i32>) -> !llvm.vec<3 x i32>
%1 = spv.GLSL.SMax %arg1, %arg1 : vector<3xi32>
@@ -136,7 +136,7 @@ spv.func @smax(%arg0: i16, %arg1: vector<3xi32>) "None" {
// CHECK-LABEL: @smin
spv.func @smin(%arg0: i16, %arg1: vector<3xi32>) "None" {
- // CHECK: "llvm.intr.smin"(%{{.*}}, %{{.*}}) : (!llvm.i16, !llvm.i16) -> !llvm.i16
+ // CHECK: "llvm.intr.smin"(%{{.*}}, %{{.*}}) : (i16, i16) -> i16
%0 = spv.GLSL.SMin %arg0, %arg0 : i16
// CHECK: "llvm.intr.smin"(%{{.*}}, %{{.*}}) : (!llvm.vec<3 x i32>, !llvm.vec<3 x i32>) -> !llvm.vec<3 x i32>
%1 = spv.GLSL.SMin %arg1, %arg1 : vector<3xi32>
diff --git a/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir
index e2fc57b2f490..a61fb7316fb3 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir
@@ -6,7 +6,7 @@
// CHECK-LABEL: @logical_equal_scalar
spv.func @logical_equal_scalar(%arg0: i1, %arg1: i1) "None" {
- // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : !llvm.i1
+ // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : i1
%0 = spv.LogicalEqual %arg0, %arg0 : i1
spv.Return
}
@@ -24,7 +24,7 @@ spv.func @logical_equal_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) "None"
// CHECK-LABEL: @logical_not_equal_scalar
spv.func @logical_not_equal_scalar(%arg0: i1, %arg1: i1) "None" {
- // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : !llvm.i1
+ // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : i1
%0 = spv.LogicalNotEqual %arg0, %arg0 : i1
spv.Return
}
@@ -42,8 +42,8 @@ spv.func @logical_not_equal_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) "No
// CHECK-LABEL: @logical_not_scalar
spv.func @logical_not_scalar(%arg0: i1) "None" {
- // CHECK: %[[CONST:.*]] = llvm.mlir.constant(true) : !llvm.i1
- // CHECK: llvm.xor %{{.*}}, %[[CONST]] : !llvm.i1
+ // CHECK: %[[CONST:.*]] = llvm.mlir.constant(true) : i1
+ // CHECK: llvm.xor %{{.*}}, %[[CONST]] : i1
%0 = spv.LogicalNot %arg0 : i1
spv.Return
}
@@ -62,7 +62,7 @@ spv.func @logical_not_vector(%arg0: vector<4xi1>) "None" {
// CHECK-LABEL: @logical_and_scalar
spv.func @logical_and_scalar(%arg0: i1, %arg1: i1) "None" {
- // CHECK: llvm.and %{{.*}}, %{{.*}} : !llvm.i1
+ // CHECK: llvm.and %{{.*}}, %{{.*}} : i1
%0 = spv.LogicalAnd %arg0, %arg0 : i1
spv.Return
}
@@ -80,7 +80,7 @@ spv.func @logical_and_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) "None" {
// CHECK-LABEL: @logical_or_scalar
spv.func @logical_or_scalar(%arg0: i1, %arg1: i1) "None" {
- // CHECK: llvm.or %{{.*}}, %{{.*}} : !llvm.i1
+ // CHECK: llvm.or %{{.*}}, %{{.*}} : i1
%0 = spv.LogicalOr %arg0, %arg0 : i1
spv.Return
}
diff --git a/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir b/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir
index 7f4f496ce20d..3e6eadd626b9 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir
@@ -15,11 +15,11 @@ module attributes {gpu.container_module, spv.target_env = #spv.target_env<#spv.v
// CHECK-LABEL: @main
// CHECK: %[[SRC:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<i32>, ptr<i32>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK-NEXT: %[[DEST:.*]] = llvm.mlir.addressof @__spv__foo_bar_arg_0_descriptor_set0_binding0 : !llvm.ptr<struct<(array<6 x i32>)>>
- // CHECK-NEXT: llvm.mlir.constant(false) : !llvm.i1
- // CHECK-NEXT: "llvm.intr.memcpy"(%[[DEST]], %[[SRC]], %[[SIZE:.*]], %{{.*}}) : (!llvm.ptr<struct<(array<6 x i32>)>>, !llvm.ptr<i32>, !llvm.i64, !llvm.i1) -> ()
+ // CHECK-NEXT: llvm.mlir.constant(false) : i1
+ // CHECK-NEXT: "llvm.intr.memcpy"(%[[DEST]], %[[SRC]], %[[SIZE:.*]], %{{.*}}) : (!llvm.ptr<struct<(array<6 x i32>)>>, !llvm.ptr<i32>, i64, i1) -> ()
// CHECK-NEXT: llvm.call @__spv__foo_bar() : () -> ()
- // CHECK-NEXT: llvm.mlir.constant(false) : !llvm.i1
- // CHECK-NEXT: "llvm.intr.memcpy"(%[[SRC]], %[[DEST]], %[[SIZE]], %{{.*}}) : (!llvm.ptr<i32>, !llvm.ptr<struct<(array<6 x i32>)>>, !llvm.i64, !llvm.i1) -> ()
+ // CHECK-NEXT: llvm.mlir.constant(false) : i1
+ // CHECK-NEXT: "llvm.intr.memcpy"(%[[SRC]], %[[DEST]], %[[SIZE]], %{{.*}}) : (!llvm.ptr<i32>, !llvm.ptr<struct<(array<6 x i32>)>>, i64, i1) -> ()
spv.module @__spv__foo Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_variable_pointers]> {
spv.globalVariable @bar_arg_0 bind(0, 0) : !spv.ptr<!spv.struct<(!spv.array<6 x i32, stride=4> [0])>, StorageBuffer>
diff --git a/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
index 8e8b1db889ca..66c8d4ee28e2 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
@@ -6,11 +6,11 @@
// CHECK-LABEL: @access_chain
spv.func @access_chain() "None" {
- // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+ // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
%0 = spv.constant 1: i32
%1 = spv.Variable : !spv.ptr<!spv.struct<(f32, !spv.array<4xf32>)>, Function>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
- // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], %[[ONE]], %[[ONE]]] : (!llvm.ptr<struct<packed (float, array<4 x float>)>>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.ptr<float>
+ // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
+ // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], %[[ONE]], %[[ONE]]] : (!llvm.ptr<struct<packed (float, array<4 x float>)>>, i32, i32, i32) -> !llvm.ptr<float>
%2 = spv.AccessChain %1[%0, %0] : !spv.ptr<!spv.struct<(f32, !spv.array<4xf32>)>, Function>, i32, i32
spv.Return
}
@@ -18,8 +18,8 @@ spv.func @access_chain() "None" {
// CHECK-LABEL: @access_chain_array
spv.func @access_chain_array(%arg0 : i32) "None" {
%0 = spv.Variable : !spv.ptr<!spv.array<4x!spv.array<4xf32>>, Function>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
- // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], %{{.*}}] : (!llvm.ptr<array<4 x array<4 x float>>>, !llvm.i32, !llvm.i32) -> !llvm.ptr<array<4 x float>>
+ // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
+ // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], %{{.*}}] : (!llvm.ptr<array<4 x array<4 x float>>>, i32, i32) -> !llvm.ptr<array<4 x float>>
%1 = spv.AccessChain %0[%arg0] : !spv.ptr<!spv.array<4x!spv.array<4xf32>>, Function>, i32
%2 = spv.Load "Function" %1 ["Volatile"] : !spv.array<4xf32>
spv.Return
@@ -46,7 +46,7 @@ spv.module Logical GLSL450 {
}
spv.module Logical GLSL450 {
- // CHECK: llvm.mlir.global external @bar_descriptor_set0_binding0() : !llvm.i32
+ // CHECK: llvm.mlir.global external @bar_descriptor_set0_binding0() : i32
// CHECK-LABEL: @foo
// CHECK: llvm.mlir.addressof @bar_descriptor_set0_binding0 : !llvm.ptr<i32>
spv.globalVariable @bar bind(0, 0) : !spv.ptr<i32, StorageBuffer>
@@ -57,7 +57,7 @@ spv.module Logical GLSL450 {
}
spv.module @name Logical GLSL450 {
- // CHECK: llvm.mlir.global external @name_bar_descriptor_set0_binding0() : !llvm.i32
+ // CHECK: llvm.mlir.global external @name_bar_descriptor_set0_binding0() : i32
// CHECK-LABEL: @foo
// CHECK: llvm.mlir.addressof @name_bar_descriptor_set0_binding0 : !llvm.ptr<i32>
spv.globalVariable @bar bind(0, 0) : !spv.ptr<i32, StorageBuffer>
@@ -161,20 +161,20 @@ spv.func @store_nontemporal(%arg0 : f32) "None" {
// CHECK-LABEL: @variable_scalar
spv.func @variable_scalar() "None" {
- // CHECK: %[[SIZE1:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: llvm.alloca %[[SIZE1]] x !llvm.float : (!llvm.i32) -> !llvm.ptr<float>
+ // CHECK: %[[SIZE1:.*]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: llvm.alloca %[[SIZE1]] x !llvm.float : (i32) -> !llvm.ptr<float>
%0 = spv.Variable : !spv.ptr<f32, Function>
- // CHECK: %[[SIZE2:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: llvm.alloca %[[SIZE2]] x !llvm.i8 : (!llvm.i32) -> !llvm.ptr<i8>
+ // CHECK: %[[SIZE2:.*]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: llvm.alloca %[[SIZE2]] x i8 : (i32) -> !llvm.ptr<i8>
%1 = spv.Variable : !spv.ptr<i8, Function>
spv.Return
}
// CHECK-LABEL: @variable_scalar_with_initialization
spv.func @variable_scalar_with_initialization() "None" {
- // CHECK: %[[VALUE:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
- // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: %[[ALLOCATED:.*]] = llvm.alloca %[[SIZE]] x !llvm.i64 : (!llvm.i32) -> !llvm.ptr<i64>
+ // CHECK: %[[VALUE:.*]] = llvm.mlir.constant(0 : i64) : i64
+ // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[ALLOCATED:.*]] = llvm.alloca %[[SIZE]] x i64 : (i32) -> !llvm.ptr<i64>
// CHECK: llvm.store %[[VALUE]], %[[ALLOCATED]] : !llvm.ptr<i64>
%c = spv.constant 0 : i64
%0 = spv.Variable init(%c) : !spv.ptr<i64, Function>
@@ -183,8 +183,8 @@ spv.func @variable_scalar_with_initialization() "None" {
// CHECK-LABEL: @variable_vector
spv.func @variable_vector() "None" {
- // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: llvm.alloca %[[SIZE]] x !llvm.vec<3 x float> : (!llvm.i32) -> !llvm.ptr<vec<3 x float>>
+ // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: llvm.alloca %[[SIZE]] x !llvm.vec<3 x float> : (i32) -> !llvm.ptr<vec<3 x float>>
%0 = spv.Variable : !spv.ptr<vector<3xf32>, Function>
spv.Return
}
@@ -192,8 +192,8 @@ spv.func @variable_vector() "None" {
// CHECK-LABEL: @variable_vector_with_initialization
spv.func @variable_vector_with_initialization() "None" {
// CHECK: %[[VALUE:.*]] = llvm.mlir.constant(dense<false> : vector<3xi1>) : !llvm.vec<3 x i1>
- // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: %[[ALLOCATED:.*]] = llvm.alloca %[[SIZE]] x !llvm.vec<3 x i1> : (!llvm.i32) -> !llvm.ptr<vec<3 x i1>>
+ // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[ALLOCATED:.*]] = llvm.alloca %[[SIZE]] x !llvm.vec<3 x i1> : (i32) -> !llvm.ptr<vec<3 x i1>>
// CHECK: llvm.store %[[VALUE]], %[[ALLOCATED]] : !llvm.ptr<vec<3 x i1>>
%c = spv.constant dense<false> : vector<3xi1>
%0 = spv.Variable init(%c) : !spv.ptr<vector<3xi1>, Function>
@@ -202,8 +202,8 @@ spv.func @variable_vector_with_initialization() "None" {
// CHECK-LABEL: @variable_array
spv.func @variable_array() "None" {
- // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: llvm.alloca %[[SIZE]] x !llvm.array<10 x i32> : (!llvm.i32) -> !llvm.ptr<array<10 x i32>>
+ // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: llvm.alloca %[[SIZE]] x !llvm.array<10 x i32> : (i32) -> !llvm.ptr<array<10 x i32>>
%0 = spv.Variable : !spv.ptr<!spv.array<10 x i32>, Function>
spv.Return
}
diff --git a/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir
index 32841d1846b7..127a3b87d0a1 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir
@@ -13,8 +13,8 @@ spv.func @composite_extract_array(%arg: !spv.array<4x!spv.array<4xf32>>) "None"
// CHECK-LABEL: @composite_extract_vector
spv.func @composite_extract_vector(%arg: vector<3xf32>) "None" {
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
- // CHECK: llvm.extractelement %{{.*}}[%[[ZERO]] : !llvm.i32] : !llvm.vec<3 x float>
+ // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
+ // CHECK: llvm.extractelement %{{.*}}[%[[ZERO]] : i32] : !llvm.vec<3 x float>
%0 = spv.CompositeExtract %arg[0 : i32] : vector<3xf32>
spv.Return
}
@@ -32,8 +32,8 @@ spv.func @composite_insert_struct(%arg0: i32, %arg1: !spv.struct<(f32, !spv.arra
// CHECK-LABEL: @composite_insert_vector
spv.func @composite_insert_vector(%arg0: vector<3xf32>, %arg1: f32) "None" {
- // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
- // CHECK: llvm.insertelement %{{.*}}, %{{.*}}[%[[ONE]] : !llvm.i32] : !llvm.vec<3 x float>
+ // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: llvm.insertelement %{{.*}}, %{{.*}}[%[[ONE]] : i32] : !llvm.vec<3 x float>
%0 = spv.CompositeInsert %arg1, %arg0[1 : i32] : f32 into vector<3xf32>
spv.Return
}
@@ -44,9 +44,9 @@ spv.func @composite_insert_vector(%arg0: vector<3xf32>, %arg1: f32) "None" {
// CHECK-LABEL: @select_scalar
spv.func @select_scalar(%arg0: i1, %arg1: vector<3xi32>, %arg2: f32) "None" {
- // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : !llvm.i1, !llvm.vec<3 x i32>
+ // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, !llvm.vec<3 x i32>
%0 = spv.Select %arg0, %arg1, %arg1 : i1, vector<3xi32>
- // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : !llvm.i1, !llvm.float
+ // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, !llvm.float
%1 = spv.Select %arg0, %arg2, %arg2 : i1, f32
spv.Return
}
@@ -65,7 +65,7 @@ spv.func @select_vector(%arg0: vector<2xi1>, %arg1: vector<2xi32>) "None" {
// CHECK: module {
// CHECK-NEXT: llvm.mlir.global external constant @{{.*}}() : !llvm.struct<(i32)> {
// CHECK-NEXT: %[[UNDEF:.*]] = llvm.mlir.undef : !llvm.struct<(i32)>
-// CHECK-NEXT: %[[VAL:.*]] = llvm.mlir.constant(31 : i32) : !llvm.i32
+// CHECK-NEXT: %[[VAL:.*]] = llvm.mlir.constant(31 : i32) : i32
// CHECK-NEXT: %[[RET:.*]] = llvm.insertvalue %[[VAL]], %[[UNDEF]][0 : i32] : !llvm.struct<(i32)>
// CHECK-NEXT: llvm.return %[[RET]] : !llvm.struct<(i32)>
// CHECK-NEXT: }
@@ -84,13 +84,13 @@ spv.module Logical OpenCL {
// CHECK: module {
// CHECK-NEXT: llvm.mlir.global external constant @{{.*}}() : !llvm.struct<(i32, array<3 x i32>)> {
// CHECK-NEXT: %[[UNDEF:.*]] = llvm.mlir.undef : !llvm.struct<(i32, array<3 x i32>)>
-// CHECK-NEXT: %[[EM:.*]] = llvm.mlir.constant(18 : i32) : !llvm.i32
+// CHECK-NEXT: %[[EM:.*]] = llvm.mlir.constant(18 : i32) : i32
// CHECK-NEXT: %[[T0:.*]] = llvm.insertvalue %[[EM]], %[[UNDEF]][0 : i32] : !llvm.struct<(i32, array<3 x i32>)>
-// CHECK-NEXT: %[[C0:.*]] = llvm.mlir.constant(32 : i32) : !llvm.i32
+// CHECK-NEXT: %[[C0:.*]] = llvm.mlir.constant(32 : i32) : i32
// CHECK-NEXT: %[[T1:.*]] = llvm.insertvalue %[[C0]], %[[T0]][1 : i32, 0 : i32] : !llvm.struct<(i32, array<3 x i32>)>
-// CHECK-NEXT: %[[C1:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+// CHECK-NEXT: %[[C1:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK-NEXT: %[[T2:.*]] = llvm.insertvalue %[[C1]], %[[T1]][1 : i32, 1 : i32] : !llvm.struct<(i32, array<3 x i32>)>
-// CHECK-NEXT: %[[C2:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+// CHECK-NEXT: %[[C2:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK-NEXT: %[[RET:.*]] = llvm.insertvalue %[[C2]], %[[T2]][1 : i32, 2 : i32] : !llvm.struct<(i32, array<3 x i32>)>
// CHECK-NEXT: llvm.return %[[RET]] : !llvm.struct<(i32, array<3 x i32>)>
// CHECK-NEXT: }
diff --git a/mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir
index e23844611258..c28328385dcf 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir
@@ -6,18 +6,18 @@
// CHECK-LABEL: @shift_right_arithmetic_scalar
spv.func @shift_right_arithmetic_scalar(%arg0: i32, %arg1: si32, %arg2 : i16, %arg3 : ui16) "None" {
- // CHECK: llvm.ashr %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.ashr %{{.*}}, %{{.*}} : i32
%0 = spv.ShiftRightArithmetic %arg0, %arg0 : i32, i32
- // CHECK: llvm.ashr %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.ashr %{{.*}}, %{{.*}} : i32
%1 = spv.ShiftRightArithmetic %arg0, %arg1 : i32, si32
- // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : !llvm.i16 to !llvm.i32
- // CHECK: llvm.ashr %{{.*}}, %[[SEXT]] : !llvm.i32
+ // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : i16 to i32
+ // CHECK: llvm.ashr %{{.*}}, %[[SEXT]] : i32
%2 = spv.ShiftRightArithmetic %arg0, %arg2 : i32, i16
- // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : !llvm.i16 to !llvm.i32
- // CHECK: llvm.ashr %{{.*}}, %[[ZEXT]] : !llvm.i32
+ // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : i16 to i32
+ // CHECK: llvm.ashr %{{.*}}, %[[ZEXT]] : i32
%3 = spv.ShiftRightArithmetic %arg0, %arg3 : i32, ui16
spv.Return
}
@@ -46,18 +46,18 @@ spv.func @shift_right_arithmetic_vector(%arg0: vector<4xi64>, %arg1: vector<4xui
// CHECK-LABEL: @shift_right_logical_scalar
spv.func @shift_right_logical_scalar(%arg0: i32, %arg1: si32, %arg2 : si16, %arg3 : ui16) "None" {
- // CHECK: llvm.lshr %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.lshr %{{.*}}, %{{.*}} : i32
%0 = spv.ShiftRightLogical %arg0, %arg0 : i32, i32
- // CHECK: llvm.lshr %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.lshr %{{.*}}, %{{.*}} : i32
%1 = spv.ShiftRightLogical %arg0, %arg1 : i32, si32
- // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : !llvm.i16 to !llvm.i32
- // CHECK: llvm.lshr %{{.*}}, %[[SEXT]] : !llvm.i32
+ // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : i16 to i32
+ // CHECK: llvm.lshr %{{.*}}, %[[SEXT]] : i32
%2 = spv.ShiftRightLogical %arg0, %arg2 : i32, si16
- // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : !llvm.i16 to !llvm.i32
- // CHECK: llvm.lshr %{{.*}}, %[[ZEXT]] : !llvm.i32
+ // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : i16 to i32
+ // CHECK: llvm.lshr %{{.*}}, %[[ZEXT]] : i32
%3 = spv.ShiftRightLogical %arg0, %arg3 : i32, ui16
spv.Return
}
@@ -86,18 +86,18 @@ spv.func @shift_right_logical_vector(%arg0: vector<4xi64>, %arg1: vector<4xsi64>
// CHECK-LABEL: @shift_left_logical_scalar
spv.func @shift_left_logical_scalar(%arg0: i32, %arg1: si32, %arg2 : i16, %arg3 : ui16) "None" {
- // CHECK: llvm.shl %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.shl %{{.*}}, %{{.*}} : i32
%0 = spv.ShiftLeftLogical %arg0, %arg0 : i32, i32
- // CHECK: llvm.shl %{{.*}}, %{{.*}} : !llvm.i32
+ // CHECK: llvm.shl %{{.*}}, %{{.*}} : i32
%1 = spv.ShiftLeftLogical %arg0, %arg1 : i32, si32
- // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : !llvm.i16 to !llvm.i32
- // CHECK: llvm.shl %{{.*}}, %[[SEXT]] : !llvm.i32
+ // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : i16 to i32
+ // CHECK: llvm.shl %{{.*}}, %[[SEXT]] : i32
%2 = spv.ShiftLeftLogical %arg0, %arg2 : i32, i16
- // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : !llvm.i16 to !llvm.i32
- // CHECK: llvm.shl %{{.*}}, %[[ZEXT]] : !llvm.i32
+ // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : i16 to i32
+ // CHECK: llvm.shl %{{.*}}, %[[ZEXT]] : i32
%3 = spv.ShiftLeftLogical %arg0, %arg3 : i32, ui16
spv.Return
}
diff --git a/mlir/test/Conversion/StandardToLLVM/calling-convention.mlir b/mlir/test/Conversion/StandardToLLVM/calling-convention.mlir
index 238cd39577b8..6d37987bb218 100644
--- a/mlir/test/Conversion/StandardToLLVM/calling-convention.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/calling-convention.mlir
@@ -8,8 +8,8 @@
// An external function is transformed into the glue around calling an interface function.
// CHECK-LABEL: @external
-// CHECK: %[[ALLOC0:.*]]: !llvm.ptr<float>, %[[ALIGN0:.*]]: !llvm.ptr<float>, %[[OFFSET0:.*]]: !llvm.i64, %[[SIZE00:.*]]: !llvm.i64, %[[SIZE01:.*]]: !llvm.i64, %[[STRIDE00:.*]]: !llvm.i64, %[[STRIDE01:.*]]: !llvm.i64,
-// CHECK: %[[ALLOC1:.*]]: !llvm.ptr<float>, %[[ALIGN1:.*]]: !llvm.ptr<float>, %[[OFFSET1:.*]]: !llvm.i64)
+// CHECK: %[[ALLOC0:.*]]: !llvm.ptr<float>, %[[ALIGN0:.*]]: !llvm.ptr<float>, %[[OFFSET0:.*]]: i64, %[[SIZE00:.*]]: i64, %[[SIZE01:.*]]: i64, %[[STRIDE00:.*]]: i64, %[[STRIDE01:.*]]: i64,
+// CHECK: %[[ALLOC1:.*]]: !llvm.ptr<float>, %[[ALIGN1:.*]]: !llvm.ptr<float>, %[[OFFSET1:.*]]: i64)
func private @external(%arg0: memref<?x?xf32>, %arg1: memref<f32>)
// Populate the descriptor for arg0.
// CHECK: %[[DESC00:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
@@ -67,7 +67,7 @@ func @caller() {
// CHECK: %[[OFFSET1:.*]] = llvm.extractvalue %[[DESC1]][2]
// Forward the values to the call.
- // CHECK: llvm.call @external(%[[ALLOC0]], %[[ALIGN0]], %[[OFFSET0]], %[[SIZE00]], %[[SIZE01]], %[[STRIDE00]], %[[STRIDE01]], %[[ALLOC1]], %[[ALIGN1]], %[[OFFSET1]]) : (!llvm.ptr<float>, !llvm.ptr<float>, !llvm.i64, !llvm.i64, !llvm.i64, !llvm.i64, !llvm.i64, !llvm.ptr<float>, !llvm.ptr<float>, !llvm.i64) -> ()
+ // CHECK: llvm.call @external(%[[ALLOC0]], %[[ALIGN0]], %[[OFFSET0]], %[[SIZE00]], %[[SIZE01]], %[[STRIDE00]], %[[STRIDE01]], %[[ALLOC1]], %[[ALIGN1]], %[[OFFSET1]]) : (!llvm.ptr<float>, !llvm.ptr<float>, i64, i64, i64, i64, i64, !llvm.ptr<float>, !llvm.ptr<float>, i64) -> ()
call @external(%0#0, %0#1) : (memref<?x?xf32>, memref<f32>) -> ()
return
}
@@ -93,7 +93,7 @@ func @callee(%arg0: memref<?xf32>, %arg1: index) {
// CHECK: %[[STRIDE:.*]] = llvm.extractvalue %[[DESC]][4, 0]
// Forward the descriptor components to the call.
- // CHECK: llvm.call @callee(%[[ALLOC]], %[[ALIGN]], %[[OFFSET]], %[[SIZE]], %[[STRIDE]], %{{.*}}) : (!llvm.ptr<float>, !llvm.ptr<float>, !llvm.i64, !llvm.i64, !llvm.i64, !llvm.i64) -> ()
+ // CHECK: llvm.call @callee(%[[ALLOC]], %[[ALIGN]], %[[OFFSET]], %[[SIZE]], %[[STRIDE]], %{{.*}}) : (!llvm.ptr<float>, !llvm.ptr<float>, i64, i64, i64, i64) -> ()
// EMIT_C_ATTRIBUTE-NOT: @mlir_ciface_callee
@@ -132,7 +132,7 @@ func @return_var_memref_caller(%arg0: memref<4x3xf32>) {
// CHECK: %[[TABLES_SIZE:.*]] = llvm.mul %[[DOUBLE_RANK_INC]], %[[IDX_SIZE]]
// CHECK: %[[ALLOC_SIZE:.*]] = llvm.add %[[DOUBLE_PTR_SIZE]], %[[TABLES_SIZE]]
// CHECK: %[[FALSE:.*]] = llvm.mlir.constant(false)
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[ALLOC_SIZE]] x !llvm.i8
+ // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[ALLOC_SIZE]] x i8
// CHECK: %[[SOURCE:.*]] = llvm.extractvalue %[[CALL_RES]][1]
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA]], %[[SOURCE]], %[[ALLOC_SIZE]], %[[FALSE]])
// CHECK: llvm.call @free(%[[SOURCE]])
@@ -187,7 +187,7 @@ func @return_two_var_memref_caller(%arg0: memref<4x3xf32>) {
// CHECK: %[[RES_2:.*]] = llvm.extractvalue %[[CALL_RES]][1]
%0:2 = call @return_two_var_memref(%arg0) : (memref<4x3xf32>) -> (memref<*xf32>, memref<*xf32>)
- // CHECK: %[[ALLOCA_1:.*]] = llvm.alloca %{{.*}} x !llvm.i8
+ // CHECK: %[[ALLOCA_1:.*]] = llvm.alloca %{{.*}} x i8
// CHECK: %[[SOURCE_1:.*]] = llvm.extractvalue %[[RES_1:.*]][1] : ![[DESC_TYPE:.*]]
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA_1]], %[[SOURCE_1]], %{{.*}}, %[[FALSE:.*]])
// CHECK: llvm.call @free(%[[SOURCE_1]])
@@ -195,7 +195,7 @@ func @return_two_var_memref_caller(%arg0: memref<4x3xf32>) {
// CHECK: %[[DESC_11:.*]] = llvm.insertvalue %{{.*}}, %[[DESC_1]][0]
// CHECK: llvm.insertvalue %[[ALLOCA_1]], %[[DESC_11]][1]
- // CHECK: %[[ALLOCA_2:.*]] = llvm.alloca %{{.*}} x !llvm.i8
+ // CHECK: %[[ALLOCA_2:.*]] = llvm.alloca %{{.*}} x i8
// CHECK: %[[SOURCE_2:.*]] = llvm.extractvalue %[[RES_2:.*]][1]
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA_2]], %[[SOURCE_2]], %{{.*}}, %[[FALSE]])
// CHECK: llvm.call @free(%[[SOURCE_2]])
diff --git a/mlir/test/Conversion/StandardToLLVM/convert-argattrs.mlir b/mlir/test/Conversion/StandardToLLVM/convert-argattrs.mlir
index 2831aefc10f9..41959a79d58f 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-argattrs.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-argattrs.mlir
@@ -12,8 +12,8 @@ func @check_attributes(%static: memref<10x20xf32> {dialect.a = true, dialect.b =
// CHECK-LABEL: func @check_multiple
// Make sure arguments attributes are attached to the right argument. We match
// commas in the argument list for this purpose.
-// CHECK: %{{.*}}: !llvm{{.*}} {first.arg = true}, %{{.*}}: !llvm{{.*}} {first.arg = true}, %{{.*}}: !llvm{{.*}} {first.arg = true},
-// CHECK-SAME: %{{.*}}: !llvm{{.*}} {second.arg = 42 : i32}, %{{.*}}: !llvm{{.*}} {second.arg = 42 : i32}, %{{.*}}: !llvm{{.*}} {second.arg = 42 : i32})
+// CHECK: %{{.*}}: !llvm{{.*}} {first.arg = true}, %{{.*}}: !llvm{{.*}} {first.arg = true}, %{{.*}}: i{{.*}} {first.arg = true},
+// CHECK-SAME: %{{.*}}: !llvm{{.*}} {second.arg = 42 : i32}, %{{.*}}: !llvm{{.*}} {second.arg = 42 : i32}, %{{.*}}: i{{.*}} {second.arg = 42 : i32})
func @check_multiple(%first: memref<f32> {first.arg = true}, %second: memref<f32> {second.arg = 42 : i32}) {
return
}
diff --git a/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
index b2708a562eab..080ba531a14d 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
@@ -3,11 +3,11 @@
// CHECK-LABEL: func @check_strided_memref_arguments(
// CHECK-COUNT-2: !llvm.ptr<float>
-// CHECK-COUNT-5: !llvm.i64
+// CHECK-COUNT-5: i64
// CHECK-COUNT-2: !llvm.ptr<float>
-// CHECK-COUNT-5: !llvm.i64
+// CHECK-COUNT-5: i64
// CHECK-COUNT-2: !llvm.ptr<float>
-// CHECK-COUNT-5: !llvm.i64
+// CHECK-COUNT-5: i64
func @check_strided_memref_arguments(%static: memref<10x20xf32, affine_map<(i,j)->(20 * i + j + 1)>>,
%dynamic : memref<?x?xf32, affine_map<(i,j)[M]->(M * i + j + 1)>>,
%mixed : memref<10x?xf32, affine_map<(i,j)[M]->(M * i + j + 1)>>) {
@@ -16,31 +16,31 @@ func @check_strided_memref_arguments(%static: memref<10x20xf32, affine_map<(i,j)
// CHECK-LABEL: func @check_arguments
// CHECK-COUNT-2: !llvm.ptr<float>
-// CHECK-COUNT-5: !llvm.i64
+// CHECK-COUNT-5: i64
// CHECK-COUNT-2: !llvm.ptr<float>
-// CHECK-COUNT-5: !llvm.i64
+// CHECK-COUNT-5: i64
// CHECK-COUNT-2: !llvm.ptr<float>
-// CHECK-COUNT-5: !llvm.i64
+// CHECK-COUNT-5: i64
func @check_arguments(%static: memref<10x20xf32>, %dynamic : memref<?x?xf32>, %mixed : memref<10x?xf32>) {
return
}
// CHECK-LABEL: func @mixed_alloc(
-// CHECK: %[[M:.*]]: !llvm.i64, %[[N:.*]]: !llvm.i64) -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)> {
+// CHECK: %[[M:.*]]: i64, %[[N:.*]]: i64) -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)> {
func @mixed_alloc(%arg0: index, %arg1: index) -> memref<?x42x?xf32> {
-// CHECK: %[[c42:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64
-// CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: %[[st0:.*]] = llvm.mul %[[N]], %[[c42]] : !llvm.i64
-// CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[st0]], %[[M]] : !llvm.i64
+// CHECK: %[[c42:.*]] = llvm.mlir.constant(42 : index) : i64
+// CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: %[[st0:.*]] = llvm.mul %[[N]], %[[c42]] : i64
+// CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[st0]], %[[M]] : i64
// CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
-// CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to !llvm.i64
-// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (!llvm.i64) -> !llvm.ptr<i8>
+// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr<i8>
// CHECK-NEXT: llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<float>
// CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
// CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
// CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK-NEXT: llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
// CHECK-NEXT: llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
// CHECK-NEXT: llvm.insertvalue %[[c42]], %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
@@ -64,19 +64,19 @@ func @mixed_dealloc(%arg0: memref<?x42x?xf32>) {
}
// CHECK-LABEL: func @dynamic_alloc(
-// CHECK: %[[M:.*]]: !llvm.i64, %[[N:.*]]: !llvm.i64) -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
+// CHECK: %[[M:.*]]: i64, %[[N:.*]]: i64) -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
-// CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[N]], %[[M]] : !llvm.i64
+// CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[N]], %[[M]] : i64
// CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
-// CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to !llvm.i64
-// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (!llvm.i64) -> !llvm.ptr<i8>
+// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr<i8>
// CHECK-NEXT: llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<float>
// CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK-NEXT: llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: llvm.insertvalue %[[N]], %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
@@ -90,18 +90,18 @@ func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
// -----
// CHECK-LABEL: func @dynamic_alloca
-// CHECK: %[[M:.*]]: !llvm.i64, %[[N:.*]]: !llvm.i64) -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
+// CHECK: %[[M:.*]]: i64, %[[N:.*]]: i64) -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
func @dynamic_alloca(%arg0: index, %arg1: index) -> memref<?x?xf32> {
-// CHECK-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: %[[num_elems:.*]] = llvm.mul %[[N]], %[[M]] : !llvm.i64
+// CHECK-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: %[[num_elems:.*]] = llvm.mul %[[N]], %[[M]] : i64
// CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
-// CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to !llvm.i64
-// CHECK-NEXT: %[[allocated:.*]] = llvm.alloca %[[sz_bytes]] x !llvm.float : (!llvm.i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// CHECK-NEXT: %[[allocated:.*]] = llvm.alloca %[[sz_bytes]] x !llvm.float : (i64) -> !llvm.ptr<float>
// CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: llvm.insertvalue %[[allocated]], %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: llvm.insertvalue %[[allocated]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK-NEXT: llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: llvm.insertvalue %[[N]], %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
@@ -112,7 +112,7 @@ func @dynamic_alloca(%arg0: index, %arg1: index) -> memref<?x?xf32> {
// Test with explicitly specified alignment. llvm.alloca takes care of the
// alignment. The same pointer is thus used for allocation and aligned
// accesses.
-// CHECK: %[[alloca_aligned:.*]] = llvm.alloca %{{.*}} x !llvm.float {alignment = 32 : i64} : (!llvm.i64) -> !llvm.ptr<float>
+// CHECK: %[[alloca_aligned:.*]] = llvm.alloca %{{.*}} x !llvm.float {alignment = 32 : i64} : (i64) -> !llvm.ptr<float>
// CHECK: %[[desc:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[desc1:.*]] = llvm.insertvalue %[[alloca_aligned]], %[[desc]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.insertvalue %[[alloca_aligned]], %[[desc1]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
@@ -132,15 +132,15 @@ func @dynamic_dealloc(%arg0: memref<?x?xf32>) {
// CHECK-LABEL: func @stdlib_aligned_alloc({{.*}}) -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
// ALIGNED-ALLOC-LABEL: func @stdlib_aligned_alloc({{.*}}) -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> {
-// ALIGNED-ALLOC-NEXT: %[[sz1:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64
-// ALIGNED-ALLOC-NEXT: %[[sz2:.*]] = llvm.mlir.constant(18 : index) : !llvm.i64
-// ALIGNED-ALLOC-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// ALIGNED-ALLOC-NEXT: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : !llvm.i64
+// ALIGNED-ALLOC-NEXT: %[[sz1:.*]] = llvm.mlir.constant(32 : index) : i64
+// ALIGNED-ALLOC-NEXT: %[[sz2:.*]] = llvm.mlir.constant(18 : index) : i64
+// ALIGNED-ALLOC-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
+// ALIGNED-ALLOC-NEXT: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64
// ALIGNED-ALLOC-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// ALIGNED-ALLOC-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
-// ALIGNED-ALLOC-NEXT: %[[bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to !llvm.i64
-// ALIGNED-ALLOC-NEXT: %[[alignment:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64
-// ALIGNED-ALLOC-NEXT: %[[allocated:.*]] = llvm.call @aligned_alloc(%[[alignment]], %[[bytes]]) : (!llvm.i64, !llvm.i64) -> !llvm.ptr<i8>
+// ALIGNED-ALLOC-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+// ALIGNED-ALLOC-NEXT: %[[bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// ALIGNED-ALLOC-NEXT: %[[alignment:.*]] = llvm.mlir.constant(32 : index) : i64
+// ALIGNED-ALLOC-NEXT: %[[allocated:.*]] = llvm.call @aligned_alloc(%[[alignment]], %[[bytes]]) : (i64, i64) -> !llvm.ptr<i8>
// ALIGNED-ALLOC-NEXT: llvm.bitcast %[[allocated]] : !llvm.ptr<i8> to !llvm.ptr<float>
%0 = alloc() {alignment = 32} : memref<32x18xf32>
// Do another alloc just to test that we have a unique declaration for
@@ -149,19 +149,19 @@ func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> {
%1 = alloc() {alignment = 64} : memref<4096xf32>
// Alignment is to element type boundaries (minimum 16 bytes).
- // ALIGNED-ALLOC: %[[c32:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64
+ // ALIGNED-ALLOC: %[[c32:.*]] = llvm.mlir.constant(32 : index) : i64
// ALIGNED-ALLOC-NEXT: llvm.call @aligned_alloc(%[[c32]]
%2 = alloc() : memref<4096xvector<8xf32>>
// The minimum alignment is 16 bytes unless explicitly specified.
- // ALIGNED-ALLOC: %[[c16:.*]] = llvm.mlir.constant(16 : index) : !llvm.i64
+ // ALIGNED-ALLOC: %[[c16:.*]] = llvm.mlir.constant(16 : index) : i64
// ALIGNED-ALLOC-NEXT: llvm.call @aligned_alloc(%[[c16]],
%3 = alloc() : memref<4096xvector<2xf32>>
- // ALIGNED-ALLOC: %[[c8:.*]] = llvm.mlir.constant(8 : index) : !llvm.i64
+ // ALIGNED-ALLOC: %[[c8:.*]] = llvm.mlir.constant(8 : index) : i64
// ALIGNED-ALLOC-NEXT: llvm.call @aligned_alloc(%[[c8]],
%4 = alloc() {alignment = 8} : memref<1024xvector<4xf32>>
// Bump the memref allocation size if its size is not a multiple of alignment.
- // ALIGNED-ALLOC: %[[c32:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64
- // ALIGNED-ALLOC-NEXT: llvm.mlir.constant(1 : index) : !llvm.i64
+ // ALIGNED-ALLOC: %[[c32:.*]] = llvm.mlir.constant(32 : index) : i64
+ // ALIGNED-ALLOC-NEXT: llvm.mlir.constant(1 : index) : i64
// ALIGNED-ALLOC-NEXT: llvm.sub
// ALIGNED-ALLOC-NEXT: llvm.add
// ALIGNED-ALLOC-NEXT: llvm.urem
@@ -169,7 +169,7 @@ func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> {
// ALIGNED-ALLOC-NEXT: llvm.call @aligned_alloc(%[[c32]], %[[SIZE_ALIGNED]])
%5 = alloc() {alignment = 32} : memref<100xf32>
// Bump alignment to the next power of two if it isn't.
- // ALIGNED-ALLOC: %[[c128:.*]] = llvm.mlir.constant(128 : index) : !llvm.i64
+ // ALIGNED-ALLOC: %[[c128:.*]] = llvm.mlir.constant(128 : index) : i64
// ALIGNED-ALLOC: llvm.call @aligned_alloc(%[[c128]]
%6 = alloc(%N) : memref<?xvector<18xf32>>
return %0 : memref<32x18xf32>
@@ -177,15 +177,15 @@ func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> {
// CHECK-LABEL: func @mixed_load(
// CHECK-COUNT-2: !llvm.ptr<float>,
-// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i64
-// CHECK: %[[I:.*]]: !llvm.i64,
-// CHECK: %[[J:.*]]: !llvm.i64)
+// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64
+// CHECK: %[[I:.*]]: i64,
+// CHECK: %[[J:.*]]: i64)
func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) {
// CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64
-// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64
-// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
+// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
+// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
// CHECK-NEXT: llvm.load %[[addr]] : !llvm.ptr<float>
%0 = load %mixed[%i, %j] : memref<42x?xf32>
return
@@ -194,19 +194,19 @@ func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) {
// CHECK-LABEL: func @dynamic_load(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>
-// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: i64
func @dynamic_load(%dynamic : memref<?x?xf32>, %i : index, %j : index) {
// CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64
-// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64
-// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
+// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
+// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
// CHECK-NEXT: llvm.load %[[addr]] : !llvm.ptr<float>
%0 = load %dynamic[%i, %j] : memref<?x?xf32>
return
@@ -215,33 +215,33 @@ func @dynamic_load(%dynamic : memref<?x?xf32>, %i : index, %j : index) {
// CHECK-LABEL: func @prefetch
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>
-// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: i64
func @prefetch(%A : memref<?x?xf32>, %i : index, %j : index) {
// CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64
-// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64
-// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
-// CHECK-NEXT: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
-// CHECK-NEXT: [[C3:%.*]] = llvm.mlir.constant(3 : i32) : !llvm.i32
-// CHECK-NEXT: [[C1_1:%.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
-// CHECK-NEXT: "llvm.intr.prefetch"(%[[addr]], [[C1]], [[C3]], [[C1_1]]) : (!llvm.ptr<float>, !llvm.i32, !llvm.i32, !llvm.i32) -> ()
+// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
+// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
+// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+// CHECK-NEXT: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32
+// CHECK-NEXT: [[C3:%.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK-NEXT: [[C1_1:%.*]] = llvm.mlir.constant(1 : i32) : i32
+// CHECK-NEXT: "llvm.intr.prefetch"(%[[addr]], [[C1]], [[C3]], [[C1_1]]) : (!llvm.ptr<float>, i32, i32, i32) -> ()
prefetch %A[%i, %j], write, locality<3>, data : memref<?x?xf32>
-// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: [[C0_1:%.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: [[C1_2:%.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
-// CHECK: "llvm.intr.prefetch"(%{{.*}}, [[C0]], [[C0_1]], [[C1_2]]) : (!llvm.ptr<float>, !llvm.i32, !llvm.i32, !llvm.i32) -> ()
+// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: [[C0_1:%.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: [[C1_2:%.*]] = llvm.mlir.constant(1 : i32) : i32
+// CHECK: "llvm.intr.prefetch"(%{{.*}}, [[C0]], [[C0_1]], [[C1_2]]) : (!llvm.ptr<float>, i32, i32, i32) -> ()
prefetch %A[%i, %j], read, locality<0>, data : memref<?x?xf32>
-// CHECK: [[C0_2:%.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : !llvm.i32
-// CHECK: [[C0_3:%.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: "llvm.intr.prefetch"(%{{.*}}, [[C0_2]], [[C2]], [[C0_3]]) : (!llvm.ptr<float>, !llvm.i32, !llvm.i32, !llvm.i32) -> ()
+// CHECK: [[C0_2:%.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK: [[C0_3:%.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "llvm.intr.prefetch"(%{{.*}}, [[C0_2]], [[C2]], [[C0_3]]) : (!llvm.ptr<float>, i32, i32, i32) -> ()
prefetch %A[%i, %j], read, locality<2>, instr : memref<?x?xf32>
return
}
@@ -249,19 +249,19 @@ func @prefetch(%A : memref<?x?xf32>, %i : index, %j : index) {
// CHECK-LABEL: func @dynamic_store
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>
-// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: i64
func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %val : f32) {
// CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64
-// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64
-// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
+// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
+// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
// CHECK-NEXT: llvm.store %{{.*}}, %[[addr]] : !llvm.ptr<float>
store %val, %dynamic[%i, %j] : memref<?x?xf32>
return
@@ -270,19 +270,19 @@ func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %val : f
// CHECK-LABEL: func @mixed_store
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>
-// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: i64
func @mixed_store(%mixed : memref<42x?xf32>, %i : index, %j : index, %val : f32) {
// CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64
-// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64
-// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
+// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
+// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
// CHECK-NEXT: llvm.store %{{.*}}, %[[addr]] : !llvm.ptr<float>
store %val, %mixed[%i, %j] : memref<42x?xf32>
return
@@ -339,11 +339,11 @@ func @memref_cast_mixed_to_mixed(%mixed : memref<42x?xf32>) {
// CHECK-LABEL: func @memref_cast_ranked_to_unranked
func @memref_cast_ranked_to_unranked(%arg : memref<42x2x?xf32>) {
-// CHECK-DAG: %[[c:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-DAG: %[[p:.*]] = llvm.alloca %[[c]] x !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)> : (!llvm.i64) -> !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>>
+// CHECK-DAG: %[[c:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK-DAG: %[[p:.*]] = llvm.alloca %[[c]] x !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)> : (i64) -> !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>>
// CHECK-DAG: llvm.store %{{.*}}, %[[p]] : !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>>
// CHECK-DAG: %[[p2:.*]] = llvm.bitcast %[[p]] : !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>> to !llvm.ptr<i8>
-// CHECK-DAG: %[[r:.*]] = llvm.mlir.constant(3 : i64) : !llvm.i64
+// CHECK-DAG: %[[r:.*]] = llvm.mlir.constant(3 : i64) : i64
// CHECK : llvm.mlir.undef : !llvm.struct<(i64, ptr<i8>)>
// CHECK-DAG: llvm.insertvalue %[[r]], %{{.*}}[0] : !llvm.struct<(i64, ptr<i8>)>
// CHECK-DAG: llvm.insertvalue %[[p2]], %{{.*}}[1] : !llvm.struct<(i64, ptr<i8>)>
@@ -361,7 +361,7 @@ func @memref_cast_unranked_to_ranked(%arg : memref<*xf32>) {
// CHECK-LABEL: func @mixed_memref_dim
func @mixed_memref_dim(%mixed : memref<42x?x?x13x?xf32>) {
-// CHECK: llvm.mlir.constant(42 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(42 : index) : i64
%c0 = constant 0 : index
%0 = dim %mixed, %c0 : memref<42x?x?x13x?xf32>
// CHECK: llvm.extractvalue %[[ld:.*]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
@@ -370,7 +370,7 @@ func @mixed_memref_dim(%mixed : memref<42x?x?x13x?xf32>) {
// CHECK: llvm.extractvalue %[[ld]][3, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
%c2 = constant 2 : index
%2 = dim %mixed, %c2 : memref<42x?x?x13x?xf32>
-// CHECK: llvm.mlir.constant(13 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(13 : index) : i64
%c3 = constant 3 : index
%3 = dim %mixed, %c3 : memref<42x?x?x13x?xf32>
// CHECK: llvm.extractvalue %[[ld]][3, 4] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
@@ -380,7 +380,7 @@ func @mixed_memref_dim(%mixed : memref<42x?x?x13x?xf32>) {
}
// CHECK-LABEL: @memref_dim_with_dyn_index
-// CHECK-SAME: %[[ALLOC_PTR:.*]]: !llvm.ptr<float>, %[[ALIGN_PTR:.*]]: !llvm.ptr<float>, %[[OFFSET:.*]]: !llvm.i64, %[[SIZE0:.*]]: !llvm.i64, %[[SIZE1:.*]]: !llvm.i64, %[[STRIDE0:.*]]: !llvm.i64, %[[STRIDE1:.*]]: !llvm.i64, %[[IDX:.*]]: !llvm.i64) -> !llvm.i64
+// CHECK-SAME: %[[ALLOC_PTR:.*]]: !llvm.ptr<float>, %[[ALIGN_PTR:.*]]: !llvm.ptr<float>, %[[OFFSET:.*]]: i64, %[[SIZE0:.*]]: i64, %[[SIZE1:.*]]: i64, %[[STRIDE0:.*]]: i64, %[[STRIDE1:.*]]: i64, %[[IDX:.*]]: i64) -> i64
func @memref_dim_with_dyn_index(%arg : memref<3x?xf32>, %idx : index) -> index {
// CHECK-NEXT: %[[DESCR0:.*]] = llvm.mlir.undef : [[DESCR_TY:!llvm.struct<\(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>\)>]]
// CHECK-NEXT: %[[DESCR1:.*]] = llvm.insertvalue %[[ALLOC_PTR]], %[[DESCR0]][0] : [[DESCR_TY]]
@@ -390,14 +390,14 @@ func @memref_dim_with_dyn_index(%arg : memref<3x?xf32>, %idx : index) -> index {
// CHECK-NEXT: %[[DESCR5:.*]] = llvm.insertvalue %[[STRIDE0]], %[[DESCR4]][4, 0] : [[DESCR_TY]]
// CHECK-NEXT: %[[DESCR6:.*]] = llvm.insertvalue %[[SIZE1]], %[[DESCR5]][3, 1] : [[DESCR_TY]]
// CHECK-NEXT: %[[DESCR7:.*]] = llvm.insertvalue %[[STRIDE1]], %[[DESCR6]][4, 1] : [[DESCR_TY]]
- // CHECK-DAG: %[[C0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
- // CHECK-DAG: %[[C1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+ // CHECK-DAG: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64
+ // CHECK-DAG: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK-DAG: %[[SIZES:.*]] = llvm.extractvalue %[[DESCR7]][3] : [[DESCR_TY]]
- // CHECK-DAG: %[[SIZES_PTR:.*]] = llvm.alloca %[[C1]] x !llvm.array<2 x i64> : (!llvm.i64) -> !llvm.ptr<array<2 x i64>>
+ // CHECK-DAG: %[[SIZES_PTR:.*]] = llvm.alloca %[[C1]] x !llvm.array<2 x i64> : (i64) -> !llvm.ptr<array<2 x i64>>
// CHECK-DAG: llvm.store %[[SIZES]], %[[SIZES_PTR]] : !llvm.ptr<array<2 x i64>>
- // CHECK-DAG: %[[RESULT_PTR:.*]] = llvm.getelementptr %[[SIZES_PTR]][%[[C0]], %[[IDX]]] : (!llvm.ptr<array<2 x i64>>, !llvm.i64, !llvm.i64) -> !llvm.ptr<i64>
+ // CHECK-DAG: %[[RESULT_PTR:.*]] = llvm.getelementptr %[[SIZES_PTR]][%[[C0]], %[[IDX]]] : (!llvm.ptr<array<2 x i64>>, i64, i64) -> !llvm.ptr<i64>
// CHECK-DAG: %[[RESULT:.*]] = llvm.load %[[RESULT_PTR]] : !llvm.ptr<i64>
- // CHECK-DAG: llvm.return %[[RESULT]] : !llvm.i64
+ // CHECK-DAG: llvm.return %[[RESULT]] : i64
%result = dim %arg, %idx : memref<3x?xf32>
return %result : index
}
@@ -415,15 +415,15 @@ func @memref_reinterpret_cast_ranked_to_static_shape(%input : memref<2x3xf32>) {
// CHECK: [[ALIGNED_PTR:%.*]] = llvm.extractvalue [[INPUT]][1] : [[TY]]
// CHECK: [[OUT_1:%.*]] = llvm.insertvalue [[BASE_PTR]], [[OUT_0]][0] : [[TY]]
// CHECK: [[OUT_2:%.*]] = llvm.insertvalue [[ALIGNED_PTR]], [[OUT_1]][1] : [[TY]]
-// CHECK: [[OFFSET:%.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+// CHECK: [[OFFSET:%.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK: [[OUT_3:%.*]] = llvm.insertvalue [[OFFSET]], [[OUT_2]][2] : [[TY]]
-// CHECK: [[SIZE_0:%.*]] = llvm.mlir.constant(6 : index) : !llvm.i64
+// CHECK: [[SIZE_0:%.*]] = llvm.mlir.constant(6 : index) : i64
// CHECK: [[OUT_4:%.*]] = llvm.insertvalue [[SIZE_0]], [[OUT_3]][3, 0] : [[TY]]
-// CHECK: [[SIZE_1:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK: [[SIZE_1:%.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: [[OUT_5:%.*]] = llvm.insertvalue [[SIZE_1]], [[OUT_4]][4, 0] : [[TY]]
-// CHECK: [[STRIDE_0:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK: [[STRIDE_0:%.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: [[OUT_6:%.*]] = llvm.insertvalue [[STRIDE_0]], [[OUT_5]][3, 1] : [[TY]]
-// CHECK: [[STRIDE_1:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK: [[STRIDE_1:%.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: [[OUT_7:%.*]] = llvm.insertvalue [[STRIDE_1]], [[OUT_6]][4, 1] : [[TY]]
// CHECK-LABEL: @memref_reinterpret_cast_unranked_to_dynamic_shape
@@ -439,18 +439,18 @@ func @memref_reinterpret_cast_unranked_to_dynamic_shape(%offset: index,
: memref<*xf32> to memref<?x?xf32, offset: ?, strides: [?, ?]>
return
}
-// CHECK-SAME: ([[OFFSET:%[a-z,0-9]+]]: !llvm.i64,
-// CHECK-SAME: [[SIZE_0:%[a-z,0-9]+]]: !llvm.i64, [[SIZE_1:%[a-z,0-9]+]]: !llvm.i64,
-// CHECK-SAME: [[STRIDE_0:%[a-z,0-9]+]]: !llvm.i64, [[STRIDE_1:%[a-z,0-9]+]]: !llvm.i64,
+// CHECK-SAME: ([[OFFSET:%[a-z,0-9]+]]: i64,
+// CHECK-SAME: [[SIZE_0:%[a-z,0-9]+]]: i64, [[SIZE_1:%[a-z,0-9]+]]: i64,
+// CHECK-SAME: [[STRIDE_0:%[a-z,0-9]+]]: i64, [[STRIDE_1:%[a-z,0-9]+]]: i64,
// CHECK: [[INPUT:%.*]] = llvm.insertvalue {{.*}}[1] : !llvm.struct<(i64, ptr<i8>)>
// CHECK: [[OUT_0:%.*]] = llvm.mlir.undef : [[TY:!.*]]
// CHECK: [[DESCRIPTOR:%.*]] = llvm.extractvalue [[INPUT]][1] : !llvm.struct<(i64, ptr<i8>)>
// CHECK: [[BASE_PTR_PTR:%.*]] = llvm.bitcast [[DESCRIPTOR]] : !llvm.ptr<i8> to !llvm.ptr<ptr<float>>
// CHECK: [[BASE_PTR:%.*]] = llvm.load [[BASE_PTR_PTR]] : !llvm.ptr<ptr<float>>
// CHECK: [[BASE_PTR_PTR_:%.*]] = llvm.bitcast [[DESCRIPTOR]] : !llvm.ptr<i8> to !llvm.ptr<ptr<float>>
-// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}[[C1]]]
-// CHECK-SAME: : (!llvm.ptr<ptr<float>>, !llvm.i64) -> !llvm.ptr<ptr<float>>
+// CHECK-SAME: : (!llvm.ptr<ptr<float>>, i64) -> !llvm.ptr<ptr<float>>
// CHECK: [[ALIGNED_PTR:%.*]] = llvm.load [[ALIGNED_PTR_PTR]] : !llvm.ptr<ptr<float>>
// CHECK: [[OUT_1:%.*]] = llvm.insertvalue [[BASE_PTR]], [[OUT_0]][0] : [[TY]]
// CHECK: [[OUT_2:%.*]] = llvm.insertvalue [[ALIGNED_PTR]], [[OUT_1]][1] : [[TY]]
@@ -473,13 +473,13 @@ func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
// CHECK: [[UNRANKED_OUT_1:%.*]] = llvm.insertvalue [[RANK]], [[UNRANKED_OUT_O]][0] : !llvm.struct<(i64, ptr<i8>)>
// Compute size in bytes to allocate result ranked descriptor
-// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK: [[PTR_SIZE:%.*]] = llvm.mlir.constant(8 : index) : !llvm.i64
-// CHECK: [[INDEX_SIZE:%.*]] = llvm.mlir.constant(8 : index) : !llvm.i64
-// CHECK: [[DOUBLE_PTR_SIZE:%.*]] = llvm.mul [[C2]], [[PTR_SIZE]] : !llvm.i64
+// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : index) : i64
+// CHECK: [[PTR_SIZE:%.*]] = llvm.mlir.constant(8 : index) : i64
+// CHECK: [[INDEX_SIZE:%.*]] = llvm.mlir.constant(8 : index) : i64
+// CHECK: [[DOUBLE_PTR_SIZE:%.*]] = llvm.mul [[C2]], [[PTR_SIZE]] : i64
// CHECK: [[DESC_ALLOC_SIZE:%.*]] = llvm.add [[DOUBLE_PTR_SIZE]], %{{.*}}
-// CHECK: [[UNDERLYING_DESC:%.*]] = llvm.alloca [[DESC_ALLOC_SIZE]] x !llvm.i8
+// CHECK: [[UNDERLYING_DESC:%.*]] = llvm.alloca [[DESC_ALLOC_SIZE]] x i8
// CHECK: llvm.insertvalue [[UNDERLYING_DESC]], [[UNRANKED_OUT_1]][1]
// Set allocated, aligned pointers and offset.
@@ -490,11 +490,11 @@ func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
// CHECK-SAME: !llvm.ptr<i8> to !llvm.ptr<ptr<float>>
// CHECK: llvm.store [[ALLOC_PTR]], [[BASE_PTR_PTR]] : !llvm.ptr<ptr<float>>
// CHECK: [[BASE_PTR_PTR_:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] : !llvm.ptr<i8> to !llvm.ptr<ptr<float>>
-// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}[[C1]]]
// CHECK: llvm.store [[ALIGN_PTR]], [[ALIGNED_PTR_PTR]] : !llvm.ptr<ptr<float>>
// CHECK: [[BASE_PTR_PTR__:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] : !llvm.ptr<i8> to !llvm.ptr<ptr<float>>
-// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
+// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : index) : i64
// CHECK: [[OFFSET_PTR_:%.*]] = llvm.getelementptr [[BASE_PTR_PTR__]]{{\[}}[[C2]]]
// CHECK: [[OFFSET_PTR:%.*]] = llvm.bitcast [[OFFSET_PTR_]]
// CHECK: llvm.store [[OFFSET]], [[OFFSET_PTR]] : !llvm.ptr<i64>
@@ -502,18 +502,18 @@ func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
// Iterate over shape operand in reverse order and set sizes and strides.
// CHECK: [[STRUCT_PTR:%.*]] = llvm.bitcast [[UNDERLYING_DESC]]
// CHECK-SAME: !llvm.ptr<i8> to !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, i64)>>
-// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: [[C3_I32:%.*]] = llvm.mlir.constant(3 : i32) : !llvm.i32
+// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: [[C3_I32:%.*]] = llvm.mlir.constant(3 : i32) : i32
// CHECK: [[SIZES_PTR:%.*]] = llvm.getelementptr [[STRUCT_PTR]]{{\[}}[[C0]], [[C3_I32]]]
// CHECK: [[STRIDES_PTR:%.*]] = llvm.getelementptr [[SIZES_PTR]]{{\[}}[[RANK]]]
// CHECK: [[SHAPE_IN_PTR:%.*]] = llvm.extractvalue [[SHAPE]][1] : [[SHAPE_TY]]
-// CHECK: [[C1_:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: [[RANK_MIN_1:%.*]] = llvm.sub [[RANK]], [[C1_]] : !llvm.i64
-// CHECK: llvm.br ^bb1([[RANK_MIN_1]], [[C1_]] : !llvm.i64, !llvm.i64)
+// CHECK: [[C1_:%.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: [[RANK_MIN_1:%.*]] = llvm.sub [[RANK]], [[C1_]] : i64
+// CHECK: llvm.br ^bb1([[RANK_MIN_1]], [[C1_]] : i64, i64)
-// CHECK: ^bb1([[DIM:%.*]]: !llvm.i64, [[CUR_STRIDE:%.*]]: !llvm.i64):
-// CHECK: [[C0_:%.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: [[COND:%.*]] = llvm.icmp "sge" [[DIM]], [[C0_]] : !llvm.i64
+// CHECK: ^bb1([[DIM:%.*]]: i64, [[CUR_STRIDE:%.*]]: i64):
+// CHECK: [[C0_:%.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: [[COND:%.*]] = llvm.icmp "sge" [[DIM]], [[C0_]] : i64
// CHECK: llvm.cond_br [[COND]], ^bb2, ^bb3
// CHECK: ^bb2:
@@ -523,9 +523,9 @@ func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
// CHECK: llvm.store [[SIZE]], [[TARGET_SIZE_PTR]] : !llvm.ptr<i64>
// CHECK: [[TARGET_STRIDE_PTR:%.*]] = llvm.getelementptr [[STRIDES_PTR]]{{\[}}[[DIM]]]
// CHECK: llvm.store [[CUR_STRIDE]], [[TARGET_STRIDE_PTR]] : !llvm.ptr<i64>
-// CHECK: [[UPDATE_STRIDE:%.*]] = llvm.mul [[CUR_STRIDE]], [[SIZE]] : !llvm.i64
-// CHECK: [[STRIDE_COND:%.*]] = llvm.sub [[DIM]], [[C1_]] : !llvm.i64
-// CHECK: llvm.br ^bb1([[STRIDE_COND]], [[UPDATE_STRIDE]] : !llvm.i64, !llvm.i64)
+// CHECK: [[UPDATE_STRIDE:%.*]] = llvm.mul [[CUR_STRIDE]], [[SIZE]] : i64
+// CHECK: [[STRIDE_COND:%.*]] = llvm.sub [[DIM]], [[C1_]] : i64
+// CHECK: llvm.br ^bb1([[STRIDE_COND]], [[UPDATE_STRIDE]] : i64, i64)
// CHECK: ^bb3:
// CHECK: llvm.return
diff --git a/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir b/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir
index 6d2419692750..d5f9aff52bff 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir
@@ -19,7 +19,7 @@ func private @fifth_order_left(%arg0: (((() -> ()) -> ()) -> ()) -> ())
func private @fifth_order_right(%arg0: () -> (() -> (() -> (() -> ()))))
// Check that memrefs are converted to argument packs if appear as function arguments.
-// CHECK: llvm.func @memref_call_conv(!llvm.ptr<float>, !llvm.ptr<float>, !llvm.i64, !llvm.i64, !llvm.i64)
+// CHECK: llvm.func @memref_call_conv(!llvm.ptr<float>, !llvm.ptr<float>, i64, i64, i64)
func private @memref_call_conv(%arg0: memref<?xf32>)
// Same in nested functions.
@@ -37,25 +37,25 @@ func @pass_through(%arg0: () -> ()) -> (() -> ()) {
return %bbarg : () -> ()
}
-// CHECK-LABEL: llvm.func @body(!llvm.i32)
+// CHECK-LABEL: llvm.func @body(i32)
func private @body(i32)
// CHECK-LABEL: llvm.func @indirect_const_call
-// CHECK-SAME: (%[[ARG0:.*]]: !llvm.i32) {
+// CHECK-SAME: (%[[ARG0:.*]]: i32) {
func @indirect_const_call(%arg0: i32) {
// CHECK-NEXT: %[[ADDR:.*]] = llvm.mlir.addressof @body : !llvm.ptr<func<void (i32)>>
%0 = constant @body : (i32) -> ()
-// CHECK-NEXT: llvm.call %[[ADDR]](%[[ARG0:.*]]) : (!llvm.i32) -> ()
+// CHECK-NEXT: llvm.call %[[ADDR]](%[[ARG0:.*]]) : (i32) -> ()
call_indirect %0(%arg0) : (i32) -> ()
// CHECK-NEXT: llvm.return
return
}
-// CHECK-LABEL: llvm.func @indirect_call(%arg0: !llvm.ptr<func<i32 (float)>>, %arg1: !llvm.float) -> !llvm.i32 {
+// CHECK-LABEL: llvm.func @indirect_call(%arg0: !llvm.ptr<func<i32 (float)>>, %arg1: !llvm.float) -> i32 {
func @indirect_call(%arg0: (f32) -> i32, %arg1: f32) -> i32 {
-// CHECK-NEXT: %0 = llvm.call %arg0(%arg1) : (!llvm.float) -> !llvm.i32
+// CHECK-NEXT: %0 = llvm.call %arg0(%arg1) : (!llvm.float) -> i32
%0 = call_indirect %arg0(%arg1) : (f32) -> i32
-// CHECK-NEXT: llvm.return %0 : !llvm.i32
+// CHECK-NEXT: llvm.return %0 : i32
return %0 : i32
}
diff --git a/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
index 158fdcba7c92..b6133e840251 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
@@ -11,7 +11,7 @@ func @check_noalias(%static : memref<2xf32> {llvm.noalias = true}, %other : memr
// CHECK-LABEL: func @check_static_return
// CHECK-COUNT-2: !llvm.ptr<float>
-// CHECK-COUNT-5: !llvm.i64
+// CHECK-COUNT-5: i64
// CHECK-SAME: -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// BAREPTR-LABEL: func @check_static_return
// BAREPTR-SAME: (%[[arg:.*]]: !llvm.ptr<float>) -> !llvm.ptr<float> {
@@ -21,15 +21,15 @@ func @check_static_return(%static : memref<32x18xf32>) -> memref<32x18xf32> {
// BAREPTR: %[[udf:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// BAREPTR-NEXT: %[[base0:.*]] = llvm.insertvalue %[[arg]], %[[udf]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// BAREPTR-NEXT: %[[aligned:.*]] = llvm.insertvalue %[[arg]], %[[base0]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[val0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[val0:.*]] = llvm.mlir.constant(0 : index) : i64
// BAREPTR-NEXT: %[[ins0:.*]] = llvm.insertvalue %[[val0]], %[[aligned]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[val1:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[val1:.*]] = llvm.mlir.constant(32 : index) : i64
// BAREPTR-NEXT: %[[ins1:.*]] = llvm.insertvalue %[[val1]], %[[ins0]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[val2:.*]] = llvm.mlir.constant(18 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[val2:.*]] = llvm.mlir.constant(18 : index) : i64
// BAREPTR-NEXT: %[[ins2:.*]] = llvm.insertvalue %[[val2]], %[[ins1]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[val3:.*]] = llvm.mlir.constant(18 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[val3:.*]] = llvm.mlir.constant(18 : index) : i64
// BAREPTR-NEXT: %[[ins3:.*]] = llvm.insertvalue %[[val3]], %[[ins2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[val4:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[val4:.*]] = llvm.mlir.constant(1 : index) : i64
// BAREPTR-NEXT: %[[ins4:.*]] = llvm.insertvalue %[[val4]], %[[ins3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// BAREPTR-NEXT: %[[base1:.*]] = llvm.extractvalue %[[ins4]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// BAREPTR-NEXT: llvm.return %[[base1]] : !llvm.ptr<float>
@@ -40,7 +40,7 @@ func @check_static_return(%static : memref<32x18xf32>) -> memref<32x18xf32> {
// CHECK-LABEL: func @check_static_return_with_offset
// CHECK-COUNT-2: !llvm.ptr<float>
-// CHECK-COUNT-5: !llvm.i64
+// CHECK-COUNT-5: i64
// CHECK-SAME: -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// BAREPTR-LABEL: func @check_static_return_with_offset
// BAREPTR-SAME: (%[[arg:.*]]: !llvm.ptr<float>) -> !llvm.ptr<float> {
@@ -50,15 +50,15 @@ func @check_static_return_with_offset(%static : memref<32x18xf32, offset:7, stri
// BAREPTR: %[[udf:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// BAREPTR-NEXT: %[[base0:.*]] = llvm.insertvalue %[[arg]], %[[udf]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// BAREPTR-NEXT: %[[aligned:.*]] = llvm.insertvalue %[[arg]], %[[base0]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[val0:.*]] = llvm.mlir.constant(7 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[val0:.*]] = llvm.mlir.constant(7 : index) : i64
// BAREPTR-NEXT: %[[ins0:.*]] = llvm.insertvalue %[[val0]], %[[aligned]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[val1:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[val1:.*]] = llvm.mlir.constant(32 : index) : i64
// BAREPTR-NEXT: %[[ins1:.*]] = llvm.insertvalue %[[val1]], %[[ins0]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[val2:.*]] = llvm.mlir.constant(22 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[val2:.*]] = llvm.mlir.constant(22 : index) : i64
// BAREPTR-NEXT: %[[ins2:.*]] = llvm.insertvalue %[[val2]], %[[ins1]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[val3:.*]] = llvm.mlir.constant(18 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[val3:.*]] = llvm.mlir.constant(18 : index) : i64
// BAREPTR-NEXT: %[[ins3:.*]] = llvm.insertvalue %[[val3]], %[[ins2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[val4:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[val4:.*]] = llvm.mlir.constant(1 : index) : i64
// BAREPTR-NEXT: %[[ins4:.*]] = llvm.insertvalue %[[val4]], %[[ins3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// BAREPTR-NEXT: %[[base1:.*]] = llvm.extractvalue %[[ins4]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// BAREPTR-NEXT: llvm.return %[[base1]] : !llvm.ptr<float>
@@ -70,28 +70,28 @@ func @check_static_return_with_offset(%static : memref<32x18xf32, offset:7, stri
// CHECK-LABEL: func @zero_d_alloc() -> !llvm.struct<(ptr<float>, ptr<float>, i64)> {
// BAREPTR-LABEL: func @zero_d_alloc() -> !llvm.ptr<float> {
func @zero_d_alloc() -> memref<f32> {
-// CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
-// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to !llvm.i64
-// CHECK-NEXT: llvm.call @malloc(%[[size_bytes]]) : (!llvm.i64) -> !llvm.ptr<i8>
+// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// CHECK-NEXT: llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr<i8>
// CHECK-NEXT: %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<float>
// CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64)>
// CHECK-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
// CHECK-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-// CHECK-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+// CHECK-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK-NEXT: llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-// BAREPTR-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
// BAREPTR-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
-// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to !llvm.i64
-// BAREPTR-NEXT: llvm.call @malloc(%[[size_bytes]]) : (!llvm.i64) -> !llvm.ptr<i8>
+// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// BAREPTR-NEXT: llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr<i8>
// BAREPTR-NEXT: %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<float>
// BAREPTR-NEXT: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64)>
// BAREPTR-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
// BAREPTR-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-// BAREPTR-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
// BAREPTR-NEXT: llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
%0 = alloc() : memref<f32>
return %0 : memref<f32>
@@ -118,48 +118,48 @@ func @zero_d_dealloc(%arg0: memref<f32>) {
// CHECK-LABEL: func @aligned_1d_alloc(
// BAREPTR-LABEL: func @aligned_1d_alloc(
func @aligned_1d_alloc() -> memref<42xf32> {
-// CHECK-NEXT: %[[sz1:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64
-// CHECK-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK-NEXT: %[[sz1:.*]] = llvm.mlir.constant(42 : index) : i64
+// CHECK-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
-// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to !llvm.i64
-// CHECK-NEXT: %[[alignment:.*]] = llvm.mlir.constant(8 : index) : !llvm.i64
-// CHECK-NEXT: %[[allocsize:.*]] = llvm.add %[[size_bytes]], %[[alignment]] : !llvm.i64
-// CHECK-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) : (!llvm.i64) -> !llvm.ptr<i8>
+// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// CHECK-NEXT: %[[alignment:.*]] = llvm.mlir.constant(8 : index) : i64
+// CHECK-NEXT: %[[allocsize:.*]] = llvm.add %[[size_bytes]], %[[alignment]] : i64
+// CHECK-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) : (i64) -> !llvm.ptr<i8>
// CHECK-NEXT: %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<float>
-// CHECK-NEXT: %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr<float> to !llvm.i64
-// CHECK-NEXT: %[[one_1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: %[[bump:.*]] = llvm.sub %[[alignment]], %[[one_1]] : !llvm.i64
-// CHECK-NEXT: %[[bumped:.*]] = llvm.add %[[allocatedAsInt]], %[[bump]] : !llvm.i64
-// CHECK-NEXT: %[[mod:.*]] = llvm.urem %[[bumped]], %[[alignment]] : !llvm.i64
-// CHECK-NEXT: %[[aligned:.*]] = llvm.sub %[[bumped]], %[[mod]] : !llvm.i64
-// CHECK-NEXT: %[[alignedBitCast:.*]] = llvm.inttoptr %[[aligned]] : !llvm.i64 to !llvm.ptr<float>
+// CHECK-NEXT: %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr<float> to i64
+// CHECK-NEXT: %[[one_1:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: %[[bump:.*]] = llvm.sub %[[alignment]], %[[one_1]] : i64
+// CHECK-NEXT: %[[bumped:.*]] = llvm.add %[[allocatedAsInt]], %[[bump]] : i64
+// CHECK-NEXT: %[[mod:.*]] = llvm.urem %[[bumped]], %[[alignment]] : i64
+// CHECK-NEXT: %[[aligned:.*]] = llvm.sub %[[bumped]], %[[mod]] : i64
+// CHECK-NEXT: %[[alignedBitCast:.*]] = llvm.inttoptr %[[aligned]] : i64 to !llvm.ptr<float>
// CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK-NEXT: llvm.insertvalue %[[alignedBitCast]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-// CHECK-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+// CHECK-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK-NEXT: llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-// BAREPTR-NEXT: %[[sz1:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64
-// BAREPTR-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[sz1:.*]] = llvm.mlir.constant(42 : index) : i64
+// BAREPTR-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64
// BAREPTR-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
-// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to !llvm.i64
-// BAREPTR-NEXT: %[[alignment:.*]] = llvm.mlir.constant(8 : index) : !llvm.i64
-// BAREPTR-NEXT: %[[allocsize:.*]] = llvm.add %[[size_bytes]], %[[alignment]] : !llvm.i64
-// BAREPTR-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) : (!llvm.i64) -> !llvm.ptr<i8>
+// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// BAREPTR-NEXT: %[[alignment:.*]] = llvm.mlir.constant(8 : index) : i64
+// BAREPTR-NEXT: %[[allocsize:.*]] = llvm.add %[[size_bytes]], %[[alignment]] : i64
+// BAREPTR-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) : (i64) -> !llvm.ptr<i8>
// BAREPTR-NEXT: %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<float>
-// BAREPTR-NEXT: %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr<float> to !llvm.i64
-// BAREPTR-NEXT: %[[one_2:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// BAREPTR-NEXT: %[[bump:.*]] = llvm.sub %[[alignment]], %[[one_2]] : !llvm.i64
-// BAREPTR-NEXT: %[[bumped:.*]] = llvm.add %[[allocatedAsInt]], %[[bump]] : !llvm.i64
-// BAREPTR-NEXT: %[[mod:.*]] = llvm.urem %[[bumped]], %[[alignment]] : !llvm.i64
-// BAREPTR-NEXT: %[[aligned:.*]] = llvm.sub %[[bumped]], %[[mod]] : !llvm.i64
-// BAREPTR-NEXT: %[[alignedBitCast:.*]] = llvm.inttoptr %[[aligned]] : !llvm.i64 to !llvm.ptr<float>
+// BAREPTR-NEXT: %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr<float> to i64
+// BAREPTR-NEXT: %[[one_2:.*]] = llvm.mlir.constant(1 : index) : i64
+// BAREPTR-NEXT: %[[bump:.*]] = llvm.sub %[[alignment]], %[[one_2]] : i64
+// BAREPTR-NEXT: %[[bumped:.*]] = llvm.add %[[allocatedAsInt]], %[[bump]] : i64
+// BAREPTR-NEXT: %[[mod:.*]] = llvm.urem %[[bumped]], %[[alignment]] : i64
+// BAREPTR-NEXT: %[[aligned:.*]] = llvm.sub %[[bumped]], %[[mod]] : i64
+// BAREPTR-NEXT: %[[alignedBitCast:.*]] = llvm.inttoptr %[[aligned]] : i64 to !llvm.ptr<float>
// BAREPTR-NEXT: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
// BAREPTR-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
// BAREPTR-NEXT: llvm.insertvalue %[[alignedBitCast]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-// BAREPTR-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+// BAREPTR-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
// BAREPTR-NEXT: llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
%0 = alloc() {alignment = 8} : memref<42xf32>
return %0 : memref<42xf32>
@@ -170,18 +170,18 @@ func @aligned_1d_alloc() -> memref<42xf32> {
// CHECK-LABEL: func @static_alloc() -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
// BAREPTR-LABEL: func @static_alloc() -> !llvm.ptr<float> {
func @static_alloc() -> memref<32x18xf32> {
-// CHECK: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : !llvm.i64
+// CHECK: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64
// CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
-// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to !llvm.i64
-// CHECK-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) : (!llvm.i64) -> !llvm.ptr<i8>
+// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// CHECK-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr<i8>
// CHECK-NEXT: llvm.bitcast %[[allocated]] : !llvm.ptr<i8> to !llvm.ptr<float>
-// BAREPTR: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : !llvm.i64
+// BAREPTR: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64
// BAREPTR-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
-// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to !llvm.i64
-// BAREPTR-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) : (!llvm.i64) -> !llvm.ptr<i8>
+// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// BAREPTR-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr<i8>
// BAREPTR-NEXT: llvm.bitcast %[[allocated]] : !llvm.ptr<i8> to !llvm.ptr<float>
%0 = alloc() : memref<32x18xf32>
return %0 : memref<32x18xf32>
@@ -191,20 +191,20 @@ func @static_alloc() -> memref<32x18xf32> {
// CHECK-LABEL: func @static_alloca() -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
func @static_alloca() -> memref<32x18xf32> {
-// CHECK-NEXT: %[[sz1:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64
-// CHECK-NEXT: %[[sz2:.*]] = llvm.mlir.constant(18 : index) : !llvm.i64
-// CHECK-NEXT: %[[st2:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : !llvm.i64
+// CHECK-NEXT: %[[sz1:.*]] = llvm.mlir.constant(32 : index) : i64
+// CHECK-NEXT: %[[sz2:.*]] = llvm.mlir.constant(18 : index) : i64
+// CHECK-NEXT: %[[st2:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64
// CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
-// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to !llvm.i64
-// CHECK-NEXT: %[[allocated:.*]] = llvm.alloca %[[size_bytes]] x !llvm.float : (!llvm.i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// CHECK-NEXT: %[[allocated:.*]] = llvm.alloca %[[size_bytes]] x !llvm.float : (i64) -> !llvm.ptr<float>
%0 = alloca() : memref<32x18xf32>
// Test with explicitly specified alignment. llvm.alloca takes care of the
// alignment. The same pointer is thus used for allocation and aligned
// accesses.
- // CHECK: %[[alloca_aligned:.*]] = llvm.alloca %{{.*}} x !llvm.float {alignment = 32 : i64} : (!llvm.i64) -> !llvm.ptr<float>
+ // CHECK: %[[alloca_aligned:.*]] = llvm.alloca %{{.*}} x !llvm.float {alignment = 32 : i64} : (i64) -> !llvm.ptr<float>
// CHECK: %[[desc:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[desc1:.*]] = llvm.insertvalue %[[alloca_aligned]], %[[desc]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.insertvalue %[[alloca_aligned]], %[[desc1]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
@@ -246,24 +246,24 @@ func @zero_d_load(%arg0: memref<f32>) -> f32 {
// CHECK-LABEL: func @static_load(
// CHECK-COUNT-2: !llvm.ptr<float>,
-// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i64
-// CHECK: %[[I:.*]]: !llvm.i64,
-// CHECK: %[[J:.*]]: !llvm.i64)
+// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64
+// CHECK: %[[I:.*]]: i64,
+// CHECK: %[[J:.*]]: i64)
// BAREPTR-LABEL: func @static_load
-// BAREPTR-SAME: (%[[A:.*]]: !llvm.ptr<float>, %[[I:.*]]: !llvm.i64, %[[J:.*]]: !llvm.i64) {
+// BAREPTR-SAME: (%[[A:.*]]: !llvm.ptr<float>, %[[I:.*]]: i64, %[[J:.*]]: i64) {
func @static_load(%static : memref<10x42xf32>, %i : index, %j : index) {
// CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64
-// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64
-// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64
-// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64
+// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
+// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
+// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
// CHECK-NEXT: llvm.load %[[addr]] : !llvm.ptr<float>
// BAREPTR: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64
-// BAREPTR-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64
-// BAREPTR-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64
-// BAREPTR-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+// BAREPTR-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64
+// BAREPTR-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
+// BAREPTR-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
+// BAREPTR-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
// BAREPTR-NEXT: llvm.load %[[addr]] : !llvm.ptr<float>
%0 = load %static[%i, %j] : memref<10x42xf32>
return
@@ -289,30 +289,30 @@ func @zero_d_store(%arg0: memref<f32>, %arg1: f32) {
// CHECK-LABEL: func @static_store
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>
-// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: i64
// BAREPTR-LABEL: func @static_store
// BAREPTR-SAME: %[[A:.*]]: !llvm.ptr<float>
-// BAREPTR-SAME: %[[I:[a-zA-Z0-9]*]]: !llvm.i64
-// BAREPTR-SAME: %[[J:[a-zA-Z0-9]*]]: !llvm.i64
+// BAREPTR-SAME: %[[I:[a-zA-Z0-9]*]]: i64
+// BAREPTR-SAME: %[[J:[a-zA-Z0-9]*]]: i64
func @static_store(%static : memref<10x42xf32>, %i : index, %j : index, %val : f32) {
// CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64
-// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64
-// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64
-// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+// CHECK-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64
+// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
+// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
+// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
// CHECK-NEXT: llvm.store %{{.*}}, %[[addr]] : !llvm.ptr<float>
// BAREPTR: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64
-// BAREPTR-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64
-// BAREPTR-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64
-// BAREPTR-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+// BAREPTR-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64
+// BAREPTR-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
+// BAREPTR-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
+// BAREPTR-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
// BAREPTR-NEXT: llvm.store %{{.*}}, %[[addr]] : !llvm.ptr<float>
store %val, %static[%i, %j] : memref<10x42xf32>
return
@@ -323,25 +323,25 @@ func @static_store(%static : memref<10x42xf32>, %i : index, %j : index, %val : f
// CHECK-LABEL: func @static_memref_dim
// BAREPTR-LABEL: func @static_memref_dim(%{{.*}}: !llvm.ptr<float>) {
func @static_memref_dim(%static : memref<42x32x15x13x27xf32>) {
-// CHECK: llvm.mlir.constant(42 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(42 : index) : i64
// BAREPTR: llvm.insertvalue %{{.*}}, %{{.*}}[4, 4] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-// BAREPTR: llvm.mlir.constant(42 : index) : !llvm.i64
+// BAREPTR: llvm.mlir.constant(42 : index) : i64
%c0 = constant 0 : index
%0 = dim %static, %c0 : memref<42x32x15x13x27xf32>
-// CHECK: llvm.mlir.constant(32 : index) : !llvm.i64
-// BAREPTR: llvm.mlir.constant(32 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(32 : index) : i64
+// BAREPTR: llvm.mlir.constant(32 : index) : i64
%c1 = constant 1 : index
%1 = dim %static, %c1 : memref<42x32x15x13x27xf32>
-// CHECK: llvm.mlir.constant(15 : index) : !llvm.i64
-// BAREPTR: llvm.mlir.constant(15 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(15 : index) : i64
+// BAREPTR: llvm.mlir.constant(15 : index) : i64
%c2 = constant 2 : index
%2 = dim %static, %c2 : memref<42x32x15x13x27xf32>
-// CHECK: llvm.mlir.constant(13 : index) : !llvm.i64
-// BAREPTR: llvm.mlir.constant(13 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(13 : index) : i64
+// BAREPTR: llvm.mlir.constant(13 : index) : i64
%c3 = constant 3 : index
%3 = dim %static, %c3 : memref<42x32x15x13x27xf32>
-// CHECK: llvm.mlir.constant(27 : index) : !llvm.i64
-// BAREPTR: llvm.mlir.constant(27 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(27 : index) : i64
+// BAREPTR: llvm.mlir.constant(27 : index) : i64
%c4 = constant 4 : index
%4 = dim %static, %c4 : memref<42x32x15x13x27xf32>
return
@@ -361,11 +361,11 @@ func @check_memref_func_call(%in : memref<10xi8>) -> memref<20xi8> {
// BAREPTR-NEXT: %[[desc0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
// BAREPTR-NEXT: %[[desc1:.*]] = llvm.insertvalue %[[call]], %[[desc0]][0] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
// BAREPTR-NEXT: %[[desc2:.*]] = llvm.insertvalue %[[call]], %[[desc1]][1] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
- // BAREPTR-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // BAREPTR-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
// BAREPTR-NEXT: %[[desc4:.*]] = llvm.insertvalue %[[c0]], %[[desc2]][2] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
- // BAREPTR-NEXT: %[[c20:.*]] = llvm.mlir.constant(20 : index) : !llvm.i64
+ // BAREPTR-NEXT: %[[c20:.*]] = llvm.mlir.constant(20 : index) : i64
// BAREPTR-NEXT: %[[desc6:.*]] = llvm.insertvalue %[[c20]], %[[desc4]][3, 0] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
- // BAREPTR-NEXT: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+ // BAREPTR-NEXT: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
// BAREPTR-NEXT: %[[outDesc:.*]] = llvm.insertvalue %[[c1]], %[[desc6]][4, 0] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
%res = call @foo(%in) : (memref<10xi8>) -> (memref<20xi8>)
// BAREPTR-NEXT: %[[res:.*]] = llvm.extractvalue %[[outDesc]][1] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
diff --git a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
index 2dcc3114b4d0..e43db3ca55bc 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
@@ -9,7 +9,7 @@ func @empty() {
return
}
-// CHECK-LABEL: llvm.func @body(!llvm.i64)
+// CHECK-LABEL: llvm.func @body(i64)
func private @body(index)
// CHECK-LABEL: func @simple_loop() {
@@ -21,38 +21,38 @@ func @simple_loop() {
br ^bb1
// CHECK-NEXT: ^bb1: // pred: ^bb0
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64)
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : i64
+// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64)
// CHECK32-NEXT: ^bb1: // pred: ^bb0
-// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i32
-// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : !llvm.i32
-// CHECK32-NEXT: llvm.br ^bb2({{.*}} : !llvm.i32)
+// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i32
+// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : i32
+// CHECK32-NEXT: llvm.br ^bb2({{.*}} : i32)
^bb1: // pred: ^bb0
%c1 = constant 1 : index
%c42 = constant 42 : index
br ^bb2(%c1 : index)
-// CHECK: ^bb2({{.*}}: !llvm.i64): // 2 preds: ^bb1, ^bb3
-// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64
+// CHECK: ^bb2({{.*}}: i64): // 2 preds: ^bb1, ^bb3
+// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64
// CHECK-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb4
-// CHECK32: ^bb2({{.*}}: !llvm.i32): // 2 preds: ^bb1, ^bb3
-// CHECK32-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i32
+// CHECK32: ^bb2({{.*}}: i32): // 2 preds: ^bb1, ^bb3
+// CHECK32-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i32
// CHECK32-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb4
^bb2(%0: index): // 2 preds: ^bb1, ^bb3
%1 = cmpi "slt", %0, %c42 : index
cond_br %1, ^bb3, ^bb4
// CHECK: ^bb3: // pred: ^bb2
-// CHECK-NEXT: llvm.call @body({{.*}}) : (!llvm.i64) -> ()
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64)
+// CHECK-NEXT: llvm.call @body({{.*}}) : (i64) -> ()
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64
+// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64)
// CHECK32: ^bb3: // pred: ^bb2
-// CHECK32-NEXT: llvm.call @body({{.*}}) : (!llvm.i32) -> ()
-// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i32
-// CHECK32-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i32
-// CHECK32-NEXT: llvm.br ^bb2({{.*}} : !llvm.i32)
+// CHECK32-NEXT: llvm.call @body({{.*}}) : (i32) -> ()
+// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i32
+// CHECK32-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i32
+// CHECK32-NEXT: llvm.br ^bb2({{.*}} : i32)
^bb3: // pred: ^bb2
call @body(%0) : (index) -> ()
%c1_0 = constant 1 : index
@@ -155,18 +155,18 @@ func @ml_caller() {
return
}
-// CHECK-LABEL: llvm.func @body_args(!llvm.i64) -> !llvm.i64
-// CHECK32-LABEL: llvm.func @body_args(!llvm.i32) -> !llvm.i32
+// CHECK-LABEL: llvm.func @body_args(i64) -> i64
+// CHECK32-LABEL: llvm.func @body_args(i32) -> i32
func private @body_args(index) -> index
-// CHECK-LABEL: llvm.func @other(!llvm.i64, !llvm.i32) -> !llvm.i32
-// CHECK32-LABEL: llvm.func @other(!llvm.i32, !llvm.i32) -> !llvm.i32
+// CHECK-LABEL: llvm.func @other(i64, i32) -> i32
+// CHECK32-LABEL: llvm.func @other(i32, i32) -> i32
func private @other(index, i32) -> i32
-// CHECK-LABEL: func @func_args(%arg0: !llvm.i32, %arg1: !llvm.i32) -> !llvm.i32 {
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : i32) : !llvm.i32
+// CHECK-LABEL: func @func_args(%arg0: i32, %arg1: i32) -> i32 {
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : i32) : i32
// CHECK-NEXT: llvm.br ^bb1
-// CHECK32-LABEL: func @func_args(%arg0: !llvm.i32, %arg1: !llvm.i32) -> !llvm.i32 {
-// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(0 : i32) : !llvm.i32
+// CHECK32-LABEL: func @func_args(%arg0: i32, %arg1: i32) -> i32 {
+// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(0 : i32) : i32
// CHECK32-NEXT: llvm.br ^bb1
func @func_args(i32, i32) -> i32 {
^bb0(%arg0: i32, %arg1: i32):
@@ -174,44 +174,44 @@ func @func_args(i32, i32) -> i32 {
br ^bb1
// CHECK-NEXT: ^bb1: // pred: ^bb0
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64)
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : i64
+// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64)
// CHECK32-NEXT: ^bb1: // pred: ^bb0
-// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : !llvm.i32
-// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : !llvm.i32
-// CHECK32-NEXT: llvm.br ^bb2({{.*}} : !llvm.i32)
+// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i32
+// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : i32
+// CHECK32-NEXT: llvm.br ^bb2({{.*}} : i32)
^bb1: // pred: ^bb0
%c0 = constant 0 : index
%c42 = constant 42 : index
br ^bb2(%c0 : index)
-// CHECK-NEXT: ^bb2({{.*}}: !llvm.i64): // 2 preds: ^bb1, ^bb3
-// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64
+// CHECK-NEXT: ^bb2({{.*}}: i64): // 2 preds: ^bb1, ^bb3
+// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64
// CHECK-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb4
-// CHECK32-NEXT: ^bb2({{.*}}: !llvm.i32): // 2 preds: ^bb1, ^bb3
-// CHECK32-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i32
+// CHECK32-NEXT: ^bb2({{.*}}: i32): // 2 preds: ^bb1, ^bb3
+// CHECK32-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i32
// CHECK32-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb4
^bb2(%0: index): // 2 preds: ^bb1, ^bb3
%1 = cmpi "slt", %0, %c42 : index
cond_br %1, ^bb3, ^bb4
// CHECK-NEXT: ^bb3: // pred: ^bb2
-// CHECK-NEXT: {{.*}} = llvm.call @body_args({{.*}}) : (!llvm.i64) -> !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg0) : (!llvm.i64, !llvm.i32) -> !llvm.i32
-// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (!llvm.i64, !llvm.i32) -> !llvm.i32
-// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg1) : (!llvm.i64, !llvm.i32) -> !llvm.i32
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64)
+// CHECK-NEXT: {{.*}} = llvm.call @body_args({{.*}}) : (i64) -> i64
+// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg0) : (i64, i32) -> i32
+// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (i64, i32) -> i32
+// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg1) : (i64, i32) -> i32
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64
+// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64)
// CHECK32-NEXT: ^bb3: // pred: ^bb2
-// CHECK32-NEXT: {{.*}} = llvm.call @body_args({{.*}}) : (!llvm.i32) -> !llvm.i32
-// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg0) : (!llvm.i32, !llvm.i32) -> !llvm.i32
-// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (!llvm.i32, !llvm.i32) -> !llvm.i32
-// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.i32
-// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i32
-// CHECK32-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i32
-// CHECK32-NEXT: llvm.br ^bb2({{.*}} : !llvm.i32)
+// CHECK32-NEXT: {{.*}} = llvm.call @body_args({{.*}}) : (i32) -> i32
+// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg0) : (i32, i32) -> i32
+// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (i32, i32) -> i32
+// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg1) : (i32, i32) -> i32
+// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i32
+// CHECK32-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i32
+// CHECK32-NEXT: llvm.br ^bb2({{.*}} : i32)
^bb3: // pred: ^bb2
%2 = call @body_args(%0) : (index) -> index
%3 = call @other(%2, %arg0) : (index, i32) -> i32
@@ -222,29 +222,29 @@ func @func_args(i32, i32) -> i32 {
br ^bb2(%6 : index)
// CHECK-NEXT: ^bb4: // pred: ^bb2
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (!llvm.i64, !llvm.i32) -> !llvm.i32
-// CHECK-NEXT: llvm.return {{.*}} : !llvm.i32
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (i64, i32) -> i32
+// CHECK-NEXT: llvm.return {{.*}} : i32
// CHECK32-NEXT: ^bb4: // pred: ^bb2
-// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : !llvm.i32
-// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (!llvm.i32, !llvm.i32) -> !llvm.i32
-// CHECK32-NEXT: llvm.return {{.*}} : !llvm.i32
+// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i32
+// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (i32, i32) -> i32
+// CHECK32-NEXT: llvm.return {{.*}} : i32
^bb4: // pred: ^bb2
%c0_0 = constant 0 : index
%7 = call @other(%c0_0, %c0_i32) : (index, i32) -> i32
return %7 : i32
}
-// CHECK-LABEL: llvm.func @pre(!llvm.i64)
-// CHECK32-LABEL: llvm.func @pre(!llvm.i32)
+// CHECK-LABEL: llvm.func @pre(i64)
+// CHECK32-LABEL: llvm.func @pre(i32)
func private @pre(index)
-// CHECK-LABEL: llvm.func @body2(!llvm.i64, !llvm.i64)
-// CHECK32-LABEL: llvm.func @body2(!llvm.i32, !llvm.i32)
+// CHECK-LABEL: llvm.func @body2(i64, i64)
+// CHECK32-LABEL: llvm.func @body2(i32, i32)
func private @body2(index, index)
-// CHECK-LABEL: llvm.func @post(!llvm.i64)
-// CHECK32-LABEL: llvm.func @post(!llvm.i32)
+// CHECK-LABEL: llvm.func @post(i64)
+// CHECK32-LABEL: llvm.func @post(i32)
func private @post(index)
// CHECK-LABEL: func @imperfectly_nested_loops() {
@@ -254,49 +254,49 @@ func @imperfectly_nested_loops() {
br ^bb1
// CHECK-NEXT: ^bb1: // pred: ^bb0
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64)
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : i64
+// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64)
^bb1: // pred: ^bb0
%c0 = constant 0 : index
%c42 = constant 42 : index
br ^bb2(%c0 : index)
-// CHECK-NEXT: ^bb2({{.*}}: !llvm.i64): // 2 preds: ^bb1, ^bb7
-// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64
+// CHECK-NEXT: ^bb2({{.*}}: i64): // 2 preds: ^bb1, ^bb7
+// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64
// CHECK-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb8
^bb2(%0: index): // 2 preds: ^bb1, ^bb7
%1 = cmpi "slt", %0, %c42 : index
cond_br %1, ^bb3, ^bb8
// CHECK-NEXT: ^bb3:
-// CHECK-NEXT: llvm.call @pre({{.*}}) : (!llvm.i64) -> ()
+// CHECK-NEXT: llvm.call @pre({{.*}}) : (i64) -> ()
// CHECK-NEXT: llvm.br ^bb4
^bb3: // pred: ^bb2
call @pre(%0) : (index) -> ()
br ^bb4
// CHECK-NEXT: ^bb4: // pred: ^bb3
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(7 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(56 : index) : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb5({{.*}} : !llvm.i64)
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(7 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(56 : index) : i64
+// CHECK-NEXT: llvm.br ^bb5({{.*}} : i64)
^bb4: // pred: ^bb3
%c7 = constant 7 : index
%c56 = constant 56 : index
br ^bb5(%c7 : index)
-// CHECK-NEXT: ^bb5({{.*}}: !llvm.i64): // 2 preds: ^bb4, ^bb6
-// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64
+// CHECK-NEXT: ^bb5({{.*}}: i64): // 2 preds: ^bb4, ^bb6
+// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64
// CHECK-NEXT: llvm.cond_br {{.*}}, ^bb6, ^bb7
^bb5(%2: index): // 2 preds: ^bb4, ^bb6
%3 = cmpi "slt", %2, %c56 : index
cond_br %3, ^bb6, ^bb7
// CHECK-NEXT: ^bb6: // pred: ^bb5
-// CHECK-NEXT: llvm.call @body2({{.*}}, {{.*}}) : (!llvm.i64, !llvm.i64) -> ()
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb5({{.*}} : !llvm.i64)
+// CHECK-NEXT: llvm.call @body2({{.*}}, {{.*}}) : (i64, i64) -> ()
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(2 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64
+// CHECK-NEXT: llvm.br ^bb5({{.*}} : i64)
^bb6: // pred: ^bb5
call @body2(%0, %2) : (index, index) -> ()
%c2 = constant 2 : index
@@ -304,10 +304,10 @@ func @imperfectly_nested_loops() {
br ^bb5(%4 : index)
// CHECK-NEXT: ^bb7: // pred: ^bb5
-// CHECK-NEXT: llvm.call @post({{.*}}) : (!llvm.i64) -> ()
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64)
+// CHECK-NEXT: llvm.call @post({{.*}}) : (i64) -> ()
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64
+// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64)
^bb7: // pred: ^bb5
call @post(%0) : (index) -> ()
%c1 = constant 1 : index
@@ -320,57 +320,57 @@ func @imperfectly_nested_loops() {
return
}
-// CHECK-LABEL: llvm.func @mid(!llvm.i64)
+// CHECK-LABEL: llvm.func @mid(i64)
func private @mid(index)
-// CHECK-LABEL: llvm.func @body3(!llvm.i64, !llvm.i64)
+// CHECK-LABEL: llvm.func @body3(i64, i64)
func private @body3(index, index)
// A complete function transformation check.
// CHECK-LABEL: func @more_imperfectly_nested_loops() {
// CHECK-NEXT: llvm.br ^bb1
// CHECK-NEXT:^bb1: // pred: ^bb0
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64)
-// CHECK-NEXT:^bb2({{.*}}: !llvm.i64): // 2 preds: ^bb1, ^bb11
-// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : i64
+// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64)
+// CHECK-NEXT:^bb2({{.*}}: i64): // 2 preds: ^bb1, ^bb11
+// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64
// CHECK-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb12
// CHECK-NEXT:^bb3: // pred: ^bb2
-// CHECK-NEXT: llvm.call @pre({{.*}}) : (!llvm.i64) -> ()
+// CHECK-NEXT: llvm.call @pre({{.*}}) : (i64) -> ()
// CHECK-NEXT: llvm.br ^bb4
// CHECK-NEXT:^bb4: // pred: ^bb3
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(7 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(56 : index) : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb5({{.*}} : !llvm.i64)
-// CHECK-NEXT:^bb5({{.*}}: !llvm.i64): // 2 preds: ^bb4, ^bb6
-// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(7 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(56 : index) : i64
+// CHECK-NEXT: llvm.br ^bb5({{.*}} : i64)
+// CHECK-NEXT:^bb5({{.*}}: i64): // 2 preds: ^bb4, ^bb6
+// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64
// CHECK-NEXT: llvm.cond_br {{.*}}, ^bb6, ^bb7
// CHECK-NEXT:^bb6: // pred: ^bb5
-// CHECK-NEXT: llvm.call @body2({{.*}}, {{.*}}) : (!llvm.i64, !llvm.i64) -> ()
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb5({{.*}} : !llvm.i64)
+// CHECK-NEXT: llvm.call @body2({{.*}}, {{.*}}) : (i64, i64) -> ()
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(2 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64
+// CHECK-NEXT: llvm.br ^bb5({{.*}} : i64)
// CHECK-NEXT:^bb7: // pred: ^bb5
-// CHECK-NEXT: llvm.call @mid({{.*}}) : (!llvm.i64) -> ()
+// CHECK-NEXT: llvm.call @mid({{.*}}) : (i64) -> ()
// CHECK-NEXT: llvm.br ^bb8
// CHECK-NEXT:^bb8: // pred: ^bb7
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(18 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(37 : index) : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb9({{.*}} : !llvm.i64)
-// CHECK-NEXT:^bb9({{.*}}: !llvm.i64): // 2 preds: ^bb8, ^bb10
-// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(18 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(37 : index) : i64
+// CHECK-NEXT: llvm.br ^bb9({{.*}} : i64)
+// CHECK-NEXT:^bb9({{.*}}: i64): // 2 preds: ^bb8, ^bb10
+// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64
// CHECK-NEXT: llvm.cond_br {{.*}}, ^bb10, ^bb11
// CHECK-NEXT:^bb10: // pred: ^bb9
-// CHECK-NEXT: llvm.call @body3({{.*}}, {{.*}}) : (!llvm.i64, !llvm.i64) -> ()
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb9({{.*}} : !llvm.i64)
+// CHECK-NEXT: llvm.call @body3({{.*}}, {{.*}}) : (i64, i64) -> ()
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(3 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64
+// CHECK-NEXT: llvm.br ^bb9({{.*}} : i64)
// CHECK-NEXT:^bb11: // pred: ^bb9
-// CHECK-NEXT: llvm.call @post({{.*}}) : (!llvm.i64) -> ()
-// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64
-// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64)
+// CHECK-NEXT: llvm.call @post({{.*}}) : (i64) -> ()
+// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64
+// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64)
// CHECK-NEXT:^bb12: // pred: ^bb2
// CHECK-NEXT: llvm.return
// CHECK-NEXT: }
@@ -423,7 +423,7 @@ func @more_imperfectly_nested_loops() {
return
}
-// CHECK-LABEL: llvm.func @get_i64() -> !llvm.i64
+// CHECK-LABEL: llvm.func @get_i64() -> i64
func private @get_i64() -> (i64)
// CHECK-LABEL: llvm.func @get_f32() -> !llvm.float
func private @get_f32() -> (f32)
@@ -441,10 +441,10 @@ func private @get_memref() -> (memref<42x?x10x?xf32>)
// CHECK32-LABEL: llvm.func @multireturn() -> !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>)> {
func @multireturn() -> (i64, f32, memref<42x?x10x?xf32>) {
^bb0:
-// CHECK-NEXT: {{.*}} = llvm.call @get_i64() : () -> !llvm.i64
+// CHECK-NEXT: {{.*}} = llvm.call @get_i64() : () -> i64
// CHECK-NEXT: {{.*}} = llvm.call @get_f32() : () -> !llvm.float
// CHECK-NEXT: {{.*}} = llvm.call @get_memref() : () -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>
-// CHECK32-NEXT: {{.*}} = llvm.call @get_i64() : () -> !llvm.i64
+// CHECK32-NEXT: {{.*}} = llvm.call @get_i64() : () -> i64
// CHECK32-NEXT: {{.*}} = llvm.call @get_f32() : () -> !llvm.float
// CHECK32-NEXT: {{.*}} = llvm.call @get_memref() : () -> !llvm.struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>
%0 = call @get_i64() : () -> (i64)
@@ -478,7 +478,7 @@ func @multireturn_caller() {
// CHECK32-NEXT: {{.*}} = llvm.extractvalue {{.*}}[2] : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>)>
%0:3 = call @multireturn() : () -> (i64, f32, memref<42x?x10x?xf32>)
%1 = constant 42 : i64
-// CHECK: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64
+// CHECK: {{.*}} = llvm.add {{.*}}, {{.*}} : i64
%2 = addi %0#0, %1 : i64
%3 = constant 42.0 : f32
// CHECK: {{.*}} = llvm.fadd {{.*}}, {{.*}} : !llvm.float
@@ -525,29 +525,29 @@ func @ops(f32, f32, i32, i32, f64) -> (f32, i32) {
^bb0(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32, %arg4: f64):
// CHECK-NEXT: %0 = llvm.fsub %arg0, %arg1 : !llvm.float
%0 = subf %arg0, %arg1: f32
-// CHECK-NEXT: %1 = llvm.sub %arg2, %arg3 : !llvm.i32
+// CHECK-NEXT: %1 = llvm.sub %arg2, %arg3 : i32
%1 = subi %arg2, %arg3: i32
-// CHECK-NEXT: %2 = llvm.icmp "slt" %arg2, %1 : !llvm.i32
+// CHECK-NEXT: %2 = llvm.icmp "slt" %arg2, %1 : i32
%2 = cmpi "slt", %arg2, %1 : i32
-// CHECK-NEXT: %3 = llvm.sdiv %arg2, %arg3 : !llvm.i32
+// CHECK-NEXT: %3 = llvm.sdiv %arg2, %arg3 : i32
%3 = divi_signed %arg2, %arg3 : i32
-// CHECK-NEXT: %4 = llvm.udiv %arg2, %arg3 : !llvm.i32
+// CHECK-NEXT: %4 = llvm.udiv %arg2, %arg3 : i32
%4 = divi_unsigned %arg2, %arg3 : i32
-// CHECK-NEXT: %5 = llvm.srem %arg2, %arg3 : !llvm.i32
+// CHECK-NEXT: %5 = llvm.srem %arg2, %arg3 : i32
%5 = remi_signed %arg2, %arg3 : i32
-// CHECK-NEXT: %6 = llvm.urem %arg2, %arg3 : !llvm.i32
+// CHECK-NEXT: %6 = llvm.urem %arg2, %arg3 : i32
%6 = remi_unsigned %arg2, %arg3 : i32
-// CHECK-NEXT: %7 = llvm.select %2, %arg2, %arg3 : !llvm.i1, !llvm.i32
+// CHECK-NEXT: %7 = llvm.select %2, %arg2, %arg3 : i1, i32
%7 = select %2, %arg2, %arg3 : i32
// CHECK-NEXT: %8 = llvm.fdiv %arg0, %arg1 : !llvm.float
%8 = divf %arg0, %arg1 : f32
// CHECK-NEXT: %9 = llvm.frem %arg0, %arg1 : !llvm.float
%9 = remf %arg0, %arg1 : f32
-// CHECK-NEXT: %10 = llvm.and %arg2, %arg3 : !llvm.i32
+// CHECK-NEXT: %10 = llvm.and %arg2, %arg3 : i32
%10 = and %arg2, %arg3 : i32
-// CHECK-NEXT: %11 = llvm.or %arg2, %arg3 : !llvm.i32
+// CHECK-NEXT: %11 = llvm.or %arg2, %arg3 : i32
%11 = or %arg2, %arg3 : i32
-// CHECK-NEXT: %12 = llvm.xor %arg2, %arg3 : !llvm.i32
+// CHECK-NEXT: %12 = llvm.xor %arg2, %arg3 : i32
%12 = xor %arg2, %arg3 : i32
// CHECK-NEXT: %13 = "llvm.intr.exp"(%arg0) : (!llvm.float) -> !llvm.float
%13 = std.exp %arg0 : f32
@@ -555,11 +555,11 @@ func @ops(f32, f32, i32, i32, f64) -> (f32, i32) {
%14 = std.exp2 %arg0 : f32
// CHECK-NEXT: %15 = llvm.mlir.constant(7.900000e-01 : f64) : !llvm.double
%15 = constant 7.9e-01 : f64
-// CHECK-NEXT: %16 = llvm.shl %arg2, %arg3 : !llvm.i32
+// CHECK-NEXT: %16 = llvm.shl %arg2, %arg3 : i32
%16 = shift_left %arg2, %arg3 : i32
-// CHECK-NEXT: %17 = llvm.ashr %arg2, %arg3 : !llvm.i32
+// CHECK-NEXT: %17 = llvm.ashr %arg2, %arg3 : i32
%17 = shift_right_signed %arg2, %arg3 : i32
-// CHECK-NEXT: %18 = llvm.lshr %arg2, %arg3 : !llvm.i32
+// CHECK-NEXT: %18 = llvm.lshr %arg2, %arg3 : i32
%18 = shift_right_unsigned %arg2, %arg3 : i32
// CHECK-NEXT: %{{[0-9]+}} = "llvm.intr.sqrt"(%arg0) : (!llvm.float) -> !llvm.float
%19 = std.sqrt %arg0 : f32
@@ -573,9 +573,9 @@ func @ops(f32, f32, i32, i32, f64) -> (f32, i32) {
// make this test dependent on the pointer size on the target system.
// CHECK-LABEL: @index_cast
func @index_cast(%arg0: index, %arg1: i1) {
-// CHECK-NEXT: = llvm.trunc %arg0 : !llvm.i{{.*}} to !llvm.i1
+// CHECK-NEXT: = llvm.trunc %arg0 : i{{.*}} to i1
%0 = index_cast %arg0: index to i1
-// CHECK-NEXT: = llvm.sext %arg1 : !llvm.i1 to !llvm.i{{.*}}
+// CHECK-NEXT: = llvm.sext %arg1 : i1 to i{{.*}}
%1 = index_cast %arg1: i1 to index
return
}
@@ -583,13 +583,13 @@ func @index_cast(%arg0: index, %arg1: i1) {
// Checking conversion of signed integer types to floating point.
// CHECK-LABEL: @sitofp
func @sitofp(%arg0 : i32, %arg1 : i64) {
-// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.i32 to !llvm.float
+// CHECK-NEXT: = llvm.sitofp {{.*}} : i32 to !llvm.float
%0 = sitofp %arg0: i32 to f32
-// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.i32 to !llvm.double
+// CHECK-NEXT: = llvm.sitofp {{.*}} : i32 to !llvm.double
%1 = sitofp %arg0: i32 to f64
-// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.i64 to !llvm.float
+// CHECK-NEXT: = llvm.sitofp {{.*}} : i64 to !llvm.float
%2 = sitofp %arg1: i64 to f32
-// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.i64 to !llvm.double
+// CHECK-NEXT: = llvm.sitofp {{.*}} : i64 to !llvm.double
%3 = sitofp %arg1: i64 to f64
return
}
@@ -615,13 +615,13 @@ func @sitofp_vector(%arg0 : vector<2xi16>, %arg1 : vector<2xi32>, %arg2 : vector
// Checking conversion of unsigned integer types to floating point.
// CHECK-LABEL: @uitofp
func @uitofp(%arg0 : i32, %arg1 : i64) {
-// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.i32 to !llvm.float
+// CHECK-NEXT: = llvm.uitofp {{.*}} : i32 to !llvm.float
%0 = uitofp %arg0: i32 to f32
-// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.i32 to !llvm.double
+// CHECK-NEXT: = llvm.uitofp {{.*}} : i32 to !llvm.double
%1 = uitofp %arg0: i32 to f64
-// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.i64 to !llvm.float
+// CHECK-NEXT: = llvm.uitofp {{.*}} : i64 to !llvm.float
%2 = uitofp %arg1: i64 to f32
-// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.i64 to !llvm.double
+// CHECK-NEXT: = llvm.uitofp {{.*}} : i64 to !llvm.double
%3 = uitofp %arg1: i64 to f64
return
}
@@ -653,13 +653,13 @@ func @fpext_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>) {
// Checking conversion of floating point to integer types.
// CHECK-LABEL: @fptosi
func @fptosi(%arg0 : f32, %arg1 : f64) {
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.float to !llvm.i32
+// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.float to i32
%0 = fptosi %arg0: f32 to i32
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.float to !llvm.i64
+// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.float to i64
%1 = fptosi %arg0: f32 to i64
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.double to !llvm.i32
+// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.double to i32
%2 = fptosi %arg1: f64 to i32
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.double to !llvm.i64
+// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.double to i64
%3 = fptosi %arg1: f64 to i64
return
}
@@ -685,13 +685,13 @@ func @fptosi_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>, %arg2 : vector
// Checking conversion of floating point to integer types.
// CHECK-LABEL: @fptoui
func @fptoui(%arg0 : f32, %arg1 : f64) {
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.float to !llvm.i32
+// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.float to i32
%0 = fptoui %arg0: f32 to i32
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.float to !llvm.i64
+// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.float to i64
%1 = fptoui %arg0: f32 to i64
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.double to !llvm.i32
+// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.double to i32
%2 = fptoui %arg1: f64 to i32
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.double to !llvm.i64
+// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.double to i64
%3 = fptoui %arg1: f64 to i64
return
}
@@ -759,27 +759,27 @@ func @fptrunc_vector(%arg0 : vector<2xf32>, %arg1 : vector<2xf64>) {
// Check sign and zero extension and truncation of integers.
// CHECK-LABEL: @integer_extension_and_truncation
func @integer_extension_and_truncation() {
-// CHECK-NEXT: %0 = llvm.mlir.constant(-3 : i3) : !llvm.i3
+// CHECK-NEXT: %0 = llvm.mlir.constant(-3 : i3) : i3
%0 = constant 5 : i3
-// CHECK-NEXT: = llvm.sext %0 : !llvm.i3 to !llvm.i6
+// CHECK-NEXT: = llvm.sext %0 : i3 to i6
%1 = sexti %0 : i3 to i6
-// CHECK-NEXT: = llvm.zext %0 : !llvm.i3 to !llvm.i6
+// CHECK-NEXT: = llvm.zext %0 : i3 to i6
%2 = zexti %0 : i3 to i6
-// CHECK-NEXT: = llvm.trunc %0 : !llvm.i3 to !llvm.i2
+// CHECK-NEXT: = llvm.trunc %0 : i3 to i2
%3 = trunci %0 : i3 to i2
return
}
// CHECK-LABEL: @dfs_block_order
func @dfs_block_order(%arg0: i32) -> (i32) {
-// CHECK-NEXT: %[[CST:.*]] = llvm.mlir.constant(42 : i32) : !llvm.i32
+// CHECK-NEXT: %[[CST:.*]] = llvm.mlir.constant(42 : i32) : i32
%0 = constant 42 : i32
// CHECK-NEXT: llvm.br ^bb2
br ^bb2
// CHECK-NEXT: ^bb1:
-// CHECK-NEXT: %[[ADD:.*]] = llvm.add %arg0, %[[CST]] : !llvm.i32
-// CHECK-NEXT: llvm.return %[[ADD]] : !llvm.i32
+// CHECK-NEXT: %[[ADD:.*]] = llvm.add %arg0, %[[CST]] : i32
+// CHECK-NEXT: llvm.return %[[ADD]] : i32
^bb1:
%2 = addi %arg0, %0 : i32
return %2 : i32
@@ -860,29 +860,29 @@ func @splat(%a: vector<4xf32>, %b: f32) -> vector<4xf32> {
return %r : vector<4xf32>
}
// CHECK-NEXT: %[[UNDEF:[0-9]+]] = llvm.mlir.undef : !llvm.vec<4 x float>
-// CHECK-NEXT: %[[ZERO:[0-9]+]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK-NEXT: %[[V:[0-9]+]] = llvm.insertelement %[[ELT]], %[[UNDEF]][%[[ZERO]] : !llvm.i32] : !llvm.vec<4 x float>
+// CHECK-NEXT: %[[ZERO:[0-9]+]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK-NEXT: %[[V:[0-9]+]] = llvm.insertelement %[[ELT]], %[[UNDEF]][%[[ZERO]] : i32] : !llvm.vec<4 x float>
// CHECK-NEXT: %[[SPLAT:[0-9]+]] = llvm.shufflevector %[[V]], %[[UNDEF]] [0 : i32, 0 : i32, 0 : i32, 0 : i32]
// CHECK-NEXT: %[[SCALE:[0-9]+]] = llvm.fmul %[[A]], %[[SPLAT]] : !llvm.vec<4 x float>
// CHECK-NEXT: llvm.return %[[SCALE]] : !llvm.vec<4 x float>
// CHECK-LABEL: func @view(
-// CHECK: %[[ARG0:.*]]: !llvm.i64, %[[ARG1:.*]]: !llvm.i64, %[[ARG2:.*]]: !llvm.i64
+// CHECK: %[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: i64
func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
- // CHECK: llvm.mlir.constant(2048 : index) : !llvm.i64
+ // CHECK: llvm.mlir.constant(2048 : index) : i64
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
%0 = alloc() : memref<2048xi8>
// Test two dynamic sizes.
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[BASE_PTR:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
- // CHECK: %[[SHIFTED_BASE_PTR:.*]] = llvm.getelementptr %[[BASE_PTR]][%[[ARG2]]] : (!llvm.ptr<i8>, !llvm.i64) -> !llvm.ptr<i8>
+ // CHECK: %[[SHIFTED_BASE_PTR:.*]] = llvm.getelementptr %[[BASE_PTR]][%[[ARG2]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
// CHECK: %[[CAST_SHIFTED_BASE_PTR:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR]] : !llvm.ptr<i8> to !llvm.ptr<float>
// CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK: llvm.insertvalue %[[C0]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.insertvalue %[[ARG1]], %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
+ // CHECK: llvm.mlir.constant(1 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.insertvalue %[[ARG0]], %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.mul %{{.*}}, %[[ARG1]]
@@ -892,15 +892,15 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
// Test one dynamic size.
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[BASE_PTR_2:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
- // CHECK: %[[SHIFTED_BASE_PTR_2:.*]] = llvm.getelementptr %[[BASE_PTR_2]][%[[ARG2]]] : (!llvm.ptr<i8>, !llvm.i64) -> !llvm.ptr<i8>
+ // CHECK: %[[SHIFTED_BASE_PTR_2:.*]] = llvm.getelementptr %[[BASE_PTR_2]][%[[ARG2]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
// CHECK: %[[CAST_SHIFTED_BASE_PTR_2:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_2]] : !llvm.ptr<i8> to !llvm.ptr<float>
// CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_2]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[C0_2:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // CHECK: %[[C0_2:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK: llvm.insertvalue %[[C0_2]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.insertvalue %[[ARG1]], %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
+ // CHECK: llvm.mlir.constant(1 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64
+ // CHECK: llvm.mlir.constant(4 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.mul %{{.*}}, %[[ARG1]]
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
@@ -909,40 +909,40 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
// Test static sizes.
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[BASE_PTR_3:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
- // CHECK: %[[SHIFTED_BASE_PTR_3:.*]] = llvm.getelementptr %[[BASE_PTR_3]][%[[ARG2]]] : (!llvm.ptr<i8>, !llvm.i64) -> !llvm.ptr<i8>
+ // CHECK: %[[SHIFTED_BASE_PTR_3:.*]] = llvm.getelementptr %[[BASE_PTR_3]][%[[ARG2]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
// CHECK: %[[CAST_SHIFTED_BASE_PTR_3:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_3]] : !llvm.ptr<i8> to !llvm.ptr<float>
// CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_3]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[C0_3:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // CHECK: %[[C0_3:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK: llvm.insertvalue %[[C0_3]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64
+ // CHECK: llvm.mlir.constant(4 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
+ // CHECK: llvm.mlir.constant(1 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: llvm.mlir.constant(64 : index) : !llvm.i64
+ // CHECK: llvm.mlir.constant(64 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64
+ // CHECK: llvm.mlir.constant(4 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
%5 = view %0[%arg2][] : memref<2048xi8> to memref<64x4xf32>
// Test view memory space.
- // CHECK: llvm.mlir.constant(2048 : index) : !llvm.i64
+ // CHECK: llvm.mlir.constant(2048 : index) : i64
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<i8, 4>, ptr<i8, 4>, i64, array<1 x i64>, array<1 x i64>)>
%6 = alloc() : memref<2048xi8, 4>
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[BASE_PTR_4:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<i8, 4>, ptr<i8, 4>, i64, array<1 x i64>, array<1 x i64>)>
- // CHECK: %[[SHIFTED_BASE_PTR_4:.*]] = llvm.getelementptr %[[BASE_PTR_4]][%[[ARG2]]] : (!llvm.ptr<i8, 4>, !llvm.i64) -> !llvm.ptr<i8, 4>
+ // CHECK: %[[SHIFTED_BASE_PTR_4:.*]] = llvm.getelementptr %[[BASE_PTR_4]][%[[ARG2]]] : (!llvm.ptr<i8, 4>, i64) -> !llvm.ptr<i8, 4>
// CHECK: %[[CAST_SHIFTED_BASE_PTR_4:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_4]] : !llvm.ptr<i8, 4> to !llvm.ptr<float, 4>
// CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_4]], %{{.*}}[1] : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[C0_4:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // CHECK: %[[C0_4:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK: llvm.insertvalue %[[C0_4]], %{{.*}}[2] : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64
+ // CHECK: llvm.mlir.constant(4 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
+ // CHECK: llvm.mlir.constant(1 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: llvm.mlir.constant(64 : index) : !llvm.i64
+ // CHECK: llvm.mlir.constant(64 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64
+ // CHECK: llvm.mlir.constant(4 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
%7 = view %6[%arg2][] : memref<2048xi8, 4> to memref<64x4xf32, 4>
@@ -951,16 +951,16 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK-LABEL: func @subview(
// CHECK-COUNT-2: !llvm.ptr<float>,
-// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i64,
-// CHECK: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.i64,
-// CHECK: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.i64,
-// CHECK: %[[ARG2:.*]]: !llvm.i64)
+// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64,
+// CHECK: %[[ARG0:[a-zA-Z0-9]*]]: i64,
+// CHECK: %[[ARG1:[a-zA-Z0-9]*]]: i64,
+// CHECK: %[[ARG2:.*]]: i64)
// CHECK32-LABEL: func @subview(
// CHECK32-COUNT-2: !llvm.ptr<float>,
-// CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i32,
-// CHECK32: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.i32,
-// CHECK32: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.i32,
-// CHECK32: %[[ARG2:.*]]: !llvm.i32)
+// CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: i32,
+// CHECK32: %[[ARG0:[a-zA-Z0-9]*]]: i32,
+// CHECK32: %[[ARG1:[a-zA-Z0-9]*]]: i32,
+// CHECK32: %[[ARG2:.*]]: i32)
func @subview(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
// The last "insertvalue" that populates the memref descriptor from the function arguments.
// CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
@@ -974,16 +974,16 @@ func @subview(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index,
// CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i64
- // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i64
- // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i64
- // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i64
+ // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
+ // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i64
+ // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i64
+ // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i64
// CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i64
+ // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i64
// CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i64
+ // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
// CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
@@ -993,16 +993,16 @@ func @subview(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index,
// CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i32
- // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i32
- // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i32
- // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i32
+ // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32
+ // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i32
+ // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i32
+ // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i32
// CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i32
+ // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i32
// CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i32
+ // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32
%1 = subview %0[%arg0, %arg1][%arg0, %arg1][%arg0, %arg1] :
memref<64x4xf32, offset: 0, strides: [4, 1]>
@@ -1012,16 +1012,16 @@ func @subview(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index,
// CHECK-LABEL: func @subview_non_zero_addrspace(
// CHECK-COUNT-2: !llvm.ptr<float, 3>,
-// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i64,
-// CHECK: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.i64,
-// CHECK: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.i64,
-// CHECK: %[[ARG2:.*]]: !llvm.i64)
+// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64,
+// CHECK: %[[ARG0:[a-zA-Z0-9]*]]: i64,
+// CHECK: %[[ARG1:[a-zA-Z0-9]*]]: i64,
+// CHECK: %[[ARG2:.*]]: i64)
// CHECK32-LABEL: func @subview_non_zero_addrspace(
// CHECK32-COUNT-2: !llvm.ptr<float, 3>,
-// CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i32,
-// CHECK32: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.i32,
-// CHECK32: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.i32,
-// CHECK32: %[[ARG2:.*]]: !llvm.i32)
+// CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: i32,
+// CHECK32: %[[ARG0:[a-zA-Z0-9]*]]: i32,
+// CHECK32: %[[ARG1:[a-zA-Z0-9]*]]: i32,
+// CHECK32: %[[ARG2:.*]]: i32)
func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1], 3>, %arg0 : index, %arg1 : index, %arg2 : index) {
// The last "insertvalue" that populates the memref descriptor from the function arguments.
// CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
@@ -1035,16 +1035,16 @@ func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1
// CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i64
- // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i64
- // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i64
- // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i64
+ // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
+ // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i64
+ // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i64
+ // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i64
// CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i64
+ // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i64
// CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i64
+ // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
// CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float, 3> to !llvm.ptr<float, 3>
@@ -1054,16 +1054,16 @@ func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1
// CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i32
- // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i32
- // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i32
- // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i32
+ // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32
+ // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i32
+ // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i32
+ // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i32
// CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i32
+ // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i32
// CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i32
+ // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32
%1 = subview %0[%arg0, %arg1][%arg0, %arg1][%arg0, %arg1] :
memref<64x4xf32, offset: 0, strides: [4, 1], 3>
@@ -1074,25 +1074,25 @@ func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1
// CHECK-LABEL: func @subview_const_size(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
-// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG7:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG8:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG9:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG7:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG8:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG9:[a-zA-Z0-9]*]]: i64
// CHECK32-LABEL: func @subview_const_size(
// CHECK32-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
// CHECK32-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
-// CHECK32-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG7:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG8:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG9:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG7:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG8:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG9:[a-zA-Z0-9]*]]: i32
func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
// The last "insertvalue" that populates the memref descriptor from the function arguments.
// CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
@@ -1106,18 +1106,18 @@ func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg
// CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i64
- // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i64
- // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i64
- // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i64
+ // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i64
+ // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i64
+ // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i64
+ // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i64
// CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[CST2:.*]] = llvm.mlir.constant(2 : i64)
// CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[CST2]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i64
+ // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i64
// CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[CST4:.*]] = llvm.mlir.constant(4 : i64)
// CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[CST4]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i64
+ // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i64
// CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
@@ -1127,18 +1127,18 @@ func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg
// CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i32
- // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i32
- // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i32
- // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i32
+ // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i32
+ // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i32
+ // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i32
+ // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i32
// CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[CST2:.*]] = llvm.mlir.constant(2 : i64)
// CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[CST2]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i32
+ // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i32
// CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[CST4:.*]] = llvm.mlir.constant(4 : i64)
// CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[CST4]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i32
+ // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i32
// CHECK32: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
%1 = subview %0[%arg0, %arg1][4, 2][%arg0, %arg1] :
memref<64x4xf32, offset: 0, strides: [4, 1]>
@@ -1149,25 +1149,25 @@ func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg
// CHECK-LABEL: func @subview_const_stride(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
-// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG7:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG8:[a-zA-Z0-9]*]]: !llvm.i64
-// CHECK-SAME: %[[ARG9:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG7:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG8:[a-zA-Z0-9]*]]: i64
+// CHECK-SAME: %[[ARG9:[a-zA-Z0-9]*]]: i64
// CHECK32-LABEL: func @subview_const_stride(
// CHECK32-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
// CHECK32-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
-// CHECK32-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG7:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG8:[a-zA-Z0-9]*]]: !llvm.i32
-// CHECK32-SAME: %[[ARG9:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG7:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG8:[a-zA-Z0-9]*]]: i32
+// CHECK32-SAME: %[[ARG9:[a-zA-Z0-9]*]]: i32
func @subview_const_stride(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
// The last "insertvalue" that populates the memref descriptor from the function arguments.
// CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
@@ -1181,10 +1181,10 @@ func @subview_const_stride(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %a
// CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i64
- // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i64
- // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i64
- // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i64
+ // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i64
+ // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i64
+ // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i64
+ // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i64
// CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG8]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[CST2:.*]] = llvm.mlir.constant(2 : i64)
@@ -1200,10 +1200,10 @@ func @subview_const_stride(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %a
// CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i32
- // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i32
- // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i32
- // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i32
+ // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i32
+ // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i32
+ // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i32
+ // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i32
// CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG8]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[CST2:.*]] = llvm.mlir.constant(2 : i64)
@@ -1249,16 +1249,16 @@ func @subview_const_stride_and_offset(%0 : memref<64x4xf32, offset: 0, strides:
// CHECK-LABEL: func @subview_mixed_static_dynamic(
// CHECK-COUNT-2: !llvm.ptr<float>,
-// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i64,
-// CHECK: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.i64,
-// CHECK: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.i64,
-// CHECK: %[[ARG2:.*]]: !llvm.i64)
+// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64,
+// CHECK: %[[ARG0:[a-zA-Z0-9]*]]: i64,
+// CHECK: %[[ARG1:[a-zA-Z0-9]*]]: i64,
+// CHECK: %[[ARG2:.*]]: i64)
// CHECK32-LABEL: func @subview_mixed_static_dynamic(
// CHECK32-COUNT-2: !llvm.ptr<float>,
-// CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i32,
-// CHECK32: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.i32,
-// CHECK32: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.i32,
-// CHECK32: %[[ARG2:.*]]: !llvm.i32)
+// CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: i32,
+// CHECK32: %[[ARG0:[a-zA-Z0-9]*]]: i32,
+// CHECK32: %[[ARG1:[a-zA-Z0-9]*]]: i32,
+// CHECK32: %[[ARG2:.*]]: i32)
func @subview_mixed_static_dynamic(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
// The last "insertvalue" that populates the memref descriptor from the function arguments.
// CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
@@ -1272,19 +1272,19 @@ func @subview_mixed_static_dynamic(%0 : memref<64x4xf32, offset: 0, strides: [4,
// CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[OFFM1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE0]] : !llvm.i32
- // CHECK32: %[[OFFA1:.*]] = llvm.add %[[OFF]], %[[OFFM1]] : !llvm.i32
- // CHECK32: %[[CST8:.*]] = llvm.mlir.constant(8 : i64) : !llvm.i32
- // CHECK32: %[[OFFM2:.*]] = llvm.mul %[[CST8]], %[[STRIDE1]] : !llvm.i32
- // CHECK32: %[[OFFA2:.*]] = llvm.add %[[OFFA1]], %[[OFFM2]] : !llvm.i32
+ // CHECK32: %[[OFFM1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE0]] : i32
+ // CHECK32: %[[OFFA1:.*]] = llvm.add %[[OFF]], %[[OFFM1]] : i32
+ // CHECK32: %[[CST8:.*]] = llvm.mlir.constant(8 : i64) : i32
+ // CHECK32: %[[OFFM2:.*]] = llvm.mul %[[CST8]], %[[STRIDE1]] : i32
+ // CHECK32: %[[OFFA2:.*]] = llvm.add %[[OFFA1]], %[[OFFM2]] : i32
// CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFFA2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
// CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG2]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[CST1:.*]] = llvm.mlir.constant(1 : i64) : !llvm.i32
+ // CHECK32: %[[CST1:.*]] = llvm.mlir.constant(1 : i64) : i32
// CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[CST1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[CST62:.*]] = llvm.mlir.constant(62 : i64) : !llvm.i32
+ // CHECK32: %[[CST62:.*]] = llvm.mlir.constant(62 : i64) : i32
// CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[CST62]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
- // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i32
+ // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32
// CHECK32: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
%1 = subview %0[%arg1, 8][62, %arg2][%arg0, 1] :
memref<64x4xf32, offset: 0, strides: [4, 1]>
@@ -1323,20 +1323,20 @@ func @generic_atomic_rmw(%I : memref<10xi32>, %i : index) -> i32 {
atomic_yield %c1 : i32
}
// CHECK: [[init:%.*]] = llvm.load %{{.*}} : !llvm.ptr<i32>
- // CHECK-NEXT: llvm.br ^bb1([[init]] : !llvm.i32)
- // CHECK-NEXT: ^bb1([[loaded:%.*]]: !llvm.i32):
+ // CHECK-NEXT: llvm.br ^bb1([[init]] : i32)
+ // CHECK-NEXT: ^bb1([[loaded:%.*]]: i32):
// CHECK-NEXT: [[c1:%.*]] = llvm.mlir.constant(1 : i32)
// CHECK-NEXT: [[pair:%.*]] = llvm.cmpxchg %{{.*}}, [[loaded]], [[c1]]
- // CHECK-SAME: acq_rel monotonic : !llvm.i32
+ // CHECK-SAME: acq_rel monotonic : i32
// CHECK-NEXT: [[new:%.*]] = llvm.extractvalue [[pair]][0]
// CHECK-NEXT: [[ok:%.*]] = llvm.extractvalue [[pair]][1]
- // CHECK-NEXT: llvm.cond_br [[ok]], ^bb2, ^bb1([[new]] : !llvm.i32)
+ // CHECK-NEXT: llvm.cond_br [[ok]], ^bb2, ^bb1([[new]] : i32)
// CHECK-NEXT: ^bb2:
%c2 = constant 2 : i32
%add = addi %c2, %x : i32
return %add : i32
// CHECK-NEXT: [[c2:%.*]] = llvm.mlir.constant(2 : i32)
- // CHECK-NEXT: [[add:%.*]] = llvm.add [[c2]], [[new]] : !llvm.i32
+ // CHECK-NEXT: [[add:%.*]] = llvm.add [[c2]], [[new]] : i32
// CHECK-NEXT: llvm.return [[add]]
}
@@ -1345,12 +1345,12 @@ func @generic_atomic_rmw(%I : memref<10xi32>, %i : index) -> i32 {
// CHECK-LABEL: func @assume_alignment
func @assume_alignment(%0 : memref<4x4xf16>) {
// CHECK: %[[PTR:.*]] = llvm.extractvalue %[[MEMREF:.*]][1] : !llvm.struct<(ptr<half>, ptr<half>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK-NEXT: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
- // CHECK-NEXT: %[[MASK:.*]] = llvm.mlir.constant(15 : index) : !llvm.i64
- // CHECK-NEXT: %[[INT:.*]] = llvm.ptrtoint %[[PTR]] : !llvm.ptr<half> to !llvm.i64
- // CHECK-NEXT: %[[MASKED_PTR:.*]] = llvm.and %[[INT]], %[[MASK:.*]] : !llvm.i64
- // CHECK-NEXT: %[[CONDITION:.*]] = llvm.icmp "eq" %[[MASKED_PTR]], %[[ZERO]] : !llvm.i64
- // CHECK-NEXT: "llvm.intr.assume"(%[[CONDITION]]) : (!llvm.i1) -> ()
+ // CHECK-NEXT: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
+ // CHECK-NEXT: %[[MASK:.*]] = llvm.mlir.constant(15 : index) : i64
+ // CHECK-NEXT: %[[INT:.*]] = llvm.ptrtoint %[[PTR]] : !llvm.ptr<half> to i64
+ // CHECK-NEXT: %[[MASKED_PTR:.*]] = llvm.and %[[INT]], %[[MASK:.*]] : i64
+ // CHECK-NEXT: %[[CONDITION:.*]] = llvm.icmp "eq" %[[MASKED_PTR]], %[[ZERO]] : i64
+ // CHECK-NEXT: "llvm.intr.assume"(%[[CONDITION]]) : (i1) -> ()
assume_alignment %0, 16 : memref<4x4xf16>
return
}
@@ -1404,11 +1404,11 @@ func @bfloat(%arg0: bf16) -> bf16 {
// CHECK-LABEL: func @memref_index
// CHECK-SAME: %arg0: !llvm.ptr<i64>, %arg1: !llvm.ptr<i64>,
-// CHECK-SAME: %arg2: !llvm.i64, %arg3: !llvm.i64, %arg4: !llvm.i64)
+// CHECK-SAME: %arg2: i64, %arg3: i64, %arg4: i64)
// CHECK-SAME: -> !llvm.struct<(ptr<i64>, ptr<i64>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK32-LABEL: func @memref_index
// CHECK32-SAME: %arg0: !llvm.ptr<i32>, %arg1: !llvm.ptr<i32>,
-// CHECK32-SAME: %arg2: !llvm.i32, %arg3: !llvm.i32, %arg4: !llvm.i32)
+// CHECK32-SAME: %arg2: i32, %arg3: i32, %arg4: i32)
// CHECK32-SAME: -> !llvm.struct<(ptr<i32>, ptr<i32>, i32, array<1 x i32>, array<1 x i32>)>
func @memref_index(%arg0: memref<32xindex>) -> memref<32xindex> {
return %arg0 : memref<32xindex>
@@ -1434,8 +1434,8 @@ func @rank_of_ranked(%ranked: memref<?xi32>) {
%rank = rank %ranked : memref<?xi32>
return
}
-// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK32: llvm.mlir.constant(1 : index) : !llvm.i32
+// CHECK: llvm.mlir.constant(1 : index) : i64
+// CHECK32: llvm.mlir.constant(1 : index) : i32
// -----
@@ -1449,7 +1449,7 @@ func @dim_of_unranked(%unranked: memref<*xi32>) -> index {
// CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(i64, ptr<i8>)>
// CHECK-NEXT: llvm.insertvalue
// CHECK-NEXT: %[[UNRANKED_DESC:.*]] = llvm.insertvalue
-// CHECK-NEXT: %[[C0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+// CHECK-NEXT: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK-NEXT: %[[RANKED_DESC:.*]] = llvm.extractvalue %[[UNRANKED_DESC]][1]
// CHECK-SAME: : !llvm.struct<(i64, ptr<i8>)>
@@ -1457,21 +1457,21 @@ func @dim_of_unranked(%unranked: memref<*xi32>) -> index {
// CHECK-NEXT: %[[ZERO_D_DESC:.*]] = llvm.bitcast %[[RANKED_DESC]]
// CHECK-SAME: : !llvm.ptr<i8> to !llvm.ptr<struct<(ptr<i32>, ptr<i32>, i64)>>
-// CHECK-NEXT: %[[C2_i32:.*]] = llvm.mlir.constant(2 : i32) : !llvm.i32
-// CHECK-NEXT: %[[C0_:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+// CHECK-NEXT: %[[C2_i32:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK-NEXT: %[[C0_:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK-NEXT: %[[OFFSET_PTR:.*]] = llvm.getelementptr %[[ZERO_D_DESC]]{{\[}}
// CHECK-SAME: %[[C0_]], %[[C2_i32]]] : (!llvm.ptr<struct<(ptr<i32>, ptr<i32>,
-// CHECK-SAME: i64)>>, !llvm.i64, !llvm.i32) -> !llvm.ptr<i64>
+// CHECK-SAME: i64)>>, i64, i32) -> !llvm.ptr<i64>
-// CHECK-NEXT: %[[C1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: %[[INDEX_INC:.*]] = llvm.add %[[C1]], %[[C0]] : !llvm.i64
+// CHECK-NEXT: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: %[[INDEX_INC:.*]] = llvm.add %[[C1]], %[[C0]] : i64
// CHECK-NEXT: %[[SIZE_PTR:.*]] = llvm.getelementptr %[[OFFSET_PTR]]{{\[}}
-// CHECK-SAME: %[[INDEX_INC]]] : (!llvm.ptr<i64>, !llvm.i64) -> !llvm.ptr<i64>
+// CHECK-SAME: %[[INDEX_INC]]] : (!llvm.ptr<i64>, i64) -> !llvm.ptr<i64>
// CHECK-NEXT: %[[SIZE:.*]] = llvm.load %[[SIZE_PTR]] : !llvm.ptr<i64>
-// CHECK-NEXT: llvm.return %[[SIZE]] : !llvm.i64
+// CHECK-NEXT: llvm.return %[[SIZE]] : i64
// CHECK32: %[[SIZE:.*]] = llvm.load %{{.*}} : !llvm.ptr<i32>
-// CHECK32-NEXT: llvm.return %[[SIZE]] : !llvm.i32
+// CHECK32-NEXT: llvm.return %[[SIZE]] : i32
diff --git a/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir b/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
index acb894af1f38..0d346d4cfa59 100644
--- a/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
@@ -96,7 +96,7 @@ func @rsqrt_multidim_vector(%arg0 : vector<4x3xf32>) {
// Lowers `assert` to a function call to `abort` if the assertion is violated.
// CHECK: llvm.func @abort()
// CHECK-LABEL: @assert_test_function
-// CHECK-SAME: (%[[ARG:.*]]: !llvm.i1)
+// CHECK-SAME: (%[[ARG:.*]]: i1)
func @assert_test_function(%arg : i1) {
// CHECK: llvm.cond_br %[[ARG]], ^[[CONTINUATION_BLOCK:.*]], ^[[FAILURE_BLOCK:.*]]
// CHECK: ^[[CONTINUATION_BLOCK]]:
@@ -141,17 +141,17 @@ global_memref @gv2 : memref<2x3xf32> = dense<[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]>
// CHECK-LABEL: func @get_gv0_memref
func @get_gv0_memref() {
%0 = get_global_memref @gv0 : memref<2xf32>
- // CHECK: %[[DIM:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
- // CHECK: %[[STRIDE:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+ // CHECK: %[[DIM:.*]] = llvm.mlir.constant(2 : index) : i64
+ // CHECK: %[[STRIDE:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv0 : !llvm.ptr<array<2 x float>>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]]] : (!llvm.ptr<array<2 x float>>, !llvm.i64, !llvm.i64) -> !llvm.ptr<float>
- // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : !llvm.i64
- // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : !llvm.i64 to !llvm.ptr<float>
+ // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
+ // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]]] : (!llvm.ptr<array<2 x float>>, i64, i64) -> !llvm.ptr<float>
+ // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
+ // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr<float>
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
- // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK: llvm.insertvalue %[[OFFSET]], {{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: llvm.insertvalue %[[DIM]], {{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: llvm.insertvalue %[[STRIDE]], {{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
@@ -161,18 +161,18 @@ func @get_gv0_memref() {
// Test 2D memref.
// CHECK-LABEL: func @get_gv2_memref
func @get_gv2_memref() {
- // CHECK: %[[DIM0:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
- // CHECK: %[[DIM1:.*]] = llvm.mlir.constant(3 : index) : !llvm.i64
- // CHECK: %[[STRIDE1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+ // CHECK: %[[DIM0:.*]] = llvm.mlir.constant(2 : index) : i64
+ // CHECK: %[[DIM1:.*]] = llvm.mlir.constant(3 : index) : i64
+ // CHECK: %[[STRIDE1:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv2 : !llvm.ptr<array<2 x array<3 x float>>>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]], %[[ZERO]]] : (!llvm.ptr<array<2 x array<3 x float>>>, !llvm.i64, !llvm.i64, !llvm.i64) -> !llvm.ptr<float>
- // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : !llvm.i64
- // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : !llvm.i64 to !llvm.ptr<float>
+ // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
+ // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]], %[[ZERO]]] : (!llvm.ptr<array<2 x array<3 x float>>>, i64, i64, i64) -> !llvm.ptr<float>
+ // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
+ // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr<float>
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK: llvm.insertvalue %[[OFFSET]], {{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.insertvalue %[[DIM0]], {{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.insertvalue %[[DIM1]], {{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
@@ -190,14 +190,14 @@ global_memref @gv3 : memref<f32> = dense<1.0>
// CHECK-LABEL: func @get_gv3_memref
func @get_gv3_memref() {
// CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv3 : !llvm.ptr<float>
- // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]]] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
- // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : !llvm.i64
- // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : !llvm.i64 to !llvm.ptr<float>
+ // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
+ // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+ // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
+ // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr<float>
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64)>
// CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
// CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
- // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+ // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK: llvm.insertvalue %[[OFFSET]], {{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
%0 = get_global_memref @gv3 : memref<f32>
return
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
index ec05e349897a..973361899758 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
@@ -2,22 +2,22 @@
// RUN: mlir-opt %s --convert-vector-to-llvm='enable-index-optimizations=0' | FileCheck %s --check-prefix=CMP64
// CMP32-LABEL: llvm.func @genbool_var_1d(
-// CMP32-SAME: %[[A:.*]]: !llvm.i64)
+// CMP32-SAME: %[[A:.*]]: i64)
// CMP32: %[[T0:.*]] = llvm.mlir.constant(dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]> : vector<11xi32>) : !llvm.vec<11 x i32>
-// CMP32: %[[T1:.*]] = llvm.trunc %[[A]] : !llvm.i64 to !llvm.i32
+// CMP32: %[[T1:.*]] = llvm.trunc %[[A]] : i64 to i32
// CMP32: %[[T2:.*]] = llvm.mlir.undef : !llvm.vec<11 x i32>
-// CMP32: %[[T3:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CMP32: %[[T4:.*]] = llvm.insertelement %[[T1]], %[[T2]][%[[T3]] : !llvm.i32] : !llvm.vec<11 x i32>
+// CMP32: %[[T3:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CMP32: %[[T4:.*]] = llvm.insertelement %[[T1]], %[[T2]][%[[T3]] : i32] : !llvm.vec<11 x i32>
// CMP32: %[[T5:.*]] = llvm.shufflevector %[[T4]], %[[T2]] [0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32] : !llvm.vec<11 x i32>, !llvm.vec<11 x i32>
// CMP32: %[[T6:.*]] = llvm.icmp "slt" %[[T0]], %[[T5]] : !llvm.vec<11 x i32>
// CMP32: llvm.return %[[T6]] : !llvm.vec<11 x i1>
// CMP64-LABEL: llvm.func @genbool_var_1d(
-// CMP64-SAME: %[[A:.*]]: !llvm.i64)
+// CMP64-SAME: %[[A:.*]]: i64)
// CMP64: %[[T0:.*]] = llvm.mlir.constant(dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]> : vector<11xi64>) : !llvm.vec<11 x i64>
// CMP64: %[[T1:.*]] = llvm.mlir.undef : !llvm.vec<11 x i64>
-// CMP64: %[[T2:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CMP64: %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : !llvm.i32] : !llvm.vec<11 x i64>
+// CMP64: %[[T2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CMP64: %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : i32] : !llvm.vec<11 x i64>
// CMP64: %[[T4:.*]] = llvm.shufflevector %[[T3]], %[[T1]] [0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32] : !llvm.vec<11 x i64>, !llvm.vec<11 x i64>
// CMP64: %[[T5:.*]] = llvm.icmp "slt" %[[T0]], %[[T4]] : !llvm.vec<11 x i64>
// CMP64: llvm.return %[[T5]] : !llvm.vec<11 x i1>
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index ab4948a56791..0a2d94fcf702 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -7,8 +7,8 @@ func @broadcast_vec1d_from_scalar(%arg0: f32) -> vector<2xf32> {
// CHECK-LABEL: llvm.func @broadcast_vec1d_from_scalar(
// CHECK-SAME: %[[A:.*]]: !llvm.float)
// CHECK: %[[T0:.*]] = llvm.mlir.undef : !llvm.vec<2 x float>
-// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T2:.*]] = llvm.insertelement %[[A]], %[[T0]][%[[T1]] : !llvm.i32] : !llvm.vec<2 x float>
+// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T2:.*]] = llvm.insertelement %[[A]], %[[T0]][%[[T1]] : i32] : !llvm.vec<2 x float>
// CHECK: %[[T3:.*]] = llvm.shufflevector %[[T2]], %[[T0]] [0 : i32, 0 : i32] : !llvm.vec<2 x float>, !llvm.vec<2 x float>
// CHECK: llvm.return %[[T3]] : !llvm.vec<2 x float>
@@ -20,8 +20,8 @@ func @broadcast_vec2d_from_scalar(%arg0: f32) -> vector<2x3xf32> {
// CHECK-SAME: %[[A:.*]]: !llvm.float)
// CHECK: %[[T0:.*]] = llvm.mlir.undef : !llvm.array<2 x vec<3 x float>>
// CHECK: %[[T1:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T2:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : i32] : !llvm.vec<3 x float>
// CHECK: %[[T4:.*]] = llvm.shufflevector %[[T3]], %[[T3]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
// CHECK: %[[T5:.*]] = llvm.insertvalue %[[T4]], %[[T0]][0] : !llvm.array<2 x vec<3 x float>>
// CHECK: %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][1] : !llvm.array<2 x vec<3 x float>>
@@ -35,8 +35,8 @@ func @broadcast_vec3d_from_scalar(%arg0: f32) -> vector<2x3x4xf32> {
// CHECK-SAME: %[[A:.*]]: !llvm.float)
// CHECK: %[[T0:.*]] = llvm.mlir.undef : !llvm.array<2 x array<3 x vec<4 x float>>>
// CHECK: %[[T1:.*]] = llvm.mlir.undef : !llvm.vec<4 x float>
-// CHECK: %[[T2:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : !llvm.i32] : !llvm.vec<4 x float>
+// CHECK: %[[T2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : i32] : !llvm.vec<4 x float>
// CHECK: %[[T4:.*]] = llvm.shufflevector %[[T3]], %[[T3]] [0 : i32, 0 : i32, 0 : i32, 0 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
// CHECK: %[[T5:.*]] = llvm.insertvalue %[[T4]], %[[T0]][0, 0] : !llvm.array<2 x array<3 x vec<4 x float>>>
// CHECK: %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][0, 1] : !llvm.array<2 x array<3 x vec<4 x float>>>
@@ -102,11 +102,11 @@ func @broadcast_stretch(%arg0: vector<1xf32>) -> vector<4xf32> {
}
// CHECK-LABEL: llvm.func @broadcast_stretch(
// CHECK-SAME: %[[A:.*]]: !llvm.vec<1 x float>)
-// CHECK: %[[T0:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T1:.*]] = llvm.extractelement %[[A]][%[[T0]] : !llvm.i64] : !llvm.vec<1 x float>
+// CHECK: %[[T0:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T1:.*]] = llvm.extractelement %[[A]][%[[T0]] : i64] : !llvm.vec<1 x float>
// CHECK: %[[T2:.*]] = llvm.mlir.undef : !llvm.vec<4 x float>
-// CHECK: %[[T3:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T4:.*]] = llvm.insertelement %[[T1]], %[[T2]][%3 : !llvm.i32] : !llvm.vec<4 x float>
+// CHECK: %[[T3:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T4:.*]] = llvm.insertelement %[[T1]], %[[T2]][%3 : i32] : !llvm.vec<4 x float>
// CHECK: %[[T5:.*]] = llvm.shufflevector %[[T4]], %[[T2]] [0 : i32, 0 : i32, 0 : i32, 0 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
// CHECK: llvm.return %[[T5]] : !llvm.vec<4 x float>
@@ -131,35 +131,35 @@ func @broadcast_stretch_at_end(%arg0: vector<4x1xf32>) -> vector<4x3xf32> {
// CHECK-SAME: %[[A:.*]]: !llvm.array<4 x vec<1 x float>>)
// CHECK: %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<4x3xf32>) : !llvm.array<4 x vec<3 x float>>
// CHECK: %[[T1:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<4 x vec<1 x float>>
-// CHECK: %[[T2:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T3:.*]] = llvm.extractelement %[[T1]][%[[T2]] : !llvm.i64] : !llvm.vec<1 x float>
+// CHECK: %[[T2:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T3:.*]] = llvm.extractelement %[[T1]][%[[T2]] : i64] : !llvm.vec<1 x float>
// CHECK: %[[T4:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T5:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T6:.*]] = llvm.insertelement %[[T3]], %[[T4]][%[[T5]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T5:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T6:.*]] = llvm.insertelement %[[T3]], %[[T4]][%[[T5]] : i32] : !llvm.vec<3 x float>
// CHECK: %[[T7:.*]] = llvm.shufflevector %[[T6]], %[[T4]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
// CHECK: %[[T8:.*]] = llvm.insertvalue %[[T7]], %[[T0]][0] : !llvm.array<4 x vec<3 x float>>
// CHECK: %[[T9:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<4 x vec<1 x float>>
-// CHECK: %[[T10:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T11:.*]] = llvm.extractelement %[[T9]][%[[T10]] : !llvm.i64] : !llvm.vec<1 x float>
+// CHECK: %[[T10:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T11:.*]] = llvm.extractelement %[[T9]][%[[T10]] : i64] : !llvm.vec<1 x float>
// CHECK: %[[T12:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T13:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T14:.*]] = llvm.insertelement %[[T11]], %[[T12]][%[[T13]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T13:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T14:.*]] = llvm.insertelement %[[T11]], %[[T12]][%[[T13]] : i32] : !llvm.vec<3 x float>
// CHECK: %[[T15:.*]] = llvm.shufflevector %[[T14]], %[[T12]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
// CHECK: %[[T16:.*]] = llvm.insertvalue %[[T15]], %[[T8]][1] : !llvm.array<4 x vec<3 x float>>
// CHECK: %[[T17:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vec<1 x float>>
-// CHECK: %[[T18:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T19:.*]] = llvm.extractelement %[[T17]][%[[T18]] : !llvm.i64] : !llvm.vec<1 x float>
+// CHECK: %[[T18:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T19:.*]] = llvm.extractelement %[[T17]][%[[T18]] : i64] : !llvm.vec<1 x float>
// CHECK: %[[T20:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T21:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T22:.*]] = llvm.insertelement %[[T19]], %[[T20]][%[[T21]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T21:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T22:.*]] = llvm.insertelement %[[T19]], %[[T20]][%[[T21]] : i32] : !llvm.vec<3 x float>
// CHECK: %[[T23:.*]] = llvm.shufflevector %[[T22]], %[[T20]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
// CHECK: %[[T24:.*]] = llvm.insertvalue %[[T23]], %[[T16]][2] : !llvm.array<4 x vec<3 x float>>
// CHECK: %[[T25:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vec<1 x float>>
-// CHECK: %[[T26:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T27:.*]] = llvm.extractelement %[[T25]][%[[T26]] : !llvm.i64] : !llvm.vec<1 x float>
+// CHECK: %[[T26:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T27:.*]] = llvm.extractelement %[[T25]][%[[T26]] : i64] : !llvm.vec<1 x float>
// CHECK: %[[T28:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T29:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T30:.*]] = llvm.insertelement %[[T27]], %[[T28]][%[[T29]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T29:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T30:.*]] = llvm.insertelement %[[T27]], %[[T28]][%[[T29]] : i32] : !llvm.vec<3 x float>
// CHECK: %[[T31:.*]] = llvm.shufflevector %[[T30]], %[[T28]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
// CHECK: %[[T32:.*]] = llvm.insertvalue %[[T31]], %[[T24]][3] : !llvm.array<4 x vec<3 x float>>
// CHECK: llvm.return %[[T32]] : !llvm.array<4 x vec<3 x float>>
@@ -202,19 +202,19 @@ func @outerproduct(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<2x3xf32
// CHECK-SAME: %[[A:.*]]: !llvm.vec<2 x float>,
// CHECK-SAME: %[[B:.*]]: !llvm.vec<3 x float>)
// CHECK: %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<2x3xf32>)
-// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T2:.*]] = llvm.extractelement %[[A]][%[[T1]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T2:.*]] = llvm.extractelement %[[A]][%[[T1]] : i64] : !llvm.vec<2 x float>
// CHECK: %[[T3:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T5:.*]] = llvm.insertelement %[[T2]], %[[T3]][%4 : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T5:.*]] = llvm.insertelement %[[T2]], %[[T3]][%4 : i32] : !llvm.vec<3 x float>
// CHECK: %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T3]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
// CHECK: %[[T7:.*]] = llvm.fmul %[[T6]], %[[B]] : !llvm.vec<3 x float>
// CHECK: %[[T8:.*]] = llvm.insertvalue %[[T7]], %[[T0]][0] : !llvm.array<2 x vec<3 x float>>
-// CHECK: %[[T9:.*]] = llvm.mlir.constant(1 : i64) : !llvm.i64
-// CHECK: %[[T10:.*]] = llvm.extractelement %[[A]][%9 : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[T9:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[T10:.*]] = llvm.extractelement %[[A]][%9 : i64] : !llvm.vec<2 x float>
// CHECK: %[[T11:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T12:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T13:.*]] = llvm.insertelement %[[T10]], %[[T11]][%12 : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T12:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T13:.*]] = llvm.insertelement %[[T10]], %[[T11]][%12 : i32] : !llvm.vec<3 x float>
// CHECK: %[[T14:.*]] = llvm.shufflevector %[[T13]], %[[T11]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
// CHECK: %[[T15:.*]] = llvm.fmul %[[T14]], %[[B]] : !llvm.vec<3 x float>
// CHECK: %[[T16:.*]] = llvm.insertvalue %[[T15]], %[[T8]][1] : !llvm.array<2 x vec<3 x float>>
@@ -229,20 +229,20 @@ func @outerproduct_add(%arg0: vector<2xf32>, %arg1: vector<3xf32>, %arg2: vector
// CHECK-SAME: %[[B:.*]]: !llvm.vec<3 x float>,
// CHECK-SAME: %[[C:.*]]: !llvm.array<2 x vec<3 x float>>)
// CHECK: %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<2x3xf32>)
-// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T2:.*]] = llvm.extractelement %[[A]][%[[T1]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T2:.*]] = llvm.extractelement %[[A]][%[[T1]] : i64] : !llvm.vec<2 x float>
// CHECK: %[[T3:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T5:.*]] = llvm.insertelement %[[T2]], %[[T3]][%[[T4]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T5:.*]] = llvm.insertelement %[[T2]], %[[T3]][%[[T4]] : i32] : !llvm.vec<3 x float>
// CHECK: %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T3]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
// CHECK: %[[T7:.*]] = llvm.extractvalue %[[C]][0] : !llvm.array<2 x vec<3 x float>>
// CHECK: %[[T8:.*]] = "llvm.intr.fmuladd"(%[[T6]], %[[B]], %[[T7]]) : (!llvm.vec<3 x float>, !llvm.vec<3 x float>, !llvm.vec<3 x float>)
// CHECK: %[[T9:.*]] = llvm.insertvalue %[[T8]], %[[T0]][0] : !llvm.array<2 x vec<3 x float>>
-// CHECK: %[[T10:.*]] = llvm.mlir.constant(1 : i64) : !llvm.i64
-// CHECK: %[[T11:.*]] = llvm.extractelement %[[A]][%[[T10]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[T10:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[T11:.*]] = llvm.extractelement %[[A]][%[[T10]] : i64] : !llvm.vec<2 x float>
// CHECK: %[[T12:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T13:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T14:.*]] = llvm.insertelement %[[T11]], %[[T12]][%[[T13]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T13:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T14:.*]] = llvm.insertelement %[[T11]], %[[T12]][%[[T13]] : i32] : !llvm.vec<3 x float>
// CHECK: %[[T15:.*]] = llvm.shufflevector %[[T14]], %[[T12]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
// CHECK: %[[T16:.*]] = llvm.extractvalue %[[C]][1] : !llvm.array<2 x vec<3 x float>>
// CHECK: %[[T17:.*]] = "llvm.intr.fmuladd"(%[[T15]], %[[B]], %[[T16]]) : (!llvm.vec<3 x float>, !llvm.vec<3 x float>, !llvm.vec<3 x float>)
@@ -267,26 +267,26 @@ func @shuffle_1D(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<5xf32> {
// CHECK-SAME: %[[A:.*]]: !llvm.vec<2 x float>,
// CHECK-SAME: %[[B:.*]]: !llvm.vec<3 x float>)
// CHECK: %[[u0:.*]] = llvm.mlir.undef : !llvm.vec<5 x float>
-// CHECK: %[[c2:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK: %[[e1:.*]] = llvm.extractelement %[[B]][%[[c2]] : !llvm.i64] : !llvm.vec<3 x float>
-// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[i1:.*]] = llvm.insertelement %[[e1]], %[[u0]][%[[c0]] : !llvm.i64] : !llvm.vec<5 x float>
-// CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[e2:.*]] = llvm.extractelement %[[B]][%[[c1]] : !llvm.i64] : !llvm.vec<3 x float>
-// CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[i2:.*]] = llvm.insertelement %[[e2]], %[[i1]][%[[c1]] : !llvm.i64] : !llvm.vec<5 x float>
-// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[e3:.*]] = llvm.extractelement %[[B]][%[[c0]] : !llvm.i64] : !llvm.vec<3 x float>
-// CHECK: %[[c2:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK: %[[i3:.*]] = llvm.insertelement %[[e3]], %[[i2]][%[[c2]] : !llvm.i64] : !llvm.vec<5 x float>
-// CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[e4:.*]] = llvm.extractelement %[[A]][%[[c1]] : !llvm.i64] : !llvm.vec<2 x float>
-// CHECK: %[[c3:.*]] = llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK: %[[i4:.*]] = llvm.insertelement %[[e4]], %[[i3]][%[[c3]] : !llvm.i64] : !llvm.vec<5 x float>
-// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[e5:.*]] = llvm.extractelement %[[A]][%[[c0]] : !llvm.i64] : !llvm.vec<2 x float>
-// CHECK: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64
-// CHECK: %[[i5:.*]] = llvm.insertelement %[[e5]], %[[i4]][%[[c4]] : !llvm.i64] : !llvm.vec<5 x float>
+// CHECK: %[[c2:.*]] = llvm.mlir.constant(2 : index) : i64
+// CHECK: %[[e1:.*]] = llvm.extractelement %[[B]][%[[c2]] : i64] : !llvm.vec<3 x float>
+// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[i1:.*]] = llvm.insertelement %[[e1]], %[[u0]][%[[c0]] : i64] : !llvm.vec<5 x float>
+// CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[e2:.*]] = llvm.extractelement %[[B]][%[[c1]] : i64] : !llvm.vec<3 x float>
+// CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[i2:.*]] = llvm.insertelement %[[e2]], %[[i1]][%[[c1]] : i64] : !llvm.vec<5 x float>
+// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[e3:.*]] = llvm.extractelement %[[B]][%[[c0]] : i64] : !llvm.vec<3 x float>
+// CHECK: %[[c2:.*]] = llvm.mlir.constant(2 : index) : i64
+// CHECK: %[[i3:.*]] = llvm.insertelement %[[e3]], %[[i2]][%[[c2]] : i64] : !llvm.vec<5 x float>
+// CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[e4:.*]] = llvm.extractelement %[[A]][%[[c1]] : i64] : !llvm.vec<2 x float>
+// CHECK: %[[c3:.*]] = llvm.mlir.constant(3 : index) : i64
+// CHECK: %[[i4:.*]] = llvm.insertelement %[[e4]], %[[i3]][%[[c3]] : i64] : !llvm.vec<5 x float>
+// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[e5:.*]] = llvm.extractelement %[[A]][%[[c0]] : i64] : !llvm.vec<2 x float>
+// CHECK: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64
+// CHECK: %[[i5:.*]] = llvm.insertelement %[[e5]], %[[i4]][%[[c4]] : i64] : !llvm.vec<5 x float>
// CHECK: llvm.return %[[i5]] : !llvm.vec<5 x float>
func @shuffle_2D(%a: vector<1x4xf32>, %b: vector<2x4xf32>) -> vector<3x4xf32> {
@@ -312,8 +312,8 @@ func @extract_element(%arg0: vector<16xf32>) -> f32 {
}
// CHECK-LABEL: llvm.func @extract_element(
// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x float>)
-// CHECK: %[[c:.*]] = llvm.mlir.constant(15 : i32) : !llvm.i32
-// CHECK: %[[x:.*]] = llvm.extractelement %[[A]][%[[c]] : !llvm.i32] : !llvm.vec<16 x float>
+// CHECK: %[[c:.*]] = llvm.mlir.constant(15 : i32) : i32
+// CHECK: %[[x:.*]] = llvm.extractelement %[[A]][%[[c]] : i32] : !llvm.vec<16 x float>
// CHECK: llvm.return %[[x]] : !llvm.float
func @extract_element_from_vec_1d(%arg0: vector<16xf32>) -> f32 {
@@ -321,8 +321,8 @@ func @extract_element_from_vec_1d(%arg0: vector<16xf32>) -> f32 {
return %0 : f32
}
// CHECK-LABEL: llvm.func @extract_element_from_vec_1d
-// CHECK: llvm.mlir.constant(15 : i64) : !llvm.i64
-// CHECK: llvm.extractelement {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<16 x float>
+// CHECK: llvm.mlir.constant(15 : i64) : i64
+// CHECK: llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<16 x float>
// CHECK: llvm.return {{.*}} : !llvm.float
func @extract_vec_2d_from_vec_3d(%arg0: vector<4x3x16xf32>) -> vector<3x16xf32> {
@@ -347,8 +347,8 @@ func @extract_element_from_vec_3d(%arg0: vector<4x3x16xf32>) -> f32 {
}
// CHECK-LABEL: llvm.func @extract_element_from_vec_3d
// CHECK: llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vec<16 x float>>>
-// CHECK: llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: llvm.extractelement {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<16 x float>
+// CHECK: llvm.mlir.constant(0 : i64) : i64
+// CHECK: llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<16 x float>
// CHECK: llvm.return {{.*}} : !llvm.float
func @insert_element(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
@@ -359,8 +359,8 @@ func @insert_element(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
// CHECK-LABEL: llvm.func @insert_element(
// CHECK-SAME: %[[A:.*]]: !llvm.float,
// CHECK-SAME: %[[B:.*]]: !llvm.vec<4 x float>)
-// CHECK: %[[c:.*]] = llvm.mlir.constant(3 : i32) : !llvm.i32
-// CHECK: %[[x:.*]] = llvm.insertelement %[[A]], %[[B]][%[[c]] : !llvm.i32] : !llvm.vec<4 x float>
+// CHECK: %[[c:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK: %[[x:.*]] = llvm.insertelement %[[A]], %[[B]][%[[c]] : i32] : !llvm.vec<4 x float>
// CHECK: llvm.return %[[x]] : !llvm.vec<4 x float>
func @insert_element_into_vec_1d(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
@@ -368,8 +368,8 @@ func @insert_element_into_vec_1d(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf
return %0 : vector<4xf32>
}
// CHECK-LABEL: llvm.func @insert_element_into_vec_1d
-// CHECK: llvm.mlir.constant(3 : i64) : !llvm.i64
-// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<4 x float>
+// CHECK: llvm.mlir.constant(3 : i64) : i64
+// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
// CHECK: llvm.return {{.*}} : !llvm.vec<4 x float>
func @insert_vec_2d_into_vec_3d(%arg0: vector<8x16xf32>, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> {
@@ -394,8 +394,8 @@ func @insert_element_into_vec_3d(%arg0: f32, %arg1: vector<4x8x16xf32>) -> vecto
}
// CHECK-LABEL: llvm.func @insert_element_into_vec_3d
// CHECK: llvm.extractvalue {{.*}}[3, 7] : !llvm.array<4 x array<8 x vec<16 x float>>>
-// CHECK: llvm.mlir.constant(15 : i64) : !llvm.i64
-// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<16 x float>
+// CHECK: llvm.mlir.constant(15 : i64) : i64
+// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<16 x float>
// CHECK: llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vec<16 x float>>>
// CHECK: llvm.return {{.*}} : !llvm.array<4 x array<8 x vec<16 x float>>>
@@ -437,9 +437,9 @@ func @vector_print_scalar_i1(%arg0: i1) {
// Type "boolean" always uses zero extension.
//
// CHECK-LABEL: llvm.func @vector_print_scalar_i1(
-// CHECK-SAME: %[[A:.*]]: !llvm.i1)
-// CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i1 to !llvm.i64
-// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i1)
+// CHECK: %[[S:.*]] = llvm.zext %[[A]] : i1 to i64
+// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
func @vector_print_scalar_i4(%arg0: i4) {
@@ -447,9 +447,9 @@ func @vector_print_scalar_i4(%arg0: i4) {
return
}
// CHECK-LABEL: llvm.func @vector_print_scalar_i4(
-// CHECK-SAME: %[[A:.*]]: !llvm.i4)
-// CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i4 to !llvm.i64
-// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i4)
+// CHECK: %[[S:.*]] = llvm.sext %[[A]] : i4 to i64
+// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
func @vector_print_scalar_si4(%arg0: si4) {
@@ -457,9 +457,9 @@ func @vector_print_scalar_si4(%arg0: si4) {
return
}
// CHECK-LABEL: llvm.func @vector_print_scalar_si4(
-// CHECK-SAME: %[[A:.*]]: !llvm.i4)
-// CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i4 to !llvm.i64
-// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i4)
+// CHECK: %[[S:.*]] = llvm.sext %[[A]] : i4 to i64
+// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
func @vector_print_scalar_ui4(%arg0: ui4) {
@@ -467,9 +467,9 @@ func @vector_print_scalar_ui4(%arg0: ui4) {
return
}
// CHECK-LABEL: llvm.func @vector_print_scalar_ui4(
-// CHECK-SAME: %[[A:.*]]: !llvm.i4)
-// CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i4 to !llvm.i64
-// CHECK: llvm.call @printU64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i4)
+// CHECK: %[[S:.*]] = llvm.zext %[[A]] : i4 to i64
+// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
func @vector_print_scalar_i32(%arg0: i32) {
@@ -477,9 +477,9 @@ func @vector_print_scalar_i32(%arg0: i32) {
return
}
// CHECK-LABEL: llvm.func @vector_print_scalar_i32(
-// CHECK-SAME: %[[A:.*]]: !llvm.i32)
-// CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i32 to !llvm.i64
-// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i32)
+// CHECK: %[[S:.*]] = llvm.sext %[[A]] : i32 to i64
+// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
func @vector_print_scalar_ui32(%arg0: ui32) {
@@ -487,18 +487,18 @@ func @vector_print_scalar_ui32(%arg0: ui32) {
return
}
// CHECK-LABEL: llvm.func @vector_print_scalar_ui32(
-// CHECK-SAME: %[[A:.*]]: !llvm.i32)
-// CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i32 to !llvm.i64
-// CHECK: llvm.call @printU64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i32)
+// CHECK: %[[S:.*]] = llvm.zext %[[A]] : i32 to i64
+// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> ()
func @vector_print_scalar_i40(%arg0: i40) {
vector.print %arg0 : i40
return
}
// CHECK-LABEL: llvm.func @vector_print_scalar_i40(
-// CHECK-SAME: %[[A:.*]]: !llvm.i40)
-// CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i40 to !llvm.i64
-// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i40)
+// CHECK: %[[S:.*]] = llvm.sext %[[A]] : i40 to i64
+// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
func @vector_print_scalar_si40(%arg0: si40) {
@@ -506,9 +506,9 @@ func @vector_print_scalar_si40(%arg0: si40) {
return
}
// CHECK-LABEL: llvm.func @vector_print_scalar_si40(
-// CHECK-SAME: %[[A:.*]]: !llvm.i40)
-// CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i40 to !llvm.i64
-// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i40)
+// CHECK: %[[S:.*]] = llvm.sext %[[A]] : i40 to i64
+// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
func @vector_print_scalar_ui40(%arg0: ui40) {
@@ -516,9 +516,9 @@ func @vector_print_scalar_ui40(%arg0: ui40) {
return
}
// CHECK-LABEL: llvm.func @vector_print_scalar_ui40(
-// CHECK-SAME: %[[A:.*]]: !llvm.i40)
-// CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i40 to !llvm.i64
-// CHECK: llvm.call @printU64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i40)
+// CHECK: %[[S:.*]] = llvm.zext %[[A]] : i40 to i64
+// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
func @vector_print_scalar_i64(%arg0: i64) {
@@ -526,8 +526,8 @@ func @vector_print_scalar_i64(%arg0: i64) {
return
}
// CHECK-LABEL: llvm.func @vector_print_scalar_i64(
-// CHECK-SAME: %[[A:.*]]: !llvm.i64)
-// CHECK: llvm.call @printI64(%[[A]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i64)
+// CHECK: llvm.call @printI64(%[[A]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
func @vector_print_scalar_ui64(%arg0: ui64) {
@@ -535,8 +535,8 @@ func @vector_print_scalar_ui64(%arg0: ui64) {
return
}
// CHECK-LABEL: llvm.func @vector_print_scalar_ui64(
-// CHECK-SAME: %[[A:.*]]: !llvm.i64)
-// CHECK: llvm.call @printU64(%[[A]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i64)
+// CHECK: llvm.call @printU64(%[[A]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
func @vector_print_scalar_index(%arg0: index) {
@@ -544,8 +544,8 @@ func @vector_print_scalar_index(%arg0: index) {
return
}
// CHECK-LABEL: llvm.func @vector_print_scalar_index(
-// CHECK-SAME: %[[A:.*]]: !llvm.i64)
-// CHECK: llvm.call @printU64(%[[A]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i64)
+// CHECK: llvm.call @printU64(%[[A]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
func @vector_print_scalar_f32(%arg0: f32) {
@@ -575,23 +575,23 @@ func @vector_print_vector(%arg0: vector<2x2xf32>) {
// CHECK: llvm.call @printOpen() : () -> ()
// CHECK: %[[x0:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<2 x vec<2 x float>>
// CHECK: llvm.call @printOpen() : () -> ()
-// CHECK: %[[x1:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[x2:.*]] = llvm.extractelement %[[x0]][%[[x1]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[x1:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[x2:.*]] = llvm.extractelement %[[x0]][%[[x1]] : i64] : !llvm.vec<2 x float>
// CHECK: llvm.call @printF32(%[[x2]]) : (!llvm.float) -> ()
// CHECK: llvm.call @printComma() : () -> ()
-// CHECK: %[[x3:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[x4:.*]] = llvm.extractelement %[[x0]][%[[x3]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[x3:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[x4:.*]] = llvm.extractelement %[[x0]][%[[x3]] : i64] : !llvm.vec<2 x float>
// CHECK: llvm.call @printF32(%[[x4]]) : (!llvm.float) -> ()
// CHECK: llvm.call @printClose() : () -> ()
// CHECK: llvm.call @printComma() : () -> ()
// CHECK: %[[x5:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<2 x vec<2 x float>>
// CHECK: llvm.call @printOpen() : () -> ()
-// CHECK: %[[x6:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[x7:.*]] = llvm.extractelement %[[x5]][%[[x6]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[x6:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[x7:.*]] = llvm.extractelement %[[x5]][%[[x6]] : i64] : !llvm.vec<2 x float>
// CHECK: llvm.call @printF32(%[[x7]]) : (!llvm.float) -> ()
// CHECK: llvm.call @printComma() : () -> ()
-// CHECK: %[[x8:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[x9:.*]] = llvm.extractelement %[[x5]][%[[x8]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[x8:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[x9:.*]] = llvm.extractelement %[[x5]][%[[x8]] : i64] : !llvm.vec<2 x float>
// CHECK: llvm.call @printF32(%[[x9]]) : (!llvm.float) -> ()
// CHECK: llvm.call @printClose() : () -> ()
// CHECK: llvm.call @printClose() : () -> ()
@@ -652,30 +652,30 @@ func @insert_strided_slice2(%a: vector<2x2xf32>, %b: vector<4x4xf32>) -> vector<
// CHECK: llvm.extractvalue {{.*}}[0] : !llvm.array<2 x vec<2 x float>>
// CHECK-NEXT: llvm.extractvalue {{.*}}[2] : !llvm.array<4 x vec<4 x float>>
// Element @0 -> element @2
-// CHECK-NEXT: llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<2 x float>
-// CHECK-NEXT: llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<4 x float>
+// CHECK-NEXT: llvm.mlir.constant(0 : index) : i64
+// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x float>
+// CHECK-NEXT: llvm.mlir.constant(2 : index) : i64
+// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
// Element @1 -> element @3
-// CHECK-NEXT: llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<2 x float>
-// CHECK-NEXT: llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<4 x float>
+// CHECK-NEXT: llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x float>
+// CHECK-NEXT: llvm.mlir.constant(3 : index) : i64
+// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
// CHECK-NEXT: llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.array<4 x vec<4 x float>>
//
// Subvector vector<2xf32> @1 into vector<4xf32> @3
// CHECK: llvm.extractvalue {{.*}}[1] : !llvm.array<2 x vec<2 x float>>
// CHECK-NEXT: llvm.extractvalue {{.*}}[3] : !llvm.array<4 x vec<4 x float>>
// Element @0 -> element @2
-// CHECK-NEXT: llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<2 x float>
-// CHECK-NEXT: llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<4 x float>
+// CHECK-NEXT: llvm.mlir.constant(0 : index) : i64
+// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x float>
+// CHECK-NEXT: llvm.mlir.constant(2 : index) : i64
+// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
// Element @1 -> element @3
-// CHECK-NEXT: llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<2 x float>
-// CHECK-NEXT: llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<4 x float>
+// CHECK-NEXT: llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x float>
+// CHECK-NEXT: llvm.mlir.constant(3 : index) : i64
+// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
// CHECK-NEXT: llvm.insertvalue {{.*}}, {{.*}}[3] : !llvm.array<4 x vec<4 x float>>
func @insert_strided_slice3(%arg0: vector<2x4xf32>, %arg1: vector<16x4x8xf32>) -> vector<16x4x8xf32> {
@@ -689,41 +689,41 @@ func @insert_strided_slice3(%arg0: vector<2x4xf32>, %arg1: vector<16x4x8xf32>) -
// CHECK: %[[s0:.*]] = llvm.extractvalue %[[B]][0] : !llvm.array<16 x array<4 x vec<8 x float>>>
// CHECK: %[[s1:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<2 x vec<4 x float>>
// CHECK: %[[s2:.*]] = llvm.extractvalue %[[B]][0, 0] : !llvm.array<16 x array<4 x vec<8 x float>>>
-// CHECK: %[[s3:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[s4:.*]] = llvm.extractelement %[[s1]][%[[s3]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s5:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK: %[[s6:.*]] = llvm.insertelement %[[s4]], %[[s2]][%[[s5]] : !llvm.i64] : !llvm.vec<8 x float>
-// CHECK: %[[s7:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[s8:.*]] = llvm.extractelement %[[s1]][%[[s7]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s9:.*]] = llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK: %[[s10:.*]] = llvm.insertelement %[[s8]], %[[s6]][%[[s9]] : !llvm.i64] : !llvm.vec<8 x float>
-// CHECK: %[[s11:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK: %[[s12:.*]] = llvm.extractelement %[[s1]][%[[s11]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s13:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64
-// CHECK: %[[s14:.*]] = llvm.insertelement %[[s12]], %[[s10]][%[[s13]] : !llvm.i64] : !llvm.vec<8 x float>
-// CHECK: %[[s15:.*]] = llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK: %[[s16:.*]] = llvm.extractelement %[[s1]][%[[s15]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s17:.*]] = llvm.mlir.constant(5 : index) : !llvm.i64
-// CHECK: %[[s18:.*]] = llvm.insertelement %[[s16]], %[[s14]][%[[s17]] : !llvm.i64] : !llvm.vec<8 x float>
+// CHECK: %[[s3:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[s4:.*]] = llvm.extractelement %[[s1]][%[[s3]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s5:.*]] = llvm.mlir.constant(2 : index) : i64
+// CHECK: %[[s6:.*]] = llvm.insertelement %[[s4]], %[[s2]][%[[s5]] : i64] : !llvm.vec<8 x float>
+// CHECK: %[[s7:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[s8:.*]] = llvm.extractelement %[[s1]][%[[s7]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s9:.*]] = llvm.mlir.constant(3 : index) : i64
+// CHECK: %[[s10:.*]] = llvm.insertelement %[[s8]], %[[s6]][%[[s9]] : i64] : !llvm.vec<8 x float>
+// CHECK: %[[s11:.*]] = llvm.mlir.constant(2 : index) : i64
+// CHECK: %[[s12:.*]] = llvm.extractelement %[[s1]][%[[s11]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s13:.*]] = llvm.mlir.constant(4 : index) : i64
+// CHECK: %[[s14:.*]] = llvm.insertelement %[[s12]], %[[s10]][%[[s13]] : i64] : !llvm.vec<8 x float>
+// CHECK: %[[s15:.*]] = llvm.mlir.constant(3 : index) : i64
+// CHECK: %[[s16:.*]] = llvm.extractelement %[[s1]][%[[s15]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s17:.*]] = llvm.mlir.constant(5 : index) : i64
+// CHECK: %[[s18:.*]] = llvm.insertelement %[[s16]], %[[s14]][%[[s17]] : i64] : !llvm.vec<8 x float>
// CHECK: %[[s19:.*]] = llvm.insertvalue %[[s18]], %[[s0]][0] : !llvm.array<4 x vec<8 x float>>
// CHECK: %[[s20:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<2 x vec<4 x float>>
// CHECK: %[[s21:.*]] = llvm.extractvalue %[[B]][0, 1] : !llvm.array<16 x array<4 x vec<8 x float>>>
-// CHECK: %[[s22:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[s23:.*]] = llvm.extractelement %[[s20]][%[[s22]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s24:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK: %[[s25:.*]] = llvm.insertelement %[[s23]], %[[s21]][%[[s24]] : !llvm.i64] : !llvm.vec<8 x float>
-// CHECK: %[[s26:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[s27:.*]] = llvm.extractelement %[[s20]][%[[s26]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s28:.*]] = llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK: %[[s29:.*]] = llvm.insertelement %[[s27]], %[[s25]][%[[s28]] : !llvm.i64] : !llvm.vec<8 x float>
-// CHECK: %[[s30:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK: %[[s31:.*]] = llvm.extractelement %[[s20]][%[[s30]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s32:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64
-// CHECK: %[[s33:.*]] = llvm.insertelement %[[s31]], %[[s29]][%[[s32]] : !llvm.i64] : !llvm.vec<8 x float>
-// CHECK: %[[s34:.*]] = llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK: %[[s35:.*]] = llvm.extractelement %[[s20]][%[[s34]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s36:.*]] = llvm.mlir.constant(5 : index) : !llvm.i64
-// CHECK: %[[s37:.*]] = llvm.insertelement %[[s35]], %[[s33]][%[[s36]] : !llvm.i64] : !llvm.vec<8 x float>
+// CHECK: %[[s22:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[s23:.*]] = llvm.extractelement %[[s20]][%[[s22]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s24:.*]] = llvm.mlir.constant(2 : index) : i64
+// CHECK: %[[s25:.*]] = llvm.insertelement %[[s23]], %[[s21]][%[[s24]] : i64] : !llvm.vec<8 x float>
+// CHECK: %[[s26:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[s27:.*]] = llvm.extractelement %[[s20]][%[[s26]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s28:.*]] = llvm.mlir.constant(3 : index) : i64
+// CHECK: %[[s29:.*]] = llvm.insertelement %[[s27]], %[[s25]][%[[s28]] : i64] : !llvm.vec<8 x float>
+// CHECK: %[[s30:.*]] = llvm.mlir.constant(2 : index) : i64
+// CHECK: %[[s31:.*]] = llvm.extractelement %[[s20]][%[[s30]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s32:.*]] = llvm.mlir.constant(4 : index) : i64
+// CHECK: %[[s33:.*]] = llvm.insertelement %[[s31]], %[[s29]][%[[s32]] : i64] : !llvm.vec<8 x float>
+// CHECK: %[[s34:.*]] = llvm.mlir.constant(3 : index) : i64
+// CHECK: %[[s35:.*]] = llvm.extractelement %[[s20]][%[[s34]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s36:.*]] = llvm.mlir.constant(5 : index) : i64
+// CHECK: %[[s37:.*]] = llvm.insertelement %[[s35]], %[[s33]][%[[s36]] : i64] : !llvm.vec<8 x float>
// CHECK: %[[s38:.*]] = llvm.insertvalue %[[s37]], %[[s19]][1] : !llvm.array<4 x vec<8 x float>>
// CHECK: %[[s39:.*]] = llvm.insertvalue %[[s38]], %[[B]][0] : !llvm.array<16 x array<4 x vec<8 x float>>>
// CHECK: llvm.return %[[s39]] : !llvm.array<16 x array<4 x vec<8 x float>>>
@@ -807,7 +807,7 @@ func @reduce_i8(%arg0: vector<16xi8>) -> i8 {
// CHECK-LABEL: llvm.func @reduce_i8(
// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x i8>)
// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]])
-// CHECK: llvm.return %[[V]] : !llvm.i8
+// CHECK: llvm.return %[[V]] : i8
func @reduce_i32(%arg0: vector<16xi32>) -> i32 {
%0 = vector.reduction "add", %arg0 : vector<16xi32> into i32
@@ -816,7 +816,7 @@ func @reduce_i32(%arg0: vector<16xi32>) -> i32 {
// CHECK-LABEL: llvm.func @reduce_i32(
// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x i32>)
// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]])
-// CHECK: llvm.return %[[V]] : !llvm.i32
+// CHECK: llvm.return %[[V]] : i32
func @reduce_i64(%arg0: vector<16xi64>) -> i64 {
%0 = vector.reduction "add", %arg0 : vector<16xi64> into i64
@@ -825,7 +825,7 @@ func @reduce_i64(%arg0: vector<16xi64>) -> i64 {
// CHECK-LABEL: llvm.func @reduce_i64(
// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x i64>)
// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]])
-// CHECK: llvm.return %[[V]] : !llvm.i64
+// CHECK: llvm.return %[[V]] : i64
// 4x16 16x3 4x3
@@ -851,11 +851,11 @@ func @transfer_read_1d(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
return %f: vector<17xf32>
}
// CHECK-LABEL: func @transfer_read_1d
-// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: !llvm.i64) -> !llvm.vec<17 x float>
+// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x float>
//
// 1. Bitcast to vector form.
// CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+// CHECK-SAME: (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
// CHECK: %[[vecPtr:.*]] = llvm.bitcast %[[gep]] :
// CHECK-SAME: !llvm.ptr<float> to !llvm.ptr<vec<17 x float>>
// CHECK: %[[DIM:.*]] = llvm.extractvalue %{{.*}}[3, 0] :
@@ -867,11 +867,11 @@ func @transfer_read_1d(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
// CHECK-SAME: vector<17xi32>) : !llvm.vec<17 x i32>
//
// 3. Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
-// CHECK: %[[otrunc:.*]] = llvm.trunc %[[BASE]] : !llvm.i64 to !llvm.i32
+// CHECK: %[[otrunc:.*]] = llvm.trunc %[[BASE]] : i64 to i32
// CHECK: %[[offsetVec:.*]] = llvm.mlir.undef : !llvm.vec<17 x i32>
-// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[offsetVec2:.*]] = llvm.insertelement %[[otrunc]], %[[offsetVec]][%[[c0]] :
-// CHECK-SAME: !llvm.i32] : !llvm.vec<17 x i32>
+// CHECK-SAME: i32] : !llvm.vec<17 x i32>
// CHECK: %[[offsetVec3:.*]] = llvm.shufflevector %[[offsetVec2]], %{{.*}} [
// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
@@ -882,11 +882,11 @@ func @transfer_read_1d(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
//
// 4. Let dim the memref dimension, compute the vector comparison mask:
// [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
-// CHECK: %[[dtrunc:.*]] = llvm.trunc %[[DIM]] : !llvm.i64 to !llvm.i32
+// CHECK: %[[dtrunc:.*]] = llvm.trunc %[[DIM]] : i64 to i32
// CHECK: %[[dimVec:.*]] = llvm.mlir.undef : !llvm.vec<17 x i32>
-// CHECK: %[[c01:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+// CHECK: %[[c01:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[dimVec2:.*]] = llvm.insertelement %[[dtrunc]], %[[dimVec]][%[[c01]] :
-// CHECK-SAME: !llvm.i32] : !llvm.vec<17 x i32>
+// CHECK-SAME: i32] : !llvm.vec<17 x i32>
// CHECK: %[[dimVec3:.*]] = llvm.shufflevector %[[dimVec2]], %{{.*}} [
// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
@@ -905,7 +905,7 @@ func @transfer_read_1d(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
//
// 1. Bitcast to vector form.
// CHECK: %[[gep_b:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+// CHECK-SAME: (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
// CHECK: %[[vecPtr_b:.*]] = llvm.bitcast %[[gep_b]] :
// CHECK-SAME: !llvm.ptr<float> to !llvm.ptr<vec<17 x float>>
//
@@ -942,16 +942,16 @@ func @transfer_read_2d_to_1d(%A : memref<?x?xf32>, %base0: index, %base1: index)
return %f: vector<17xf32>
}
// CHECK-LABEL: func @transfer_read_2d_to_1d
-// CHECK-SAME: %[[BASE_0:[a-zA-Z0-9]*]]: !llvm.i64, %[[BASE_1:[a-zA-Z0-9]*]]: !llvm.i64) -> !llvm.vec<17 x float>
+// CHECK-SAME: %[[BASE_0:[a-zA-Z0-9]*]]: i64, %[[BASE_1:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x float>
// CHECK: %[[DIM:.*]] = llvm.extractvalue %{{.*}}[3, 1] :
// CHECK-SAME: !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
//
// Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
-// CHECK: %[[trunc:.*]] = llvm.trunc %[[BASE_1]] : !llvm.i64 to !llvm.i32
+// CHECK: %[[trunc:.*]] = llvm.trunc %[[BASE_1]] : i64 to i32
// CHECK: %[[offsetVec:.*]] = llvm.mlir.undef : !llvm.vec<17 x i32>
-// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[offsetVec2:.*]] = llvm.insertelement %[[trunc]], %[[offsetVec]][%[[c0]] :
-// CHECK-SAME: !llvm.i32] : !llvm.vec<17 x i32>
+// CHECK-SAME: i32] : !llvm.vec<17 x i32>
// CHECK: %[[offsetVec3:.*]] = llvm.shufflevector %[[offsetVec2]], %{{.*}} [
// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
@@ -960,11 +960,11 @@ func @transfer_read_2d_to_1d(%A : memref<?x?xf32>, %base0: index, %base1: index)
//
// Let dim the memref dimension, compute the vector comparison mask:
// [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
-// CHECK: %[[dimtrunc:.*]] = llvm.trunc %[[DIM]] : !llvm.i64 to !llvm.i32
+// CHECK: %[[dimtrunc:.*]] = llvm.trunc %[[DIM]] : i64 to i32
// CHECK: %[[dimVec:.*]] = llvm.mlir.undef : !llvm.vec<17 x i32>
-// CHECK: %[[c01:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+// CHECK: %[[c01:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[dimVec2:.*]] = llvm.insertelement %[[dimtrunc]], %[[dimVec]][%[[c01]] :
-// CHECK-SAME: !llvm.i32] : !llvm.vec<17 x i32>
+// CHECK-SAME: i32] : !llvm.vec<17 x i32>
// CHECK: %[[dimVec3:.*]] = llvm.shufflevector %[[dimVec2]], %{{.*}} [
// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
@@ -982,11 +982,11 @@ func @transfer_read_1d_non_zero_addrspace(%A : memref<?xf32, 3>, %base: index) -
return %f: vector<17xf32>
}
// CHECK-LABEL: func @transfer_read_1d_non_zero_addrspace
-// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: !llvm.i64) -> !llvm.vec<17 x float>
+// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x float>
//
// 1. Check address space for GEP is correct.
// CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr<float, 3>, !llvm.i64) -> !llvm.ptr<float, 3>
+// CHECK-SAME: (!llvm.ptr<float, 3>, i64) -> !llvm.ptr<float, 3>
// CHECK: %[[vecPtr:.*]] = llvm.addrspacecast %[[gep]] :
// CHECK-SAME: !llvm.ptr<float, 3> to !llvm.ptr<vec<17 x float>>
//
@@ -996,7 +996,7 @@ func @transfer_read_1d_non_zero_addrspace(%A : memref<?xf32, 3>, %base: index) -
//
// 3. Check address apce for GEP is correct.
// CHECK: %[[gep_b:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr<float, 3>, !llvm.i64) -> !llvm.ptr<float, 3>
+// CHECK-SAME: (!llvm.ptr<float, 3>, i64) -> !llvm.ptr<float, 3>
// CHECK: %[[vecPtr_b:.*]] = llvm.addrspacecast %[[gep_b]] :
// CHECK-SAME: !llvm.ptr<float, 3> to !llvm.ptr<vec<17 x float>>
@@ -1007,11 +1007,11 @@ func @transfer_read_1d_not_masked(%A : memref<?xf32>, %base: index) -> vector<17
return %f: vector<17xf32>
}
// CHECK-LABEL: func @transfer_read_1d_not_masked
-// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: !llvm.i64) -> !llvm.vec<17 x float>
+// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x float>
//
// 1. Bitcast to vector form.
// CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+// CHECK-SAME: (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
// CHECK: %[[vecPtr:.*]] = llvm.bitcast %[[gep]] :
// CHECK-SAME: !llvm.ptr<float> to !llvm.ptr<vec<17 x float>>
//
@@ -1025,11 +1025,11 @@ func @transfer_read_1d_cast(%A : memref<?xi32>, %base: index) -> vector<12xi8> {
return %v: vector<12xi8>
}
// CHECK-LABEL: func @transfer_read_1d_cast
-// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: !llvm.i64) -> !llvm.vec<12 x i8>
+// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<12 x i8>
//
// 1. Bitcast to vector form.
// CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr<i32>, !llvm.i64) -> !llvm.ptr<i32>
+// CHECK-SAME: (!llvm.ptr<i32>, i64) -> !llvm.ptr<i32>
// CHECK: %[[vecPtr:.*]] = llvm.bitcast %[[gep]] :
// CHECK-SAME: !llvm.ptr<i32> to !llvm.ptr<vec<12 x i8>>
//
diff --git a/mlir/test/Dialect/GPU/outlining.mlir b/mlir/test/Dialect/GPU/outlining.mlir
index e2f16fe96a08..cb98ebc1544b 100644
--- a/mlir/test/Dialect/GPU/outlining.mlir
+++ b/mlir/test/Dialect/GPU/outlining.mlir
@@ -215,7 +215,7 @@ func @multiple_uses2(%arg0 : memref<*xf32>) {
// -----
-llvm.mlir.global internal @global(42 : i64) : !llvm.i64
+llvm.mlir.global internal @global(42 : i64) : i64
//CHECK-LABEL: @function_call
func @function_call(%arg0 : memref<?xf32>) {
@@ -249,7 +249,7 @@ func @recursive_device_function() {
// CHECK: llvm.mlir.addressof @global : !llvm.ptr<i64>
// CHECK: gpu.return
//
-// CHECK: llvm.mlir.global internal @global(42 : i64) : !llvm.i64
+// CHECK: llvm.mlir.global internal @global(42 : i64) : i64
//
// CHECK: func @device_function()
// CHECK: func @recursive_device_function()
diff --git a/mlir/test/Dialect/LLVMIR/dialect-cast.mlir b/mlir/test/Dialect/LLVMIR/dialect-cast.mlir
index f509b5a388ad..bf8f94892664 100644
--- a/mlir/test/Dialect/LLVMIR/dialect-cast.mlir
+++ b/mlir/test/Dialect/LLVMIR/dialect-cast.mlir
@@ -7,9 +7,8 @@ func @mlir_dialect_cast(%0: index, %1: i32, %2: bf16, %3: f16, %4: f32, %5: f64,
%6: vector<42xf32>, %7: memref<42xf32>,
%8: memref<?xf32>, %9: memref<f32>,
%10: memref<*xf32>) {
- llvm.mlir.cast %0 : index to !llvm.i64
- llvm.mlir.cast %0 : index to !llvm.i32
- llvm.mlir.cast %1 : i32 to !llvm.i32
+ llvm.mlir.cast %0 : index to i64
+ llvm.mlir.cast %0 : index to i32
llvm.mlir.cast %2 : bf16 to !llvm.bfloat
llvm.mlir.cast %3 : f16 to !llvm.half
llvm.mlir.cast %4 : f32 to !llvm.float
@@ -71,19 +70,12 @@ func @mlir_dialect_cast_f64(%0 : f64) {
// -----
func @mlir_dialect_cast_integer_non_integer(%0 : i16) {
- // expected-error at +1 {{invalid cast between integer and non-integer type}}
+ // expected-error at +1 {{unsupported cast}}
llvm.mlir.cast %0 : i16 to !llvm.half
}
// -----
-func @mlir_dialect_cast_integer_bitwidth_mismatch(%0 : i16) {
- // expected-error@+1 {{invalid cast between integers with mismatching bitwidth}}
- llvm.mlir.cast %0 : i16 to !llvm.i32
-}
-
-// -----
-
func @mlir_dialect_cast_nd_vector(%0 : vector<2x2xf32>) {
// expected-error at +1 {{only 1-d vector is allowed}}
llvm.mlir.cast %0 : vector<2x2xf32> to !llvm.vec<4xfloat>
diff --git a/mlir/test/Dialect/LLVMIR/func.mlir b/mlir/test/Dialect/LLVMIR/func.mlir
index 65dc33cc1c4f..72e117d57a91 100644
--- a/mlir/test/Dialect/LLVMIR/func.mlir
+++ b/mlir/test/Dialect/LLVMIR/func.mlir
@@ -14,24 +14,24 @@ module {
// GENERIC: sym_name = "bar"
// GENERIC-SAME: type = !llvm.func<i64 (i64, i64)>
// GENERIC-SAME: () -> ()
- // CHECK: llvm.func @bar(!llvm.i64, !llvm.i64) -> !llvm.i64
+ // CHECK: llvm.func @bar(i64, i64) -> i64
"llvm.func"() ({
}) {sym_name = "bar", type = !llvm.func<i64 (i64, i64)>} : () -> ()
// GENERIC: "llvm.func"
- // CHECK: llvm.func @baz(%{{.*}}: !llvm.i64) -> !llvm.i64
+ // CHECK: llvm.func @baz(%{{.*}}: i64) -> i64
"llvm.func"() ({
// GENERIC: ^bb0
- ^bb0(%arg0: !llvm.i64):
+ ^bb0(%arg0: i64):
// GENERIC: llvm.return
- llvm.return %arg0 : !llvm.i64
+ llvm.return %arg0 : i64
// GENERIC: sym_name = "baz"
// GENERIC-SAME: type = !llvm.func<i64 (i64)>
// GENERIC-SAME: () -> ()
}) {sym_name = "baz", type = !llvm.func<i64 (i64)>} : () -> ()
- // CHECK: llvm.func @qux(!llvm.ptr<i64> {llvm.noalias = true}, !llvm.i64)
+ // CHECK: llvm.func @qux(!llvm.ptr<i64> {llvm.noalias = true}, i64)
// CHECK: attributes {xxx = {yyy = 42 : i64}}
"llvm.func"() ({
}) {sym_name = "qux", type = !llvm.func<void (ptr<i64>, i64)>,
@@ -40,14 +40,14 @@ module {
// CHECK: llvm.func @roundtrip1()
llvm.func @roundtrip1()
- // CHECK: llvm.func @roundtrip2(!llvm.i64, !llvm.float) -> !llvm.double
- llvm.func @roundtrip2(!llvm.i64, !llvm.float) -> !llvm.double
+ // CHECK: llvm.func @roundtrip2(i64, !llvm.float) -> !llvm.double
+ llvm.func @roundtrip2(i64, !llvm.float) -> !llvm.double
- // CHECK: llvm.func @roundtrip3(!llvm.i32, !llvm.i1)
- llvm.func @roundtrip3(%a: !llvm.i32, %b: !llvm.i1)
+ // CHECK: llvm.func @roundtrip3(i32, i1)
+ llvm.func @roundtrip3(%a: i32, %b: i1)
- // CHECK: llvm.func @roundtrip4(%{{.*}}: !llvm.i32, %{{.*}}: !llvm.i1) {
- llvm.func @roundtrip4(%a: !llvm.i32, %b: !llvm.i1) {
+ // CHECK: llvm.func @roundtrip4(%{{.*}}: i32, %{{.*}}: i1) {
+ llvm.func @roundtrip4(%a: i32, %b: i1) {
llvm.return
}
@@ -66,8 +66,8 @@ module {
llvm.return
}
- // CHECK: llvm.func @roundtrip8() -> !llvm.i32
- llvm.func @roundtrip8() -> !llvm.i32 attributes {}
+ // CHECK: llvm.func @roundtrip8() -> i32
+ llvm.func @roundtrip8() -> i32 attributes {}
// CHECK: llvm.func @roundtrip9(!llvm.ptr<i32> {llvm.noalias = true})
llvm.func @roundtrip9(!llvm.ptr<i32> {llvm.noalias = true})
@@ -90,8 +90,8 @@ module {
// CHECK: llvm.func @variadic(...)
llvm.func @variadic(...)
- // CHECK: llvm.func @variadic_args(!llvm.i32, !llvm.i32, ...)
- llvm.func @variadic_args(!llvm.i32, !llvm.i32, ...)
+ // CHECK: llvm.func @variadic_args(i32, i32, ...)
+ llvm.func @variadic_args(i32, i32, ...)
//
// Check that functions can have linkage attributes.
@@ -140,7 +140,7 @@ module {
module {
// expected-error@+1 {{requires 'type' attribute of wrapped LLVM function type}}
- "llvm.func"() ({}) {sym_name = "non_function_type", type = !llvm.i64} : () -> ()
+ "llvm.func"() ({}) {sym_name = "non_function_type", type = i64} : () -> ()
}
// -----
@@ -148,7 +148,7 @@ module {
module {
// expected-error@+1 {{entry block must have 0 arguments}}
"llvm.func"() ({
- ^bb0(%arg0: !llvm.i64):
+ ^bb0(%arg0: i64):
llvm.return
}) {sym_name = "wrong_arg_number", type = !llvm.func<void ()>} : () -> ()
}
@@ -158,7 +158,7 @@ module {
module {
// expected-error@+1 {{entry block argument #0 is not of LLVM type}}
"llvm.func"() ({
- ^bb0(%arg0: i64):
+ ^bb0(%arg0: tensor<*xf32>):
llvm.return
}) {sym_name = "wrong_arg_number", type = !llvm.func<void (i64)>} : () -> ()
}
@@ -168,7 +168,7 @@ module {
module {
// expected-error@+1 {{entry block argument #0 does not match the function signature}}
"llvm.func"() ({
- ^bb0(%arg0: !llvm.i32):
+ ^bb0(%arg0: i32):
llvm.return
}) {sym_name = "wrong_arg_number", type = !llvm.func<void (i64)>} : () -> ()
}
@@ -177,21 +177,21 @@ module {
module {
// expected-error@+1 {{failed to construct function type: expected LLVM type for function arguments}}
- llvm.func @foo(i64)
+ llvm.func @foo(tensor<*xf32>)
}
// -----
module {
// expected-error@+1 {{failed to construct function type: expected LLVM type for function results}}
- llvm.func @foo() -> i64
+ llvm.func @foo() -> tensor<*xf32>
}
// -----
module {
// expected-error@+1 {{failed to construct function type: expected zero or one function result}}
- llvm.func @foo() -> (!llvm.i64, !llvm.i64)
+ llvm.func @foo() -> (i64, i64)
}
// -----
@@ -207,7 +207,7 @@ module {
module {
// expected-error@+1 {{variadic arguments must be in the end of the argument list}}
- llvm.func @variadic_inside(%arg0: !llvm.i32, ..., %arg1: !llvm.i32)
+ llvm.func @variadic_inside(%arg0: i32, ..., %arg1: i32)
}
// -----
diff --git a/mlir/test/Dialect/LLVMIR/global.mlir b/mlir/test/Dialect/LLVMIR/global.mlir
index 7d7860645ee9..9341e774ae84 100644
--- a/mlir/test/Dialect/LLVMIR/global.mlir
+++ b/mlir/test/Dialect/LLVMIR/global.mlir
@@ -1,13 +1,13 @@
// RUN: mlir-opt -split-input-file -verify-diagnostics %s | FileCheck %s
// CHECK: llvm.mlir.global external @default_external
-llvm.mlir.global @default_external() : !llvm.i64
+llvm.mlir.global @default_external() : i64
// CHECK: llvm.mlir.global external constant @default_external_constant
-llvm.mlir.global constant @default_external_constant(42) : !llvm.i64
+llvm.mlir.global constant @default_external_constant(42) : i64
-// CHECK: llvm.mlir.global internal @global(42 : i64) : !llvm.i64
-llvm.mlir.global internal @global(42 : i64) : !llvm.i64
+// CHECK: llvm.mlir.global internal @global(42 : i64) : i64
+llvm.mlir.global internal @global(42 : i64) : i64
// CHECK: llvm.mlir.global internal constant @constant(3.700000e+01 : f64) : !llvm.float
llvm.mlir.global internal constant @constant(37.0) : !llvm.float
@@ -19,38 +19,38 @@ llvm.mlir.global internal constant @".string"("foobar") : !llvm.array<6 x i8>
llvm.mlir.global internal @string_notype("1234567")
// CHECK: llvm.mlir.global internal @global_undef()
-llvm.mlir.global internal @global_undef() : !llvm.i64
+llvm.mlir.global internal @global_undef() : i64
-// CHECK: llvm.mlir.global internal @global_mega_initializer() : !llvm.i64 {
-// CHECK-NEXT: %[[c:[0-9]+]] = llvm.mlir.constant(42 : i64) : !llvm.i64
-// CHECK-NEXT: llvm.return %[[c]] : !llvm.i64
+// CHECK: llvm.mlir.global internal @global_mega_initializer() : i64 {
+// CHECK-NEXT: %[[c:[0-9]+]] = llvm.mlir.constant(42 : i64) : i64
+// CHECK-NEXT: llvm.return %[[c]] : i64
// CHECK-NEXT: }
-llvm.mlir.global internal @global_mega_initializer() : !llvm.i64 {
- %c = llvm.mlir.constant(42 : i64) : !llvm.i64
- llvm.return %c : !llvm.i64
+llvm.mlir.global internal @global_mega_initializer() : i64 {
+ %c = llvm.mlir.constant(42 : i64) : i64
+ llvm.return %c : i64
}
// Check different linkage types.
// CHECK: llvm.mlir.global private
-llvm.mlir.global private @private() : !llvm.i64
+llvm.mlir.global private @private() : i64
// CHECK: llvm.mlir.global internal
-llvm.mlir.global internal @internal() : !llvm.i64
+llvm.mlir.global internal @internal() : i64
// CHECK: llvm.mlir.global available_externally
-llvm.mlir.global available_externally @available_externally() : !llvm.i64
+llvm.mlir.global available_externally @available_externally() : i64
// CHECK: llvm.mlir.global linkonce
-llvm.mlir.global linkonce @linkonce() : !llvm.i64
+llvm.mlir.global linkonce @linkonce() : i64
// CHECK: llvm.mlir.global weak
-llvm.mlir.global weak @weak() : !llvm.i64
+llvm.mlir.global weak @weak() : i64
// CHECK: llvm.mlir.global common
-llvm.mlir.global common @common() : !llvm.i64
+llvm.mlir.global common @common() : i64
// CHECK: llvm.mlir.global appending
-llvm.mlir.global appending @appending() : !llvm.i64
+llvm.mlir.global appending @appending() : i64
// CHECK: llvm.mlir.global extern_weak
-llvm.mlir.global extern_weak @extern_weak() : !llvm.i64
+llvm.mlir.global extern_weak @extern_weak() : i64
// CHECK: llvm.mlir.global linkonce_odr
-llvm.mlir.global linkonce_odr @linkonce_odr() : !llvm.i64
+llvm.mlir.global linkonce_odr @linkonce_odr() : i64
// CHECK: llvm.mlir.global weak_odr
-llvm.mlir.global weak_odr @weak_odr() : !llvm.i64
+llvm.mlir.global weak_odr @weak_odr() : i64
// CHECK-LABEL: references
func @references() {
@@ -66,7 +66,7 @@ func @references() {
// -----
// expected-error @+1 {{requires string attribute 'sym_name'}}
-"llvm.mlir.global"() ({}) {type = !llvm.i64, constant, value = 42 : i64} : () -> ()
+"llvm.mlir.global"() ({}) {type = i64, constant, value = 42 : i64} : () -> ()
// -----
@@ -81,18 +81,18 @@ llvm.mlir.global internal constant @constant(37.0) : !llvm.label
// -----
// expected-error @+1 {{'addr_space' failed to satisfy constraint: 32-bit signless integer attribute whose value is non-negative}}
-"llvm.mlir.global"() ({}) {sym_name = "foo", type = !llvm.i64, value = 42 : i64, addr_space = -1 : i32, linkage = 0} : () -> ()
+"llvm.mlir.global"() ({}) {sym_name = "foo", type = i64, value = 42 : i64, addr_space = -1 : i32, linkage = 0} : () -> ()
// -----
// expected-error @+1 {{'addr_space' failed to satisfy constraint: 32-bit signless integer attribute whose value is non-negative}}
-"llvm.mlir.global"() ({}) {sym_name = "foo", type = !llvm.i64, value = 42 : i64, addr_space = 1.0 : f32, linkage = 0} : () -> ()
+"llvm.mlir.global"() ({}) {sym_name = "foo", type = i64, value = 42 : i64, addr_space = 1.0 : f32, linkage = 0} : () -> ()
// -----
func @foo() {
// expected-error @+1 {{must appear at the module level}}
- llvm.mlir.global internal @bar(42) : !llvm.i32
+ llvm.mlir.global internal @bar(42) : i32
}
// -----
@@ -108,11 +108,11 @@ llvm.mlir.global internal @i64_needs_type(0: i64)
// -----
// expected-error @+1 {{expected zero or one type}}
-llvm.mlir.global internal @more_than_one_type(0) : !llvm.i64, !llvm.i32
+llvm.mlir.global internal @more_than_one_type(0) : i64, i32
// -----
-llvm.mlir.global internal @foo(0: i32) : !llvm.i32
+llvm.mlir.global internal @foo(0: i32) : i32
func @bar() {
// expected-error @+2{{expected ':'}}
@@ -137,7 +137,7 @@ func @foo() {
// -----
-llvm.mlir.global internal @foo(0: i32) : !llvm.i32
+llvm.mlir.global internal @foo(0: i32) : i32
func @bar() {
// expected-error @+1 {{the type must be a pointer to the type of the referenced global}}
@@ -157,29 +157,29 @@ llvm.func @bar() {
// expected-error @+2 {{'llvm.mlir.global' op expects regions to end with 'llvm.return', found 'llvm.mlir.constant'}}
// expected-note @+1 {{in custom textual format, the absence of terminator implies 'llvm.return'}}
-llvm.mlir.global internal @g() : !llvm.i64 {
- %c = llvm.mlir.constant(42 : i64) : !llvm.i64
+llvm.mlir.global internal @g() : i64 {
+ %c = llvm.mlir.constant(42 : i64) : i64
}
// -----
-// expected-error @+1 {{'llvm.mlir.global' op initializer region type '!llvm.i64' does not match global type '!llvm.i32'}}
-llvm.mlir.global internal @g() : !llvm.i32 {
- %c = llvm.mlir.constant(42 : i64) : !llvm.i64
- llvm.return %c : !llvm.i64
+// expected-error @+1 {{'llvm.mlir.global' op initializer region type 'i64' does not match global type 'i32'}}
+llvm.mlir.global internal @g() : i32 {
+ %c = llvm.mlir.constant(42 : i64) : i64
+ llvm.return %c : i64
}
// -----
// expected-error @+1 {{'llvm.mlir.global' op cannot have both initializer value and region}}
-llvm.mlir.global internal @g(43 : i64) : !llvm.i64 {
- %c = llvm.mlir.constant(42 : i64) : !llvm.i64
- llvm.return %c : !llvm.i64
+llvm.mlir.global internal @g(43 : i64) : i64 {
+ %c = llvm.mlir.constant(42 : i64) : i64
+ llvm.return %c : i64
}
// -----
-llvm.mlir.global internal @g(32 : i64) {addr_space = 3: i32} : !llvm.i64
+llvm.mlir.global internal @g(32 : i64) {addr_space = 3: i32} : i64
func @mismatch_addr_space_implicit_global() {
// expected-error @+1 {{op the type must be a pointer to the type of the referenced global}}
llvm.mlir.addressof @g : !llvm.ptr<i64>
@@ -187,7 +187,7 @@ func @mismatch_addr_space_implicit_global() {
// -----
-llvm.mlir.global internal @g(32 : i64) {addr_space = 3: i32} : !llvm.i64
+llvm.mlir.global internal @g(32 : i64) {addr_space = 3: i32} : i64
func @mismatch_addr_space() {
// expected-error @+1 {{op the type must be a pointer to the type of the referenced global}}
llvm.mlir.addressof @g : !llvm.ptr<i64, 4>
diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir
index 5e2f666c5b83..87eddd67f030 100644
--- a/mlir/test/Dialect/LLVMIR/invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/invalid.mlir
@@ -1,14 +1,14 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -verify-diagnostics
// expected-error@+1{{llvm.noalias argument attribute of non boolean type}}
-func @invalid_noalias(%arg0: !llvm.i32 {llvm.noalias = 3}) {
+func @invalid_noalias(%arg0: i32 {llvm.noalias = 3}) {
"llvm.return"() : () -> ()
}
// -----
// expected-error@+1{{llvm.align argument attribute of non integer type}}
-func @invalid_align(%arg0: !llvm.i32 {llvm.align = "foo"}) {
+func @invalid_align(%arg0: i32 {llvm.align = "foo"}) {
"llvm.return"() : () -> ()
}
@@ -18,79 +18,79 @@ func @invalid_align(%arg0: !llvm.i32 {llvm.align = "foo"}) {
// -----
-func @icmp_non_string(%arg0 : !llvm.i32, %arg1 : !llvm.i16) {
+func @icmp_non_string(%arg0 : i32, %arg1 : i16) {
// expected-error@+1 {{invalid kind of attribute specified}}
- llvm.icmp 42 %arg0, %arg0 : !llvm.i32
+ llvm.icmp 42 %arg0, %arg0 : i32
return
}
// -----
-func @icmp_wrong_string(%arg0 : !llvm.i32, %arg1 : !llvm.i16) {
+func @icmp_wrong_string(%arg0 : i32, %arg1 : i16) {
// expected-error@+1 {{'foo' is an incorrect value of the 'predicate' attribute}}
- llvm.icmp "foo" %arg0, %arg0 : !llvm.i32
+ llvm.icmp "foo" %arg0, %arg0 : i32
return
}
// -----
-func @alloca_missing_input_result_type(%size : !llvm.i64) {
+func @alloca_missing_input_result_type(%size : i64) {
// expected-error@+1 {{expected trailing function type with one argument and one result}}
- llvm.alloca %size x !llvm.i32 : () -> ()
+ llvm.alloca %size x i32 : () -> ()
}
// -----
func @alloca_missing_input_type() {
// expected-error@+1 {{expected trailing function type with one argument and one result}}
- llvm.alloca %size x !llvm.i32 : () -> (!llvm.ptr<i32>)
+ llvm.alloca %size x i32 : () -> (!llvm.ptr<i32>)
}
// -----
func @alloca_missing_result_type() {
// expected-error@+1 {{expected trailing function type with one argument and one result}}
- llvm.alloca %size x !llvm.i32 : (!llvm.i64) -> ()
+ llvm.alloca %size x i32 : (i64) -> ()
}
// -----
func @alloca_non_function_type() {
// expected-error@+1 {{expected trailing function type with one argument and one result}}
- llvm.alloca %size x !llvm.i32 : !llvm.ptr<i32>
+ llvm.alloca %size x i32 : !llvm.ptr<i32>
}
// -----
func @alloca_non_integer_alignment() {
// expected-error@+1 {{expected integer alignment}}
- llvm.alloca %size x !llvm.i32 {alignment = 3.0} : !llvm.ptr<i32>
+ llvm.alloca %size x i32 {alignment = 3.0} : !llvm.ptr<i32>
}
// -----
-func @gep_missing_input_result_type(%pos : !llvm.i64, %base : !llvm.ptr<float>) {
+func @gep_missing_input_result_type(%pos : i64, %base : !llvm.ptr<float>) {
// expected-error@+1 {{2 operands present, but expected 0}}
llvm.getelementptr %base[%pos] : () -> ()
}
// -----
-func @gep_missing_input_type(%pos : !llvm.i64, %base : !llvm.ptr<float>) {
+func @gep_missing_input_type(%pos : i64, %base : !llvm.ptr<float>) {
// expected-error@+1 {{2 operands present, but expected 0}}
llvm.getelementptr %base[%pos] : () -> (!llvm.ptr<float>)
}
// -----
-func @gep_missing_result_type(%pos : !llvm.i64, %base : !llvm.ptr<float>) {
+func @gep_missing_result_type(%pos : i64, %base : !llvm.ptr<float>) {
// expected-error@+1 {{op requires one result}}
- llvm.getelementptr %base[%pos] : (!llvm.ptr<float>, !llvm.i64) -> ()
+ llvm.getelementptr %base[%pos] : (!llvm.ptr<float>, i64) -> ()
}
// -----
-func @gep_non_function_type(%pos : !llvm.i64, %base : !llvm.ptr<float>) {
+func @gep_non_function_type(%pos : i64, %base : !llvm.ptr<float>) {
// expected-error@+1 {{invalid kind of type specified}}
llvm.getelementptr %base[%pos] : !llvm.ptr<float>
}
@@ -125,7 +125,7 @@ func @store_non_ptr_type(%foo : !llvm.float, %bar : !llvm.float) {
// -----
-func @call_non_function_type(%callee : !llvm.func<i8 (i8)>, %arg : !llvm.i8) {
+func @call_non_function_type(%callee : !llvm.func<i8 (i8)>, %arg : i8) {
// expected-error@+1 {{expected function type}}
llvm.call %callee(%arg) : !llvm.func<i8 (i8)>
}
@@ -139,7 +139,7 @@ func @invalid_call() {
// -----
-func @call_non_function_type(%callee : !llvm.func<i8 (i8)>, %arg : !llvm.i8) {
+func @call_non_function_type(%callee : !llvm.func<i8 (i8)>, %arg : i8) {
// expected-error@+1 {{expected function type}}
llvm.call %callee(%arg) : !llvm.func<i8 (i8)>
}
@@ -162,41 +162,41 @@ func @call_non_llvm() {
// -----
-func @call_non_llvm_indirect(%arg0 : i32) {
- // expected-error@+1 {{'llvm.call' op operand #0 must be LLVM dialect-compatible type, but got 'i32'}}
- "llvm.call"(%arg0) : (i32) -> ()
+func @call_non_llvm_indirect(%arg0 : tensor<*xi32>) {
+ // expected-error@+1 {{'llvm.call' op operand #0 must be LLVM dialect-compatible type}}
+ "llvm.call"(%arg0) : (tensor<*xi32>) -> ()
}
// -----
-llvm.func @callee_func(!llvm.i8) -> ()
+llvm.func @callee_func(i8) -> ()
-func @callee_arg_mismatch(%arg0 : !llvm.i32) {
- // expected-error@+1 {{'llvm.call' op operand type mismatch for operand 0: '!llvm.i32' != '!llvm.i8'}}
- llvm.call @callee_func(%arg0) : (!llvm.i32) -> ()
+func @callee_arg_mismatch(%arg0 : i32) {
+ // expected-error@+1 {{'llvm.call' op operand type mismatch for operand 0: 'i32' != 'i8'}}
+ llvm.call @callee_func(%arg0) : (i32) -> ()
}
// -----
-func @indirect_callee_arg_mismatch(%arg0 : !llvm.i32, %callee : !llvm.ptr<func<void(i8)>>) {
- // expected-error@+1 {{'llvm.call' op operand type mismatch for operand 0: '!llvm.i32' != '!llvm.i8'}}
- "llvm.call"(%callee, %arg0) : (!llvm.ptr<func<void(i8)>>, !llvm.i32) -> ()
+func @indirect_callee_arg_mismatch(%arg0 : i32, %callee : !llvm.ptr<func<void(i8)>>) {
+ // expected-error@+1 {{'llvm.call' op operand type mismatch for operand 0: 'i32' != 'i8'}}
+ "llvm.call"(%callee, %arg0) : (!llvm.ptr<func<void(i8)>>, i32) -> ()
}
// -----
-llvm.func @callee_func() -> (!llvm.i8)
+llvm.func @callee_func() -> (i8)
func @callee_return_mismatch() {
- // expected-error@+1 {{'llvm.call' op result type mismatch: '!llvm.i32' != '!llvm.i8'}}
- %res = llvm.call @callee_func() : () -> (!llvm.i32)
+ // expected-error@+1 {{'llvm.call' op result type mismatch: 'i32' != 'i8'}}
+ %res = llvm.call @callee_func() : () -> (i32)
}
// -----
func @indirect_callee_return_mismatch(%callee : !llvm.ptr<func<i8()>>) {
- // expected-error@+1 {{'llvm.call' op result type mismatch: '!llvm.i32' != '!llvm.i8'}}
- "llvm.call"(%callee) : (!llvm.ptr<func<i8()>>) -> (!llvm.i32)
+ // expected-error@+1 {{'llvm.call' op result type mismatch: 'i32' != 'i8'}}
+ "llvm.call"(%callee) : (!llvm.ptr<func<i8()>>) -> (i32)
}
// -----
@@ -208,16 +208,16 @@ func @call_too_many_results(%callee : () -> (i32,i32)) {
// -----
-func @call_non_llvm_result(%callee : () -> (i32)) {
+func @call_non_llvm_result(%callee : () -> (tensor<*xi32>)) {
// expected-error@+1 {{expected result to have LLVM type}}
- llvm.call %callee() : () -> (i32)
+ llvm.call %callee() : () -> (tensor<*xi32>)
}
// -----
-func @call_non_llvm_input(%callee : (i32) -> (), %arg : i32) {
+func @call_non_llvm_input(%callee : (tensor<*xi32>) -> (), %arg : tensor<*xi32>) {
// expected-error@+1 {{expected LLVM types as inputs}}
- llvm.call %callee(%arg) : (i32) -> ()
+ llvm.call %callee(%arg) : (tensor<*xi32>) -> ()
}
// -----
@@ -231,7 +231,7 @@ func @constant_wrong_type() {
func @insertvalue_non_llvm_type(%a : i32, %b : i32) {
// expected-error@+1 {{expected LLVM IR Dialect type}}
- llvm.insertvalue %a, %b[0] : i32
+ llvm.insertvalue %a, %b[0] : tensor<*xi32>
}
// -----
@@ -273,9 +273,9 @@ func @insertvalue_wrong_nesting() {
// -----
-func @extractvalue_non_llvm_type(%a : i32, %b : i32) {
+func @extractvalue_non_llvm_type(%a : i32, %b : tensor<*xi32>) {
// expected-error@+1 {{expected LLVM IR Dialect type}}
- llvm.extractvalue %b[0] : i32
+ llvm.extractvalue %b[0] : tensor<*xi32>
}
// -----
@@ -317,21 +317,21 @@ func @extractvalue_wrong_nesting() {
// -----
-func @invalid_vector_type_1(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i32, %arg2: !llvm.float) {
+func @invalid_vector_type_1(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) {
// expected-error@+1 {{expected LLVM IR dialect vector type for operand #1}}
- %0 = llvm.extractelement %arg2[%arg1 : !llvm.i32] : !llvm.float
+ %0 = llvm.extractelement %arg2[%arg1 : i32] : !llvm.float
}
// -----
-func @invalid_vector_type_2(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i32, %arg2: !llvm.float) {
+func @invalid_vector_type_2(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) {
// expected-error@+1 {{expected LLVM IR dialect vector type for operand #1}}
- %0 = llvm.insertelement %arg2, %arg2[%arg1 : !llvm.i32] : !llvm.float
+ %0 = llvm.insertelement %arg2, %arg2[%arg1 : i32] : !llvm.float
}
// -----
-func @invalid_vector_type_3(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i32, %arg2: !llvm.float) {
+func @invalid_vector_type_3(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) {
// expected-error@+1 {{expected LLVM IR dialect vector type for operand #1}}
%0 = llvm.shufflevector %arg2, %arg2 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.float, !llvm.float
}
@@ -339,27 +339,27 @@ func @invalid_vector_type_3(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i32, %arg2
// -----
func @null_non_llvm_type() {
- // expected-error@+1 {{must be LLVM pointer type, but got '!llvm.i32'}}
- llvm.mlir.null : !llvm.i32
+ // expected-error@+1 {{must be LLVM pointer type, but got 'i32'}}
+ llvm.mlir.null : i32
}
// -----
-func @nvvm_invalid_shfl_pred_1(%arg0 : !llvm.i32, %arg1 : !llvm.i32, %arg2 : !llvm.i32, %arg3 : !llvm.i32) {
+func @nvvm_invalid_shfl_pred_1(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i32) {
// expected-error@+1 {{expected return type to be a two-element struct with i1 as the second element}}
- %0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : !llvm.i32
+ %0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : i32
}
// -----
-func @nvvm_invalid_shfl_pred_2(%arg0 : !llvm.i32, %arg1 : !llvm.i32, %arg2 : !llvm.i32, %arg3 : !llvm.i32) {
+func @nvvm_invalid_shfl_pred_2(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i32) {
// expected-error@+1 {{expected return type to be a two-element struct with i1 as the second element}}
%0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : !llvm.struct<(i32)>
}
// -----
-func @nvvm_invalid_shfl_pred_3(%arg0 : !llvm.i32, %arg1 : !llvm.i32, %arg2 : !llvm.i32, %arg3 : !llvm.i32) {
+func @nvvm_invalid_shfl_pred_3(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i32) {
// expected-error@+1 {{expected return type to be a two-element struct with i1 as the second element}}
%0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : !llvm.struct<(i32, i32)>
}
@@ -448,7 +448,7 @@ func @nvvm_invalid_mma_7(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>,
%c0 : !llvm.float, %c1 : !llvm.float, %c2 : !llvm.float, %c3 : !llvm.float,
%c4 : !llvm.float, %c5 : !llvm.float, %c6 : !llvm.float, %c7 : !llvm.float) {
// expected-error@+1 {{op requires one result}}
- %0:2 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="col", blayout="row"} : (!llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float) -> (!llvm.struct<(float, float, float, float, float, float, float, float)>, !llvm.i32)
+ %0:2 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="col", blayout="row"} : (!llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float) -> (!llvm.struct<(float, float, float, float, float, float, float, float)>, i32)
llvm.return %0#0 : !llvm.struct<(float, float, float, float, float, float, float, float)>
}
@@ -462,9 +462,9 @@ func @atomicrmw_expected_ptr(%f32 : !llvm.float) {
// -----
-func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<float>, %i32 : !llvm.i32) {
+func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<float>, %i32 : i32) {
// expected-error@+1 {{expected LLVM IR element type for operand #0 to match type for operand #1}}
- %0 = "llvm.atomicrmw"(%f32_ptr, %i32) {bin_op=11, ordering=1} : (!llvm.ptr<float>, !llvm.i32) -> !llvm.float
+ %0 = "llvm.atomicrmw"(%f32_ptr, %i32) {bin_op=11, ordering=1} : (!llvm.ptr<float>, i32) -> !llvm.float
llvm.return
}
@@ -472,23 +472,23 @@ func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<float>, %i32 : !llvm.i3
func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<float>, %f32 : !llvm.float) {
// expected-error@+1 {{expected LLVM IR result type to match type for operand #1}}
- %0 = "llvm.atomicrmw"(%f32_ptr, %f32) {bin_op=11, ordering=1} : (!llvm.ptr<float>, !llvm.float) -> !llvm.i32
+ %0 = "llvm.atomicrmw"(%f32_ptr, %f32) {bin_op=11, ordering=1} : (!llvm.ptr<float>, !llvm.float) -> i32
llvm.return
}
// -----
-func @atomicrmw_expected_float(%i32_ptr : !llvm.ptr<i32>, %i32 : !llvm.i32) {
+func @atomicrmw_expected_float(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
// expected-error@+1 {{expected LLVM IR floating point type}}
- %0 = llvm.atomicrmw fadd %i32_ptr, %i32 unordered : !llvm.i32
+ %0 = llvm.atomicrmw fadd %i32_ptr, %i32 unordered : i32
llvm.return
}
// -----
-func @atomicrmw_unexpected_xchg_type(%i1_ptr : !llvm.ptr<i1>, %i1 : !llvm.i1) {
+func @atomicrmw_unexpected_xchg_type(%i1_ptr : !llvm.ptr<i1>, %i1 : i1) {
// expected-error@+1 {{unexpected LLVM IR type for 'xchg' bin_op}}
- %0 = llvm.atomicrmw xchg %i1_ptr, %i1 unordered : !llvm.i1
+ %0 = llvm.atomicrmw xchg %i1_ptr, %i1 unordered : i1
llvm.return
}
@@ -510,129 +510,129 @@ func @cmpxchg_expected_ptr(%f32_ptr : !llvm.ptr<float>, %f32 : !llvm.float) {
// -----
-func @cmpxchg_mismatched_operands(%i64_ptr : !llvm.ptr<i64>, %i32 : !llvm.i32) {
+func @cmpxchg_mismatched_operands(%i64_ptr : !llvm.ptr<i64>, %i32 : i32) {
// expected-error@+1 {{expected LLVM IR element type for operand #0 to match type for all other operands}}
- %0 = "llvm.cmpxchg"(%i64_ptr, %i32, %i32) {success_ordering=2,failure_ordering=2} : (!llvm.ptr<i64>, !llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)>
+ %0 = "llvm.cmpxchg"(%i64_ptr, %i32, %i32) {success_ordering=2,failure_ordering=2} : (!llvm.ptr<i64>, i32, i32) -> !llvm.struct<(i32, i1)>
llvm.return
}
// -----
-func @cmpxchg_unexpected_type(%i1_ptr : !llvm.ptr<i1>, %i1 : !llvm.i1) {
+func @cmpxchg_unexpected_type(%i1_ptr : !llvm.ptr<i1>, %i1 : i1) {
// expected-error@+1 {{unexpected LLVM IR type}}
- %0 = llvm.cmpxchg %i1_ptr, %i1, %i1 monotonic monotonic : !llvm.i1
+ %0 = llvm.cmpxchg %i1_ptr, %i1, %i1 monotonic monotonic : i1
llvm.return
}
// -----
-func @cmpxchg_at_least_monotonic_success(%i32_ptr : !llvm.ptr<i32>, %i32 : !llvm.i32) {
+func @cmpxchg_at_least_monotonic_success(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
// expected-error@+1 {{ordering must be at least 'monotonic'}}
- %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 unordered monotonic : !llvm.i32
+ %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 unordered monotonic : i32
llvm.return
}
// -----
-func @cmpxchg_at_least_monotonic_failure(%i32_ptr : !llvm.ptr<i32>, %i32 : !llvm.i32) {
+func @cmpxchg_at_least_monotonic_failure(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
// expected-error@+1 {{ordering must be at least 'monotonic'}}
- %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 monotonic unordered : !llvm.i32
+ %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 monotonic unordered : i32
llvm.return
}
// -----
-func @cmpxchg_failure_release(%i32_ptr : !llvm.ptr<i32>, %i32 : !llvm.i32) {
+func @cmpxchg_failure_release(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
// expected-error@+1 {{failure ordering cannot be 'release' or 'acq_rel'}}
- %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel release : !llvm.i32
+ %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel release : i32
llvm.return
}
// -----
-func @cmpxchg_failure_acq_rel(%i32_ptr : !llvm.ptr<i32>, %i32 : !llvm.i32) {
+func @cmpxchg_failure_acq_rel(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
// expected-error@+1 {{failure ordering cannot be 'release' or 'acq_rel'}}
- %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel acq_rel : !llvm.i32
+ %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel acq_rel : i32
llvm.return
}
// -----
-llvm.func @foo(!llvm.i32) -> !llvm.i32
-llvm.func @__gxx_personality_v0(...) -> !llvm.i32
+llvm.func @foo(i32) -> i32
+llvm.func @__gxx_personality_v0(...) -> i32
llvm.func @bad_landingpad(%arg0: !llvm.ptr<ptr<i8>>) attributes { personality = @__gxx_personality_v0} {
- %0 = llvm.mlir.constant(3 : i32) : !llvm.i32
- %1 = llvm.mlir.constant(2 : i32) : !llvm.i32
- %2 = llvm.invoke @foo(%1) to ^bb1 unwind ^bb2 : (!llvm.i32) -> !llvm.i32
+ %0 = llvm.mlir.constant(3 : i32) : i32
+ %1 = llvm.mlir.constant(2 : i32) : i32
+ %2 = llvm.invoke @foo(%1) to ^bb1 unwind ^bb2 : (i32) -> i32
^bb1: // pred: ^bb0
- llvm.return %1 : !llvm.i32
+ llvm.return %1 : i32
^bb2: // pred: ^bb0
// expected-error@+1 {{clause #0 is not a known constant - null, addressof, bitcast}}
- %3 = llvm.landingpad cleanup (catch %1 : !llvm.i32) (catch %arg0 : !llvm.ptr<ptr<i8>>) : !llvm.struct<(ptr<i8>, i32)>
- llvm.return %0 : !llvm.i32
+ %3 = llvm.landingpad cleanup (catch %1 : i32) (catch %arg0 : !llvm.ptr<ptr<i8>>) : !llvm.struct<(ptr<i8>, i32)>
+ llvm.return %0 : i32
}
// -----
-llvm.func @foo(!llvm.i32) -> !llvm.i32
-llvm.func @__gxx_personality_v0(...) -> !llvm.i32
+llvm.func @foo(i32) -> i32
+llvm.func @__gxx_personality_v0(...) -> i32
-llvm.func @caller(%arg0: !llvm.i32) -> !llvm.i32 attributes { personality = @__gxx_personality_v0} {
- %0 = llvm.mlir.constant(1 : i32) : !llvm.i32
- %1 = llvm.alloca %0 x !llvm.ptr<i8> : (!llvm.i32) -> !llvm.ptr<ptr<i8>>
+llvm.func @caller(%arg0: i32) -> i32 attributes { personality = @__gxx_personality_v0} {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ %1 = llvm.alloca %0 x !llvm.ptr<i8> : (i32) -> !llvm.ptr<ptr<i8>>
// expected-note@+1 {{global addresses expected as operand to bitcast used in clauses for landingpad}}
%2 = llvm.bitcast %1 : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
- %3 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (!llvm.i32) -> !llvm.i32
+ %3 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (i32) -> i32
^bb1: // pred: ^bb0
- llvm.return %0 : !llvm.i32
+ llvm.return %0 : i32
^bb2: // pred: ^bb0
// expected-error@+1 {{constant clauses expected}}
%5 = llvm.landingpad (catch %2 : !llvm.ptr<i8>) : !llvm.struct<(ptr<i8>, i32)>
- llvm.return %0 : !llvm.i32
+ llvm.return %0 : i32
}
// -----
-llvm.func @foo(!llvm.i32) -> !llvm.i32
-llvm.func @__gxx_personality_v0(...) -> !llvm.i32
+llvm.func @foo(i32) -> i32
+llvm.func @__gxx_personality_v0(...) -> i32
-llvm.func @caller(%arg0: !llvm.i32) -> !llvm.i32 attributes { personality = @__gxx_personality_v0} {
- %0 = llvm.mlir.constant(1 : i32) : !llvm.i32
- %1 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (!llvm.i32) -> !llvm.i32
+llvm.func @caller(%arg0: i32) -> i32 attributes { personality = @__gxx_personality_v0} {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ %1 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (i32) -> i32
^bb1: // pred: ^bb0
- llvm.return %0 : !llvm.i32
+ llvm.return %0 : i32
^bb2: // pred: ^bb0
// expected-error at +1 {{landingpad instruction expects at least one clause or cleanup attribute}}
%2 = llvm.landingpad : !llvm.struct<(ptr<i8>, i32)>
- llvm.return %0 : !llvm.i32
+ llvm.return %0 : i32
}
// -----
-llvm.func @foo(!llvm.i32) -> !llvm.i32
-llvm.func @__gxx_personality_v0(...) -> !llvm.i32
+llvm.func @foo(i32) -> i32
+llvm.func @__gxx_personality_v0(...) -> i32
-llvm.func @caller(%arg0: !llvm.i32) -> !llvm.i32 attributes { personality = @__gxx_personality_v0 } {
- %0 = llvm.mlir.constant(1 : i32) : !llvm.i32
- %1 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (!llvm.i32) -> !llvm.i32
+llvm.func @caller(%arg0: i32) -> i32 attributes { personality = @__gxx_personality_v0 } {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ %1 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (i32) -> i32
^bb1: // pred: ^bb0
- llvm.return %0 : !llvm.i32
+ llvm.return %0 : i32
^bb2: // pred: ^bb0
%2 = llvm.landingpad cleanup : !llvm.struct<(ptr<i8>, i32)>
// expected-error at +1 {{'llvm.resume' op expects landingpad value as operand}}
- llvm.resume %0 : !llvm.i32
+ llvm.resume %0 : i32
}
// -----
-llvm.func @foo(!llvm.i32) -> !llvm.i32
+llvm.func @foo(i32) -> i32
-llvm.func @caller(%arg0: !llvm.i32) -> !llvm.i32 {
- %0 = llvm.mlir.constant(1 : i32) : !llvm.i32
- %1 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (!llvm.i32) -> !llvm.i32
+llvm.func @caller(%arg0: i32) -> i32 {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ %1 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (i32) -> i32
^bb1: // pred: ^bb0
- llvm.return %0 : !llvm.i32
+ llvm.return %0 : i32
^bb2: // pred: ^bb0
// expected-error at +1 {{llvm.landingpad needs to be in a function with a personality}}
%2 = llvm.landingpad cleanup : !llvm.struct<(ptr<i8>, i32)>
@@ -655,15 +655,15 @@ module attributes {llvm.data_layout = "#vjkr32"} {
// -----
-func @switch_wrong_number_of_weights(%arg0 : !llvm.i32) {
+func @switch_wrong_number_of_weights(%arg0 : i32) {
// expected-error at +1 {{expects number of branch weights to match number of successors: 3 vs 2}}
llvm.switch %arg0, ^bb1 [
- 42: ^bb2(%arg0, %arg0 : !llvm.i32, !llvm.i32)
+ 42: ^bb2(%arg0, %arg0 : i32, i32)
] {branch_weights = dense<[13, 17, 19]> : vector<3xi32>}
^bb1: // pred: ^bb0
llvm.return
-^bb2(%1: !llvm.i32, %2: !llvm.i32): // pred: ^bb0
+^bb2(%1: i32, %2: i32): // pred: ^bb0
llvm.return
}
diff --git a/mlir/test/Dialect/LLVMIR/legalize-for-export.mlir b/mlir/test/Dialect/LLVMIR/legalize-for-export.mlir
index f695f2c362f8..28cd980df504 100644
--- a/mlir/test/Dialect/LLVMIR/legalize-for-export.mlir
+++ b/mlir/test/Dialect/LLVMIR/legalize-for-export.mlir
@@ -4,22 +4,22 @@
// by introducing a new block that forwards its arguments to the original
// successor through an unconditional branch.
// CHECK-LABEL: @repeated_successor_different_args
-llvm.func @repeated_successor_different_args(%arg0: !llvm.i1, %arg1: !llvm.i32, %arg2: !llvm.i32) {
+llvm.func @repeated_successor_different_args(%arg0: i1, %arg1: i32, %arg2: i32) {
// CHECK: llvm.cond_br %{{.*}}, ^[[BB1:.*]]({{.*}}), ^[[BB2:.*]]({{.*}})
- llvm.cond_br %arg0, ^bb1(%arg1: !llvm.i32), ^bb1(%arg2: !llvm.i32)
+ llvm.cond_br %arg0, ^bb1(%arg1: i32), ^bb1(%arg2: i32)
// CHECK: ^[[BB1]]({{.*}}):
-^bb1(%arg3: !llvm.i32):
+^bb1(%arg3: i32):
llvm.return
-// CHECK: ^[[BB2]](%[[ARG:.*]]: !llvm.i32):
-// CHECK: llvm.br ^[[BB1]](%[[ARG]] : !llvm.i32)
+// CHECK: ^[[BB2]](%[[ARG:.*]]: i32):
+// CHECK: llvm.br ^[[BB1]](%[[ARG]] : i32)
}
// Verifies that duplicate successors without arguments do not lead to the
// introduction of new blocks during legalization.
// CHECK-LABEL: @repeated_successor_no_args
-llvm.func @repeated_successor_no_args(%arg0: !llvm.i1) {
+llvm.func @repeated_successor_no_args(%arg0: i1) {
// CHECK: llvm.cond_br
llvm.cond_br %arg0, ^bb1, ^bb1
diff --git a/mlir/test/Dialect/LLVMIR/nvvm.mlir b/mlir/test/Dialect/LLVMIR/nvvm.mlir
index 67d43f7146d5..8bb626617b3d 100644
--- a/mlir/test/Dialect/LLVMIR/nvvm.mlir
+++ b/mlir/test/Dialect/LLVMIR/nvvm.mlir
@@ -1,31 +1,31 @@
// RUN: mlir-opt %s | FileCheck %s
-func @nvvm_special_regs() -> !llvm.i32 {
- // CHECK: nvvm.read.ptx.sreg.tid.x : !llvm.i32
- %0 = nvvm.read.ptx.sreg.tid.x : !llvm.i32
- // CHECK: nvvm.read.ptx.sreg.tid.y : !llvm.i32
- %1 = nvvm.read.ptx.sreg.tid.y : !llvm.i32
- // CHECK: nvvm.read.ptx.sreg.tid.z : !llvm.i32
- %2 = nvvm.read.ptx.sreg.tid.z : !llvm.i32
- // CHECK: nvvm.read.ptx.sreg.ntid.x : !llvm.i32
- %3 = nvvm.read.ptx.sreg.ntid.x : !llvm.i32
- // CHECK: nvvm.read.ptx.sreg.ntid.y : !llvm.i32
- %4 = nvvm.read.ptx.sreg.ntid.y : !llvm.i32
- // CHECK: nvvm.read.ptx.sreg.ntid.z : !llvm.i32
- %5 = nvvm.read.ptx.sreg.ntid.z : !llvm.i32
- // CHECK: nvvm.read.ptx.sreg.ctaid.x : !llvm.i32
- %6 = nvvm.read.ptx.sreg.ctaid.x : !llvm.i32
- // CHECK: nvvm.read.ptx.sreg.ctaid.y : !llvm.i32
- %7 = nvvm.read.ptx.sreg.ctaid.y : !llvm.i32
- // CHECK: nvvm.read.ptx.sreg.ctaid.z : !llvm.i32
- %8 = nvvm.read.ptx.sreg.ctaid.z : !llvm.i32
- // CHECK: nvvm.read.ptx.sreg.nctaid.x : !llvm.i32
- %9 = nvvm.read.ptx.sreg.nctaid.x : !llvm.i32
- // CHECK: nvvm.read.ptx.sreg.nctaid.y : !llvm.i32
- %10 = nvvm.read.ptx.sreg.nctaid.y : !llvm.i32
- // CHECK: nvvm.read.ptx.sreg.nctaid.z : !llvm.i32
- %11 = nvvm.read.ptx.sreg.nctaid.z : !llvm.i32
- llvm.return %0 : !llvm.i32
+func @nvvm_special_regs() -> i32 {
+ // CHECK: nvvm.read.ptx.sreg.tid.x : i32
+ %0 = nvvm.read.ptx.sreg.tid.x : i32
+ // CHECK: nvvm.read.ptx.sreg.tid.y : i32
+ %1 = nvvm.read.ptx.sreg.tid.y : i32
+ // CHECK: nvvm.read.ptx.sreg.tid.z : i32
+ %2 = nvvm.read.ptx.sreg.tid.z : i32
+ // CHECK: nvvm.read.ptx.sreg.ntid.x : i32
+ %3 = nvvm.read.ptx.sreg.ntid.x : i32
+ // CHECK: nvvm.read.ptx.sreg.ntid.y : i32
+ %4 = nvvm.read.ptx.sreg.ntid.y : i32
+ // CHECK: nvvm.read.ptx.sreg.ntid.z : i32
+ %5 = nvvm.read.ptx.sreg.ntid.z : i32
+ // CHECK: nvvm.read.ptx.sreg.ctaid.x : i32
+ %6 = nvvm.read.ptx.sreg.ctaid.x : i32
+ // CHECK: nvvm.read.ptx.sreg.ctaid.y : i32
+ %7 = nvvm.read.ptx.sreg.ctaid.y : i32
+ // CHECK: nvvm.read.ptx.sreg.ctaid.z : i32
+ %8 = nvvm.read.ptx.sreg.ctaid.z : i32
+ // CHECK: nvvm.read.ptx.sreg.nctaid.x : i32
+ %9 = nvvm.read.ptx.sreg.nctaid.x : i32
+ // CHECK: nvvm.read.ptx.sreg.nctaid.y : i32
+ %10 = nvvm.read.ptx.sreg.nctaid.y : i32
+ // CHECK: nvvm.read.ptx.sreg.nctaid.z : i32
+ %11 = nvvm.read.ptx.sreg.nctaid.z : i32
+ llvm.return %0 : i32
}
func @llvm.nvvm.barrier0() {
@@ -35,18 +35,18 @@ func @llvm.nvvm.barrier0() {
}
func @nvvm_shfl(
- %arg0 : !llvm.i32, %arg1 : !llvm.i32, %arg2 : !llvm.i32,
- %arg3 : !llvm.i32, %arg4 : !llvm.float) -> !llvm.i32 {
- // CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.i32
- %0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 : !llvm.i32
+ %arg0 : i32, %arg1 : i32, %arg2 : i32,
+ %arg3 : i32, %arg4 : !llvm.float) -> i32 {
+ // CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : i32
+ %0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 : i32
// CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.float
%1 = nvvm.shfl.sync.bfly %arg0, %arg4, %arg1, %arg2 : !llvm.float
- llvm.return %0 : !llvm.i32
+ llvm.return %0 : i32
}
func @nvvm_shfl_pred(
- %arg0 : !llvm.i32, %arg1 : !llvm.i32, %arg2 : !llvm.i32,
- %arg3 : !llvm.i32, %arg4 : !llvm.float) -> !llvm.struct<(i32, i1)> {
+ %arg0 : i32, %arg1 : i32, %arg2 : i32,
+ %arg3 : i32, %arg4 : !llvm.float) -> !llvm.struct<(i32, i1)> {
// CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.struct<(i32, i1)>
%0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : !llvm.struct<(i32, i1)>
// CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.struct<(float, i1)>
@@ -54,10 +54,10 @@ func @nvvm_shfl_pred(
llvm.return %0 : !llvm.struct<(i32, i1)>
}
-func @nvvm_vote(%arg0 : !llvm.i32, %arg1 : !llvm.i1) -> !llvm.i32 {
- // CHECK: nvvm.vote.ballot.sync %{{.*}}, %{{.*}} : !llvm.i32
- %0 = nvvm.vote.ballot.sync %arg0, %arg1 : !llvm.i32
- llvm.return %0 : !llvm.i32
+func @nvvm_vote(%arg0 : i32, %arg1 : i1) -> i32 {
+ // CHECK: nvvm.vote.ballot.sync %{{.*}}, %{{.*}} : i32
+ %0 = nvvm.vote.ballot.sync %arg0, %arg1 : i32
+ llvm.return %0 : i32
}
func @nvvm_mma(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>,
diff --git a/mlir/test/Dialect/LLVMIR/rocdl.mlir b/mlir/test/Dialect/LLVMIR/rocdl.mlir
index 3f640810c543..caba012286a8 100644
--- a/mlir/test/Dialect/LLVMIR/rocdl.mlir
+++ b/mlir/test/Dialect/LLVMIR/rocdl.mlir
@@ -1,32 +1,32 @@
// RUN: mlir-opt %s | FileCheck %s
-func @rocdl_special_regs() -> !llvm.i32 {
+func @rocdl_special_regs() -> i32 {
// CHECK-LABEL: rocdl_special_regs
- // CHECK: rocdl.workitem.id.x : !llvm.i32
- %0 = rocdl.workitem.id.x : !llvm.i32
- // CHECK: rocdl.workitem.id.y : !llvm.i32
- %1 = rocdl.workitem.id.y : !llvm.i32
- // CHECK: rocdl.workitem.id.z : !llvm.i32
- %2 = rocdl.workitem.id.z : !llvm.i32
- // CHECK: rocdl.workgroup.id.x : !llvm.i32
- %3 = rocdl.workgroup.id.x : !llvm.i32
- // CHECK: rocdl.workgroup.id.y : !llvm.i32
- %4 = rocdl.workgroup.id.y : !llvm.i32
- // CHECK: rocdl.workgroup.id.z : !llvm.i32
- %5 = rocdl.workgroup.id.z : !llvm.i32
- // CHECK: rocdl.workgroup.dim.x : !llvm.i32
- %6 = rocdl.workgroup.dim.x : !llvm.i32
- // CHECK: rocdl.workgroup.dim.y : !llvm.i32
- %7 = rocdl.workgroup.dim.y : !llvm.i32
- // CHECK: rocdl.workgroup.dim.z : !llvm.i32
- %8 = rocdl.workgroup.dim.z : !llvm.i32
- // CHECK: rocdl.grid.dim.x : !llvm.i32
- %9 = rocdl.grid.dim.x : !llvm.i32
- // CHECK: rocdl.grid.dim.y : !llvm.i32
- %10 = rocdl.grid.dim.y : !llvm.i32
- // CHECK: rocdl.grid.dim.z : !llvm.i32
- %11 = rocdl.grid.dim.z : !llvm.i32
- llvm.return %0 : !llvm.i32
+ // CHECK: rocdl.workitem.id.x : i32
+ %0 = rocdl.workitem.id.x : i32
+ // CHECK: rocdl.workitem.id.y : i32
+ %1 = rocdl.workitem.id.y : i32
+ // CHECK: rocdl.workitem.id.z : i32
+ %2 = rocdl.workitem.id.z : i32
+ // CHECK: rocdl.workgroup.id.x : i32
+ %3 = rocdl.workgroup.id.x : i32
+ // CHECK: rocdl.workgroup.id.y : i32
+ %4 = rocdl.workgroup.id.y : i32
+ // CHECK: rocdl.workgroup.id.z : i32
+ %5 = rocdl.workgroup.id.z : i32
+ // CHECK: rocdl.workgroup.dim.x : i32
+ %6 = rocdl.workgroup.dim.x : i32
+ // CHECK: rocdl.workgroup.dim.y : i32
+ %7 = rocdl.workgroup.dim.y : i32
+ // CHECK: rocdl.workgroup.dim.z : i32
+ %8 = rocdl.workgroup.dim.z : i32
+ // CHECK: rocdl.grid.dim.x : i32
+ %9 = rocdl.grid.dim.x : i32
+ // CHECK: rocdl.grid.dim.y : i32
+ %10 = rocdl.grid.dim.y : i32
+ // CHECK: rocdl.grid.dim.z : i32
+ %11 = rocdl.grid.dim.z : i32
+ llvm.return %0 : i32
}
func @rocdl.barrier() {
@@ -36,118 +36,118 @@ func @rocdl.barrier() {
}
func @rocdl.xdlops(%arg0 : !llvm.float, %arg1 : !llvm.float,
- %arg2 : !llvm.vec<32 x float>, %arg3 : !llvm.i32,
+ %arg2 : !llvm.vec<32 x float>, %arg3 : i32,
%arg4 : !llvm.vec<16 x float>, %arg5 : !llvm.vec<4 x float>,
%arg6 : !llvm.vec<4 x half>, %arg7 : !llvm.vec<32 x i32>,
%arg8 : !llvm.vec<16 x i32>, %arg9 : !llvm.vec<4 x i32>,
%arg10 : !llvm.vec<2 x i16>) -> !llvm.vec<32 x float> {
// CHECK-LABEL: rocdl.xdlops
- // CHECK: rocdl.mfma.f32.32x32x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<32 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float>
+ // CHECK: rocdl.mfma.f32.32x32x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<32 x float>, i32, i32, i32) -> !llvm.vec<32 x float>
%r0 = rocdl.mfma.f32.32x32x1f32 %arg0, %arg1, %arg2, %arg3, %arg3, %arg3 :
(!llvm.float, !llvm.float, !llvm.vec<32 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float>
+ i32, i32, i32) -> !llvm.vec<32 x float>
- // CHECK: rocdl.mfma.f32.16x16x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<16 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ // CHECK: rocdl.mfma.f32.16x16x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float>
%r1 = rocdl.mfma.f32.16x16x1f32 %arg0, %arg1, %arg4, %arg3, %arg3, %arg3 :
(!llvm.float, !llvm.float, !llvm.vec<16 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ i32, i32, i32) -> !llvm.vec<16 x float>
- // CHECK: rocdl.mfma.f32.16x16x4f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<4 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ // CHECK: rocdl.mfma.f32.16x16x4f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float>
%r2 = rocdl.mfma.f32.16x16x4f32 %arg0, %arg1, %arg5, %arg3, %arg3, %arg3 :
(!llvm.float, !llvm.float, !llvm.vec<4 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ i32, i32, i32) -> !llvm.vec<4 x float>
- // CHECK: rocdl.mfma.f32.4x4x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<4 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ // CHECK: rocdl.mfma.f32.4x4x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float>
%r3 = rocdl.mfma.f32.4x4x1f32 %arg0, %arg1, %arg5, %arg3, %arg3, %arg3 :
(!llvm.float, !llvm.float, !llvm.vec<4 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ i32, i32, i32) -> !llvm.vec<4 x float>
- // CHECK: rocdl.mfma.f32.32x32x2f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<16 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ // CHECK: rocdl.mfma.f32.32x32x2f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float>
%r4= rocdl.mfma.f32.32x32x2f32 %arg0, %arg1, %arg4, %arg3, %arg3, %arg3 :
(!llvm.float, !llvm.float, !llvm.vec<16 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ i32, i32, i32) -> !llvm.vec<16 x float>
- // CHECK: rocdl.mfma.f32.32x32x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<32 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float>
+ // CHECK: rocdl.mfma.f32.32x32x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<32 x float>, i32, i32, i32) -> !llvm.vec<32 x float>
%r5 = rocdl.mfma.f32.32x32x4f16 %arg6, %arg6, %arg2, %arg3, %arg3, %arg3 :
(!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<32 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float>
+ i32, i32, i32) -> !llvm.vec<32 x float>
- // CHECK: rocdl.mfma.f32.16x16x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ // CHECK: rocdl.mfma.f32.16x16x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float>
%r6 = rocdl.mfma.f32.16x16x4f16 %arg6, %arg6, %arg4, %arg3, %arg3, %arg3 :
(!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ i32, i32, i32) -> !llvm.vec<16 x float>
- // CHECK: rocdl.mfma.f32.4x4x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ // CHECK: rocdl.mfma.f32.4x4x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float>
%r7 = rocdl.mfma.f32.4x4x4f16 %arg6, %arg6, %arg5, %arg3, %arg3, %arg3 :
(!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ i32, i32, i32) -> !llvm.vec<4 x float>
- // CHECK: rocdl.mfma.f32.32x32x8f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ // CHECK: rocdl.mfma.f32.32x32x8f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float>
%r8 = rocdl.mfma.f32.32x32x8f16 %arg6, %arg6, %arg4, %arg3, %arg3, %arg3 :
(!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ i32, i32, i32) -> !llvm.vec<16 x float>
- // CHECK: rocdl.mfma.f32.16x16x16f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ // CHECK: rocdl.mfma.f32.16x16x16f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float>
%r9 = rocdl.mfma.f32.16x16x16f16 %arg6, %arg6, %arg5, %arg3, %arg3, %arg3 :
(!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ i32, i32, i32) -> !llvm.vec<4 x float>
- // CHECK: rocdl.mfma.i32.32x32x4i8 {{.*}} : (!llvm.i32, !llvm.i32, !llvm.vec<32 x i32>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x i32>
+ // CHECK: rocdl.mfma.i32.32x32x4i8 {{.*}} : (i32, i32, !llvm.vec<32 x i32>, i32, i32, i32) -> !llvm.vec<32 x i32>
%r10 = rocdl.mfma.i32.32x32x4i8 %arg3, %arg3, %arg7, %arg3, %arg3, %arg3 :
- (!llvm.i32, !llvm.i32, !llvm.vec<32 x i32>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x i32>
+ (i32, i32, !llvm.vec<32 x i32>,
+ i32, i32, i32) -> !llvm.vec<32 x i32>
- // CHECK: rocdl.mfma.i32.16x16x4i8 {{.*}} : (!llvm.i32, !llvm.i32, !llvm.vec<16 x i32>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x i32>
+ // CHECK: rocdl.mfma.i32.16x16x4i8 {{.*}} : (i32, i32, !llvm.vec<16 x i32>, i32, i32, i32) -> !llvm.vec<16 x i32>
%r11 = rocdl.mfma.i32.16x16x4i8 %arg3, %arg3, %arg8, %arg3, %arg3, %arg3 :
- (!llvm.i32, !llvm.i32, !llvm.vec<16 x i32>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x i32>
+ (i32, i32, !llvm.vec<16 x i32>,
+ i32, i32, i32) -> !llvm.vec<16 x i32>
- // CHECK: rocdl.mfma.i32.4x4x4i8 {{.*}} : (!llvm.i32, !llvm.i32, !llvm.vec<4 x i32>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x i32>
+ // CHECK: rocdl.mfma.i32.4x4x4i8 {{.*}} : (i32, i32, !llvm.vec<4 x i32>, i32, i32, i32) -> !llvm.vec<4 x i32>
%r12 = rocdl.mfma.i32.4x4x4i8 %arg3, %arg3, %arg9, %arg3, %arg3, %arg3 :
- (!llvm.i32, !llvm.i32, !llvm.vec<4 x i32>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x i32>
+ (i32, i32, !llvm.vec<4 x i32>,
+ i32, i32, i32) -> !llvm.vec<4 x i32>
- // CHECK: rocdl.mfma.i32.32x32x8i8 {{.*}} : (!llvm.i32, !llvm.i32, !llvm.vec<16 x i32>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x i32>
+ // CHECK: rocdl.mfma.i32.32x32x8i8 {{.*}} : (i32, i32, !llvm.vec<16 x i32>, i32, i32, i32) -> !llvm.vec<16 x i32>
%r13 = rocdl.mfma.i32.32x32x8i8 %arg3, %arg3, %arg8, %arg3, %arg3, %arg3 :
- (!llvm.i32, !llvm.i32, !llvm.vec<16 x i32>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x i32>
+ (i32, i32, !llvm.vec<16 x i32>,
+ i32, i32, i32) -> !llvm.vec<16 x i32>
- // CHECK: rocdl.mfma.i32.16x16x16i8 {{.*}} : (!llvm.i32, !llvm.i32, !llvm.vec<4 x i32>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x i32>
+ // CHECK: rocdl.mfma.i32.16x16x16i8 {{.*}} : (i32, i32, !llvm.vec<4 x i32>, i32, i32, i32) -> !llvm.vec<4 x i32>
%r14 = rocdl.mfma.i32.16x16x16i8 %arg3, %arg3, %arg9, %arg3, %arg3, %arg3 :
- (!llvm.i32, !llvm.i32, !llvm.vec<4 x i32>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x i32>
+ (i32, i32, !llvm.vec<4 x i32>,
+ i32, i32, i32) -> !llvm.vec<4 x i32>
- // CHECK: rocdl.mfma.f32.32x32x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float>
+ // CHECK: rocdl.mfma.f32.32x32x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x float>, i32, i32, i32) -> !llvm.vec<32 x float>
%r15 = rocdl.mfma.f32.32x32x2bf16 %arg10, %arg10, %arg2, %arg3, %arg3, %arg3 :
(!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float>
+ i32, i32, i32) -> !llvm.vec<32 x float>
- // CHECK: rocdl.mfma.f32.16x16x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ // CHECK: rocdl.mfma.f32.16x16x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float>
%r16 = rocdl.mfma.f32.16x16x2bf16 %arg10, %arg10, %arg4, %arg3, %arg3, %arg3 :
(!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ i32, i32, i32) -> !llvm.vec<16 x float>
- // CHECK: rocdl.mfma.f32.4x4x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ // CHECK: rocdl.mfma.f32.4x4x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float>
%r17 = rocdl.mfma.f32.4x4x2bf16 %arg10, %arg10, %arg5, %arg3, %arg3, %arg3 :
(!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ i32, i32, i32) -> !llvm.vec<4 x float>
- // CHECK: rocdl.mfma.f32.32x32x4bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ // CHECK: rocdl.mfma.f32.32x32x4bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float>
%r18 = rocdl.mfma.f32.32x32x4bf16 %arg10, %arg10, %arg4, %arg3, %arg3, %arg3 :
(!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ i32, i32, i32) -> !llvm.vec<16 x float>
- // CHECK: rocdl.mfma.f32.16x16x8bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ // CHECK: rocdl.mfma.f32.16x16x8bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float>
%r19 = rocdl.mfma.f32.16x16x8bf16 %arg10, %arg10, %arg5, %arg3, %arg3, %arg3 :
(!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ i32, i32, i32) -> !llvm.vec<4 x float>
llvm.return %r0 : !llvm.vec<32 x float>
}
-llvm.func @rocdl.mubuf(%rsrc : !llvm.vec<4 x i32>, %vindex : !llvm.i32,
- %offset : !llvm.i32, %glc : !llvm.i1,
- %slc : !llvm.i1, %vdata1 : !llvm.vec<1 x float>,
+llvm.func @rocdl.mubuf(%rsrc : !llvm.vec<4 x i32>, %vindex : i32,
+ %offset : i32, %glc : i1,
+ %slc : i1, %vdata1 : !llvm.vec<1 x float>,
%vdata2 : !llvm.vec<2 x float>, %vdata4 : !llvm.vec<4 x float>) {
// CHECK-LABEL: rocdl.mubuf
// CHECK: %{{.*}} = rocdl.buffer.load %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<1 x float>
diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
index 05d83810e179..233cc385c253 100644
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -1,28 +1,28 @@
// RUN: mlir-opt %s | mlir-opt | FileCheck %s
// CHECK-LABEL: func @ops
-// CHECK-SAME: (%[[I32:.*]]: !llvm.i32, %[[FLOAT:.*]]: !llvm.float, %[[I8PTR1:.*]]: !llvm.ptr<i8>, %[[I8PTR2:.*]]: !llvm.ptr<i8>, %[[BOOL:.*]]: !llvm.i1)
-func @ops(%arg0: !llvm.i32, %arg1: !llvm.float,
+// CHECK-SAME: (%[[I32:.*]]: i32, %[[FLOAT:.*]]: !llvm.float, %[[I8PTR1:.*]]: !llvm.ptr<i8>, %[[I8PTR2:.*]]: !llvm.ptr<i8>, %[[BOOL:.*]]: i1)
+func @ops(%arg0: i32, %arg1: !llvm.float,
%arg2: !llvm.ptr<i8>, %arg3: !llvm.ptr<i8>,
- %arg4: !llvm.i1) {
+ %arg4: i1) {
// Integer arithmetic binary operations.
//
-// CHECK: {{.*}} = llvm.add %[[I32]], %[[I32]] : !llvm.i32
-// CHECK: {{.*}} = llvm.sub %[[I32]], %[[I32]] : !llvm.i32
-// CHECK: {{.*}} = llvm.mul %[[I32]], %[[I32]] : !llvm.i32
-// CHECK: {{.*}} = llvm.udiv %[[I32]], %[[I32]] : !llvm.i32
-// CHECK: {{.*}} = llvm.sdiv %[[I32]], %[[I32]] : !llvm.i32
-// CHECK: {{.*}} = llvm.urem %[[I32]], %[[I32]] : !llvm.i32
-// CHECK: {{.*}} = llvm.srem %[[I32]], %[[I32]] : !llvm.i32
-// CHECK: {{.*}} = llvm.icmp "ne" %[[I32]], %[[I32]] : !llvm.i32
- %0 = llvm.add %arg0, %arg0 : !llvm.i32
- %1 = llvm.sub %arg0, %arg0 : !llvm.i32
- %2 = llvm.mul %arg0, %arg0 : !llvm.i32
- %3 = llvm.udiv %arg0, %arg0 : !llvm.i32
- %4 = llvm.sdiv %arg0, %arg0 : !llvm.i32
- %5 = llvm.urem %arg0, %arg0 : !llvm.i32
- %6 = llvm.srem %arg0, %arg0 : !llvm.i32
- %7 = llvm.icmp "ne" %arg0, %arg0 : !llvm.i32
+// CHECK: {{.*}} = llvm.add %[[I32]], %[[I32]] : i32
+// CHECK: {{.*}} = llvm.sub %[[I32]], %[[I32]] : i32
+// CHECK: {{.*}} = llvm.mul %[[I32]], %[[I32]] : i32
+// CHECK: {{.*}} = llvm.udiv %[[I32]], %[[I32]] : i32
+// CHECK: {{.*}} = llvm.sdiv %[[I32]], %[[I32]] : i32
+// CHECK: {{.*}} = llvm.urem %[[I32]], %[[I32]] : i32
+// CHECK: {{.*}} = llvm.srem %[[I32]], %[[I32]] : i32
+// CHECK: {{.*}} = llvm.icmp "ne" %[[I32]], %[[I32]] : i32
+ %0 = llvm.add %arg0, %arg0 : i32
+ %1 = llvm.sub %arg0, %arg0 : i32
+ %2 = llvm.mul %arg0, %arg0 : i32
+ %3 = llvm.udiv %arg0, %arg0 : i32
+ %4 = llvm.sdiv %arg0, %arg0 : i32
+ %5 = llvm.urem %arg0, %arg0 : i32
+ %6 = llvm.srem %arg0, %arg0 : i32
+ %7 = llvm.icmp "ne" %arg0, %arg0 : i32
// Floating point binary operations.
//
@@ -39,29 +39,29 @@ func @ops(%arg0: !llvm.i32, %arg1: !llvm.float,
// Memory-related operations.
//
-// CHECK-NEXT: %[[ALLOCA:.*]] = llvm.alloca %[[I32]] x !llvm.double : (!llvm.i32) -> !llvm.ptr<double>
-// CHECK-NEXT: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][%[[I32]], %[[I32]]] : (!llvm.ptr<double>, !llvm.i32, !llvm.i32) -> !llvm.ptr<double>
+// CHECK-NEXT: %[[ALLOCA:.*]] = llvm.alloca %[[I32]] x !llvm.double : (i32) -> !llvm.ptr<double>
+// CHECK-NEXT: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][%[[I32]], %[[I32]]] : (!llvm.ptr<double>, i32, i32) -> !llvm.ptr<double>
// CHECK-NEXT: %[[VALUE:.*]] = llvm.load %[[GEP]] : !llvm.ptr<double>
// CHECK-NEXT: llvm.store %[[VALUE]], %[[ALLOCA]] : !llvm.ptr<double>
// CHECK-NEXT: %{{.*}} = llvm.bitcast %[[ALLOCA]] : !llvm.ptr<double> to !llvm.ptr<i64>
- %13 = llvm.alloca %arg0 x !llvm.double : (!llvm.i32) -> !llvm.ptr<double>
- %14 = llvm.getelementptr %13[%arg0, %arg0] : (!llvm.ptr<double>, !llvm.i32, !llvm.i32) -> !llvm.ptr<double>
+ %13 = llvm.alloca %arg0 x !llvm.double : (i32) -> !llvm.ptr<double>
+ %14 = llvm.getelementptr %13[%arg0, %arg0] : (!llvm.ptr<double>, i32, i32) -> !llvm.ptr<double>
%15 = llvm.load %14 : !llvm.ptr<double>
llvm.store %15, %13 : !llvm.ptr<double>
%16 = llvm.bitcast %13 : !llvm.ptr<double> to !llvm.ptr<i64>
// Function call-related operations.
//
-// CHECK: %[[STRUCT:.*]] = llvm.call @foo(%[[I32]]) : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
+// CHECK: %[[STRUCT:.*]] = llvm.call @foo(%[[I32]]) : (i32) -> !llvm.struct<(i32, double, i32)>
// CHECK: %[[VALUE:.*]] = llvm.extractvalue %[[STRUCT]][0] : !llvm.struct<(i32, double, i32)>
// CHECK: %[[NEW_STRUCT:.*]] = llvm.insertvalue %[[VALUE]], %[[STRUCT]][2] : !llvm.struct<(i32, double, i32)>
// CHECK: %[[FUNC:.*]] = llvm.mlir.addressof @foo : !llvm.ptr<func<struct<(i32, double, i32)> (i32)>>
-// CHECK: %{{.*}} = llvm.call %[[FUNC]](%[[I32]]) : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
- %17 = llvm.call @foo(%arg0) : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
+// CHECK: %{{.*}} = llvm.call %[[FUNC]](%[[I32]]) : (i32) -> !llvm.struct<(i32, double, i32)>
+ %17 = llvm.call @foo(%arg0) : (i32) -> !llvm.struct<(i32, double, i32)>
%18 = llvm.extractvalue %17[0] : !llvm.struct<(i32, double, i32)>
%19 = llvm.insertvalue %18, %17[2] : !llvm.struct<(i32, double, i32)>
%20 = llvm.mlir.addressof @foo : !llvm.ptr<func<struct<(i32, double, i32)> (i32)>>
- %21 = llvm.call %20(%arg0) : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
+ %21 = llvm.call %20(%arg0) : (i32) -> !llvm.struct<(i32, double, i32)>
// Terminator operations and their successors.
@@ -77,9 +77,9 @@ func @ops(%arg0: !llvm.i32, %arg1: !llvm.float,
// CHECK: ^[[BB2]]
^bb2:
// CHECK: %{{.*}} = llvm.mlir.undef : !llvm.struct<(i32, double, i32)>
-// CHECK: %{{.*}} = llvm.mlir.constant(42 : i64) : !llvm.i47
+// CHECK: %{{.*}} = llvm.mlir.constant(42 : i64) : i47
%22 = llvm.mlir.undef : !llvm.struct<(i32, double, i32)>
- %23 = llvm.mlir.constant(42) : !llvm.i47
+ %23 = llvm.mlir.constant(42) : i47
// CHECK: llvm.switch %0, ^[[BB3]] [
// CHECK-NEXT: 1: ^[[BB4:.*]],
// CHECK-NEXT: 2: ^[[BB5:.*]],
@@ -116,15 +116,15 @@ func @ops(%arg0: !llvm.i32, %arg1: !llvm.float,
// CHECK: ^[[BB7]]
^bb7:
// Misc operations.
-// CHECK: %{{.*}} = llvm.select %{{.*}}, %{{.*}}, %{{.*}} : !llvm.i1, !llvm.i32
- %24 = llvm.select %7, %0, %1 : !llvm.i1, !llvm.i32
+// CHECK: %{{.*}} = llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, i32
+ %24 = llvm.select %7, %0, %1 : i1, i32
// Integer to pointer and pointer to integer conversions.
//
-// CHECK: %[[PTR:.*]] = llvm.inttoptr %[[I32]] : !llvm.i32 to !llvm.ptr<i32>
-// CHECK: %{{.*}} = llvm.ptrtoint %[[PTR]] : !llvm.ptr<i32> to !llvm.i32
- %25 = llvm.inttoptr %arg0 : !llvm.i32 to !llvm.ptr<i32>
- %26 = llvm.ptrtoint %25 : !llvm.ptr<i32> to !llvm.i32
+// CHECK: %[[PTR:.*]] = llvm.inttoptr %[[I32]] : i32 to !llvm.ptr<i32>
+// CHECK: %{{.*}} = llvm.ptrtoint %[[PTR]] : !llvm.ptr<i32> to i32
+ %25 = llvm.inttoptr %arg0 : i32 to !llvm.ptr<i32>
+ %26 = llvm.ptrtoint %25 : !llvm.ptr<i32> to i32
// Extended and Quad floating point
//
@@ -142,53 +142,53 @@ func @ops(%arg0: !llvm.i32, %arg1: !llvm.float,
// CHECK: "llvm.intr.pow"(%[[FLOAT]], %[[FLOAT]]) : (!llvm.float, !llvm.float) -> !llvm.float
%31 = "llvm.intr.pow"(%arg1, %arg1) : (!llvm.float, !llvm.float) -> !llvm.float
-// CHECK: "llvm.intr.bitreverse"(%{{.*}}) : (!llvm.i32) -> !llvm.i32
- %32 = "llvm.intr.bitreverse"(%arg0) : (!llvm.i32) -> !llvm.i32
+// CHECK: "llvm.intr.bitreverse"(%{{.*}}) : (i32) -> i32
+ %32 = "llvm.intr.bitreverse"(%arg0) : (i32) -> i32
-// CHECK: "llvm.intr.ctpop"(%{{.*}}) : (!llvm.i32) -> !llvm.i32
- %33 = "llvm.intr.ctpop"(%arg0) : (!llvm.i32) -> !llvm.i32
+// CHECK: "llvm.intr.ctpop"(%{{.*}}) : (i32) -> i32
+ %33 = "llvm.intr.ctpop"(%arg0) : (i32) -> i32
-// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.i32, !llvm.i1) -> ()
- "llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg4) : (!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.i32, !llvm.i1) -> ()
+// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32, i1) -> ()
+ "llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg4) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32, i1) -> ()
-// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.i32, !llvm.i1) -> ()
- "llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg4) : (!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.i32, !llvm.i1) -> ()
+// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32, i1) -> ()
+ "llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg4) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32, i1) -> ()
// CHECK: %[[SZ:.*]] = llvm.mlir.constant
- %sz = llvm.mlir.constant(10: i64) : !llvm.i64
-// CHECK: "llvm.intr.memcpy.inline"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.i64, !llvm.i1) -> ()
- "llvm.intr.memcpy.inline"(%arg2, %arg3, %sz, %arg4) : (!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.i64, !llvm.i1) -> ()
+ %sz = llvm.mlir.constant(10: i64) : i64
+// CHECK: "llvm.intr.memcpy.inline"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i64, i1) -> ()
+ "llvm.intr.memcpy.inline"(%arg2, %arg3, %sz, %arg4) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i64, i1) -> ()
// CHECK: llvm.return
llvm.return
}
// An larger self-contained function.
-// CHECK-LABEL: llvm.func @foo(%{{.*}}: !llvm.i32) -> !llvm.struct<(i32, double, i32)> {
-llvm.func @foo(%arg0: !llvm.i32) -> !llvm.struct<(i32, double, i32)> {
-// CHECK: %[[V0:.*]] = llvm.mlir.constant(3 : i64) : !llvm.i32
-// CHECK: %[[V1:.*]] = llvm.mlir.constant(3 : i64) : !llvm.i32
+// CHECK-LABEL: llvm.func @foo(%{{.*}}: i32) -> !llvm.struct<(i32, double, i32)> {
+llvm.func @foo(%arg0: i32) -> !llvm.struct<(i32, double, i32)> {
+// CHECK: %[[V0:.*]] = llvm.mlir.constant(3 : i64) : i32
+// CHECK: %[[V1:.*]] = llvm.mlir.constant(3 : i64) : i32
// CHECK: %[[V2:.*]] = llvm.mlir.constant(4.200000e+01 : f64) : !llvm.double
// CHECK: %[[V3:.*]] = llvm.mlir.constant(4.200000e+01 : f64) : !llvm.double
-// CHECK: %[[V4:.*]] = llvm.add %[[V0]], %[[V1]] : !llvm.i32
-// CHECK: %[[V5:.*]] = llvm.mul %[[V4]], %[[V1]] : !llvm.i32
+// CHECK: %[[V4:.*]] = llvm.add %[[V0]], %[[V1]] : i32
+// CHECK: %[[V5:.*]] = llvm.mul %[[V4]], %[[V1]] : i32
// CHECK: %[[V6:.*]] = llvm.fadd %[[V2]], %[[V3]] : !llvm.double
// CHECK: %[[V7:.*]] = llvm.fsub %[[V3]], %[[V6]] : !llvm.double
-// CHECK: %[[V8:.*]] = llvm.mlir.constant(1 : i64) : !llvm.i1
-// CHECK: llvm.cond_br %[[V8]], ^[[BB1:.*]](%[[V4]] : !llvm.i32), ^[[BB2:.*]](%[[V4]] : !llvm.i32)
- %0 = llvm.mlir.constant(3) : !llvm.i32
- %1 = llvm.mlir.constant(3) : !llvm.i32
+// CHECK: %[[V8:.*]] = llvm.mlir.constant(1 : i64) : i1
+// CHECK: llvm.cond_br %[[V8]], ^[[BB1:.*]](%[[V4]] : i32), ^[[BB2:.*]](%[[V4]] : i32)
+ %0 = llvm.mlir.constant(3) : i32
+ %1 = llvm.mlir.constant(3) : i32
%2 = llvm.mlir.constant(4.200000e+01) : !llvm.double
%3 = llvm.mlir.constant(4.200000e+01) : !llvm.double
- %4 = llvm.add %0, %1 : !llvm.i32
- %5 = llvm.mul %4, %1 : !llvm.i32
+ %4 = llvm.add %0, %1 : i32
+ %5 = llvm.mul %4, %1 : i32
%6 = llvm.fadd %2, %3 : !llvm.double
%7 = llvm.fsub %3, %6 : !llvm.double
- %8 = llvm.mlir.constant(1) : !llvm.i1
- llvm.cond_br %8, ^bb1(%4 : !llvm.i32), ^bb2(%4 : !llvm.i32)
+ %8 = llvm.mlir.constant(1) : i1
+ llvm.cond_br %8, ^bb1(%4 : i32), ^bb2(%4 : i32)
-// CHECK:^[[BB1]](%[[V9:.*]]: !llvm.i32):
-// CHECK: %[[V10:.*]] = llvm.call @foo(%[[V9]]) : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
+// CHECK:^[[BB1]](%[[V9:.*]]: i32):
+// CHECK: %[[V10:.*]] = llvm.call @foo(%[[V9]]) : (i32) -> !llvm.struct<(i32, double, i32)>
// CHECK: %[[V11:.*]] = llvm.extractvalue %[[V10]][0] : !llvm.struct<(i32, double, i32)>
// CHECK: %[[V12:.*]] = llvm.extractvalue %[[V10]][1] : !llvm.struct<(i32, double, i32)>
// CHECK: %[[V13:.*]] = llvm.extractvalue %[[V10]][2] : !llvm.struct<(i32, double, i32)>
@@ -197,8 +197,8 @@ llvm.func @foo(%arg0: !llvm.i32) -> !llvm.struct<(i32, double, i32)> {
// CHECK: %[[V16:.*]] = llvm.insertvalue %[[V7]], %[[V15]][1] : !llvm.struct<(i32, double, i32)>
// CHECK: %[[V17:.*]] = llvm.insertvalue %[[V11]], %[[V16]][2] : !llvm.struct<(i32, double, i32)>
// CHECK: llvm.return %[[V17]] : !llvm.struct<(i32, double, i32)>
-^bb1(%9: !llvm.i32):
- %10 = llvm.call @foo(%9) : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
+^bb1(%9: i32):
+ %10 = llvm.call @foo(%9) : (i32) -> !llvm.struct<(i32, double, i32)>
%11 = llvm.extractvalue %10[0] : !llvm.struct<(i32, double, i32)>
%12 = llvm.extractvalue %10[1] : !llvm.struct<(i32, double, i32)>
%13 = llvm.extractvalue %10[2] : !llvm.struct<(i32, double, i32)>
@@ -208,13 +208,13 @@ llvm.func @foo(%arg0: !llvm.i32) -> !llvm.struct<(i32, double, i32)> {
%17 = llvm.insertvalue %11, %16[2] : !llvm.struct<(i32, double, i32)>
llvm.return %17 : !llvm.struct<(i32, double, i32)>
-// CHECK:^[[BB2]](%[[V18:.*]]: !llvm.i32):
+// CHECK:^[[BB2]](%[[V18:.*]]: i32):
// CHECK: %[[V19:.*]] = llvm.mlir.undef : !llvm.struct<(i32, double, i32)>
// CHECK: %[[V20:.*]] = llvm.insertvalue %[[V18]], %[[V19]][0] : !llvm.struct<(i32, double, i32)>
// CHECK: %[[V21:.*]] = llvm.insertvalue %[[V7]], %[[V20]][1] : !llvm.struct<(i32, double, i32)>
// CHECK: %[[V22:.*]] = llvm.insertvalue %[[V5]], %[[V21]][2] : !llvm.struct<(i32, double, i32)>
// CHECK: llvm.return %[[V22]] : !llvm.struct<(i32, double, i32)>
-^bb2(%18: !llvm.i32):
+^bb2(%18: i32):
%19 = llvm.mlir.undef : !llvm.struct<(i32, double, i32)>
%20 = llvm.insertvalue %18, %19[0] : !llvm.struct<(i32, double, i32)>
%21 = llvm.insertvalue %7, %20[1] : !llvm.struct<(i32, double, i32)>
@@ -223,40 +223,40 @@ llvm.func @foo(%arg0: !llvm.i32) -> !llvm.struct<(i32, double, i32)> {
}
// CHECK-LABEL: @casts
-// CHECK-SAME: (%[[I32:.*]]: !llvm.i32, %[[I64:.*]]: !llvm.i64, %[[V4I32:.*]]: !llvm.vec<4 x i32>, %[[V4I64:.*]]: !llvm.vec<4 x i64>, %[[I32PTR:.*]]: !llvm.ptr<i32>)
-func @casts(%arg0: !llvm.i32, %arg1: !llvm.i64, %arg2: !llvm.vec<4 x i32>,
+// CHECK-SAME: (%[[I32:.*]]: i32, %[[I64:.*]]: i64, %[[V4I32:.*]]: !llvm.vec<4 x i32>, %[[V4I64:.*]]: !llvm.vec<4 x i64>, %[[I32PTR:.*]]: !llvm.ptr<i32>)
+func @casts(%arg0: i32, %arg1: i64, %arg2: !llvm.vec<4 x i32>,
%arg3: !llvm.vec<4 x i64>, %arg4: !llvm.ptr<i32>) {
-// CHECK: = llvm.sext %[[I32]] : !llvm.i32 to !llvm.i56
- %0 = llvm.sext %arg0 : !llvm.i32 to !llvm.i56
-// CHECK: = llvm.zext %[[I32]] : !llvm.i32 to !llvm.i64
- %1 = llvm.zext %arg0 : !llvm.i32 to !llvm.i64
-// CHECK: = llvm.trunc %[[I64]] : !llvm.i64 to !llvm.i56
- %2 = llvm.trunc %arg1 : !llvm.i64 to !llvm.i56
+// CHECK: = llvm.sext %[[I32]] : i32 to i56
+ %0 = llvm.sext %arg0 : i32 to i56
+// CHECK: = llvm.zext %[[I32]] : i32 to i64
+ %1 = llvm.zext %arg0 : i32 to i64
+// CHECK: = llvm.trunc %[[I64]] : i64 to i56
+ %2 = llvm.trunc %arg1 : i64 to i56
// CHECK: = llvm.sext %[[V4I32]] : !llvm.vec<4 x i32> to !llvm.vec<4 x i56>
%3 = llvm.sext %arg2 : !llvm.vec<4 x i32> to !llvm.vec<4 x i56>
// CHECK: = llvm.zext %[[V4I32]] : !llvm.vec<4 x i32> to !llvm.vec<4 x i64>
%4 = llvm.zext %arg2 : !llvm.vec<4 x i32> to !llvm.vec<4 x i64>
// CHECK: = llvm.trunc %[[V4I64]] : !llvm.vec<4 x i64> to !llvm.vec<4 x i56>
%5 = llvm.trunc %arg3 : !llvm.vec<4 x i64> to !llvm.vec<4 x i56>
-// CHECK: = llvm.sitofp %[[I32]] : !llvm.i32 to !llvm.float
- %6 = llvm.sitofp %arg0 : !llvm.i32 to !llvm.float
-// CHECK: %[[FLOAT:.*]] = llvm.uitofp %[[I32]] : !llvm.i32 to !llvm.float
- %7 = llvm.uitofp %arg0 : !llvm.i32 to !llvm.float
-// CHECK: = llvm.fptosi %[[FLOAT]] : !llvm.float to !llvm.i32
- %8 = llvm.fptosi %7 : !llvm.float to !llvm.i32
-// CHECK: = llvm.fptoui %[[FLOAT]] : !llvm.float to !llvm.i32
- %9 = llvm.fptoui %7 : !llvm.float to !llvm.i32
+// CHECK: = llvm.sitofp %[[I32]] : i32 to !llvm.float
+ %6 = llvm.sitofp %arg0 : i32 to !llvm.float
+// CHECK: %[[FLOAT:.*]] = llvm.uitofp %[[I32]] : i32 to !llvm.float
+ %7 = llvm.uitofp %arg0 : i32 to !llvm.float
+// CHECK: = llvm.fptosi %[[FLOAT]] : !llvm.float to i32
+ %8 = llvm.fptosi %7 : !llvm.float to i32
+// CHECK: = llvm.fptoui %[[FLOAT]] : !llvm.float to i32
+ %9 = llvm.fptoui %7 : !llvm.float to i32
// CHECK: = llvm.addrspacecast %[[I32PTR]] : !llvm.ptr<i32> to !llvm.ptr<i32, 2>
%10 = llvm.addrspacecast %arg4 : !llvm.ptr<i32> to !llvm.ptr<i32, 2>
llvm.return
}
// CHECK-LABEL: @vect
-func @vect(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i32, %arg2: !llvm.float) {
+func @vect(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) {
// CHECK: = llvm.extractelement {{.*}} : !llvm.vec<4 x float>
- %0 = llvm.extractelement %arg0[%arg1 : !llvm.i32] : !llvm.vec<4 x float>
+ %0 = llvm.extractelement %arg0[%arg1 : i32] : !llvm.vec<4 x float>
// CHECK: = llvm.insertelement {{.*}} : !llvm.vec<4 x float>
- %1 = llvm.insertelement %arg2, %arg0[%arg1 : !llvm.i32] : !llvm.vec<4 x float>
+ %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : !llvm.vec<4 x float>
// CHECK: = llvm.shufflevector {{.*}} [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
%2 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
// CHECK: = llvm.mlir.constant(dense<1.000000e+00> : vector<4xf32>) : !llvm.vec<4 x float>
@@ -265,11 +265,11 @@ func @vect(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i32, %arg2: !llvm.float) {
}
// CHECK-LABEL: @alloca
-func @alloca(%size : !llvm.i64) {
- // CHECK: llvm.alloca %{{.*}} x !llvm.i32 : (!llvm.i64) -> !llvm.ptr<i32>
- llvm.alloca %size x !llvm.i32 {alignment = 0} : (!llvm.i64) -> (!llvm.ptr<i32>)
- // CHECK: llvm.alloca %{{.*}} x !llvm.i32 {alignment = 8 : i64} : (!llvm.i64) -> !llvm.ptr<i32>
- llvm.alloca %size x !llvm.i32 {alignment = 8} : (!llvm.i64) -> (!llvm.ptr<i32>)
+func @alloca(%size : i64) {
+ // CHECK: llvm.alloca %{{.*}} x i32 : (i64) -> !llvm.ptr<i32>
+ llvm.alloca %size x i32 {alignment = 0} : (i64) -> (!llvm.ptr<i32>)
+ // CHECK: llvm.alloca %{{.*}} x i32 {alignment = 8 : i64} : (i64) -> !llvm.ptr<i32>
+ llvm.alloca %size x i32 {alignment = 8} : (i64) -> (!llvm.ptr<i32>)
llvm.return
}
@@ -290,38 +290,38 @@ func @atomicrmw(%ptr : !llvm.ptr<float>, %val : !llvm.float) {
}
// CHECK-LABEL: @cmpxchg
-func @cmpxchg(%ptr : !llvm.ptr<i32>, %cmp : !llvm.i32, %new : !llvm.i32) {
- // CHECK: llvm.cmpxchg %{{.*}}, %{{.*}}, %{{.*}} acq_rel monotonic : !llvm.i32
- %0 = llvm.cmpxchg %ptr, %cmp, %new acq_rel monotonic : !llvm.i32
+func @cmpxchg(%ptr : !llvm.ptr<i32>, %cmp : i32, %new : i32) {
+ // CHECK: llvm.cmpxchg %{{.*}}, %{{.*}}, %{{.*}} acq_rel monotonic : i32
+ %0 = llvm.cmpxchg %ptr, %cmp, %new acq_rel monotonic : i32
llvm.return
}
llvm.mlir.global external constant @_ZTIi() : !llvm.ptr<i8>
llvm.func @bar(!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.ptr<i8>)
-llvm.func @__gxx_personality_v0(...) -> !llvm.i32
+llvm.func @__gxx_personality_v0(...) -> i32
// CHECK-LABEL: @invokeLandingpad
-llvm.func @invokeLandingpad() -> !llvm.i32 attributes { personality = @__gxx_personality_v0 } {
-// CHECK: %[[a0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %{{.*}} = llvm.mlir.constant(3 : i32) : !llvm.i32
+llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personality_v0 } {
+// CHECK: %[[a0:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %{{.*}} = llvm.mlir.constant(3 : i32) : i32
// CHECK: %[[a2:.*]] = llvm.mlir.constant("\01") : !llvm.array<1 x i8>
// CHECK: %[[a3:.*]] = llvm.mlir.null : !llvm.ptr<ptr<i8>>
// CHECK: %[[a4:.*]] = llvm.mlir.null : !llvm.ptr<i8>
// CHECK: %[[a5:.*]] = llvm.mlir.addressof @_ZTIi : !llvm.ptr<ptr<i8>>
// CHECK: %[[a6:.*]] = llvm.bitcast %[[a5]] : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
-// CHECK: %[[a7:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
-// CHECK: %[[a8:.*]] = llvm.alloca %[[a7]] x !llvm.i8 : (!llvm.i32) -> !llvm.ptr<i8>
-// CHECK: %{{.*}} = llvm.invoke @foo(%[[a7]]) to ^[[BB2:.*]] unwind ^[[BB1:.*]] : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
- %0 = llvm.mlir.constant(0 : i32) : !llvm.i32
- %1 = llvm.mlir.constant(3 : i32) : !llvm.i32
+// CHECK: %[[a7:.*]] = llvm.mlir.constant(1 : i32) : i32
+// CHECK: %[[a8:.*]] = llvm.alloca %[[a7]] x i8 : (i32) -> !llvm.ptr<i8>
+// CHECK: %{{.*}} = llvm.invoke @foo(%[[a7]]) to ^[[BB2:.*]] unwind ^[[BB1:.*]] : (i32) -> !llvm.struct<(i32, double, i32)>
+ %0 = llvm.mlir.constant(0 : i32) : i32
+ %1 = llvm.mlir.constant(3 : i32) : i32
%2 = llvm.mlir.constant("\01") : !llvm.array<1 x i8>
%3 = llvm.mlir.null : !llvm.ptr<ptr<i8>>
%4 = llvm.mlir.null : !llvm.ptr<i8>
%5 = llvm.mlir.addressof @_ZTIi : !llvm.ptr<ptr<i8>>
%6 = llvm.bitcast %5 : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
- %7 = llvm.mlir.constant(1 : i32) : !llvm.i32
- %8 = llvm.alloca %7 x !llvm.i8 : (!llvm.i32) -> !llvm.ptr<i8>
- %9 = llvm.invoke @foo(%7) to ^bb2 unwind ^bb1 : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
+ %7 = llvm.mlir.constant(1 : i32) : i32
+ %8 = llvm.alloca %7 x i8 : (i32) -> !llvm.ptr<i8>
+ %9 = llvm.invoke @foo(%7) to ^bb2 unwind ^bb1 : (i32) -> !llvm.struct<(i32, double, i32)>
// CHECK: ^[[BB1]]:
// CHECK: %[[lp:.*]] = llvm.landingpad cleanup (catch %[[a3]] : !llvm.ptr<ptr<i8>>) (catch %[[a6]] : !llvm.ptr<i8>) (filter %[[a2]] : !llvm.array<1 x i8>) : !llvm.struct<(ptr<i8>, i32)>
@@ -331,9 +331,9 @@ llvm.func @invokeLandingpad() -> !llvm.i32 attributes { personality = @__gxx_per
llvm.resume %10 : !llvm.struct<(ptr<i8>, i32)>
// CHECK: ^[[BB2]]:
-// CHECK: llvm.return %[[a7]] : !llvm.i32
+// CHECK: llvm.return %[[a7]] : i32
^bb2:
- llvm.return %7 : !llvm.i32
+ llvm.return %7 : i32
// CHECK: ^[[BB3:.*]]:
// CHECK: llvm.invoke @bar(%[[a8]], %[[a6]], %[[a4]]) to ^[[BB2]] unwind ^[[BB1]] : (!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.ptr<i8>) -> ()
@@ -341,19 +341,19 @@ llvm.func @invokeLandingpad() -> !llvm.i32 attributes { personality = @__gxx_per
llvm.invoke @bar(%8, %6, %4) to ^bb2 unwind ^bb1 : (!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.ptr<i8>) -> ()
// CHECK: ^[[BB4:.*]]:
-// CHECK: llvm.return %[[a0]] : !llvm.i32
+// CHECK: llvm.return %[[a0]] : i32
^bb4:
- llvm.return %0 : !llvm.i32
+ llvm.return %0 : i32
}
// CHECK-LABEL: @useFreezeOp
-func @useFreezeOp(%arg0: !llvm.i32) {
- // CHECK: = llvm.freeze %[[ARG0:.*]] : !llvm.i32
- %0 = llvm.freeze %arg0 : !llvm.i32
- // CHECK: %[[x:.*]] = llvm.mlir.undef : !llvm.i8
- %1 = llvm.mlir.undef : !llvm.i8
- // CHECK: = llvm.freeze %[[x]] : !llvm.i8
- %2 = llvm.freeze %1 : !llvm.i8
+func @useFreezeOp(%arg0: i32) {
+ // CHECK: = llvm.freeze %[[ARG0:.*]] : i32
+ %0 = llvm.freeze %arg0 : i32
+ // CHECK: %[[x:.*]] = llvm.mlir.undef : i8
+ %1 = llvm.mlir.undef : i8
+ // CHECK: = llvm.freeze %[[x]] : i8
+ %2 = llvm.freeze %1 : i8
return
}
@@ -369,27 +369,27 @@ func @useFenceInst() {
}
// CHECK-LABEL: @useInlineAsm
-llvm.func @useInlineAsm(%arg0: !llvm.i32) {
- // CHECK: llvm.inline_asm {{.*}} (!llvm.i32) -> !llvm.i8
- %0 = llvm.inline_asm "bswap $0", "=r,r" %arg0 : (!llvm.i32) -> !llvm.i8
+llvm.func @useInlineAsm(%arg0: i32) {
+ // CHECK: llvm.inline_asm {{.*}} (i32) -> i8
+ %0 = llvm.inline_asm "bswap $0", "=r,r" %arg0 : (i32) -> i8
- // CHECK-NEXT: llvm.inline_asm {{.*}} (!llvm.i32, !llvm.i32) -> !llvm.i8
- %1 = llvm.inline_asm "foo", "bar" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8
+ // CHECK-NEXT: llvm.inline_asm {{.*}} (i32, i32) -> i8
+ %1 = llvm.inline_asm "foo", "bar" %arg0, %arg0 : (i32, i32) -> i8
- // CHECK-NEXT: llvm.inline_asm has_side_effects {{.*}} (!llvm.i32, !llvm.i32) -> !llvm.i8
- %2 = llvm.inline_asm has_side_effects "foo", "bar" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8
+ // CHECK-NEXT: llvm.inline_asm has_side_effects {{.*}} (i32, i32) -> i8
+ %2 = llvm.inline_asm has_side_effects "foo", "bar" %arg0, %arg0 : (i32, i32) -> i8
- // CHECK-NEXT: llvm.inline_asm is_align_stack {{.*}} (!llvm.i32, !llvm.i32) -> !llvm.i8
- %3 = llvm.inline_asm is_align_stack "foo", "bar" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8
+ // CHECK-NEXT: llvm.inline_asm is_align_stack {{.*}} (i32, i32) -> i8
+ %3 = llvm.inline_asm is_align_stack "foo", "bar" %arg0, %arg0 : (i32, i32) -> i8
- // CHECK-NEXT: llvm.inline_asm "foo", "=r,=r,r" {{.*}} : (!llvm.i32) -> !llvm.struct<(i8, i8)>
- %5 = llvm.inline_asm "foo", "=r,=r,r" %arg0 : (!llvm.i32) -> !llvm.struct<(i8, i8)>
+ // CHECK-NEXT: llvm.inline_asm "foo", "=r,=r,r" {{.*}} : (i32) -> !llvm.struct<(i8, i8)>
+ %5 = llvm.inline_asm "foo", "=r,=r,r" %arg0 : (i32) -> !llvm.struct<(i8, i8)>
llvm.return
}
// CHECK-LABEL: @fastmathFlags
-func @fastmathFlags(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.i32) {
+func @fastmathFlags(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: i32) {
// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
// CHECK: {{.*}} = llvm.fsub %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
// CHECK: {{.*}} = llvm.fmul %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
@@ -407,8 +407,8 @@ func @fastmathFlags(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.i32) {
// CHECK: {{.*}} = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
%6 = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-// CHECK: {{.*}} = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
- %7 = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
+// CHECK: {{.*}} = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (i32) -> !llvm.struct<(i32, double, i32)>
+ %7 = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (i32) -> !llvm.struct<(i32, double, i32)>
// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 : !llvm.float
%8 = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<>} : !llvm.float
diff --git a/mlir/test/Dialect/LLVMIR/terminator.mlir b/mlir/test/Dialect/LLVMIR/terminator.mlir
index b8e8fcd486e9..31bc9b3d33be 100644
--- a/mlir/test/Dialect/LLVMIR/terminator.mlir
+++ b/mlir/test/Dialect/LLVMIR/terminator.mlir
@@ -11,7 +11,7 @@ func @return() {
// CHECK: llvm.br
// CHECK: llvm.cond_br
// CHECK: llvm.return
-func @control_flow(%cond : !llvm.i1) {
+func @control_flow(%cond : i1) {
llvm.br ^bb1
^bb1:
llvm.cond_br %cond, ^bb2, ^bb1
diff --git a/mlir/test/Dialect/LLVMIR/types-invalid.mlir b/mlir/test/Dialect/LLVMIR/types-invalid.mlir
index 3277e177bc9b..3982f912ead2 100644
--- a/mlir/test/Dialect/LLVMIR/types-invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/types-invalid.mlir
@@ -76,7 +76,7 @@ func @struct_literal_opaque() {
// -----
func @unexpected_type() {
- // expected-error @+1 {{unexpected type, expected i* or keyword}}
+ // expected-error @+1 {{unexpected type, expected keyword}}
"some.op"() : () -> !llvm.f32
}
@@ -150,3 +150,14 @@ func @scalable_void_vector() {
// expected-error @+1 {{invalid vector element type}}
"some.op"() : () -> !llvm.vec<? x 4 x void>
}
+
+// -----
+
+// expected-warning @+1 {{deprecated syntax, drop '!llvm.' for integers}}
+func private @deprecated_int() -> !llvm.i32
+
+// -----
+
+
+// expected-error @+1 {{unexpected type, expected keyword}}
+func private @unexpected_type() -> !llvm.tensor<*xf32>
diff --git a/mlir/test/Dialect/LLVMIR/types.mlir b/mlir/test/Dialect/LLVMIR/types.mlir
index 5258158efb69..f54f00a3f719 100644
--- a/mlir/test/Dialect/LLVMIR/types.mlir
+++ b/mlir/test/Dialect/LLVMIR/types.mlir
@@ -50,20 +50,20 @@ func @func() {
// CHECK-LABEL: @integer
func @integer() {
- // CHECK: !llvm.i1
- "some.op"() : () -> !llvm.i1
- // CHECK: !llvm.i8
- "some.op"() : () -> !llvm.i8
- // CHECK: !llvm.i16
- "some.op"() : () -> !llvm.i16
- // CHECK: !llvm.i32
- "some.op"() : () -> !llvm.i32
- // CHECK: !llvm.i64
- "some.op"() : () -> !llvm.i64
- // CHECK: !llvm.i57
- "some.op"() : () -> !llvm.i57
- // CHECK: !llvm.i129
- "some.op"() : () -> !llvm.i129
+ // CHECK: i1
+ "some.op"() : () -> i1
+ // CHECK: i8
+ "some.op"() : () -> i8
+ // CHECK: i16
+ "some.op"() : () -> i16
+ // CHECK: i32
+ "some.op"() : () -> i32
+ // CHECK: i64
+ "some.op"() : () -> i64
+ // CHECK: i57
+ "some.op"() : () -> i57
+ // CHECK: i129
+ "some.op"() : () -> i129
return
}
@@ -184,7 +184,7 @@ func @identified_struct() {
func @verbose() {
// CHECK: !llvm.struct<(i64, struct<(float)>)>
- "some.op"() : () -> !llvm.struct<(!llvm.i64, !llvm.struct<(!llvm.float)>)>
+ "some.op"() : () -> !llvm.struct<(i64, !llvm.struct<(!llvm.float)>)>
return
}
@@ -195,7 +195,7 @@ func @verbose() {
// DialectAsmPrinter to have a mechanism for querying the presence and
// usability of an alias outside of its `printType` method.
-!baz = type !llvm.i64
+!baz = type i64
!qux = type !llvm.struct<(!baz)>
!rec = type !llvm.struct<"a", (ptr<struct<"a">>)>
diff --git a/mlir/test/Dialect/Linalg/llvm.mlir b/mlir/test/Dialect/Linalg/llvm.mlir
index 9303a7aa6b31..829406bf21f8 100644
--- a/mlir/test/Dialect/Linalg/llvm.mlir
+++ b/mlir/test/Dialect/Linalg/llvm.mlir
@@ -6,9 +6,9 @@ func @range(%arg0: index) {
%R = linalg.range %c0:%arg0:%c1 : !linalg.range
return
}
-// CHECK-LABEL: func @range(%{{.*}}: !llvm.i64) {
-// CHECK: llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK-NEXT: llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK-LABEL: func @range(%{{.*}}: i64) {
+// CHECK: llvm.mlir.constant(0 : index) : i64
+// CHECK-NEXT: llvm.mlir.constant(1 : index) : i64
// CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(i64, i64, i64)>
// CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(i64, i64, i64)>
// CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(i64, i64, i64)>
@@ -23,8 +23,8 @@ func @slice(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: !linalg.range)
// CHECK: llvm.extractvalue %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK-NEXT: llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK-NEXT: llvm.extractvalue %{{.*}}[0] : !llvm.struct<(i64, i64, i64)>
-// CHECK-NEXT: llvm.mul %{{.*}}, %{{.*}} : !llvm.i64
-// CHECK-NEXT: llvm.add %{{.*}}, %{{.*}} : !llvm.i64
+// CHECK-NEXT: llvm.mul %{{.*}}, %{{.*}} : i64
+// CHECK-NEXT: llvm.add %{{.*}}, %{{.*}} : i64
// insert offset
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
@@ -34,15 +34,15 @@ func @slice(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: !linalg.range)
// CHECK-NEXT: llvm.extractvalue %{{.*}}[2] : !llvm.struct<(i64, i64, i64)>
// get size[0] from parent view
// CHECK-NEXT: llvm.extractvalue %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-// CHECK-NEXT: llvm.icmp "slt" %{{.*}}, %{{.*}} : !llvm.i64
-// CHECK-NEXT: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : !llvm.i1, !llvm.i64
+// CHECK-NEXT: llvm.icmp "slt" %{{.*}}, %{{.*}} : i64
+// CHECK-NEXT: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, i64
// compute size[0] bounded by parent view's size[0]
-// CHECK-NEXT: llvm.sub %{{.*}}, %{{.*}} : !llvm.i64
+// CHECK-NEXT: llvm.sub %{{.*}}, %{{.*}} : i64
// bound below by 0
-// CHECK-NEXT: llvm.icmp "slt" %{{.*}}, %{{.*}} : !llvm.i64
-// CHECK-NEXT: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : !llvm.i1, !llvm.i64
+// CHECK-NEXT: llvm.icmp "slt" %{{.*}}, %{{.*}} : i64
+// CHECK-NEXT: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, i64
// compute stride[0] using bounded size
-// CHECK-NEXT: llvm.mul %{{.*}}, %{{.*}} : !llvm.i64
+// CHECK-NEXT: llvm.mul %{{.*}}, %{{.*}} : i64
// insert size and stride
// CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
@@ -85,25 +85,25 @@ func @reshape_static_expand(%arg0: memref<3x4x5xf32>) -> memref<1x3x4x1x5xf32> {
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
// CHECK: llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(1 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-// CHECK: llvm.mlir.constant(3 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(3 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-// CHECK: llvm.mlir.constant(4 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(4 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(1 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 3] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-// CHECK: llvm.mlir.constant(5 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(5 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 4] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-// CHECK: llvm.mlir.constant(60 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(60 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-// CHECK: llvm.mlir.constant(20 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(20 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-// CHECK: llvm.mlir.constant(5 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(5 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-// CHECK: llvm.mlir.constant(5 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(5 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 3] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(1 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 4] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
func @reshape_static_collapse(%arg0: memref<1x3x4x1x5xf32>) -> memref<3x4x5xf32> {
@@ -121,17 +121,17 @@ func @reshape_static_collapse(%arg0: memref<1x3x4x1x5xf32>) -> memref<3x4x5xf32>
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
// CHECK: llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-// CHECK: llvm.mlir.constant(3 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(3 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-// CHECK: llvm.mlir.constant(4 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(4 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-// CHECK: llvm.mlir.constant(5 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(5 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-// CHECK: llvm.mlir.constant(20 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(20 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-// CHECK: llvm.mlir.constant(5 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(5 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(1 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
func @reshape_fold_zero_dim(%arg0 : memref<1x1xf32>) -> memref<f32> {
@@ -159,11 +159,11 @@ func @reshape_expand_zero_dim(%arg0 : memref<f32>) -> memref<1x1xf32> {
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(1 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(1 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(1 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64
+// CHECK: llvm.mlir.constant(1 : index) : i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir
index c74b8bc3327d..6b9be10c7693 100644
--- a/mlir/test/Dialect/OpenMP/ops.mlir
+++ b/mlir/test/Dialect/OpenMP/ops.mlir
@@ -29,19 +29,19 @@ func @omp_taskyield() -> () {
}
// CHECK-LABEL: func @omp_flush
-// CHECK-SAME: ([[ARG0:%.*]]: !llvm.i32) {
-func @omp_flush(%arg0 : !llvm.i32) -> () {
+// CHECK-SAME: ([[ARG0:%.*]]: i32) {
+func @omp_flush(%arg0 : i32) -> () {
// Test without data var
// CHECK: omp.flush
omp.flush
// Test with one data var
- // CHECK: omp.flush([[ARG0]] : !llvm.i32)
- omp.flush(%arg0 : !llvm.i32)
+ // CHECK: omp.flush([[ARG0]] : i32)
+ omp.flush(%arg0 : i32)
// Test with two data var
- // CHECK: omp.flush([[ARG0]], [[ARG0]] : !llvm.i32, !llvm.i32)
- omp.flush(%arg0, %arg0: !llvm.i32, !llvm.i32)
+ // CHECK: omp.flush([[ARG0]], [[ARG0]] : i32, i32)
+ omp.flush(%arg0, %arg0: i32, i32)
return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/types.mlir b/mlir/test/Dialect/SPIRV/IR/types.mlir
index 7b96f3dd2232..0d9c6cbd2774 100644
--- a/mlir/test/Dialect/SPIRV/IR/types.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/types.mlir
@@ -72,8 +72,8 @@ func private @index_type(!spv.array<4xindex>) -> ()
// -----
-// expected-error @+1 {{cannot use '!llvm.i32' to compose SPIR-V types}}
-func private @llvm_type(!spv.array<4x!llvm.i32>) -> ()
+// expected-error @+1 {{cannot use '!llvm.struct<()>' to compose SPIR-V types}}
+func private @llvm_type(!spv.array<4x!llvm.struct<()>>) -> ()
// -----
diff --git a/mlir/test/Target/arm-sve.mlir b/mlir/test/Target/arm-sve.mlir
index 7340fea34be6..430f60b4ecac 100644
--- a/mlir/test/Target/arm-sve.mlir
+++ b/mlir/test/Target/arm-sve.mlir
@@ -49,8 +49,8 @@ llvm.func @arm_sve_ummla(%arg0: !llvm.vec<? x 16 x i8>,
}
// CHECK-LABEL: define i64 @get_vector_scale()
-llvm.func @get_vector_scale() -> !llvm.i64 {
+llvm.func @get_vector_scale() -> i64 {
// CHECK: call i64 @llvm.vscale.i64()
- %0 = "llvm_arm_sve.vscale"() : () -> !llvm.i64
- llvm.return %0 : !llvm.i64
+ %0 = "llvm_arm_sve.vscale"() : () -> i64
+ llvm.return %0 : i64
}
diff --git a/mlir/test/Target/avx512.mlir b/mlir/test/Target/avx512.mlir
index 0cc336d29df0..32d250282d56 100644
--- a/mlir/test/Target/avx512.mlir
+++ b/mlir/test/Target/avx512.mlir
@@ -2,30 +2,30 @@
// CHECK-LABEL: define <16 x float> @LLVM_x86_avx512_mask_ps_512
llvm.func @LLVM_x86_avx512_mask_ps_512(%a: !llvm.vec<16 x float>,
- %b: !llvm.i32,
- %c: !llvm.i16)
+ %b: i32,
+ %c: i16)
-> (!llvm.vec<16 x float>)
{
// CHECK: call <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float>
%0 = "llvm_avx512.mask.rndscale.ps.512"(%a, %b, %a, %c, %b) :
- (!llvm.vec<16 x float>, !llvm.i32, !llvm.vec<16 x float>, !llvm.i16, !llvm.i32) -> !llvm.vec<16 x float>
+ (!llvm.vec<16 x float>, i32, !llvm.vec<16 x float>, i16, i32) -> !llvm.vec<16 x float>
// CHECK: call <16 x float> @llvm.x86.avx512.mask.scalef.ps.512(<16 x float>
%1 = "llvm_avx512.mask.scalef.ps.512"(%a, %a, %a, %c, %b) :
- (!llvm.vec<16 x float>, !llvm.vec<16 x float>, !llvm.vec<16 x float>, !llvm.i16, !llvm.i32) -> !llvm.vec<16 x float>
+ (!llvm.vec<16 x float>, !llvm.vec<16 x float>, !llvm.vec<16 x float>, i16, i32) -> !llvm.vec<16 x float>
llvm.return %1: !llvm.vec<16 x float>
}
// CHECK-LABEL: define <8 x double> @LLVM_x86_avx512_mask_pd_512
llvm.func @LLVM_x86_avx512_mask_pd_512(%a: !llvm.vec<8 x double>,
- %b: !llvm.i32,
- %c: !llvm.i8)
+ %b: i32,
+ %c: i8)
-> (!llvm.vec<8 x double>)
{
// CHECK: call <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double>
%0 = "llvm_avx512.mask.rndscale.pd.512"(%a, %b, %a, %c, %b) :
- (!llvm.vec<8 x double>, !llvm.i32, !llvm.vec<8 x double>, !llvm.i8, !llvm.i32) -> !llvm.vec<8 x double>
+ (!llvm.vec<8 x double>, i32, !llvm.vec<8 x double>, i8, i32) -> !llvm.vec<8 x double>
// CHECK: call <8 x double> @llvm.x86.avx512.mask.scalef.pd.512(<8 x double>
%1 = "llvm_avx512.mask.scalef.pd.512"(%a, %a, %a, %c, %b) :
- (!llvm.vec<8 x double>, !llvm.vec<8 x double>, !llvm.vec<8 x double>, !llvm.i8, !llvm.i32) -> !llvm.vec<8 x double>
+ (!llvm.vec<8 x double>, !llvm.vec<8 x double>, !llvm.vec<8 x double>, i8, i32) -> !llvm.vec<8 x double>
llvm.return %1: !llvm.vec<8 x double>
}
diff --git a/mlir/test/Target/import.ll b/mlir/test/Target/import.ll
index b3cfad9de427..925320f9a2a1 100644
--- a/mlir/test/Target/import.ll
+++ b/mlir/test/Target/import.ll
@@ -16,8 +16,8 @@
@g4 = external global i32, align 8
; CHECK: llvm.mlir.global internal constant @int_gep() : !llvm.ptr<i32> {
; CHECK-DAG: %[[addr:[0-9]+]] = llvm.mlir.addressof @g4 : !llvm.ptr<i32>
-; CHECK-DAG: %[[c2:[0-9]+]] = llvm.mlir.constant(2 : i32) : !llvm.i32
-; CHECK-NEXT: %[[gepinit:[0-9]+]] = llvm.getelementptr %[[addr]][%[[c2]]] : (!llvm.ptr<i32>, !llvm.i32) -> !llvm.ptr<i32>
+; CHECK-DAG: %[[c2:[0-9]+]] = llvm.mlir.constant(2 : i32) : i32
+; CHECK-NEXT: %[[gepinit:[0-9]+]] = llvm.getelementptr %[[addr]][%[[c2]]] : (!llvm.ptr<i32>, i32) -> !llvm.ptr<i32>
; CHECK-NEXT: llvm.return %[[gepinit]] : !llvm.ptr<i32>
; CHECK-NEXT: }
@int_gep = internal constant i32* getelementptr (i32, i32* @g4, i32 2)
@@ -26,27 +26,27 @@
; Linkage attribute.
;
-; CHECK: llvm.mlir.global private @private(42 : i32) : !llvm.i32
+; CHECK: llvm.mlir.global private @private(42 : i32) : i32
@private = private global i32 42
-; CHECK: llvm.mlir.global internal @internal(42 : i32) : !llvm.i32
+; CHECK: llvm.mlir.global internal @internal(42 : i32) : i32
@internal = internal global i32 42
-; CHECK: llvm.mlir.global available_externally @available_externally(42 : i32) : !llvm.i32
+; CHECK: llvm.mlir.global available_externally @available_externally(42 : i32) : i32
@available_externally = available_externally global i32 42
-; CHECK: llvm.mlir.global linkonce @linkonce(42 : i32) : !llvm.i32
+; CHECK: llvm.mlir.global linkonce @linkonce(42 : i32) : i32
@linkonce = linkonce global i32 42
-; CHECK: llvm.mlir.global weak @weak(42 : i32) : !llvm.i32
+; CHECK: llvm.mlir.global weak @weak(42 : i32) : i32
@weak = weak global i32 42
-; CHECK: llvm.mlir.global common @common(42 : i32) : !llvm.i32
+; CHECK: llvm.mlir.global common @common(42 : i32) : i32
@common = common global i32 42
-; CHECK: llvm.mlir.global appending @appending(42 : i32) : !llvm.i32
+; CHECK: llvm.mlir.global appending @appending(42 : i32) : i32
@appending = appending global i32 42
-; CHECK: llvm.mlir.global extern_weak @extern_weak() : !llvm.i32
+; CHECK: llvm.mlir.global extern_weak @extern_weak() : i32
@extern_weak = extern_weak global i32
-; CHECK: llvm.mlir.global linkonce_odr @linkonce_odr(42 : i32) : !llvm.i32
+; CHECK: llvm.mlir.global linkonce_odr @linkonce_odr(42 : i32) : i32
@linkonce_odr = linkonce_odr global i32 42
-; CHECK: llvm.mlir.global weak_odr @weak_odr(42 : i32) : !llvm.i32
+; CHECK: llvm.mlir.global weak_odr @weak_odr(42 : i32) : i32
@weak_odr = weak_odr global i32 42
-; CHECK: llvm.mlir.global external @external() : !llvm.i32
+; CHECK: llvm.mlir.global external @external() : i32
@external = external global i32
;
@@ -73,54 +73,54 @@ define internal void @func_internal() {
ret void
}
-; CHECK: llvm.func @fe(!llvm.i32) -> !llvm.float
+; CHECK: llvm.func @fe(i32) -> !llvm.float
declare float @fe(i32)
; FIXME: function attributes.
-; CHECK-LABEL: llvm.func internal @f1(%arg0: !llvm.i64) -> !llvm.i32 {
-; CHECK-DAG: %[[c2:[0-9]+]] = llvm.mlir.constant(2 : i32) : !llvm.i32
-; CHECK-DAG: %[[c42:[0-9]+]] = llvm.mlir.constant(42 : i32) : !llvm.i32
-; CHECK-DAG: %[[c1:[0-9]+]] = llvm.mlir.constant(true) : !llvm.i1
-; CHECK-DAG: %[[c43:[0-9]+]] = llvm.mlir.constant(43 : i32) : !llvm.i32
+; CHECK-LABEL: llvm.func internal @f1(%arg0: i64) -> i32 {
+; CHECK-DAG: %[[c2:[0-9]+]] = llvm.mlir.constant(2 : i32) : i32
+; CHECK-DAG: %[[c42:[0-9]+]] = llvm.mlir.constant(42 : i32) : i32
+; CHECK-DAG: %[[c1:[0-9]+]] = llvm.mlir.constant(true) : i1
+; CHECK-DAG: %[[c43:[0-9]+]] = llvm.mlir.constant(43 : i32) : i32
define internal dso_local i32 @f1(i64 %a) norecurse {
entry:
-; CHECK: %{{[0-9]+}} = llvm.inttoptr %arg0 : !llvm.i64 to !llvm.ptr<i64>
+; CHECK: %{{[0-9]+}} = llvm.inttoptr %arg0 : i64 to !llvm.ptr<i64>
%aa = inttoptr i64 %a to i64*
; %[[addrof:[0-9]+]] = llvm.mlir.addressof @g2 : !llvm.ptr<double>
; %[[addrof2:[0-9]+]] = llvm.mlir.addressof @g2 : !llvm.ptr<double>
-; %{{[0-9]+}} = llvm.inttoptr %arg0 : !llvm.i64 to !llvm.ptr<i64>
-; %{{[0-9]+}} = llvm.ptrtoint %[[addrof2]] : !llvm.ptr<double> to !llvm.i64
-; %{{[0-9]+}} = llvm.getelementptr %[[addrof]][%3] : (!llvm.ptr<double>, !llvm.i32) -> !llvm.ptr<double>
+; %{{[0-9]+}} = llvm.inttoptr %arg0 : i64 to !llvm.ptr<i64>
+; %{{[0-9]+}} = llvm.ptrtoint %[[addrof2]] : !llvm.ptr<double> to i64
+; %{{[0-9]+}} = llvm.getelementptr %[[addrof]][%3] : (!llvm.ptr<double>, i32) -> !llvm.ptr<double>
%bb = ptrtoint double* @g2 to i64
%cc = getelementptr double, double* @g2, i32 2
-; CHECK: %[[b:[0-9]+]] = llvm.trunc %arg0 : !llvm.i64 to !llvm.i32
+; CHECK: %[[b:[0-9]+]] = llvm.trunc %arg0 : i64 to i32
%b = trunc i64 %a to i32
-; CHECK: %[[c:[0-9]+]] = llvm.call @fe(%[[b]]) : (!llvm.i32) -> !llvm.float
+; CHECK: %[[c:[0-9]+]] = llvm.call @fe(%[[b]]) : (i32) -> !llvm.float
%c = call float @fe(i32 %b)
-; CHECK: %[[d:[0-9]+]] = llvm.fptosi %[[c]] : !llvm.float to !llvm.i32
+; CHECK: %[[d:[0-9]+]] = llvm.fptosi %[[c]] : !llvm.float to i32
%d = fptosi float %c to i32
; FIXME: icmp should return i1.
-; CHECK: %[[e:[0-9]+]] = llvm.icmp "ne" %[[d]], %[[c2]] : !llvm.i32
+; CHECK: %[[e:[0-9]+]] = llvm.icmp "ne" %[[d]], %[[c2]] : i32
%e = icmp ne i32 %d, 2
; CHECK: llvm.cond_br %[[e]], ^bb1, ^bb2
br i1 %e, label %if.then, label %if.end
; CHECK: ^bb1:
if.then:
-; CHECK: llvm.return %[[c42]] : !llvm.i32
+; CHECK: llvm.return %[[c42]] : i32
ret i32 42
; CHECK: ^bb2:
if.end:
-; CHECK: %[[orcond:[0-9]+]] = llvm.or %[[e]], %[[c1]] : !llvm.i1
+; CHECK: %[[orcond:[0-9]+]] = llvm.or %[[e]], %[[c1]] : i1
%or.cond = or i1 %e, 1
; CHECK: llvm.return %[[c43]]
ret i32 43
}
; Test that instructions that dominate can be out of sequential order.
-; CHECK-LABEL: llvm.func @f2(%arg0: !llvm.i64) -> !llvm.i64 {
-; CHECK-DAG: %[[c3:[0-9]+]] = llvm.mlir.constant(3 : i64) : !llvm.i64
+; CHECK-LABEL: llvm.func @f2(%arg0: i64) -> i64 {
+; CHECK-DAG: %[[c3:[0-9]+]] = llvm.mlir.constant(3 : i64) : i64
define i64 @f2(i64 %a) noduplicate {
entry:
; CHECK: llvm.br ^bb2
@@ -133,21 +133,21 @@ end:
; CHECK: ^bb2:
next:
-; CHECK: %1 = llvm.add %arg0, %[[c3]] : !llvm.i64
+; CHECK: %1 = llvm.add %arg0, %[[c3]] : i64
%b = add i64 %a, 3
; CHECK: llvm.br ^bb1
br label %end
}
; Test arguments/phis.
-; CHECK-LABEL: llvm.func @f2_phis(%arg0: !llvm.i64) -> !llvm.i64 {
-; CHECK-DAG: %[[c3:[0-9]+]] = llvm.mlir.constant(3 : i64) : !llvm.i64
+; CHECK-LABEL: llvm.func @f2_phis(%arg0: i64) -> i64 {
+; CHECK-DAG: %[[c3:[0-9]+]] = llvm.mlir.constant(3 : i64) : i64
define i64 @f2_phis(i64 %a) noduplicate {
entry:
; CHECK: llvm.br ^bb2
br label %next
-; CHECK: ^bb1(%1: !llvm.i64):
+; CHECK: ^bb1(%1: i64):
end:
%c = phi i64 [ %b, %next ]
; CHECK: llvm.return %1
@@ -155,7 +155,7 @@ end:
; CHECK: ^bb2:
next:
-; CHECK: %2 = llvm.add %arg0, %[[c3]] : !llvm.i64
+; CHECK: %2 = llvm.add %arg0, %[[c3]] : i64
%b = add i64 %a, 3
; CHECK: llvm.br ^bb1
br label %end
@@ -200,7 +200,7 @@ define void @f5(i32 %d) {
; CHECK-LABEL: llvm.func @f6(%arg0: !llvm.ptr<func<void (i16)>>)
define void @f6(void (i16) *%fn) {
-; CHECK: %[[c:[0-9]+]] = llvm.mlir.constant(0 : i16) : !llvm.i16
+; CHECK: %[[c:[0-9]+]] = llvm.mlir.constant(0 : i16) : i16
; CHECK: llvm.call %arg0(%[[c]])
call void %fn(i16 0)
ret void
@@ -280,7 +280,7 @@ declare i32 @__gxx_personality_v0(...)
; CHECK-LABEL: @invokeLandingpad
define i32 @invokeLandingpad() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
; CHECK: %[[a1:[0-9]+]] = llvm.bitcast %{{[0-9]+}} : !llvm.ptr<ptr<ptr<i8>>> to !llvm.ptr<i8>
- ; CHECK: %[[a3:[0-9]+]] = llvm.alloca %{{[0-9]+}} x !llvm.i8 : (!llvm.i32) -> !llvm.ptr<i8>
+ ; CHECK: %[[a3:[0-9]+]] = llvm.alloca %{{[0-9]+}} x i8 : (i32) -> !llvm.ptr<i8>
%1 = alloca i8
; CHECK: llvm.invoke @foo(%[[a3]]) to ^bb2 unwind ^bb1 : (!llvm.ptr<i8>) -> ()
invoke void @foo(i8* %1) to label %4 unwind label %2
@@ -294,7 +294,7 @@ define i32 @invokeLandingpad() personality i8* bitcast (i32 (...)* @__gxx_person
resume { i8*, i32 } %3
; CHECK: ^bb2:
- ; CHECK: llvm.return %{{[0-9]+}} : !llvm.i32
+ ; CHECK: llvm.return %{{[0-9]+}} : i32
ret i32 1
; CHECK: ^bb3:
@@ -302,16 +302,16 @@ define i32 @invokeLandingpad() personality i8* bitcast (i32 (...)* @__gxx_person
%6 = invoke i8* @bar(i8* %1) to label %4 unwind label %2
; CHECK: ^bb4:
- ; CHECK: llvm.return %{{[0-9]+}} : !llvm.i32
+ ; CHECK: llvm.return %{{[0-9]+}} : i32
ret i32 0
}
;CHECK-LABEL: @useFreezeOp
define i32 @useFreezeOp(i32 %x) {
- ;CHECK: %{{[0-9]+}} = llvm.freeze %{{[0-9a-z]+}} : !llvm.i32
+ ;CHECK: %{{[0-9]+}} = llvm.freeze %{{[0-9a-z]+}} : i32
%1 = freeze i32 %x
%2 = add i8 10, 10
- ;CHECK: %{{[0-9]+}} = llvm.freeze %{{[0-9]+}} : !llvm.i8
+ ;CHECK: %{{[0-9]+}} = llvm.freeze %{{[0-9]+}} : i8
%3 = freeze i8 %2
%poison = add nsw i1 0, undef
ret i32 0
diff --git a/mlir/test/Target/llvmir-intrinsics.mlir b/mlir/test/Target/llvmir-intrinsics.mlir
index 333ad0553fc4..0760e3110e6e 100644
--- a/mlir/test/Target/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/llvmir-intrinsics.mlir
@@ -2,9 +2,9 @@
// CHECK-LABEL: @intrinsics
llvm.func @intrinsics(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.vec<8 x float>, %arg3: !llvm.ptr<i8>) {
- %c3 = llvm.mlir.constant(3 : i32) : !llvm.i32
- %c1 = llvm.mlir.constant(1 : i32) : !llvm.i32
- %c0 = llvm.mlir.constant(0 : i32) : !llvm.i32
+ %c3 = llvm.mlir.constant(3 : i32) : i32
+ %c1 = llvm.mlir.constant(1 : i32) : i32
+ %c0 = llvm.mlir.constant(0 : i32) : i32
// CHECK: call float @llvm.fmuladd.f32
"llvm.intr.fmuladd"(%arg0, %arg1, %arg0) : (!llvm.float, !llvm.float, !llvm.float) -> !llvm.float
// CHECK: call <8 x float> @llvm.fmuladd.v8f32
@@ -14,7 +14,7 @@ llvm.func @intrinsics(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.vec<8
// CHECK: call <8 x float> @llvm.fma.v8f32
"llvm.intr.fma"(%arg2, %arg2, %arg2) : (!llvm.vec<8 x float>, !llvm.vec<8 x float>, !llvm.vec<8 x float>) -> !llvm.vec<8 x float>
// CHECK: call void @llvm.prefetch.p0i8(i8* %3, i32 0, i32 3, i32 1)
- "llvm.intr.prefetch"(%arg3, %c0, %c3, %c1) : (!llvm.ptr<i8>, !llvm.i32, !llvm.i32, !llvm.i32) -> ()
+ "llvm.intr.prefetch"(%arg3, %c0, %c3, %c1) : (!llvm.ptr<i8>, i32, i32, i32) -> ()
llvm.return
}
@@ -127,18 +127,18 @@ llvm.func @pow_test(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.vec<8 x
}
// CHECK-LABEL: @bitreverse_test
-llvm.func @bitreverse_test(%arg0: !llvm.i32, %arg1: !llvm.vec<8 x i32>) {
+llvm.func @bitreverse_test(%arg0: i32, %arg1: !llvm.vec<8 x i32>) {
// CHECK: call i32 @llvm.bitreverse.i32
- "llvm.intr.bitreverse"(%arg0) : (!llvm.i32) -> !llvm.i32
+ "llvm.intr.bitreverse"(%arg0) : (i32) -> i32
// CHECK: call <8 x i32> @llvm.bitreverse.v8i32
"llvm.intr.bitreverse"(%arg1) : (!llvm.vec<8 x i32>) -> !llvm.vec<8 x i32>
llvm.return
}
// CHECK-LABEL: @ctpop_test
-llvm.func @ctpop_test(%arg0: !llvm.i32, %arg1: !llvm.vec<8 x i32>) {
+llvm.func @ctpop_test(%arg0: i32, %arg1: !llvm.vec<8 x i32>) {
// CHECK: call i32 @llvm.ctpop.i32
- "llvm.intr.ctpop"(%arg0) : (!llvm.i32) -> !llvm.i32
+ "llvm.intr.ctpop"(%arg0) : (i32) -> i32
// CHECK: call <8 x i32> @llvm.ctpop.v8i32
"llvm.intr.ctpop"(%arg1) : (!llvm.vec<8 x i32>) -> !llvm.vec<8 x i32>
llvm.return
@@ -163,18 +163,18 @@ llvm.func @minnum_test(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.vec<
}
// CHECK-LABEL: @smax_test
-llvm.func @smax_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
+llvm.func @smax_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
// CHECK: call i32 @llvm.smax.i32
- "llvm.intr.smax"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.i32
+ "llvm.intr.smax"(%arg0, %arg1) : (i32, i32) -> i32
// CHECK: call <8 x i32> @llvm.smax.v8i32
"llvm.intr.smax"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.vec<8 x i32>
llvm.return
}
// CHECK-LABEL: @smin_test
-llvm.func @smin_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
+llvm.func @smin_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
// CHECK: call i32 @llvm.smin.i32
- "llvm.intr.smin"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.i32
+ "llvm.intr.smin"(%arg0, %arg1) : (i32, i32) -> i32
// CHECK: call <8 x i32> @llvm.smin.v8i32
"llvm.intr.smin"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.vec<8 x i32>
llvm.return
@@ -183,25 +183,25 @@ llvm.func @smin_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i3
// CHECK-LABEL: @vector_reductions
llvm.func @vector_reductions(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>, %arg2: !llvm.vec<8 x i32>) {
// CHECK: call i32 @llvm.vector.reduce.add.v8i32
- "llvm.intr.vector.reduce.add"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32
+ "llvm.intr.vector.reduce.add"(%arg2) : (!llvm.vec<8 x i32>) -> i32
// CHECK: call i32 @llvm.vector.reduce.and.v8i32
- "llvm.intr.vector.reduce.and"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32
+ "llvm.intr.vector.reduce.and"(%arg2) : (!llvm.vec<8 x i32>) -> i32
// CHECK: call float @llvm.vector.reduce.fmax.v8f32
"llvm.intr.vector.reduce.fmax"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.float
// CHECK: call float @llvm.vector.reduce.fmin.v8f32
"llvm.intr.vector.reduce.fmin"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.float
// CHECK: call i32 @llvm.vector.reduce.mul.v8i32
- "llvm.intr.vector.reduce.mul"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32
+ "llvm.intr.vector.reduce.mul"(%arg2) : (!llvm.vec<8 x i32>) -> i32
// CHECK: call i32 @llvm.vector.reduce.or.v8i32
- "llvm.intr.vector.reduce.or"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32
+ "llvm.intr.vector.reduce.or"(%arg2) : (!llvm.vec<8 x i32>) -> i32
// CHECK: call i32 @llvm.vector.reduce.smax.v8i32
- "llvm.intr.vector.reduce.smax"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32
+ "llvm.intr.vector.reduce.smax"(%arg2) : (!llvm.vec<8 x i32>) -> i32
// CHECK: call i32 @llvm.vector.reduce.smin.v8i32
- "llvm.intr.vector.reduce.smin"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32
+ "llvm.intr.vector.reduce.smin"(%arg2) : (!llvm.vec<8 x i32>) -> i32
// CHECK: call i32 @llvm.vector.reduce.umax.v8i32
- "llvm.intr.vector.reduce.umax"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32
+ "llvm.intr.vector.reduce.umax"(%arg2) : (!llvm.vec<8 x i32>) -> i32
// CHECK: call i32 @llvm.vector.reduce.umin.v8i32
- "llvm.intr.vector.reduce.umin"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32
+ "llvm.intr.vector.reduce.umin"(%arg2) : (!llvm.vec<8 x i32>) -> i32
// CHECK: call float @llvm.vector.reduce.fadd.v8f32
"llvm.intr.vector.reduce.fadd"(%arg0, %arg1) : (!llvm.float, !llvm.vec<8 x float>) -> !llvm.float
// CHECK: call float @llvm.vector.reduce.fmul.v8f32
@@ -211,14 +211,14 @@ llvm.func @vector_reductions(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>, %a
// CHECK: call reassoc float @llvm.vector.reduce.fmul.v8f32
"llvm.intr.vector.reduce.fmul"(%arg0, %arg1) {reassoc = true} : (!llvm.float, !llvm.vec<8 x float>) -> !llvm.float
// CHECK: call i32 @llvm.vector.reduce.xor.v8i32
- "llvm.intr.vector.reduce.xor"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32
+ "llvm.intr.vector.reduce.xor"(%arg2) : (!llvm.vec<8 x i32>) -> i32
llvm.return
}
// CHECK-LABEL: @matrix_intrinsics
// 4x16 16x3
llvm.func @matrix_intrinsics(%A: !llvm.vec<64 x float>, %B: !llvm.vec<48 x float>,
- %ptr: !llvm.ptr<float>, %stride: !llvm.i64) {
+ %ptr: !llvm.ptr<float>, %stride: i64) {
// CHECK: call <12 x float> @llvm.matrix.multiply.v12f32.v64f32.v48f32(<64 x float> %0, <48 x float> %1, i32 4, i32 16, i32 3)
%C = llvm.intr.matrix.multiply %A, %B
{ lhs_rows = 4: i32, lhs_columns = 16: i32 , rhs_columns = 3: i32} :
@@ -229,18 +229,18 @@ llvm.func @matrix_intrinsics(%A: !llvm.vec<64 x float>, %B: !llvm.vec<48 x float
// CHECK: call <48 x float> @llvm.matrix.column.major.load.v48f32(float* align 4 %2, i64 %3, i1 false, i32 3, i32 16)
%E = llvm.intr.matrix.column.major.load %ptr, <stride=%stride>
{ isVolatile = 0: i1, rows = 3: i32, columns = 16: i32} :
- !llvm.vec<48 x float> from !llvm.ptr<float> stride !llvm.i64
+ !llvm.vec<48 x float> from !llvm.ptr<float> stride i64
// CHECK: call void @llvm.matrix.column.major.store.v48f32(<48 x float> %7, float* align 4 %2, i64 %3, i1 false, i32 3, i32 16)
llvm.intr.matrix.column.major.store %E, %ptr, <stride=%stride>
{ isVolatile = 0: i1, rows = 3: i32, columns = 16: i32} :
- !llvm.vec<48 x float> to !llvm.ptr<float> stride !llvm.i64
+ !llvm.vec<48 x float> to !llvm.ptr<float> stride i64
llvm.return
}
// CHECK-LABEL: @get_active_lane_mask
-llvm.func @get_active_lane_mask(%base: !llvm.i64, %n: !llvm.i64) -> (!llvm.vec<7 x i1>) {
+llvm.func @get_active_lane_mask(%base: i64, %n: i64) -> (!llvm.vec<7 x i1>) {
// CHECK: call <7 x i1> @llvm.get.active.lane.mask.v7i1.i64(i64 %0, i64 %1)
- %0 = llvm.intr.get.active.lane.mask %base, %n : !llvm.i64, !llvm.i64 to !llvm.vec<7 x i1>
+ %0 = llvm.intr.get.active.lane.mask %base, %n : i64, i64 to !llvm.vec<7 x i1>
llvm.return %0 : !llvm.vec<7 x i1>
}
@@ -284,64 +284,64 @@ llvm.func @masked_expand_compress_intrinsics(%ptr: !llvm.ptr<float>, %mask: !llv
}
// CHECK-LABEL: @memcpy_test
-llvm.func @memcpy_test(%arg0: !llvm.i32, %arg1: !llvm.i1, %arg2: !llvm.ptr<i8>, %arg3: !llvm.ptr<i8>) {
+llvm.func @memcpy_test(%arg0: i32, %arg1: i1, %arg2: !llvm.ptr<i8>, %arg3: !llvm.ptr<i8>) {
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %{{.*}}, i8* %{{.*}}, i32 %{{.*}}, i1 %{{.*}})
- "llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg1) : (!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.i32, !llvm.i1) -> ()
- %sz = llvm.mlir.constant(10: i64) : !llvm.i64
+ "llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg1) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32, i1) -> ()
+ %sz = llvm.mlir.constant(10: i64) : i64
// CHECK: call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* %{{.*}}, i8* %{{.*}}, i64 10, i1 %{{.*}})
- "llvm.intr.memcpy.inline"(%arg2, %arg3, %sz, %arg1) : (!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.i64, !llvm.i1) -> ()
+ "llvm.intr.memcpy.inline"(%arg2, %arg3, %sz, %arg1) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i64, i1) -> ()
llvm.return
}
// CHECK-LABEL: @sadd_with_overflow_test
-llvm.func @sadd_with_overflow_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
+llvm.func @sadd_with_overflow_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
// CHECK: call { i32, i1 } @llvm.sadd.with.overflow.i32
- "llvm.intr.sadd.with.overflow"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)>
+ "llvm.intr.sadd.with.overflow"(%arg0, %arg1) : (i32, i32) -> !llvm.struct<(i32, i1)>
// CHECK: call { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32
"llvm.intr.sadd.with.overflow"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.struct<(vec<8 x i32>, vec<8 x i1>)>
llvm.return
}
// CHECK-LABEL: @uadd_with_overflow_test
-llvm.func @uadd_with_overflow_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
+llvm.func @uadd_with_overflow_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
// CHECK: call { i32, i1 } @llvm.uadd.with.overflow.i32
- "llvm.intr.uadd.with.overflow"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)>
+ "llvm.intr.uadd.with.overflow"(%arg0, %arg1) : (i32, i32) -> !llvm.struct<(i32, i1)>
// CHECK: call { <8 x i32>, <8 x i1> } @llvm.uadd.with.overflow.v8i32
"llvm.intr.uadd.with.overflow"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.struct<(vec<8 x i32>, vec<8 x i1>)>
llvm.return
}
// CHECK-LABEL: @ssub_with_overflow_test
-llvm.func @ssub_with_overflow_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
+llvm.func @ssub_with_overflow_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
// CHECK: call { i32, i1 } @llvm.ssub.with.overflow.i32
- "llvm.intr.ssub.with.overflow"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)>
+ "llvm.intr.ssub.with.overflow"(%arg0, %arg1) : (i32, i32) -> !llvm.struct<(i32, i1)>
// CHECK: call { <8 x i32>, <8 x i1> } @llvm.ssub.with.overflow.v8i32
"llvm.intr.ssub.with.overflow"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.struct<(vec<8 x i32>, vec<8 x i1>)>
llvm.return
}
// CHECK-LABEL: @usub_with_overflow_test
-llvm.func @usub_with_overflow_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
+llvm.func @usub_with_overflow_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
// CHECK: call { i32, i1 } @llvm.usub.with.overflow.i32
- "llvm.intr.usub.with.overflow"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)>
+ "llvm.intr.usub.with.overflow"(%arg0, %arg1) : (i32, i32) -> !llvm.struct<(i32, i1)>
// CHECK: call { <8 x i32>, <8 x i1> } @llvm.usub.with.overflow.v8i32
"llvm.intr.usub.with.overflow"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.struct<(vec<8 x i32>, vec<8 x i1>)>
llvm.return
}
// CHECK-LABEL: @smul_with_overflow_test
-llvm.func @smul_with_overflow_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
+llvm.func @smul_with_overflow_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
// CHECK: call { i32, i1 } @llvm.smul.with.overflow.i32
- "llvm.intr.smul.with.overflow"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)>
+ "llvm.intr.smul.with.overflow"(%arg0, %arg1) : (i32, i32) -> !llvm.struct<(i32, i1)>
// CHECK: call { <8 x i32>, <8 x i1> } @llvm.smul.with.overflow.v8i32
"llvm.intr.smul.with.overflow"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.struct<(vec<8 x i32>, vec<8 x i1>)>
llvm.return
}
// CHECK-LABEL: @umul_with_overflow_test
-llvm.func @umul_with_overflow_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
+llvm.func @umul_with_overflow_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) {
// CHECK: call { i32, i1 } @llvm.umul.with.overflow.i32
- "llvm.intr.umul.with.overflow"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)>
+ "llvm.intr.umul.with.overflow"(%arg0, %arg1) : (i32, i32) -> !llvm.struct<(i32, i1)>
// CHECK: call { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32
"llvm.intr.umul.with.overflow"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.struct<(vec<8 x i32>, vec<8 x i1>)>
llvm.return
diff --git a/mlir/test/Target/llvmir-types.mlir b/mlir/test/Target/llvmir-types.mlir
index d01bc2dbdf70..db2d6f8a9a34 100644
--- a/mlir/test/Target/llvmir-types.mlir
+++ b/mlir/test/Target/llvmir-types.mlir
@@ -28,17 +28,17 @@ llvm.func @return_x86_mmx() -> !llvm.x86_mmx
//
// CHECK: declare void @f_void_i32(i32)
-llvm.func @f_void_i32(!llvm.i32) -> !llvm.void
+llvm.func @f_void_i32(i32) -> !llvm.void
// CHECK: declare i32 @f_i32_empty()
-llvm.func @f_i32_empty() -> !llvm.i32
+llvm.func @f_i32_empty() -> i32
// CHECK: declare i32 @f_i32_half_bfloat_float_double(half, bfloat, float, double)
-llvm.func @f_i32_half_bfloat_float_double(!llvm.half, !llvm.bfloat, !llvm.float, !llvm.double) -> !llvm.i32
+llvm.func @f_i32_half_bfloat_float_double(!llvm.half, !llvm.bfloat, !llvm.float, !llvm.double) -> i32
// CHECK: declare i32 @f_i32_i32_i32(i32, i32)
-llvm.func @f_i32_i32_i32(!llvm.i32, !llvm.i32) -> !llvm.i32
+llvm.func @f_i32_i32_i32(i32, i32) -> i32
// CHECK: declare void @f_void_variadic(...)
llvm.func @f_void_variadic(...)
// CHECK: declare void @f_void_i32_i32_variadic(i32, i32, ...)
-llvm.func @f_void_i32_i32_variadic(!llvm.i32, !llvm.i32, ...)
+llvm.func @f_void_i32_i32_variadic(i32, i32, ...)
// CHECK: declare i32 (i32)* @f_f_i32_i32()
llvm.func @f_f_i32_i32() -> !llvm.ptr<func<i32 (i32)>>
@@ -47,19 +47,19 @@ llvm.func @f_f_i32_i32() -> !llvm.ptr<func<i32 (i32)>>
//
// CHECK: declare i1 @return_i1()
-llvm.func @return_i1() -> !llvm.i1
+llvm.func @return_i1() -> i1
// CHECK: declare i8 @return_i8()
-llvm.func @return_i8() -> !llvm.i8
+llvm.func @return_i8() -> i8
// CHECK: declare i16 @return_i16()
-llvm.func @return_i16() -> !llvm.i16
+llvm.func @return_i16() -> i16
// CHECK: declare i32 @return_i32()
-llvm.func @return_i32() -> !llvm.i32
+llvm.func @return_i32() -> i32
// CHECK: declare i64 @return_i64()
-llvm.func @return_i64() -> !llvm.i64
+llvm.func @return_i64() -> i64
// CHECK: declare i57 @return_i57()
-llvm.func @return_i57() -> !llvm.i57
+llvm.func @return_i57() -> i57
// CHECK: declare i129 @return_i129()
-llvm.func @return_i129() -> !llvm.i129
+llvm.func @return_i129() -> i129
//
// Pointers.
diff --git a/mlir/test/Target/llvmir.mlir b/mlir/test/Target/llvmir.mlir
index 921c3e87fdae..223d17172207 100644
--- a/mlir/test/Target/llvmir.mlir
+++ b/mlir/test/Target/llvmir.mlir
@@ -1,16 +1,16 @@
// RUN: mlir-translate -mlir-to-llvmir -split-input-file %s | FileCheck %s
// CHECK: @i32_global = internal global i32 42
-llvm.mlir.global internal @i32_global(42: i32) : !llvm.i32
+llvm.mlir.global internal @i32_global(42: i32) : i32
// CHECK: @i32_const = internal constant i53 52
-llvm.mlir.global internal constant @i32_const(52: i53) : !llvm.i53
+llvm.mlir.global internal constant @i32_const(52: i53) : i53
// CHECK: @int_global_array = internal global [3 x i32] [i32 62, i32 62, i32 62]
llvm.mlir.global internal @int_global_array(dense<62> : vector<3xi32>) : !llvm.array<3 x i32>
// CHECK: @i32_global_addr_space = internal addrspace(7) global i32 62
-llvm.mlir.global internal @i32_global_addr_space(62: i32) {addr_space = 7 : i32} : !llvm.i32
+llvm.mlir.global internal @i32_global_addr_space(62: i32) {addr_space = 7 : i32} : i32
// CHECK: @float_global = internal global float 0.000000e+00
llvm.mlir.global internal @float_global(0.0: f32) : !llvm.float
@@ -22,13 +22,13 @@ llvm.mlir.global internal @float_global_array(dense<[-5.0]> : vector<1xf32>) : !
llvm.mlir.global internal constant @string_const("foobar") : !llvm.array<6 x i8>
// CHECK: @int_global_undef = internal global i64 undef
-llvm.mlir.global internal @int_global_undef() : !llvm.i64
+llvm.mlir.global internal @int_global_undef() : i64
// CHECK: @int_gep = internal constant i32* getelementptr (i32, i32* @i32_global, i32 2)
llvm.mlir.global internal constant @int_gep() : !llvm.ptr<i32> {
%addr = llvm.mlir.addressof @i32_global : !llvm.ptr<i32>
- %_c0 = llvm.mlir.constant(2: i32) :!llvm.i32
- %gepinit = llvm.getelementptr %addr[%_c0] : (!llvm.ptr<i32>, !llvm.i32) -> !llvm.ptr<i32>
+ %_c0 = llvm.mlir.constant(2: i32) :i32
+ %gepinit = llvm.getelementptr %addr[%_c0] : (!llvm.ptr<i32>, i32) -> !llvm.ptr<i32>
llvm.return %gepinit : !llvm.ptr<i32>
}
@@ -37,27 +37,27 @@ llvm.mlir.global internal constant @int_gep() : !llvm.ptr<i32> {
//
// CHECK: @private = private global i32 42
-llvm.mlir.global private @private(42 : i32) : !llvm.i32
+llvm.mlir.global private @private(42 : i32) : i32
// CHECK: @internal = internal global i32 42
-llvm.mlir.global internal @internal(42 : i32) : !llvm.i32
+llvm.mlir.global internal @internal(42 : i32) : i32
// CHECK: @available_externally = available_externally global i32 42
-llvm.mlir.global available_externally @available_externally(42 : i32) : !llvm.i32
+llvm.mlir.global available_externally @available_externally(42 : i32) : i32
// CHECK: @linkonce = linkonce global i32 42
-llvm.mlir.global linkonce @linkonce(42 : i32) : !llvm.i32
+llvm.mlir.global linkonce @linkonce(42 : i32) : i32
// CHECK: @weak = weak global i32 42
-llvm.mlir.global weak @weak(42 : i32) : !llvm.i32
+llvm.mlir.global weak @weak(42 : i32) : i32
// CHECK: @common = common global i32 42
-llvm.mlir.global common @common(42 : i32) : !llvm.i32
+llvm.mlir.global common @common(42 : i32) : i32
// CHECK: @appending = appending global i32 42
-llvm.mlir.global appending @appending(42 : i32) : !llvm.i32
+llvm.mlir.global appending @appending(42 : i32) : i32
// CHECK: @extern_weak = extern_weak global i32
-llvm.mlir.global extern_weak @extern_weak() : !llvm.i32
+llvm.mlir.global extern_weak @extern_weak() : i32
// CHECK: @linkonce_odr = linkonce_odr global i32 42
-llvm.mlir.global linkonce_odr @linkonce_odr(42 : i32) : !llvm.i32
+llvm.mlir.global linkonce_odr @linkonce_odr(42 : i32) : i32
// CHECK: @weak_odr = weak_odr global i32 42
-llvm.mlir.global weak_odr @weak_odr(42 : i32) : !llvm.i32
+llvm.mlir.global weak_odr @weak_odr(42 : i32) : i32
// CHECK: @external = external global i32
-llvm.mlir.global external @external() : !llvm.i32
+llvm.mlir.global external @external() : i32
//
@@ -66,7 +66,7 @@ llvm.mlir.global external @external() : !llvm.i32
//
// CHECK: declare i8* @malloc(i64)
-llvm.func @malloc(!llvm.i64) -> !llvm.ptr<i8>
+llvm.func @malloc(i64) -> !llvm.ptr<i8>
// CHECK: declare void @free(i8*)
@@ -92,15 +92,15 @@ llvm.func @global_refs() {
// Check the contracted form of load from array constants.
// CHECK: load i8, i8* getelementptr inbounds ([6 x i8], [6 x i8]* @string_const, i64 0, i64 0)
%2 = llvm.mlir.addressof @string_const : !llvm.ptr<array<6 x i8>>
- %c0 = llvm.mlir.constant(0 : index) : !llvm.i64
- %3 = llvm.getelementptr %2[%c0, %c0] : (!llvm.ptr<array<6 x i8>>, !llvm.i64, !llvm.i64) -> !llvm.ptr<i8>
+ %c0 = llvm.mlir.constant(0 : index) : i64
+ %3 = llvm.getelementptr %2[%c0, %c0] : (!llvm.ptr<array<6 x i8>>, i64, i64) -> !llvm.ptr<i8>
%4 = llvm.load %3 : !llvm.ptr<i8>
llvm.return
}
// CHECK-LABEL: declare void @body(i64)
-llvm.func @body(!llvm.i64)
+llvm.func @body(i64)
// CHECK-LABEL: define void @simple_loop()
@@ -112,16 +112,16 @@ llvm.func @simple_loop() {
// CHECK: [[SIMPLE_bb1]]:
// CHECK-NEXT: br label %[[SIMPLE_bb2:[0-9]+]]
^bb1: // pred: ^bb0
- %0 = llvm.mlir.constant(1 : index) : !llvm.i64
- %1 = llvm.mlir.constant(42 : index) : !llvm.i64
- llvm.br ^bb2(%0 : !llvm.i64)
+ %0 = llvm.mlir.constant(1 : index) : i64
+ %1 = llvm.mlir.constant(42 : index) : i64
+ llvm.br ^bb2(%0 : i64)
// CHECK: [[SIMPLE_bb2]]:
// CHECK-NEXT: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %[[SIMPLE_bb3:[0-9]+]] ], [ 1, %[[SIMPLE_bb1]] ]
// CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, 42
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %[[SIMPLE_bb3]], label %[[SIMPLE_bb4:[0-9]+]]
-^bb2(%2: !llvm.i64): // 2 preds: ^bb1, ^bb3
- %3 = llvm.icmp "slt" %2, %1 : !llvm.i64
+^bb2(%2: i64): // 2 preds: ^bb1, ^bb3
+ %3 = llvm.icmp "slt" %2, %1 : i64
llvm.cond_br %3, ^bb3, ^bb4
// CHECK: [[SIMPLE_bb3]]:
@@ -129,10 +129,10 @@ llvm.func @simple_loop() {
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
// CHECK-NEXT: br label %[[SIMPLE_bb2]]
^bb3: // pred: ^bb2
- llvm.call @body(%2) : (!llvm.i64) -> ()
- %4 = llvm.mlir.constant(1 : index) : !llvm.i64
- %5 = llvm.add %2, %4 : !llvm.i64
- llvm.br ^bb2(%5 : !llvm.i64)
+ llvm.call @body(%2) : (i64) -> ()
+ %4 = llvm.mlir.constant(1 : index) : i64
+ %5 = llvm.add %2, %4 : i64
+ llvm.br ^bb2(%5 : i64)
// CHECK: [[SIMPLE_bb4]]:
// CHECK-NEXT: ret void
@@ -168,29 +168,29 @@ llvm.func @ml_caller() {
}
// CHECK-LABEL: declare i64 @body_args(i64)
-llvm.func @body_args(!llvm.i64) -> !llvm.i64
+llvm.func @body_args(i64) -> i64
// CHECK-LABEL: declare i32 @other(i64, i32)
-llvm.func @other(!llvm.i64, !llvm.i32) -> !llvm.i32
+llvm.func @other(i64, i32) -> i32
// CHECK-LABEL: define i32 @func_args(i32 {{%.*}}, i32 {{%.*}})
// CHECK-NEXT: br label %[[ARGS_bb1:[0-9]+]]
-llvm.func @func_args(%arg0: !llvm.i32, %arg1: !llvm.i32) -> !llvm.i32 {
- %0 = llvm.mlir.constant(0 : i32) : !llvm.i32
+llvm.func @func_args(%arg0: i32, %arg1: i32) -> i32 {
+ %0 = llvm.mlir.constant(0 : i32) : i32
llvm.br ^bb1
// CHECK: [[ARGS_bb1]]:
// CHECK-NEXT: br label %[[ARGS_bb2:[0-9]+]]
^bb1: // pred: ^bb0
- %1 = llvm.mlir.constant(0 : index) : !llvm.i64
- %2 = llvm.mlir.constant(42 : index) : !llvm.i64
- llvm.br ^bb2(%1 : !llvm.i64)
+ %1 = llvm.mlir.constant(0 : index) : i64
+ %2 = llvm.mlir.constant(42 : index) : i64
+ llvm.br ^bb2(%1 : i64)
// CHECK: [[ARGS_bb2]]:
// CHECK-NEXT: %5 = phi i64 [ %12, %[[ARGS_bb3:[0-9]+]] ], [ 0, %[[ARGS_bb1]] ]
// CHECK-NEXT: %6 = icmp slt i64 %5, 42
// CHECK-NEXT: br i1 %6, label %[[ARGS_bb3]], label %[[ARGS_bb4:[0-9]+]]
-^bb2(%3: !llvm.i64): // 2 preds: ^bb1, ^bb3
- %4 = llvm.icmp "slt" %3, %2 : !llvm.i64
+^bb2(%3: i64): // 2 preds: ^bb1, ^bb3
+ %4 = llvm.icmp "slt" %3, %2 : i64
llvm.cond_br %4, ^bb3, ^bb4
// CHECK: [[ARGS_bb3]]:
@@ -201,31 +201,31 @@ llvm.func @func_args(%arg0: !llvm.i32, %arg1: !llvm.i32) -> !llvm.i32 {
// CHECK-NEXT: %12 = add i64 %5, 1
// CHECK-NEXT: br label %[[ARGS_bb2]]
^bb3: // pred: ^bb2
- %5 = llvm.call @body_args(%3) : (!llvm.i64) -> !llvm.i64
- %6 = llvm.call @other(%5, %arg0) : (!llvm.i64, !llvm.i32) -> !llvm.i32
- %7 = llvm.call @other(%5, %6) : (!llvm.i64, !llvm.i32) -> !llvm.i32
- %8 = llvm.call @other(%5, %arg1) : (!llvm.i64, !llvm.i32) -> !llvm.i32
- %9 = llvm.mlir.constant(1 : index) : !llvm.i64
- %10 = llvm.add %3, %9 : !llvm.i64
- llvm.br ^bb2(%10 : !llvm.i64)
+ %5 = llvm.call @body_args(%3) : (i64) -> i64
+ %6 = llvm.call @other(%5, %arg0) : (i64, i32) -> i32
+ %7 = llvm.call @other(%5, %6) : (i64, i32) -> i32
+ %8 = llvm.call @other(%5, %arg1) : (i64, i32) -> i32
+ %9 = llvm.mlir.constant(1 : index) : i64
+ %10 = llvm.add %3, %9 : i64
+ llvm.br ^bb2(%10 : i64)
// CHECK: [[ARGS_bb4]]:
// CHECK-NEXT: %14 = call i32 @other(i64 0, i32 0)
// CHECK-NEXT: ret i32 %14
^bb4: // pred: ^bb2
- %11 = llvm.mlir.constant(0 : index) : !llvm.i64
- %12 = llvm.call @other(%11, %0) : (!llvm.i64, !llvm.i32) -> !llvm.i32
- llvm.return %12 : !llvm.i32
+ %11 = llvm.mlir.constant(0 : index) : i64
+ %12 = llvm.call @other(%11, %0) : (i64, i32) -> i32
+ llvm.return %12 : i32
}
// CHECK: declare void @pre(i64)
-llvm.func @pre(!llvm.i64)
+llvm.func @pre(i64)
// CHECK: declare void @body2(i64, i64)
-llvm.func @body2(!llvm.i64, !llvm.i64)
+llvm.func @body2(i64, i64)
// CHECK: declare void @post(i64)
-llvm.func @post(!llvm.i64)
+llvm.func @post(i64)
// CHECK-LABEL: define void @imperfectly_nested_loops()
// CHECK-NEXT: br label %[[IMPER_bb1:[0-9]+]]
@@ -235,38 +235,38 @@ llvm.func @imperfectly_nested_loops() {
// CHECK: [[IMPER_bb1]]:
// CHECK-NEXT: br label %[[IMPER_bb2:[0-9]+]]
^bb1: // pred: ^bb0
- %0 = llvm.mlir.constant(0 : index) : !llvm.i64
- %1 = llvm.mlir.constant(42 : index) : !llvm.i64
- llvm.br ^bb2(%0 : !llvm.i64)
+ %0 = llvm.mlir.constant(0 : index) : i64
+ %1 = llvm.mlir.constant(42 : index) : i64
+ llvm.br ^bb2(%0 : i64)
// CHECK: [[IMPER_bb2]]:
// CHECK-NEXT: %3 = phi i64 [ %13, %[[IMPER_bb7:[0-9]+]] ], [ 0, %[[IMPER_bb1]] ]
// CHECK-NEXT: %4 = icmp slt i64 %3, 42
// CHECK-NEXT: br i1 %4, label %[[IMPER_bb3:[0-9]+]], label %[[IMPER_bb8:[0-9]+]]
-^bb2(%2: !llvm.i64): // 2 preds: ^bb1, ^bb7
- %3 = llvm.icmp "slt" %2, %1 : !llvm.i64
+^bb2(%2: i64): // 2 preds: ^bb1, ^bb7
+ %3 = llvm.icmp "slt" %2, %1 : i64
llvm.cond_br %3, ^bb3, ^bb8
// CHECK: [[IMPER_bb3]]:
// CHECK-NEXT: call void @pre(i64 %3)
// CHECK-NEXT: br label %[[IMPER_bb4:[0-9]+]]
^bb3: // pred: ^bb2
- llvm.call @pre(%2) : (!llvm.i64) -> ()
+ llvm.call @pre(%2) : (i64) -> ()
llvm.br ^bb4
// CHECK: [[IMPER_bb4]]:
// CHECK-NEXT: br label %[[IMPER_bb5:[0-9]+]]
^bb4: // pred: ^bb3
- %4 = llvm.mlir.constant(7 : index) : !llvm.i64
- %5 = llvm.mlir.constant(56 : index) : !llvm.i64
- llvm.br ^bb5(%4 : !llvm.i64)
+ %4 = llvm.mlir.constant(7 : index) : i64
+ %5 = llvm.mlir.constant(56 : index) : i64
+ llvm.br ^bb5(%4 : i64)
// CHECK: [[IMPER_bb5]]:
// CHECK-NEXT: %8 = phi i64 [ %11, %[[IMPER_bb6:[0-9]+]] ], [ 7, %[[IMPER_bb4]] ]
// CHECK-NEXT: %9 = icmp slt i64 %8, 56
// CHECK-NEXT: br i1 %9, label %[[IMPER_bb6]], label %[[IMPER_bb7]]
-^bb5(%6: !llvm.i64): // 2 preds: ^bb4, ^bb6
- %7 = llvm.icmp "slt" %6, %5 : !llvm.i64
+^bb5(%6: i64): // 2 preds: ^bb4, ^bb6
+ %7 = llvm.icmp "slt" %6, %5 : i64
llvm.cond_br %7, ^bb6, ^bb7
// CHECK: [[IMPER_bb6]]:
@@ -274,20 +274,20 @@ llvm.func @imperfectly_nested_loops() {
// CHECK-NEXT: %11 = add i64 %8, 2
// CHECK-NEXT: br label %[[IMPER_bb5]]
^bb6: // pred: ^bb5
- llvm.call @body2(%2, %6) : (!llvm.i64, !llvm.i64) -> ()
- %8 = llvm.mlir.constant(2 : index) : !llvm.i64
- %9 = llvm.add %6, %8 : !llvm.i64
- llvm.br ^bb5(%9 : !llvm.i64)
+ llvm.call @body2(%2, %6) : (i64, i64) -> ()
+ %8 = llvm.mlir.constant(2 : index) : i64
+ %9 = llvm.add %6, %8 : i64
+ llvm.br ^bb5(%9 : i64)
// CHECK: [[IMPER_bb7]]:
// CHECK-NEXT: call void @post(i64 %3)
// CHECK-NEXT: %13 = add i64 %3, 1
// CHECK-NEXT: br label %[[IMPER_bb2]]
^bb7: // pred: ^bb5
- llvm.call @post(%2) : (!llvm.i64) -> ()
- %10 = llvm.mlir.constant(1 : index) : !llvm.i64
- %11 = llvm.add %2, %10 : !llvm.i64
- llvm.br ^bb2(%11 : !llvm.i64)
+ llvm.call @post(%2) : (i64) -> ()
+ %10 = llvm.mlir.constant(1 : index) : i64
+ %11 = llvm.add %2, %10 : i64
+ llvm.br ^bb2(%11 : i64)
// CHECK: [[IMPER_bb8]]:
// CHECK-NEXT: ret void
@@ -296,10 +296,10 @@ llvm.func @imperfectly_nested_loops() {
}
// CHECK: declare void @mid(i64)
-llvm.func @mid(!llvm.i64)
+llvm.func @mid(i64)
// CHECK: declare void @body3(i64, i64)
-llvm.func @body3(!llvm.i64, !llvm.i64)
+llvm.func @body3(i64, i64)
// A complete function transformation check.
// CHECK-LABEL: define void @more_imperfectly_nested_loops()
@@ -346,47 +346,47 @@ llvm.func @body3(!llvm.i64, !llvm.i64)
llvm.func @more_imperfectly_nested_loops() {
llvm.br ^bb1
^bb1: // pred: ^bb0
- %0 = llvm.mlir.constant(0 : index) : !llvm.i64
- %1 = llvm.mlir.constant(42 : index) : !llvm.i64
- llvm.br ^bb2(%0 : !llvm.i64)
-^bb2(%2: !llvm.i64): // 2 preds: ^bb1, ^bb11
- %3 = llvm.icmp "slt" %2, %1 : !llvm.i64
+ %0 = llvm.mlir.constant(0 : index) : i64
+ %1 = llvm.mlir.constant(42 : index) : i64
+ llvm.br ^bb2(%0 : i64)
+^bb2(%2: i64): // 2 preds: ^bb1, ^bb11
+ %3 = llvm.icmp "slt" %2, %1 : i64
llvm.cond_br %3, ^bb3, ^bb12
^bb3: // pred: ^bb2
- llvm.call @pre(%2) : (!llvm.i64) -> ()
+ llvm.call @pre(%2) : (i64) -> ()
llvm.br ^bb4
^bb4: // pred: ^bb3
- %4 = llvm.mlir.constant(7 : index) : !llvm.i64
- %5 = llvm.mlir.constant(56 : index) : !llvm.i64
- llvm.br ^bb5(%4 : !llvm.i64)
-^bb5(%6: !llvm.i64): // 2 preds: ^bb4, ^bb6
- %7 = llvm.icmp "slt" %6, %5 : !llvm.i64
+ %4 = llvm.mlir.constant(7 : index) : i64
+ %5 = llvm.mlir.constant(56 : index) : i64
+ llvm.br ^bb5(%4 : i64)
+^bb5(%6: i64): // 2 preds: ^bb4, ^bb6
+ %7 = llvm.icmp "slt" %6, %5 : i64
llvm.cond_br %7, ^bb6, ^bb7
^bb6: // pred: ^bb5
- llvm.call @body2(%2, %6) : (!llvm.i64, !llvm.i64) -> ()
- %8 = llvm.mlir.constant(2 : index) : !llvm.i64
- %9 = llvm.add %6, %8 : !llvm.i64
- llvm.br ^bb5(%9 : !llvm.i64)
+ llvm.call @body2(%2, %6) : (i64, i64) -> ()
+ %8 = llvm.mlir.constant(2 : index) : i64
+ %9 = llvm.add %6, %8 : i64
+ llvm.br ^bb5(%9 : i64)
^bb7: // pred: ^bb5
- llvm.call @mid(%2) : (!llvm.i64) -> ()
+ llvm.call @mid(%2) : (i64) -> ()
llvm.br ^bb8
^bb8: // pred: ^bb7
- %10 = llvm.mlir.constant(18 : index) : !llvm.i64
- %11 = llvm.mlir.constant(37 : index) : !llvm.i64
- llvm.br ^bb9(%10 : !llvm.i64)
-^bb9(%12: !llvm.i64): // 2 preds: ^bb8, ^bb10
- %13 = llvm.icmp "slt" %12, %11 : !llvm.i64
+ %10 = llvm.mlir.constant(18 : index) : i64
+ %11 = llvm.mlir.constant(37 : index) : i64
+ llvm.br ^bb9(%10 : i64)
+^bb9(%12: i64): // 2 preds: ^bb8, ^bb10
+ %13 = llvm.icmp "slt" %12, %11 : i64
llvm.cond_br %13, ^bb10, ^bb11
^bb10: // pred: ^bb9
- llvm.call @body3(%2, %12) : (!llvm.i64, !llvm.i64) -> ()
- %14 = llvm.mlir.constant(3 : index) : !llvm.i64
- %15 = llvm.add %12, %14 : !llvm.i64
- llvm.br ^bb9(%15 : !llvm.i64)
+ llvm.call @body3(%2, %12) : (i64, i64) -> ()
+ %14 = llvm.mlir.constant(3 : index) : i64
+ %15 = llvm.add %12, %14 : i64
+ llvm.br ^bb9(%15 : i64)
^bb11: // pred: ^bb9
- llvm.call @post(%2) : (!llvm.i64) -> ()
- %16 = llvm.mlir.constant(1 : index) : !llvm.i64
- %17 = llvm.add %2, %16 : !llvm.i64
- llvm.br ^bb2(%17 : !llvm.i64)
+ llvm.call @post(%2) : (i64) -> ()
+ %16 = llvm.mlir.constant(1 : index) : i64
+ %17 = llvm.add %2, %16 : i64
+ llvm.br ^bb2(%17 : i64)
^bb12: // pred: ^bb2
llvm.return
}
@@ -411,13 +411,13 @@ llvm.func @memref_alloc() {
// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 400)
// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float* } undef, float* %{{[0-9]+}}, 0
- %0 = llvm.mlir.constant(10 : index) : !llvm.i64
- %1 = llvm.mlir.constant(10 : index) : !llvm.i64
- %2 = llvm.mul %0, %1 : !llvm.i64
+ %0 = llvm.mlir.constant(10 : index) : i64
+ %1 = llvm.mlir.constant(10 : index) : i64
+ %2 = llvm.mul %0, %1 : i64
%3 = llvm.mlir.undef : !llvm.struct<(ptr<float>)>
- %4 = llvm.mlir.constant(4 : index) : !llvm.i64
- %5 = llvm.mul %2, %4 : !llvm.i64
- %6 = llvm.call @malloc(%5) : (!llvm.i64) -> !llvm.ptr<i8>
+ %4 = llvm.mlir.constant(4 : index) : i64
+ %5 = llvm.mul %2, %4 : i64
+ %6 = llvm.call @malloc(%5) : (i64) -> !llvm.ptr<i8>
%7 = llvm.bitcast %6 : !llvm.ptr<i8> to !llvm.ptr<float>
%8 = llvm.insertvalue %7, %3[0] : !llvm.struct<(ptr<float>)>
// CHECK-NEXT: ret void
@@ -425,7 +425,7 @@ llvm.func @memref_alloc() {
}
// CHECK-LABEL: declare i64 @get_index()
-llvm.func @get_index() -> !llvm.i64
+llvm.func @get_index() -> i64
// CHECK-LABEL: define void @store_load_static()
llvm.func @store_load_static() {
@@ -433,79 +433,79 @@ llvm.func @store_load_static() {
// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 40)
// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float* } undef, float* %{{[0-9]+}}, 0
- %0 = llvm.mlir.constant(10 : index) : !llvm.i64
+ %0 = llvm.mlir.constant(10 : index) : i64
%1 = llvm.mlir.undef : !llvm.struct<(ptr<float>)>
- %2 = llvm.mlir.constant(4 : index) : !llvm.i64
- %3 = llvm.mul %0, %2 : !llvm.i64
- %4 = llvm.call @malloc(%3) : (!llvm.i64) -> !llvm.ptr<i8>
+ %2 = llvm.mlir.constant(4 : index) : i64
+ %3 = llvm.mul %0, %2 : i64
+ %4 = llvm.call @malloc(%3) : (i64) -> !llvm.ptr<i8>
%5 = llvm.bitcast %4 : !llvm.ptr<i8> to !llvm.ptr<float>
%6 = llvm.insertvalue %5, %1[0] : !llvm.struct<(ptr<float>)>
%7 = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
llvm.br ^bb1
^bb1: // pred: ^bb0
- %8 = llvm.mlir.constant(0 : index) : !llvm.i64
- %9 = llvm.mlir.constant(10 : index) : !llvm.i64
- llvm.br ^bb2(%8 : !llvm.i64)
+ %8 = llvm.mlir.constant(0 : index) : i64
+ %9 = llvm.mlir.constant(10 : index) : i64
+ llvm.br ^bb2(%8 : i64)
// CHECK: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %{{[0-9]+}} ], [ 0, %{{[0-9]+}} ]
-^bb2(%10: !llvm.i64): // 2 preds: ^bb1, ^bb3
+^bb2(%10: i64): // 2 preds: ^bb1, ^bb3
// CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, 10
- %11 = llvm.icmp "slt" %10, %9 : !llvm.i64
+ %11 = llvm.icmp "slt" %10, %9 : i64
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
llvm.cond_br %11, ^bb3, ^bb4
^bb3: // pred: ^bb2
// CHECK: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
// CHECK-NEXT: store float 1.000000e+00, float* %{{[0-9]+}}
- %12 = llvm.mlir.constant(10 : index) : !llvm.i64
+ %12 = llvm.mlir.constant(10 : index) : i64
%13 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<float>)>
- %14 = llvm.getelementptr %13[%10] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+ %14 = llvm.getelementptr %13[%10] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
llvm.store %7, %14 : !llvm.ptr<float>
- %15 = llvm.mlir.constant(1 : index) : !llvm.i64
+ %15 = llvm.mlir.constant(1 : index) : i64
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
- %16 = llvm.add %10, %15 : !llvm.i64
+ %16 = llvm.add %10, %15 : i64
// CHECK-NEXT: br label %{{[0-9]+}}
- llvm.br ^bb2(%16 : !llvm.i64)
+ llvm.br ^bb2(%16 : i64)
^bb4: // pred: ^bb2
llvm.br ^bb5
^bb5: // pred: ^bb4
- %17 = llvm.mlir.constant(0 : index) : !llvm.i64
- %18 = llvm.mlir.constant(10 : index) : !llvm.i64
- llvm.br ^bb6(%17 : !llvm.i64)
+ %17 = llvm.mlir.constant(0 : index) : i64
+ %18 = llvm.mlir.constant(10 : index) : i64
+ llvm.br ^bb6(%17 : i64)
// CHECK: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %{{[0-9]+}} ], [ 0, %{{[0-9]+}} ]
-^bb6(%19: !llvm.i64): // 2 preds: ^bb5, ^bb7
+^bb6(%19: i64): // 2 preds: ^bb5, ^bb7
// CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, 10
- %20 = llvm.icmp "slt" %19, %18 : !llvm.i64
+ %20 = llvm.icmp "slt" %19, %18 : i64
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
llvm.cond_br %20, ^bb7, ^bb8
^bb7: // pred: ^bb6
// CHECK: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
- %21 = llvm.mlir.constant(10 : index) : !llvm.i64
+ %21 = llvm.mlir.constant(10 : index) : i64
%22 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<float>)>
- %23 = llvm.getelementptr %22[%19] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+ %23 = llvm.getelementptr %22[%19] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
%24 = llvm.load %23 : !llvm.ptr<float>
- %25 = llvm.mlir.constant(1 : index) : !llvm.i64
+ %25 = llvm.mlir.constant(1 : index) : i64
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
- %26 = llvm.add %19, %25 : !llvm.i64
+ %26 = llvm.add %19, %25 : i64
// CHECK-NEXT: br label %{{[0-9]+}}
- llvm.br ^bb6(%26 : !llvm.i64)
+ llvm.br ^bb6(%26 : i64)
^bb8: // pred: ^bb6
// CHECK: ret void
llvm.return
}
// CHECK-LABEL: define void @store_load_dynamic(i64 {{%.*}})
-llvm.func @store_load_dynamic(%arg0: !llvm.i64) {
+llvm.func @store_load_dynamic(%arg0: i64) {
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 %{{[0-9]+}})
// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } undef, float* %{{[0-9]+}}, 0
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
%0 = llvm.mlir.undef : !llvm.struct<(ptr<float>, i64)>
- %1 = llvm.mlir.constant(4 : index) : !llvm.i64
- %2 = llvm.mul %arg0, %1 : !llvm.i64
- %3 = llvm.call @malloc(%2) : (!llvm.i64) -> !llvm.ptr<i8>
+ %1 = llvm.mlir.constant(4 : index) : i64
+ %2 = llvm.mul %arg0, %1 : i64
+ %3 = llvm.call @malloc(%2) : (i64) -> !llvm.ptr<i8>
%4 = llvm.bitcast %3 : !llvm.ptr<i8> to !llvm.ptr<float>
%5 = llvm.insertvalue %4, %0[0] : !llvm.struct<(ptr<float>, i64)>
%6 = llvm.insertvalue %arg0, %5[1] : !llvm.struct<(ptr<float>, i64)>
@@ -513,12 +513,12 @@ llvm.func @store_load_dynamic(%arg0: !llvm.i64) {
// CHECK-NEXT: br label %{{[0-9]+}}
llvm.br ^bb1
^bb1: // pred: ^bb0
- %8 = llvm.mlir.constant(0 : index) : !llvm.i64
- llvm.br ^bb2(%8 : !llvm.i64)
+ %8 = llvm.mlir.constant(0 : index) : i64
+ llvm.br ^bb2(%8 : i64)
// CHECK: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %{{[0-9]+}} ], [ 0, %{{[0-9]+}} ]
-^bb2(%9: !llvm.i64): // 2 preds: ^bb1, ^bb3
+^bb2(%9: i64): // 2 preds: ^bb1, ^bb3
// CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, %{{[0-9]+}}
- %10 = llvm.icmp "slt" %9, %arg0 : !llvm.i64
+ %10 = llvm.icmp "slt" %9, %arg0 : i64
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
llvm.cond_br %10, ^bb3, ^bb4
^bb3: // pred: ^bb2
@@ -528,22 +528,22 @@ llvm.func @store_load_dynamic(%arg0: !llvm.i64) {
// CHECK-NEXT: store float 1.000000e+00, float* %{{[0-9]+}}
%11 = llvm.extractvalue %6[1] : !llvm.struct<(ptr<float>, i64)>
%12 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<float>, i64)>
- %13 = llvm.getelementptr %12[%9] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+ %13 = llvm.getelementptr %12[%9] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
llvm.store %7, %13 : !llvm.ptr<float>
- %14 = llvm.mlir.constant(1 : index) : !llvm.i64
+ %14 = llvm.mlir.constant(1 : index) : i64
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
- %15 = llvm.add %9, %14 : !llvm.i64
+ %15 = llvm.add %9, %14 : i64
// CHECK-NEXT: br label %{{[0-9]+}}
- llvm.br ^bb2(%15 : !llvm.i64)
+ llvm.br ^bb2(%15 : i64)
^bb4: // pred: ^bb3
llvm.br ^bb5
^bb5: // pred: ^bb4
- %16 = llvm.mlir.constant(0 : index) : !llvm.i64
- llvm.br ^bb6(%16 : !llvm.i64)
+ %16 = llvm.mlir.constant(0 : index) : i64
+ llvm.br ^bb6(%16 : i64)
// CHECK: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %{{[0-9]+}} ], [ 0, %{{[0-9]+}} ]
-^bb6(%17: !llvm.i64): // 2 preds: ^bb5, ^bb7
+^bb6(%17: i64): // 2 preds: ^bb5, ^bb7
// CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, %{{[0-9]+}}
- %18 = llvm.icmp "slt" %17, %arg0 : !llvm.i64
+ %18 = llvm.icmp "slt" %17, %arg0 : i64
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
llvm.cond_br %18, ^bb7, ^bb8
^bb7: // pred: ^bb6
@@ -553,21 +553,21 @@ llvm.func @store_load_dynamic(%arg0: !llvm.i64) {
// CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
%19 = llvm.extractvalue %6[1] : !llvm.struct<(ptr<float>, i64)>
%20 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<float>, i64)>
- %21 = llvm.getelementptr %20[%17] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+ %21 = llvm.getelementptr %20[%17] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
%22 = llvm.load %21 : !llvm.ptr<float>
- %23 = llvm.mlir.constant(1 : index) : !llvm.i64
+ %23 = llvm.mlir.constant(1 : index) : i64
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
- %24 = llvm.add %17, %23 : !llvm.i64
+ %24 = llvm.add %17, %23 : i64
// CHECK-NEXT: br label %{{[0-9]+}}
- llvm.br ^bb6(%24 : !llvm.i64)
+ llvm.br ^bb6(%24 : i64)
^bb8: // pred: ^bb6
// CHECK: ret void
llvm.return
}
// CHECK-LABEL: define void @store_load_mixed(i64 {{%.*}})
-llvm.func @store_load_mixed(%arg0: !llvm.i64) {
- %0 = llvm.mlir.constant(10 : index) : !llvm.i64
+llvm.func @store_load_mixed(%arg0: i64) {
+ %0 = llvm.mlir.constant(10 : index) : i64
// CHECK-NEXT: %{{[0-9]+}} = mul i64 2, %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 10
@@ -577,15 +577,15 @@ llvm.func @store_load_mixed(%arg0: !llvm.i64) {
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } undef, float* %{{[0-9]+}}, 0
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } %{{[0-9]+}}, i64 10, 2
- %1 = llvm.mlir.constant(2 : index) : !llvm.i64
- %2 = llvm.mlir.constant(4 : index) : !llvm.i64
- %3 = llvm.mul %1, %arg0 : !llvm.i64
- %4 = llvm.mul %3, %2 : !llvm.i64
- %5 = llvm.mul %4, %0 : !llvm.i64
+ %1 = llvm.mlir.constant(2 : index) : i64
+ %2 = llvm.mlir.constant(4 : index) : i64
+ %3 = llvm.mul %1, %arg0 : i64
+ %4 = llvm.mul %3, %2 : i64
+ %5 = llvm.mul %4, %0 : i64
%6 = llvm.mlir.undef : !llvm.struct<(ptr<float>, i64, i64)>
- %7 = llvm.mlir.constant(4 : index) : !llvm.i64
- %8 = llvm.mul %5, %7 : !llvm.i64
- %9 = llvm.call @malloc(%8) : (!llvm.i64) -> !llvm.ptr<i8>
+ %7 = llvm.mlir.constant(4 : index) : i64
+ %8 = llvm.mul %5, %7 : i64
+ %9 = llvm.call @malloc(%8) : (i64) -> !llvm.ptr<i8>
%10 = llvm.bitcast %9 : !llvm.ptr<i8> to !llvm.ptr<float>
%11 = llvm.insertvalue %10, %6[0] : !llvm.struct<(ptr<float>, i64, i64)>
%12 = llvm.insertvalue %arg0, %11[1] : !llvm.struct<(ptr<float>, i64, i64)>
@@ -593,12 +593,12 @@ llvm.func @store_load_mixed(%arg0: !llvm.i64) {
// CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
// CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
- %14 = llvm.mlir.constant(1 : index) : !llvm.i64
- %15 = llvm.mlir.constant(2 : index) : !llvm.i64
- %16 = llvm.call @get_index() : () -> !llvm.i64
- %17 = llvm.call @get_index() : () -> !llvm.i64
+ %14 = llvm.mlir.constant(1 : index) : i64
+ %15 = llvm.mlir.constant(2 : index) : i64
+ %16 = llvm.call @get_index() : () -> i64
+ %17 = llvm.call @get_index() : () -> i64
%18 = llvm.mlir.constant(4.200000e+01 : f32) : !llvm.float
- %19 = llvm.mlir.constant(2 : index) : !llvm.i64
+ %19 = llvm.mlir.constant(2 : index) : i64
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 1
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 2
// CHECK-NEXT: %{{[0-9]+}} = mul i64 1, %{{[0-9]+}}
@@ -611,16 +611,16 @@ llvm.func @store_load_mixed(%arg0: !llvm.i64) {
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
%20 = llvm.extractvalue %13[1] : !llvm.struct<(ptr<float>, i64, i64)>
- %21 = llvm.mlir.constant(4 : index) : !llvm.i64
+ %21 = llvm.mlir.constant(4 : index) : i64
%22 = llvm.extractvalue %13[2] : !llvm.struct<(ptr<float>, i64, i64)>
- %23 = llvm.mul %14, %20 : !llvm.i64
- %24 = llvm.add %23, %15 : !llvm.i64
- %25 = llvm.mul %24, %21 : !llvm.i64
- %26 = llvm.add %25, %16 : !llvm.i64
- %27 = llvm.mul %26, %22 : !llvm.i64
- %28 = llvm.add %27, %17 : !llvm.i64
+ %23 = llvm.mul %14, %20 : i64
+ %24 = llvm.add %23, %15 : i64
+ %25 = llvm.mul %24, %21 : i64
+ %26 = llvm.add %25, %16 : i64
+ %27 = llvm.mul %26, %22 : i64
+ %28 = llvm.add %27, %17 : i64
%29 = llvm.extractvalue %13[0] : !llvm.struct<(ptr<float>, i64, i64)>
- %30 = llvm.getelementptr %29[%28] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+ %30 = llvm.getelementptr %29[%28] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
llvm.store %18, %30 : !llvm.ptr<float>
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 1
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 2
@@ -633,18 +633,18 @@ llvm.func @store_load_mixed(%arg0: !llvm.i64) {
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 0
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
- %31 = llvm.mlir.constant(2 : index) : !llvm.i64
+ %31 = llvm.mlir.constant(2 : index) : i64
%32 = llvm.extractvalue %13[1] : !llvm.struct<(ptr<float>, i64, i64)>
- %33 = llvm.mlir.constant(4 : index) : !llvm.i64
+ %33 = llvm.mlir.constant(4 : index) : i64
%34 = llvm.extractvalue %13[2] : !llvm.struct<(ptr<float>, i64, i64)>
- %35 = llvm.mul %17, %32 : !llvm.i64
- %36 = llvm.add %35, %16 : !llvm.i64
- %37 = llvm.mul %36, %33 : !llvm.i64
- %38 = llvm.add %37, %15 : !llvm.i64
- %39 = llvm.mul %38, %34 : !llvm.i64
- %40 = llvm.add %39, %14 : !llvm.i64
+ %35 = llvm.mul %17, %32 : i64
+ %36 = llvm.add %35, %16 : i64
+ %37 = llvm.mul %36, %33 : i64
+ %38 = llvm.add %37, %15 : i64
+ %39 = llvm.mul %38, %34 : i64
+ %40 = llvm.add %39, %14 : i64
%41 = llvm.extractvalue %13[0] : !llvm.struct<(ptr<float>, i64, i64)>
- %42 = llvm.getelementptr %41[%40] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+ %42 = llvm.getelementptr %41[%40] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
%43 = llvm.load %42 : !llvm.ptr<float>
// CHECK-NEXT: ret void
llvm.return
@@ -652,16 +652,16 @@ llvm.func @store_load_mixed(%arg0: !llvm.i64) {
// CHECK-LABEL: define { float*, i64 } @memref_args_rets({ float* } {{%.*}}, { float*, i64 } {{%.*}}, { float*, i64 } {{%.*}})
llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<float>)>, %arg1: !llvm.struct<(ptr<float>, i64)>, %arg2: !llvm.struct<(ptr<float>, i64)>) -> !llvm.struct<(ptr<float>, i64)> {
- %0 = llvm.mlir.constant(7 : index) : !llvm.i64
+ %0 = llvm.mlir.constant(7 : index) : i64
// CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
- %1 = llvm.call @get_index() : () -> !llvm.i64
+ %1 = llvm.call @get_index() : () -> i64
%2 = llvm.mlir.constant(4.200000e+01 : f32) : !llvm.float
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 7
// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
- %3 = llvm.mlir.constant(10 : index) : !llvm.i64
+ %3 = llvm.mlir.constant(10 : index) : i64
%4 = llvm.extractvalue %arg0[0] : !llvm.struct<(ptr<float>)>
- %5 = llvm.getelementptr %4[%0] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+ %5 = llvm.getelementptr %4[%0] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
llvm.store %2, %5 : !llvm.ptr<float>
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
@@ -669,7 +669,7 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<float>)>, %arg1: !llvm.stru
// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
%6 = llvm.extractvalue %arg1[1] : !llvm.struct<(ptr<float>, i64)>
%7 = llvm.extractvalue %arg1[0] : !llvm.struct<(ptr<float>, i64)>
- %8 = llvm.getelementptr %7[%0] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+ %8 = llvm.getelementptr %7[%0] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
llvm.store %2, %8 : !llvm.ptr<float>
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
// CHECK-NEXT: %{{[0-9]+}} = mul i64 7, %{{[0-9]+}}
@@ -677,12 +677,12 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<float>)>, %arg1: !llvm.stru
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
- %9 = llvm.mlir.constant(10 : index) : !llvm.i64
+ %9 = llvm.mlir.constant(10 : index) : i64
%10 = llvm.extractvalue %arg2[1] : !llvm.struct<(ptr<float>, i64)>
- %11 = llvm.mul %0, %10 : !llvm.i64
- %12 = llvm.add %11, %1 : !llvm.i64
+ %11 = llvm.mul %0, %10 : i64
+ %12 = llvm.add %11, %1 : i64
%13 = llvm.extractvalue %arg2[0] : !llvm.struct<(ptr<float>, i64)>
- %14 = llvm.getelementptr %13[%12] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+ %14 = llvm.getelementptr %13[%12] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
llvm.store %2, %14 : !llvm.ptr<float>
// CHECK-NEXT: %{{[0-9]+}} = mul i64 10, %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
@@ -690,12 +690,12 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<float>)>, %arg1: !llvm.stru
// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } undef, float* %{{[0-9]+}}, 0
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
- %15 = llvm.mlir.constant(10 : index) : !llvm.i64
- %16 = llvm.mul %15, %1 : !llvm.i64
+ %15 = llvm.mlir.constant(10 : index) : i64
+ %16 = llvm.mul %15, %1 : i64
%17 = llvm.mlir.undef : !llvm.struct<(ptr<float>, i64)>
- %18 = llvm.mlir.constant(4 : index) : !llvm.i64
- %19 = llvm.mul %16, %18 : !llvm.i64
- %20 = llvm.call @malloc(%19) : (!llvm.i64) -> !llvm.ptr<i8>
+ %18 = llvm.mlir.constant(4 : index) : i64
+ %19 = llvm.mul %16, %18 : i64
+ %20 = llvm.call @malloc(%19) : (i64) -> !llvm.ptr<i8>
%21 = llvm.bitcast %20 : !llvm.ptr<i8> to !llvm.ptr<float>
%22 = llvm.insertvalue %21, %17[0] : !llvm.struct<(ptr<float>, i64)>
%23 = llvm.insertvalue %1, %22[1] : !llvm.struct<(ptr<float>, i64)>
@@ -705,34 +705,34 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<float>)>, %arg1: !llvm.stru
// CHECK-LABEL: define i64 @memref_dim({ float*, i64, i64 } {{%.*}})
-llvm.func @memref_dim(%arg0: !llvm.struct<(ptr<float>, i64, i64)>) -> !llvm.i64 {
+llvm.func @memref_dim(%arg0: !llvm.struct<(ptr<float>, i64, i64)>) -> i64 {
// Expecting this to create an LLVM constant.
- %0 = llvm.mlir.constant(42 : index) : !llvm.i64
+ %0 = llvm.mlir.constant(42 : index) : i64
// CHECK-NEXT: %2 = extractvalue { float*, i64, i64 } %0, 1
%1 = llvm.extractvalue %arg0[1] : !llvm.struct<(ptr<float>, i64, i64)>
// Expecting this to create an LLVM constant.
- %2 = llvm.mlir.constant(10 : index) : !llvm.i64
+ %2 = llvm.mlir.constant(10 : index) : i64
// CHECK-NEXT: %3 = extractvalue { float*, i64, i64 } %0, 2
%3 = llvm.extractvalue %arg0[2] : !llvm.struct<(ptr<float>, i64, i64)>
// Checking that the constant for d0 has been created.
// CHECK-NEXT: %4 = add i64 42, %2
- %4 = llvm.add %0, %1 : !llvm.i64
+ %4 = llvm.add %0, %1 : i64
// Checking that the constant for d2 has been created.
// CHECK-NEXT: %5 = add i64 10, %3
- %5 = llvm.add %2, %3 : !llvm.i64
+ %5 = llvm.add %2, %3 : i64
// CHECK-NEXT: %6 = add i64 %4, %5
- %6 = llvm.add %4, %5 : !llvm.i64
+ %6 = llvm.add %4, %5 : i64
// CHECK-NEXT: ret i64 %6
- llvm.return %6 : !llvm.i64
+ llvm.return %6 : i64
}
-llvm.func @get_i64() -> !llvm.i64
+llvm.func @get_i64() -> i64
llvm.func @get_f32() -> !llvm.float
llvm.func @get_memref() -> !llvm.struct<(ptr<float>, i64, i64)>
// CHECK-LABEL: define { i64, float, { float*, i64, i64 } } @multireturn()
llvm.func @multireturn() -> !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)> {
- %0 = llvm.call @get_i64() : () -> !llvm.i64
+ %0 = llvm.call @get_i64() : () -> i64
%1 = llvm.call @get_f32() : () -> !llvm.float
%2 = llvm.call @get_memref() : () -> !llvm.struct<(ptr<float>, i64, i64)>
// CHECK: %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } undef, i64 %{{[0-9]+}}, 0
@@ -757,26 +757,26 @@ llvm.func @multireturn_caller() {
%1 = llvm.extractvalue %0[0] : !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)>
%2 = llvm.extractvalue %0[1] : !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)>
%3 = llvm.extractvalue %0[2] : !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)>
- %4 = llvm.mlir.constant(42) : !llvm.i64
+ %4 = llvm.mlir.constant(42) : i64
// CHECK: add i64 [[ret0]], 42
- %5 = llvm.add %1, %4 : !llvm.i64
+ %5 = llvm.add %1, %4 : i64
%6 = llvm.mlir.constant(4.200000e+01 : f32) : !llvm.float
// CHECK: fadd float [[ret1]], 4.200000e+01
%7 = llvm.fadd %2, %6 : !llvm.float
- %8 = llvm.mlir.constant(0 : index) : !llvm.i64
- %9 = llvm.mlir.constant(42 : index) : !llvm.i64
+ %8 = llvm.mlir.constant(0 : index) : i64
+ %9 = llvm.mlir.constant(42 : index) : i64
// CHECK: extractvalue { float*, i64, i64 } [[ret2]], 0
%10 = llvm.extractvalue %3[1] : !llvm.struct<(ptr<float>, i64, i64)>
- %11 = llvm.mlir.constant(10 : index) : !llvm.i64
+ %11 = llvm.mlir.constant(10 : index) : i64
%12 = llvm.extractvalue %3[2] : !llvm.struct<(ptr<float>, i64, i64)>
- %13 = llvm.mul %8, %10 : !llvm.i64
- %14 = llvm.add %13, %8 : !llvm.i64
- %15 = llvm.mul %14, %11 : !llvm.i64
- %16 = llvm.add %15, %8 : !llvm.i64
- %17 = llvm.mul %16, %12 : !llvm.i64
- %18 = llvm.add %17, %8 : !llvm.i64
+ %13 = llvm.mul %8, %10 : i64
+ %14 = llvm.add %13, %8 : i64
+ %15 = llvm.mul %14, %11 : i64
+ %16 = llvm.add %15, %8 : i64
+ %17 = llvm.mul %16, %12 : i64
+ %18 = llvm.add %17, %8 : i64
%19 = llvm.extractvalue %3[0] : !llvm.struct<(ptr<float>, i64, i64)>
- %20 = llvm.getelementptr %19[%18] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+ %20 = llvm.getelementptr %19[%18] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
%21 = llvm.load %20 : !llvm.ptr<float>
llvm.return
}
@@ -845,23 +845,23 @@ llvm.func @vector_splat_nonzero() -> !llvm.vec<4 x float> {
}
// CHECK-LABEL: @ops
-llvm.func @ops(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.i32, %arg3: !llvm.i32) -> !llvm.struct<(float, i32)> {
+llvm.func @ops(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: i32, %arg3: i32) -> !llvm.struct<(float, i32)> {
// CHECK-NEXT: fsub float %0, %1
%0 = llvm.fsub %arg0, %arg1 : !llvm.float
// CHECK-NEXT: %6 = sub i32 %2, %3
- %1 = llvm.sub %arg2, %arg3 : !llvm.i32
+ %1 = llvm.sub %arg2, %arg3 : i32
// CHECK-NEXT: %7 = icmp slt i32 %2, %6
- %2 = llvm.icmp "slt" %arg2, %1 : !llvm.i32
+ %2 = llvm.icmp "slt" %arg2, %1 : i32
// CHECK-NEXT: %8 = select i1 %7, i32 %2, i32 %6
- %3 = llvm.select %2, %arg2, %1 : !llvm.i1, !llvm.i32
+ %3 = llvm.select %2, %arg2, %1 : i1, i32
// CHECK-NEXT: %9 = sdiv i32 %2, %3
- %4 = llvm.sdiv %arg2, %arg3 : !llvm.i32
+ %4 = llvm.sdiv %arg2, %arg3 : i32
// CHECK-NEXT: %10 = udiv i32 %2, %3
- %5 = llvm.udiv %arg2, %arg3 : !llvm.i32
+ %5 = llvm.udiv %arg2, %arg3 : i32
// CHECK-NEXT: %11 = srem i32 %2, %3
- %6 = llvm.srem %arg2, %arg3 : !llvm.i32
+ %6 = llvm.srem %arg2, %arg3 : i32
// CHECK-NEXT: %12 = urem i32 %2, %3
- %7 = llvm.urem %arg2, %arg3 : !llvm.i32
+ %7 = llvm.urem %arg2, %arg3 : i32
%8 = llvm.mlir.undef : !llvm.struct<(float, i32)>
%9 = llvm.insertvalue %0, %8[0] : !llvm.struct<(float, i32)>
@@ -873,17 +873,17 @@ llvm.func @ops(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.i32, %arg3:
%12 = llvm.frem %arg0, %arg1 : !llvm.float
// CHECK-NEXT: %17 = and i32 %2, %3
- %13 = llvm.and %arg2, %arg3 : !llvm.i32
+ %13 = llvm.and %arg2, %arg3 : i32
// CHECK-NEXT: %18 = or i32 %2, %3
- %14 = llvm.or %arg2, %arg3 : !llvm.i32
+ %14 = llvm.or %arg2, %arg3 : i32
// CHECK-NEXT: %19 = xor i32 %2, %3
- %15 = llvm.xor %arg2, %arg3 : !llvm.i32
+ %15 = llvm.xor %arg2, %arg3 : i32
// CHECK-NEXT: %20 = shl i32 %2, %3
- %16 = llvm.shl %arg2, %arg3 : !llvm.i32
+ %16 = llvm.shl %arg2, %arg3 : i32
// CHECK-NEXT: %21 = lshr i32 %2, %3
- %17 = llvm.lshr %arg2, %arg3 : !llvm.i32
+ %17 = llvm.lshr %arg2, %arg3 : i32
// CHECK-NEXT: %22 = ashr i32 %2, %3
- %18 = llvm.ashr %arg2, %arg3 : !llvm.i32
+ %18 = llvm.ashr %arg2, %arg3 : i32
// CHECK-NEXT: fneg float %0
%19 = llvm.fneg %arg0 : !llvm.float
@@ -896,20 +896,20 @@ llvm.func @ops(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.i32, %arg3:
//
// CHECK-LABEL: define void @indirect_const_call(i64 {{%.*}})
-llvm.func @indirect_const_call(%arg0: !llvm.i64) {
+llvm.func @indirect_const_call(%arg0: i64) {
// CHECK-NEXT: call void @body(i64 %0)
%0 = llvm.mlir.addressof @body : !llvm.ptr<func<void (i64)>>
- llvm.call %0(%arg0) : (!llvm.i64) -> ()
+ llvm.call %0(%arg0) : (i64) -> ()
// CHECK-NEXT: ret void
llvm.return
}
// CHECK-LABEL: define i32 @indirect_call(i32 (float)* {{%.*}}, float {{%.*}})
-llvm.func @indirect_call(%arg0: !llvm.ptr<func<i32 (float)>>, %arg1: !llvm.float) -> !llvm.i32 {
+llvm.func @indirect_call(%arg0: !llvm.ptr<func<i32 (float)>>, %arg1: !llvm.float) -> i32 {
// CHECK-NEXT: %3 = call i32 %0(float %1)
- %0 = llvm.call %arg0(%arg1) : (!llvm.float) -> !llvm.i32
+ %0 = llvm.call %arg0(%arg1) : (!llvm.float) -> i32
// CHECK-NEXT: ret i32 %3
- llvm.return %0 : !llvm.i32
+ llvm.return %0 : i32
}
//
@@ -918,20 +918,20 @@ llvm.func @indirect_call(%arg0: !llvm.ptr<func<i32 (float)>>, %arg1: !llvm.float
//
// CHECK-LABEL: define void @cond_br_arguments(i1 {{%.*}}, i1 {{%.*}})
-llvm.func @cond_br_arguments(%arg0: !llvm.i1, %arg1: !llvm.i1) {
+llvm.func @cond_br_arguments(%arg0: i1, %arg1: i1) {
// CHECK-NEXT: br i1 %0, label %3, label %5
- llvm.cond_br %arg0, ^bb1(%arg0 : !llvm.i1), ^bb2
+ llvm.cond_br %arg0, ^bb1(%arg0 : i1), ^bb2
// CHECK: 3:
// CHECK-NEXT: %4 = phi i1 [ %1, %5 ], [ %0, %2 ]
-^bb1(%0 : !llvm.i1):
+^bb1(%0 : i1):
// CHECK-NEXT: ret void
llvm.return
// CHECK: 5:
^bb2:
// CHECK-NEXT: br label %3
- llvm.br ^bb1(%arg1 : !llvm.i1)
+ llvm.br ^bb1(%arg1 : i1)
}
// CHECK-LABEL: define void @llvm_noalias(float* noalias {{%*.}})
@@ -947,24 +947,24 @@ llvm.func @llvm_align(%arg0: !llvm.ptr<float> {llvm.align = 4}) {
// CHECK-LABEL: @llvm_varargs(...)
llvm.func @llvm_varargs(...)
-llvm.func @intpointerconversion(%arg0 : !llvm.i32) -> !llvm.i32 {
+llvm.func @intpointerconversion(%arg0 : i32) -> i32 {
// CHECK: %2 = inttoptr i32 %0 to i32*
// CHECK-NEXT: %3 = ptrtoint i32* %2 to i32
- %1 = llvm.inttoptr %arg0 : !llvm.i32 to !llvm.ptr<i32>
- %2 = llvm.ptrtoint %1 : !llvm.ptr<i32> to !llvm.i32
- llvm.return %2 : !llvm.i32
+ %1 = llvm.inttoptr %arg0 : i32 to !llvm.ptr<i32>
+ %2 = llvm.ptrtoint %1 : !llvm.ptr<i32> to i32
+ llvm.return %2 : i32
}
-llvm.func @fpconversion(%arg0 : !llvm.i32) -> !llvm.i32 {
+llvm.func @fpconversion(%arg0 : i32) -> i32 {
// CHECK: %2 = sitofp i32 %0 to float
// CHECK-NEXT: %3 = fptosi float %2 to i32
// CHECK-NEXT: %4 = uitofp i32 %3 to float
// CHECK-NEXT: %5 = fptoui float %4 to i32
- %1 = llvm.sitofp %arg0 : !llvm.i32 to !llvm.float
- %2 = llvm.fptosi %1 : !llvm.float to !llvm.i32
- %3 = llvm.uitofp %2 : !llvm.i32 to !llvm.float
- %4 = llvm.fptoui %3 : !llvm.float to !llvm.i32
- llvm.return %4 : !llvm.i32
+ %1 = llvm.sitofp %arg0 : i32 to !llvm.float
+ %2 = llvm.fptosi %1 : !llvm.float to i32
+ %3 = llvm.uitofp %2 : i32 to !llvm.float
+ %4 = llvm.fptoui %3 : !llvm.float to i32
+ llvm.return %4 : i32
}
// CHECK-LABEL: @addrspace
@@ -1019,33 +1019,33 @@ llvm.func @fcmp(%arg0: !llvm.float, %arg1: !llvm.float) {
}
// CHECK-LABEL: @vect
-llvm.func @vect(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i32, %arg2: !llvm.float) {
+llvm.func @vect(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) {
// CHECK-NEXT: extractelement <4 x float> {{.*}}, i32
// CHECK-NEXT: insertelement <4 x float> {{.*}}, float %2, i32
// CHECK-NEXT: shufflevector <4 x float> {{.*}}, <4 x float> {{.*}}, <5 x i32> <i32 0, i32 0, i32 0, i32 0, i32 7>
- %0 = llvm.extractelement %arg0[%arg1 : !llvm.i32] : !llvm.vec<4 x float>
- %1 = llvm.insertelement %arg2, %arg0[%arg1 : !llvm.i32] : !llvm.vec<4 x float>
+ %0 = llvm.extractelement %arg0[%arg1 : i32] : !llvm.vec<4 x float>
+ %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : !llvm.vec<4 x float>
%2 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
llvm.return
}
// CHECK-LABEL: @vect_i64idx
-llvm.func @vect_i64idx(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i64, %arg2: !llvm.float) {
+llvm.func @vect_i64idx(%arg0: !llvm.vec<4 x float>, %arg1: i64, %arg2: !llvm.float) {
// CHECK-NEXT: extractelement <4 x float> {{.*}}, i64
// CHECK-NEXT: insertelement <4 x float> {{.*}}, float %2, i64
- %0 = llvm.extractelement %arg0[%arg1 : !llvm.i64] : !llvm.vec<4 x float>
- %1 = llvm.insertelement %arg2, %arg0[%arg1 : !llvm.i64] : !llvm.vec<4 x float>
+ %0 = llvm.extractelement %arg0[%arg1 : i64] : !llvm.vec<4 x float>
+ %1 = llvm.insertelement %arg2, %arg0[%arg1 : i64] : !llvm.vec<4 x float>
llvm.return
}
// CHECK-LABEL: @alloca
-llvm.func @alloca(%size : !llvm.i64) {
+llvm.func @alloca(%size : i64) {
// Alignment automatically set by the LLVM IR builder when alignment attribute
// is 0.
// CHECK: alloca {{.*}} align 4
- llvm.alloca %size x !llvm.i32 {alignment = 0} : (!llvm.i64) -> (!llvm.ptr<i32>)
+ llvm.alloca %size x i32 {alignment = 0} : (i64) -> (!llvm.ptr<i32>)
// CHECK-NEXT: alloca {{.*}} align 8
- llvm.alloca %size x !llvm.i32 {alignment = 8} : (!llvm.i64) -> (!llvm.ptr<i32>)
+ llvm.alloca %size x i32 {alignment = 8} : (i64) -> (!llvm.ptr<i32>)
llvm.return
}
@@ -1057,24 +1057,24 @@ llvm.func @constants() -> !llvm.vec<4 x float> {
}
// CHECK-LABEL: @fp_casts
-llvm.func @fp_casts(%fp1 : !llvm.float, %fp2 : !llvm.double) -> !llvm.i16 {
+llvm.func @fp_casts(%fp1 : !llvm.float, %fp2 : !llvm.double) -> i16 {
// CHECK: fptrunc double {{.*}} to float
%a = llvm.fptrunc %fp2 : !llvm.double to !llvm.float
// CHECK: fpext float {{.*}} to double
%b = llvm.fpext %fp1 : !llvm.float to !llvm.double
// CHECK: fptosi double {{.*}} to i16
- %c = llvm.fptosi %b : !llvm.double to !llvm.i16
- llvm.return %c : !llvm.i16
+ %c = llvm.fptosi %b : !llvm.double to i16
+ llvm.return %c : i16
}
// CHECK-LABEL: @integer_extension_and_truncation
-llvm.func @integer_extension_and_truncation(%a : !llvm.i32) {
+llvm.func @integer_extension_and_truncation(%a : i32) {
// CHECK: sext i32 {{.*}} to i64
// CHECK: zext i32 {{.*}} to i64
// CHECK: trunc i32 {{.*}} to i16
- %0 = llvm.sext %a : !llvm.i32 to !llvm.i64
- %1 = llvm.zext %a : !llvm.i32 to !llvm.i64
- %2 = llvm.trunc %a : !llvm.i32 to !llvm.i16
+ %0 = llvm.sext %a : i32 to i64
+ %1 = llvm.zext %a : i32 to i64
+ %2 = llvm.trunc %a : i32 to i16
llvm.return
}
@@ -1108,7 +1108,7 @@ llvm.func @elements_constant_3d_array() -> !llvm.array<2 x array<2 x array<2 x i
// CHECK-LABEL: @atomicrmw
llvm.func @atomicrmw(
%f32_ptr : !llvm.ptr<float>, %f32 : !llvm.float,
- %i32_ptr : !llvm.ptr<i32>, %i32 : !llvm.i32) {
+ %i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
// CHECK: atomicrmw fadd float* %{{.*}}, float %{{.*}} unordered
%0 = llvm.atomicrmw fadd %f32_ptr, %f32 unordered : !llvm.float
// CHECK: atomicrmw fsub float* %{{.*}}, float %{{.*}} unordered
@@ -1116,32 +1116,32 @@ llvm.func @atomicrmw(
// CHECK: atomicrmw xchg float* %{{.*}}, float %{{.*}} monotonic
%2 = llvm.atomicrmw xchg %f32_ptr, %f32 monotonic : !llvm.float
// CHECK: atomicrmw add i32* %{{.*}}, i32 %{{.*}} acquire
- %3 = llvm.atomicrmw add %i32_ptr, %i32 acquire : !llvm.i32
+ %3 = llvm.atomicrmw add %i32_ptr, %i32 acquire : i32
// CHECK: atomicrmw sub i32* %{{.*}}, i32 %{{.*}} release
- %4 = llvm.atomicrmw sub %i32_ptr, %i32 release : !llvm.i32
+ %4 = llvm.atomicrmw sub %i32_ptr, %i32 release : i32
// CHECK: atomicrmw and i32* %{{.*}}, i32 %{{.*}} acq_rel
- %5 = llvm.atomicrmw _and %i32_ptr, %i32 acq_rel : !llvm.i32
+ %5 = llvm.atomicrmw _and %i32_ptr, %i32 acq_rel : i32
// CHECK: atomicrmw nand i32* %{{.*}}, i32 %{{.*}} seq_cst
- %6 = llvm.atomicrmw nand %i32_ptr, %i32 seq_cst : !llvm.i32
+ %6 = llvm.atomicrmw nand %i32_ptr, %i32 seq_cst : i32
// CHECK: atomicrmw or i32* %{{.*}}, i32 %{{.*}} unordered
- %7 = llvm.atomicrmw _or %i32_ptr, %i32 unordered : !llvm.i32
+ %7 = llvm.atomicrmw _or %i32_ptr, %i32 unordered : i32
// CHECK: atomicrmw xor i32* %{{.*}}, i32 %{{.*}} unordered
- %8 = llvm.atomicrmw _xor %i32_ptr, %i32 unordered : !llvm.i32
+ %8 = llvm.atomicrmw _xor %i32_ptr, %i32 unordered : i32
// CHECK: atomicrmw max i32* %{{.*}}, i32 %{{.*}} unordered
- %9 = llvm.atomicrmw max %i32_ptr, %i32 unordered : !llvm.i32
+ %9 = llvm.atomicrmw max %i32_ptr, %i32 unordered : i32
// CHECK: atomicrmw min i32* %{{.*}}, i32 %{{.*}} unordered
- %10 = llvm.atomicrmw min %i32_ptr, %i32 unordered : !llvm.i32
+ %10 = llvm.atomicrmw min %i32_ptr, %i32 unordered : i32
// CHECK: atomicrmw umax i32* %{{.*}}, i32 %{{.*}} unordered
- %11 = llvm.atomicrmw umax %i32_ptr, %i32 unordered : !llvm.i32
+ %11 = llvm.atomicrmw umax %i32_ptr, %i32 unordered : i32
// CHECK: atomicrmw umin i32* %{{.*}}, i32 %{{.*}} unordered
- %12 = llvm.atomicrmw umin %i32_ptr, %i32 unordered : !llvm.i32
+ %12 = llvm.atomicrmw umin %i32_ptr, %i32 unordered : i32
llvm.return
}
// CHECK-LABEL: @cmpxchg
-llvm.func @cmpxchg(%ptr : !llvm.ptr<i32>, %cmp : !llvm.i32, %val: !llvm.i32) {
+llvm.func @cmpxchg(%ptr : !llvm.ptr<i32>, %cmp : i32, %val: i32) {
// CHECK: cmpxchg i32* %{{.*}}, i32 %{{.*}}, i32 %{{.*}} acq_rel monotonic
- %0 = llvm.cmpxchg %ptr, %cmp, %val acq_rel monotonic : !llvm.i32
+ %0 = llvm.cmpxchg %ptr, %cmp, %val acq_rel monotonic : i32
// CHECK: %{{[0-9]+}} = extractvalue { i32, i1 } %{{[0-9]+}}, 0
%1 = llvm.extractvalue %0[0] : !llvm.struct<(i32, i1)>
// CHECK: %{{[0-9]+}} = extractvalue { i32, i1 } %{{[0-9]+}}, 1
@@ -1152,18 +1152,18 @@ llvm.func @cmpxchg(%ptr : !llvm.ptr<i32>, %cmp : !llvm.i32, %val: !llvm.i32) {
llvm.mlir.global external constant @_ZTIi() : !llvm.ptr<i8>
llvm.func @foo(!llvm.ptr<i8>)
llvm.func @bar(!llvm.ptr<i8>) -> !llvm.ptr<i8>
-llvm.func @__gxx_personality_v0(...) -> !llvm.i32
+llvm.func @__gxx_personality_v0(...) -> i32
// CHECK-LABEL: @invokeLandingpad
-llvm.func @invokeLandingpad() -> !llvm.i32 attributes { personality = @__gxx_personality_v0 } {
+llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personality_v0 } {
// CHECK: %[[a1:[0-9]+]] = alloca i8
- %0 = llvm.mlir.constant(0 : i32) : !llvm.i32
+ %0 = llvm.mlir.constant(0 : i32) : i32
%1 = llvm.mlir.constant("\01") : !llvm.array<1 x i8>
%2 = llvm.mlir.addressof @_ZTIi : !llvm.ptr<ptr<i8>>
%3 = llvm.bitcast %2 : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
%4 = llvm.mlir.null : !llvm.ptr<ptr<i8>>
- %5 = llvm.mlir.constant(1 : i32) : !llvm.i32
- %6 = llvm.alloca %5 x !llvm.i8 : (!llvm.i32) -> !llvm.ptr<i8>
+ %5 = llvm.mlir.constant(1 : i32) : i32
+ %6 = llvm.alloca %5 x i8 : (i32) -> !llvm.ptr<i8>
// CHECK: invoke void @foo(i8* %[[a1]])
// CHECK-NEXT: to label %[[normal:[0-9]+]] unwind label %[[unwind:[0-9]+]]
llvm.invoke @foo(%6) to ^bb2 unwind ^bb1 : (!llvm.ptr<i8>) -> ()
@@ -1181,7 +1181,7 @@ llvm.func @invokeLandingpad() -> !llvm.i32 attributes { personality = @__gxx_per
// CHECK: [[normal]]:
// CHECK-NEXT: ret i32 1
^bb2: // 2 preds: ^bb0, ^bb3
- llvm.return %5 : !llvm.i32
+ llvm.return %5 : i32
// CHECK: [[final]]:
// CHECK-NEXT: %{{[0-9]+}} = invoke i8* @bar(i8* %[[a1]])
@@ -1191,22 +1191,22 @@ llvm.func @invokeLandingpad() -> !llvm.i32 attributes { personality = @__gxx_per
}
// CHECK-LABEL: @callFreezeOp
-llvm.func @callFreezeOp(%x : !llvm.i32) {
+llvm.func @callFreezeOp(%x : i32) {
// CHECK: freeze i32 %{{[0-9]+}}
- %0 = llvm.freeze %x : !llvm.i32
- %1 = llvm.mlir.undef : !llvm.i32
+ %0 = llvm.freeze %x : i32
+ %1 = llvm.mlir.undef : i32
// CHECK: freeze i32 undef
- %2 = llvm.freeze %1 : !llvm.i32
+ %2 = llvm.freeze %1 : i32
llvm.return
}
// CHECK-LABEL: @boolConstArg
-llvm.func @boolConstArg() -> !llvm.i1 {
+llvm.func @boolConstArg() -> i1 {
// CHECK: ret i1 false
- %0 = llvm.mlir.constant(true) : !llvm.i1
- %1 = llvm.mlir.constant(false) : !llvm.i1
- %2 = llvm.and %0, %1 : !llvm.i1
- llvm.return %2 : !llvm.i1
+ %0 = llvm.mlir.constant(true) : i1
+ %1 = llvm.mlir.constant(false) : i1
+ %2 = llvm.and %0, %1 : i1
+ llvm.return %2 : i1
}
// CHECK-LABEL: @callFenceInst
@@ -1256,13 +1256,13 @@ llvm.mlir.global internal constant @taker_of_address() : !llvm.ptr<func<void ()>
// -----
// Check that branch weight attributes are exported properly as metadata.
-llvm.func @cond_br_weights(%cond : !llvm.i1, %arg0 : !llvm.i32, %arg1 : !llvm.i32) -> !llvm.i32 {
+llvm.func @cond_br_weights(%cond : i1, %arg0 : i32, %arg1 : i32) -> i32 {
// CHECK: !prof ![[NODE:[0-9]+]]
llvm.cond_br %cond weights(dense<[5, 10]> : vector<2xi32>), ^bb1, ^bb2
^bb1: // pred: ^bb0
- llvm.return %arg0 : !llvm.i32
+ llvm.return %arg0 : i32
^bb2: // pred: ^bb0
- llvm.return %arg1 : !llvm.i32
+ llvm.return %arg1 : i32
}
// CHECK: ![[NODE]] = !{!"branch_weights", i32 5, i32 10}
@@ -1270,9 +1270,9 @@ llvm.func @cond_br_weights(%cond : !llvm.i1, %arg0 : !llvm.i32, %arg1 : !llvm.i
// -----
llvm.func @volatile_store_and_load() {
- %val = llvm.mlir.constant(5 : i32) : !llvm.i32
- %size = llvm.mlir.constant(1 : i64) : !llvm.i64
- %0 = llvm.alloca %size x !llvm.i32 : (!llvm.i64) -> (!llvm.ptr<i32>)
+ %val = llvm.mlir.constant(5 : i32) : i32
+ %size = llvm.mlir.constant(1 : i64) : i64
+ %0 = llvm.alloca %size x i32 : (i64) -> (!llvm.ptr<i32>)
// CHECK: store volatile i32 5, i32* %{{.*}}
llvm.store volatile %val, %0 : !llvm.ptr<i32>
// CHECK: %{{.*}} = load volatile i32, i32* %{{.*}}
@@ -1284,9 +1284,9 @@ llvm.func @volatile_store_and_load() {
// Check that nontemporal attribute is exported as metadata node.
llvm.func @nontemporal_store_and_load() {
- %val = llvm.mlir.constant(5 : i32) : !llvm.i32
- %size = llvm.mlir.constant(1 : i64) : !llvm.i64
- %0 = llvm.alloca %size x !llvm.i32 : (!llvm.i64) -> (!llvm.ptr<i32>)
+ %val = llvm.mlir.constant(5 : i32) : i32
+ %size = llvm.mlir.constant(1 : i64) : i64
+ %0 = llvm.alloca %size x i32 : (i64) -> (!llvm.ptr<i32>)
// CHECK: !nontemporal ![[NODE:[0-9]+]]
llvm.store %val, %0 {nontemporal} : !llvm.ptr<i32>
// CHECK: !nontemporal ![[NODE]]
@@ -1330,30 +1330,30 @@ module attributes {} {}
// -----
// CHECK-LABEL: @useInlineAsm
-llvm.func @useInlineAsm(%arg0: !llvm.i32) {
+llvm.func @useInlineAsm(%arg0: i32) {
// Constraints string is checked at LLVM InlineAsm instruction construction time.
// So we can't just use "bar" everywhere, number of in/out arguments has to match.
// CHECK-NEXT: call void asm "foo", "r"(i32 {{.*}}), !dbg !7
- llvm.inline_asm "foo", "r" %arg0 : (!llvm.i32) -> ()
+ llvm.inline_asm "foo", "r" %arg0 : (i32) -> ()
// CHECK-NEXT: call i8 asm "foo", "=r,r"(i32 {{.*}}), !dbg !9
- %0 = llvm.inline_asm "foo", "=r,r" %arg0 : (!llvm.i32) -> !llvm.i8
+ %0 = llvm.inline_asm "foo", "=r,r" %arg0 : (i32) -> i8
// CHECK-NEXT: call i8 asm "foo", "=r,r,r"(i32 {{.*}}, i32 {{.*}}), !dbg !10
- %1 = llvm.inline_asm "foo", "=r,r,r" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8
+ %1 = llvm.inline_asm "foo", "=r,r,r" %arg0, %arg0 : (i32, i32) -> i8
// CHECK-NEXT: call i8 asm sideeffect "foo", "=r,r,r"(i32 {{.*}}, i32 {{.*}}), !dbg !11
- %2 = llvm.inline_asm has_side_effects "foo", "=r,r,r" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8
+ %2 = llvm.inline_asm has_side_effects "foo", "=r,r,r" %arg0, %arg0 : (i32, i32) -> i8
// CHECK-NEXT: call i8 asm alignstack "foo", "=r,r,r"(i32 {{.*}}, i32 {{.*}}), !dbg !12
- %3 = llvm.inline_asm is_align_stack "foo", "=r,r,r" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8
+ %3 = llvm.inline_asm is_align_stack "foo", "=r,r,r" %arg0, %arg0 : (i32, i32) -> i8
// CHECK-NEXT: call i8 asm inteldialect "foo", "=r,r,r"(i32 {{.*}}, i32 {{.*}}), !dbg !13
- %4 = llvm.inline_asm asm_dialect = "intel" "foo", "=r,r,r" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8
+ %4 = llvm.inline_asm asm_dialect = "intel" "foo", "=r,r,r" %arg0, %arg0 : (i32, i32) -> i8
// CHECK-NEXT: call { i8, i8 } asm "foo", "=r,=r,r"(i32 {{.*}}), !dbg !14
- %5 = llvm.inline_asm "foo", "=r,=r,r" %arg0 : (!llvm.i32) -> !llvm.struct<(i8, i8)>
+ %5 = llvm.inline_asm "foo", "=r,=r,r" %arg0 : (i32) -> !llvm.struct<(i8, i8)>
llvm.return
}
@@ -1405,57 +1405,57 @@ llvm.func @fastmathFlags(%arg0: !llvm.float) {
// -----
// CHECK-LABEL: @switch_args
-llvm.func @switch_args(%arg0: !llvm.i32) {
- %0 = llvm.mlir.constant(5 : i32) : !llvm.i32
- %1 = llvm.mlir.constant(7 : i32) : !llvm.i32
- %2 = llvm.mlir.constant(11 : i32) : !llvm.i32
+llvm.func @switch_args(%arg0: i32) {
+ %0 = llvm.mlir.constant(5 : i32) : i32
+ %1 = llvm.mlir.constant(7 : i32) : i32
+ %2 = llvm.mlir.constant(11 : i32) : i32
// CHECK: switch i32 %[[SWITCH_arg0:[0-9]+]], label %[[SWITCHDEFAULT_bb1:[0-9]+]] [
// CHECK-NEXT: i32 -1, label %[[SWITCHCASE_bb2:[0-9]+]]
// CHECK-NEXT: i32 1, label %[[SWITCHCASE_bb3:[0-9]+]]
// CHECK-NEXT: ]
llvm.switch %arg0, ^bb1 [
- -1: ^bb2(%0 : !llvm.i32),
- 1: ^bb3(%1, %2 : !llvm.i32, !llvm.i32)
+ -1: ^bb2(%0 : i32),
+ 1: ^bb3(%1, %2 : i32, i32)
]
// CHECK: [[SWITCHDEFAULT_bb1]]:
// CHECK-NEXT: ret i32 %[[SWITCH_arg0]]
^bb1: // pred: ^bb0
- llvm.return %arg0 : !llvm.i32
+ llvm.return %arg0 : i32
// CHECK: [[SWITCHCASE_bb2]]:
// CHECK-NEXT: phi i32 [ 5, %1 ]
// CHECK-NEXT: ret i32
-^bb2(%3: !llvm.i32): // pred: ^bb0
- llvm.return %1 : !llvm.i32
+^bb2(%3: i32): // pred: ^bb0
+ llvm.return %1 : i32
// CHECK: [[SWITCHCASE_bb3]]:
// CHECK-NEXT: phi i32 [ 7, %1 ]
// CHECK-NEXT: phi i32 [ 11, %1 ]
// CHECK-NEXT: ret i32
-^bb3(%4: !llvm.i32, %5: !llvm.i32): // pred: ^bb0
- llvm.return %4 : !llvm.i32
+^bb3(%4: i32, %5: i32): // pred: ^bb0
+ llvm.return %4 : i32
}
// CHECK-LABEL: @switch_weights
-llvm.func @switch_weights(%arg0: !llvm.i32) {
- %0 = llvm.mlir.constant(19 : i32) : !llvm.i32
- %1 = llvm.mlir.constant(23 : i32) : !llvm.i32
- %2 = llvm.mlir.constant(29 : i32) : !llvm.i32
+llvm.func @switch_weights(%arg0: i32) {
+ %0 = llvm.mlir.constant(19 : i32) : i32
+ %1 = llvm.mlir.constant(23 : i32) : i32
+ %2 = llvm.mlir.constant(29 : i32) : i32
// CHECK: !prof ![[SWITCH_WEIGHT_NODE:[0-9]+]]
- llvm.switch %arg0, ^bb1(%0 : !llvm.i32) [
- 9: ^bb2(%1, %2 : !llvm.i32, !llvm.i32),
+ llvm.switch %arg0, ^bb1(%0 : i32) [
+ 9: ^bb2(%1, %2 : i32, i32),
99: ^bb3
] {branch_weights = dense<[13, 17, 19]> : vector<3xi32>}
-^bb1(%3: !llvm.i32): // pred: ^bb0
- llvm.return %3 : !llvm.i32
+^bb1(%3: i32): // pred: ^bb0
+ llvm.return %3 : i32
-^bb2(%4: !llvm.i32, %5: !llvm.i32): // pred: ^bb0
- llvm.return %5 : !llvm.i32
+^bb2(%4: i32, %5: i32): // pred: ^bb0
+ llvm.return %5 : i32
^bb3: // pred: ^bb0
- llvm.return %arg0 : !llvm.i32
+ llvm.return %arg0 : i32
}
// CHECK: ![[SWITCH_WEIGHT_NODE]] = !{!"branch_weights", i32 13, i32 17, i32 19}
diff --git a/mlir/test/Target/nvvmir.mlir b/mlir/test/Target/nvvmir.mlir
index fdb438b4e6a9..24642b8d1154 100644
--- a/mlir/test/Target/nvvmir.mlir
+++ b/mlir/test/Target/nvvmir.mlir
@@ -1,35 +1,35 @@
// RUN: mlir-translate -mlir-to-nvvmir %s | FileCheck %s
-llvm.func @nvvm_special_regs() -> !llvm.i32 {
+llvm.func @nvvm_special_regs() -> i32 {
// CHECK: %1 = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
- %1 = nvvm.read.ptx.sreg.tid.x : !llvm.i32
+ %1 = nvvm.read.ptx.sreg.tid.x : i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.tid.y()
- %2 = nvvm.read.ptx.sreg.tid.y : !llvm.i32
+ %2 = nvvm.read.ptx.sreg.tid.y : i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.tid.z()
- %3 = nvvm.read.ptx.sreg.tid.z : !llvm.i32
+ %3 = nvvm.read.ptx.sreg.tid.z : i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
- %4 = nvvm.read.ptx.sreg.ntid.x : !llvm.i32
+ %4 = nvvm.read.ptx.sreg.ntid.x : i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ntid.y()
- %5 = nvvm.read.ptx.sreg.ntid.y : !llvm.i32
+ %5 = nvvm.read.ptx.sreg.ntid.y : i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ntid.z()
- %6 = nvvm.read.ptx.sreg.ntid.z : !llvm.i32
+ %6 = nvvm.read.ptx.sreg.ntid.z : i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
- %7 = nvvm.read.ptx.sreg.ctaid.x : !llvm.i32
+ %7 = nvvm.read.ptx.sreg.ctaid.x : i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.y()
- %8 = nvvm.read.ptx.sreg.ctaid.y : !llvm.i32
+ %8 = nvvm.read.ptx.sreg.ctaid.y : i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.z()
- %9 = nvvm.read.ptx.sreg.ctaid.z : !llvm.i32
+ %9 = nvvm.read.ptx.sreg.ctaid.z : i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.nctaid.x()
- %10 = nvvm.read.ptx.sreg.nctaid.x : !llvm.i32
+ %10 = nvvm.read.ptx.sreg.nctaid.x : i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.nctaid.y()
- %11 = nvvm.read.ptx.sreg.nctaid.y : !llvm.i32
+ %11 = nvvm.read.ptx.sreg.nctaid.y : i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.nctaid.z()
- %12 = nvvm.read.ptx.sreg.nctaid.z : !llvm.i32
+ %12 = nvvm.read.ptx.sreg.nctaid.z : i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
- %13 = nvvm.read.ptx.sreg.warpsize : !llvm.i32
+ %13 = nvvm.read.ptx.sreg.warpsize : i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.laneid()
- %14 = nvvm.read.ptx.sreg.laneid : !llvm.i32
- llvm.return %1 : !llvm.i32
+ %14 = nvvm.read.ptx.sreg.laneid : i32
+ llvm.return %1 : i32
}
llvm.func @llvm.nvvm.barrier0() {
@@ -39,18 +39,18 @@ llvm.func @llvm.nvvm.barrier0() {
}
llvm.func @nvvm_shfl(
- %0 : !llvm.i32, %1 : !llvm.i32, %2 : !llvm.i32,
- %3 : !llvm.i32, %4 : !llvm.float) -> !llvm.i32 {
+ %0 : i32, %1 : i32, %2 : i32,
+ %3 : i32, %4 : !llvm.float) -> i32 {
// CHECK: call i32 @llvm.nvvm.shfl.sync.bfly.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
- %6 = nvvm.shfl.sync.bfly %0, %3, %1, %2 : !llvm.i32
+ %6 = nvvm.shfl.sync.bfly %0, %3, %1, %2 : i32
// CHECK: call float @llvm.nvvm.shfl.sync.bfly.f32(i32 %{{.*}}, float %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%7 = nvvm.shfl.sync.bfly %0, %4, %1, %2 : !llvm.float
- llvm.return %6 : !llvm.i32
+ llvm.return %6 : i32
}
llvm.func @nvvm_shfl_pred(
- %0 : !llvm.i32, %1 : !llvm.i32, %2 : !llvm.i32,
- %3 : !llvm.i32, %4 : !llvm.float) -> !llvm.struct<(i32, i1)> {
+ %0 : i32, %1 : i32, %2 : i32,
+ %3 : i32, %4 : !llvm.float) -> !llvm.struct<(i32, i1)> {
// CHECK: call { i32, i1 } @llvm.nvvm.shfl.sync.bfly.i32p(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%6 = nvvm.shfl.sync.bfly %0, %3, %1, %2 {return_value_and_is_valid} : !llvm.struct<(i32, i1)>
// CHECK: call { float, i1 } @llvm.nvvm.shfl.sync.bfly.f32p(i32 %{{.*}}, float %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
@@ -58,10 +58,10 @@ llvm.func @nvvm_shfl_pred(
llvm.return %6 : !llvm.struct<(i32, i1)>
}
-llvm.func @nvvm_vote(%0 : !llvm.i32, %1 : !llvm.i1) -> !llvm.i32 {
+llvm.func @nvvm_vote(%0 : i32, %1 : i1) -> i32 {
// CHECK: call i32 @llvm.nvvm.vote.ballot.sync(i32 %{{.*}}, i1 %{{.*}})
- %3 = nvvm.vote.ballot.sync %0, %1 : !llvm.i32
- llvm.return %3 : !llvm.i32
+ %3 = nvvm.vote.ballot.sync %0, %1 : i32
+ llvm.return %3 : i32
}
llvm.func @nvvm_mma(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>,
diff --git a/mlir/test/Target/openmp-llvm.mlir b/mlir/test/Target/openmp-llvm.mlir
index c5ef16f4393d..a4d00c510abb 100644
--- a/mlir/test/Target/openmp-llvm.mlir
+++ b/mlir/test/Target/openmp-llvm.mlir
@@ -19,19 +19,19 @@ llvm.func @test_stand_alone_directives() {
}
// CHECK-LABEL: define void @test_flush_construct(i32 %0)
-llvm.func @test_flush_construct(%arg0: !llvm.i32) {
+llvm.func @test_flush_construct(%arg0: i32) {
// CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
omp.flush
// CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
- omp.flush (%arg0 : !llvm.i32)
+ omp.flush (%arg0 : i32)
// CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
- omp.flush (%arg0, %arg0 : !llvm.i32, !llvm.i32)
+ omp.flush (%arg0, %arg0 : i32, i32)
- %0 = llvm.mlir.constant(1 : i64) : !llvm.i64
+ %0 = llvm.mlir.constant(1 : i64) : i64
// CHECK: alloca {{.*}} align 4
- %1 = llvm.alloca %0 x !llvm.i32 {in_type = i32, name = "a"} : (!llvm.i64) -> !llvm.ptr<i32>
+ %1 = llvm.alloca %0 x i32 {in_type = i32, name = "a"} : (i64) -> !llvm.ptr<i32>
// CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
omp.flush
// CHECK: load i32, i32*
@@ -55,22 +55,22 @@ llvm.func @test_omp_parallel_1() -> () {
// CHECK: define internal void @[[OMP_OUTLINED_FN_1]]
// CHECK: call void @__kmpc_barrier
-llvm.func @body(!llvm.i64)
+llvm.func @body(i64)
// CHECK-LABEL: define void @test_omp_parallel_2()
llvm.func @test_omp_parallel_2() -> () {
// CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_2:.*]] to {{.*}}
omp.parallel {
^bb0:
- %0 = llvm.mlir.constant(1 : index) : !llvm.i64
- %1 = llvm.mlir.constant(42 : index) : !llvm.i64
- llvm.call @body(%0) : (!llvm.i64) -> ()
- llvm.call @body(%1) : (!llvm.i64) -> ()
+ %0 = llvm.mlir.constant(1 : index) : i64
+ %1 = llvm.mlir.constant(42 : index) : i64
+ llvm.call @body(%0) : (i64) -> ()
+ llvm.call @body(%1) : (i64) -> ()
llvm.br ^bb1
^bb1:
- %2 = llvm.add %0, %1 : !llvm.i64
- llvm.call @body(%2) : (!llvm.i64) -> ()
+ %2 = llvm.add %0, %1 : i64
+ llvm.call @body(%2) : (i64) -> ()
omp.terminator
}
llvm.return
@@ -88,11 +88,11 @@ llvm.func @test_omp_parallel_2() -> () {
// CHECK: br label %omp.par.pre_finalize
// CHECK: define void @test_omp_parallel_num_threads_1(i32 %[[NUM_THREADS_VAR_1:.*]])
-llvm.func @test_omp_parallel_num_threads_1(%arg0: !llvm.i32) -> () {
+llvm.func @test_omp_parallel_num_threads_1(%arg0: i32) -> () {
// CHECK: %[[GTN_NUM_THREADS_VAR_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_1:.*]])
// CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_1]], i32 %[[GTN_NUM_THREADS_VAR_1]], i32 %[[NUM_THREADS_VAR_1]])
// CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_1:.*]] to {{.*}}
- omp.parallel num_threads(%arg0: !llvm.i32) {
+ omp.parallel num_threads(%arg0: i32) {
omp.barrier
omp.terminator
}
@@ -105,11 +105,11 @@ llvm.func @test_omp_parallel_num_threads_1(%arg0: !llvm.i32) -> () {
// CHECK: define void @test_omp_parallel_num_threads_2()
llvm.func @test_omp_parallel_num_threads_2() -> () {
- %0 = llvm.mlir.constant(4 : index) : !llvm.i32
+ %0 = llvm.mlir.constant(4 : index) : i32
// CHECK: %[[GTN_NUM_THREADS_VAR_2:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_2:.*]])
// CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_2]], i32 %[[GTN_NUM_THREADS_VAR_2]], i32 4)
// CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_2:.*]] to {{.*}}
- omp.parallel num_threads(%0: !llvm.i32) {
+ omp.parallel num_threads(%0: i32) {
omp.barrier
omp.terminator
}
@@ -122,19 +122,19 @@ llvm.func @test_omp_parallel_num_threads_2() -> () {
// CHECK: define void @test_omp_parallel_num_threads_3()
llvm.func @test_omp_parallel_num_threads_3() -> () {
- %0 = llvm.mlir.constant(4 : index) : !llvm.i32
+ %0 = llvm.mlir.constant(4 : index) : i32
// CHECK: %[[GTN_NUM_THREADS_VAR_3_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_3_1:.*]])
// CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_3_1]], i32 %[[GTN_NUM_THREADS_VAR_3_1]], i32 4)
// CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_1:.*]] to {{.*}}
- omp.parallel num_threads(%0: !llvm.i32) {
+ omp.parallel num_threads(%0: i32) {
omp.barrier
omp.terminator
}
- %1 = llvm.mlir.constant(8 : index) : !llvm.i32
+ %1 = llvm.mlir.constant(8 : index) : i32
// CHECK: %[[GTN_NUM_THREADS_VAR_3_2:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_3_2:.*]])
// CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_3_2]], i32 %[[GTN_NUM_THREADS_VAR_3_2]], i32 8)
// CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_2:.*]] to {{.*}}
- omp.parallel num_threads(%1: !llvm.i32) {
+ omp.parallel num_threads(%1: i32) {
omp.barrier
omp.terminator
}
@@ -149,11 +149,11 @@ llvm.func @test_omp_parallel_num_threads_3() -> () {
// CHECK: call void @__kmpc_barrier
// CHECK: define void @test_omp_parallel_if_1(i32 %[[IF_VAR_1:.*]])
-llvm.func @test_omp_parallel_if_1(%arg0: !llvm.i32) -> () {
+llvm.func @test_omp_parallel_if_1(%arg0: i32) -> () {
// CHECK: %[[IF_COND_VAR_1:.*]] = icmp slt i32 %[[IF_VAR_1]], 0
- %0 = llvm.mlir.constant(0 : index) : !llvm.i32
- %1 = llvm.icmp "slt" %arg0, %0 : !llvm.i32
+ %0 = llvm.mlir.constant(0 : index) : i32
+ %1 = llvm.icmp "slt" %arg0, %0 : i32
// CHECK: %[[GTN_IF_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[SI_VAR_IF_1:.*]])
// CHECK: br i1 %[[IF_COND_VAR_1]], label %[[IF_COND_TRUE_BLOCK_1:.*]], label %[[IF_COND_FALSE_BLOCK_1:.*]]
@@ -171,7 +171,7 @@ llvm.func @test_omp_parallel_if_1(%arg0: !llvm.i32) -> () {
// CHECK: call void @[[OMP_OUTLINED_FN_IF_1]]
// CHECK: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[SI_VAR_IF_1]], i32 %[[GTN_IF_1]])
// CHECK: br label %[[RETURN_BLOCK_IF_1]]
- omp.parallel if(%1 : !llvm.i1) {
+ omp.parallel if(%1 : i1) {
omp.barrier
omp.terminator
}
@@ -303,22 +303,22 @@ llvm.func @test_omp_master() -> () {
// CHECK-LABEL: @wsloop_simple
llvm.func @wsloop_simple(%arg0: !llvm.ptr<float>) {
- %0 = llvm.mlir.constant(42 : index) : !llvm.i64
- %1 = llvm.mlir.constant(10 : index) : !llvm.i64
- %2 = llvm.mlir.constant(1 : index) : !llvm.i64
+ %0 = llvm.mlir.constant(42 : index) : i64
+ %1 = llvm.mlir.constant(10 : index) : i64
+ %2 = llvm.mlir.constant(1 : index) : i64
omp.parallel {
"omp.wsloop"(%1, %0, %2) ( {
- ^bb0(%arg1: !llvm.i64):
+ ^bb0(%arg1: i64):
// The form of the emitted IR is controlled by OpenMPIRBuilder and
// tested there. Just check that the right functions are called.
// CHECK: call i32 @__kmpc_global_thread_num
// CHECK: call void @__kmpc_for_static_init_{{.*}}(%struct.ident_t* @[[$wsloop_loc_struct]],
%3 = llvm.mlir.constant(2.000000e+00 : f32) : !llvm.float
- %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+ %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
llvm.store %3, %4 : !llvm.ptr<float>
omp.yield
// CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* @[[$wsloop_loc_struct]],
- }) {operand_segment_sizes = dense<[1, 1, 1, 0, 0, 0, 0, 0, 0]> : vector<9xi32>} : (!llvm.i64, !llvm.i64, !llvm.i64) -> ()
+ }) {operand_segment_sizes = dense<[1, 1, 1, 0, 0, 0, 0, 0, 0]> : vector<9xi32>} : (i64, i64, i64) -> ()
omp.terminator
}
llvm.return
diff --git a/mlir/test/Target/rocdl.mlir b/mlir/test/Target/rocdl.mlir
index 949b1e74ae29..738a3fe7825e 100644
--- a/mlir/test/Target/rocdl.mlir
+++ b/mlir/test/Target/rocdl.mlir
@@ -1,32 +1,32 @@
// RUN: mlir-translate -mlir-to-rocdlir %s | FileCheck %s
-llvm.func @rocdl_special_regs() -> !llvm.i32 {
+llvm.func @rocdl_special_regs() -> i32 {
// CHECK-LABEL: rocdl_special_regs
// CHECK: call i32 @llvm.amdgcn.workitem.id.x()
- %1 = rocdl.workitem.id.x : !llvm.i32
+ %1 = rocdl.workitem.id.x : i32
// CHECK: call i32 @llvm.amdgcn.workitem.id.y()
- %2 = rocdl.workitem.id.y : !llvm.i32
+ %2 = rocdl.workitem.id.y : i32
// CHECK: call i32 @llvm.amdgcn.workitem.id.z()
- %3 = rocdl.workitem.id.z : !llvm.i32
+ %3 = rocdl.workitem.id.z : i32
// CHECK: call i32 @llvm.amdgcn.workgroup.id.x()
- %4 = rocdl.workgroup.id.x : !llvm.i32
+ %4 = rocdl.workgroup.id.x : i32
// CHECK: call i32 @llvm.amdgcn.workgroup.id.y()
- %5 = rocdl.workgroup.id.y : !llvm.i32
+ %5 = rocdl.workgroup.id.y : i32
// CHECK: call i32 @llvm.amdgcn.workgroup.id.z()
- %6 = rocdl.workgroup.id.z : !llvm.i32
+ %6 = rocdl.workgroup.id.z : i32
// CHECK: call i64 @__ockl_get_local_size(i32 0)
- %7 = rocdl.workgroup.dim.x : !llvm.i64
+ %7 = rocdl.workgroup.dim.x : i64
// CHECK: call i64 @__ockl_get_local_size(i32 1)
- %8 = rocdl.workgroup.dim.y : !llvm.i64
+ %8 = rocdl.workgroup.dim.y : i64
// CHECK: call i64 @__ockl_get_local_size(i32 2)
- %9 = rocdl.workgroup.dim.z : !llvm.i64
+ %9 = rocdl.workgroup.dim.z : i64
// CHECK: call i64 @__ockl_get_global_size(i32 0)
- %10 = rocdl.grid.dim.x : !llvm.i64
+ %10 = rocdl.grid.dim.x : i64
// CHECK: call i64 @__ockl_get_global_size(i32 1)
- %11 = rocdl.grid.dim.y : !llvm.i64
+ %11 = rocdl.grid.dim.y : i64
// CHECK: call i64 @__ockl_get_global_size(i32 2)
- %12 = rocdl.grid.dim.z : !llvm.i64
- llvm.return %1 : !llvm.i32
+ %12 = rocdl.grid.dim.z : i64
+ llvm.return %1 : i32
}
llvm.func @kernel_func() attributes {gpu.kernel} {
@@ -43,7 +43,7 @@ llvm.func @rocdl.barrier() {
}
llvm.func @rocdl.xdlops(%arg0 : !llvm.float, %arg1 : !llvm.float,
- %arg2 : !llvm.vec<32 x float>, %arg3 : !llvm.i32,
+ %arg2 : !llvm.vec<32 x float>, %arg3 : i32,
%arg4 : !llvm.vec<16 x float>, %arg5 : !llvm.vec<4 x float>,
%arg6 : !llvm.vec<4 x half>, %arg7 : !llvm.vec<32 x i32>,
%arg8 : !llvm.vec<16 x i32>, %arg9 : !llvm.vec<4 x i32>,
@@ -52,109 +52,109 @@ llvm.func @rocdl.xdlops(%arg0 : !llvm.float, %arg1 : !llvm.float,
// CHECK: call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %{{.*}}, float %{{.*}}, <32 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r0 = rocdl.mfma.f32.32x32x1f32 %arg0, %arg1, %arg2, %arg3, %arg3, %arg3 :
(!llvm.float, !llvm.float, !llvm.vec<32 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float>
+ i32, i32, i32) -> !llvm.vec<32 x float>
// CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.16x16x1f32(float %{{.*}}, float %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r1 = rocdl.mfma.f32.16x16x1f32 %arg0, %arg1, %arg4, %arg3, %arg3, %arg3 :
(!llvm.float, !llvm.float, !llvm.vec<16 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ i32, i32, i32) -> !llvm.vec<16 x float>
// CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.16x16x4f32(float %{{.*}}, float %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r2 = rocdl.mfma.f32.16x16x4f32 %arg0, %arg1, %arg5, %arg3, %arg3, %arg3 :
(!llvm.float, !llvm.float, !llvm.vec<4 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ i32, i32, i32) -> !llvm.vec<4 x float>
// CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float %{{.*}}, float %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r3 = rocdl.mfma.f32.4x4x1f32 %arg0, %arg1, %arg5, %arg3, %arg3, %arg3 :
(!llvm.float, !llvm.float, !llvm.vec<4 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ i32, i32, i32) -> !llvm.vec<4 x float>
// CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.32x32x2f32(float %{{.*}}, float %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r4= rocdl.mfma.f32.32x32x2f32 %arg0, %arg1, %arg4, %arg3, %arg3, %arg3 :
(!llvm.float, !llvm.float, !llvm.vec<16 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ i32, i32, i32) -> !llvm.vec<16 x float>
// CHECK: call <32 x float> @llvm.amdgcn.mfma.f32.32x32x4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <32 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r5 = rocdl.mfma.f32.32x32x4f16 %arg6, %arg6, %arg2, %arg3, %arg3, %arg3 :
(!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<32 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float>
+ i32, i32, i32) -> !llvm.vec<32 x float>
// CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.16x16x4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r6 = rocdl.mfma.f32.16x16x4f16 %arg6, %arg6, %arg4, %arg3, %arg3, %arg3 :
(!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ i32, i32, i32) -> !llvm.vec<16 x float>
// CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.4x4x4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r7 = rocdl.mfma.f32.4x4x4f16 %arg6, %arg6, %arg5, %arg3, %arg3, %arg3 :
(!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ i32, i32, i32) -> !llvm.vec<4 x float>
// CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.32x32x8f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r8 = rocdl.mfma.f32.32x32x8f16 %arg6, %arg6, %arg4, %arg3, %arg3, %arg3 :
(!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ i32, i32, i32) -> !llvm.vec<16 x float>
// CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.16x16x16f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r9 = rocdl.mfma.f32.16x16x16f16 %arg6, %arg6, %arg5, %arg3, %arg3, %arg3 :
(!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ i32, i32, i32) -> !llvm.vec<4 x float>
// CHECK: call <32 x i32> @llvm.amdgcn.mfma.i32.32x32x4i8(i32 %{{.*}}, i32 %{{.*}}, <32 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r10 = rocdl.mfma.i32.32x32x4i8 %arg3, %arg3, %arg7, %arg3, %arg3, %arg3 :
- (!llvm.i32, !llvm.i32, !llvm.vec<32 x i32>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x i32>
+ (i32, i32, !llvm.vec<32 x i32>,
+ i32, i32, i32) -> !llvm.vec<32 x i32>
// CHECK: call <16 x i32> @llvm.amdgcn.mfma.i32.16x16x4i8(i32 %{{.*}}, i32 %{{.*}}, <16 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r11 = rocdl.mfma.i32.16x16x4i8 %arg3, %arg3, %arg8, %arg3, %arg3, %arg3 :
- (!llvm.i32, !llvm.i32, !llvm.vec<16 x i32>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x i32>
+ (i32, i32, !llvm.vec<16 x i32>,
+ i32, i32, i32) -> !llvm.vec<16 x i32>
// CHECK: call <4 x i32> @llvm.amdgcn.mfma.i32.4x4x4i8(i32 %{{.*}}, i32 %{{.*}}, <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r12 = rocdl.mfma.i32.4x4x4i8 %arg3, %arg3, %arg9, %arg3, %arg3, %arg3 :
- (!llvm.i32, !llvm.i32, !llvm.vec<4 x i32>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x i32>
+ (i32, i32, !llvm.vec<4 x i32>,
+ i32, i32, i32) -> !llvm.vec<4 x i32>
// CHECK: call <16 x i32> @llvm.amdgcn.mfma.i32.32x32x8i8(i32 %{{.*}}, i32 %{{.*}}, <16 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r13 = rocdl.mfma.i32.32x32x8i8 %arg3, %arg3, %arg8, %arg3, %arg3, %arg3 :
- (!llvm.i32, !llvm.i32, !llvm.vec<16 x i32>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x i32>
+ (i32, i32, !llvm.vec<16 x i32>,
+ i32, i32, i32) -> !llvm.vec<16 x i32>
// CHECK: call <4 x i32> @llvm.amdgcn.mfma.i32.16x16x16i8(i32 %{{.*}}, i32 %{{.*}}, <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r14 = rocdl.mfma.i32.16x16x16i8 %arg3, %arg3, %arg9, %arg3, %arg3, %arg3 :
- (!llvm.i32, !llvm.i32, !llvm.vec<4 x i32>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x i32>
+ (i32, i32, !llvm.vec<4 x i32>,
+ i32, i32, i32) -> !llvm.vec<4 x i32>
// CHECK: call <32 x float> @llvm.amdgcn.mfma.f32.32x32x2bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <32 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r15 = rocdl.mfma.f32.32x32x2bf16 %arg10, %arg10, %arg2, %arg3, %arg3, %arg3 :
(!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float>
+ i32, i32, i32) -> !llvm.vec<32 x float>
// CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.16x16x2bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r16 = rocdl.mfma.f32.16x16x2bf16 %arg10, %arg10, %arg4, %arg3, %arg3, %arg3 :
(!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ i32, i32, i32) -> !llvm.vec<16 x float>
// CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.4x4x2bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r17 = rocdl.mfma.f32.4x4x2bf16 %arg10, %arg10, %arg5, %arg3, %arg3, %arg3 :
(!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ i32, i32, i32) -> !llvm.vec<4 x float>
// CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.32x32x4bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r18 = rocdl.mfma.f32.32x32x4bf16 %arg10, %arg10, %arg4, %arg3, %arg3, %arg3 :
(!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+ i32, i32, i32) -> !llvm.vec<16 x float>
// CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.16x16x8bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
%r19 = rocdl.mfma.f32.16x16x8bf16 %arg10, %arg10, %arg5, %arg3, %arg3, %arg3 :
(!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>,
- !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+ i32, i32, i32) -> !llvm.vec<4 x float>
llvm.return %r0 : !llvm.vec<32 x float>
}
-llvm.func @rocdl.mubuf(%rsrc : !llvm.vec<4 x i32>, %vindex : !llvm.i32,
- %offset : !llvm.i32, %glc : !llvm.i1,
- %slc : !llvm.i1, %vdata1 : !llvm.vec<1 x float>,
+llvm.func @rocdl.mubuf(%rsrc : !llvm.vec<4 x i32>, %vindex : i32,
+ %offset : i32, %glc : i1,
+ %slc : i1, %vdata1 : !llvm.vec<1 x float>,
%vdata2 : !llvm.vec<2 x float>, %vdata4 : !llvm.vec<4 x float>) {
// CHECK-LABEL: rocdl.mubuf
// CHECK: call <1 x float> @llvm.amdgcn.buffer.load.v1f32(<4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 %{{.*}}, i1 %{{.*}})
diff --git a/mlir/test/Transforms/test-convert-call-op.mlir b/mlir/test/Transforms/test-convert-call-op.mlir
index ea6b6a1d2469..19914caad025 100644
--- a/mlir/test/Transforms/test-convert-call-op.mlir
+++ b/mlir/test/Transforms/test-convert-call-op.mlir
@@ -1,9 +1,9 @@
// RUN: mlir-opt %s -test-convert-call-op | FileCheck %s
-// CHECK-LABEL: llvm.func @callee(!llvm.ptr<i8>) -> !llvm.i32
+// CHECK-LABEL: llvm.func @callee(!llvm.ptr<i8>) -> i32
func private @callee(!test.test_type) -> i32
-// CHECK-NEXT: llvm.func @caller() -> !llvm.i32
+// CHECK-NEXT: llvm.func @caller() -> i32
func @caller() -> i32 {
%arg = "test.type_producer"() : () -> !test.test_type
%out = call @callee(%arg) : (!test.test_type) -> i32
@@ -11,4 +11,4 @@ func @caller() -> i32 {
}
// CHECK-NEXT: [[ARG:%.*]] = llvm.mlir.null : !llvm.ptr<i8>
// CHECK-NEXT: [[OUT:%.*]] = llvm.call @callee([[ARG]])
-// CHECK-SAME: : (!llvm.ptr<i8>) -> !llvm.i32
+// CHECK-SAME: : (!llvm.ptr<i8>) -> i32
diff --git a/mlir/test/lib/Transforms/TestConvertCallOp.cpp b/mlir/test/lib/Transforms/TestConvertCallOp.cpp
index 82cc95aac8a8..2fe29b44e006 100644
--- a/mlir/test/lib/Transforms/TestConvertCallOp.cpp
+++ b/mlir/test/lib/Transforms/TestConvertCallOp.cpp
@@ -45,8 +45,7 @@ class TestConvertCallOp
// Populate type conversions.
LLVMTypeConverter type_converter(m.getContext());
type_converter.addConversion([&](test::TestType type) {
- return LLVM::LLVMPointerType::get(
- LLVM::LLVMIntegerType::get(m.getContext(), 8));
+ return LLVM::LLVMPointerType::get(IntegerType::get(m.getContext(), 8));
});
// Populate patterns.
diff --git a/mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir b/mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir
index 9934083fb11a..155c3e858420 100644
--- a/mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir
+++ b/mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir
@@ -26,7 +26,7 @@ func @simple_add1_add2_test(%arg0: memref<2xf32>, %arg1: memref<2xf32>) {
}
// External declarations.
-llvm.func @malloc(!llvm.i64) -> !llvm.ptr<i8>
+llvm.func @malloc(i64) -> !llvm.ptr<i8>
llvm.func @free(!llvm.ptr<i8>)
func private @printF32(%arg0: f32)
func private @printComma()
diff --git a/mlir/test/mlir-cpu-runner/simple.mlir b/mlir/test/mlir-cpu-runner/simple.mlir
index e75d98aa8bd3..4007ac7c4f5f 100644
--- a/mlir/test/mlir-cpu-runner/simple.mlir
+++ b/mlir/test/mlir-cpu-runner/simple.mlir
@@ -15,7 +15,7 @@
// Declarations of C library functions.
llvm.func @fabsf(!llvm.float) -> !llvm.float
-llvm.func @malloc(!llvm.i64) -> !llvm.ptr<i8>
+llvm.func @malloc(i64) -> !llvm.ptr<i8>
llvm.func @free(!llvm.ptr<i8>)
// Check that a simple function with a nested call works.
@@ -28,8 +28,8 @@ llvm.func @main() -> !llvm.float {
// Helper typed functions wrapping calls to "malloc" and "free".
llvm.func @allocation() -> !llvm.ptr<float> {
- %0 = llvm.mlir.constant(4 : index) : !llvm.i64
- %1 = llvm.call @malloc(%0) : (!llvm.i64) -> !llvm.ptr<i8>
+ %0 = llvm.mlir.constant(4 : index) : i64
+ %1 = llvm.call @malloc(%0) : (i64) -> !llvm.ptr<i8>
%2 = llvm.bitcast %1 : !llvm.ptr<i8> to !llvm.ptr<float>
llvm.return %2 : !llvm.ptr<float>
}
@@ -43,11 +43,11 @@ llvm.func @deallocation(%arg0: !llvm.ptr<float>) {
// works.
llvm.func @foo() -> !llvm.float {
%0 = llvm.call @allocation() : () -> !llvm.ptr<float>
- %1 = llvm.mlir.constant(0 : index) : !llvm.i64
+ %1 = llvm.mlir.constant(0 : index) : i64
%2 = llvm.mlir.constant(1.234000e+03 : f32) : !llvm.float
- %3 = llvm.getelementptr %0[%1] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+ %3 = llvm.getelementptr %0[%1] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
llvm.store %2, %3 : !llvm.ptr<float>
- %4 = llvm.getelementptr %0[%1] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+ %4 = llvm.getelementptr %0[%1] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
%5 = llvm.load %4 : !llvm.ptr<float>
llvm.call @deallocation(%0) : (!llvm.ptr<float>) -> ()
llvm.return %5 : !llvm.float
@@ -55,15 +55,15 @@ llvm.func @foo() -> !llvm.float {
// NOMAIN: 1.234000e+03
// Check that i32 return type works
-llvm.func @int32_main() -> !llvm.i32 {
- %0 = llvm.mlir.constant(42 : i32) : !llvm.i32
- llvm.return %0 : !llvm.i32
+llvm.func @int32_main() -> i32 {
+ %0 = llvm.mlir.constant(42 : i32) : i32
+ llvm.return %0 : i32
}
// INT32MAIN: 42
// Check that i64 return type works
-llvm.func @int64_main() -> !llvm.i64 {
- %0 = llvm.mlir.constant(42 : i64) : !llvm.i64
- llvm.return %0 : !llvm.i64
+llvm.func @int64_main() -> i64 {
+ %0 = llvm.mlir.constant(42 : i64) : i64
+ llvm.return %0 : i64
}
// INT64MAIN: 42
More information about the llvm-branch-commits
mailing list