[Mlir-commits] [mlir] [MLIR][LLVM] Remove typed pointer remnants from target tests (PR #71210)
llvmlistbot at llvm.org
Fri Nov 3 10:43:21 PDT 2023
llvmbot wrote:
@llvm/pr-subscribers-flang-openmp
@llvm/pr-subscribers-mlir-llvm
Author: Christian Ulmann (Dinistro)
Changes
This commit removes all LLVM dialect typed pointers from the target tests. Typed pointers have been deprecated for a while now and will soon be removed from the LLVM dialect.
Related PSA: https://discourse.llvm.org/t/psa-removal-of-typed-pointers-from-the-llvm-dialect/74502
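For reference, the mechanical pattern applied throughout these tests is shown below as a minimal sketch (illustrative function names, not lines taken from the patch): typed pointer types such as `!llvm.ptr<i8>` or `!llvm.ptr<f32>` collapse into the opaque `!llvm.ptr`, and operations that previously derived an element type from the pointer now spell it out explicitly.

```mlir
// Before: typed pointers carry the pointee type.
llvm.func @before(%p: !llvm.ptr<f32>, %i: i64) {
  %0 = llvm.getelementptr %p[%i] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
  %1 = llvm.load %0 : !llvm.ptr<f32>
  llvm.return
}

// After: opaque pointers; element and result types are stated on the ops.
llvm.func @after(%p: !llvm.ptr, %i: i64) {
  %0 = llvm.getelementptr %p[%i] : (!llvm.ptr, i64) -> !llvm.ptr, f32
  %1 = llvm.load %0 : !llvm.ptr -> f32
  llvm.return
}
```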
---
Patch is 76.33 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/71210.diff
11 Files Affected:
- (modified) mlir/test/Target/LLVMIR/amx.mlir (+2-2)
- (modified) mlir/test/Target/LLVMIR/arm-sme.mlir (+43-51)
- (modified) mlir/test/Target/LLVMIR/arm-sve.mlir (+20-22)
- (modified) mlir/test/Target/LLVMIR/llvmir-debug.mlir (+2-2)
- (modified) mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir (+4-4)
- (modified) mlir/test/Target/LLVMIR/llvmir-invalid.mlir (+2-2)
- (modified) mlir/test/Target/LLVMIR/llvmir-types.mlir (+11-33)
- (modified) mlir/test/Target/LLVMIR/llvmir.mlir (+184-197)
- (modified) mlir/test/Target/LLVMIR/nvvmir.mlir (+18-18)
- (modified) mlir/test/Target/LLVMIR/openacc-llvm.mlir (+20-20)
- (modified) mlir/test/Target/LLVMIR/openmp-nested.mlir (+1-1)
``````````diff
diff --git a/mlir/test/Target/LLVMIR/amx.mlir b/mlir/test/Target/LLVMIR/amx.mlir
index 4df349b17b0a024..0281dfcd6ad69fe 100644
--- a/mlir/test/Target/LLVMIR/amx.mlir
+++ b/mlir/test/Target/LLVMIR/amx.mlir
@@ -3,11 +3,11 @@
// CHECK-LABEL: define void @target(ptr %0)
// CHECK: %[[c:.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 16)
// CHECK: call void @llvm.x86.tilestored64.internal(i16 16, i16 16, ptr %0, i64 32, x86_amx %[[c]]
-llvm.func @target(%ptr: !llvm.ptr<i8>) {
+llvm.func @target(%ptr: !llvm.ptr) {
%c = llvm.mlir.constant(16 : i16) : i16
%s = llvm.mlir.constant(32 : i64) : i64
%0 = "amx.tilezero"(%c, %c) : (i16, i16) -> !llvm.array<16 x vector<16xbf16>>
- "amx.tilestored64"(%c, %c, %ptr, %s, %0) : (i16, i16, !llvm.ptr<i8>, i64, !llvm.array<16 x vector<16xbf16>>) -> ()
+ "amx.tilestored64"(%c, %c, %ptr, %s, %0) : (i16, i16, !llvm.ptr, i64, !llvm.array<16 x vector<16xbf16>>) -> ()
llvm.return
}
diff --git a/mlir/test/Target/LLVMIR/arm-sme.mlir b/mlir/test/Target/LLVMIR/arm-sme.mlir
index 628d7ba4b649e51..27c94d9aeac8bf4 100644
--- a/mlir/test/Target/LLVMIR/arm-sme.mlir
+++ b/mlir/test/Target/LLVMIR/arm-sme.mlir
@@ -138,42 +138,38 @@ llvm.func @arm_sme_load(%nxv1i1 : vector<[1]xi1>,
%nxv4i1 : vector<[4]xi1>,
%nxv8i1 : vector<[8]xi1>,
%nxv16i1 : vector<[16]xi1>,
- %p8 : !llvm.ptr<i8>,
- %p16 : !llvm.ptr<i16>,
- %p32 : !llvm.ptr<i32>,
- %p64 : !llvm.ptr<i64>,
- %p128 : !llvm.ptr<i128>) {
+ %ptr : !llvm.ptr) {
%c0 = llvm.mlir.constant(0 : index) : i32
// CHECK: call void @llvm.aarch64.sme.ld1q.horiz
- "arm_sme.intr.ld1q.horiz"(%nxv1i1, %p128, %c0, %c0) :
- (vector<[1]xi1>, !llvm.ptr<i128>, i32, i32) -> ()
+ "arm_sme.intr.ld1q.horiz"(%nxv1i1, %ptr, %c0, %c0) :
+ (vector<[1]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.ld1d.horiz
- "arm_sme.intr.ld1d.horiz"(%nxv2i1, %p64, %c0, %c0) :
- (vector<[2]xi1>, !llvm.ptr<i64>, i32, i32) -> ()
+ "arm_sme.intr.ld1d.horiz"(%nxv2i1, %ptr, %c0, %c0) :
+ (vector<[2]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.ld1w.horiz
- "arm_sme.intr.ld1w.horiz"(%nxv4i1, %p32, %c0, %c0) :
- (vector<[4]xi1>, !llvm.ptr<i32>, i32, i32) -> ()
+ "arm_sme.intr.ld1w.horiz"(%nxv4i1, %ptr, %c0, %c0) :
+ (vector<[4]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.ld1h.horiz
- "arm_sme.intr.ld1h.horiz"(%nxv8i1, %p16, %c0, %c0) :
- (vector<[8]xi1>, !llvm.ptr<i16>, i32, i32) -> ()
+ "arm_sme.intr.ld1h.horiz"(%nxv8i1, %ptr, %c0, %c0) :
+ (vector<[8]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.ld1b.horiz
- "arm_sme.intr.ld1b.horiz"(%nxv16i1, %p8, %c0, %c0) :
- (vector<[16]xi1>, !llvm.ptr<i8>, i32, i32) -> ()
+ "arm_sme.intr.ld1b.horiz"(%nxv16i1, %ptr, %c0, %c0) :
+ (vector<[16]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.ld1q.vert
- "arm_sme.intr.ld1q.vert"(%nxv1i1, %p128, %c0, %c0) :
- (vector<[1]xi1>, !llvm.ptr<i128>, i32, i32) -> ()
+ "arm_sme.intr.ld1q.vert"(%nxv1i1, %ptr, %c0, %c0) :
+ (vector<[1]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.ld1d.vert
- "arm_sme.intr.ld1d.vert"(%nxv2i1, %p64, %c0, %c0) :
- (vector<[2]xi1>, !llvm.ptr<i64>, i32, i32) -> ()
+ "arm_sme.intr.ld1d.vert"(%nxv2i1, %ptr, %c0, %c0) :
+ (vector<[2]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.ld1w.vert
- "arm_sme.intr.ld1w.vert"(%nxv4i1, %p32, %c0, %c0) :
- (vector<[4]xi1>, !llvm.ptr<i32>, i32, i32) -> ()
+ "arm_sme.intr.ld1w.vert"(%nxv4i1, %ptr, %c0, %c0) :
+ (vector<[4]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.ld1h.vert
- "arm_sme.intr.ld1h.vert"(%nxv8i1, %p16, %c0, %c0) :
- (vector<[8]xi1>, !llvm.ptr<i16>, i32, i32) -> ()
+ "arm_sme.intr.ld1h.vert"(%nxv8i1, %ptr, %c0, %c0) :
+ (vector<[8]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.ld1b.vert
- "arm_sme.intr.ld1b.vert"(%nxv16i1, %p8, %c0, %c0) :
- (vector<[16]xi1>, !llvm.ptr<i8>, i32, i32) -> ()
+ "arm_sme.intr.ld1b.vert"(%nxv16i1, %ptr, %c0, %c0) :
+ (vector<[16]xi1>, !llvm.ptr, i32, i32) -> ()
llvm.return
}
@@ -185,44 +181,40 @@ llvm.func @arm_sme_store(%nxv1i1 : vector<[1]xi1>,
%nxv4i1 : vector<[4]xi1>,
%nxv8i1 : vector<[8]xi1>,
%nxv16i1 : vector<[16]xi1>,
- %p8 : !llvm.ptr<i8>,
- %p16 : !llvm.ptr<i16>,
- %p32 : !llvm.ptr<i32>,
- %p64 : !llvm.ptr<i64>,
- %p128 : !llvm.ptr<i128>) {
+ %ptr : !llvm.ptr) {
%c0 = llvm.mlir.constant(0 : index) : i32
// CHECK: call void @llvm.aarch64.sme.st1q.horiz
- "arm_sme.intr.st1q.horiz"(%nxv1i1, %p128, %c0, %c0) :
- (vector<[1]xi1>, !llvm.ptr<i128>, i32, i32) -> ()
+ "arm_sme.intr.st1q.horiz"(%nxv1i1, %ptr, %c0, %c0) :
+ (vector<[1]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.st1d.horiz
- "arm_sme.intr.st1d.horiz"(%nxv2i1, %p64, %c0, %c0) :
- (vector<[2]xi1>, !llvm.ptr<i64>, i32, i32) -> ()
+ "arm_sme.intr.st1d.horiz"(%nxv2i1, %ptr, %c0, %c0) :
+ (vector<[2]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.st1w.horiz
- "arm_sme.intr.st1w.horiz"(%nxv4i1, %p32, %c0, %c0) :
- (vector<[4]xi1>, !llvm.ptr<i32>, i32, i32) -> ()
+ "arm_sme.intr.st1w.horiz"(%nxv4i1, %ptr, %c0, %c0) :
+ (vector<[4]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.st1h.horiz
- "arm_sme.intr.st1h.horiz"(%nxv8i1, %p16, %c0, %c0) :
- (vector<[8]xi1>, !llvm.ptr<i16>, i32, i32) -> ()
+ "arm_sme.intr.st1h.horiz"(%nxv8i1, %ptr, %c0, %c0) :
+ (vector<[8]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.st1b.horiz
- "arm_sme.intr.st1b.horiz"(%nxv16i1, %p8, %c0, %c0) :
- (vector<[16]xi1>, !llvm.ptr<i8>, i32, i32) -> ()
+ "arm_sme.intr.st1b.horiz"(%nxv16i1, %ptr, %c0, %c0) :
+ (vector<[16]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.st1q.vert
- "arm_sme.intr.st1q.vert"(%nxv1i1, %p128, %c0, %c0) :
- (vector<[1]xi1>, !llvm.ptr<i128>, i32, i32) -> ()
+ "arm_sme.intr.st1q.vert"(%nxv1i1, %ptr, %c0, %c0) :
+ (vector<[1]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.st1d.vert
- "arm_sme.intr.st1d.vert"(%nxv2i1, %p64, %c0, %c0) :
- (vector<[2]xi1>, !llvm.ptr<i64>, i32, i32) -> ()
+ "arm_sme.intr.st1d.vert"(%nxv2i1, %ptr, %c0, %c0) :
+ (vector<[2]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.st1w.vert
- "arm_sme.intr.st1w.vert"(%nxv4i1, %p32, %c0, %c0) :
- (vector<[4]xi1>, !llvm.ptr<i32>, i32, i32) -> ()
+ "arm_sme.intr.st1w.vert"(%nxv4i1, %ptr, %c0, %c0) :
+ (vector<[4]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.st1h.vert
- "arm_sme.intr.st1h.vert"(%nxv8i1, %p16, %c0, %c0) :
- (vector<[8]xi1>, !llvm.ptr<i16>, i32, i32) -> ()
+ "arm_sme.intr.st1h.vert"(%nxv8i1, %ptr, %c0, %c0) :
+ (vector<[8]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.st1b.vert
- "arm_sme.intr.st1b.vert"(%nxv16i1, %p8, %c0, %c0) :
- (vector<[16]xi1>, !llvm.ptr<i8>, i32, i32) -> ()
+ "arm_sme.intr.st1b.vert"(%nxv16i1, %ptr, %c0, %c0) :
+ (vector<[16]xi1>, !llvm.ptr, i32, i32) -> ()
// CHECK: call void @llvm.aarch64.sme.str
- "arm_sme.intr.str"(%c0, %p8) : (i32, !llvm.ptr<i8>) -> ()
+ "arm_sme.intr.str"(%c0, %ptr) : (i32, !llvm.ptr) -> ()
llvm.return
}
diff --git a/mlir/test/Target/LLVMIR/arm-sve.mlir b/mlir/test/Target/LLVMIR/arm-sve.mlir
index 172a2f7d12d440e..b63d3f06515690a 100644
--- a/mlir/test/Target/LLVMIR/arm-sve.mlir
+++ b/mlir/test/Target/LLVMIR/arm-sve.mlir
@@ -191,44 +191,44 @@ llvm.func @arm_sve_abs_diff(%arg0: vector<[4]xi32>,
}
// CHECK-LABEL: define void @memcopy
-llvm.func @memcopy(%arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>,
+llvm.func @memcopy(%arg0: !llvm.ptr, %arg1: !llvm.ptr,
%arg2: i64, %arg3: i64, %arg4: i64,
- %arg5: !llvm.ptr<f32>, %arg6: !llvm.ptr<f32>,
+ %arg5: !llvm.ptr, %arg6: !llvm.ptr,
%arg7: i64, %arg8: i64, %arg9: i64,
%arg10: i64) {
- %0 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %0 = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>, array<1 x i64>)>
- %1 = llvm.insertvalue %arg0, %0[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %1 = llvm.insertvalue %arg0, %0[0] : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
array<1 x i64>)>
- %2 = llvm.insertvalue %arg1, %1[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %2 = llvm.insertvalue %arg1, %1[1] : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
array<1 x i64>)>
- %3 = llvm.insertvalue %arg2, %2[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %3 = llvm.insertvalue %arg2, %2[2] : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
array<1 x i64>)>
- %4 = llvm.insertvalue %arg3, %3[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %4 = llvm.insertvalue %arg3, %3[3, 0] : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
array<1 x i64>)>
- %5 = llvm.insertvalue %arg4, %4[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %5 = llvm.insertvalue %arg4, %4[4, 0] : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
array<1 x i64>)>
- %6 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %6 = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
array<1 x i64>)>
- %7 = llvm.insertvalue %arg5, %6[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %7 = llvm.insertvalue %arg5, %6[0] : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
array<1 x i64>)>
- %8 = llvm.insertvalue %arg6, %7[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %8 = llvm.insertvalue %arg6, %7[1] : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
array<1 x i64>)>
- %9 = llvm.insertvalue %arg7, %8[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %9 = llvm.insertvalue %arg7, %8[2] : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
array<1 x i64>)>
- %10 = llvm.insertvalue %arg8, %9[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %10 = llvm.insertvalue %arg8, %9[3, 0] : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
array<1 x i64>)>
- %11 = llvm.insertvalue %arg9, %10[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %11 = llvm.insertvalue %arg9, %10[4, 0] : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
array<1 x i64>)>
%12 = llvm.mlir.constant(0 : index) : i64
@@ -243,23 +243,21 @@ llvm.func @memcopy(%arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>,
llvm.cond_br %17, ^bb2, ^bb3
^bb2:
// CHECK: extractvalue { ptr, ptr, i64, [1 x i64], [1 x i64] }
- %18 = llvm.extractvalue %5[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %18 = llvm.extractvalue %5[1] : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
array<1 x i64>)>
// CHECK: getelementptr float, ptr
- %19 = llvm.getelementptr %18[%16] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- %20 = llvm.bitcast %19 : !llvm.ptr<f32> to !llvm.ptr<vector<[4]xf32>>
+ %19 = llvm.getelementptr %18[%16] : (!llvm.ptr, i64) -> !llvm.ptr, f32
// CHECK: load <vscale x 4 x float>, ptr
- %21 = llvm.load %20 : !llvm.ptr<vector<[4]xf32>>
+ %21 = llvm.load %19 : !llvm.ptr -> vector<[4]xf32>
// CHECK: extractvalue { ptr, ptr, i64, [1 x i64], [1 x i64] }
- %22 = llvm.extractvalue %11[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+ %22 = llvm.extractvalue %11[1] : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
array<1 x i64>)>
// CHECK: getelementptr float, ptr
- %23 = llvm.getelementptr %22[%16] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- %24 = llvm.bitcast %23 : !llvm.ptr<f32> to !llvm.ptr<vector<[4]xf32>>
+ %23 = llvm.getelementptr %22[%16] : (!llvm.ptr, i64) -> !llvm.ptr, f32
// CHECK: store <vscale x 4 x float> %{{[0-9]+}}, ptr %{{[0-9]+}}
- llvm.store %21, %24 : !llvm.ptr<vector<[4]xf32>>
+ llvm.store %21, %23 : vector<[4]xf32>, !llvm.ptr
%25 = llvm.add %16, %15 : i64
llvm.br ^bb1(%25 : i64)
^bb3:
diff --git a/mlir/test/Target/LLVMIR/llvmir-debug.mlir b/mlir/test/Target/LLVMIR/llvmir-debug.mlir
index 8d1734d7cdc3117..ea962c66cb8eff9 100644
--- a/mlir/test/Target/LLVMIR/llvmir-debug.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-debug.mlir
@@ -89,13 +89,13 @@ llvm.func @func_no_debug() {
llvm.func @func_with_debug(%arg: i64) {
// CHECK: %[[ALLOC:.*]] = alloca
%allocCount = llvm.mlir.constant(1 : i32) : i32
- %alloc = llvm.alloca %allocCount x i64 : (i32) -> !llvm.ptr<i64>
+ %alloc = llvm.alloca %allocCount x i64 : (i32) -> !llvm.ptr
// CHECK: call void @llvm.dbg.value(metadata i64 %[[ARG]], metadata ![[VAR_LOC:[0-9]+]], metadata !DIExpression())
// CHECK: call void @llvm.dbg.declare(metadata ptr %[[ALLOC]], metadata ![[ADDR_LOC:[0-9]+]], metadata !DIExpression())
// CHECK: call void @llvm.dbg.value(metadata i64 %[[ARG]], metadata ![[NO_NAME_VAR:[0-9]+]], metadata !DIExpression())
llvm.intr.dbg.value #variable = %arg : i64
- llvm.intr.dbg.declare #variableAddr = %alloc : !llvm.ptr<i64>
+ llvm.intr.dbg.declare #variableAddr = %alloc : !llvm.ptr
llvm.intr.dbg.value #noNameVariable= %arg : i64
// CHECK: call void @func_no_debug(), !dbg ![[CALLSITE_LOC:[0-9]+]]
diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
index d23991b65523fcf..e586c0cd2720e48 100644
--- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
@@ -427,16 +427,16 @@ llvm.func @masked_load_store_intrinsics(%A: !llvm.ptr, %mask: vector<7xi1>) {
}
// CHECK-LABEL: @masked_gather_scatter_intrinsics
-llvm.func @masked_gather_scatter_intrinsics(%M: !llvm.vec<7 x ptr<f32>>, %mask: vector<7xi1>) {
+llvm.func @masked_gather_scatter_intrinsics(%M: !llvm.vec<7 x ptr>, %mask: vector<7xi1>) {
// CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> poison)
%a = llvm.intr.masked.gather %M, %mask { alignment = 1: i32} :
- (!llvm.vec<7 x ptr<f32>>, vector<7xi1>) -> vector<7xf32>
+ (!llvm.vec<7 x ptr>, vector<7xi1>) -> vector<7xf32>
// CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
%b = llvm.intr.masked.gather %M, %mask, %a { alignment = 1: i32} :
- (!llvm.vec<7 x ptr<f32>>, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
+ (!llvm.vec<7 x ptr>, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
// CHECK: call void @llvm.masked.scatter.v7f32.v7p0(<7 x float> %{{.*}}, <7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}})
llvm.intr.masked.scatter %b, %M, %mask { alignment = 1: i32} :
- vector<7xf32>, vector<7xi1> into !llvm.vec<7 x ptr<f32>>
+ vector<7xf32>, vector<7xi1> into !llvm.vec<7 x ptr>
llvm.return
}
diff --git a/mlir/test/Target/LLVMIR/llvmir-invalid.mlir b/mlir/test/Target/LLVMIR/llvmir-invalid.mlir
index 2d6ccff2d436fea..9b14f5814987d99 100644
--- a/mlir/test/Target/LLVMIR/llvmir-invalid.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-invalid.mlir
@@ -229,9 +229,9 @@ llvm.func @masked_gather_intr_wrong_type(%ptrs : vector<7xf32>, %mask : vector<7
// -----
-llvm.func @masked_scatter_intr_wrong_type(%vec : f32, %ptrs : !llvm.vec<7xptr<f32>>, %mask : vector<7xi1>) {
+llvm.func @masked_scatter_intr_wrong_type(%vec : f32, %ptrs : !llvm.vec<7xptr>, %mask : vector<7xi1>) {
// expected-error @below{{op operand #0 must be LLVM dialect-compatible vector type, but got 'f32'}}
- llvm.intr.masked.scatter %vec, %ptrs, %mask { alignment = 1: i32} : f32, vector<7xi1> into !llvm.vec<7xptr<f32>>
+ llvm.intr.masked.scatter %vec, %ptrs, %mask { alignment = 1: i32} : f32, vector<7xi1> into !llvm.vec<7xptr>
llvm.return
}
diff --git a/mlir/test/Target/LLVMIR/llvmir-types.mlir b/mlir/test/Target/LLVMIR/llvmir-types.mlir
index a92d46dfadfe25c..c85fa0101c00d74 100644
--- a/mlir/test/Target/LLVMIR/llvmir-types.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-types.mlir
@@ -40,7 +40,7 @@ llvm.func @f_void_variadic(...)
// CHECK: declare void @f_void_i32_i32_variadic(i32, i32, ...)
llvm.func @f_void_i32_i32_variadic(i32, i32, ...)
// CHECK: declare ptr @f_f_i32_i32()
-llvm.func @f_f_i32_i32() -> !llvm.ptr<func<i32 (i32)>>
+llvm.func @f_f_i32_i32() -> !llvm.ptr
//
// Integers.
@@ -65,22 +65,12 @@ llvm.func @return_i129() -> i129
// Pointers.
//
-// CHECK: declare ptr @return_pi8()
-llvm.func @return_pi8() -> !llvm.ptr<i8>
-// CHECK: declare ptr @return_pfloat()
-llvm.func @return_pfloat() -> !llvm.ptr<f32>
-// CHECK: declare ptr @return_ppi8()
-llvm.func @return_ppi8() -> !llvm.ptr<ptr<i8>>
-// CHECK: declare ptr @return_pppppi8()
-llvm.func @return_pppppi8() -> !llvm.ptr<ptr<ptr<ptr<ptr<i8>>>>>
-// CHECK: declare ptr @return_pi8_0()
-llvm.func @return_pi8_0() -> !llvm.ptr<i8, 0>
-// CHECK: declare ptr addrspace(1) @return_pi8_1()
-llvm.func @return_pi8_1() -> !llvm.ptr<i8, 1>
-// CHECK: declare ptr addrspace(42) @return_pi8_42()
-llvm.func @return_pi8_42() -> !llvm.ptr<i8, 42>
-// CHECK: declare ptr addrspace(9) @return_ppi8_42_9()
-llvm.func @return_ppi8_42_9() -> !llvm.ptr<ptr<i8, 42>, 9>
+// CHECK: declare ptr @return_p()
+llvm.func @return_p() -> !llvm.ptr
+// CHECK: declare ptr addrspace(1) @return_p_1()
+llvm.func @return_p_1() -> !llvm.ptr<1>
+// CHECK: declare ptr addrspace(42) @return_p_42()
+llvm.func @return_p_42() -> !llvm.ptr<42>
//
// Vectors.
@@ -97,7 +87,7 @@ llvm.func @return_vs_4_i32() -> !llvm.vec<?x4 x i32>
// CHECK: declare <vscale x 8 x half> @return_vs_8_half()
llvm.func @return_vs_8_half() -> !llvm.vec<?x8 x f16>
// CHECK: declare <4 x ptr> @return_v_4_pi8()
-llvm.func @return_v_4_pi8() -> !llvm.vec<4xptr<i8>>
+llvm.func @return_v_4_pi8() -> !llvm.vec<4xptr>
//
// Arrays.
@@ -107,8 +97,8 @@ llvm.func @return_v_4_pi8() -> !llvm.vec<4xptr<i8>>
llvm.func @return_a10_i32() -> !llvm.array<10 x i32>
// CHECK: declare [8 x float] @return_a8_float()
llvm.func @return_a8_float() -> !llvm.array<8 x f32>
-// CHECK: declare [10 x ptr addrspace(4)] @return_a10_pi32_4()
-llvm.func @return_a10_pi32_4() -> !llvm.array<10 x ptr<i32, 4>>
+// CHECK: declare [10 x ptr addrspace(4)] @return_a10_p_4()
+llvm.func @return_a10_p_4() -> !llvm.array<10 x ptr<4>>
// CHECK: declare [10 x [4 x float]] @return_a10_a4_float()
llvm.func @return_a10_a4_float() -> !l...
[truncated]
``````````
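The arm-sve.mlir hunk above also drops the `llvm.bitcast` that was only needed to reinterpret a typed element pointer before a vector load or store; with opaque pointers the load and store name their value type directly. A minimal sketch of that store-side rewrite (value names are illustrative, not from the test):

```mlir
// Before: reinterpret the element pointer, then use a typed store.
%gep  = llvm.getelementptr %base[%idx] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
%cast = llvm.bitcast %gep : !llvm.ptr<f32> to !llvm.ptr<vector<[4]xf32>>
llvm.store %val, %cast : !llvm.ptr<vector<[4]xf32>>

// After: no bitcast; the store states the stored type explicitly.
%gep2 = llvm.getelementptr %base2[%idx] : (!llvm.ptr, i64) -> !llvm.ptr, f32
llvm.store %val, %gep2 : vector<[4]xf32>, !llvm.ptr
```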
https://github.com/llvm/llvm-project/pull/71210
More information about the Mlir-commits mailing list