[Mlir-commits] [mlir] fcc26ba - [MLIR][LLVM] Remove typed pointer remnants from target tests (#71210)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Fri Nov 3 13:21:49 PDT 2023


Author: Christian Ulmann
Date: 2023-11-03T21:21:45+01:00
New Revision: fcc26bad82e190e1ec09bc6fe76ea320f5ffeeeb

URL: https://github.com/llvm/llvm-project/commit/fcc26bad82e190e1ec09bc6fe76ea320f5ffeeeb
DIFF: https://github.com/llvm/llvm-project/commit/fcc26bad82e190e1ec09bc6fe76ea320f5ffeeeb.diff

LOG: [MLIR][LLVM] Remove typed pointer remnants from target tests (#71210)

This commit removes all LLVM dialect typed pointers from the target
tests. Typed pointers have been deprecated for a while now and are
planned to be removed from the LLVM dialect soon.

Related PSA:
https://discourse.llvm.org/t/psa-removal-of-typed-pointers-from-the-llvm-dialect/74502
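
For illustration, the migration pattern applied throughout these tests
looks roughly as follows (a minimal sketch; the @typed/@opaque function
names are hypothetical and not taken from this commit). With typed
pointers the element type is part of the pointer type, whereas with
opaque pointers the ops themselves carry the element type, e.g. on
getelementptr, load, and store:

// Typed pointers: element type encoded in !llvm.ptr<f32>.
llvm.func @typed(%p: !llvm.ptr<f32>, %i: i64) {
  %a = llvm.getelementptr %p[%i] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
  %v = llvm.load %a : !llvm.ptr<f32>
  llvm.store %v, %a : !llvm.ptr<f32>
  llvm.return
}

// Opaque pointers: bare !llvm.ptr; the element type moves onto the ops.
llvm.func @opaque(%p: !llvm.ptr, %i: i64) {
  %a = llvm.getelementptr %p[%i] : (!llvm.ptr, i64) -> !llvm.ptr, f32
  %v = llvm.load %a : !llvm.ptr -> f32
  llvm.store %v, %a : f32, !llvm.ptr
  llvm.return
}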

Added: 
    

Modified: 
    mlir/test/Target/LLVMIR/amx.mlir
    mlir/test/Target/LLVMIR/arm-sme.mlir
    mlir/test/Target/LLVMIR/arm-sve.mlir
    mlir/test/Target/LLVMIR/llvmir-debug.mlir
    mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
    mlir/test/Target/LLVMIR/llvmir-invalid.mlir
    mlir/test/Target/LLVMIR/llvmir-types.mlir
    mlir/test/Target/LLVMIR/llvmir.mlir
    mlir/test/Target/LLVMIR/nvvmir.mlir
    mlir/test/Target/LLVMIR/openacc-llvm.mlir
    mlir/test/Target/LLVMIR/openmp-nested.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Target/LLVMIR/amx.mlir b/mlir/test/Target/LLVMIR/amx.mlir
index 4df349b17b0a024..0281dfcd6ad69fe 100644
--- a/mlir/test/Target/LLVMIR/amx.mlir
+++ b/mlir/test/Target/LLVMIR/amx.mlir
@@ -3,11 +3,11 @@
 // CHECK-LABEL: define void @target(ptr %0)
 // CHECK: %[[c:.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 16)
 // CHECK: call void @llvm.x86.tilestored64.internal(i16 16, i16 16, ptr %0, i64 32, x86_amx %[[c]]
-llvm.func @target(%ptr: !llvm.ptr<i8>) {
+llvm.func @target(%ptr: !llvm.ptr) {
   %c = llvm.mlir.constant(16 : i16) : i16
   %s = llvm.mlir.constant(32 : i64) : i64
   %0 = "amx.tilezero"(%c, %c) : (i16, i16) -> !llvm.array<16 x vector<16xbf16>>
-  "amx.tilestored64"(%c, %c, %ptr, %s, %0) : (i16, i16, !llvm.ptr<i8>, i64, !llvm.array<16 x vector<16xbf16>>) -> ()
+  "amx.tilestored64"(%c, %c, %ptr, %s, %0) : (i16, i16, !llvm.ptr, i64, !llvm.array<16 x vector<16xbf16>>) -> ()
   llvm.return
 }
 

diff --git a/mlir/test/Target/LLVMIR/arm-sme.mlir b/mlir/test/Target/LLVMIR/arm-sme.mlir
index 628d7ba4b649e51..27c94d9aeac8bf4 100644
--- a/mlir/test/Target/LLVMIR/arm-sme.mlir
+++ b/mlir/test/Target/LLVMIR/arm-sme.mlir
@@ -138,42 +138,38 @@ llvm.func @arm_sme_load(%nxv1i1  : vector<[1]xi1>,
                         %nxv4i1  : vector<[4]xi1>,
                         %nxv8i1  : vector<[8]xi1>,
                         %nxv16i1 : vector<[16]xi1>,
-                        %p8      : !llvm.ptr<i8>,
-                        %p16     : !llvm.ptr<i16>,
-                        %p32     : !llvm.ptr<i32>,
-                        %p64     : !llvm.ptr<i64>,
-                        %p128    : !llvm.ptr<i128>) {
+                        %ptr    : !llvm.ptr) {
   %c0 = llvm.mlir.constant(0 : index) : i32
   // CHECK: call void @llvm.aarch64.sme.ld1q.horiz
-  "arm_sme.intr.ld1q.horiz"(%nxv1i1, %p128, %c0, %c0) :
-              (vector<[1]xi1>, !llvm.ptr<i128>, i32, i32) -> ()
+  "arm_sme.intr.ld1q.horiz"(%nxv1i1, %ptr, %c0, %c0) :
+              (vector<[1]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.ld1d.horiz
-  "arm_sme.intr.ld1d.horiz"(%nxv2i1, %p64, %c0, %c0) :
-              (vector<[2]xi1>, !llvm.ptr<i64>, i32, i32) -> ()
+  "arm_sme.intr.ld1d.horiz"(%nxv2i1, %ptr, %c0, %c0) :
+              (vector<[2]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.ld1w.horiz
-  "arm_sme.intr.ld1w.horiz"(%nxv4i1, %p32, %c0, %c0) :
-              (vector<[4]xi1>, !llvm.ptr<i32>, i32, i32) -> ()
+  "arm_sme.intr.ld1w.horiz"(%nxv4i1, %ptr, %c0, %c0) :
+              (vector<[4]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.ld1h.horiz
-  "arm_sme.intr.ld1h.horiz"(%nxv8i1, %p16, %c0, %c0) :
-              (vector<[8]xi1>, !llvm.ptr<i16>, i32, i32) -> ()
+  "arm_sme.intr.ld1h.horiz"(%nxv8i1, %ptr, %c0, %c0) :
+              (vector<[8]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.ld1b.horiz
-  "arm_sme.intr.ld1b.horiz"(%nxv16i1, %p8, %c0, %c0) :
-              (vector<[16]xi1>, !llvm.ptr<i8>, i32, i32) -> ()
+  "arm_sme.intr.ld1b.horiz"(%nxv16i1, %ptr, %c0, %c0) :
+              (vector<[16]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.ld1q.vert
-  "arm_sme.intr.ld1q.vert"(%nxv1i1, %p128, %c0, %c0) :
-              (vector<[1]xi1>, !llvm.ptr<i128>, i32, i32) -> ()
+  "arm_sme.intr.ld1q.vert"(%nxv1i1, %ptr, %c0, %c0) :
+              (vector<[1]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.ld1d.vert
-  "arm_sme.intr.ld1d.vert"(%nxv2i1, %p64, %c0, %c0) :
-              (vector<[2]xi1>, !llvm.ptr<i64>, i32, i32) -> ()
+  "arm_sme.intr.ld1d.vert"(%nxv2i1, %ptr, %c0, %c0) :
+              (vector<[2]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.ld1w.vert
-  "arm_sme.intr.ld1w.vert"(%nxv4i1, %p32, %c0, %c0) :
-              (vector<[4]xi1>, !llvm.ptr<i32>, i32, i32) -> ()
+  "arm_sme.intr.ld1w.vert"(%nxv4i1, %ptr, %c0, %c0) :
+              (vector<[4]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.ld1h.vert
-  "arm_sme.intr.ld1h.vert"(%nxv8i1, %p16, %c0, %c0) :
-              (vector<[8]xi1>, !llvm.ptr<i16>, i32, i32) -> ()
+  "arm_sme.intr.ld1h.vert"(%nxv8i1, %ptr, %c0, %c0) :
+              (vector<[8]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.ld1b.vert
-  "arm_sme.intr.ld1b.vert"(%nxv16i1, %p8, %c0, %c0) :
-              (vector<[16]xi1>, !llvm.ptr<i8>, i32, i32) -> ()
+  "arm_sme.intr.ld1b.vert"(%nxv16i1, %ptr, %c0, %c0) :
+              (vector<[16]xi1>, !llvm.ptr, i32, i32) -> ()
   llvm.return
 }
 
@@ -185,44 +181,40 @@ llvm.func @arm_sme_store(%nxv1i1  : vector<[1]xi1>,
                          %nxv4i1  : vector<[4]xi1>,
                          %nxv8i1  : vector<[8]xi1>,
                          %nxv16i1 : vector<[16]xi1>,
-                         %p8      : !llvm.ptr<i8>,
-                         %p16     : !llvm.ptr<i16>,
-                         %p32     : !llvm.ptr<i32>,
-                         %p64     : !llvm.ptr<i64>,
-                         %p128    : !llvm.ptr<i128>) {
+                         %ptr    : !llvm.ptr) {
   %c0 = llvm.mlir.constant(0 : index) : i32
   // CHECK: call void @llvm.aarch64.sme.st1q.horiz
-  "arm_sme.intr.st1q.horiz"(%nxv1i1, %p128, %c0, %c0) :
-              (vector<[1]xi1>, !llvm.ptr<i128>, i32, i32) -> ()
+  "arm_sme.intr.st1q.horiz"(%nxv1i1, %ptr, %c0, %c0) :
+              (vector<[1]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.st1d.horiz
-  "arm_sme.intr.st1d.horiz"(%nxv2i1, %p64, %c0, %c0) :
-              (vector<[2]xi1>, !llvm.ptr<i64>, i32, i32) -> ()
+  "arm_sme.intr.st1d.horiz"(%nxv2i1, %ptr, %c0, %c0) :
+              (vector<[2]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.st1w.horiz
-  "arm_sme.intr.st1w.horiz"(%nxv4i1, %p32, %c0, %c0) :
-              (vector<[4]xi1>, !llvm.ptr<i32>, i32, i32) -> ()
+  "arm_sme.intr.st1w.horiz"(%nxv4i1, %ptr, %c0, %c0) :
+              (vector<[4]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.st1h.horiz
-  "arm_sme.intr.st1h.horiz"(%nxv8i1, %p16, %c0, %c0) :
-              (vector<[8]xi1>, !llvm.ptr<i16>, i32, i32) -> ()
+  "arm_sme.intr.st1h.horiz"(%nxv8i1, %ptr, %c0, %c0) :
+              (vector<[8]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.st1b.horiz
-  "arm_sme.intr.st1b.horiz"(%nxv16i1, %p8, %c0, %c0) :
-              (vector<[16]xi1>, !llvm.ptr<i8>, i32, i32) -> ()
+  "arm_sme.intr.st1b.horiz"(%nxv16i1, %ptr, %c0, %c0) :
+              (vector<[16]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.st1q.vert
-  "arm_sme.intr.st1q.vert"(%nxv1i1, %p128, %c0, %c0) :
-              (vector<[1]xi1>, !llvm.ptr<i128>, i32, i32) -> ()
+  "arm_sme.intr.st1q.vert"(%nxv1i1, %ptr, %c0, %c0) :
+              (vector<[1]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.st1d.vert
-  "arm_sme.intr.st1d.vert"(%nxv2i1, %p64, %c0, %c0) :
-              (vector<[2]xi1>, !llvm.ptr<i64>, i32, i32) -> ()
+  "arm_sme.intr.st1d.vert"(%nxv2i1, %ptr, %c0, %c0) :
+              (vector<[2]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.st1w.vert
-  "arm_sme.intr.st1w.vert"(%nxv4i1, %p32, %c0, %c0) :
-              (vector<[4]xi1>, !llvm.ptr<i32>, i32, i32) -> ()
+  "arm_sme.intr.st1w.vert"(%nxv4i1, %ptr, %c0, %c0) :
+              (vector<[4]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.st1h.vert
-  "arm_sme.intr.st1h.vert"(%nxv8i1, %p16, %c0, %c0) :
-              (vector<[8]xi1>, !llvm.ptr<i16>, i32, i32) -> ()
+  "arm_sme.intr.st1h.vert"(%nxv8i1, %ptr, %c0, %c0) :
+              (vector<[8]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.st1b.vert
-  "arm_sme.intr.st1b.vert"(%nxv16i1, %p8, %c0, %c0) :
-              (vector<[16]xi1>, !llvm.ptr<i8>, i32, i32) -> ()
+  "arm_sme.intr.st1b.vert"(%nxv16i1, %ptr, %c0, %c0) :
+              (vector<[16]xi1>, !llvm.ptr, i32, i32) -> ()
   // CHECK: call void @llvm.aarch64.sme.str
-  "arm_sme.intr.str"(%c0, %p8) : (i32, !llvm.ptr<i8>) -> ()
+  "arm_sme.intr.str"(%c0, %ptr) : (i32, !llvm.ptr) -> ()
   llvm.return
 }
 

diff --git a/mlir/test/Target/LLVMIR/arm-sve.mlir b/mlir/test/Target/LLVMIR/arm-sve.mlir
index 172a2f7d12d440e..b63d3f06515690a 100644
--- a/mlir/test/Target/LLVMIR/arm-sve.mlir
+++ b/mlir/test/Target/LLVMIR/arm-sve.mlir
@@ -191,44 +191,44 @@ llvm.func @arm_sve_abs_diff(%arg0: vector<[4]xi32>,
 }
 
 // CHECK-LABEL: define void @memcopy
-llvm.func @memcopy(%arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>,
+llvm.func @memcopy(%arg0: !llvm.ptr, %arg1: !llvm.ptr,
                    %arg2: i64, %arg3: i64, %arg4: i64,
-                   %arg5: !llvm.ptr<f32>, %arg6: !llvm.ptr<f32>,
+                   %arg5: !llvm.ptr, %arg6: !llvm.ptr,
                    %arg7: i64, %arg8: i64, %arg9: i64,
                    %arg10: i64) {
-  %0 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %0 = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64,
                                        array<1 x i64>, array<1 x i64>)>
-  %1 = llvm.insertvalue %arg0, %0[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %1 = llvm.insertvalue %arg0, %0[0] : !llvm.struct<(ptr, ptr, i64,
                                                      array<1 x i64>,
                                                      array<1 x i64>)>
-  %2 = llvm.insertvalue %arg1, %1[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %2 = llvm.insertvalue %arg1, %1[1] : !llvm.struct<(ptr, ptr, i64,
                                                      array<1 x i64>,
                                                      array<1 x i64>)>
-  %3 = llvm.insertvalue %arg2, %2[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %3 = llvm.insertvalue %arg2, %2[2] : !llvm.struct<(ptr, ptr, i64,
                                                      array<1 x i64>,
                                                      array<1 x i64>)>
-  %4 = llvm.insertvalue %arg3, %3[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %4 = llvm.insertvalue %arg3, %3[3, 0] : !llvm.struct<(ptr, ptr, i64,
                                                         array<1 x i64>,
                                                         array<1 x i64>)>
-  %5 = llvm.insertvalue %arg4, %4[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %5 = llvm.insertvalue %arg4, %4[4, 0] : !llvm.struct<(ptr, ptr, i64,
                                                         array<1 x i64>,
                                                         array<1 x i64>)>
-  %6 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %6 = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64,
                                        array<1 x i64>,
                                        array<1 x i64>)>
-  %7 = llvm.insertvalue %arg5, %6[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %7 = llvm.insertvalue %arg5, %6[0] : !llvm.struct<(ptr, ptr, i64,
                                                      array<1 x i64>,
                                                      array<1 x i64>)>
-  %8 = llvm.insertvalue %arg6, %7[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %8 = llvm.insertvalue %arg6, %7[1] : !llvm.struct<(ptr, ptr, i64,
                                                      array<1 x i64>,
                                                      array<1 x i64>)>
-  %9 = llvm.insertvalue %arg7, %8[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %9 = llvm.insertvalue %arg7, %8[2] : !llvm.struct<(ptr, ptr, i64,
                                                      array<1 x i64>,
                                                      array<1 x i64>)>
-  %10 = llvm.insertvalue %arg8, %9[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %10 = llvm.insertvalue %arg8, %9[3, 0] : !llvm.struct<(ptr, ptr, i64,
                                                          array<1 x i64>,
                                                          array<1 x i64>)>
-  %11 = llvm.insertvalue %arg9, %10[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %11 = llvm.insertvalue %arg9, %10[4, 0] : !llvm.struct<(ptr, ptr, i64,
                                                          array<1 x i64>,
                                                          array<1 x i64>)>
   %12 = llvm.mlir.constant(0 : index) : i64
@@ -243,23 +243,21 @@ llvm.func @memcopy(%arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>,
   llvm.cond_br %17, ^bb2, ^bb3
 ^bb2:
   // CHECK: extractvalue { ptr, ptr, i64, [1 x i64], [1 x i64] }
-  %18 = llvm.extractvalue %5[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %18 = llvm.extractvalue %5[1] : !llvm.struct<(ptr, ptr, i64,
                                                 array<1 x i64>,
                                                 array<1 x i64>)>
   // CHECK: getelementptr float, ptr
-  %19 = llvm.getelementptr %18[%16] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  %20 = llvm.bitcast %19 : !llvm.ptr<f32> to !llvm.ptr<vector<[4]xf32>>
+  %19 = llvm.getelementptr %18[%16] : (!llvm.ptr, i64) -> !llvm.ptr, f32
   // CHECK: load <vscale x 4 x float>, ptr
-  %21 = llvm.load %20 : !llvm.ptr<vector<[4]xf32>>
+  %21 = llvm.load %19 : !llvm.ptr -> vector<[4]xf32>
   // CHECK: extractvalue { ptr, ptr, i64, [1 x i64], [1 x i64] }
-  %22 = llvm.extractvalue %11[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
+  %22 = llvm.extractvalue %11[1] : !llvm.struct<(ptr, ptr, i64,
                                                  array<1 x i64>,
                                                  array<1 x i64>)>
   // CHECK: getelementptr float, ptr
-  %23 = llvm.getelementptr %22[%16] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  %24 = llvm.bitcast %23 : !llvm.ptr<f32> to !llvm.ptr<vector<[4]xf32>>
+  %23 = llvm.getelementptr %22[%16] : (!llvm.ptr, i64) -> !llvm.ptr, f32
   // CHECK: store <vscale x 4 x float> %{{[0-9]+}}, ptr %{{[0-9]+}}
-  llvm.store %21, %24 : !llvm.ptr<vector<[4]xf32>>
+  llvm.store %21, %23 : vector<[4]xf32>, !llvm.ptr
   %25 = llvm.add %16, %15  : i64
   llvm.br ^bb1(%25 : i64)
 ^bb3:

diff --git a/mlir/test/Target/LLVMIR/llvmir-debug.mlir b/mlir/test/Target/LLVMIR/llvmir-debug.mlir
index 8d1734d7cdc3117..ea962c66cb8eff9 100644
--- a/mlir/test/Target/LLVMIR/llvmir-debug.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-debug.mlir
@@ -89,13 +89,13 @@ llvm.func @func_no_debug() {
 llvm.func @func_with_debug(%arg: i64) {
   // CHECK: %[[ALLOC:.*]] = alloca
   %allocCount = llvm.mlir.constant(1 : i32) : i32
-  %alloc = llvm.alloca %allocCount x i64 : (i32) -> !llvm.ptr<i64>
+  %alloc = llvm.alloca %allocCount x i64 : (i32) -> !llvm.ptr
 
   // CHECK: call void @llvm.dbg.value(metadata i64 %[[ARG]], metadata ![[VAR_LOC:[0-9]+]], metadata !DIExpression())
   // CHECK: call void @llvm.dbg.declare(metadata ptr %[[ALLOC]], metadata ![[ADDR_LOC:[0-9]+]], metadata !DIExpression())
   // CHECK: call void @llvm.dbg.value(metadata i64 %[[ARG]], metadata ![[NO_NAME_VAR:[0-9]+]], metadata !DIExpression())
   llvm.intr.dbg.value #variable = %arg : i64
-  llvm.intr.dbg.declare #variableAddr = %alloc : !llvm.ptr<i64>
+  llvm.intr.dbg.declare #variableAddr = %alloc : !llvm.ptr
   llvm.intr.dbg.value #noNameVariable= %arg : i64
 
   // CHECK: call void @func_no_debug(), !dbg ![[CALLSITE_LOC:[0-9]+]]

diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
index d23991b65523fcf..e586c0cd2720e48 100644
--- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
@@ -427,16 +427,16 @@ llvm.func @masked_load_store_intrinsics(%A: !llvm.ptr, %mask: vector<7xi1>) {
 }
 
 // CHECK-LABEL: @masked_gather_scatter_intrinsics
-llvm.func @masked_gather_scatter_intrinsics(%M: !llvm.vec<7 x ptr<f32>>, %mask: vector<7xi1>) {
+llvm.func @masked_gather_scatter_intrinsics(%M: !llvm.vec<7 x ptr>, %mask: vector<7xi1>) {
   // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> poison)
   %a = llvm.intr.masked.gather %M, %mask { alignment = 1: i32} :
-      (!llvm.vec<7 x ptr<f32>>, vector<7xi1>) -> vector<7xf32>
+      (!llvm.vec<7 x ptr>, vector<7xi1>) -> vector<7xf32>
   // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
   %b = llvm.intr.masked.gather %M, %mask, %a { alignment = 1: i32} :
-      (!llvm.vec<7 x ptr<f32>>, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
+      (!llvm.vec<7 x ptr>, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
   // CHECK: call void @llvm.masked.scatter.v7f32.v7p0(<7 x float> %{{.*}}, <7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}})
   llvm.intr.masked.scatter %b, %M, %mask { alignment = 1: i32} :
-      vector<7xf32>, vector<7xi1> into !llvm.vec<7 x ptr<f32>>
+      vector<7xf32>, vector<7xi1> into !llvm.vec<7 x ptr>
   llvm.return
 }
 

diff --git a/mlir/test/Target/LLVMIR/llvmir-invalid.mlir b/mlir/test/Target/LLVMIR/llvmir-invalid.mlir
index 2d6ccff2d436fea..9b14f5814987d99 100644
--- a/mlir/test/Target/LLVMIR/llvmir-invalid.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-invalid.mlir
@@ -229,9 +229,9 @@ llvm.func @masked_gather_intr_wrong_type(%ptrs : vector<7xf32>, %mask : vector<7
 
 // -----
 
-llvm.func @masked_scatter_intr_wrong_type(%vec : f32, %ptrs : !llvm.vec<7xptr<f32>>, %mask : vector<7xi1>) {
+llvm.func @masked_scatter_intr_wrong_type(%vec : f32, %ptrs : !llvm.vec<7xptr>, %mask : vector<7xi1>) {
   // expected-error @below{{op operand #0 must be LLVM dialect-compatible vector type, but got 'f32'}}
-  llvm.intr.masked.scatter %vec, %ptrs, %mask { alignment = 1: i32} : f32, vector<7xi1> into !llvm.vec<7xptr<f32>>
+  llvm.intr.masked.scatter %vec, %ptrs, %mask { alignment = 1: i32} : f32, vector<7xi1> into !llvm.vec<7xptr>
   llvm.return
 }
 

diff --git a/mlir/test/Target/LLVMIR/llvmir-types.mlir b/mlir/test/Target/LLVMIR/llvmir-types.mlir
index a92d46dfadfe25c..c85fa0101c00d74 100644
--- a/mlir/test/Target/LLVMIR/llvmir-types.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-types.mlir
@@ -40,7 +40,7 @@ llvm.func @f_void_variadic(...)
 // CHECK: declare void @f_void_i32_i32_variadic(i32, i32, ...)
 llvm.func @f_void_i32_i32_variadic(i32, i32, ...)
 // CHECK: declare ptr @f_f_i32_i32()
-llvm.func @f_f_i32_i32() -> !llvm.ptr<func<i32 (i32)>>
+llvm.func @f_f_i32_i32() -> !llvm.ptr
 
 //
 // Integers.
@@ -65,22 +65,12 @@ llvm.func @return_i129() -> i129
 // Pointers.
 //
 
-// CHECK: declare ptr @return_pi8()
-llvm.func @return_pi8() -> !llvm.ptr<i8>
-// CHECK: declare ptr @return_pfloat()
-llvm.func @return_pfloat() -> !llvm.ptr<f32>
-// CHECK: declare ptr @return_ppi8()
-llvm.func @return_ppi8() -> !llvm.ptr<ptr<i8>>
-// CHECK: declare ptr @return_pppppi8()
-llvm.func @return_pppppi8() -> !llvm.ptr<ptr<ptr<ptr<ptr<i8>>>>>
-// CHECK: declare ptr @return_pi8_0()
-llvm.func @return_pi8_0() -> !llvm.ptr<i8, 0>
-// CHECK: declare ptr addrspace(1) @return_pi8_1()
-llvm.func @return_pi8_1() -> !llvm.ptr<i8, 1>
-// CHECK: declare ptr addrspace(42) @return_pi8_42()
-llvm.func @return_pi8_42() -> !llvm.ptr<i8, 42>
-// CHECK: declare ptr addrspace(9) @return_ppi8_42_9()
-llvm.func @return_ppi8_42_9() -> !llvm.ptr<ptr<i8, 42>, 9>
+// CHECK: declare ptr @return_p()
+llvm.func @return_p() -> !llvm.ptr
+// CHECK: declare ptr addrspace(1) @return_p_1()
+llvm.func @return_p_1() -> !llvm.ptr<1>
+// CHECK: declare ptr addrspace(42) @return_p_42()
+llvm.func @return_p_42() -> !llvm.ptr<42>
 
 //
 // Vectors.
@@ -97,7 +87,7 @@ llvm.func @return_vs_4_i32() -> !llvm.vec<?x4 x i32>
 // CHECK: declare <vscale x 8 x half> @return_vs_8_half()
 llvm.func @return_vs_8_half() -> !llvm.vec<?x8 x f16>
 // CHECK: declare <4 x ptr> @return_v_4_pi8()
-llvm.func @return_v_4_pi8() -> !llvm.vec<4xptr<i8>>
+llvm.func @return_v_4_pi8() -> !llvm.vec<4xptr>
 
 //
 // Arrays.
@@ -107,8 +97,8 @@ llvm.func @return_v_4_pi8() -> !llvm.vec<4xptr<i8>>
 llvm.func @return_a10_i32() -> !llvm.array<10 x i32>
 // CHECK: declare [8 x float] @return_a8_float()
 llvm.func @return_a8_float() -> !llvm.array<8 x f32>
-// CHECK: declare [10 x ptr addrspace(4)] @return_a10_pi32_4()
-llvm.func @return_a10_pi32_4() -> !llvm.array<10 x ptr<i32, 4>>
+// CHECK: declare [10 x ptr addrspace(4)] @return_a10_p_4()
+llvm.func @return_a10_p_4() -> !llvm.array<10 x ptr<4>>
 // CHECK: declare [10 x [4 x float]] @return_a10_a4_float()
 llvm.func @return_a10_a4_float() -> !llvm.array<10 x array<4 x f32>>
 
@@ -160,12 +150,9 @@ llvm.func @return_target_ext_params() -> !llvm.target<"target-params", i32, f64,
 // CHECK: %empty = type {}
 // CHECK: %opaque = type opaque
 // CHECK: %long = type { i32, { i32, i1 }, float, ptr }
-// CHECK: %self-recursive = type { ptr }
 // CHECK: %unpacked = type { i32 }
 // CHECK: %packed = type <{ i32 }>
 // CHECK: %"name with spaces and !^$@$#" = type <{ i32 }>
-// CHECK: %mutually-a = type { ptr }
-// CHECK: %mutually-b = type { ptr addrspace(3) }
 // CHECK: %struct-of-arrays = type { [10 x i32] }
 // CHECK: %array-of-structs = type { i32 }
 
@@ -174,9 +161,7 @@ llvm.func @return_s_empty() -> !llvm.struct<"empty", ()>
 // CHECK: declare %opaque
 llvm.func @return_s_opaque() -> !llvm.struct<"opaque", opaque>
 // CHECK: declare %long
-llvm.func @return_s_long() -> !llvm.struct<"long", (i32, struct<(i32, i1)>, f32, ptr<func<void ()>>)>
-// CHECK: declare %self-recursive
-llvm.func @return_s_self_recursive() -> !llvm.struct<"self-recursive", (ptr<struct<"self-recursive">>)>
+llvm.func @return_s_long() -> !llvm.struct<"long", (i32, struct<(i32, i1)>, f32, ptr)>
 // CHECK: declare %unpacked
 llvm.func @return_s_unpacked() -> !llvm.struct<"unpacked", (i32)>
 // CHECK: declare %packed
@@ -184,14 +169,7 @@ llvm.func @return_s_packed() -> !llvm.struct<"packed", packed (i32)>
 // CHECK: declare %"name with spaces and !^$@$#"
 llvm.func @return_s_symbols() -> !llvm.struct<"name with spaces and !^$@$#", packed (i32)>
 
-// CHECK: declare %mutually-a
-llvm.func @return_s_mutually_a() -> !llvm.struct<"mutually-a", (ptr<struct<"mutually-b", (ptr<struct<"mutually-a">, 3>)>>)>
-// CHECK: declare %mutually-b
-llvm.func @return_s_mutually_b() -> !llvm.struct<"mutually-b", (ptr<struct<"mutually-a", (ptr<struct<"mutually-b">>)>, 3>)>
-
 // CHECK: declare %struct-of-arrays
 llvm.func @return_s_struct_of_arrays() -> !llvm.struct<"struct-of-arrays", (array<10 x i32>)>
 // CHECK: declare [10 x %array-of-structs]
 llvm.func @return_s_array_of_structs() -> !llvm.array<10 x struct<"array-of-structs", (i32)>>
-// CHECK: declare ptr
-llvm.func @return_s_ptr_to_struct() -> !llvm.ptr<struct<"ptr-to-struct", (i8)>>

diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
index 7da44b6fbe1ab33..3f27b247edd3e67 100644
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -64,11 +64,11 @@ llvm.mlir.global external @explicit_undef() : i32 {
 }
 
 // CHECK: @int_gep = internal constant ptr getelementptr (i32, ptr @i32_global, i32 2)
-llvm.mlir.global internal constant @int_gep() : !llvm.ptr<i32> {
-  %addr = llvm.mlir.addressof @i32_global : !llvm.ptr<i32>
+llvm.mlir.global internal constant @int_gep() : !llvm.ptr {
+  %addr = llvm.mlir.addressof @i32_global : !llvm.ptr
   %_c0 = llvm.mlir.constant(2: i32) :i32
-  %gepinit = llvm.getelementptr %addr[%_c0] : (!llvm.ptr<i32>, i32) -> !llvm.ptr<i32>
-  llvm.return %gepinit : !llvm.ptr<i32>
+  %gepinit = llvm.getelementptr %addr[%_c0] : (!llvm.ptr, i32) -> !llvm.ptr, i32
+  llvm.return %gepinit : !llvm.ptr
 }
 
 // CHECK{LITERAL}: @dense_float_vector = internal global <3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>
@@ -177,7 +177,7 @@ llvm.mlir.global internal constant @sectionvar("teststring")  {section = ".mysec
 //
 
 // CHECK: declare ptr @malloc(i64)
-llvm.func @malloc(i64) -> !llvm.ptr<i8>
+llvm.func @malloc(i64) -> !llvm.ptr
 // CHECK: declare void @free(ptr)
 
 
@@ -197,15 +197,15 @@ llvm.func @empty() {
 llvm.func @global_refs() {
   // Check load from globals.
   // CHECK: load i32, ptr @i32_global
-  %0 = llvm.mlir.addressof @i32_global : !llvm.ptr<i32>
-  %1 = llvm.load %0 : !llvm.ptr<i32>
+  %0 = llvm.mlir.addressof @i32_global : !llvm.ptr
+  %1 = llvm.load %0 : !llvm.ptr -> i32
 
   // Check the contracted form of load from array constants.
   // CHECK: load i8, ptr @string_const
-  %2 = llvm.mlir.addressof @string_const : !llvm.ptr<array<6 x i8>>
+  %2 = llvm.mlir.addressof @string_const : !llvm.ptr
   %c0 = llvm.mlir.constant(0 : index) : i64
-  %3 = llvm.getelementptr %2[%c0, %c0] : (!llvm.ptr<array<6 x i8>>, i64, i64) -> !llvm.ptr<i8>
-  %4 = llvm.load %3 : !llvm.ptr<i8>
+  %3 = llvm.getelementptr %2[%c0, %c0] : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.array<6 x i8>
+  %4 = llvm.load %3 : !llvm.ptr -> i8
 
   llvm.return
 }
@@ -547,12 +547,11 @@ llvm.func @memref_alloc() {
   %0 = llvm.mlir.constant(10 : index) : i64
   %1 = llvm.mlir.constant(10 : index) : i64
   %2 = llvm.mul %0, %1 : i64
-  %3 = llvm.mlir.undef : !llvm.struct<(ptr<f32>)>
+  %3 = llvm.mlir.undef : !llvm.struct<(ptr)>
   %4 = llvm.mlir.constant(4 : index) : i64
   %5 = llvm.mul %2, %4 : i64
-  %6 = llvm.call @malloc(%5) : (i64) -> !llvm.ptr<i8>
-  %7 = llvm.bitcast %6 : !llvm.ptr<i8> to !llvm.ptr<f32>
-  %8 = llvm.insertvalue %7, %3[0] : !llvm.struct<(ptr<f32>)>
+  %6 = llvm.call @malloc(%5) : (i64) -> !llvm.ptr
+  %7 = llvm.insertvalue %6, %3[0] : !llvm.struct<(ptr)>
 // CHECK-NEXT: ret void
   llvm.return
 }
@@ -566,12 +565,11 @@ llvm.func @store_load_static() {
 // CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 40)
 // CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr } undef, ptr %{{[0-9]+}}, 0
   %0 = llvm.mlir.constant(10 : index) : i64
-  %1 = llvm.mlir.undef : !llvm.struct<(ptr<f32>)>
+  %1 = llvm.mlir.undef : !llvm.struct<(ptr)>
   %2 = llvm.mlir.constant(4 : index) : i64
   %3 = llvm.mul %0, %2 : i64
-  %4 = llvm.call @malloc(%3) : (i64) -> !llvm.ptr<i8>
-  %5 = llvm.bitcast %4 : !llvm.ptr<i8> to !llvm.ptr<f32>
-  %6 = llvm.insertvalue %5, %1[0] : !llvm.struct<(ptr<f32>)>
+  %4 = llvm.call @malloc(%3) : (i64) -> !llvm.ptr
+  %6 = llvm.insertvalue %4, %1[0] : !llvm.struct<(ptr)>
   %7 = llvm.mlir.constant(1.000000e+00 : f32) : f32
   llvm.br ^bb1
 ^bb1:   // pred: ^bb0
@@ -589,9 +587,9 @@ llvm.func @store_load_static() {
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: store float 1.000000e+00, ptr %{{[0-9]+}}
   %12 = llvm.mlir.constant(10 : index) : i64
-  %13 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>)>
-  %14 = llvm.getelementptr %13[%10] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  llvm.store %7, %14 : !llvm.ptr<f32>
+  %13 = llvm.extractvalue %6[0] : !llvm.struct<(ptr)>
+  %14 = llvm.getelementptr %13[%10] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+  llvm.store %7, %14 : f32, !llvm.ptr
   %15 = llvm.mlir.constant(1 : index) : i64
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
   %16 = llvm.add %10, %15 : i64
@@ -614,9 +612,9 @@ llvm.func @store_load_static() {
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = load float, ptr %{{[0-9]+}}
   %21 = llvm.mlir.constant(10 : index) : i64
-  %22 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>)>
-  %23 = llvm.getelementptr %22[%19] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  %24 = llvm.load %23 : !llvm.ptr<f32>
+  %22 = llvm.extractvalue %6[0] : !llvm.struct<(ptr)>
+  %23 = llvm.getelementptr %22[%19] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+  %24 = llvm.load %23 : !llvm.ptr -> f32
   %25 = llvm.mlir.constant(1 : index) : i64
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
   %26 = llvm.add %19, %25 : i64
@@ -633,13 +631,12 @@ llvm.func @store_load_dynamic(%arg0: i64) {
 // CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 %{{[0-9]+}})
 // CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64 } undef, ptr %{{[0-9]+}}, 0
 // CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
-  %0 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, i64)>
+  %0 = llvm.mlir.undef : !llvm.struct<(ptr, i64)>
   %1 = llvm.mlir.constant(4 : index) : i64
   %2 = llvm.mul %arg0, %1 : i64
-  %3 = llvm.call @malloc(%2) : (i64) -> !llvm.ptr<i8>
-  %4 = llvm.bitcast %3 : !llvm.ptr<i8> to !llvm.ptr<f32>
-  %5 = llvm.insertvalue %4, %0[0] : !llvm.struct<(ptr<f32>, i64)>
-  %6 = llvm.insertvalue %arg0, %5[1] : !llvm.struct<(ptr<f32>, i64)>
+  %3 = llvm.call @malloc(%2) : (i64) -> !llvm.ptr
+  %5 = llvm.insertvalue %3, %0[0] : !llvm.struct<(ptr, i64)>
+  %6 = llvm.insertvalue %arg0, %5[1] : !llvm.struct<(ptr, i64)>
   %7 = llvm.mlir.constant(1.000000e+00 : f32) : f32
 // CHECK-NEXT: br label %{{[0-9]+}}
   llvm.br ^bb1
@@ -657,10 +654,10 @@ llvm.func @store_load_dynamic(%arg0: i64) {
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 0
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: store float 1.000000e+00, ptr %{{[0-9]+}}
-  %11 = llvm.extractvalue %6[1] : !llvm.struct<(ptr<f32>, i64)>
-  %12 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>, i64)>
-  %13 = llvm.getelementptr %12[%9] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  llvm.store %7, %13 : !llvm.ptr<f32>
+  %11 = llvm.extractvalue %6[1] : !llvm.struct<(ptr, i64)>
+  %12 = llvm.extractvalue %6[0] : !llvm.struct<(ptr, i64)>
+  %13 = llvm.getelementptr %12[%9] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+  llvm.store %7, %13 : f32, !llvm.ptr
   %14 = llvm.mlir.constant(1 : index) : i64
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
   %15 = llvm.add %9, %14 : i64
@@ -682,10 +679,10 @@ llvm.func @store_load_dynamic(%arg0: i64) {
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 0
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = load float, ptr %{{[0-9]+}}
-  %19 = llvm.extractvalue %6[1] : !llvm.struct<(ptr<f32>, i64)>
-  %20 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>, i64)>
-  %21 = llvm.getelementptr %20[%17] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  %22 = llvm.load %21 : !llvm.ptr<f32>
+  %19 = llvm.extractvalue %6[1] : !llvm.struct<(ptr, i64)>
+  %20 = llvm.extractvalue %6[0] : !llvm.struct<(ptr, i64)>
+  %21 = llvm.getelementptr %20[%17] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+  %22 = llvm.load %21 : !llvm.ptr -> f32
   %23 = llvm.mlir.constant(1 : index) : i64
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
   %24 = llvm.add %17, %23 : i64
@@ -712,14 +709,13 @@ llvm.func @store_load_mixed(%arg0: i64) {
   %3 = llvm.mul %1, %arg0 : i64
   %4 = llvm.mul %3, %2 : i64
   %5 = llvm.mul %4, %0 : i64
-  %6 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, i64, i64)>
+  %6 = llvm.mlir.undef : !llvm.struct<(ptr, i64, i64)>
   %7 = llvm.mlir.constant(4 : index) : i64
   %8 = llvm.mul %5, %7 : i64
-  %9 = llvm.call @malloc(%8) : (i64) -> !llvm.ptr<i8>
-  %10 = llvm.bitcast %9 : !llvm.ptr<i8> to !llvm.ptr<f32>
-  %11 = llvm.insertvalue %10, %6[0] : !llvm.struct<(ptr<f32>, i64, i64)>
-  %12 = llvm.insertvalue %arg0, %11[1] : !llvm.struct<(ptr<f32>, i64, i64)>
-  %13 = llvm.insertvalue %0, %12[2] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %9 = llvm.call @malloc(%8) : (i64) -> !llvm.ptr
+  %11 = llvm.insertvalue %9, %6[0] : !llvm.struct<(ptr, i64, i64)>
+  %12 = llvm.insertvalue %arg0, %11[1] : !llvm.struct<(ptr, i64, i64)>
+  %13 = llvm.insertvalue %0, %12[2] : !llvm.struct<(ptr, i64, i64)>
 
 // CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
 // CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
@@ -740,18 +736,18 @@ llvm.func @store_load_mixed(%arg0: i64) {
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 0
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: store float 4.200000e+01, ptr %{{[0-9]+}}
-  %20 = llvm.extractvalue %13[1] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %20 = llvm.extractvalue %13[1] : !llvm.struct<(ptr, i64, i64)>
   %21 = llvm.mlir.constant(4 : index) : i64
-  %22 = llvm.extractvalue %13[2] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %22 = llvm.extractvalue %13[2] : !llvm.struct<(ptr, i64, i64)>
   %23 = llvm.mul %14, %20 : i64
   %24 = llvm.add %23, %15 : i64
   %25 = llvm.mul %24, %21 : i64
   %26 = llvm.add %25, %16 : i64
   %27 = llvm.mul %26, %22 : i64
   %28 = llvm.add %27, %17 : i64
-  %29 = llvm.extractvalue %13[0] : !llvm.struct<(ptr<f32>, i64, i64)>
-  %30 = llvm.getelementptr %29[%28] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  llvm.store %18, %30 : !llvm.ptr<f32>
+  %29 = llvm.extractvalue %13[0] : !llvm.struct<(ptr, i64, i64)>
+  %30 = llvm.getelementptr %29[%28] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+  llvm.store %18, %30 : f32, !llvm.ptr
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 1
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 2
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, %{{[0-9]+}}
@@ -764,24 +760,24 @@ llvm.func @store_load_mixed(%arg0: i64) {
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = load float, ptr %{{[0-9]+}}
   %31 = llvm.mlir.constant(2 : index) : i64
-  %32 = llvm.extractvalue %13[1] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %32 = llvm.extractvalue %13[1] : !llvm.struct<(ptr, i64, i64)>
   %33 = llvm.mlir.constant(4 : index) : i64
-  %34 = llvm.extractvalue %13[2] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %34 = llvm.extractvalue %13[2] : !llvm.struct<(ptr, i64, i64)>
   %35 = llvm.mul %17, %32 : i64
   %36 = llvm.add %35, %16 : i64
   %37 = llvm.mul %36, %33 : i64
   %38 = llvm.add %37, %15 : i64
   %39 = llvm.mul %38, %34 : i64
   %40 = llvm.add %39, %14 : i64
-  %41 = llvm.extractvalue %13[0] : !llvm.struct<(ptr<f32>, i64, i64)>
-  %42 = llvm.getelementptr %41[%40] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  %43 = llvm.load %42 : !llvm.ptr<f32>
+  %41 = llvm.extractvalue %13[0] : !llvm.struct<(ptr, i64, i64)>
+  %42 = llvm.getelementptr %41[%40] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+  %43 = llvm.load %42 : !llvm.ptr -> f32
 // CHECK-NEXT: ret void
   llvm.return
 }
 
 // CHECK-LABEL: define { ptr, i64 } @memref_args_rets({ ptr } {{%.*}}, { ptr, i64 } {{%.*}}, { ptr, i64 } {{%.*}})
-llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<f32>)>, %arg1: !llvm.struct<(ptr<f32>, i64)>, %arg2: !llvm.struct<(ptr<f32>, i64)>) -> !llvm.struct<(ptr<f32>, i64)> {
+llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr)>, %arg1: !llvm.struct<(ptr, i64)>, %arg2: !llvm.struct<(ptr, i64)>) -> !llvm.struct<(ptr, i64)> {
   %0 = llvm.mlir.constant(7 : index) : i64
 // CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
   %1 = llvm.call @get_index() : () -> i64
@@ -790,17 +786,17 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<f32>)>, %arg1: !llvm.struct
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 7
 // CHECK-NEXT: store float 4.200000e+01, ptr %{{[0-9]+}}
   %3 = llvm.mlir.constant(10 : index) : i64
-  %4 = llvm.extractvalue %arg0[0] : !llvm.struct<(ptr<f32>)>
-  %5 = llvm.getelementptr %4[%0] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  llvm.store %2, %5 : !llvm.ptr<f32>
+  %4 = llvm.extractvalue %arg0[0] : !llvm.struct<(ptr)>
+  %5 = llvm.getelementptr %4[%0] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+  llvm.store %2, %5 : f32, !llvm.ptr
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 1
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 0
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 7
 // CHECK-NEXT: store float 4.200000e+01, ptr %{{[0-9]+}}
-  %6 = llvm.extractvalue %arg1[1] : !llvm.struct<(ptr<f32>, i64)>
-  %7 = llvm.extractvalue %arg1[0] : !llvm.struct<(ptr<f32>, i64)>
-  %8 = llvm.getelementptr %7[%0] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  llvm.store %2, %8 : !llvm.ptr<f32>
+  %6 = llvm.extractvalue %arg1[1] : !llvm.struct<(ptr, i64)>
+  %7 = llvm.extractvalue %arg1[0] : !llvm.struct<(ptr, i64)>
+  %8 = llvm.getelementptr %7[%0] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+  llvm.store %2, %8 : f32, !llvm.ptr
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 1
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 7, %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
@@ -808,12 +804,12 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<f32>)>, %arg1: !llvm.struct
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: store float 4.200000e+01, ptr %{{[0-9]+}}
   %9 = llvm.mlir.constant(10 : index) : i64
-  %10 = llvm.extractvalue %arg2[1] : !llvm.struct<(ptr<f32>, i64)>
+  %10 = llvm.extractvalue %arg2[1] : !llvm.struct<(ptr, i64)>
   %11 = llvm.mul %0, %10 : i64
   %12 = llvm.add %11, %1 : i64
-  %13 = llvm.extractvalue %arg2[0] : !llvm.struct<(ptr<f32>, i64)>
-  %14 = llvm.getelementptr %13[%12] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  llvm.store %2, %14 : !llvm.ptr<f32>
+  %13 = llvm.extractvalue %arg2[0] : !llvm.struct<(ptr, i64)>
+  %14 = llvm.getelementptr %13[%12] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+  llvm.store %2, %14 : f32, !llvm.ptr
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 10, %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
 // CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 %{{[0-9]+}})
@@ -821,28 +817,27 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<f32>)>, %arg1: !llvm.struct
 // CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
   %15 = llvm.mlir.constant(10 : index) : i64
   %16 = llvm.mul %15, %1 : i64
-  %17 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, i64)>
+  %17 = llvm.mlir.undef : !llvm.struct<(ptr, i64)>
   %18 = llvm.mlir.constant(4 : index) : i64
   %19 = llvm.mul %16, %18 : i64
-  %20 = llvm.call @malloc(%19) : (i64) -> !llvm.ptr<i8>
-  %21 = llvm.bitcast %20 : !llvm.ptr<i8> to !llvm.ptr<f32>
-  %22 = llvm.insertvalue %21, %17[0] : !llvm.struct<(ptr<f32>, i64)>
-  %23 = llvm.insertvalue %1, %22[1] : !llvm.struct<(ptr<f32>, i64)>
+  %20 = llvm.call @malloc(%19) : (i64) -> !llvm.ptr
+  %22 = llvm.insertvalue %20, %17[0] : !llvm.struct<(ptr, i64)>
+  %23 = llvm.insertvalue %1, %22[1] : !llvm.struct<(ptr, i64)>
 // CHECK-NEXT: ret { ptr, i64 } %{{[0-9]+}}
-  llvm.return %23 : !llvm.struct<(ptr<f32>, i64)>
+  llvm.return %23 : !llvm.struct<(ptr, i64)>
 }
 
 
 // CHECK-LABEL: define i64 @memref_dim({ ptr, i64, i64 } {{%.*}})
-llvm.func @memref_dim(%arg0: !llvm.struct<(ptr<f32>, i64, i64)>) -> i64 {
+llvm.func @memref_dim(%arg0: !llvm.struct<(ptr, i64, i64)>) -> i64 {
 // Expecting this to create an LLVM constant.
   %0 = llvm.mlir.constant(42 : index) : i64
 // CHECK-NEXT: %2 = extractvalue { ptr, i64, i64 } %0, 1
-  %1 = llvm.extractvalue %arg0[1] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %1 = llvm.extractvalue %arg0[1] : !llvm.struct<(ptr, i64, i64)>
 // Expecting this to create an LLVM constant.
   %2 = llvm.mlir.constant(10 : index) : i64
 // CHECK-NEXT: %3 = extractvalue { ptr, i64, i64 } %0, 2
-  %3 = llvm.extractvalue %arg0[2] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %3 = llvm.extractvalue %arg0[2] : !llvm.struct<(ptr, i64, i64)>
 // Checking that the constant for d0 has been created.
 // CHECK-NEXT: %4 = add i64 42, %2
   %4 = llvm.add %0, %1 : i64
@@ -857,22 +852,22 @@ llvm.func @memref_dim(%arg0: !llvm.struct<(ptr<f32>, i64, i64)>) -> i64 {
 
 llvm.func @get_i64() -> i64
 llvm.func @get_f32() -> f32
-llvm.func @get_memref() -> !llvm.struct<(ptr<f32>, i64, i64)>
+llvm.func @get_memref() -> !llvm.struct<(ptr, i64, i64)>
 
 // CHECK-LABEL: define { i64, float, { ptr, i64, i64 } } @multireturn()
-llvm.func @multireturn() -> !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)> {
+llvm.func @multireturn() -> !llvm.struct<(i64, f32, struct<(ptr, i64, i64)>)> {
   %0 = llvm.call @get_i64() : () -> i64
   %1 = llvm.call @get_f32() : () -> f32
-  %2 = llvm.call @get_memref() : () -> !llvm.struct<(ptr<f32>, i64, i64)>
+  %2 = llvm.call @get_memref() : () -> !llvm.struct<(ptr, i64, i64)>
 // CHECK:        %{{[0-9]+}} = insertvalue { i64, float, { ptr, i64, i64 } } undef, i64 %{{[0-9]+}}, 0
 // CHECK-NEXT:   %{{[0-9]+}} = insertvalue { i64, float, { ptr, i64, i64 } } %{{[0-9]+}}, float %{{[0-9]+}}, 1
 // CHECK-NEXT:   %{{[0-9]+}} = insertvalue { i64, float, { ptr, i64, i64 } } %{{[0-9]+}}, { ptr, i64, i64 } %{{[0-9]+}}, 2
 // CHECK-NEXT:   ret { i64, float, { ptr, i64, i64 } } %{{[0-9]+}}
-  %3 = llvm.mlir.undef : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
-  %4 = llvm.insertvalue %0, %3[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
-  %5 = llvm.insertvalue %1, %4[1] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
-  %6 = llvm.insertvalue %2, %5[2] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
-  llvm.return %6 : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
+  %3 = llvm.mlir.undef : !llvm.struct<(i64, f32, struct<(ptr, i64, i64)>)>
+  %4 = llvm.insertvalue %0, %3[0] : !llvm.struct<(i64, f32, struct<(ptr, i64, i64)>)>
+  %5 = llvm.insertvalue %1, %4[1] : !llvm.struct<(i64, f32, struct<(ptr, i64, i64)>)>
+  %6 = llvm.insertvalue %2, %5[2] : !llvm.struct<(i64, f32, struct<(ptr, i64, i64)>)>
+  llvm.return %6 : !llvm.struct<(i64, f32, struct<(ptr, i64, i64)>)>
 }
 
 
@@ -882,10 +877,10 @@ llvm.func @multireturn_caller() {
 // CHECK-NEXT:   [[ret0:%[0-9]+]] = extractvalue { i64, float, { ptr, i64, i64 } } %1, 0
 // CHECK-NEXT:   [[ret1:%[0-9]+]] = extractvalue { i64, float, { ptr, i64, i64 } } %1, 1
 // CHECK-NEXT:   [[ret2:%[0-9]+]] = extractvalue { i64, float, { ptr, i64, i64 } } %1, 2
-  %0 = llvm.call @multireturn() : () -> !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
-  %1 = llvm.extractvalue %0[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
-  %2 = llvm.extractvalue %0[1] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
-  %3 = llvm.extractvalue %0[2] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
+  %0 = llvm.call @multireturn() : () -> !llvm.struct<(i64, f32, struct<(ptr, i64, i64)>)>
+  %1 = llvm.extractvalue %0[0] : !llvm.struct<(i64, f32, struct<(ptr, i64, i64)>)>
+  %2 = llvm.extractvalue %0[1] : !llvm.struct<(i64, f32, struct<(ptr, i64, i64)>)>
+  %3 = llvm.extractvalue %0[2] : !llvm.struct<(i64, f32, struct<(ptr, i64, i64)>)>
   %4 = llvm.mlir.constant(42) : i64
 // CHECK:   add i64 [[ret0]], 42
   %5 = llvm.add %1, %4 : i64
@@ -895,18 +890,18 @@ llvm.func @multireturn_caller() {
   %8 = llvm.mlir.constant(0 : index) : i64
   %9 = llvm.mlir.constant(42 : index) : i64
 // CHECK:   extractvalue { ptr, i64, i64 } [[ret2]], 0
-  %10 = llvm.extractvalue %3[1] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %10 = llvm.extractvalue %3[1] : !llvm.struct<(ptr, i64, i64)>
   %11 = llvm.mlir.constant(10 : index) : i64
-  %12 = llvm.extractvalue %3[2] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %12 = llvm.extractvalue %3[2] : !llvm.struct<(ptr, i64, i64)>
   %13 = llvm.mul %8, %10 : i64
   %14 = llvm.add %13, %8 : i64
   %15 = llvm.mul %14, %11 : i64
   %16 = llvm.add %15, %8 : i64
   %17 = llvm.mul %16, %12 : i64
   %18 = llvm.add %17, %8 : i64
-  %19 = llvm.extractvalue %3[0] : !llvm.struct<(ptr<f32>, i64, i64)>
-  %20 = llvm.getelementptr %19[%18] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  %21 = llvm.load %20 : !llvm.ptr<f32>
+  %19 = llvm.extractvalue %3[0] : !llvm.struct<(ptr, i64, i64)>
+  %20 = llvm.getelementptr %19[%18] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+  %21 = llvm.load %20 : !llvm.ptr -> f32
   llvm.return
 }
 
@@ -987,11 +982,6 @@ llvm.func @vector_splat_nonzero_scalable() -> vector<[4]xf32> {
   llvm.return %0 : vector<[4]xf32>
 }
 
-// CHECK-LABEL: @f8_ptrs(ptr {{%.*}}, ptr {{%.*}})
-llvm.func @f8_ptrs(%arg0: !llvm.ptr<f8E5M2>, %arg1: !llvm.ptr<f8E4M3FN>) {
-  llvm.return
-}
-
 // CHECK-LABEL: @ops
 llvm.func @ops(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32) -> !llvm.struct<(f32, i32)> {
 // CHECK-NEXT: fsub float %0, %1
@@ -1040,12 +1030,12 @@ llvm.func @ops(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32) -> !llvm.struct<(
 }
 
 // CHECK-LABEL: @gep
-llvm.func @gep(%ptr: !llvm.ptr<struct<(i32, struct<(i32, f32)>)>>, %idx: i64,
-               %ptr2: !llvm.ptr<struct<(array<10 x f32>)>>) {
+llvm.func @gep(%ptr: !llvm.ptr, %idx: i64,
+               %ptr2: !llvm.ptr) {
   // CHECK: = getelementptr { i32, { i32, float } }, ptr %{{.*}}, i64 %{{.*}}, i32 1, i32 0
-  llvm.getelementptr %ptr[%idx, 1, 0] : (!llvm.ptr<struct<(i32, struct<(i32, f32)>)>>, i64) -> !llvm.ptr<i32>
+  llvm.getelementptr %ptr[%idx, 1, 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(i32, struct<(i32, f32)>)>
   // CHECK: = getelementptr inbounds { [10 x float] }, ptr %{{.*}}, i64 %{{.*}}, i32 0, i64 %{{.*}}
-  llvm.getelementptr inbounds %ptr2[%idx, 0, %idx] : (!llvm.ptr<struct<(array<10 x f32>)>>, i64, i64) -> !llvm.ptr<f32>
+  llvm.getelementptr inbounds %ptr2[%idx, 0, %idx] : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.struct<(array<10 x f32>)>
   llvm.return
 }
 
@@ -1093,44 +1083,44 @@ llvm.func @cond_br_arguments(%arg0: i1, %arg1: i1) {
 }
 
 // CHECK-LABEL: define void @llvm_noalias(ptr noalias {{%*.}})
-llvm.func @llvm_noalias(%arg0: !llvm.ptr<f32> {llvm.noalias}) {
+llvm.func @llvm_noalias(%arg0: !llvm.ptr {llvm.noalias}) {
   llvm.return
 }
 
 // CHECK-LABEL: declare void @llvm_noalias_decl(ptr noalias)
-llvm.func @llvm_noalias_decl(!llvm.ptr<f32> {llvm.noalias})
+llvm.func @llvm_noalias_decl(!llvm.ptr {llvm.noalias})
 
 // CHECK-LABEL: define void @byrefattr(ptr byref(i32) %
-llvm.func @byrefattr(%arg0: !llvm.ptr<i32> {llvm.byref = i32}) {
+llvm.func @byrefattr(%arg0: !llvm.ptr {llvm.byref = i32}) {
   llvm.return
 }
 
 // CHECK-LABEL: declare void @byrefattr_decl(ptr byref(i32))
-llvm.func @byrefattr_decl(!llvm.ptr<i32> {llvm.byref = i32})
+llvm.func @byrefattr_decl(!llvm.ptr {llvm.byref = i32})
 
 // CHECK-LABEL: define void @byvalattr(ptr byval(i32) %
-llvm.func @byvalattr(%arg0: !llvm.ptr<i32> {llvm.byval = i32}) {
+llvm.func @byvalattr(%arg0: !llvm.ptr {llvm.byval = i32}) {
   llvm.return
 }
 
 // CHECK-LABEL: declare void @byvalattr_decl(ptr byval(i32))
-llvm.func @byvalattr_decl(!llvm.ptr<i32> {llvm.byval = i32})
+llvm.func @byvalattr_decl(!llvm.ptr {llvm.byval = i32})
 
 // CHECK-LABEL: define void @sretattr(ptr sret(i32) %
-llvm.func @sretattr(%arg0: !llvm.ptr<i32> {llvm.sret = i32}) {
+llvm.func @sretattr(%arg0: !llvm.ptr {llvm.sret = i32}) {
   llvm.return
 }
 
 // CHECK-LABEL: declare void @sretattr_decl(ptr sret(i32))
-llvm.func @sretattr_decl(!llvm.ptr<i32> {llvm.sret = i32})
+llvm.func @sretattr_decl(!llvm.ptr {llvm.sret = i32})
 
 // CHECK-LABEL: define void @nestattr(ptr nest %
-llvm.func @nestattr(%arg0: !llvm.ptr<i32> {llvm.nest}) {
+llvm.func @nestattr(%arg0: !llvm.ptr {llvm.nest}) {
   llvm.return
 }
 
 // CHECK-LABEL: declare void @nestattr_decl(ptr nest)
-llvm.func @nestattr_decl(!llvm.ptr<i32> {llvm.nest})
+llvm.func @nestattr_decl(!llvm.ptr {llvm.nest})
 
 // CHECK-LABEL: define void @noundefattr(i32 noundef %
 llvm.func @noundefattr(%arg0: i32 {llvm.noundef}) {
@@ -1141,20 +1131,20 @@ llvm.func @noundefattr(%arg0: i32 {llvm.noundef}) {
 llvm.func @noundefattr_decl(i32 {llvm.noundef})
 
 // CHECK-LABEL: define void @llvm_align(ptr align 4 {{%*.}})
-llvm.func @llvm_align(%arg0: !llvm.ptr<f32> {llvm.align = 4}) {
+llvm.func @llvm_align(%arg0: !llvm.ptr {llvm.align = 4}) {
   llvm.return
 }
 
 // CHECK-LABEL: declare void @llvm_align_decl(ptr align 4)
-llvm.func @llvm_align_decl(!llvm.ptr<f32> {llvm.align = 4})
+llvm.func @llvm_align_decl(!llvm.ptr {llvm.align = 4})
 
 // CHECK-LABEL: define void @inallocaattr(ptr inalloca(i32) %
-llvm.func @inallocaattr(%arg0: !llvm.ptr<i32> {llvm.inalloca = i32}) {
+llvm.func @inallocaattr(%arg0: !llvm.ptr {llvm.inalloca = i32}) {
   llvm.return
 }
 
 // CHECK-LABEL: declare void @inallocaattr_decl(ptr inalloca(i32))
-llvm.func @inallocaattr_decl(!llvm.ptr<i32> {llvm.inalloca = i32})
+llvm.func @inallocaattr_decl(!llvm.ptr {llvm.inalloca = i32})
 
 // CHECK-LABEL: define void @signextattr(i1 signext %
 llvm.func @signextattr(%arg0: i1 {llvm.signext}) {
@@ -1206,13 +1196,13 @@ llvm.func @alignstackattr_decl(!llvm.ptr {llvm.alignstack = 32 : i64})
 llvm.func @writeonlyattr_decl(!llvm.ptr {llvm.writeonly})
 
 // CHECK-LABEL: declare align 4 ptr @alignattr_ret_decl()
-llvm.func @alignattr_ret_decl() -> (!llvm.ptr<i32> {llvm.align = 4})
+llvm.func @alignattr_ret_decl() -> (!llvm.ptr {llvm.align = 4})
 
 // CHECK-LABEL: declare noalias ptr @noaliasattr_ret_decl()
-llvm.func @noaliasattr_ret_decl() -> (!llvm.ptr<i32> {llvm.noalias})
+llvm.func @noaliasattr_ret_decl() -> (!llvm.ptr {llvm.noalias})
 
 // CHECK-LABEL: declare noundef ptr @noundefattr_ret_decl()
-llvm.func @noundefattr_ret_decl() -> (!llvm.ptr<i32> {llvm.noundef})
+llvm.func @noundefattr_ret_decl() -> (!llvm.ptr {llvm.noundef})
 
 // CHECK-LABEL: declare signext i1 @signextattr_ret_decl()
 llvm.func @signextattr_ret_decl() -> (i1 {llvm.signext})
@@ -1252,8 +1242,8 @@ llvm.func @indirect_varargs_call(%arg0 : !llvm.ptr, %arg1 : i32) {
 llvm.func @intpointerconversion(%arg0 : i32) -> i32 {
 // CHECK:      %2 = inttoptr i32 %0 to ptr
 // CHECK-NEXT: %3 = ptrtoint ptr %2 to i32
-  %1 = llvm.inttoptr %arg0 : i32 to !llvm.ptr<i32>
-  %2 = llvm.ptrtoint %1 : !llvm.ptr<i32> to i32
+  %1 = llvm.inttoptr %arg0 : i32 to !llvm.ptr
+  %2 = llvm.ptrtoint %1 : !llvm.ptr to i32
   llvm.return %2 : i32
 }
 
@@ -1270,10 +1260,10 @@ llvm.func @fpconversion(%arg0 : i32) -> i32 {
 }
 
 // CHECK-LABEL: @addrspace
-llvm.func @addrspace(%arg0 : !llvm.ptr<i32>) -> !llvm.ptr<i32, 2> {
+llvm.func @addrspace(%arg0 : !llvm.ptr) -> !llvm.ptr<2> {
 // CHECK: %2 = addrspacecast ptr %0 to ptr addrspace(2)
-  %1 = llvm.addrspacecast %arg0 : !llvm.ptr<i32> to !llvm.ptr<i32, 2>
-  llvm.return %1 : !llvm.ptr<i32, 2>
+  %1 = llvm.addrspacecast %arg0 : !llvm.ptr to !llvm.ptr<2>
+  llvm.return %1 : !llvm.ptr<2>
 }
 
 llvm.func @stringconstant() -> !llvm.array<12 x i8> {
@@ -1403,11 +1393,11 @@ llvm.func @alloca(%size : i64) {
   // Alignment automatically set by the LLVM IR builder when alignment attribute
   // is 0.
   //  CHECK: alloca {{.*}} align 4
-  llvm.alloca %size x i32 {alignment = 0} : (i64) -> (!llvm.ptr<i32>)
+  llvm.alloca %size x i32 {alignment = 0} : (i64) -> (!llvm.ptr)
   // CHECK-NEXT: alloca {{.*}} align 8
-  llvm.alloca %size x i32 {alignment = 8} : (i64) -> (!llvm.ptr<i32>)
+  llvm.alloca %size x i32 {alignment = 8} : (i64) -> (!llvm.ptr)
   // CHECK-NEXT: alloca {{.*}} addrspace(3)
-  llvm.alloca %size x i32 {alignment = 0} : (i64) -> (!llvm.ptr<i32, 3>)
+  llvm.alloca %size x i32 {alignment = 0} : (i64) -> (!llvm.ptr<3>)
   // CHECK-NEXT: alloca inalloca {{.*}} align 4
   llvm.alloca inalloca %size x i32 : (i64) -> !llvm.ptr
   llvm.return
@@ -1444,10 +1434,10 @@ llvm.func @integer_extension_and_truncation(%a : i32) {
 
 // Check that the auxiliary `null` operation is converted into a `null` value.
 // CHECK-LABEL: @null
-llvm.func @null() -> !llvm.ptr<i32> {
-  %0 = llvm.mlir.zero : !llvm.ptr<i32>
+llvm.func @null() -> !llvm.ptr {
+  %0 = llvm.mlir.zero : !llvm.ptr
   // CHECK: ret ptr null
-  llvm.return %0 : !llvm.ptr<i32>
+  llvm.return %0 : !llvm.ptr
 }
 
 // Check that dense elements attributes are exported properly in constants.
@@ -1471,54 +1461,54 @@ llvm.func @elements_constant_3d_array() -> !llvm.array<2 x array<2 x array<2 x i
 
 // CHECK-LABEL: @atomicrmw
 llvm.func @atomicrmw(
-    %f32_ptr : !llvm.ptr<f32>, %f32 : f32,
-    %i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
+    %f32_ptr : !llvm.ptr, %f32 : f32,
+    %i32_ptr : !llvm.ptr, %i32 : i32) {
   // CHECK: atomicrmw fadd ptr %{{.*}}, float %{{.*}} monotonic
-  %0 = llvm.atomicrmw fadd %f32_ptr, %f32 monotonic : !llvm.ptr<f32>, f32
+  %0 = llvm.atomicrmw fadd %f32_ptr, %f32 monotonic : !llvm.ptr, f32
   // CHECK: atomicrmw fsub ptr %{{.*}}, float %{{.*}} monotonic
-  %1 = llvm.atomicrmw fsub %f32_ptr, %f32 monotonic : !llvm.ptr<f32>, f32
+  %1 = llvm.atomicrmw fsub %f32_ptr, %f32 monotonic : !llvm.ptr, f32
   // CHECK: atomicrmw fmax ptr %{{.*}}, float %{{.*}} monotonic
-  %2 = llvm.atomicrmw fmax %f32_ptr, %f32 monotonic : !llvm.ptr<f32>, f32
+  %2 = llvm.atomicrmw fmax %f32_ptr, %f32 monotonic : !llvm.ptr, f32
   // CHECK: atomicrmw fmin ptr %{{.*}}, float %{{.*}} monotonic
-  %3 = llvm.atomicrmw fmin %f32_ptr, %f32 monotonic : !llvm.ptr<f32>, f32
+  %3 = llvm.atomicrmw fmin %f32_ptr, %f32 monotonic : !llvm.ptr, f32
   // CHECK: atomicrmw xchg ptr %{{.*}}, float %{{.*}} monotonic
-  %4 = llvm.atomicrmw xchg %f32_ptr, %f32 monotonic : !llvm.ptr<f32>, f32
+  %4 = llvm.atomicrmw xchg %f32_ptr, %f32 monotonic : !llvm.ptr, f32
   // CHECK: atomicrmw add ptr %{{.*}}, i32 %{{.*}} acquire
-  %5 = llvm.atomicrmw add %i32_ptr, %i32 acquire : !llvm.ptr<i32>, i32
+  %5 = llvm.atomicrmw add %i32_ptr, %i32 acquire : !llvm.ptr, i32
   // CHECK: atomicrmw sub ptr %{{.*}}, i32 %{{.*}} release
-  %6 = llvm.atomicrmw sub %i32_ptr, %i32 release : !llvm.ptr<i32>, i32
+  %6 = llvm.atomicrmw sub %i32_ptr, %i32 release : !llvm.ptr, i32
   // CHECK: atomicrmw and ptr %{{.*}}, i32 %{{.*}} acq_rel
-  %7 = llvm.atomicrmw _and %i32_ptr, %i32 acq_rel : !llvm.ptr<i32>, i32
+  %7 = llvm.atomicrmw _and %i32_ptr, %i32 acq_rel : !llvm.ptr, i32
   // CHECK: atomicrmw nand ptr %{{.*}}, i32 %{{.*}} seq_cst
-  %8 = llvm.atomicrmw nand %i32_ptr, %i32 seq_cst : !llvm.ptr<i32>, i32
+  %8 = llvm.atomicrmw nand %i32_ptr, %i32 seq_cst : !llvm.ptr, i32
   // CHECK: atomicrmw or ptr %{{.*}}, i32 %{{.*}} monotonic
-  %9 = llvm.atomicrmw _or %i32_ptr, %i32 monotonic : !llvm.ptr<i32>, i32
+  %9 = llvm.atomicrmw _or %i32_ptr, %i32 monotonic : !llvm.ptr, i32
   // CHECK: atomicrmw xor ptr %{{.*}}, i32 %{{.*}} monotonic
-  %10 = llvm.atomicrmw _xor %i32_ptr, %i32 monotonic : !llvm.ptr<i32>, i32
+  %10 = llvm.atomicrmw _xor %i32_ptr, %i32 monotonic : !llvm.ptr, i32
   // CHECK: atomicrmw max ptr %{{.*}}, i32 %{{.*}} monotonic
-  %11 = llvm.atomicrmw max %i32_ptr, %i32 monotonic : !llvm.ptr<i32>, i32
+  %11 = llvm.atomicrmw max %i32_ptr, %i32 monotonic : !llvm.ptr, i32
   // CHECK: atomicrmw min ptr %{{.*}}, i32 %{{.*}} monotonic
-  %12 = llvm.atomicrmw min %i32_ptr, %i32 monotonic : !llvm.ptr<i32>, i32
+  %12 = llvm.atomicrmw min %i32_ptr, %i32 monotonic : !llvm.ptr, i32
   // CHECK: atomicrmw umax ptr %{{.*}}, i32 %{{.*}} monotonic
-  %13 = llvm.atomicrmw umax %i32_ptr, %i32 monotonic : !llvm.ptr<i32>, i32
+  %13 = llvm.atomicrmw umax %i32_ptr, %i32 monotonic : !llvm.ptr, i32
   // CHECK: atomicrmw umin ptr %{{.*}}, i32 %{{.*}} monotonic
-  %14 = llvm.atomicrmw umin %i32_ptr, %i32 monotonic : !llvm.ptr<i32>, i32
+  %14 = llvm.atomicrmw umin %i32_ptr, %i32 monotonic : !llvm.ptr, i32
   // CHECK: atomicrmw uinc_wrap ptr %{{.*}}, i32 %{{.*}} monotonic
-  %15 = llvm.atomicrmw uinc_wrap %i32_ptr, %i32 monotonic : !llvm.ptr<i32>, i32
+  %15 = llvm.atomicrmw uinc_wrap %i32_ptr, %i32 monotonic : !llvm.ptr, i32
   // CHECK: atomicrmw udec_wrap ptr %{{.*}}, i32 %{{.*}} monotonic
-  %16 = llvm.atomicrmw udec_wrap %i32_ptr, %i32 monotonic : !llvm.ptr<i32>, i32
+  %16 = llvm.atomicrmw udec_wrap %i32_ptr, %i32 monotonic : !llvm.ptr, i32
 
   // CHECK: atomicrmw volatile
   // CHECK-SAME:  syncscope("singlethread")
   // CHECK-SAME:  align 8
-  %17 = llvm.atomicrmw volatile udec_wrap %i32_ptr, %i32 syncscope("singlethread") monotonic {alignment = 8 : i64} : !llvm.ptr<i32>, i32
+  %17 = llvm.atomicrmw volatile udec_wrap %i32_ptr, %i32 syncscope("singlethread") monotonic {alignment = 8 : i64} : !llvm.ptr, i32
   llvm.return
 }
 
 // CHECK-LABEL: @cmpxchg
-llvm.func @cmpxchg(%ptr : !llvm.ptr<i32>, %cmp : i32, %val: i32) {
+llvm.func @cmpxchg(%ptr : !llvm.ptr, %cmp : i32, %val: i32) {
   // CHECK: cmpxchg ptr %{{.*}}, i32 %{{.*}}, i32 %{{.*}} acq_rel monotonic
-  %0 = llvm.cmpxchg %ptr, %cmp, %val acq_rel monotonic : !llvm.ptr<i32>, i32
+  %0 = llvm.cmpxchg %ptr, %cmp, %val acq_rel monotonic : !llvm.ptr, i32
   // CHECK: %{{[0-9]+}} = extractvalue { i32, i1 } %{{[0-9]+}}, 0
   %1 = llvm.extractvalue %0[0] : !llvm.struct<(i32, i1)>
   // CHECK: %{{[0-9]+}} = extractvalue { i32, i1 } %{{[0-9]+}}, 1
@@ -1527,14 +1517,14 @@ llvm.func @cmpxchg(%ptr : !llvm.ptr<i32>, %cmp : i32, %val: i32) {
   // CHECK:  cmpxchg weak volatile
   // CHECK-SAME:  syncscope("singlethread")
   // CHECK-SAME:  align 8
-  %3 = llvm.cmpxchg weak volatile %ptr, %cmp, %val syncscope("singlethread") acq_rel monotonic {alignment = 8 : i64} : !llvm.ptr<i32>, i32
+  %3 = llvm.cmpxchg weak volatile %ptr, %cmp, %val syncscope("singlethread") acq_rel monotonic {alignment = 8 : i64} : !llvm.ptr, i32
   llvm.return
 }
 
-llvm.mlir.global external constant @_ZTIi() : !llvm.ptr<i8>
-llvm.func @foo(!llvm.ptr<i8>)
-llvm.func @vararg_foo(!llvm.ptr<i8>, ...)
-llvm.func @bar(!llvm.ptr<i8>) -> !llvm.ptr<i8>
+llvm.mlir.global external constant @_ZTIi() : !llvm.ptr
+llvm.func @foo(!llvm.ptr)
+llvm.func @vararg_foo(!llvm.ptr, ...)
+llvm.func @bar(!llvm.ptr) -> !llvm.ptr
 llvm.func @__gxx_personality_v0(...) -> i32
 
 // CHECK-LABEL: @invokeLandingpad
@@ -1542,14 +1532,13 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali
 // CHECK: %[[a1:[0-9]+]] = alloca i8
   %0 = llvm.mlir.constant(0 : i32) : i32
   %1 = llvm.mlir.constant(dense<0> : vector<1xi8>) : !llvm.array<1 x i8>
-  %2 = llvm.mlir.addressof @_ZTIi : !llvm.ptr<ptr<i8>>
-  %3 = llvm.bitcast %2 : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
-  %4 = llvm.mlir.zero : !llvm.ptr<ptr<i8>>
+  %2 = llvm.mlir.addressof @_ZTIi : !llvm.ptr
+  %4 = llvm.mlir.zero : !llvm.ptr
   %5 = llvm.mlir.constant(1 : i32) : i32
-  %6 = llvm.alloca %5 x i8 : (i32) -> !llvm.ptr<i8>
+  %6 = llvm.alloca %5 x i8 : (i32) -> !llvm.ptr
 // CHECK: invoke void @foo(ptr %[[a1]])
 // CHECK-NEXT: to label %[[normal:[0-9]+]] unwind label %[[unwind:[0-9]+]]
-  llvm.invoke @foo(%6) to ^bb2 unwind ^bb1 : (!llvm.ptr<i8>) -> ()
+  llvm.invoke @foo(%6) to ^bb2 unwind ^bb1 : (!llvm.ptr) -> ()
 
 // CHECK: [[unwind]]:
 ^bb1:
@@ -1557,7 +1546,7 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali
 // CHECK-NEXT:             catch ptr null
 // CHECK-NEXT:             catch ptr @_ZTIi
 // CHECK-NEXT:             filter [1 x i8] zeroinitializer
-  %7 = llvm.landingpad (catch %4 : !llvm.ptr<ptr<i8>>) (catch %3 : !llvm.ptr<i8>) (filter %1 : !llvm.array<1 x i8>) : !llvm.struct<(ptr<i8>, i32)>
+  %7 = llvm.landingpad (catch %4 : !llvm.ptr) (catch %2 : !llvm.ptr) (filter %1 : !llvm.array<1 x i8>) : !llvm.struct<(ptr, i32)>
 // CHECK: br label %[[final:[0-9]+]]
   llvm.br ^bb3
 
@@ -1570,18 +1559,18 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali
 // CHECK-NEXT: %{{[0-9]+}} = invoke ptr @bar(ptr %[[a1]])
 // CHECK-NEXT:          to label %[[normal]] unwind label %[[unwind]]
 ^bb3:	// pred: ^bb1
-  %8 = llvm.invoke @bar(%6) to ^bb2 unwind ^bb1 : (!llvm.ptr<i8>) -> !llvm.ptr<i8>
+  %8 = llvm.invoke @bar(%6) to ^bb2 unwind ^bb1 : (!llvm.ptr) -> !llvm.ptr
 
 // CHECK: [[BB4:.*]]:
 // CHECK: invoke void (ptr, ...) @vararg_foo(ptr %[[a1]], i32 0)
 ^bb4:
-  llvm.invoke @vararg_foo(%6, %0) to ^bb2 unwind ^bb1 vararg(!llvm.func<void (ptr<i8>, ...)>) : (!llvm.ptr<i8>, i32) -> ()
+  llvm.invoke @vararg_foo(%6, %0) to ^bb2 unwind ^bb1 vararg(!llvm.func<void (ptr, ...)>) : (!llvm.ptr, i32) -> ()
 
 // CHECK: [[BB5:.*]]:
 // CHECK: invoke void (ptr, ...) undef(ptr %[[a1]], i32 0)
 ^bb5:
   %9 = llvm.mlir.undef : !llvm.ptr
-  llvm.invoke %9(%6, %0) to ^bb2 unwind ^bb1 vararg(!llvm.func<void (ptr<i8>, ...)>) : !llvm.ptr, (!llvm.ptr<i8>, i32) -> ()
+  llvm.invoke %9(%6, %0) to ^bb2 unwind ^bb1 vararg(!llvm.func<void (ptr, ...)>) : !llvm.ptr, (!llvm.ptr, i32) -> ()
 }
 
 // -----
@@ -1591,7 +1580,7 @@ llvm.func @__gxx_personality_v0(...) -> i32
 
 // CHECK-LABEL: @invoke_result
 // CHECK-SAME: %[[a0:[0-9]+]]
-llvm.func @invoke_result(%arg0 : !llvm.ptr<i8>) attributes { personality = @__gxx_personality_v0 } {
+llvm.func @invoke_result(%arg0 : !llvm.ptr) attributes { personality = @__gxx_personality_v0 } {
 // CHECK: %[[a1:[0-9]+]] = invoke i8 @foo()
 // CHECK-NEXT: to label %[[normal:[0-9]+]] unwind label %[[unwind:[0-9]+]]
     %0 = llvm.invoke @foo() to ^bb1 unwind ^bb2 : () -> i8
@@ -1600,7 +1589,7 @@ llvm.func @invoke_result(%arg0 : !llvm.ptr<i8>) attributes { personality = @__gx
 // CHECK-NEXT: store i8 %[[a1]], ptr %[[a0]]
 // CHECK-NEXT: ret void
 ^bb1:
-    llvm.store %0, %arg0 : !llvm.ptr<i8>
+    llvm.store %0, %arg0 : i8, !llvm.ptr
     llvm.return
 
 // CHECK: [[unwind]]:
@@ -1608,7 +1597,7 @@ llvm.func @invoke_result(%arg0 : !llvm.ptr<i8>) attributes { personality = @__gx
 // CHECK-NEXT: cleanup
 // CHECK-NEXT: ret void
 ^bb2:
-    %7 = llvm.landingpad cleanup : !llvm.struct<(ptr<i8>, i32)>
+    %7 = llvm.landingpad cleanup : !llvm.struct<(ptr, i32)>
     llvm.return
 }
 
@@ -1635,7 +1624,7 @@ llvm.func @invoke_phis() -> i32 attributes { personality = @__gxx_personality_v0
 // CHECK-NEXT: cleanup
 // CHECK-NEXT: br label %[[normal]]
 ^bb2:
-    %2 = llvm.landingpad cleanup : !llvm.struct<(ptr<i8>, i32)>
+    %2 = llvm.landingpad cleanup : !llvm.struct<(ptr, i32)>
     %3 = llvm.mlir.constant(1 : i32) : i32
     llvm.br ^bb1(%3 : i32)
 }
@@ -1782,17 +1771,17 @@ llvm.func @address_taken() {
   llvm.return
 }
 
-llvm.mlir.global internal constant @taker_of_address() : !llvm.ptr<func<void ()>> {
-  %0 = llvm.mlir.addressof @address_taken : !llvm.ptr<func<void ()>>
-  llvm.return %0 : !llvm.ptr<func<void ()>>
+llvm.mlir.global internal constant @taker_of_address() : !llvm.ptr {
+  %0 = llvm.mlir.addressof @address_taken : !llvm.ptr
+  llvm.return %0 : !llvm.ptr
 }
 
 // -----
 
 // CHECK: @forward_use_of_address = linkonce global ptr @address_declared_after_use
-llvm.mlir.global linkonce @forward_use_of_address() : !llvm.ptr<f32> {
-  %0 = llvm.mlir.addressof @address_declared_after_use : !llvm.ptr<f32>
-  llvm.return %0 : !llvm.ptr<f32>
+llvm.mlir.global linkonce @forward_use_of_address() : !llvm.ptr {
+  %0 = llvm.mlir.addressof @address_declared_after_use : !llvm.ptr
+  llvm.return %0 : !llvm.ptr
 }
 
 llvm.mlir.global linkonce @address_declared_after_use() : f32
@@ -1800,14 +1789,14 @@ llvm.mlir.global linkonce @address_declared_after_use() : f32
 // -----
 
 // CHECK: @take_self_address = linkonce global { i32, ptr } {{.*}} ptr @take_self_address
-llvm.mlir.global linkonce @take_self_address() : !llvm.struct<(i32, !llvm.ptr<i32>)> {
+llvm.mlir.global linkonce @take_self_address() : !llvm.struct<(i32, !llvm.ptr)> {
   %z32 = llvm.mlir.constant(0 : i32) : i32
-  %0 = llvm.mlir.undef : !llvm.struct<(i32, !llvm.ptr<i32>)>
-  %1 = llvm.mlir.addressof @take_self_address : !llvm.ptr<!llvm.struct<(i32, !llvm.ptr<i32>)>>
-  %2 = llvm.getelementptr %1[%z32, 0] : (!llvm.ptr<!llvm.struct<(i32, !llvm.ptr<i32>)>>, i32) -> !llvm.ptr<i32>
-  %3 = llvm.insertvalue %z32, %0[0] : !llvm.struct<(i32, !llvm.ptr<i32>)>
-  %4 = llvm.insertvalue %2, %3[1] : !llvm.struct<(i32, !llvm.ptr<i32>)>
-  llvm.return %4 : !llvm.struct<(i32, !llvm.ptr<i32>)>
+  %0 = llvm.mlir.undef : !llvm.struct<(i32, !llvm.ptr)>
+  %1 = llvm.mlir.addressof @take_self_address : !llvm.ptr
+  %2 = llvm.getelementptr %1[%z32, 0] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<(i32, !llvm.ptr)>
+  %3 = llvm.insertvalue %z32, %0[0] : !llvm.struct<(i32, !llvm.ptr)>
+  %4 = llvm.insertvalue %2, %3[1] : !llvm.struct<(i32, !llvm.ptr)>
+  llvm.return %4 : !llvm.struct<(i32, !llvm.ptr)>
 }
 
 // -----
@@ -1879,7 +1868,7 @@ llvm.func @invoke_branch_weights() -> i32 attributes {personality = @__gxx_perso
   // CHECK: !prof ![[NODE:[0-9]+]]
   llvm.invoke @foo() to ^bb2 unwind ^bb1 {branch_weights = array<i32 : 42, 99>} : () -> ()
 ^bb1:  // pred: ^bb0
-  %1 = llvm.landingpad cleanup : !llvm.struct<(ptr<i8>, i32)>
+  %1 = llvm.landingpad cleanup : !llvm.struct<(ptr, i32)>
   llvm.br ^bb2
 ^bb2:  // 2 preds: ^bb0, ^bb1
   llvm.return %0 : i32
@@ -1892,11 +1881,11 @@ llvm.func @invoke_branch_weights() -> i32 attributes {personality = @__gxx_perso
 llvm.func @volatile_store_and_load() {
   %val = llvm.mlir.constant(5 : i32) : i32
   %size = llvm.mlir.constant(1 : i64) : i64
-  %0 = llvm.alloca %size x i32 : (i64) -> (!llvm.ptr<i32>)
+  %0 = llvm.alloca %size x i32 : (i64) -> (!llvm.ptr)
   // CHECK: store volatile i32 5, ptr %{{.*}}
-  llvm.store volatile %val, %0 : !llvm.ptr<i32>
+  llvm.store volatile %val, %0 : i32, !llvm.ptr
   // CHECK: %{{.*}} = load volatile i32, ptr %{{.*}}
-  %1 = llvm.load volatile %0: !llvm.ptr<i32>
+  %1 = llvm.load volatile %0: !llvm.ptr -> i32
   llvm.return
 }
 
@@ -1906,11 +1895,11 @@ llvm.func @volatile_store_and_load() {
 llvm.func @nontemporal_store_and_load() {
   %val = llvm.mlir.constant(5 : i32) : i32
   %size = llvm.mlir.constant(1 : i64) : i64
-  %0 = llvm.alloca %size x i32 : (i64) -> (!llvm.ptr<i32>)
+  %0 = llvm.alloca %size x i32 : (i64) -> (!llvm.ptr)
   // CHECK: !nontemporal ![[NODE:[0-9]+]]
-  llvm.store %val, %0 {nontemporal} : !llvm.ptr<i32>
+  llvm.store %val, %0 {nontemporal} : i32, !llvm.ptr
   // CHECK: !nontemporal ![[NODE]]
-  %1 = llvm.load %0 {nontemporal} : !llvm.ptr<i32>
+  %1 = llvm.load %0 {nontemporal} : !llvm.ptr -> i32
   llvm.return
 }
 
@@ -2246,19 +2235,17 @@ llvm.func @vararg_function(%arg0: i32, ...) {
   %0 = llvm.mlir.constant(1 : i32) : i32
   %1 = llvm.mlir.constant(1 : i32) : i32
   // CHECK: %[[ALLOCA0:.+]] = alloca %struct.va_list, align 8
-  %2 = llvm.alloca %1 x !llvm.struct<"struct.va_list", (ptr<i8>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr<struct<"struct.va_list", (ptr<i8>)>>
-  %3 = llvm.bitcast %2 : !llvm.ptr<struct<"struct.va_list", (ptr<i8>)>> to !llvm.ptr<i8>
+  %2 = llvm.alloca %1 x !llvm.struct<"struct.va_list", (ptr)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
   // CHECK: call void @llvm.va_start(ptr %[[ALLOCA0]])
-  llvm.intr.vastart %3 : !llvm.ptr<i8>
+  llvm.intr.vastart %2 : !llvm.ptr
   // CHECK: %[[ALLOCA1:.+]] = alloca ptr, align 8
-  %4 = llvm.alloca %0 x !llvm.ptr<i8> {alignment = 8 : i64} : (i32) -> !llvm.ptr<ptr<i8>>
-  %5 = llvm.bitcast %4 : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
+  %4 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64} : (i32) -> !llvm.ptr
   // CHECK: call void @llvm.va_copy(ptr %[[ALLOCA1]], ptr %[[ALLOCA0]])
-  llvm.intr.vacopy %3 to %5 : !llvm.ptr<i8>, !llvm.ptr<i8>
+  llvm.intr.vacopy %2 to %4 : !llvm.ptr, !llvm.ptr
   // CHECK: call void @llvm.va_end(ptr %[[ALLOCA1]])
   // CHECK: call void @llvm.va_end(ptr %[[ALLOCA0]])
-  llvm.intr.vaend %5 : !llvm.ptr<i8>
-  llvm.intr.vaend %3 : !llvm.ptr<i8>
+  llvm.intr.vaend %4 : !llvm.ptr
+  llvm.intr.vaend %2 : !llvm.ptr
   // CHECK: ret void
   llvm.return
 }
@@ -2266,7 +2253,7 @@ llvm.func @vararg_function(%arg0: i32, ...) {
 // -----
 
 // CHECK: declare void @readonly_function([[PTR:.+]] readonly)
-llvm.func @readonly_function(%arg0: !llvm.ptr<f32> {llvm.readonly})
+llvm.func @readonly_function(%arg0: !llvm.ptr {llvm.readonly})
 
 // -----
 

diff  --git a/mlir/test/Target/LLVMIR/nvvmir.mlir b/mlir/test/Target/LLVMIR/nvvmir.mlir
index 24ef1198577937f..76540cc2c3973c3 100644
--- a/mlir/test/Target/LLVMIR/nvvmir.mlir
+++ b/mlir/test/Target/LLVMIR/nvvmir.mlir
@@ -268,11 +268,11 @@ llvm.func @nvvm_mma_m16n8k4_tf32_f32(%a0 : i32, %a1 : i32,
 // The test below checks the correct mapping of the nvvm.wmma.*.load.* op to the correct intrinsic
 // in the LLVM NVPTX backend.
 // CHECK-LABEL: @gpu_wmma_load_op
-llvm.func @gpu_wmma_load_op(%arg0: !llvm.ptr<i32, 3>, %arg1: i32) {
+llvm.func @gpu_wmma_load_op(%arg0: !llvm.ptr<3>, %arg1: i32) {
   // CHECK: call { <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half> } @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16.p3(ptr addrspace(3) %{{.*}}, i32 %{{.*}})
   %0 = nvvm.wmma.load %arg0, %arg1
     {eltype = #nvvm.mma_type<f16>, frag = #nvvm.mma_frag<a>, k = 16 : i32, layout = #nvvm.mma_layout<row>, m = 16 : i32, n = 16 : i32}
-    : (!llvm.ptr<i32, 3>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>
+    : (!llvm.ptr<3>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>
 
   llvm.return
 }
@@ -280,13 +280,13 @@ llvm.func @gpu_wmma_load_op(%arg0: !llvm.ptr<i32, 3>, %arg1: i32) {
 // The test below checks the correct mapping of the nvvm.wmma.*.store.* op to the correct intrinsic
 // in the LLVM NVPTX backend.
 // CHECK-LABEL: @gpu_wmma_store_op
-llvm.func @gpu_wmma_store_op(%arg0: !llvm.ptr<i32, 3>, %arg1: i32,
+llvm.func @gpu_wmma_store_op(%arg0: !llvm.ptr<3>, %arg1: i32,
                             %arg2: vector<2 x f16>, %arg3: vector<2 x f16>,
                             %arg4: vector<2 xf16>, %arg5: vector<2 x f16>) {
   // CHECK: call void @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16.p3(ptr addrspace(3) %{{.*}}, <2 x half> {{.*}}, <2 x half> %{{.*}}, <2 x half> %{{.*}}, <2 x half> %{{.*}}, i32 %{{.*}})
   nvvm.wmma.store %arg0, %arg1, %arg2, %arg3, %arg4, %arg5
     {eltype = #nvvm.mma_type<f16>, k = 16 : i32, layout = #nvvm.mma_layout<row>, m = 16 : i32, n = 16 : i32}
-    : !llvm.ptr<i32, 3>, vector<2 x f16>, vector<2 x f16>, vector<2 x f16>, vector<2 x f16>
+    : !llvm.ptr<3>, vector<2 x f16>, vector<2 x f16>, vector<2 x f16>, vector<2 x f16>
   llvm.return
 }
 
@@ -315,11 +315,11 @@ llvm.func @gpu_wmma_mma_op(%arg0: vector<2 x f16>, %arg1: vector<2 x f16>,
 }
 
 // CHECK-LABEL: @nvvm_wmma_load_tf32
-llvm.func @nvvm_wmma_load_tf32(%arg0: !llvm.ptr<i32>, %arg1 : i32) {
+llvm.func @nvvm_wmma_load_tf32(%arg0: !llvm.ptr, %arg1 : i32) {
   // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.wmma.m16n16k8.load.a.row.stride.tf32.p0(ptr %{{.*}}, i32 %{{.*}})
   %0 = nvvm.wmma.load %arg0, %arg1
     {eltype = #nvvm.mma_type<tf32>, frag = #nvvm.mma_frag<a>, k = 8 : i32, layout = #nvvm.mma_layout<row>, m = 16 : i32, n = 16 : i32}
-    : (!llvm.ptr<i32>) -> !llvm.struct<(i32, i32, i32, i32)>
+    : (!llvm.ptr) -> !llvm.struct<(i32, i32, i32, i32)>
   llvm.return
 }
 
@@ -336,15 +336,15 @@ llvm.func @nvvm_wmma_mma(%0 : i32, %1 : i32, %2 : i32, %3 : i32, %4 : i32, %5 :
 }
 
 // CHECK-LABEL: @cp_async
-llvm.func @cp_async(%arg0: !llvm.ptr<i8, 3>, %arg1: !llvm.ptr<i8, 1>) {
+llvm.func @cp_async(%arg0: !llvm.ptr<3>, %arg1: !llvm.ptr<1>) {
 // CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.4(ptr addrspace(3) %{{.*}}, ptr addrspace(1) %{{.*}})
-  nvvm.cp.async.shared.global %arg0, %arg1, 4, cache =  ca : !llvm.ptr<i8, 3>, !llvm.ptr<i8, 1>
+  nvvm.cp.async.shared.global %arg0, %arg1, 4, cache =  ca : !llvm.ptr<3>, !llvm.ptr<1>
 // CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.8(ptr addrspace(3) %{{.*}}, ptr addrspace(1) %{{.*}})
-  nvvm.cp.async.shared.global %arg0, %arg1, 8, cache =  ca : !llvm.ptr<i8, 3>, !llvm.ptr<i8, 1>
+  nvvm.cp.async.shared.global %arg0, %arg1, 8, cache =  ca : !llvm.ptr<3>, !llvm.ptr<1>
 // CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.16(ptr addrspace(3) %{{.*}}, ptr addrspace(1) %{{.*}})
-  nvvm.cp.async.shared.global %arg0, %arg1, 16, cache =  ca : !llvm.ptr<i8, 3>, !llvm.ptr<i8, 1>
+  nvvm.cp.async.shared.global %arg0, %arg1, 16, cache =  ca : !llvm.ptr<3>, !llvm.ptr<1>
 // CHECK: call void @llvm.nvvm.cp.async.cg.shared.global.16(ptr addrspace(3) %{{.*}}, ptr addrspace(1) %{{.*}})
-  nvvm.cp.async.shared.global %arg0, %arg1, 16, cache =  cg : !llvm.ptr<i8, 3>, !llvm.ptr<i8, 1>
+  nvvm.cp.async.shared.global %arg0, %arg1, 16, cache =  cg : !llvm.ptr<3>, !llvm.ptr<1>
 // CHECK: call void @llvm.nvvm.cp.async.commit.group()
   nvvm.cp.async.commit.group
 // CHECK: call void @llvm.nvvm.cp.async.wait.group(i32 0)
@@ -353,19 +353,19 @@ llvm.func @cp_async(%arg0: !llvm.ptr<i8, 3>, %arg1: !llvm.ptr<i8, 1>) {
 }
 
 // CHECK-LABEL: @ld_matrix
-llvm.func @ld_matrix(%arg0: !llvm.ptr<i32, 3>) {
+llvm.func @ld_matrix(%arg0: !llvm.ptr<3>) {
   // CHECK: call i32 @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.b16.p3(ptr addrspace(3) %{{.*}})
-  %l1 = nvvm.ldmatrix %arg0 {num = 1 : i32, layout = #nvvm.mma_layout<row>} : (!llvm.ptr<i32, 3>) -> i32
+  %l1 = nvvm.ldmatrix %arg0 {num = 1 : i32, layout = #nvvm.mma_layout<row>} : (!llvm.ptr<3>) -> i32
   // CHECK: call { i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.b16.p3(ptr addrspace(3) %{{.*}})
-  %l2 = nvvm.ldmatrix %arg0 {num = 2 : i32, layout = #nvvm.mma_layout<row>} : (!llvm.ptr<i32, 3>) -> !llvm.struct<(i32, i32)>
+  %l2 = nvvm.ldmatrix %arg0 {num = 2 : i32, layout = #nvvm.mma_layout<row>} : (!llvm.ptr<3>) -> !llvm.struct<(i32, i32)>
   // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.b16.p3(ptr addrspace(3) %{{.*}})
-  %l4 = nvvm.ldmatrix %arg0 {num = 4 : i32, layout = #nvvm.mma_layout<row>} : (!llvm.ptr<i32, 3>) -> !llvm.struct<(i32, i32, i32, i32)>
+  %l4 = nvvm.ldmatrix %arg0 {num = 4 : i32, layout = #nvvm.mma_layout<row>} : (!llvm.ptr<3>) -> !llvm.struct<(i32, i32, i32, i32)>
    // CHECK: call i32 @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.trans.b16.p3(ptr addrspace(3) %{{.*}})
-  %l1t = nvvm.ldmatrix %arg0 {num = 1 : i32, layout = #nvvm.mma_layout<col>} : (!llvm.ptr<i32, 3>) -> i32
+  %l1t = nvvm.ldmatrix %arg0 {num = 1 : i32, layout = #nvvm.mma_layout<col>} : (!llvm.ptr<3>) -> i32
   // CHECK: call { i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.trans.b16.p3(ptr addrspace(3) %{{.*}})
-  %l2t = nvvm.ldmatrix %arg0 {num = 2 : i32, layout = #nvvm.mma_layout<col>} : (!llvm.ptr<i32, 3>) -> !llvm.struct<(i32, i32)>
+  %l2t = nvvm.ldmatrix %arg0 {num = 2 : i32, layout = #nvvm.mma_layout<col>} : (!llvm.ptr<3>) -> !llvm.struct<(i32, i32)>
   // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.trans.b16.p3(ptr addrspace(3) %{{.*}})
-  %l4t = nvvm.ldmatrix %arg0 {num = 4 : i32, layout = #nvvm.mma_layout<col>} : (!llvm.ptr<i32, 3>) -> !llvm.struct<(i32, i32, i32, i32)>
+  %l4t = nvvm.ldmatrix %arg0 {num = 4 : i32, layout = #nvvm.mma_layout<col>} : (!llvm.ptr<3>) -> !llvm.struct<(i32, i32, i32, i32)>
   llvm.return
 }
 

diff  --git a/mlir/test/Target/LLVMIR/openacc-llvm.mlir b/mlir/test/Target/LLVMIR/openacc-llvm.mlir
index 0954d929326c29b..897311c6e81beae 100644
--- a/mlir/test/Target/LLVMIR/openacc-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/openacc-llvm.mlir
@@ -1,9 +1,9 @@
 // RUN: mlir-translate -mlir-to-llvmir -split-input-file %s | FileCheck %s
 
-llvm.func @testenterdataop(%arg0: !llvm.ptr<f32>, %arg1 : !llvm.ptr<f32>) {
-  %0 = acc.create varPtr(%arg0 : !llvm.ptr<f32>) -> !llvm.ptr<f32>
-  %1 = acc.copyin varPtr(%arg1 : !llvm.ptr<f32>) -> !llvm.ptr<f32>
-  acc.enter_data dataOperands(%0, %1 : !llvm.ptr<f32>, !llvm.ptr<f32>)
+llvm.func @testenterdataop(%arg0: !llvm.ptr, %arg1 : !llvm.ptr) {
+  %0 = acc.create varPtr(%arg0 : !llvm.ptr) -> !llvm.ptr
+  %1 = acc.copyin varPtr(%arg1 : !llvm.ptr) -> !llvm.ptr
+  acc.enter_data dataOperands(%0, %1 : !llvm.ptr, !llvm.ptr)
   llvm.return
 }
 
@@ -47,12 +47,12 @@ llvm.func @testenterdataop(%arg0: !llvm.ptr<f32>, %arg1 : !llvm.ptr<f32>) {
 // -----
 
 
-llvm.func @testexitdataop(%arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>) {
-  %arg0_devptr = acc.getdeviceptr varPtr(%arg0 : !llvm.ptr<f32>) -> !llvm.ptr<f32>
-  %1 = acc.getdeviceptr varPtr(%arg1 : !llvm.ptr<f32>) -> !llvm.ptr<f32>
-  acc.exit_data dataOperands(%arg0_devptr, %1 : !llvm.ptr<f32>, !llvm.ptr<f32>)
-  acc.delete accPtr(%arg0_devptr : !llvm.ptr<f32>)
-  acc.copyout accPtr(%1 : !llvm.ptr<f32>) to varPtr(%arg1 : !llvm.ptr<f32>)
+llvm.func @testexitdataop(%arg0: !llvm.ptr, %arg1: !llvm.ptr) {
+  %arg0_devptr = acc.getdeviceptr varPtr(%arg0 : !llvm.ptr) -> !llvm.ptr
+  %1 = acc.getdeviceptr varPtr(%arg1 : !llvm.ptr) -> !llvm.ptr
+  acc.exit_data dataOperands(%arg0_devptr, %1 : !llvm.ptr, !llvm.ptr)
+  acc.delete accPtr(%arg0_devptr : !llvm.ptr)
+  acc.copyout accPtr(%1 : !llvm.ptr) to varPtr(%arg1 : !llvm.ptr)
   llvm.return
 }
 
@@ -94,9 +94,9 @@ llvm.func @testexitdataop(%arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>) {
 
 // -----
 
-llvm.func @testupdateop(%arg1: !llvm.ptr<f32>) {
-  %0 = acc.update_device varPtr(%arg1 : !llvm.ptr<f32>) -> !llvm.ptr<f32>
-  acc.update dataOperands(%0 : !llvm.ptr<f32>)
+llvm.func @testupdateop(%arg1: !llvm.ptr) {
+  %0 = acc.update_device varPtr(%arg1 : !llvm.ptr) -> !llvm.ptr
+  acc.update dataOperands(%0 : !llvm.ptr)
   llvm.return
 }
 
@@ -130,17 +130,17 @@ llvm.func @testupdateop(%arg1: !llvm.ptr<f32>) {
 
 // -----
 
-llvm.func @testdataop(%arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>, %arg2: !llvm.ptr<i32>) {
+llvm.func @testdataop(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
   
-  %0 = acc.copyin varPtr(%arg0 : !llvm.ptr<f32>) -> !llvm.ptr<f32>
-  %1 = acc.create varPtr(%arg1 : !llvm.ptr<f32>) -> !llvm.ptr<f32>
-  acc.data dataOperands(%0, %1 : !llvm.ptr<f32>, !llvm.ptr<f32>) {
+  %0 = acc.copyin varPtr(%arg0 : !llvm.ptr) -> !llvm.ptr
+  %1 = acc.create varPtr(%arg1 : !llvm.ptr) -> !llvm.ptr
+  acc.data dataOperands(%0, %1 : !llvm.ptr, !llvm.ptr) {
     %9 = llvm.mlir.constant(2 : i32) : i32
-    llvm.store %9, %arg2 : !llvm.ptr<i32>
+    llvm.store %9, %arg2 : i32, !llvm.ptr
     acc.terminator
   }
-  acc.copyout accPtr(%0 : !llvm.ptr<f32>) to varPtr(%arg0 : !llvm.ptr<f32>)
-  acc.copyout accPtr(%1 : !llvm.ptr<f32>) to varPtr(%arg1 : !llvm.ptr<f32>)
+  acc.copyout accPtr(%0 : !llvm.ptr) to varPtr(%arg0 : !llvm.ptr)
+  acc.copyout accPtr(%1 : !llvm.ptr) to varPtr(%arg1 : !llvm.ptr)
   llvm.return
 }
 

diff  --git a/mlir/test/Target/LLVMIR/openmp-nested.mlir b/mlir/test/Target/LLVMIR/openmp-nested.mlir
index d83a78cfbd589b7..e1fdfdd24a3cb06 100644
--- a/mlir/test/Target/LLVMIR/openmp-nested.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-nested.mlir
@@ -23,7 +23,7 @@ module {
         %20 = llvm.trunc %19 : i64 to i32
         %5 = llvm.mlir.addressof @str0 : !llvm.ptr
         %6 = llvm.getelementptr %5[%4, %4] : (!llvm.ptr, i32, i32) -> !llvm.ptr, !llvm.array<29 x i8>
-        %21 = llvm.call @printf(%6, %20, %20) vararg(!llvm.func<i32 (ptr<i8>, ...)>): (!llvm.ptr, i32, i32) -> i32
+        %21 = llvm.call @printf(%6, %20, %20) vararg(!llvm.func<i32 (ptr, ...)>): (!llvm.ptr, i32, i32) -> i32
         omp.yield
       }
       omp.terminator

