[llvm] 8ecc6c9 - [IR] Partially remove pointer element types from intrinsic signatures (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 13 01:00:59 PDT 2023


Author: Nikita Popov
Date: 2023-07-13T10:00:51+02:00
New Revision: 8ecc6c9378399bf8b7a0cec72e73138ac686a1bb

URL: https://github.com/llvm/llvm-project/commit/8ecc6c9378399bf8b7a0cec72e73138ac686a1bb
DIFF: https://github.com/llvm/llvm-project/commit/8ecc6c9378399bf8b7a0cec72e73138ac686a1bb.diff

LOG: [IR] Partially remove pointer element types from intrinsic signatures (NFC)

As typed pointers are no longer supported, we should no longer
specify element types in intrinsic signatures.

The only meaningful pointer types are now:

    llvm_ptr_ty -> ptr
    llvm_anyptr_ty -> ptr addrspace(any)
    LLVMQualPointerType<N> -> ptr addrspace(N)

This is only "partially" because we also have a bunch of special
IIT descriptors like LLVMPointerTo, LLVMPointerToElt and
LLVMAnyPointerToElt, which I'll leave for a later revision.

Differential Revision: https://reviews.llvm.org/D155086

Added: 
    

Modified: 
    llvm/include/llvm/IR/Intrinsics.td
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/include/llvm/IR/IntrinsicsAMDGPU.td
    llvm/include/llvm/IR/IntrinsicsARM.td
    llvm/include/llvm/IR/IntrinsicsHexagon.td
    llvm/include/llvm/IR/IntrinsicsNVVM.td
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/include/llvm/IR/IntrinsicsSystemZ.td
    llvm/include/llvm/IR/IntrinsicsWebAssembly.td
    llvm/include/llvm/IR/IntrinsicsX86.td
    llvm/lib/IR/Function.cpp

Removed: 
    llvm/test/TableGen/intrinsic-pointer-to-any.td


################################################################################
diff  --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 638a9fda29b15c..5f4626bd109444 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -389,32 +389,21 @@ class LLVMAnyType<ValueType vt> : LLVMType<vt> {
   assert isAny, "LLVMAnyType.VT should have isOverloaded";
 }
 
-class LLVMQualPointerType<LLVMType elty, int addrspace>
-  : LLVMType<iPTR>{
-  LLVMType ElTy = elty;
+class LLVMQualPointerType<int addrspace>
+  : LLVMType<iPTR> {
   assert !and(!le(0, addrspace), !le(addrspace, 255)),
     "Address space exceeds 255";
 
-  // D63507: LLVMPointerType<llvm_any_ty>
-  let isAny = elty.isAny;
-
-  let Sig = !listconcat(
+  let Sig =
     !if(addrspace, [
       IIT_ANYPTR.Number,
       addrspace,
     ], [
       IIT_PTR.Number,
-    ]),
-    ElTy.Sig);
+    ]);
 }
 
-class LLVMPointerType<LLVMType elty>
-  : LLVMQualPointerType<elty, 0>;
-
-class LLVMAnyPointerType<LLVMType elty>
-  : LLVMAnyType<iPTRAny> {
-  LLVMType ElTy = elty;
-
+class LLVMAnyPointerType : LLVMAnyType<iPTRAny> {
   assert isAny, "iPTRAny should have isOverloaded";
 }
 
@@ -506,16 +495,13 @@ def llvm_double_ty     : LLVMType<f64>;
 def llvm_f80_ty        : LLVMType<f80>;
 def llvm_f128_ty       : LLVMType<f128>;
 def llvm_ppcf128_ty    : LLVMType<ppcf128>;
-def llvm_ptr_ty        : LLVMPointerType<llvm_i8_ty>;             // i8*
-def llvm_ptrptr_ty     : LLVMPointerType<llvm_ptr_ty>;            // i8**
-def llvm_anyptr_ty     : LLVMAnyPointerType<llvm_i8_ty>;          // (space)i8*
-def llvm_empty_ty      : LLVMType<OtherVT>;                       // { }
-def llvm_descriptor_ty : LLVMPointerType<llvm_empty_ty>;          // { }*
-def llvm_metadata_ty   : LLVMType<MetadataVT>;                    // !{...}
-def llvm_token_ty      : LLVMType<token>;                         // token
+def llvm_ptr_ty        : LLVMQualPointerType<0>; // ptr
+def llvm_anyptr_ty     : LLVMAnyPointerType;     // ptr addrspace(N)
+def llvm_empty_ty      : LLVMType<OtherVT>;      // { }
+def llvm_metadata_ty   : LLVMType<MetadataVT>;   // !{...}
+def llvm_token_ty      : LLVMType<token>;        // token
 
 def llvm_x86mmx_ty     : LLVMType<x86mmx>;
-def llvm_ptrx86mmx_ty  : LLVMPointerType<llvm_x86mmx_ty>;         // <1 x i64>*
 
 def llvm_aarch64_svcount_ty : LLVMType<aarch64svcount>;
 
@@ -726,12 +712,12 @@ def int_vaend   : DefaultAttrsIntrinsic<[], [llvm_ptr_ty], [], "llvm.va_end">;
 //===------------------- Garbage Collection Intrinsics --------------------===//
 //
 def int_gcroot  : Intrinsic<[],
-                            [llvm_ptrptr_ty, llvm_ptr_ty]>;
+                            [llvm_ptr_ty, llvm_ptr_ty]>;
 def int_gcread  : Intrinsic<[llvm_ptr_ty],
-                            [llvm_ptr_ty, llvm_ptrptr_ty],
+                            [llvm_ptr_ty, llvm_ptr_ty],
                             [IntrReadMem, IntrArgMemOnly]>;
 def int_gcwrite : Intrinsic<[],
-                            [llvm_ptr_ty, llvm_ptr_ty, llvm_ptrptr_ty],
+                            [llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
                             [IntrArgMemOnly, NoCapture<ArgIndex<1>>,
                              NoCapture<ArgIndex<2>>]>;
 
@@ -747,19 +733,19 @@ def int_objc_autoreleasePoolPush            : Intrinsic<[llvm_ptr_ty], []>;
 def int_objc_autoreleaseReturnValue         : Intrinsic<[llvm_ptr_ty],
                                                         [llvm_ptr_ty]>;
 def int_objc_copyWeak                       : Intrinsic<[],
-                                                        [llvm_ptrptr_ty,
-                                                         llvm_ptrptr_ty]>;
-def int_objc_destroyWeak                    : Intrinsic<[], [llvm_ptrptr_ty]>;
+                                                        [llvm_ptr_ty,
+                                                         llvm_ptr_ty]>;
+def int_objc_destroyWeak                    : Intrinsic<[], [llvm_ptr_ty]>;
 def int_objc_initWeak                       : Intrinsic<[llvm_ptr_ty],
-                                                        [llvm_ptrptr_ty,
+                                                        [llvm_ptr_ty,
                                                          llvm_ptr_ty]>;
 def int_objc_loadWeak                       : Intrinsic<[llvm_ptr_ty],
-                                                        [llvm_ptrptr_ty]>;
+                                                        [llvm_ptr_ty]>;
 def int_objc_loadWeakRetained               : Intrinsic<[llvm_ptr_ty],
-                                                        [llvm_ptrptr_ty]>;
+                                                        [llvm_ptr_ty]>;
 def int_objc_moveWeak                       : Intrinsic<[],
-                                                        [llvm_ptrptr_ty,
-                                                         llvm_ptrptr_ty]>;
+                                                        [llvm_ptr_ty,
+                                                         llvm_ptr_ty]>;
 def int_objc_release                        : Intrinsic<[], [llvm_ptr_ty]>;
 def int_objc_retain                         : Intrinsic<[llvm_ptr_ty],
                                                         [llvm_ptr_ty]>;
@@ -772,10 +758,10 @@ def int_objc_retainAutoreleasedReturnValue  : Intrinsic<[llvm_ptr_ty],
 def int_objc_retainBlock                    : Intrinsic<[llvm_ptr_ty],
                                                         [llvm_ptr_ty]>;
 def int_objc_storeStrong                    : Intrinsic<[],
-                                                        [llvm_ptrptr_ty,
+                                                        [llvm_ptr_ty,
                                                          llvm_ptr_ty]>;
 def int_objc_storeWeak                      : Intrinsic<[llvm_ptr_ty],
-                                                        [llvm_ptrptr_ty,
+                                                        [llvm_ptr_ty,
                                                          llvm_ptr_ty]>;
 def int_objc_clang_arc_use                  : Intrinsic<[],
                                                         [llvm_vararg_ty]>;
@@ -797,23 +783,23 @@ def int_objc_sync_enter                     : Intrinsic<[llvm_i32_ty],
 def int_objc_sync_exit                      : Intrinsic<[llvm_i32_ty],
                                                         [llvm_ptr_ty]>;
 def int_objc_arc_annotation_topdown_bbstart : Intrinsic<[],
-                                                        [llvm_ptrptr_ty,
-                                                         llvm_ptrptr_ty]>;
+                                                        [llvm_ptr_ty,
+                                                         llvm_ptr_ty]>;
 def int_objc_arc_annotation_topdown_bbend   : Intrinsic<[],
-                                                        [llvm_ptrptr_ty,
-                                                         llvm_ptrptr_ty]>;
+                                                        [llvm_ptr_ty,
+                                                         llvm_ptr_ty]>;
 def int_objc_arc_annotation_bottomup_bbstart  : Intrinsic<[],
-                                                          [llvm_ptrptr_ty,
-                                                           llvm_ptrptr_ty]>;
+                                                          [llvm_ptr_ty,
+                                                           llvm_ptr_ty]>;
 def int_objc_arc_annotation_bottomup_bbend  : Intrinsic<[],
-                                                        [llvm_ptrptr_ty,
-                                                         llvm_ptrptr_ty]>;
+                                                        [llvm_ptr_ty,
+                                                         llvm_ptr_ty]>;
 //===--------------- Swift asynchronous context intrinsics ----------------===//
 
 // Returns the location of the Swift asynchronous context (usually stored just
 // before the frame pointer), and triggers the creation of a null context if it
 // would otherwise be unneeded.
-def int_swift_async_context_addr : Intrinsic<[llvm_ptrptr_ty], [], []>;
+def int_swift_async_context_addr : Intrinsic<[llvm_ptr_ty], [], []>;
 
 //===--------------------- Code Generator Intrinsics ----------------------===//
 //
@@ -902,7 +888,7 @@ def int_experimental_noalias_scope_decl
 
 // Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
 // guard to the correct place on the stack frame.
-def int_stackprotector : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_ptrptr_ty], []>;
+def int_stackprotector : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], []>;
 def int_stackguard : DefaultAttrsIntrinsic<[llvm_ptr_ty], [], []>;
 
 // A cover for instrumentation based profiling.
@@ -1387,7 +1373,7 @@ def int_var_annotation : DefaultAttrsIntrinsic<
     [IntrInaccessibleMemOnly], "llvm.var.annotation">;
 
 def int_ptr_annotation : DefaultAttrsIntrinsic<
-    [LLVMAnyPointerType<llvm_anyint_ty>],
+    [llvm_anyptr_ty],
     [LLVMMatchType<0>, llvm_anyptr_ty, LLVMMatchType<1>, llvm_i32_ty, LLVMMatchType<1>],
     [IntrInaccessibleMemOnly], "llvm.ptr.annotation">;
 
@@ -1532,13 +1518,13 @@ def int_lifetime_end    : DefaultAttrsIntrinsic<[],
                                     [IntrArgMemOnly, IntrWillReturn,
                                      NoCapture<ArgIndex<1>>,
                                      ImmArg<ArgIndex<0>>]>;
-def int_invariant_start : DefaultAttrsIntrinsic<[llvm_descriptor_ty],
+def int_invariant_start : DefaultAttrsIntrinsic<[llvm_ptr_ty],
                                     [llvm_i64_ty, llvm_anyptr_ty],
                                     [IntrArgMemOnly, IntrWillReturn,
                                      NoCapture<ArgIndex<1>>,
                                      ImmArg<ArgIndex<0>>]>;
 def int_invariant_end   : DefaultAttrsIntrinsic<[],
-                                    [llvm_descriptor_ty, llvm_i64_ty,
+                                    [llvm_ptr_ty, llvm_i64_ty,
                                      llvm_anyptr_ty],
                                     [IntrArgMemOnly, IntrWillReturn,
                                      NoCapture<ArgIndex<2>>,
@@ -1764,13 +1750,13 @@ def int_experimental_stepvector : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
 // Memory Intrinsics
 def int_vp_store : DefaultAttrsIntrinsic<[],
                              [ llvm_anyvector_ty,
-                               LLVMAnyPointerType<LLVMMatchType<0>>,
+                               llvm_anyptr_ty,
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty],
                              [ NoCapture<ArgIndex<1>>, IntrNoSync, IntrWriteMem, IntrArgMemOnly, IntrWillReturn ]>;
 
 def int_vp_load  : DefaultAttrsIntrinsic<[ llvm_anyvector_ty],
-                             [ LLVMAnyPointerType<LLVMMatchType<0>>,
+                             [ llvm_anyptr_ty,
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty],
                              [ NoCapture<ArgIndex<0>>, IntrNoSync, IntrReadMem, IntrWillReturn, IntrArgMemOnly ]>;
@@ -1791,14 +1777,14 @@ def int_vp_scatter: DefaultAttrsIntrinsic<[],
 // Experimental strided memory accesses
 def int_experimental_vp_strided_store : DefaultAttrsIntrinsic<[],
                              [ llvm_anyvector_ty,
-                               LLVMAnyPointerToElt<0>,
+                               llvm_anyptr_ty,
                                llvm_anyint_ty, // Stride in bytes
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty],
                              [ NoCapture<ArgIndex<1>>, IntrNoSync, IntrWriteMem, IntrArgMemOnly, IntrWillReturn ]>;
 
 def int_experimental_vp_strided_load  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                             [ LLVMAnyPointerToElt<0>,
+                             [ llvm_anyptr_ty,
                                llvm_anyint_ty, // Stride in bytes
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty],
@@ -2192,14 +2178,14 @@ def int_experimental_vp_splice:
 //
 def int_masked_load:
   DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-            [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty,
+            [llvm_anyptr_ty, llvm_i32_ty,
              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<0>],
             [IntrReadMem, IntrArgMemOnly, IntrWillReturn, ImmArg<ArgIndex<1>>,
              NoCapture<ArgIndex<0>>]>;
 
 def int_masked_store:
   DefaultAttrsIntrinsic<[],
-            [llvm_anyvector_ty, LLVMAnyPointerType<LLVMMatchType<0>>,
+            [llvm_anyvector_ty, llvm_anyptr_ty,
              llvm_i32_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
             [IntrWriteMem, IntrArgMemOnly, IntrWillReturn,
              ImmArg<ArgIndex<2>>, NoCapture<ArgIndex<1>>]>;

diff  --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index bc8ce503fa003e..c53c4f5a4e8906 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -557,7 +557,7 @@ def int_aarch64_neon_vcopy_lane: AdvSIMD_2Vector2Index_Intrinsic;
 
 let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
   class AdvSIMD_1Vec_Load_Intrinsic
-      : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
+      : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyptr_ty],
                   [IntrReadMem, IntrArgMemOnly]>;
   class AdvSIMD_1Vec_Store_Lane_Intrinsic
     : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
@@ -565,7 +565,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
 
   class AdvSIMD_2Vec_Load_Intrinsic
     : DefaultAttrsIntrinsic<[LLVMMatchType<0>, llvm_anyvector_ty],
-                [LLVMAnyPointerType<LLVMMatchType<0>>],
+                [llvm_anyptr_ty],
                 [IntrReadMem, IntrArgMemOnly]>;
   class AdvSIMD_2Vec_Load_Lane_Intrinsic
     : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>],
@@ -574,7 +574,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
                 [IntrReadMem, IntrArgMemOnly]>;
   class AdvSIMD_2Vec_Store_Intrinsic
     : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
-                     LLVMAnyPointerType<LLVMMatchType<0>>],
+                     llvm_anyptr_ty],
                 [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;
   class AdvSIMD_2Vec_Store_Lane_Intrinsic
     : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
@@ -583,7 +583,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
 
   class AdvSIMD_3Vec_Load_Intrinsic
     : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
-                [LLVMAnyPointerType<LLVMMatchType<0>>],
+                [llvm_anyptr_ty],
                 [IntrReadMem, IntrArgMemOnly]>;
   class AdvSIMD_3Vec_Load_Lane_Intrinsic
     : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
@@ -592,7 +592,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
                 [IntrReadMem, IntrArgMemOnly]>;
   class AdvSIMD_3Vec_Store_Intrinsic
     : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
-                     LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
+                     LLVMMatchType<0>, llvm_anyptr_ty],
                 [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
   class AdvSIMD_3Vec_Store_Lane_Intrinsic
     : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty,
@@ -603,7 +603,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
   class AdvSIMD_4Vec_Load_Intrinsic
     : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
                  LLVMMatchType<0>, llvm_anyvector_ty],
-                [LLVMAnyPointerType<LLVMMatchType<0>>],
+                [llvm_anyptr_ty],
                 [IntrReadMem, IntrArgMemOnly]>;
   class AdvSIMD_4Vec_Load_Lane_Intrinsic
     : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
@@ -615,7 +615,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
   class AdvSIMD_4Vec_Store_Intrinsic
     : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                  LLVMMatchType<0>, LLVMMatchType<0>,
-                 LLVMAnyPointerType<LLVMMatchType<0>>],
+                 llvm_anyptr_ty],
                 [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;
   class AdvSIMD_4Vec_Store_Lane_Intrinsic
     : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
@@ -1354,8 +1354,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
 
   class SVE2_CONFLICT_DETECT_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                [LLVMAnyPointerType<llvm_any_ty>,
-                 LLVMMatchType<1>],
+                [llvm_anyptr_ty, LLVMMatchType<1>],
                 [IntrNoMem]>;
 
   class SVE2_3VectorArg_Indexed_Intrinsic

diff  --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 726f0bee648541..a5f8b505e60f3f 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -53,7 +53,7 @@ def int_r600_group_barrier : ClangBuiltin<"__builtin_r600_group_barrier">,
 // AS 7 is PARAM_I_ADDRESS, used for kernel arguments
 def int_r600_implicitarg_ptr :
   ClangBuiltin<"__builtin_r600_implicitarg_ptr">,
-  DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 7>], [],
+  DefaultAttrsIntrinsic<[LLVMQualPointerType<7>], [],
   [IntrNoMem, IntrSpeculatable]>;
 
 def int_r600_rat_store_typed :
@@ -141,22 +141,22 @@ defm int_amdgcn_workgroup_id : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                                <"__builtin_amdgcn_workgroup_id">;
 
 def int_amdgcn_dispatch_ptr :
-  DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
   [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
 
 def int_amdgcn_queue_ptr :
   ClangBuiltin<"__builtin_amdgcn_queue_ptr">,
-  DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
   [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
 
 def int_amdgcn_kernarg_segment_ptr :
   ClangBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
-  DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
   [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
 
 def int_amdgcn_implicitarg_ptr :
   ClangBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
-  DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
   [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
 
 def int_amdgcn_groupstaticsize :
@@ -173,7 +173,7 @@ def int_amdgcn_lds_kernel_id :
 
 def int_amdgcn_implicit_buffer_ptr :
   ClangBuiltin<"__builtin_amdgcn_implicit_buffer_ptr">,
-  DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
   [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
 
 // Set EXEC to the 64-bit value given.
@@ -463,7 +463,7 @@ def int_amdgcn_fmad_ftz :
 
 class AMDGPULDSIntrin :
   Intrinsic<[llvm_any_ty],
-    [LLVMQualPointerType<LLVMMatchType<0>, 3>,
+    [LLVMQualPointerType<3>,
     LLVMMatchType<0>,
     llvm_i32_ty, // ordering
     llvm_i32_ty, // scope
@@ -477,7 +477,7 @@ class AMDGPUDSOrderedIntrinsic : Intrinsic<
   [llvm_i32_ty],
   // M0 = {hi16:address, lo16:waveID}. Allow passing M0 as a pointer, so that
   // the bit packing can be optimized at the IR level.
-  [LLVMQualPointerType<llvm_i32_ty, 2>, // IntToPtr(M0)
+  [LLVMQualPointerType<2>, // IntToPtr(M0)
    llvm_i32_ty, // value to add or swap
    llvm_i32_ty, // ordering
    llvm_i32_ty, // scope
@@ -994,13 +994,12 @@ defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimAtomicIntrinsics = {
 
 // Data type for buffer resources (V#). Maybe, in the future, we can create a
 // similar one for textures (T#).
-class AMDGPUBufferRsrcTy<LLVMType data_ty = llvm_any_ty>
-  : LLVMQualPointerType<data_ty, 8>;
+def AMDGPUBufferRsrcTy : LLVMQualPointerType<8>;
 
 let TargetPrefix = "amdgcn" in {
 
 def int_amdgcn_make_buffer_rsrc : DefaultAttrsIntrinsic <
-  [AMDGPUBufferRsrcTy<llvm_i8_ty>],
+  [AMDGPUBufferRsrcTy],
   [llvm_anyptr_ty, // base
    llvm_i16_ty,    // stride (and swizzle control)
    llvm_i32_ty,    // NumRecords / extent
@@ -1073,7 +1072,7 @@ def int_amdgcn_raw_buffer_load : AMDGPURawBufferLoad;
 
 class AMDGPURawPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
   [data_ty],
-  [AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+  [AMDGPUBufferRsrcTy,         // rsrc(SGPR)
    llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
    llvm_i32_ty],                // auxiliary data (imm, cachepolicy (bit 0 = glc,
@@ -1103,7 +1102,7 @@ def int_amdgcn_struct_buffer_load : AMDGPUStructBufferLoad;
 
 class AMDGPUStructPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
   [data_ty],
-  [AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+  [AMDGPUBufferRsrcTy,          // rsrc(SGPR)
    llvm_i32_ty,                 // vindex(VGPR)
    llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1135,7 +1134,7 @@ def int_amdgcn_raw_buffer_store : AMDGPURawBufferStore;
 class AMDGPURawPtrBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
   [],
   [data_ty,                     // vdata(VGPR)
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+   AMDGPUBufferRsrcTy,          // rsrc(SGPR)
    llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
    llvm_i32_ty],                // auxiliary data (imm, cachepolicy (bit 0 = glc,
@@ -1167,7 +1166,7 @@ def int_amdgcn_struct_buffer_store : AMDGPUStructBufferStore;
 class AMDGPUStructPtrBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
   [],
   [data_ty,                     // vdata(VGPR)
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+   AMDGPUBufferRsrcTy,          // rsrc(SGPR)
    llvm_i32_ty,                 // vindex(VGPR)
    llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1218,7 +1217,7 @@ def int_amdgcn_raw_buffer_atomic_cmpswap : Intrinsic<
 class AMDGPURawPtrBufferAtomic<LLVMType data_ty = llvm_any_ty, bit NoRtn = false> : Intrinsic <
   !if(NoRtn, [], [data_ty]),
   [!if(NoRtn, data_ty, LLVMMatchType<0>),  // vdata(VGPR)
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+   AMDGPUBufferRsrcTy,          // rsrc(SGPR)
    llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
    llvm_i32_ty],                // cachepolicy(imm; bit 1 = slc)
@@ -1244,7 +1243,7 @@ def int_amdgcn_raw_ptr_buffer_atomic_cmpswap : Intrinsic<
   [llvm_anyint_ty],
   [LLVMMatchType<0>,  // src(VGPR)
    LLVMMatchType<0>,  // cmp(VGPR)
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+   AMDGPUBufferRsrcTy, // rsrc(SGPR)
    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
    llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
@@ -1293,7 +1292,7 @@ def int_amdgcn_struct_buffer_atomic_cmpswap : Intrinsic<
 class AMDGPUStructPtrBufferAtomic<LLVMType data_ty = llvm_any_ty, bit NoRtn = false> : Intrinsic <
   !if(NoRtn, [], [data_ty]),
   [!if(NoRtn, data_ty, LLVMMatchType<0>),  // vdata(VGPR)
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+   AMDGPUBufferRsrcTy,          // rsrc(SGPR)
    llvm_i32_ty,                 // vindex(VGPR)
    llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1317,7 +1316,7 @@ def int_amdgcn_struct_ptr_buffer_atomic_cmpswap : Intrinsic<
   [llvm_anyint_ty],
   [LLVMMatchType<0>,  // src(VGPR)
    LLVMMatchType<0>,  // cmp(VGPR)
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+   AMDGPUBufferRsrcTy, // rsrc(SGPR)
    llvm_i32_ty,       // vindex(VGPR)
    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1391,7 +1390,7 @@ def int_amdgcn_raw_tbuffer_load : DefaultAttrsIntrinsic <
 
 def int_amdgcn_raw_ptr_tbuffer_load : DefaultAttrsIntrinsic <
     [llvm_any_ty],      // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
-    [AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+    [AMDGPUBufferRsrcTy, // rsrc(SGPR)
     llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
      llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
      llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
@@ -1421,7 +1420,7 @@ def int_amdgcn_raw_tbuffer_store : DefaultAttrsIntrinsic <
 def int_amdgcn_raw_ptr_tbuffer_store : DefaultAttrsIntrinsic <
     [],
     [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
-     AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+     AMDGPUBufferRsrcTy, // rsrc(SGPR)
      llvm_i32_ty,    // offset(VGPR/imm, included in bounds checking and swizzling)
      llvm_i32_ty,    // soffset(SGPR/imm, excluded from bounds checking and swizzling)
      llvm_i32_ty,    // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
@@ -1450,7 +1449,7 @@ def int_amdgcn_struct_tbuffer_load : DefaultAttrsIntrinsic <
 
 def int_amdgcn_struct_ptr_tbuffer_load : DefaultAttrsIntrinsic <
     [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
-    [AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+    [AMDGPUBufferRsrcTy, // rsrc(SGPR)
      llvm_i32_ty,     // vindex(VGPR)
      llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
      llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1466,7 +1465,7 @@ def int_amdgcn_struct_ptr_tbuffer_load : DefaultAttrsIntrinsic <
 def int_amdgcn_struct_ptr_tbuffer_store : DefaultAttrsIntrinsic <
     [],
     [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
-     AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+     AMDGPUBufferRsrcTy, // rsrc(SGPR)
      llvm_i32_ty,    // vindex(VGPR)
      llvm_i32_ty,    // offset(VGPR/imm, included in bounds checking and swizzling)
      llvm_i32_ty,    // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1543,7 +1542,7 @@ def int_amdgcn_buffer_atomic_fadd : AMDGPUBufferAtomicFP;
 class AMDGPURawBufferLoadLDS : Intrinsic <
   [],
   [llvm_v4i32_ty,                      // rsrc(SGPR)
-   LLVMQualPointerType<llvm_i8_ty, 3>, // LDS base offset
+   LLVMQualPointerType<3>,             // LDS base offset
    llvm_i32_ty,                        // Data byte size: 1/2/4
    llvm_i32_ty,                        // voffset(VGPR, included in bounds checking and swizzling)
    llvm_i32_ty,                        // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1558,8 +1557,8 @@ def int_amdgcn_raw_buffer_load_lds : AMDGPURawBufferLoadLDS;
 
 class AMDGPURawPtrBufferLoadLDS : Intrinsic <
   [],
-  [AMDGPUBufferRsrcTy<llvm_i8_ty>,     // rsrc(SGPR)
-   LLVMQualPointerType<llvm_i8_ty, 3>, // LDS base offset
+  [AMDGPUBufferRsrcTy,                 // rsrc(SGPR)
+   LLVMQualPointerType<3>,             // LDS base offset
    llvm_i32_ty,                        // Data byte size: 1/2/4
    llvm_i32_ty,                        // voffset(VGPR, included in bounds checking and swizzling)
    llvm_i32_ty,                        // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1578,7 +1577,7 @@ def int_amdgcn_raw_ptr_buffer_load_lds : AMDGPURawPtrBufferLoadLDS;
 class AMDGPUStructBufferLoadLDS : Intrinsic <
   [],
   [llvm_v4i32_ty,                      // rsrc(SGPR)
-   LLVMQualPointerType<llvm_i8_ty, 3>, // LDS base offset
+   LLVMQualPointerType<3>,             // LDS base offset
    llvm_i32_ty,                        // Data byte size: 1/2/4
    llvm_i32_ty,                        // vindex(VGPR)
    llvm_i32_ty,                        // voffset(VGPR, included in bounds checking and swizzling)
@@ -1594,8 +1593,8 @@ def int_amdgcn_struct_buffer_load_lds : AMDGPUStructBufferLoadLDS;
 
 class AMDGPUStructPtrBufferLoadLDS : Intrinsic <
   [],
-  [AMDGPUBufferRsrcTy<llvm_i8_ty>,     // rsrc(SGPR)
-   LLVMQualPointerType<llvm_i8_ty, 3>, // LDS base offset
+  [AMDGPUBufferRsrcTy,                 // rsrc(SGPR)
+   LLVMQualPointerType<3>,             // LDS base offset
    llvm_i32_ty,                        // Data byte size: 1/2/4
    llvm_i32_ty,                        // vindex(VGPR)
    llvm_i32_ty,                        // voffset(VGPR, included in bounds checking and swizzling)
@@ -2208,8 +2207,8 @@ def int_amdgcn_perm :
 
 class AMDGPUGlobalLoadLDS : Intrinsic <
   [],
-  [LLVMQualPointerType<llvm_i8_ty, 1>, // Base global pointer to load from
-   LLVMQualPointerType<llvm_i8_ty, 3>, // LDS base pointer to store to
+  [LLVMQualPointerType<1>,             // Base global pointer to load from
+   LLVMQualPointerType<3>,             // LDS base pointer to store to
    llvm_i32_ty,                        // Data byte size: 1/2/4
    llvm_i32_ty,                        // imm offset (applied to both global and LDS address)
    llvm_i32_ty],                       // auxiliary data (imm, cachepolicy (bit 0 = glc/sc0,
@@ -2624,7 +2623,7 @@ def int_amdgcn_global_atomic_fadd_v2bf16 : AMDGPUGlobalAtomicRtn<llvm_v2i16_ty>;
 def int_amdgcn_flat_atomic_fadd_v2bf16   : AMDGPUGlobalAtomicRtn<llvm_v2i16_ty>;
 def int_amdgcn_ds_fadd_v2bf16 : DefaultAttrsIntrinsic<
     [llvm_v2i16_ty],
-    [LLVMQualPointerType<llvm_v2i16_ty, 3>, llvm_v2i16_ty],
+    [LLVMQualPointerType<3>, llvm_v2i16_ty],
     [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>,
     ClangBuiltin<"__builtin_amdgcn_ds_atomic_fadd_v2bf16">;
 

diff --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td
index f3b1a0c5d282d5..11b9877091a8ed 100644
--- a/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -702,13 +702,13 @@ def int_arm_neon_vld4 : DefaultAttrsIntrinsic<
 
 def int_arm_neon_vld1x2 : DefaultAttrsIntrinsic<
     [llvm_anyvector_ty, LLVMMatchType<0>],
-    [LLVMAnyPointerType<LLVMMatchType<0>>], [IntrReadMem, IntrArgMemOnly]>;
+    [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;
 def int_arm_neon_vld1x3 : DefaultAttrsIntrinsic<
     [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
-    [LLVMAnyPointerType<LLVMMatchType<0>>], [IntrReadMem, IntrArgMemOnly]>;
+    [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;
 def int_arm_neon_vld1x4 : DefaultAttrsIntrinsic<
     [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
-    [LLVMAnyPointerType<LLVMMatchType<0>>], [IntrReadMem, IntrArgMemOnly]>;
+    [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;
 
 // Vector load N-element structure to one lane.
 // Source operands are: the address, the N input vectors (since only one

diff --git a/llvm/include/llvm/IR/IntrinsicsHexagon.td b/llvm/include/llvm/IR/IntrinsicsHexagon.td
index 847197ce28b939..67b873d16cb5ae 100644
--- a/llvm/include/llvm/IR/IntrinsicsHexagon.td
+++ b/llvm/include/llvm/IR/IntrinsicsHexagon.td
@@ -125,30 +125,27 @@ Hexagon_mem_memsisisi_Intrinsic<"circ_stb">;
 def int_hexagon_prefetch :
 Hexagon_Intrinsic<"HEXAGON_prefetch", [], [llvm_ptr_ty], []>;
 
-def llvm_ptr32_ty : LLVMPointerType<llvm_i32_ty>;
-def llvm_ptr64_ty : LLVMPointerType<llvm_i64_ty>;
-
 // Mark locked loads as read/write to prevent any accidental reordering.
 // These don't use Hexagon_Intrinsic, because they are not nosync, and as such
 // cannot use default attributes.
 let TargetPrefix = "hexagon" in {
   def int_hexagon_L2_loadw_locked :
   ClangBuiltin<"__builtin_HEXAGON_L2_loadw_locked">,
-  Intrinsic<[llvm_i32_ty], [llvm_ptr32_ty],
+  Intrinsic<[llvm_i32_ty], [llvm_ptr_ty],
         [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
   def int_hexagon_L4_loadd_locked :
   ClangBuiltin<"__builtin__HEXAGON_L4_loadd_locked">,
-  Intrinsic<[llvm_i64_ty], [llvm_ptr64_ty],
+  Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
         [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
 
   def int_hexagon_S2_storew_locked :
   ClangBuiltin<"__builtin_HEXAGON_S2_storew_locked">,
   Intrinsic<[llvm_i32_ty],
-        [llvm_ptr32_ty, llvm_i32_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
+        [llvm_ptr_ty, llvm_i32_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
   def int_hexagon_S4_stored_locked :
   ClangBuiltin<"__builtin_HEXAGON_S4_stored_locked">,
   Intrinsic<[llvm_i32_ty],
-        [llvm_ptr64_ty, llvm_i64_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
+        [llvm_ptr_ty, llvm_i64_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
 }
 
 def int_hexagon_vmemcpy : Hexagon_Intrinsic<"hexagon_vmemcpy",
@@ -266,7 +263,7 @@ Hexagon_v64i32_v64i32v32i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc_128B">;
 class Hexagon_pred_vload_imm<LLVMType ValTy>
   : Hexagon_NonGCC_Intrinsic<
       [ValTy],
-      [llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty],
+      [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty],
       [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
        ImmArg<ArgIndex<2>>]>;
 
@@ -284,8 +281,8 @@ def int_hexagon_V6_vL32b_nt_npred_ai_128B:  Hexagon_pred_vload_imm_128B;
 
 class Hexagom_pred_vload_upd<LLVMType ValTy, bit TakesImm>
   : Hexagon_NonGCC_Intrinsic<
-      [ValTy, LLVMPointerType<ValTy>],
-      [llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty],
+      [ValTy, llvm_ptr_ty],
+      [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty],
       !if(TakesImm,
           [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
            ImmArg<ArgIndex<2>>],
@@ -318,7 +315,7 @@ def int_hexagon_V6_vL32b_nt_npred_ppu_128B: Hexagom_pred_vload_upd_128B<0>;
 class Hexagon_pred_vstore_imm<LLVMType ValTy>
   : Hexagon_NonGCC_Intrinsic<
       [],
-      [llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty, ValTy],
+      [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty, ValTy],
       [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
        ImmArg<ArgIndex<2>>]>;
 
@@ -340,8 +337,8 @@ def int_hexagon_V6_vS32b_nt_npred_ai_128B:  Hexagon_pred_vstore_imm_128B;
 
 class Hexagon_pred_vstore_upd<LLVMType ValTy, bit TakesImm>
   : Hexagon_NonGCC_Intrinsic<
-      [LLVMPointerType<ValTy>],
-      [llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty, ValTy],
+      [llvm_ptr_ty],
+      [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty, ValTy],
       !if(TakesImm,
           [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
            ImmArg<ArgIndex<2>>],

diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index 914f6c36a3e4a2..6fd8e80013cee5 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -31,11 +31,8 @@
 //   * llvm.nvvm.max.ull --> ibid.
 //   * llvm.nvvm.h2f     --> llvm.convert.to.fp16.f32
 
-def llvm_global_i8ptr_ty  : LLVMQualPointerType<llvm_i8_ty, 1>;  // (global)i8*
-def llvm_shared_i8ptr_ty  : LLVMQualPointerType<llvm_i8_ty, 3>;  // (shared)i8*
-def llvm_i64ptr_ty        : LLVMPointerType<llvm_i64_ty>;        // i64*
-def llvm_any_i64ptr_ty    : LLVMAnyPointerType<llvm_i64_ty>;     // (space)i64*
-def llvm_shared_i64ptr_ty : LLVMQualPointerType<llvm_i64_ty, 3>; // (shared)i64*
+def llvm_global_ptr_ty  : LLVMQualPointerType<1>;  // (global)ptr
+def llvm_shared_ptr_ty  : LLVMQualPointerType<3>;  // (shared)ptr
 
 //
 // MISC
@@ -1293,19 +1290,19 @@ let TargetPrefix = "nvvm" in {
 
 // Atomics not available as llvm intrinsics.
   def int_nvvm_atomic_load_inc_32 : Intrinsic<[llvm_i32_ty],
-          [LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
+          [llvm_anyptr_ty, llvm_i32_ty],
                                       [IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
   def int_nvvm_atomic_load_dec_32 : Intrinsic<[llvm_i32_ty],
-          [LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
+          [llvm_anyptr_ty, llvm_i32_ty],
                                       [IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
 
   class SCOPED_ATOMIC2_impl<LLVMType elty>
         : Intrinsic<[elty],
-          [LLVMAnyPointerType<LLVMMatchType<0>>, LLVMMatchType<0>],
+          [llvm_anyptr_ty, LLVMMatchType<0>],
           [IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
   class SCOPED_ATOMIC3_impl<LLVMType elty>
         : Intrinsic<[elty],
-          [LLVMAnyPointerType<LLVMMatchType<0>>, LLVMMatchType<0>,
+          [llvm_anyptr_ty, LLVMMatchType<0>,
            LLVMMatchType<0>],
           [IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
 
@@ -1388,23 +1385,23 @@ let TargetPrefix = "nvvm" in {
 // Async Copy
 def int_nvvm_cp_async_mbarrier_arrive :
     ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive">,
-    Intrinsic<[],[llvm_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_cp_async_mbarrier_arrive_shared :
     ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive_shared">,
-    Intrinsic<[],[llvm_shared_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_cp_async_mbarrier_arrive_noinc :
     ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive_noinc">,
-    Intrinsic<[],[llvm_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_cp_async_mbarrier_arrive_noinc_shared :
     ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive_noinc_shared">,
-    Intrinsic<[],[llvm_shared_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 
 multiclass CP_ASYNC_SHARED_GLOBAL<string n, string cc> {
-  def NAME: Intrinsic<[],[llvm_shared_i8ptr_ty, llvm_global_i8ptr_ty],
+  def NAME: Intrinsic<[],[llvm_shared_ptr_ty, llvm_global_ptr_ty],
         [IntrArgMemOnly, IntrNoCallback, NoAlias<ArgIndex<0>>, NoAlias<ArgIndex<1>>,
         WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>],
         "llvm.nvvm.cp.async." # cc # ".shared.global." # n>;
-  def _s: Intrinsic<[],[llvm_shared_i8ptr_ty, llvm_global_i8ptr_ty, llvm_i32_ty],
+  def _s: Intrinsic<[],[llvm_shared_ptr_ty, llvm_global_ptr_ty, llvm_i32_ty],
         [IntrArgMemOnly, IntrNoCallback, NoAlias<ArgIndex<0>>, NoAlias<ArgIndex<1>>,
         WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>],
         "llvm.nvvm.cp.async." # cc # ".shared.global." # n # ".s">;
@@ -1429,54 +1426,54 @@ def int_nvvm_cp_async_wait_all :
 
 // mbarrier
 def int_nvvm_mbarrier_init : ClangBuiltin<"__nvvm_mbarrier_init">,
-    Intrinsic<[],[llvm_i64ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[],[llvm_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_init_shared :
     ClangBuiltin<"__nvvm_mbarrier_init_shared">,
-    Intrinsic<[],[llvm_shared_i64ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[],[llvm_shared_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
 
 def int_nvvm_mbarrier_inval : ClangBuiltin<"__nvvm_mbarrier_inval">,
-    Intrinsic<[],[llvm_i64ptr_ty],
+    Intrinsic<[],[llvm_ptr_ty],
     [IntrConvergent, IntrWriteMem, IntrArgMemOnly, IntrNoCallback,
     WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>]>;
 def int_nvvm_mbarrier_inval_shared :
     ClangBuiltin<"__nvvm_mbarrier_inval_shared">,
-    Intrinsic<[],[llvm_shared_i64ptr_ty],
+    Intrinsic<[],[llvm_shared_ptr_ty],
     [IntrConvergent, IntrWriteMem, IntrArgMemOnly, IntrNoCallback,
     WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>]>;
 
 def int_nvvm_mbarrier_arrive : ClangBuiltin<"__nvvm_mbarrier_arrive">,
-    Intrinsic<[llvm_i64_ty],[llvm_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i64_ty],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_arrive_shared :
     ClangBuiltin<"__nvvm_mbarrier_arrive_shared">,
-    Intrinsic<[llvm_i64_ty],[llvm_shared_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_arrive_noComplete :
     ClangBuiltin<"__nvvm_mbarrier_arrive_noComplete">,
-    Intrinsic<[llvm_i64_ty],[llvm_i64ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i64_ty],[llvm_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_arrive_noComplete_shared :
     ClangBuiltin<"__nvvm_mbarrier_arrive_noComplete_shared">,
-    Intrinsic<[llvm_i64_ty],[llvm_shared_i64ptr_ty,
+    Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty,
     llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
 
 def int_nvvm_mbarrier_arrive_drop :
     ClangBuiltin<"__nvvm_mbarrier_arrive_drop">,
-    Intrinsic<[llvm_i64_ty],[llvm_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i64_ty],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_arrive_drop_shared :
     ClangBuiltin<"__nvvm_mbarrier_arrive_drop_shared">,
-    Intrinsic<[llvm_i64_ty],[llvm_shared_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_arrive_drop_noComplete :
     ClangBuiltin<"__nvvm_mbarrier_arrive_drop_noComplete">,
-    Intrinsic<[llvm_i64_ty],[llvm_i64ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i64_ty],[llvm_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_arrive_drop_noComplete_shared :
     ClangBuiltin<"__nvvm_mbarrier_arrive_drop_noComplete_shared">,
-    Intrinsic<[llvm_i64_ty],[llvm_shared_i64ptr_ty,
+    Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty,
     llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
 
 def int_nvvm_mbarrier_test_wait :
     ClangBuiltin<"__nvvm_mbarrier_test_wait">,
-    Intrinsic<[llvm_i1_ty],[llvm_i64ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i1_ty],[llvm_ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_test_wait_shared :
     ClangBuiltin<"__nvvm_mbarrier_test_wait_shared">,
-    Intrinsic<[llvm_i1_ty],[llvm_shared_i64ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i1_ty],[llvm_shared_ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;
 
 def int_nvvm_mbarrier_pending_count :
     ClangBuiltin<"__nvvm_mbarrier_pending_count">,
@@ -1485,30 +1482,30 @@ def int_nvvm_mbarrier_pending_count :
 // Generated within nvvm. Use for ldu on sm_20 or later.  Second arg is the
 // pointer's alignment.
 def int_nvvm_ldu_global_i : Intrinsic<[llvm_anyint_ty],
-  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [llvm_anyptr_ty, llvm_i32_ty],
   [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
   "llvm.nvvm.ldu.global.i">;
 def int_nvvm_ldu_global_f : Intrinsic<[llvm_anyfloat_ty],
-  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [llvm_anyptr_ty, llvm_i32_ty],
   [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
   "llvm.nvvm.ldu.global.f">;
 def int_nvvm_ldu_global_p : Intrinsic<[llvm_anyptr_ty],
-  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [llvm_anyptr_ty, llvm_i32_ty],
   [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
   "llvm.nvvm.ldu.global.p">;
 
 // Generated within nvvm. Use for ldg on sm_35 or later.  Second arg is the
 // pointer's alignment.
 def int_nvvm_ldg_global_i : Intrinsic<[llvm_anyint_ty],
-  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [llvm_anyptr_ty, llvm_i32_ty],
   [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
   "llvm.nvvm.ldg.global.i">;
 def int_nvvm_ldg_global_f : Intrinsic<[llvm_anyfloat_ty],
-  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [llvm_anyptr_ty, llvm_i32_ty],
   [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
   "llvm.nvvm.ldg.global.f">;
 def int_nvvm_ldg_global_p : Intrinsic<[llvm_anyptr_ty],
-  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [llvm_anyptr_ty, llvm_i32_ty],
   [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
   "llvm.nvvm.ldg.global.p">;
 
@@ -1571,7 +1568,7 @@ def int_nvvm_move_ptr : Intrinsic<[llvm_anyptr_ty], [llvm_anyptr_ty],
 
 // For getting the handle from a texture or surface variable
 def int_nvvm_texsurf_handle
-  : Intrinsic<[llvm_i64_ty], [llvm_metadata_ty, llvm_any_i64ptr_ty],
+  : Intrinsic<[llvm_i64_ty], [llvm_metadata_ty, llvm_anyptr_ty],
               [IntrNoMem], "llvm.nvvm.texsurf.handle">;
 def int_nvvm_texsurf_handle_internal
   : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty],
@@ -4697,7 +4694,7 @@ def int_nvvm_mapa
               [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
               "llvm.nvvm.mapa">;
 def int_nvvm_mapa_shared_cluster
-  : DefaultAttrsIntrinsic<[llvm_shared_i8ptr_ty], [llvm_shared_i8ptr_ty, llvm_i32_ty],
+  : DefaultAttrsIntrinsic<[llvm_shared_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
               [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
               "llvm.nvvm.mapa.shared.cluster">;
 def int_nvvm_getctarank
@@ -4705,7 +4702,7 @@ def int_nvvm_getctarank
               [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
               "llvm.nvvm.getctarank">;
 def int_nvvm_getctarank_shared_cluster
-  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_shared_i8ptr_ty],
+  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_shared_ptr_ty],
               [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
               "llvm.nvvm.getctarank.shared.cluster">;
 def int_nvvm_is_explicit_cluster

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index cfadbd6d2fa49c..7b7df970b9e6c2 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -146,8 +146,7 @@ let TargetPrefix = "riscv" in {
   // Input: (pointer, vl)
   class RISCVUSMLoad
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                    [LLVMPointerType<LLVMMatchType<0>>,
-                     llvm_anyint_ty],
+                    [llvm_ptr_ty, llvm_anyint_ty],
                     [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
     let VLOperand = 1;
   }
@@ -155,9 +154,7 @@ let TargetPrefix = "riscv" in {
   // Input: (passthru, pointer, vl)
   class RISCVUSLoad
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                    [LLVMMatchType<0>,
-                     LLVMPointerType<LLVMMatchType<0>>,
-                     llvm_anyint_ty],
+                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty],
                     [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
     let VLOperand = 2;
   }
@@ -168,8 +165,7 @@ let TargetPrefix = "riscv" in {
   // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
   class RISCVUSLoadFF
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
-                    [LLVMMatchType<0>,
-                     LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
+                    [LLVMMatchType<0>, llvm_ptr_ty, LLVMMatchType<1>],
                     [NoCapture<ArgIndex<1>>]>,
                     RISCVVIntrinsic {
     let VLOperand = 2;
@@ -178,8 +174,7 @@ let TargetPrefix = "riscv" in {
   // Input: (maskedoff, pointer, mask, vl, policy)
   class RISCVUSLoadMasked
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
-                    [LLVMMatchType<0>,
-                     LLVMPointerType<LLVMMatchType<0>>,
+                    [LLVMMatchType<0>, llvm_ptr_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty, LLVMMatchType<1>],
                     [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem]>,
@@ -193,8 +188,7 @@ let TargetPrefix = "riscv" in {
   // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
   class RISCVUSLoadFFMasked
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
-                    [LLVMMatchType<0>,
-                     LLVMPointerType<LLVMMatchType<0>>,
+                    [LLVMMatchType<0>, llvm_ptr_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      LLVMMatchType<1>, LLVMMatchType<1>],
                     [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
@@ -204,8 +198,7 @@ let TargetPrefix = "riscv" in {
   // Input: (passthru, pointer, stride, vl)
   class RISCVSLoad
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                    [LLVMMatchType<0>,
-                     LLVMPointerType<LLVMMatchType<0>>,
+                    [LLVMMatchType<0>, llvm_ptr_ty,
                      llvm_anyint_ty, LLVMMatchType<1>],
                     [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
     let VLOperand = 3;
@@ -214,8 +207,7 @@ let TargetPrefix = "riscv" in {
   // Input: (maskedoff, pointer, stride, mask, vl, policy)
   class RISCVSLoadMasked
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
-                    [LLVMMatchType<0>,
-                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
+                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                      LLVMMatchType<1>],
                     [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
@@ -226,8 +218,7 @@ let TargetPrefix = "riscv" in {
   // Input: (passthru, pointer, index, vl)
   class RISCVILoad
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                    [LLVMMatchType<0>,
-                     LLVMPointerType<LLVMMatchType<0>>,
+                    [LLVMMatchType<0>, llvm_ptr_ty,
                      llvm_anyvector_ty, llvm_anyint_ty],
                     [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
     let VLOperand = 3;
@@ -236,8 +227,7 @@ let TargetPrefix = "riscv" in {
   // Input: (maskedoff, pointer, index, mask, vl, policy)
   class RISCVILoadMasked
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
-                    [LLVMMatchType<0>,
-                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
+                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                      LLVMMatchType<2>],
                     [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
@@ -248,9 +238,7 @@ let TargetPrefix = "riscv" in {
   // Input: (vector_in, pointer, vl)
   class RISCVUSStore
         : DefaultAttrsIntrinsic<[],
-                    [llvm_anyvector_ty,
-                     LLVMPointerType<LLVMMatchType<0>>,
-                     llvm_anyint_ty],
+                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty],
                     [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
     let VLOperand = 2;
   }
@@ -258,8 +246,7 @@ let TargetPrefix = "riscv" in {
   // Input: (vector_in, pointer, mask, vl)
   class RISCVUSStoreMasked
         : DefaultAttrsIntrinsic<[],
-                    [llvm_anyvector_ty,
-                     LLVMPointerType<LLVMMatchType<0>>,
+                    [llvm_anyvector_ty, llvm_ptr_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty],
                     [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
@@ -269,8 +256,7 @@ let TargetPrefix = "riscv" in {
   // Input: (vector_in, pointer, stride, vl)
   class RISCVSStore
         : DefaultAttrsIntrinsic<[],
-                    [llvm_anyvector_ty,
-                     LLVMPointerType<LLVMMatchType<0>>,
+                    [llvm_anyvector_ty, llvm_ptr_ty,
                      llvm_anyint_ty, LLVMMatchType<1>],
                     [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
     let VLOperand = 3;
@@ -279,8 +265,7 @@ let TargetPrefix = "riscv" in {
   // Input: (vector_in, pointer, stirde, mask, vl)
   class RISCVSStoreMasked
         : DefaultAttrsIntrinsic<[],
-                    [llvm_anyvector_ty,
-                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
+                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                     [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
     let VLOperand = 4;
@@ -289,8 +274,7 @@ let TargetPrefix = "riscv" in {
   // Input: (vector_in, pointer, index, vl)
   class RISCVIStore
         : DefaultAttrsIntrinsic<[],
-                    [llvm_anyvector_ty,
-                     LLVMPointerType<LLVMMatchType<0>>,
+                    [llvm_anyvector_ty, llvm_ptr_ty,
                      llvm_anyint_ty, llvm_anyint_ty],
                     [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
     let VLOperand = 3;
@@ -299,8 +283,7 @@ let TargetPrefix = "riscv" in {
   // Input: (vector_in, pointer, index, mask, vl)
   class RISCVIStoreMasked
         : DefaultAttrsIntrinsic<[],
-                    [llvm_anyvector_ty,
-                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
+                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                     [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
     let VLOperand = 4;

diff --git a/llvm/include/llvm/IR/IntrinsicsSystemZ.td b/llvm/include/llvm/IR/IntrinsicsSystemZ.td
index d881a1126bf2ce..9d21f3eb5352ec 100644
--- a/llvm/include/llvm/IR/IntrinsicsSystemZ.td
+++ b/llvm/include/llvm/IR/IntrinsicsSystemZ.td
@@ -222,7 +222,7 @@ let TargetPrefix = "s390" in {
   def int_s390_etnd : ClangBuiltin<"__builtin_tx_nesting_depth">,
                       Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
 
-  def int_s390_ntstg : Intrinsic<[], [llvm_i64_ty, llvm_ptr64_ty],
+  def int_s390_ntstg : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty],
                                  [IntrArgMemOnly, IntrWriteMem]>;
 
   def int_s390_ppa_txassist : ClangBuiltin<"__builtin_tx_assist">,

diff --git a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
index d6a14f99b6942a..b93a5e7be1b51e 100644
--- a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
+++ b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -12,7 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 // Type definition for a table in an intrinsic
-def llvm_table_ty : LLVMQualPointerType<llvm_i8_ty, 1>;
+def llvm_table_ty : LLVMQualPointerType<1>;
 
 let TargetPrefix = "wasm" in {  // All intrinsics start with "llvm.wasm.".
 
@@ -144,18 +144,18 @@ def int_wasm_lsda : DefaultAttrsIntrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
 // These don't use default attributes, because they are not nosync.
 def int_wasm_memory_atomic_wait32 :
   Intrinsic<[llvm_i32_ty],
-            [LLVMPointerType<llvm_i32_ty>, llvm_i32_ty, llvm_i64_ty],
+            [llvm_ptr_ty, llvm_i32_ty, llvm_i64_ty],
             [IntrInaccessibleMemOrArgMemOnly, ReadOnly<ArgIndex<0>>,
              NoCapture<ArgIndex<0>>, IntrHasSideEffects],
             "", [SDNPMemOperand]>;
 def int_wasm_memory_atomic_wait64 :
   Intrinsic<[llvm_i32_ty],
-            [LLVMPointerType<llvm_i64_ty>, llvm_i64_ty, llvm_i64_ty],
+            [llvm_ptr_ty, llvm_i64_ty, llvm_i64_ty],
             [IntrInaccessibleMemOrArgMemOnly, ReadOnly<ArgIndex<0>>,
              NoCapture<ArgIndex<0>>, IntrHasSideEffects],
             "", [SDNPMemOperand]>;
 def int_wasm_memory_atomic_notify:
-  Intrinsic<[llvm_i32_ty], [LLVMPointerType<llvm_i32_ty>, llvm_i32_ty],
+  Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
             [IntrInaccessibleMemOnly, NoCapture<ArgIndex<0>>,
              IntrHasSideEffects],
             "", [SDNPMemOperand]>;

diff --git a/llvm/include/llvm/IR/IntrinsicsX86.td b/llvm/include/llvm/IR/IntrinsicsX86.td
index ab735daee93dd4..ed10a84835ac13 100644
--- a/llvm/include/llvm/IR/IntrinsicsX86.td
+++ b/llvm/include/llvm/IR/IntrinsicsX86.td
@@ -2558,7 +2558,7 @@ let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
       DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
 
   def int_x86_mmx_movnt_dq : ClangBuiltin<"__builtin_ia32_movntq">,
-              Intrinsic<[], [llvm_ptrx86mmx_ty, llvm_x86mmx_ty], []>;
+              Intrinsic<[], [llvm_ptr_ty, llvm_x86mmx_ty], []>;
 
   def int_x86_mmx_palignr_b : ClangBuiltin<"__builtin_ia32_palignr">,
       DefaultAttrsIntrinsic<[llvm_x86mmx_ty],

diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp
index fb29918e1fa36d..17df2c73ef4d86 100644
--- a/llvm/lib/IR/Function.cpp
+++ b/llvm/lib/IR/Function.cpp
@@ -1167,22 +1167,17 @@ static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
     return;
   case IIT_EXTERNREF:
     OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 10));
-    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0));
     return;
   case IIT_FUNCREF:
     OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 20));
-    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 8));
     return;
   case IIT_PTR:
     OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 0));
-    DecodeIITType(NextElt, Infos, Info, OutputTable);
     return;
-  case IIT_ANYPTR: {  // [ANYPTR addrspace, subtype]
+  case IIT_ANYPTR: // [ANYPTR addrspace]
     OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer,
                                              Infos[NextElt++]));
-    DecodeIITType(NextElt, Infos, Info, OutputTable);
     return;
-  }
   case IIT_ARG: {
     unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
     OutputTable.push_back(IITDescriptor::get(IITDescriptor::Argument, ArgInfo));
@@ -1352,8 +1347,7 @@ static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
     return VectorType::get(DecodeFixedType(Infos, Tys, Context),
                            D.Vector_Width);
   case IITDescriptor::Pointer:
-    return PointerType::get(DecodeFixedType(Infos, Tys, Context),
-                            D.Pointer_AddressSpace);
+    return PointerType::get(Context, D.Pointer_AddressSpace);
   case IITDescriptor::Struct: {
     SmallVector<Type *, 8> Elts;
     for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
@@ -1530,33 +1524,7 @@ static bool matchIntrinsicType(
     }
     case IITDescriptor::Pointer: {
       PointerType *PT = dyn_cast<PointerType>(Ty);
-      if (!PT || PT->getAddressSpace() != D.Pointer_AddressSpace)
-        return true;
-      if (!PT->isOpaque()) {
-        /* Manually consume a pointer to empty struct descriptor, which is
-         * used for externref. We don't want to enforce that the struct is
-         * anonymous in this case. (This renders externref intrinsics
-         * non-unique, but this will go away with opaque pointers anyway.) */
-        if (Infos.front().Kind == IITDescriptor::Struct &&
-            Infos.front().Struct_NumElements == 0) {
-          Infos = Infos.slice(1);
-          return false;
-        }
-        return matchIntrinsicType(PT->getNonOpaquePointerElementType(), Infos,
-                                  ArgTys, DeferredChecks, IsDeferredCheck);
-      }
-      // Consume IIT descriptors relating to the pointer element type.
-      // FIXME: Intrinsic type matching of nested single value types or even
-      // aggregates doesn't work properly with opaque pointers but hopefully
-      // doesn't happen in practice.
-      while (Infos.front().Kind == IITDescriptor::Pointer ||
-             Infos.front().Kind == IITDescriptor::Vector)
-        Infos = Infos.slice(1);
-      assert((Infos.front().Kind != IITDescriptor::Argument ||
-              Infos.front().getArgumentKind() == IITDescriptor::AK_MatchType) &&
-             "Unsupported polymorphic pointer type with opaque pointer");
-      Infos = Infos.slice(1);
-      return false;
+      return !PT || PT->getAddressSpace() != D.Pointer_AddressSpace;
     }
 
     case IITDescriptor::Struct: {

diff --git a/llvm/test/TableGen/intrinsic-pointer-to-any.td b/llvm/test/TableGen/intrinsic-pointer-to-any.td
deleted file mode 100644
index 1b090b98cef451..00000000000000
--- a/llvm/test/TableGen/intrinsic-pointer-to-any.td
+++ /dev/null
@@ -1,12 +0,0 @@
-// RUN: llvm-tblgen -gen-intrinsic-impl -I %p/../../include %s -DTEST_INTRINSICS_SUPPRESS_DEFS | FileCheck %s
-
-// This test is validating that it an Intrinsic with an LLVMPointerType to
-// llvm_any_ty still properly work after r363233. That patch rewrote the
-// substitution handling code in the Intrinsic Emitter, and didn't consider this
-// case, so TableGen would hit an assertion in EncodeFixedType that was checking
-// to ensure that the substitution being processed was correctly replaced.
-
-include "llvm/IR/Intrinsics.td"
-
-def int_has_ptr_to_any : Intrinsic<[LLVMPointerType<llvm_any_ty>, llvm_i8_ty]>;
-// CHECK: /* 0 */ 21, 14, 15, 0, 2, 0


        


More information about the llvm-commits mailing list