[llvm] 4cbab1e - SeparateConstOffsetFromGEP: Update tests to use opaque pointers

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 27 18:08:01 PST 2022


Author: Matt Arsenault
Date: 2022-11-27T20:53:52-05:00
New Revision: 4cbab1e5ff733d3006d4f9542fdfa0c638c3ac20

URL: https://github.com/llvm/llvm-project/commit/4cbab1e5ff733d3006d4f9542fdfa0c638c3ac20
DIFF: https://github.com/llvm/llvm-project/commit/4cbab1e5ff733d3006d4f9542fdfa0c638c3ac20.diff

LOG: SeparateConstOffsetFromGEP: Update tests to use opaque pointers

NVPTX/split-gep.ll needed a check updated for a replaced bitcast, since pointer bitcasts fold away with opaque pointers.
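
For context, the mechanical change throughout these tests: with opaque
pointers every pointee annotation becomes plain "ptr", and pointer
bitcasts fold away entirely. A minimal hand-written before/after sketch
(the %struct.S and @load_field names are illustrative, not taken from
this diff):

  %struct.S = type { i32, float }

  ; typed pointers (before)
  define float @load_field(%struct.S* %p) {
    %gep = getelementptr inbounds %struct.S, %struct.S* %p, i64 0, i32 1
    %v = load float, float* %gep
    ret float %v
  }

  ; opaque pointers (after): the same GEP, with untyped pointers
  define float @load_field(ptr %p) {
    %gep = getelementptr inbounds %struct.S, ptr %p, i64 0, i32 1
    %v = load float, ptr %gep
    ret float %v
  }

The folded casts are also why the "uglygep" checks in NVPTX/split-gep.ll
change below: the old bitcast-to-i8*, byte-offset GEP, bitcast-back
sequence collapses into a single getelementptr of i8 on ptr.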

Added: 
    

Modified: 
    llvm/test/Transforms/SeparateConstOffsetFromGEP/AArch64/split-gep.ll
    llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn-addrspace-addressing-modes.ll
    llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
    llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
    llvm/test/Transforms/SeparateConstOffsetFromGEP/RISCV/split-gep.ll
    llvm/test/Transforms/SeparateConstOffsetFromGEP/crash-in-unreachable-code.ll
    llvm/test/Transforms/SeparateConstOffsetFromGEP/pr45371-find-either-reset.ll
    llvm/test/Transforms/SeparateConstOffsetFromGEP/test-add-sub-separation.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/AArch64/split-gep.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AArch64/split-gep.ll
index 0e91465e67ddd..77f3eb7d42aef 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/AArch64/split-gep.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AArch64/split-gep.ll
@@ -2,7 +2,7 @@
 
 %struct = type { i32, i32, i32 }
 
-define i32 @test1(%struct* %ptr, i64 %idx) {
+define i32 @test1(ptr %ptr, i64 %idx) {
 ; CHECK-LABEL: test1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #12
@@ -16,14 +16,14 @@ define i32 @test1(%struct* %ptr, i64 %idx) {
 ; CHECK-NEXT:    ldr w8, [x8, #8]
 ; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
- %gep.1 = getelementptr %struct, %struct* %ptr, i64 %idx, i32 1
- %lv.1 = load i32, i32* %gep.1
+ %gep.1 = getelementptr %struct, ptr %ptr, i64 %idx, i32 1
+ %lv.1 = load i32, ptr %gep.1
  %c = icmp slt i32 %lv.1, 0
  br i1 %c, label %then, label %else
 
 then:
- %gep.2 = getelementptr %struct, %struct* %ptr, i64 %idx, i32 2
- %lv.2 = load i32, i32* %gep.2
+ %gep.2 = getelementptr %struct, ptr %ptr, i64 %idx, i32 2
+ %lv.2 = load i32, ptr %gep.2
  %res = add i32 %lv.1, %lv.2
  ret i32 %res
 

diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn-addrspace-addressing-modes.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn-addrspace-addressing-modes.ll
index 8848b49aca64e..5cb8cbd05a7ae 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn-addrspace-addressing-modes.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn-addrspace-addressing-modes.ll
@@ -5,30 +5,30 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
 @array = internal addrspace(4) constant [4096 x [32 x float]] zeroinitializer, align 4
 
 ; IR-LABEL: @sum_of_array(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [4096 x [32 x float]], [4096 x [32 x float]] addrspace(4)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr inbounds float, float addrspace(4)* [[BASE_PTR]], i64 1
-; IR: getelementptr inbounds float, float addrspace(4)* [[BASE_PTR]], i64 32
-; IR: getelementptr inbounds float, float addrspace(4)* [[BASE_PTR]], i64 33
-define amdgpu_kernel void @sum_of_array(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [4096 x [32 x float]], ptr addrspace(4) @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr inbounds float, ptr addrspace(4) [[BASE_PTR]], i64 1
+; IR: getelementptr inbounds float, ptr addrspace(4) [[BASE_PTR]], i64 32
+; IR: getelementptr inbounds float, ptr addrspace(4) [[BASE_PTR]], i64 33
+define amdgpu_kernel void @sum_of_array(i32 %x, i32 %y, ptr addrspace(1) nocapture %output) {
   %tmp = sext i32 %y to i64
   %tmp1 = sext i32 %x to i64
-  %tmp2 = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(4)* @array, i64 0, i64 %tmp1, i64 %tmp
-  %tmp4 = load float, float addrspace(4)* %tmp2, align 4
+  %tmp2 = getelementptr inbounds [4096 x [32 x float]], ptr addrspace(4) @array, i64 0, i64 %tmp1, i64 %tmp
+  %tmp4 = load float, ptr addrspace(4) %tmp2, align 4
   %tmp5 = fadd float %tmp4, 0.000000e+00
   %tmp6 = add i32 %y, 1
   %tmp7 = sext i32 %tmp6 to i64
-  %tmp8 = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(4)* @array, i64 0, i64 %tmp1, i64 %tmp7
-  %tmp10 = load float, float addrspace(4)* %tmp8, align 4
+  %tmp8 = getelementptr inbounds [4096 x [32 x float]], ptr addrspace(4) @array, i64 0, i64 %tmp1, i64 %tmp7
+  %tmp10 = load float, ptr addrspace(4) %tmp8, align 4
   %tmp11 = fadd float %tmp5, %tmp10
   %tmp12 = add i32 %x, 1
   %tmp13 = sext i32 %tmp12 to i64
-  %tmp14 = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(4)* @array, i64 0, i64 %tmp13, i64 %tmp
-  %tmp16 = load float, float addrspace(4)* %tmp14, align 4
+  %tmp14 = getelementptr inbounds [4096 x [32 x float]], ptr addrspace(4) @array, i64 0, i64 %tmp13, i64 %tmp
+  %tmp16 = load float, ptr addrspace(4) %tmp14, align 4
   %tmp17 = fadd float %tmp11, %tmp16
-  %tmp18 = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(4)* @array, i64 0, i64 %tmp13, i64 %tmp7
-  %tmp20 = load float, float addrspace(4)* %tmp18, align 4
+  %tmp18 = getelementptr inbounds [4096 x [32 x float]], ptr addrspace(4) @array, i64 0, i64 %tmp13, i64 %tmp7
+  %tmp20 = load float, ptr addrspace(4) %tmp18, align 4
   %tmp21 = fadd float %tmp17, %tmp20
-  store float %tmp21, float addrspace(1)* %output, align 4
+  store float %tmp21, ptr addrspace(1) %output, align 4
   ret void
 }
 
@@ -37,31 +37,31 @@ define amdgpu_kernel void @sum_of_array(i32 %x, i32 %y, float addrspace(1)* noca
 ; Some of the indices go over the maximum mubuf offset, so don't split them.
 
 ; IR-LABEL: @sum_of_array_over_max_mubuf_offset(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [4096 x [4 x float]], [4096 x [4 x float]] addrspace(4)* @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr inbounds float, float addrspace(4)* [[BASE_PTR]], i64 255
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [4096 x [4 x float]], ptr addrspace(4) @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr inbounds float, ptr addrspace(4) [[BASE_PTR]], i64 255
 ; IR: add i32 %x, 256
-; IR: getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(4)* @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(4)* @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-define amdgpu_kernel void @sum_of_array_over_max_mubuf_offset(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
+; IR: getelementptr inbounds [4096 x [4 x float]], ptr addrspace(4) @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr inbounds [4096 x [4 x float]], ptr addrspace(4) @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+define amdgpu_kernel void @sum_of_array_over_max_mubuf_offset(i32 %x, i32 %y, ptr addrspace(1) nocapture %output) {
   %tmp = sext i32 %y to i64
   %tmp1 = sext i32 %x to i64
-  %tmp2 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(4)* @array2, i64 0, i64 %tmp1, i64 %tmp
-  %tmp4 = load float, float addrspace(4)* %tmp2, align 4
+  %tmp2 = getelementptr inbounds [4096 x [4 x float]], ptr addrspace(4) @array2, i64 0, i64 %tmp1, i64 %tmp
+  %tmp4 = load float, ptr addrspace(4) %tmp2, align 4
   %tmp5 = fadd float %tmp4, 0.000000e+00
   %tmp6 = add i32 %y, 255
   %tmp7 = sext i32 %tmp6 to i64
-  %tmp8 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(4)* @array2, i64 0, i64 %tmp1, i64 %tmp7
-  %tmp10 = load float, float addrspace(4)* %tmp8, align 4
+  %tmp8 = getelementptr inbounds [4096 x [4 x float]], ptr addrspace(4) @array2, i64 0, i64 %tmp1, i64 %tmp7
+  %tmp10 = load float, ptr addrspace(4) %tmp8, align 4
   %tmp11 = fadd float %tmp5, %tmp10
   %tmp12 = add i32 %x, 256
   %tmp13 = sext i32 %tmp12 to i64
-  %tmp14 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(4)* @array2, i64 0, i64 %tmp13, i64 %tmp
-  %tmp16 = load float, float addrspace(4)* %tmp14, align 4
+  %tmp14 = getelementptr inbounds [4096 x [4 x float]], ptr addrspace(4) @array2, i64 0, i64 %tmp13, i64 %tmp
+  %tmp16 = load float, ptr addrspace(4) %tmp14, align 4
   %tmp17 = fadd float %tmp11, %tmp16
-  %tmp18 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(4)* @array2, i64 0, i64 %tmp13, i64 %tmp7
-  %tmp20 = load float, float addrspace(4)* %tmp18, align 4
+  %tmp18 = getelementptr inbounds [4096 x [4 x float]], ptr addrspace(4) @array2, i64 0, i64 %tmp13, i64 %tmp7
+  %tmp20 = load float, ptr addrspace(4) %tmp18, align 4
   %tmp21 = fadd float %tmp17, %tmp20
-  store float %tmp21, float addrspace(1)* %output, align 4
+  store float %tmp21, ptr addrspace(1) %output, align 4
   ret void
 }
 
@@ -70,26 +70,26 @@ define amdgpu_kernel void @sum_of_array_over_max_mubuf_offset(i32 %x, i32 %y, fl
 
 ; DS instructions have a larger immediate offset, so make sure these are OK.
 ; IR-LABEL: @sum_of_lds_array_over_max_mubuf_offset(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %{{[a-zA-Z0-9]+}}, i32 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i32 255
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i32 16128
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i32 16383
-define amdgpu_kernel void @sum_of_lds_array_over_max_mubuf_offset(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
-  %tmp2 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %x, i32 %y
-  %tmp4 = load float, float addrspace(3)* %tmp2, align 4
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [4096 x [4 x float]], ptr addrspace(3) @lds_array, i32 0, i32 %{{[a-zA-Z0-9]+}}, i32 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i32 255
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i32 16128
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i32 16383
+define amdgpu_kernel void @sum_of_lds_array_over_max_mubuf_offset(i32 %x, i32 %y, ptr addrspace(1) nocapture %output) {
+  %tmp2 = getelementptr inbounds [4096 x [4 x float]], ptr addrspace(3) @lds_array, i32 0, i32 %x, i32 %y
+  %tmp4 = load float, ptr addrspace(3) %tmp2, align 4
   %tmp5 = fadd float %tmp4, 0.000000e+00
   %tmp6 = add i32 %y, 255
-  %tmp8 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %x, i32 %tmp6
-  %tmp10 = load float, float addrspace(3)* %tmp8, align 4
+  %tmp8 = getelementptr inbounds [4096 x [4 x float]], ptr addrspace(3) @lds_array, i32 0, i32 %x, i32 %tmp6
+  %tmp10 = load float, ptr addrspace(3) %tmp8, align 4
   %tmp11 = fadd float %tmp5, %tmp10
   %tmp12 = add i32 %x, 4032
-  %tmp14 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %tmp12, i32 %y
-  %tmp16 = load float, float addrspace(3)* %tmp14, align 4
+  %tmp14 = getelementptr inbounds [4096 x [4 x float]], ptr addrspace(3) @lds_array, i32 0, i32 %tmp12, i32 %y
+  %tmp16 = load float, ptr addrspace(3) %tmp14, align 4
   %tmp17 = fadd float %tmp11, %tmp16
-  %tmp18 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %tmp12, i32 %tmp6
-  %tmp20 = load float, float addrspace(3)* %tmp18, align 4
+  %tmp18 = getelementptr inbounds [4096 x [4 x float]], ptr addrspace(3) @lds_array, i32 0, i32 %tmp12, i32 %tmp6
+  %tmp20 = load float, ptr addrspace(3) %tmp18, align 4
   %tmp21 = fadd float %tmp17, %tmp20
-  store float %tmp21, float addrspace(1)* %output, align 4
+  store float %tmp21, ptr addrspace(1) %output, align 4
   ret void
 }
 
@@ -97,31 +97,30 @@ define amdgpu_kernel void @sum_of_lds_array_over_max_mubuf_offset(i32 %x, i32 %y
 ; IR: getelementptr {{.*}} !amdgpu.uniform
 ; IR: getelementptr {{.*}} !amdgpu.uniform
 ; IR: getelementptr {{.*}} !amdgpu.uniform
-define amdgpu_ps <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @keep_metadata([0 x <4 x i32>] addrspace(4)* inreg noalias dereferenceable(18446744073709551615), [0 x <8 x i32>] addrspace(4)* inreg noalias dereferenceable(18446744073709551615), [0 x <4 x i32>] addrspace(4)* inreg noalias dereferenceable(18446744073709551615), [0 x <8 x i32>] addrspace(4)* inreg noalias dereferenceable(18446744073709551615), float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, i32, i32, float, i32) #5 {
+define amdgpu_ps <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @keep_metadata(ptr addrspace(4) inreg noalias dereferenceable(18446744073709551615), ptr addrspace(4) inreg noalias dereferenceable(18446744073709551615), ptr addrspace(4) inreg noalias dereferenceable(18446744073709551615), ptr addrspace(4) inreg noalias dereferenceable(18446744073709551615), float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, i32, i32, float, i32) #5 {
 main_body:
   %22 = call nsz float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %5) #8
   %23 = bitcast float %22 to i32
   %24 = shl i32 %23, 1
-  %25 = getelementptr [0 x <8 x i32>], [0 x <8 x i32>] addrspace(4)* %1, i32 0, i32 %24, !amdgpu.uniform !0
-  %26 = load <8 x i32>, <8 x i32> addrspace(4)* %25, align 32, !invariant.load !0
+  %25 = getelementptr [0 x <8 x i32>], ptr addrspace(4) %1, i32 0, i32 %24, !amdgpu.uniform !0
+  %26 = load <8 x i32>, ptr addrspace(4) %25, align 32, !invariant.load !0
   %27 = shl i32 %23, 2
   %28 = or i32 %27, 3
-  %29 = bitcast [0 x <8 x i32>] addrspace(4)* %1 to [0 x <4 x i32>] addrspace(4)*
-  %30 = getelementptr [0 x <4 x i32>], [0 x <4 x i32>] addrspace(4)* %29, i32 0, i32 %28, !amdgpu.uniform !0
-  %31 = load <4 x i32>, <4 x i32> addrspace(4)* %30, align 16, !invariant.load !0
-  %32 = call nsz <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> zeroinitializer, <8 x i32> %26, <4 x i32> %31, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #8
-  %33 = extractelement <4 x float> %32, i32 0
-  %34 = extractelement <4 x float> %32, i32 1
-  %35 = extractelement <4 x float> %32, i32 2
-  %36 = extractelement <4 x float> %32, i32 3
-  %37 = bitcast float %4 to i32
-  %38 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> undef, i32 %37, 4
-  %39 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %38, float %33, 5
-  %40 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %39, float %34, 6
-  %41 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %40, float %35, 7
-  %42 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %41, float %36, 8
-  %43 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %42, float %20, 19
-  ret <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %43
+  %29 = getelementptr [0 x <4 x i32>], ptr addrspace(4) %1, i32 0, i32 %28, !amdgpu.uniform !0
+  %30 = load <4 x i32>, ptr addrspace(4) %29, align 16, !invariant.load !0
+  %31 = call nsz <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> zeroinitializer, <8 x i32> %26, <4 x i32> %30, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #8
+  %32 = extractelement <4 x float> %31, i32 0
+  %33 = extractelement <4 x float> %31, i32 1
+  %34 = extractelement <4 x float> %31, i32 2
+  %35 = extractelement <4 x float> %31, i32 3
+  %36 = bitcast float %4 to i32
+  %37 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> undef, i32 %36, 4
+  %38 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %37, float %32, 5
+  %39 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %38, float %33, 6
+  %40 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %39, float %34, 7
+  %41 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %40, float %35, 8
+  %42 = insertvalue <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %41, float %20, 19
+  ret <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %42
 }
 
 ; Function Attrs: nounwind readnone speculatable

diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
index c2c2dd6314025..84fc8da006994 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
@@ -10,38 +10,38 @@
 ;
 ; We expect SeparateConstOffsetFromGEP to transform it to
 ;
-; float *base = &a[x][y];
+; ptr base = &a[x][y];
 ; *output = base[0] + base[1] + base[32] + base[33];
 ;
 ; so the backend can emit PTX that uses fewer virtual registers.
 
 @array = internal addrspace(3) global [32 x [32 x float]] zeroinitializer, align 4
 
-define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
+define void @sum_of_array(i32 %x, i32 %y, ptr nocapture %output) {
 .preheader:
   %0 = sext i32 %y to i64
   %1 = sext i32 %x to i64
-  %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
-  %3 = addrspacecast float addrspace(3)* %2 to float*
-  %4 = load float, float* %3, align 4
+  %2 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %1, i64 %0
+  %3 = addrspacecast ptr addrspace(3) %2 to ptr
+  %4 = load float, ptr %3, align 4
   %5 = fadd float %4, 0.000000e+00
   %6 = add i32 %y, 1
   %7 = sext i32 %6 to i64
-  %8 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
-  %9 = addrspacecast float addrspace(3)* %8 to float*
-  %10 = load float, float* %9, align 4
+  %8 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %1, i64 %7
+  %9 = addrspacecast ptr addrspace(3) %8 to ptr
+  %10 = load float, ptr %9, align 4
   %11 = fadd float %5, %10
   %12 = add i32 %x, 1
   %13 = sext i32 %12 to i64
-  %14 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
-  %15 = addrspacecast float addrspace(3)* %14 to float*
-  %16 = load float, float* %15, align 4
+  %14 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %13, i64 %0
+  %15 = addrspacecast ptr addrspace(3) %14 to ptr
+  %16 = load float, ptr %15, align 4
   %17 = fadd float %11, %16
-  %18 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
-  %19 = addrspacecast float addrspace(3)* %18 to float*
-  %20 = load float, float* %19, align 4
+  %18 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %13, i64 %7
+  %19 = addrspacecast ptr addrspace(3) %18 to ptr
+  %20 = load float, ptr %19, align 4
   %21 = fadd float %17, %20
-  store float %21, float* %output, align 4
+  store float %21, ptr %output, align 4
   ret void
 }
 ; PTX-LABEL: sum_of_array(
@@ -53,10 +53,10 @@ define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
 ; IR-LABEL: @sum_of_array(
 ; TODO: GVN is unable to preserve the "inbounds" keyword on the first GEP. Need
 ; some infrastructure changes to enable such optimizations.
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 1
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 32
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 33
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i64 1
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i64 32
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i64 33
 
 ; @sum_of_array2 is very similar to @sum_of_array. The only difference is in
 ; the order of "sext" and "add" when computing the array indices. @sum_of_array
@@ -64,29 +64,29 @@ define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
 ; @sum_of_array2 computes sext before add,
 ; e.g., array[sext(x) + 1][sext(y) + 1]. SeparateConstOffsetFromGEP should be
 ; able to extract constant offsets from both forms.
-define void @sum_of_array2(i32 %x, i32 %y, float* nocapture %output) {
+define void @sum_of_array2(i32 %x, i32 %y, ptr nocapture %output) {
 .preheader:
   %0 = sext i32 %y to i64
   %1 = sext i32 %x to i64
-  %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
-  %3 = addrspacecast float addrspace(3)* %2 to float*
-  %4 = load float, float* %3, align 4
+  %2 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %1, i64 %0
+  %3 = addrspacecast ptr addrspace(3) %2 to ptr
+  %4 = load float, ptr %3, align 4
   %5 = fadd float %4, 0.000000e+00
   %6 = add i64 %0, 1
-  %7 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
-  %8 = addrspacecast float addrspace(3)* %7 to float*
-  %9 = load float, float* %8, align 4
+  %7 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %1, i64 %6
+  %8 = addrspacecast ptr addrspace(3) %7 to ptr
+  %9 = load float, ptr %8, align 4
   %10 = fadd float %5, %9
   %11 = add i64 %1, 1
-  %12 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
-  %13 = addrspacecast float addrspace(3)* %12 to float*
-  %14 = load float, float* %13, align 4
+  %12 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %11, i64 %0
+  %13 = addrspacecast ptr addrspace(3) %12 to ptr
+  %14 = load float, ptr %13, align 4
   %15 = fadd float %10, %14
-  %16 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
-  %17 = addrspacecast float addrspace(3)* %16 to float*
-  %18 = load float, float* %17, align 4
+  %16 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %11, i64 %6
+  %17 = addrspacecast ptr addrspace(3) %16 to ptr
+  %18 = load float, ptr %17, align 4
   %19 = fadd float %15, %18
-  store float %19, float* %output, align 4
+  store float %19, ptr %output, align 4
   ret void
 }
 ; PTX-LABEL: sum_of_array2(
@@ -96,10 +96,10 @@ define void @sum_of_array2(i32 %x, i32 %y, float* nocapture %output) {
 ; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, [[[BASE_REG]]+132]
 
 ; IR-LABEL: @sum_of_array2(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 1
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 32
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 33
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i64 1
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i64 32
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i64 33
 
 
 ; This function loads
@@ -112,31 +112,31 @@ define void @sum_of_array2(i32 %x, i32 %y, float* nocapture %output) {
 ; 1) extends array indices using zext instead of sext;
 ; 2) annotates the addition with "nuw"; otherwise, zext(x + 1) => zext(x) + 1
 ;    may be invalid.
-define void @sum_of_array3(i32 %x, i32 %y, float* nocapture %output) {
+define void @sum_of_array3(i32 %x, i32 %y, ptr nocapture %output) {
 .preheader:
   %0 = zext i32 %y to i64
   %1 = zext i32 %x to i64
-  %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
-  %3 = addrspacecast float addrspace(3)* %2 to float*
-  %4 = load float, float* %3, align 4
+  %2 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %1, i64 %0
+  %3 = addrspacecast ptr addrspace(3) %2 to ptr
+  %4 = load float, ptr %3, align 4
   %5 = fadd float %4, 0.000000e+00
   %6 = add nuw i32 %y, 1
   %7 = zext i32 %6 to i64
-  %8 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
-  %9 = addrspacecast float addrspace(3)* %8 to float*
-  %10 = load float, float* %9, align 4
+  %8 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %1, i64 %7
+  %9 = addrspacecast ptr addrspace(3) %8 to ptr
+  %10 = load float, ptr %9, align 4
   %11 = fadd float %5, %10
   %12 = add nuw i32 %x, 1
   %13 = zext i32 %12 to i64
-  %14 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
-  %15 = addrspacecast float addrspace(3)* %14 to float*
-  %16 = load float, float* %15, align 4
+  %14 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %13, i64 %0
+  %15 = addrspacecast ptr addrspace(3) %14 to ptr
+  %16 = load float, ptr %15, align 4
   %17 = fadd float %11, %16
-  %18 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
-  %19 = addrspacecast float addrspace(3)* %18 to float*
-  %20 = load float, float* %19, align 4
+  %18 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %13, i64 %7
+  %19 = addrspacecast ptr addrspace(3) %18 to ptr
+  %20 = load float, ptr %19, align 4
   %21 = fadd float %17, %20
-  store float %21, float* %output, align 4
+  store float %21, ptr %output, align 4
   ret void
 }
 ; PTX-LABEL: sum_of_array3(
@@ -146,10 +146,10 @@ define void @sum_of_array3(i32 %x, i32 %y, float* nocapture %output) {
 ; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, [[[BASE_REG]]+132]
 
 ; IR-LABEL: @sum_of_array3(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 1
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 32
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 33
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i64 1
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i64 32
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i64 33
 
 
 ; This function loads
@@ -160,29 +160,29 @@ define void @sum_of_array3(i32 %x, i32 %y, float* nocapture %output) {
 ;
 ; We expect the generated code to reuse the computation of
 ; &array[zext(x)][zext(y)]. See the expected IR and PTX for details.
-define void @sum_of_array4(i32 %x, i32 %y, float* nocapture %output) {
+define void @sum_of_array4(i32 %x, i32 %y, ptr nocapture %output) {
 .preheader:
   %0 = zext i32 %y to i64
   %1 = zext i32 %x to i64
-  %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
-  %3 = addrspacecast float addrspace(3)* %2 to float*
-  %4 = load float, float* %3, align 4
+  %2 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %1, i64 %0
+  %3 = addrspacecast ptr addrspace(3) %2 to ptr
+  %4 = load float, ptr %3, align 4
   %5 = fadd float %4, 0.000000e+00
   %6 = add i64 %0, 1
-  %7 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
-  %8 = addrspacecast float addrspace(3)* %7 to float*
-  %9 = load float, float* %8, align 4
+  %7 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %1, i64 %6
+  %8 = addrspacecast ptr addrspace(3) %7 to ptr
+  %9 = load float, ptr %8, align 4
   %10 = fadd float %5, %9
   %11 = add i64 %1, 1
-  %12 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
-  %13 = addrspacecast float addrspace(3)* %12 to float*
-  %14 = load float, float* %13, align 4
+  %12 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %11, i64 %0
+  %13 = addrspacecast ptr addrspace(3) %12 to ptr
+  %14 = load float, ptr %13, align 4
   %15 = fadd float %10, %14
-  %16 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
-  %17 = addrspacecast float addrspace(3)* %16 to float*
-  %18 = load float, float* %17, align 4
+  %16 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %11, i64 %6
+  %17 = addrspacecast ptr addrspace(3) %16 to ptr
+  %18 = load float, ptr %17, align 4
   %19 = fadd float %15, %18
-  store float %19, float* %output, align 4
+  store float %19, ptr %output, align 4
   ret void
 }
 ; PTX-LABEL: sum_of_array4(
@@ -192,10 +192,10 @@ define void @sum_of_array4(i32 %x, i32 %y, float* nocapture %output) {
 ; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, [[[BASE_REG]]+132]
 
 ; IR-LABEL: @sum_of_array4(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 1
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 32
-; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 33
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i64 1
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i64 32
+; IR: getelementptr inbounds float, ptr addrspace(3) [[BASE_PTR]], i64 33
 
 
 ; The source code is:
@@ -210,23 +210,23 @@ define void @sum_of_array4(i32 %x, i32 %y, float* nocapture %output) {
 ; With reuniting extensions, it merges p0 and t1 and thus emits
 ;   p0 = &input[sext(x + y)];
 ;   p1 = &p0[5];
-define void @reunion(i32 %x, i32 %y, float* %input) {
+define void @reunion(i32 %x, i32 %y, ptr %input) {
 ; IR-LABEL: @reunion(
 ; PTX-LABEL: reunion(
 entry:
   %xy = add nsw i32 %x, %y
   %0 = sext i32 %xy to i64
-  %p0 = getelementptr inbounds float, float* %input, i64 %0
-  %v0 = load float, float* %p0, align 4
+  %p0 = getelementptr inbounds float, ptr %input, i64 %0
+  %v0 = load float, ptr %p0, align 4
 ; PTX: ld.f32 %f{{[0-9]+}}, [[[p0:%rd[0-9]+]]]
   call void @use(float %v0)
 
   %y5 = add nsw i32 %y, 5
   %xy5 = add nsw i32 %x, %y5
   %1 = sext i32 %xy5 to i64
-  %p1 = getelementptr inbounds float, float* %input, i64 %1
-; IR: getelementptr inbounds float, float* %p0, i64 5
-  %v1 = load float, float* %p1, align 4
+  %p1 = getelementptr inbounds float, ptr %input, i64 %1
+; IR: getelementptr inbounds float, ptr %p0, i64 5
+  %v1 = load float, ptr %p1, align 4
 ; PTX: ld.f32 %f{{[0-9]+}}, [[[p0]]+20]
   call void @use(float %v1)
 

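An editorial aside before the next file (a sketch, not part of this
commit): the transform these tests exercise splits the constant part of
a GEP index into a trailing constant-offset GEP, so the variadic base
GEP becomes common to several accesses and GVN can merge it. Roughly,
for the same @array as above, assuming %x and %y are already i64 (the
%y1, %base and %p names are illustrative):

  ; before: each access recomputes the full index
  %y1 = add i64 %y, 1
  %p = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %x, i64 %y1

  ; after: the +1 is peeled off; the base GEP is shared between accesses
  %base = getelementptr [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %x, i64 %y
  %p = getelementptr inbounds float, ptr addrspace(3) %base, i64 1

The split is only performed when it is provably lossless; that is why the
cases below insist on nsw/nuw before distributing sext/zext over the
index arithmetic.
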
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
index 101bb17d15f61..5a76104c7a65f 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
@@ -12,35 +12,35 @@
 
 ; We should not extract any struct field indices, because fields in a struct
 ; may have different types.
-define double* @struct(i32 %i) {
+define ptr @struct(i32 %i) {
 entry:
   %add = add nsw i32 %i, 5
   %idxprom = sext i32 %add to i64
-  %p = getelementptr inbounds [1024 x %struct.S], [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
-  ret double* %p
+  %p = getelementptr inbounds [1024 x %struct.S], ptr @struct_array, i64 0, i64 %idxprom, i32 1
+  ret ptr %p
 }
 ; CHECK-LABEL: @struct(
-; CHECK: getelementptr [1024 x %struct.S], [1024 x %struct.S]* @struct_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1
+; CHECK: getelementptr [1024 x %struct.S], ptr @struct_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1
 
 ; We should be able to trace into sext(a + b) if a + b is non-negative
 ; (e.g., used as an index of an inbounds GEP) and one of a and b is
 ; non-negative.
-define float* @sext_add(i32 %i, i32 %j) {
+define ptr @sext_add(i32 %i, i32 %j) {
 entry:
   %0 = add i32 %i, 1
   %1 = sext i32 %0 to i64  ; inbound sext(i + 1) = sext(i) + 1
   %2 = add i32 %j, -2
   ; However, inbound sext(j + -2) != sext(j) + -2, e.g., j = INT_MIN
   %3 = sext i32 %2 to i64
-  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %1, i64 %3
-  ret float* %p
+  %p = getelementptr inbounds [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %1, i64 %3
+  ret ptr %p
 }
 ; CHECK-LABEL: @sext_add(
 ; CHECK-NOT: = add
 ; CHECK: add i32 %j, -2
 ; CHECK: sext
-; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr inbounds float, float* %{{[a-zA-Z0-9]+}}, i64 32
+; CHECK: getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr inbounds float, ptr %{{[a-zA-Z0-9]+}}, i64 32
 
 ; We should be able to trace into sext/zext if it can be distributed to both
 ; operands, e.g., sext (add nsw a, b) == add nsw (sext a), (sext b)
@@ -49,22 +49,22 @@ entry:
 ;   gep base, a + sext(b +nsw 1), c + zext(d +nuw 1)
 ; to
 ;   gep base, a + sext(b), c + zext(d); gep ..., 1 * 32 + 1
-define float* @ext_add_no_overflow(i64 %a, i32 %b, i64 %c, i32 %d) {
+define ptr @ext_add_no_overflow(i64 %a, i32 %b, i64 %c, i32 %d) {
   %b1 = add nsw i32 %b, 1
   %b2 = sext i32 %b1 to i64
   %i = add i64 %a, %b2       ; i = a + sext(b +nsw 1)
   %d1 = add nuw i32 %d, 1
   %d2 = zext i32 %d1 to i64
   %j = add i64 %c, %d2       ; j = c + zext(d +nuw 1)
-  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
-  ret float* %p
+  %p = getelementptr inbounds [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %i, i64 %j
+  ret ptr %p
 }
 ; CHECK-LABEL: @ext_add_no_overflow(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr inbounds float, float* [[BASE_PTR]], i64 33
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr inbounds float, ptr [[BASE_PTR]], i64 33
 
 ; Verifies we handle nested sext/zext correctly.
-define void @sext_zext(i32 %a, i32 %b, float** %out1, float** %out2) {
+define void @sext_zext(i32 %a, i32 %b, ptr %out1, ptr %out2) {
 entry:
   %0 = add nsw nuw i32 %a, 1
   %1 = sext i32 %0 to i48
@@ -72,27 +72,27 @@ entry:
   %3 = add nsw i32 %b, 2
   %4 = sext i32 %3 to i48
   %5 = zext i48 %4 to i64    ; zext(sext(b +nsw 2)) != zext(sext(b)) + 2
-  %p1 = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %2, i64 %5
-  store float* %p1, float** %out1
+  %p1 = getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %2, i64 %5
+  store ptr %p1, ptr %out1
   %6 = add nuw i32 %a, 3
   %7 = zext i32 %6 to i48
   %8 = sext i48 %7 to i64 ; sext(zext(a +nuw 3)) = zext(a +nuw 3) = zext(a) + 3
   %9 = add nsw i32 %b, 4
   %10 = zext i32 %9 to i48
   %11 = sext i48 %10 to i64  ; sext(zext(b +nsw 4)) != zext(b) + 4
-  %p2 = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %8, i64 %11
-  store float* %p2, float** %out2
+  %p2 = getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %8, i64 %11
+  store ptr %p2, ptr %out2
   ret void
 }
 ; CHECK-LABEL: @sext_zext(
-; CHECK: [[BASE_PTR_1:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float, float* [[BASE_PTR_1]], i64 32
-; CHECK: [[BASE_PTR_2:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float, float* [[BASE_PTR_2]], i64 96
+; CHECK: [[BASE_PTR_1:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, ptr [[BASE_PTR_1]], i64 32
+; CHECK: [[BASE_PTR_2:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, ptr [[BASE_PTR_2]], i64 96
 
 ; Similar to @ext_add_no_overflow, we should be able to trace into s/zext if
 ; its operand is an OR and the two operands of the OR have no common bits.
-define float* @sext_or(i64 %a, i32 %b) {
+define ptr @sext_or(i64 %a, i32 %b) {
 entry:
   %b1 = shl i32 %b, 2
   %b2 = or i32 %b1, 1 ; (b << 2) and 1 have no common bits
@@ -101,89 +101,88 @@ entry:
   %b3.ext = sext i32 %b3 to i64
   %i = add i64 %a, %b2.ext
   %j = add i64 %a, %b3.ext
-  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
-  ret float* %p
+  %p = getelementptr inbounds [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %i, i64 %j
+  ret ptr %p
 }
 ; CHECK-LABEL: @sext_or(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr inbounds float, float* [[BASE_PTR]], i64 32
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr inbounds float, ptr [[BASE_PTR]], i64 32
 
 ; The subexpression (b + 5) is used in both "i = a + (b + 5)" and "*out = b +
 ; 5". When extracting the constant offset 5, make sure "*out = b + 5" isn't
 ; affected.
-define float* @expr(i64 %a, i64 %b, i64* %out) {
+define ptr @expr(i64 %a, i64 %b, ptr %out) {
 entry:
   %b5 = add i64 %b, 5
   %i = add i64 %b5, %a
-  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 0
-  store i64 %b5, i64* %out
-  ret float* %p
+  %p = getelementptr inbounds [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %i, i64 0
+  store i64 %b5, ptr %out
+  ret ptr %p
 }
 ; CHECK-LABEL: @expr(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 0
-; CHECK: getelementptr inbounds float, float* [[BASE_PTR]], i64 160
-; CHECK: store i64 %b5, i64* %out
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 0
+; CHECK: getelementptr inbounds float, ptr [[BASE_PTR]], i64 160
+; CHECK: store i64 %b5, ptr %out
 
 ; d + sext(a +nsw (b +nsw (c +nsw 8))) => (d + sext(a) + sext(b) + sext(c)) + 8
-define float* @sext_expr(i32 %a, i32 %b, i32 %c, i64 %d) {
+define ptr @sext_expr(i32 %a, i32 %b, i32 %c, i64 %d) {
 entry:
   %0 = add nsw i32 %c, 8
   %1 = add nsw i32 %b, %0
   %2 = add nsw i32 %a, %1
   %3 = sext i32 %2 to i64
   %i = add i64 %d, %3
-  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
-  ret float* %p
+  %p = getelementptr inbounds [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 0, i64 %i
+  ret ptr %p
 }
 ; CHECK-LABEL: @sext_expr(
 ; CHECK: sext i32
 ; CHECK: sext i32
 ; CHECK: sext i32
-; CHECK: getelementptr inbounds float, float* %{{[a-zA-Z0-9]+}}, i64 8
+; CHECK: getelementptr inbounds float, ptr %{{[a-zA-Z0-9]+}}, i64 8
 
 ; Verifies we handle "sub" correctly.
-define float* @sub(i64 %i, i64 %j) {
+define ptr @sub(i64 %i, i64 %j) {
   %i2 = sub i64 %i, 5 ; i - 5
  %j2 = sub i64 5, %j ; 5 - j
-  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i2, i64 %j2
-  ret float* %p
+  %p = getelementptr inbounds [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %i2, i64 %j2
+  ret ptr %p
 }
 ; CHECK-LABEL: @sub(
 ; CHECK: %[[j2:[a-zA-Z0-9]+]] = sub i64 0, %j
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
-; CHECK: getelementptr inbounds float, float* [[BASE_PTR]], i64 -155
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
+; CHECK: getelementptr inbounds float, ptr [[BASE_PTR]], i64 -155
 
 %struct.Packed = type <{ [3 x i32], [8 x i64] }> ; <> means packed
 
 ; Verifies we can emit a correct uglygep if the address is not naturally aligned.
-define i64* @packed_struct(i32 %i, i32 %j) {
+define ptr @packed_struct(i32 %i, i32 %j) {
 entry:
   %s = alloca [1024 x %struct.Packed], align 16
   %add = add nsw i32 %j, 3
   %idxprom = sext i32 %add to i64
   %add1 = add nsw i32 %i, 1
   %idxprom2 = sext i32 %add1 to i64
-  %arrayidx3 = getelementptr inbounds [1024 x %struct.Packed], [1024 x %struct.Packed]* %s, i64 0, i64 %idxprom2, i32 1, i64 %idxprom
-  ret i64* %arrayidx3
+  %arrayidx3 = getelementptr inbounds [1024 x %struct.Packed], ptr %s, i64 0, i64 %idxprom2, i32 1, i64 %idxprom
+  ret ptr %arrayidx3
 }
 ; CHECK-LABEL: @packed_struct(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [1024 x %struct.Packed], [1024 x %struct.Packed]* %s, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: [[CASTED_PTR:%[a-zA-Z0-9]+]] = bitcast i64* [[BASE_PTR]] to i8*
-; CHECK: %uglygep = getelementptr inbounds i8, i8* [[CASTED_PTR]], i64 100
-; CHECK: bitcast i8* %uglygep to i64*
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [1024 x %struct.Packed], ptr %s, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: %uglygep = getelementptr inbounds i8, ptr [[BASE_PTR]], i64 100
+; CHECK-NEXT: ret ptr %uglygep
 
 ; We shouldn't be able to extract the 8 from "zext(a +nuw (b + 8))",
 ; because "zext(b + 8) != zext(b) + 8"
-define float* @zext_expr(i32 %a, i32 %b) {
+define ptr @zext_expr(i32 %a, i32 %b) {
 entry:
   %0 = add i32 %b, 8
   %1 = add nuw i32 %a, %0
   %i = zext i32 %1 to i64
-  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
-  ret float* %p
+  %p = getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 0, i64 %i
+  ret ptr %p
 }
 ; CHECK-LABEL: zext_expr(
-; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+; CHECK: getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 0, i64 %i
 
 ; Per http://llvm.org/docs/LangRef.html#id181, the indices of an off-bound gep
 ; should be considered sign-extended to the pointer size. Therefore,
@@ -193,47 +192,47 @@ entry:
 ;
 ; This test verifies we do not illegitimately extract the 8 from
 ;   gep base, (i32 a + 8)
-define float* @i32_add(i32 %a) {
+define ptr @i32_add(i32 %a) {
 entry:
   %i = add i32 %a, 8
-  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i32 %i
-  ret float* %p
+  %p = getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 0, i32 %i
+  ret ptr %p
 }
 ; CHECK-LABEL: @i32_add(
-; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
 ; CHECK-NOT: getelementptr
 
 ; Verifies that we compute the correct constant offset when the index is
 ; sign-extended and then zero-extended. The old version of our code failed to
 ; handle this case because it simply computed the constant offset as the
 ; sign-extended value of the constant part of the GEP index.
-define float* @apint(i1 %a) {
+define ptr @apint(i1 %a) {
 entry:
   %0 = add nsw nuw i1 %a, 1
   %1 = sext i1 %0 to i4
   %2 = zext i4 %1 to i64         ; zext (sext i1 1 to i4) to i64 = 15
-  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %2
-  ret float* %p
+  %p = getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 0, i64 %2
+  ret ptr %p
 }
 ; CHECK-LABEL: @apint(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float, float* [[BASE_PTR]], i64 15
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, ptr [[BASE_PTR]], i64 15
 
 ; Do not trace into binary operators other than ADD, SUB, and OR.
-define float* @and(i64 %a) {
+define ptr @and(i64 %a) {
 entry:
   %0 = shl i64 %a, 2
   %1 = and i64 %0, 1
-  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %1
-  ret float* %p
+  %p = getelementptr [32 x [32 x float]], ptr @float_2d_array, i64 0, i64 0, i64 %1
+  ret ptr %p
 }
 ; CHECK-LABEL: @and(
-; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array
+; CHECK: getelementptr [32 x [32 x float]], ptr @float_2d_array
 ; CHECK-NOT: getelementptr
 
 ; The code that rebuilds an OR expression used to be buggy, and failed on this
 ; test.
-define float* @shl_add_or(i64 %a, float* %ptr) {
+define ptr @shl_add_or(i64 %a, ptr %ptr) {
 ; CHECK-LABEL: @shl_add_or(
 entry:
   %shl = shl i64 %a, 2
@@ -243,10 +242,10 @@ entry:
   ; ((a << 2) + 12) and 1 have no common bits. Therefore,
   ; SeparateConstOffsetFromGEP is able to extract the 12.
   ; TODO(jingyue): We could reassociate the expression to combine 12 and 1.
-  %p = getelementptr float, float* %ptr, i64 %or
-; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr float, float* %ptr, i64 [[OR]]
-; CHECK: getelementptr float, float* [[PTR]], i64 12
-  ret float* %p
+  %p = getelementptr float, ptr %ptr, i64 %or
+; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr float, ptr %ptr, i64 [[OR]]
+; CHECK: getelementptr float, ptr [[PTR]], i64 12
+  ret ptr %p
 ; CHECK-NEXT: ret
 }
 
@@ -259,42 +258,42 @@ entry:
 %struct3 = type { i64, i32 }
 %struct2 = type { %struct3, i32 }
 %struct1 = type { i64, %struct2 }
-%struct0 = type { i32, i32, i64*, [100 x %struct1] }
-define %struct2* @sign_mod_unsign(%struct0* %ptr, i64 %idx) {
+%struct0 = type { i32, i32, ptr, [100 x %struct1] }
+define ptr @sign_mod_unsign(ptr %ptr, i64 %idx) {
 ; CHECK-LABEL: @sign_mod_unsign(
 entry:
   %arrayidx = add nsw i64 %idx, -2
 ; CHECK-NOT: add
-  %ptr2 = getelementptr inbounds %struct0, %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
-; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0, %struct0* %ptr, i64 0, i32 3, i64 %idx, i32 1
-; CHECK: getelementptr inbounds %struct2, %struct2* [[PTR]], i64 -3
-  ret %struct2* %ptr2
+  %ptr2 = getelementptr inbounds %struct0, ptr %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
+; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0, ptr %ptr, i64 0, i32 3, i64 %idx, i32 1
+; CHECK: getelementptr inbounds %struct2, ptr [[PTR]], i64 -3
+  ret ptr %ptr2
 ; CHECK-NEXT: ret
 }
 
 ; Check that we can see through an explicit trunc() instruction.
-define %struct2* @trunk_explicit(%struct0* %ptr, i64 %idx) {
+define ptr @trunk_explicit(ptr %ptr, i64 %idx) {
 ; CHECK-LABEL: @trunk_explicit(
 entry:
   %idx0 = trunc i64 1 to i32
-  %ptr2 = getelementptr inbounds %struct0, %struct0* %ptr, i32 %idx0, i32 3, i64 %idx, i32 1
+  %ptr2 = getelementptr inbounds %struct0, ptr %ptr, i32 %idx0, i32 3, i64 %idx, i32 1
 ; CHECK-NOT: trunc
-; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0, %struct0* %ptr, i64 0, i32 3, i64 %idx, i32 1
-; CHECK: getelementptr inbounds %struct2, %struct2* %0, i64 151
-  ret %struct2* %ptr2
+; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0, ptr %ptr, i64 0, i32 3, i64 %idx, i32 1
+; CHECK: getelementptr inbounds %struct2, ptr %0, i64 151
+  ret ptr %ptr2
 ; CHECK-NEXT: ret
 }
 
 ; Check that we can deal with trunc inserted by
 ; canonicalizeArrayIndicesToPointerSize() if the size of an index is larger than
 ; that of the pointer.
-define %struct2* @trunk_long_idx(%struct0* %ptr, i64 %idx) {
+define ptr @trunk_long_idx(ptr %ptr, i64 %idx) {
 ; CHECK-LABEL: @trunk_long_idx(
 entry:
-  %ptr2 = getelementptr inbounds %struct0, %struct0* %ptr, i65 1, i32 3, i64 %idx, i32 1
+  %ptr2 = getelementptr inbounds %struct0, ptr %ptr, i65 1, i32 3, i64 %idx, i32 1
 ; CHECK-NOT: trunc
-; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0, %struct0* %ptr, i64 0, i32 3, i64 %idx, i32 1
-; CHECK: getelementptr inbounds %struct2, %struct2* %0, i64 151
-  ret %struct2* %ptr2
+; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0, ptr %ptr, i64 0, i32 3, i64 %idx, i32 1
+; CHECK: getelementptr inbounds %struct2, ptr %0, i64 151
+  ret ptr %ptr2
 ; CHECK-NEXT: ret
 }

diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/RISCV/split-gep.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/RISCV/split-gep.ll
index fb98f3f3567a5..1f47d2f0c2c79 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/RISCV/split-gep.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/RISCV/split-gep.ll
@@ -7,285 +7,285 @@
 ; target-specific folders.
 
 ; Simple case when GEPs should be optimized.
-define i64 @test1(i64* %array, i64 %i, i64 %j)  {
+define i64 @test1(ptr %array, i64 %i, i64 %j)  {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[I:%.*]], 5
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i64, i64* [[ARRAY:%.*]], i64 [[I]]
-; CHECK-NEXT:    [[GEP4:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 5
-; CHECK-NEXT:    store i64 [[J:%.*]], i64* [[GEP4]], align 4
-; CHECK-NEXT:    [[GEP26:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 6
-; CHECK-NEXT:    store i64 [[J]], i64* [[GEP26]], align 4
-; CHECK-NEXT:    [[GEP38:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 35
-; CHECK-NEXT:    store i64 [[ADD]], i64* [[GEP38]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i64, ptr [[ARRAY:%.*]], i64 [[I]]
+; CHECK-NEXT:    [[GEP4:%.*]] = getelementptr inbounds i64, ptr [[TMP0]], i64 5
+; CHECK-NEXT:    store i64 [[J:%.*]], ptr [[GEP4]], align 4
+; CHECK-NEXT:    [[GEP26:%.*]] = getelementptr inbounds i64, ptr [[TMP0]], i64 6
+; CHECK-NEXT:    store i64 [[J]], ptr [[GEP26]], align 4
+; CHECK-NEXT:    [[GEP38:%.*]] = getelementptr inbounds i64, ptr [[TMP0]], i64 35
+; CHECK-NEXT:    store i64 [[ADD]], ptr [[GEP38]], align 4
 ; CHECK-NEXT:    ret i64 undef
 ;
 entry:
   %add = add nsw i64 %i, 5
-  %gep = getelementptr inbounds i64, i64* %array, i64 %add
-  store i64 %j, i64* %gep
+  %gep = getelementptr inbounds i64, ptr %array, i64 %add
+  store i64 %j, ptr %gep
   %add2 = add nsw i64 %i, 6
-  %gep2 = getelementptr inbounds i64, i64* %array, i64 %add2
-  store i64 %j, i64* %gep2
+  %gep2 = getelementptr inbounds i64, ptr %array, i64 %add2
+  store i64 %j, ptr %gep2
   %add3 = add nsw i64 %i, 35
-  %gep3 = getelementptr inbounds i64, i64* %array, i64 %add3
-  store i64 %add, i64* %gep3
+  %gep3 = getelementptr inbounds i64, ptr %array, i64 %add3
+  store i64 %add, ptr %gep3
   ret i64 undef
 }
 
 ; Optimize GEPs when sext instructions are needed to cast the index value to the expected type.
-define i32 @test2(i32* %array, i32 %i, i32 %j) {
+define i32 @test2(ptr %array, i32 %i, i32 %j) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[I:%.*]], 5
 ; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i32, i32* [[ARRAY:%.*]], i64 [[TMP0]]
-; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 5
-; CHECK-NEXT:    store i32 [[J:%.*]], i32* [[GEP2]], align 4
-; CHECK-NEXT:    [[GEP54:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 6
-; CHECK-NEXT:    store i32 [[J]], i32* [[GEP54]], align 4
-; CHECK-NEXT:    [[GEP86:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 35
-; CHECK-NEXT:    store i32 [[ADD]], i32* [[GEP86]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i32, ptr [[ARRAY:%.*]], i64 [[TMP0]]
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 5
+; CHECK-NEXT:    store i32 [[J:%.*]], ptr [[GEP2]], align 4
+; CHECK-NEXT:    [[GEP54:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 6
+; CHECK-NEXT:    store i32 [[J]], ptr [[GEP54]], align 4
+; CHECK-NEXT:    [[GEP86:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 35
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP86]], align 4
 ; CHECK-NEXT:    ret i32 undef
 ;
 entry:
   %add = add nsw i32 %i, 5
   %sext = sext i32 %add to i64
-  %gep = getelementptr inbounds i32, i32* %array, i64 %sext
-  store i32 %j, i32* %gep
+  %gep = getelementptr inbounds i32, ptr %array, i64 %sext
+  store i32 %j, ptr %gep
   %add3 = add nsw i32 %i, 6
   %sext4 = sext i32 %add3 to i64
-  %gep5 = getelementptr inbounds i32, i32* %array, i64 %sext4
-  store i32 %j, i32* %gep5
+  %gep5 = getelementptr inbounds i32, ptr %array, i64 %sext4
+  store i32 %j, ptr %gep5
   %add6 = add nsw i32 %i, 35
   %sext7 = sext i32 %add6 to i64
-  %gep8 = getelementptr inbounds i32, i32* %array, i64 %sext7
-  store i32 %add, i32* %gep8
+  %gep8 = getelementptr inbounds i32, ptr %array, i64 %sext7
+  store i32 %add, ptr %gep8
   ret i32 undef
 }
 
 ; No need to modify because all values are also used in other expressions.
 ; Modification doesn't decrease register pressure.
-define i32 @test3(i32* %array, i32 %i) {
+define i32 @test3(ptr %array, i32 %i) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[I:%.*]], 5
 ; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i32, i32* [[ARRAY:%.*]], i64 [[TMP0]]
-; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 5
-; CHECK-NEXT:    store i32 [[ADD]], i32* [[GEP2]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i32, ptr [[ARRAY:%.*]], i64 [[TMP0]]
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 5
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP2]], align 4
 ; CHECK-NEXT:    [[ADD3:%.*]] = add nsw i32 [[I]], 6
-; CHECK-NEXT:    [[GEP54:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 6
-; CHECK-NEXT:    store i32 [[ADD3]], i32* [[GEP54]], align 4
+; CHECK-NEXT:    [[GEP54:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 6
+; CHECK-NEXT:    store i32 [[ADD3]], ptr [[GEP54]], align 4
 ; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 [[I]], 35
-; CHECK-NEXT:    [[GEP86:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 35
-; CHECK-NEXT:    store i32 [[ADD6]], i32* [[GEP86]], align 4
+; CHECK-NEXT:    [[GEP86:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 35
+; CHECK-NEXT:    store i32 [[ADD6]], ptr [[GEP86]], align 4
 ; CHECK-NEXT:    ret i32 undef
 ;
 entry:
   %add = add nsw i32 %i, 5
   %sext = sext i32 %add to i64
-  %gep = getelementptr inbounds i32, i32* %array, i64 %sext
-  store i32 %add, i32* %gep
+  %gep = getelementptr inbounds i32, ptr %array, i64 %sext
+  store i32 %add, ptr %gep
   %add3 = add nsw i32 %i, 6
   %sext4 = sext i32 %add3 to i64
-  %gep5 = getelementptr inbounds i32, i32* %array, i64 %sext4
-  store i32 %add3, i32* %gep5
+  %gep5 = getelementptr inbounds i32, ptr %array, i64 %sext4
+  store i32 %add3, ptr %gep5
   %add6 = add nsw i32 %i, 35
   %sext7 = sext i32 %add6 to i64
-  %gep8 = getelementptr inbounds i32, i32* %array, i64 %sext7
-  store i32 %add6, i32* %gep8
+  %gep8 = getelementptr inbounds i32, ptr %array, i64 %sext7
+  store i32 %add6, ptr %gep8
   ret i32 undef
 }
 
 ; Optimize GEPs for a multidimensional array with the same base.
-define i32 @test4([50 x i32]* %array2, i32 %i) {
+define i32 @test4(ptr %array2, i32 %i) {
 ; CHECK-LABEL: @test4(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[I:%.*]], 5
 ; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [50 x i32], [50 x i32]* [[ARRAY2:%.*]], i64 [[TMP0]], i64 [[TMP0]]
-; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 255
-; CHECK-NEXT:    store i32 [[I]], i32* [[GEP3]], align 4
-; CHECK-NEXT:    [[GEP56:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 256
-; CHECK-NEXT:    store i32 [[ADD]], i32* [[GEP56]], align 4
-; CHECK-NEXT:    [[GEP89:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 285
-; CHECK-NEXT:    store i32 [[I]], i32* [[GEP89]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [50 x i32], ptr [[ARRAY2:%.*]], i64 [[TMP0]], i64 [[TMP0]]
+; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 255
+; CHECK-NEXT:    store i32 [[I]], ptr [[GEP3]], align 4
+; CHECK-NEXT:    [[GEP56:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 256
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP56]], align 4
+; CHECK-NEXT:    [[GEP89:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 285
+; CHECK-NEXT:    store i32 [[I]], ptr [[GEP89]], align 4
 ; CHECK-NEXT:    ret i32 undef
 ;
 entry:
   %add = add nsw i32 %i, 5
   %sext = sext i32 %add to i64
-  %gep = getelementptr inbounds [50 x i32], [50 x i32]* %array2, i64 %sext, i64 %sext
-  store i32 %i, i32* %gep
+  %gep = getelementptr inbounds [50 x i32], ptr %array2, i64 %sext, i64 %sext
+  store i32 %i, ptr %gep
   %add3 = add nsw i32 %i, 6
   %sext4 = sext i32 %add3 to i64
-  %gep5 = getelementptr inbounds [50 x i32], [50 x i32]* %array2, i64 %sext, i64 %sext4
-  store i32 %add, i32* %gep5
+  %gep5 = getelementptr inbounds [50 x i32], ptr %array2, i64 %sext, i64 %sext4
+  store i32 %add, ptr %gep5
   %add6 = add nsw i32 %i, 35
   %sext7 = sext i32 %add6 to i64
-  %gep8 = getelementptr inbounds [50 x i32], [50 x i32]* %array2, i64 %sext, i64 %sext7
-  store i32 %i, i32* %gep8
+  %gep8 = getelementptr inbounds [50 x i32], ptr %array2, i64 %sext, i64 %sext7
+  store i32 %i, ptr %gep8
   ret i32 undef
 }
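
The trailing constants in test4 are just the flattened [50 x i32] element
offsets: with both indices rebased on %i, an index pair (%i + 5, %i + C)
lies 5*50 + C elements past element (%i, %i), giving 5*50 + 5 = 255,
5*50 + 6 = 256, and 5*50 + 35 = 285 for the three stores.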
 
 ; Don't optimize GEPs for a multidimensional array with the same base, because RISC-V doesn't support the addressing mode.
-define i32 @test5([50 x i32]* %array2, i32 %i, i64 %j) {
+define i32 @test5(ptr %array2, i32 %i, i64 %j) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[I:%.*]], 5
 ; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [50 x i32], [50 x i32]* [[ARRAY2:%.*]], i64 [[TMP0]], i64 [[TMP0]]
-; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 255
-; CHECK-NEXT:    store i32 [[ADD]], i32* [[GEP3]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr [50 x i32], [50 x i32]* [[ARRAY2]], i64 [[TMP0]], i64 [[J:%.*]]
-; CHECK-NEXT:    [[GEP55:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 300
-; CHECK-NEXT:    store i32 [[I]], i32* [[GEP55]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [50 x i32], ptr [[ARRAY2:%.*]], i64 [[TMP0]], i64 [[TMP0]]
+; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 255
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP3]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr [50 x i32], ptr [[ARRAY2]], i64 [[TMP0]], i64 [[J:%.*]]
+; CHECK-NEXT:    [[GEP55:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 300
+; CHECK-NEXT:    store i32 [[I]], ptr [[GEP55]], align 4
 ; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 [[I]], 35
 ; CHECK-NEXT:    [[SEXT7:%.*]] = sext i32 [[ADD6]] to i64
-; CHECK-NEXT:    [[GEP8:%.*]] = getelementptr inbounds [50 x i32], [50 x i32]* [[ARRAY2]], i64 [[SEXT7]], i64 [[J]]
-; CHECK-NEXT:    store i32 [[I]], i32* [[GEP8]], align 4
+; CHECK-NEXT:    [[GEP8:%.*]] = getelementptr inbounds [50 x i32], ptr [[ARRAY2]], i64 [[SEXT7]], i64 [[J]]
+; CHECK-NEXT:    store i32 [[I]], ptr [[GEP8]], align 4
 ; CHECK-NEXT:    ret i32 undef
 ;
 entry:
   %add = add nsw i32 %i, 5
   %sext = sext i32 %add to i64
-  %gep = getelementptr inbounds [50 x i32], [50 x i32]* %array2, i64 %sext, i64 %sext
-  store i32 %add, i32* %gep
+  %gep = getelementptr inbounds [50 x i32], ptr %array2, i64 %sext, i64 %sext
+  store i32 %add, ptr %gep
   %add3 = add nsw i32 %i, 6
   %sext4 = sext i32 %add3 to i64
-  %gep5 = getelementptr inbounds [50 x i32], [50 x i32]* %array2, i64 %sext4, i64 %j
-  store i32 %i, i32* %gep5
+  %gep5 = getelementptr inbounds [50 x i32], ptr %array2, i64 %sext4, i64 %j
+  store i32 %i, ptr %gep5
   %add6 = add nsw i32 %i, 35
   %sext7 = sext i32 %add6 to i64
-  %gep8 = getelementptr inbounds [50 x i32], [50 x i32]* %array2, i64 %sext7, i64 %j
-  store i32 %i, i32* %gep8
+  %gep8 = getelementptr inbounds [50 x i32], ptr %array2, i64 %sext7, i64 %j
+  store i32 %i, ptr %gep8
   ret i32 undef
 }
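
In test5 the one store that is still rewritten shows the same flattening:
its constant 6 sits in the row index, so it lands 6*50 = 300 elements past
the (%i, %j) base.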
 
 ; No need to optimize GEPs, because a critical number of them have non-constant offsets.
-define i64 @test6(i64* %array, i64 %i, i64 %j) {
+define i64 @test6(ptr %array, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[I:%.*]], 5
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i64, i64* [[ARRAY:%.*]], i64 [[J:%.*]]
-; CHECK-NEXT:    store i64 [[ADD]], i64* [[GEP]], align 4
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i64, i64* [[ARRAY]], i64 [[I]]
-; CHECK-NEXT:    [[GEP52:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 6
-; CHECK-NEXT:    store i64 [[I]], i64* [[GEP52]], align 4
-; CHECK-NEXT:    store i64 [[I]], i64* [[TMP0]], align 4
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i64, ptr [[ARRAY:%.*]], i64 [[J:%.*]]
+; CHECK-NEXT:    store i64 [[ADD]], ptr [[GEP]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i64, ptr [[ARRAY]], i64 [[I]]
+; CHECK-NEXT:    [[GEP52:%.*]] = getelementptr inbounds i64, ptr [[TMP0]], i64 6
+; CHECK-NEXT:    store i64 [[I]], ptr [[GEP52]], align 4
+; CHECK-NEXT:    store i64 [[I]], ptr [[TMP0]], align 4
 ; CHECK-NEXT:    ret i64 undef
 ;
 entry:
   %add = add nsw i64 %i, 5
-  %gep = getelementptr inbounds i64, i64* %array, i64 %j
-  store i64 %add, i64* %gep
+  %gep = getelementptr inbounds i64, ptr %array, i64 %j
+  store i64 %add, ptr %gep
   %add3 = add nsw i64 %i, 6
-  %gep5 = getelementptr inbounds i64, i64* %array, i64 %add3
-  store i64 %i, i64* %gep5
+  %gep5 = getelementptr inbounds i64, ptr %array, i64 %add3
+  store i64 %i, ptr %gep5
   %add6 = add nsw i64 %i, 35
-  %gep8 = getelementptr inbounds i64, i64* %array, i64 %i
-  store i64 %i, i64* %gep8
+  %gep8 = getelementptr inbounds i64, ptr %array, i64 %i
+  store i64 %i, ptr %gep8
   ret i64 undef
 }
 
 ; No need to optimize GEPs, because the base variable is different.
-define i32 @test7(i32* %array, i32 %i, i32 %j, i32 %k) {
+define i32 @test7(ptr %array, i32 %i, i32 %j, i32 %k) {
 ; CHECK-LABEL: @test7(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[I:%.*]], 5
 ; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i32, i32* [[ARRAY:%.*]], i64 [[TMP0]]
-; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 5
-; CHECK-NEXT:    store i32 [[ADD]], i32* [[GEP2]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i32, ptr [[ARRAY:%.*]], i64 [[TMP0]]
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 5
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP2]], align 4
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[K:%.*]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i32, i32* [[ARRAY]], i64 [[TMP2]]
-; CHECK-NEXT:    [[GEP54:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 6
-; CHECK-NEXT:    store i32 [[I]], i32* [[GEP54]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i32, ptr [[ARRAY]], i64 [[TMP2]]
+; CHECK-NEXT:    [[GEP54:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 6
+; CHECK-NEXT:    store i32 [[I]], ptr [[GEP54]], align 4
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext i32 [[J:%.*]] to i64
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i32, i32* [[ARRAY]], i64 [[TMP4]]
-; CHECK-NEXT:    [[GEP86:%.*]] = getelementptr inbounds i32, i32* [[TMP5]], i64 35
-; CHECK-NEXT:    store i32 [[I]], i32* [[GEP86]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i32, ptr [[ARRAY]], i64 [[TMP4]]
+; CHECK-NEXT:    [[GEP86:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i64 35
+; CHECK-NEXT:    store i32 [[I]], ptr [[GEP86]], align 4
 ; CHECK-NEXT:    ret i32 undef
 ;
 entry:
   %add = add nsw i32 %i, 5
   %sext = sext i32 %add to i64
-  %gep = getelementptr inbounds i32, i32* %array, i64 %sext
-  store i32 %add, i32* %gep
+  %gep = getelementptr inbounds i32, ptr %array, i64 %sext
+  store i32 %add, ptr %gep
   %add3 = add nsw i32 %k, 6
   %sext4 = sext i32 %add3 to i64
-  %gep5 = getelementptr inbounds i32, i32* %array, i64 %sext4
-  store i32 %i, i32* %gep5
+  %gep5 = getelementptr inbounds i32, ptr %array, i64 %sext4
+  store i32 %i, ptr %gep5
   %add6 = add nsw i32 %j, 35
   %sext7 = sext i32 %add6 to i64
-  %gep8 = getelementptr inbounds i32, i32* %array, i64 %sext7
-  store i32 %i, i32* %gep8
+  %gep8 = getelementptr inbounds i32, ptr %array, i64 %sext7
+  store i32 %i, ptr %gep8
   ret i32 undef
 }
 
 ; No need to optimize GEPs, because the base of GEP instructions is different.
-define i32 @test8(i32* %array, i32* %array2, i32* %array3, i32 %i) {
+define i32 @test8(ptr %array, ptr %array2, ptr %array3, i32 %i) {
 ; CHECK-LABEL: @test8(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[I:%.*]], 5
 ; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i32, i32* [[ARRAY:%.*]], i64 [[TMP0]]
-; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 5
-; CHECK-NEXT:    store i32 [[ADD]], i32* [[GEP2]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i32, i32* [[ARRAY2:%.*]], i64 [[TMP0]]
-; CHECK-NEXT:    [[GEP54:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 6
-; CHECK-NEXT:    store i32 [[I]], i32* [[GEP54]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i32, i32* [[ARRAY3:%.*]], i64 [[TMP0]]
-; CHECK-NEXT:    [[GEP86:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 35
-; CHECK-NEXT:    store i32 [[I]], i32* [[GEP86]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i32, ptr [[ARRAY:%.*]], i64 [[TMP0]]
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 5
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP2]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i32, ptr [[ARRAY2:%.*]], i64 [[TMP0]]
+; CHECK-NEXT:    [[GEP54:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 6
+; CHECK-NEXT:    store i32 [[I]], ptr [[GEP54]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i32, ptr [[ARRAY3:%.*]], i64 [[TMP0]]
+; CHECK-NEXT:    [[GEP86:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 35
+; CHECK-NEXT:    store i32 [[I]], ptr [[GEP86]], align 4
 ; CHECK-NEXT:    ret i32 undef
 ;
 entry:
   %add = add nsw i32 %i, 5
   %sext = sext i32 %add to i64
-  %gep = getelementptr inbounds i32, i32* %array, i64 %sext
-  store i32 %add, i32* %gep
+  %gep = getelementptr inbounds i32, ptr %array, i64 %sext
+  store i32 %add, ptr %gep
   %add3 = add nsw i32 %i, 6
   %sext4 = sext i32 %add3 to i64
-  %gep5 = getelementptr inbounds i32, i32* %array2, i64 %sext4
-  store i32 %i, i32* %gep5
+  %gep5 = getelementptr inbounds i32, ptr %array2, i64 %sext4
+  store i32 %i, ptr %gep5
   %add6 = add nsw i32 %i, 35
   %sext7 = sext i32 %add6 to i64
-  %gep8 = getelementptr inbounds i32, i32* %array3, i64 %sext7
-  store i32 %i, i32* %gep8
+  %gep8 = getelementptr inbounds i32, ptr %array3, i64 %sext7
+  store i32 %i, ptr %gep8
   ret i32 undef
 }
 
 ; No need to optimize GEPs of a multidimensional array, because the base of GEP instructions is different.
-define i32 @test9([50 x i32]* %array, i32 %i) {
+define i32 @test9(ptr %array, i32 %i) {
 ; CHECK-LABEL: @test9(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[I:%.*]], 5
 ; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [50 x i32], [50 x i32]* [[ARRAY:%.*]], i64 0, i64 [[TMP0]]
-; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 5
-; CHECK-NEXT:    store i32 [[ADD]], i32* [[GEP2]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr [50 x i32], [50 x i32]* [[ARRAY]], i64 [[TMP0]], i64 [[TMP0]]
-; CHECK-NEXT:    [[GEP54:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 6
-; CHECK-NEXT:    store i32 [[I]], i32* [[GEP54]], align 4
-; CHECK-NEXT:    [[GEP87:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 335
-; CHECK-NEXT:    store i32 [[I]], i32* [[GEP87]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [50 x i32], ptr [[ARRAY:%.*]], i64 0, i64 [[TMP0]]
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 5
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP2]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr [50 x i32], ptr [[ARRAY]], i64 [[TMP0]], i64 [[TMP0]]
+; CHECK-NEXT:    [[GEP54:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 6
+; CHECK-NEXT:    store i32 [[I]], ptr [[GEP54]], align 4
+; CHECK-NEXT:    [[GEP87:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 335
+; CHECK-NEXT:    store i32 [[I]], ptr [[GEP87]], align 4
 ; CHECK-NEXT:    ret i32 undef
 ;
 entry:
   %add = add nsw i32 %i, 5
   %sext = sext i32 %add to i64
-  %gep = getelementptr inbounds [50 x i32], [50 x i32]* %array, i64 0, i64 %sext
-  store i32 %add, i32* %gep
+  %gep = getelementptr inbounds [50 x i32], ptr %array, i64 0, i64 %sext
+  store i32 %add, ptr %gep
   %add3 = add nsw i32 %i, 6
   %sext4 = sext i32 %add3 to i64
   %int = sext i32 %i to i64
-  %gep5 = getelementptr inbounds [50 x i32], [50 x i32]* %array, i64 %int, i64 %sext4
-  store i32 %i, i32* %gep5
+  %gep5 = getelementptr inbounds [50 x i32], ptr %array, i64 %int, i64 %sext4
+  store i32 %i, ptr %gep5
   %add6 = add nsw i32 %i, 35
   %sext7 = sext i32 %add6 to i64
-  %gep8 = getelementptr inbounds [50 x i32], [50 x i32]* %array, i64 %sext4, i64 %sext7
-  store i32 %i, i32* %gep8
+  %gep8 = getelementptr inbounds [50 x i32], ptr %array, i64 %sext4, i64 %sext7
+  store i32 %i, ptr %gep8
   ret i32 undef
 }

diff  --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/crash-in-unreachable-code.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/crash-in-unreachable-code.ll
index b61c9bb229206..fa6c77573d876 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/crash-in-unreachable-code.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/crash-in-unreachable-code.ll
@@ -8,7 +8,7 @@ entry:
   ret void
 
 for.body28.i:                                     ; preds = %for.body28.i
-  %arrayidx3389.i = getelementptr inbounds [16 x i8], [16 x i8] addrspace(3)* @gv, i32 0, i32 %inc38.7.i.1
+  %arrayidx3389.i = getelementptr inbounds [16 x i8], ptr addrspace(3) @gv, i32 0, i32 %inc38.7.i.1
   %inc38.7.i.1 = add nuw nsw i32 %inc38.7.i.1, 16
   br label %for.body28.i
 }
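
A note on why this test exists: %arrayidx3389.i uses %inc38.7.i.1 before
the add that defines it, which is only valid IR because %for.body28.i is
unreachable (its sole predecessor is itself). The pass has to tolerate
such use-before-def chains in dead blocks, which is presumably the crash
the test guards against.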

diff  --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/pr45371-find-either-reset.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/pr45371-find-either-reset.ll
index efe426b718eb3..2fd067e5740f7 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/pr45371-find-either-reset.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/pr45371-find-either-reset.ll
@@ -11,13 +11,13 @@ define void @find_either_reset() {
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[TMP0]], 96
 ; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i8 0 to i64
 ; CHECK-NEXT:    [[IDXPROM1:%.*]] = sext i8 [[TMP1]] to i64
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4000 x i8], [4000 x i8]* @e, i64 [[IDXPROM]], i64 [[IDXPROM1]]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4000 x i8], ptr @e, i64 [[IDXPROM]], i64 [[IDXPROM1]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %sub = sub nsw i32 65536, undef
   %0 = trunc i32 %sub to i8
   %1 = add i8 %0, -4000
-  %arrayidx = getelementptr inbounds [4000 x i8], [4000 x i8]* @e, i8 0, i8 %1
+  %arrayidx = getelementptr inbounds [4000 x i8], ptr @e, i8 0, i8 %1
   ret void
 }
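
The constant change from -4000 to 96 in the CHECK line is i8 wraparound
rather than a new offset: -4000 and 96 are congruent modulo 256, so they
denote the same 8-bit value once truncated.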

diff  --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/test-add-sub-separation.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/test-add-sub-separation.ll
index 72dca78ccf39a..b8f07a457fa66 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/test-add-sub-separation.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/test-add-sub-separation.ll
@@ -2,31 +2,31 @@
 ; RUN: opt -S -separate-const-offset-from-gep < %s | FileCheck %s
 ; RUN: opt -S -passes=separate-const-offset-from-gep < %s | FileCheck %s
 
-define void @matchingExtensions(i32* %ap, i32* %bp, i64* %result) {
+define void @matchingExtensions(ptr %ap, ptr %bp, ptr %result) {
 ; CHECK-LABEL: @matchingExtensions(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[A:%.*]] = load i32, i32* [[AP:%.*]], align 4
-; CHECK-NEXT:    [[B:%.*]] = load i32, i32* [[BP:%.*]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load i32, ptr [[AP:%.*]], align 4
+; CHECK-NEXT:    [[B:%.*]] = load i32, ptr [[BP:%.*]], align 4
 ; CHECK-NEXT:    [[EB:%.*]] = sext i32 [[B]] to i64
 ; CHECK-NEXT:    [[SUBAB:%.*]] = sub nsw i32 [[A]], [[B]]
 ; CHECK-NEXT:    [[EA:%.*]] = sext i32 [[A]] to i64
 ; CHECK-NEXT:    [[ADDEAEB:%.*]] = add nsw i64 [[EA]], [[EB]]
 ; CHECK-NEXT:    [[EXTSUB:%.*]] = sext i32 [[SUBAB]] to i64
-; CHECK-NEXT:    [[IDX:%.*]] = getelementptr i32, i32* [[AP]], i64 [[EXTSUB]]
-; CHECK-NEXT:    store i64 [[ADDEAEB]], i64* [[RESULT:%.*]]
-; CHECK-NEXT:    store i32 [[SUBAB]], i32* [[IDX]]
+; CHECK-NEXT:    [[IDX:%.*]] = getelementptr i32, ptr [[AP]], i64 [[EXTSUB]]
+; CHECK-NEXT:    store i64 [[ADDEAEB]], ptr [[RESULT:%.*]]
+; CHECK-NEXT:    store i32 [[SUBAB]], ptr [[IDX]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %a = load i32, i32* %ap
-  %b = load i32, i32* %bp
+  %a = load i32, ptr %ap
+  %b = load i32, ptr %bp
   %eb = sext i32 %b to i64
   %subab = sub nsw i32 %a, %b
   %ea = sext i32 %a to i64
   %addeaeb = add nsw i64 %ea, %eb
   %extsub = sext i32 %subab to i64
-  %idx = getelementptr i32, i32* %ap, i64 %extsub
-  store i64 %addeaeb, i64* %result
-  store i32 %subab, i32* %idx
+  %idx = getelementptr i32, ptr %ap, i64 %extsub
+  store i64 %addeaeb, ptr %result
+  store i32 %subab, ptr %idx
   ret void
 }
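
Note that the GEP index here, the sext of (%a - %b), has no constant
component to split out, so apart from the mechanical typed-pointer to
opaque-pointer rewrites the function body is expected to come out of the
pass unchanged.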
