[llvm] 8fcf387 - InstCombine: Convert target tests to opaque pointers

Matt Arsenault via llvm-commits <llvm-commits at lists.llvm.org>
Thu Dec 1 18:56:47 PST 2022


Author: Matt Arsenault
Date: 2022-12-01T21:56:14-05:00
New Revision: 8fcf387202d684f6083f62353c29a5eb4050f6df

URL: https://github.com/llvm/llvm-project/commit/8fcf387202d684f6083f62353c29a5eb4050f6df
DIFF: https://github.com/llvm/llvm-project/commit/8fcf387202d684f6083f62353c29a5eb4050f6df.diff

LOG: InstCombine: Convert target tests to opaque pointers

The opaquify script deleted a few declarations for some reason; those had
to be restored by hand.
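
For readers following along, a representative before/after pair (adapted
from the first test in the diff; %alloca and %src are placeholder names)
shows the shape of the conversion: pointee-typed pointers and their
bitcasts collapse into the single opaque ptr type, and the pointee types
drop out of the intrinsic name mangling:

  ; typed pointers (before)
  %cast = bitcast [32 x i8] addrspace(5)* %alloca to i8 addrspace(5)*
  call void @llvm.memcpy.p5i8.p4i8.i64(i8 addrspace(5)* %cast, i8 addrspace(4)* %src, i64 32, i1 false)

  ; opaque pointers (after): the bitcast is gone, and the intrinsic is
  ; mangled by address space only
  call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) %alloca, ptr addrspace(4) %src, i64 32, i1 false)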

Added: 
    

Modified: 
    llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
    llvm/test/Transforms/InstCombine/ARM/neon-intrinsics.ll
    llvm/test/Transforms/InstCombine/X86/pr2645-1.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll b/llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
index d258e46611753..44f343fff5485 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
@@ -7,277 +7,236 @@
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; Simple memcpy to alloca from constant address space argument.
-define i8 @memcpy_constant_arg_ptr_to_alloca([32 x i8] addrspace(4)* noalias readonly align 4 dereferenceable(32) %arg, i32 %idx) {
+define i8 @memcpy_constant_arg_ptr_to_alloca(ptr addrspace(4) noalias readonly align 4 dereferenceable(32) %arg, i32 %idx) {
 ; CHECK-LABEL: @memcpy_constant_arg_ptr_to_alloca(
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [32 x i8], [32 x i8] addrspace(4)* [[ARG:%.*]], i64 0, i64 [[TMP1]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(4)* [[GEP]], align 1
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [32 x i8], ptr addrspace(4) [[ARG:%.*]], i64 0, i64 [[TMP1]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) [[GEP]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
   %alloca = alloca [32 x i8], align 4, addrspace(5)
-  %alloca.cast = bitcast [32 x i8] addrspace(5)* %alloca to i8 addrspace(5)*
-  %arg.cast = bitcast [32 x i8] addrspace(4)* %arg to i8 addrspace(4)*
-  call void @llvm.memcpy.p5i8.p4i8.i64(i8 addrspace(5)* %alloca.cast, i8 addrspace(4)* %arg.cast, i64 32, i1 false)
-  %gep = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* %alloca, i32 0, i32 %idx
-  %load = load i8, i8 addrspace(5)* %gep
+  call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) %alloca, ptr addrspace(4) %arg, i64 32, i1 false)
+  %gep = getelementptr inbounds [32 x i8], ptr addrspace(5) %alloca, i32 0, i32 %idx
+  %load = load i8, ptr addrspace(5) %gep
   ret i8 %load
 }
 
-define i8 @memcpy_constant_arg_ptr_to_alloca_load_metadata([32 x i8] addrspace(4)* noalias readonly align 4 dereferenceable(32) %arg, i32 %idx) {
+define i8 @memcpy_constant_arg_ptr_to_alloca_load_metadata(ptr addrspace(4) noalias readonly align 4 dereferenceable(32) %arg, i32 %idx) {
 ; CHECK-LABEL: @memcpy_constant_arg_ptr_to_alloca_load_metadata(
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [32 x i8], [32 x i8] addrspace(4)* [[ARG:%.*]], i64 0, i64 [[TMP1]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(4)* [[GEP]], align 1, !noalias !0
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [32 x i8], ptr addrspace(4) [[ARG:%.*]], i64 0, i64 [[TMP1]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) [[GEP]], align 1, !noalias !0
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
   %alloca = alloca [32 x i8], align 4, addrspace(5)
-  %alloca.cast = bitcast [32 x i8] addrspace(5)* %alloca to i8 addrspace(5)*
-  %arg.cast = bitcast [32 x i8] addrspace(4)* %arg to i8 addrspace(4)*
-  call void @llvm.memcpy.p5i8.p4i8.i64(i8 addrspace(5)* %alloca.cast, i8 addrspace(4)* %arg.cast, i64 32, i1 false)
-  %gep = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* %alloca, i32 0, i32 %idx
-  %load = load i8, i8 addrspace(5)* %gep, !noalias !0
+  call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) %alloca, ptr addrspace(4) %arg, i64 32, i1 false)
+  %gep = getelementptr inbounds [32 x i8], ptr addrspace(5) %alloca, i32 0, i32 %idx
+  %load = load i8, ptr addrspace(5) %gep, !noalias !0
   ret i8 %load
 }
 
-define i64 @memcpy_constant_arg_ptr_to_alloca_load_alignment([32 x i64] addrspace(4)* noalias readonly align 4 dereferenceable(256) %arg, i32 %idx) {
+define i64 @memcpy_constant_arg_ptr_to_alloca_load_alignment(ptr addrspace(4) noalias readonly align 4 dereferenceable(256) %arg, i32 %idx) {
 ; CHECK-LABEL: @memcpy_constant_arg_ptr_to_alloca_load_alignment(
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [32 x i64], [32 x i64] addrspace(4)* [[ARG:%.*]], i64 0, i64 [[TMP1]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i64, i64 addrspace(4)* [[GEP]], align 16
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [32 x i64], ptr addrspace(4) [[ARG:%.*]], i64 0, i64 [[TMP1]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load i64, ptr addrspace(4) [[GEP]], align 16
 ; CHECK-NEXT:    ret i64 [[LOAD]]
 ;
   %alloca = alloca [32 x i64], align 4, addrspace(5)
-  %alloca.cast = bitcast [32 x i64] addrspace(5)* %alloca to i8 addrspace(5)*
-  %arg.cast = bitcast [32 x i64] addrspace(4)* %arg to i8 addrspace(4)*
-  call void @llvm.memcpy.p5i8.p4i8.i64(i8 addrspace(5)* %alloca.cast, i8 addrspace(4)* %arg.cast, i64 256, i1 false)
-  %gep = getelementptr inbounds [32 x i64], [32 x i64] addrspace(5)* %alloca, i32 0, i32 %idx
-  %load = load i64, i64 addrspace(5)* %gep, align 16
+  call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) %alloca, ptr addrspace(4) %arg, i64 256, i1 false)
+  %gep = getelementptr inbounds [32 x i64], ptr addrspace(5) %alloca, i32 0, i32 %idx
+  %load = load i64, ptr addrspace(5) %gep, align 16
   ret i64 %load
 }
 
-define i64 @memcpy_constant_arg_ptr_to_alloca_load_atomic([32 x i64] addrspace(4)* noalias readonly align 8 dereferenceable(256) %arg, i32 %idx) {
+define i64 @memcpy_constant_arg_ptr_to_alloca_load_atomic(ptr addrspace(4) noalias readonly align 8 dereferenceable(256) %arg, i32 %idx) {
 ; CHECK-LABEL: @memcpy_constant_arg_ptr_to_alloca_load_atomic(
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca [32 x i64], align 8, addrspace(5)
-; CHECK-NEXT:    [[ALLOCA_CAST:%.*]] = bitcast [32 x i64] addrspace(5)* [[ALLOCA]] to i8 addrspace(5)*
-; CHECK-NEXT:    [[ARG_CAST:%.*]] = bitcast [32 x i64] addrspace(4)* [[ARG:%.*]] to i8 addrspace(4)*
-; CHECK-NEXT:    call void @llvm.memcpy.p5i8.p4i8.i64(i8 addrspace(5)* noundef align 8 dereferenceable(256) [[ALLOCA_CAST]], i8 addrspace(4)* noundef align 8 dereferenceable(256) [[ARG_CAST]], i64 256, i1 false)
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [32 x i64], [32 x i64] addrspace(5)* [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load atomic i64, i64 addrspace(5)* [[GEP]] syncscope("somescope") acquire, align 8
+; CHECK-NEXT:    call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef align 8 dereferenceable(256) [[ALLOCA]], ptr addrspace(4) noundef align 8 dereferenceable(256) [[ARG:%.*]], i64 256, i1 false)
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [32 x i64], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load atomic i64, ptr addrspace(5) [[GEP]] syncscope("somescope") acquire, align 8
 ; CHECK-NEXT:    ret i64 [[LOAD]]
 ;
   %alloca = alloca [32 x i64], align 8, addrspace(5)
-  %alloca.cast = bitcast [32 x i64] addrspace(5)* %alloca to i8 addrspace(5)*
-  %arg.cast = bitcast [32 x i64] addrspace(4)* %arg to i8 addrspace(4)*
-  call void @llvm.memcpy.p5i8.p4i8.i64(i8 addrspace(5)* %alloca.cast, i8 addrspace(4)* %arg.cast, i64 256, i1 false)
-  %gep = getelementptr inbounds [32 x i64], [32 x i64] addrspace(5)* %alloca, i32 0, i32 %idx
-  %load = load atomic i64, i64 addrspace(5)* %gep syncscope("somescope") acquire, align 8
+  call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) %alloca, ptr addrspace(4) %arg, i64 256, i1 false)
+  %gep = getelementptr inbounds [32 x i64], ptr addrspace(5) %alloca, i32 0, i32 %idx
+  %load = load atomic i64, ptr addrspace(5) %gep syncscope("somescope") acquire, align 8
   ret i64 %load
 }
 
 ; Simple memmove to alloca from constant address space argument.
-define i8 @memmove_constant_arg_ptr_to_alloca([32 x i8] addrspace(4)* noalias readonly align 4 dereferenceable(32) %arg, i32 %idx) {
+define i8 @memmove_constant_arg_ptr_to_alloca(ptr addrspace(4) noalias readonly align 4 dereferenceable(32) %arg, i32 %idx) {
 ; CHECK-LABEL: @memmove_constant_arg_ptr_to_alloca(
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [32 x i8], [32 x i8] addrspace(4)* [[ARG:%.*]], i64 0, i64 [[TMP1]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(4)* [[GEP]], align 1
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [32 x i8], ptr addrspace(4) [[ARG:%.*]], i64 0, i64 [[TMP1]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) [[GEP]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
   %alloca = alloca [32 x i8], align 4, addrspace(5)
-  %alloca.cast = bitcast [32 x i8] addrspace(5)* %alloca to i8 addrspace(5)*
-  %arg.cast = bitcast [32 x i8] addrspace(4)* %arg to i8 addrspace(4)*
-  call void @llvm.memmove.p5i8.p4i8.i32(i8 addrspace(5)* %alloca.cast, i8 addrspace(4)* %arg.cast, i32 32, i1 false)
-  %gep = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* %alloca, i32 0, i32 %idx
-  %load = load i8, i8 addrspace(5)* %gep
+  call void @llvm.memmove.p5.p4.i32(ptr addrspace(5) %alloca, ptr addrspace(4) %arg, i32 32, i1 false)
+  %gep = getelementptr inbounds [32 x i8], ptr addrspace(5) %alloca, i32 0, i32 %idx
+  %load = load i8, ptr addrspace(5) %gep
   ret i8 %load
 }
 
 ; Simple memcpy to alloca from byref constant address space argument.
-define amdgpu_kernel void @memcpy_constant_byref_arg_ptr_to_alloca([32 x i8] addrspace(4)* noalias readonly align 4 byref([32 x i8]) %arg, i8 addrspace(1)* %out, i32 %idx) {
+define amdgpu_kernel void @memcpy_constant_byref_arg_ptr_to_alloca(ptr addrspace(4) noalias readonly align 4 byref([32 x i8]) %arg, ptr addrspace(1) %out, i32 %idx) {
 ; CHECK-LABEL: @memcpy_constant_byref_arg_ptr_to_alloca(
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [32 x i8], [32 x i8] addrspace(4)* [[ARG:%.*]], i64 0, i64 [[TMP1]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(4)* [[GEP]], align 1
-; CHECK-NEXT:    store i8 [[LOAD]], i8 addrspace(1)* [[OUT:%.*]], align 1
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [32 x i8], ptr addrspace(4) [[ARG:%.*]], i64 0, i64 [[TMP1]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) [[GEP]], align 1
+; CHECK-NEXT:    store i8 [[LOAD]], ptr addrspace(1) [[OUT:%.*]], align 1
 ; CHECK-NEXT:    ret void
 ;
   %alloca = alloca [32 x i8], align 4, addrspace(5)
-  %alloca.cast = bitcast [32 x i8] addrspace(5)* %alloca to i8 addrspace(5)*
-  %arg.cast = bitcast [32 x i8] addrspace(4)* %arg to i8 addrspace(4)*
-  call void @llvm.memcpy.p5i8.p4i8.i64(i8 addrspace(5)* %alloca.cast, i8 addrspace(4)* %arg.cast, i64 32, i1 false)
-  %gep = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* %alloca, i32 0, i32 %idx
-  %load = load i8, i8 addrspace(5)* %gep
-  store i8 %load, i8 addrspace(1)* %out
+  call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) %alloca, ptr addrspace(4) %arg, i64 32, i1 false)
+  %gep = getelementptr inbounds [32 x i8], ptr addrspace(5) %alloca, i32 0, i32 %idx
+  %load = load i8, ptr addrspace(5) %gep
+  store i8 %load, ptr addrspace(1) %out
   ret void
 }
 
 ; Simple memcpy to alloca from byref constant address space argument, but not enough bytes are dereferenceable
-define amdgpu_kernel void @memcpy_constant_byref_arg_ptr_to_alloca_too_many_bytes([31 x i8] addrspace(4)* noalias readonly align 4 byref([31 x i8]) %arg, i8 addrspace(1)* %out, i32 %idx) {
+define amdgpu_kernel void @memcpy_constant_byref_arg_ptr_to_alloca_too_many_bytes(ptr addrspace(4) noalias readonly align 4 byref([31 x i8]) %arg, ptr addrspace(1) %out, i32 %idx) {
 ; CHECK-LABEL: @memcpy_constant_byref_arg_ptr_to_alloca_too_many_bytes(
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca [32 x i8], align 4, addrspace(5)
-; CHECK-NEXT:    [[ALLOCA_CAST:%.*]] = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* [[ALLOCA]], i32 0, i32 0
-; CHECK-NEXT:    [[ARG_CAST:%.*]] = getelementptr inbounds [31 x i8], [31 x i8] addrspace(4)* [[ARG:%.*]], i64 0, i64 0
-; CHECK-NEXT:    call void @llvm.memcpy.p5i8.p4i8.i64(i8 addrspace(5)* noundef align 4 dereferenceable(31) [[ALLOCA_CAST]], i8 addrspace(4)* noundef align 4 dereferenceable(31) [[ARG_CAST]], i64 31, i1 false)
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(5)* [[GEP]], align 1
-; CHECK-NEXT:    store i8 [[LOAD]], i8 addrspace(1)* [[OUT:%.*]], align 1
+; CHECK-NEXT:    call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef align 4 dereferenceable(31) [[ALLOCA]], ptr addrspace(4) noundef align 4 dereferenceable(31) [[ARG:%.*]], i64 31, i1 false)
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [32 x i8], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(5) [[GEP]], align 1
+; CHECK-NEXT:    store i8 [[LOAD]], ptr addrspace(1) [[OUT:%.*]], align 1
 ; CHECK-NEXT:    ret void
 ;
   %alloca = alloca [32 x i8], align 4, addrspace(5)
-  %alloca.cast = bitcast [32 x i8] addrspace(5)* %alloca to i8 addrspace(5)*
-  %arg.cast = bitcast [31 x i8] addrspace(4)* %arg to i8 addrspace(4)*
-  call void @llvm.memcpy.p5i8.p4i8.i64(i8 addrspace(5)* %alloca.cast, i8 addrspace(4)* %arg.cast, i64 31, i1 false)
-  %gep = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* %alloca, i32 0, i32 %idx
-  %load = load i8, i8 addrspace(5)* %gep
-  store i8 %load, i8 addrspace(1)* %out
+  call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) %alloca, ptr addrspace(4) %arg, i64 31, i1 false)
+  %gep = getelementptr inbounds [32 x i8], ptr addrspace(5) %alloca, i32 0, i32 %idx
+  %load = load i8, ptr addrspace(5) %gep
+  store i8 %load, ptr addrspace(1) %out
   ret void
 }
 
 ; Simple memcpy to alloca from constant address space intrinsic call
-define amdgpu_kernel void @memcpy_constant_intrinsic_ptr_to_alloca(i8 addrspace(1)* %out, i32 %idx) {
+define amdgpu_kernel void @memcpy_constant_intrinsic_ptr_to_alloca(ptr addrspace(1) %out, i32 %idx) {
 ; CHECK-LABEL: @memcpy_constant_intrinsic_ptr_to_alloca(
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca [32 x i8], align 4, addrspace(5)
-; CHECK-NEXT:    [[ALLOCA_CAST:%.*]] = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* [[ALLOCA]], i32 0, i32 0
-; CHECK-NEXT:    [[KERNARG_SEGMENT_PTR:%.*]] = call align 16 dereferenceable(32) i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
-; CHECK-NEXT:    call void @llvm.memcpy.p5i8.p4i8.i64(i8 addrspace(5)* noundef align 4 dereferenceable(32) [[ALLOCA_CAST]], i8 addrspace(4)* noundef align 16 dereferenceable(32) [[KERNARG_SEGMENT_PTR]], i64 32, i1 false)
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(5)* [[GEP]], align 1
-; CHECK-NEXT:    store i8 [[LOAD]], i8 addrspace(1)* [[OUT:%.*]], align 1
+; CHECK-NEXT:    [[KERNARG_SEGMENT_PTR:%.*]] = call align 16 dereferenceable(32) ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
+; CHECK-NEXT:    call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef align 4 dereferenceable(32) [[ALLOCA]], ptr addrspace(4) noundef align 16 dereferenceable(32) [[KERNARG_SEGMENT_PTR]], i64 32, i1 false)
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [32 x i8], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(5) [[GEP]], align 1
+; CHECK-NEXT:    store i8 [[LOAD]], ptr addrspace(1) [[OUT:%.*]], align 1
 ; CHECK-NEXT:    ret void
 ;
   %alloca = alloca [32 x i8], align 4, addrspace(5)
-  %alloca.cast = bitcast [32 x i8] addrspace(5)* %alloca to i8 addrspace(5)*
-  %kernarg.segment.ptr = call dereferenceable(32) align 16 i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
-  call void @llvm.memcpy.p5i8.p4i8.i64(i8 addrspace(5)* %alloca.cast, i8 addrspace(4)* %kernarg.segment.ptr, i64 32, i1 false)
-  %gep = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* %alloca, i32 0, i32 %idx
-  %load = load i8, i8 addrspace(5)* %gep
-  store i8 %load, i8 addrspace(1)* %out
+  %kernarg.segment.ptr = call dereferenceable(32) align 16 ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
+  call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) %alloca, ptr addrspace(4) %kernarg.segment.ptr, i64 32, i1 false)
+  %gep = getelementptr inbounds [32 x i8], ptr addrspace(5) %alloca, i32 0, i32 %idx
+  %load = load i8, ptr addrspace(5) %gep
+  store i8 %load, ptr addrspace(1) %out
   ret void
 }
 
 ; Alloca is written through a flat pointer
-define i8 @memcpy_constant_arg_ptr_to_alloca_addrspacecast_to_flat([31 x i8] addrspace(4)* noalias readonly align 4 dereferenceable(32) %arg, i32 %idx) {
+define i8 @memcpy_constant_arg_ptr_to_alloca_addrspacecast_to_flat(ptr addrspace(4) noalias readonly align 4 dereferenceable(32) %arg, i32 %idx) {
 ; CHECK-LABEL: @memcpy_constant_arg_ptr_to_alloca_addrspacecast_to_flat(
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca [32 x i8], align 4, addrspace(5)
-; CHECK-NEXT:    [[ALLOCA_CAST:%.*]] = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* [[ALLOCA]], i32 0, i32 0
-; CHECK-NEXT:    [[ALLOCA_CAST_ASC:%.*]] = addrspacecast i8 addrspace(5)* [[ALLOCA_CAST]] to i8*
-; CHECK-NEXT:    [[ARG_CAST:%.*]] = getelementptr inbounds [31 x i8], [31 x i8] addrspace(4)* [[ARG:%.*]], i64 0, i64 0
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p4i8.i64(i8* noundef nonnull align 1 dereferenceable(31) [[ALLOCA_CAST_ASC]], i8 addrspace(4)* noundef align 4 dereferenceable(31) [[ARG_CAST]], i64 31, i1 false)
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(5)* [[GEP]], align 1
+; CHECK-NEXT:    [[ALLOCA_CAST_ASC:%.*]] = addrspacecast ptr addrspace(5) [[ALLOCA]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p4.i64(ptr noundef nonnull align 1 dereferenceable(31) [[ALLOCA_CAST_ASC]], ptr addrspace(4) noundef align 4 dereferenceable(31) [[ARG:%.*]], i64 31, i1 false)
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [32 x i8], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(5) [[GEP]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
   %alloca = alloca [32 x i8], align 4, addrspace(5)
-  %alloca.cast = bitcast [32 x i8] addrspace(5)* %alloca to i8 addrspace(5)*
-  %alloca.cast.asc = addrspacecast i8 addrspace(5)* %alloca.cast to i8*
-  %arg.cast = bitcast [31 x i8] addrspace(4)* %arg to i8 addrspace(4)*
-  call void @llvm.memcpy.p0i8.p4i8.i64(i8* %alloca.cast.asc, i8 addrspace(4)* %arg.cast, i64 31, i1 false)
-  %gep = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* %alloca, i32 0, i32 %idx
-  %load = load i8, i8 addrspace(5)* %gep
+  %alloca.cast.asc = addrspacecast ptr addrspace(5) %alloca to ptr
+  call void @llvm.memcpy.p0.p4.i64(ptr %alloca.cast.asc, ptr addrspace(4) %arg, i64 31, i1 false)
+  %gep = getelementptr inbounds [32 x i8], ptr addrspace(5) %alloca, i32 0, i32 %idx
+  %load = load i8, ptr addrspace(5) %gep
   ret i8 %load
 }
 
 ; Alloca is only addressed through flat pointer.
-define i8 @memcpy_constant_arg_ptr_to_alloca_addrspacecast_to_flat2([32 x i8] addrspace(4)* noalias readonly align 4 dereferenceable(32) %arg, i32 %idx) {
+define i8 @memcpy_constant_arg_ptr_to_alloca_addrspacecast_to_flat2(ptr addrspace(4) noalias readonly align 4 dereferenceable(32) %arg, i32 %idx) {
 ; CHECK-LABEL: @memcpy_constant_arg_ptr_to_alloca_addrspacecast_to_flat2(
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca [32 x i8], align 4, addrspace(5)
-; CHECK-NEXT:    [[ALLOCA_CAST1:%.*]] = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* [[ALLOCA]], i32 0, i32 0
-; CHECK-NEXT:    [[ALLOCA_CAST:%.*]] = addrspacecast i8 addrspace(5)* [[ALLOCA_CAST1]] to i8*
-; CHECK-NEXT:    [[ARG_CAST:%.*]] = getelementptr inbounds [32 x i8], [32 x i8] addrspace(4)* [[ARG:%.*]], i64 0, i64 0
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p4i8.i64(i8* noundef nonnull align 1 dereferenceable(32) [[ALLOCA_CAST]], i8 addrspace(4)* noundef align 4 dereferenceable(32) [[ARG_CAST]], i64 32, i1 false)
-; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds [32 x i8], [32 x i8] addrspace(5)* [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
-; CHECK-NEXT:    [[GEP:%.*]] = addrspacecast i8 addrspace(5)* [[GEP2]] to i8*
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8* [[GEP]], align 1
+; CHECK-NEXT:    [[ALLOCA_CAST_ASC:%.*]] = addrspacecast ptr addrspace(5) [[ALLOCA]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p4.i64(ptr noundef nonnull align 1 dereferenceable(32) [[ALLOCA_CAST_ASC]], ptr addrspace(4) noundef align 4 dereferenceable(32) [[ARG:%.*]], i64 32, i1 false)
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [32 x i8], ptr [[ALLOCA_CAST_ASC]], i64 0, i64 [[TMP1]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr [[GEP]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
   %alloca = alloca [32 x i8], align 4, addrspace(5)
-  %alloca.cast.asc = addrspacecast [32 x i8] addrspace(5)* %alloca to [32 x i8]*
-  %alloca.cast = bitcast [32 x i8]* %alloca.cast.asc to i8*
-  %arg.cast = bitcast [32 x i8] addrspace(4)* %arg to i8 addrspace(4)*
-  call void @llvm.memcpy.p0i8.p4i8.i64(i8* %alloca.cast, i8 addrspace(4)* %arg.cast, i64 32, i1 false)
-  %gep = getelementptr inbounds [32 x i8], [32 x i8]* %alloca.cast.asc, i32 0, i32 %idx
-  %load = load i8, i8* %gep
+  %alloca.cast.asc = addrspacecast ptr addrspace(5) %alloca to ptr
+  call void @llvm.memcpy.p0.p4.i64(ptr %alloca.cast.asc, ptr addrspace(4) %arg, i64 32, i1 false)
+  %gep = getelementptr inbounds [32 x i8], ptr %alloca.cast.asc, i32 0, i32 %idx
+  %load = load i8, ptr %gep
   ret i8 %load
 }
 
 %struct.ty = type { [4 x i32] }
 
-define amdgpu_kernel void @byref_infloop(i8* %scratch, %struct.ty addrspace(4)* byref(%struct.ty) align 4 %arg) local_unnamed_addr #1 {
+define amdgpu_kernel void @byref_infloop(ptr %scratch, ptr addrspace(4) byref(%struct.ty) align 4 %arg) local_unnamed_addr #1 {
 ; CHECK-LABEL: @byref_infloop(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[CAST_ALLOCA:%.*]] = bitcast [[STRUCT_TY:%.*]] addrspace(4)* [[ARG:%.*]] to i8 addrspace(4)*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p4i8.i32(i8* noundef nonnull align 4 dereferenceable(16) [[SCRATCH:%.*]], i8 addrspace(4)* noundef align 4 dereferenceable(16) [[CAST_ALLOCA]], i32 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p4.i32(ptr noundef nonnull align 4 dereferenceable(16) [[SCRATCH:%.*]], ptr addrspace(4) noundef align 4 dereferenceable(16) [[ARG:%.*]], i32 16, i1 false)
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %alloca = alloca [4 x i32], align 4, addrspace(5)
-  %cast.arg = bitcast %struct.ty addrspace(4)* %arg to i8 addrspace(4)*
-  %cast.alloca = bitcast [4 x i32] addrspace(5)* %alloca to i8 addrspace(5)*
-  call void @llvm.memcpy.p5i8.p4i8.i32(i8 addrspace(5)* align 4 %cast.alloca, i8 addrspace(4)* align 4 %cast.arg, i32 16, i1 false)
-  call void @llvm.memcpy.p0i8.p5i8.i32(i8* align 4 %scratch, i8 addrspace(5)* align 4 %cast.alloca, i32 16, i1 false)
+  call void @llvm.memcpy.p5.p4.i32(ptr addrspace(5) align 4 %alloca, ptr addrspace(4) align 4 %arg, i32 16, i1 false)
+  call void @llvm.memcpy.p0.p5.i32(ptr align 4 %scratch, ptr addrspace(5) align 4 %alloca, i32 16, i1 false)
   ret void
 }
 
-define amdgpu_kernel void @byref_infloop_metadata(i8* %scratch, %struct.ty addrspace(4)* byref(%struct.ty) align 4 %arg) local_unnamed_addr #1 {
+define amdgpu_kernel void @byref_infloop_metadata(ptr %scratch, ptr addrspace(4) byref(%struct.ty) align 4 %arg) local_unnamed_addr #1 {
 ; CHECK-LABEL: @byref_infloop_metadata(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[CAST_ALLOCA:%.*]] = bitcast [[STRUCT_TY:%.*]] addrspace(4)* [[ARG:%.*]] to i8 addrspace(4)*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p4i8.i32(i8* noundef nonnull align 4 dereferenceable(16) [[SCRATCH:%.*]], i8 addrspace(4)* noundef align 4 dereferenceable(16) [[CAST_ALLOCA]], i32 16, i1 false), !noalias !0
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p4.i32(ptr noundef nonnull align 4 dereferenceable(16) [[SCRATCH:%.*]], ptr addrspace(4) noundef align 4 dereferenceable(16) [[ARG:%.*]], i32 16, i1 false), !noalias !0
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %alloca = alloca [4 x i32], align 4, addrspace(5)
-  %cast.arg = bitcast %struct.ty addrspace(4)* %arg to i8 addrspace(4)*
-  %cast.alloca = bitcast [4 x i32] addrspace(5)* %alloca to i8 addrspace(5)*
-  call void @llvm.memcpy.p5i8.p4i8.i32(i8 addrspace(5)* align 4 %cast.alloca, i8 addrspace(4)* align 4 %cast.arg, i32 16, i1 false), !noalias !0
-  call void @llvm.memcpy.p0i8.p5i8.i32(i8* align 4 %scratch, i8 addrspace(5)* align 4 %cast.alloca, i32 16, i1 false), !noalias !0
+  call void @llvm.memcpy.p5.p4.i32(ptr addrspace(5) align 4 %alloca, ptr addrspace(4) align 4 %arg, i32 16, i1 false), !noalias !0
+  call void @llvm.memcpy.p0.p5.i32(ptr align 4 %scratch, ptr addrspace(5) align 4 %alloca, i32 16, i1 false), !noalias !0
   ret void
 }
 
-define amdgpu_kernel void @byref_infloop_addrspacecast(i8* %scratch, %struct.ty addrspace(4)* byref(%struct.ty) align 4 %arg) local_unnamed_addr #1 {
+define amdgpu_kernel void @byref_infloop_addrspacecast(ptr %scratch, ptr addrspace(4) byref(%struct.ty) align 4 %arg) local_unnamed_addr #1 {
 ; CHECK-LABEL: @byref_infloop_addrspacecast(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca [4 x i32], align 4, addrspace(5)
-; CHECK-NEXT:    [[CAST_ARG:%.*]] = bitcast [[STRUCT_TY:%.*]] addrspace(4)* [[ARG:%.*]] to i8 addrspace(4)*
-; CHECK-NEXT:    [[CAST_ALLOCA:%.*]] = bitcast [4 x i32] addrspace(5)* [[ALLOCA]] to i8 addrspace(5)*
-; CHECK-NEXT:    [[ADDRSPACECAST_ALLOCA:%.*]] = addrspacecast i8 addrspace(5)* [[CAST_ALLOCA]] to i8*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p4i8.i64(i8* noundef nonnull align 4 dereferenceable(16) [[ADDRSPACECAST_ALLOCA]], i8 addrspace(4)* noundef align 4 dereferenceable(16) [[CAST_ARG]], i64 16, i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) [[SCRATCH:%.*]], i8* noundef nonnull align 4 dereferenceable(16) [[ADDRSPACECAST_ALLOCA]], i64 16, i1 false)
+; CHECK-NEXT:    [[ADDRSPACECAST_ALLOCA:%.*]] = addrspacecast ptr addrspace(5) [[ALLOCA]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p4.i64(ptr noundef nonnull align 4 dereferenceable(16) [[ADDRSPACECAST_ALLOCA]], ptr addrspace(4) noundef align 4 dereferenceable(16) [[ARG:%.*]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) [[SCRATCH:%.*]], ptr noundef nonnull align 4 dereferenceable(16) [[ADDRSPACECAST_ALLOCA]], i64 16, i1 false)
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %alloca = alloca [4 x i32], align 4, addrspace(5)
-  %cast.arg = bitcast %struct.ty addrspace(4)* %arg to i8 addrspace(4)*
-  %cast.alloca = bitcast [4 x i32] addrspace(5)* %alloca to i8 addrspace(5)*
-  %addrspacecast.alloca = addrspacecast i8 addrspace(5)* %cast.alloca to i8*
-  call void @llvm.memcpy.p0i8.p4i8.i64(i8* nonnull align 4 dereferenceable(16) %addrspacecast.alloca, i8 addrspace(4)* align 4 dereferenceable(16) %cast.arg, i64 16, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 4 dereferenceable(16) %scratch, i8* nonnull align 4 dereferenceable(16) %addrspacecast.alloca, i64 16, i1 false)
+  %addrspacecast.alloca = addrspacecast ptr addrspace(5) %alloca to ptr
+  call void @llvm.memcpy.p0.p4.i64(ptr nonnull align 4 dereferenceable(16) %addrspacecast.alloca, ptr addrspace(4) align 4 dereferenceable(16) %arg, i64 16, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 dereferenceable(16) %scratch, ptr nonnull align 4 dereferenceable(16) %addrspacecast.alloca, i64 16, i1 false)
   ret void
 }
 
-define amdgpu_kernel void @byref_infloop_memmove(i8* %scratch, %struct.ty addrspace(4)* byref(%struct.ty) align 4 %arg) local_unnamed_addr #1 {
+define amdgpu_kernel void @byref_infloop_memmove(ptr %scratch, ptr addrspace(4) byref(%struct.ty) align 4 %arg) local_unnamed_addr #1 {
 ; CHECK-LABEL: @byref_infloop_memmove(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[CAST_ALLOCA:%.*]] = bitcast [[STRUCT_TY:%.*]] addrspace(4)* [[ARG:%.*]] to i8 addrspace(4)*
-; CHECK-NEXT:    call void @llvm.memmove.p0i8.p4i8.i32(i8* noundef nonnull align 4 dereferenceable(16) [[SCRATCH:%.*]], i8 addrspace(4)* noundef align 4 dereferenceable(16) [[CAST_ALLOCA]], i32 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p4.i32(ptr noundef nonnull align 4 dereferenceable(16) [[SCRATCH:%.*]], ptr addrspace(4) noundef align 4 dereferenceable(16) [[ARG:%.*]], i32 16, i1 false)
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %alloca = alloca [4 x i32], align 4, addrspace(5)
-  %cast.arg = bitcast %struct.ty addrspace(4)* %arg to i8 addrspace(4)*
-  %cast.alloca = bitcast [4 x i32] addrspace(5)* %alloca to i8 addrspace(5)*
-  call void @llvm.memmove.p5i8.p4i8.i32(i8 addrspace(5)* align 4 %cast.alloca, i8 addrspace(4)* align 4 %cast.arg, i32 16, i1 false)
-  call void @llvm.memmove.p0i8.p5i8.i32(i8* align 4 %scratch, i8 addrspace(5)* align 4 %cast.alloca, i32 16, i1 false)
+  call void @llvm.memmove.p5.p4.i32(ptr addrspace(5) align 4 %alloca, ptr addrspace(4) align 4 %arg, i32 16, i1 false)
+  call void @llvm.memmove.p0.p5.i32(ptr align 4 %scratch, ptr addrspace(5) align 4 %alloca, i32 16, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p5i8.i32(i8* noalias nocapture writeonly, i8 addrspace(5)* noalias nocapture readonly, i32, i1 immarg) #0
-declare void @llvm.memcpy.p5i8.p4i8.i32(i8 addrspace(5)* nocapture, i8 addrspace(4)* nocapture, i32, i1) #0
-declare void @llvm.memcpy.p0i8.p4i8.i64(i8* nocapture, i8 addrspace(4)* nocapture, i64, i1) #0
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) #0
-declare void @llvm.memcpy.p5i8.p4i8.i64(i8 addrspace(5)* nocapture, i8 addrspace(4)* nocapture, i64, i1) #0
-declare void @llvm.memmove.p5i8.p4i8.i32(i8 addrspace(5)* nocapture, i8 addrspace(4)* nocapture, i32, i1) #0
-declare void @llvm.memmove.p0i8.p5i8.i32(i8* nocapture, i8 addrspace(5)* nocapture, i32, i1) #0
-declare i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr() #1
+declare void @llvm.memcpy.p0.p5.i32(ptr noalias nocapture writeonly, ptr addrspace(5) noalias nocapture readonly, i32, i1 immarg) #0
+declare void @llvm.memcpy.p5.p4.i32(ptr addrspace(5) nocapture, ptr addrspace(4) nocapture, i32, i1) #0
+declare void @llvm.memcpy.p0.p4.i64(ptr nocapture, ptr addrspace(4) nocapture, i64, i1) #0
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #0
+declare void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) nocapture, ptr addrspace(4) nocapture, i64, i1) #0
+declare void @llvm.memmove.p5.p4.i32(ptr addrspace(5) nocapture, ptr addrspace(4) nocapture, i32, i1) #0
+declare void @llvm.memmove.p0.p5.i32(ptr nocapture, ptr addrspace(5) nocapture, i32, i1) #0
+declare ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr() #1
 
 attributes #0 = { argmemonly nounwind willreturn }
 attributes #1 = { nounwind readnone speculatable }

diff  --git a/llvm/test/Transforms/InstCombine/ARM/neon-intrinsics.ll b/llvm/test/Transforms/InstCombine/ARM/neon-intrinsics.ll
index 489566ec5ac07..1c5287b7042fb 100644
--- a/llvm/test/Transforms/InstCombine/ARM/neon-intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/ARM/neon-intrinsics.ll
@@ -3,8 +3,8 @@
 ; The alignment arguments for NEON load/store intrinsics can be increased
 ; by instcombine.  Check for this.
 
-; CHECK: vld4.v2i32.p0i8({{.*}}, i32 32)
-; CHECK: vst4.p0i8.v2i32({{.*}}, i32 16)
+; CHECK: vld4.v2i32.p0({{.*}}, i32 32)
+; CHECK: vst4.p0.v2i32({{.*}}, i32 16)
 
 @x = common global [8 x i32] zeroinitializer, align 32
 @y = common global [8 x i32] zeroinitializer, align 16
@@ -12,14 +12,14 @@
 %struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
 
 define void @test() nounwind ssp {
-  %tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32.p0i8(i8* bitcast ([8 x i32]* @x to i8*), i32 1)
+  %tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32.p0(ptr @x, i32 1)
   %tmp2 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 0
   %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 1
   %tmp4 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 2
   %tmp5 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 3
-  call void @llvm.arm.neon.vst4.p0i8.v2i32(i8* bitcast ([8 x i32]* @y to i8*), <2 x i32> %tmp2, <2 x i32> %tmp3, <2 x i32> %tmp4, <2 x i32> %tmp5, i32 1)
+  call void @llvm.arm.neon.vst4.p0.v2i32(ptr @y, <2 x i32> %tmp2, <2 x i32> %tmp3, <2 x i32> %tmp4, <2 x i32> %tmp5, i32 1)
   ret void
 }
 
-declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32.p0i8(i8*, i32) nounwind readonly
-declare void @llvm.arm.neon.vst4.p0i8.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
+declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32.p0(ptr, i32) nounwind readonly
+declare void @llvm.arm.neon.vst4.p0.v2i32(ptr, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind

diff  --git a/llvm/test/Transforms/InstCombine/X86/pr2645-1.ll b/llvm/test/Transforms/InstCombine/X86/pr2645-1.ll
index 9f35c97088f0a..9ab1395b03625 100644
--- a/llvm/test/Transforms/InstCombine/X86/pr2645-1.ll
+++ b/llvm/test/Transforms/InstCombine/X86/pr2645-1.ll
@@ -3,7 +3,7 @@
 ; PR2645
 ; instcombine shouldn't delete the shufflevector.
 
-define internal void @0(i8* %arg, i32 %arg1, i8* %arg2) {
+define internal void @0(ptr %arg, i32 %arg1, ptr %arg2) {
 ; CHECK-LABEL: @0(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
@@ -13,18 +13,16 @@ define internal void @0(i8* %arg, i32 %arg1, i8* %arg2) {
 ; CHECK-NEXT:    br i1 [[I]], label [[BB4]], label [[BB18:%.*]]
 ; CHECK:       bb4:
 ; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[DOT0]] to i64
-; CHECK-NEXT:    [[I5:%.*]] = getelementptr i8, i8* [[ARG2:%.*]], i64 [[TMP0]]
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[I5]] to <1 x i64>*
-; CHECK-NEXT:    [[I71:%.*]] = load <1 x i64>, <1 x i64>* [[TMP1]], align 1
+; CHECK-NEXT:    [[I5:%.*]] = getelementptr i8, ptr [[ARG2:%.*]], i64 [[TMP0]]
+; CHECK-NEXT:    [[I71:%.*]] = load <1 x i64>, ptr [[I5]], align 1
 ; CHECK-NEXT:    [[I9:%.*]] = call <2 x i64> @foo(<1 x i64> [[I71]])
 ; CHECK-NEXT:    [[I11:%.*]] = bitcast <2 x i64> [[I9]] to <8 x i16>
 ; CHECK-NEXT:    [[I12:%.*]] = shufflevector <8 x i16> [[I11]], <8 x i16> poison, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
 ; CHECK-NEXT:    [[I13:%.*]] = bitcast <8 x i16> [[I12]] to <4 x i32>
 ; CHECK-NEXT:    [[I14:%.*]] = sitofp <4 x i32> [[I13]] to <4 x float>
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[DOT0]] to i64
-; CHECK-NEXT:    [[I15:%.*]] = getelementptr i8, i8* [[ARG:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[I16:%.*]] = bitcast i8* [[I15]] to <4 x float>*
-; CHECK-NEXT:    store <4 x float> [[I14]], <4 x float>* [[I16]], align 1
+; CHECK-NEXT:    [[I15:%.*]] = getelementptr i8, ptr [[ARG:%.*]], i64 [[TMP2]]
+; CHECK-NEXT:    store <4 x float> [[I14]], ptr [[I15]], align 1
 ; CHECK-NEXT:    [[I17]] = add i32 [[DOT0]], 1
 ; CHECK-NEXT:    br label [[BB3]]
 ; CHECK:       bb18:
@@ -40,9 +38,8 @@ bb3:                                              ; preds = %bb4, %bb
   br i1 %i, label %bb4, label %bb18
 
 bb4:                                              ; preds = %bb3
-  %i5 = getelementptr i8, i8* %arg2, i32 %.0
-  %i6 = bitcast i8* %i5 to <4 x i16>*
-  %i7 = load <4 x i16>, <4 x i16>* %i6, align 1
+  %i5 = getelementptr i8, ptr %arg2, i32 %.0
+  %i7 = load <4 x i16>, ptr %i5, align 1
   %i8 = bitcast <4 x i16> %i7 to <1 x i64>
   %i9 = call <2 x i64> @foo(<1 x i64> %i8)
   %i10 = bitcast <2 x i64> %i9 to <4 x i32>
@@ -50,9 +47,8 @@ bb4:                                              ; preds = %bb3
   %i12 = shufflevector <8 x i16> %i11, <8 x i16> %i11, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
   %i13 = bitcast <8 x i16> %i12 to <4 x i32>
   %i14 = sitofp <4 x i32> %i13 to <4 x float>
-  %i15 = getelementptr i8, i8* %arg, i32 %.0
-  %i16 = bitcast i8* %i15 to <4 x float>*
-  store <4 x float> %i14, <4 x float>* %i16, align 1
+  %i15 = getelementptr i8, ptr %arg, i32 %.0
+  store <4 x float> %i14, ptr %i15, align 1
   %i17 = add i32 %.0, 1
   br label %bb3
 


        

