[llvm] fa46f1a - [X86] Convert some tests to opaque pointers (NFC)

Nikita Popov via llvm-commits <llvm-commits@lists.llvm.org>
Wed Apr 5 05:08:41 PDT 2023


Author: Nikita Popov
Date: 2023-04-05T14:08:30+02:00
New Revision: fa46f1ac3b02bcd786bcaa947f8c4f14ea652f7a

URL: https://github.com/llvm/llvm-project/commit/fa46f1ac3b02bcd786bcaa947f8c4f14ea652f7a
DIFF: https://github.com/llvm/llvm-project/commit/fa46f1ac3b02bcd786bcaa947f8c4f14ea652f7a.diff

LOG: [X86] Convert some tests to opaque pointers (NFC)

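The change itself is mechanical: typed pointer types (i8*, i64*, ...) in the tests
are replaced by the opaque ptr type, and the -opaque-pointers=0 flag, where present,
is dropped from the RUN lines. A minimal before/after sketch, taken from the first
test in the diff below:

    ; typed pointers (old form)
    define i8 @load_i8(i8* %ptr) {
      %v = load atomic i8, i8* %ptr unordered, align 1
      ret i8 %v
    }

    ; opaque pointers (new form)
    define i8 @load_i8(ptr %ptr) {
      %v = load atomic i8, ptr %ptr unordered, align 1
      ret i8 %v
    }
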
Added: 
    

Modified: 
    llvm/test/CodeGen/X86/atomic-unordered.ll
    llvm/test/CodeGen/X86/atomic64.ll
    llvm/test/CodeGen/X86/catchpad-lifetime.ll
    llvm/test/CodeGen/X86/cfguard-checks.ll
    llvm/test/CodeGen/X86/codegen-prepare-cast.ll
    llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
    llvm/test/CodeGen/X86/fold-sext-trunc.ll
    llvm/test/CodeGen/X86/pr32610.ll
    llvm/test/CodeGen/X86/sink-gep-before-mem-inst.ll
    llvm/test/CodeGen/X86/stack-protector-no-return.ll
    llvm/test/CodeGen/X86/tailcall-extract.ll
    llvm/test/CodeGen/X86/tls-loads-control.ll
    llvm/test/CodeGen/X86/tls-loads-control2.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/atomic-unordered.ll b/llvm/test/CodeGen/X86/atomic-unordered.ll
index 8a4bdf514c03b..9482d71ebff03 100644
--- a/llvm/test/CodeGen/X86/atomic-unordered.ll
+++ b/llvm/test/CodeGen/X86/atomic-unordered.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel=0 | FileCheck --check-prefixes=CHECK,CHECK-O0,CHECK-O0-CUR %s
-; RUN: llc -opaque-pointers=0 -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake  -x86-experimental-unordered-atomic-isel=0 | FileCheck --check-prefixes=CHECK,CHECK-O3,CHECK-O3-CUR %s
-; RUN: llc -opaque-pointers=0 -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel=1 | FileCheck --check-prefixes=CHECK,CHECK-O0,CHECK-O0-EX %s
-; RUN: llc -opaque-pointers=0 -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel=1 | FileCheck --check-prefixes=CHECK,CHECK-O3,CHECK-O3-EX %s
+; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel=0 | FileCheck --check-prefixes=CHECK,CHECK-O0,CHECK-O0-CUR %s
+; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake  -x86-experimental-unordered-atomic-isel=0 | FileCheck --check-prefixes=CHECK,CHECK-O3,CHECK-O3-CUR %s
+; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel=1 | FileCheck --check-prefixes=CHECK,CHECK-O0,CHECK-O0-EX %s
+; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel=1 | FileCheck --check-prefixes=CHECK,CHECK-O3,CHECK-O3-EX %s
 
-define i8 @load_i8(i8* %ptr) {
+define i8 @load_i8(ptr %ptr) {
 ; CHECK-O0-LABEL: load_i8:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movb (%rdi), %al
@@ -14,11 +14,11 @@ define i8 @load_i8(i8* %ptr) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    movzbl (%rdi), %eax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i8, i8* %ptr unordered, align 1
+  %v = load atomic i8, ptr %ptr unordered, align 1
   ret i8 %v
 }
 
-define void @store_i8(i8* %ptr, i8 %v) {
+define void @store_i8(ptr %ptr, i8 %v) {
 ; CHECK-O0-LABEL: store_i8:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movb %sil, %al
@@ -29,11 +29,11 @@ define void @store_i8(i8* %ptr, i8 %v) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    movb %sil, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  store atomic i8 %v, i8* %ptr unordered, align 1
+  store atomic i8 %v, ptr %ptr unordered, align 1
   ret void
 }
 
-define i16 @load_i16(i16* %ptr) {
+define i16 @load_i16(ptr %ptr) {
 ; CHECK-O0-LABEL: load_i16:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movw (%rdi), %ax
@@ -43,12 +43,12 @@ define i16 @load_i16(i16* %ptr) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    movzwl (%rdi), %eax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i16, i16* %ptr unordered, align 2
+  %v = load atomic i16, ptr %ptr unordered, align 2
   ret i16 %v
 }
 
 
-define void @store_i16(i16* %ptr, i16 %v) {
+define void @store_i16(ptr %ptr, i16 %v) {
 ; CHECK-O0-LABEL: store_i16:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movw %si, %ax
@@ -59,43 +59,43 @@ define void @store_i16(i16* %ptr, i16 %v) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    movw %si, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  store atomic i16 %v, i16* %ptr unordered, align 2
+  store atomic i16 %v, ptr %ptr unordered, align 2
   ret void
 }
 
-define i32 @load_i32(i32* %ptr) {
+define i32 @load_i32(ptr %ptr) {
 ; CHECK-LABEL: load_i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl (%rdi), %eax
 ; CHECK-NEXT:    retq
-  %v = load atomic i32, i32* %ptr unordered, align 4
+  %v = load atomic i32, ptr %ptr unordered, align 4
   ret i32 %v
 }
 
-define void @store_i32(i32* %ptr, i32 %v) {
+define void @store_i32(ptr %ptr, i32 %v) {
 ; CHECK-LABEL: store_i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, (%rdi)
 ; CHECK-NEXT:    retq
-  store atomic i32 %v, i32* %ptr unordered, align 4
+  store atomic i32 %v, ptr %ptr unordered, align 4
   ret void
 }
 
-define i64 @load_i64(i64* %ptr) {
+define i64 @load_i64(ptr %ptr) {
 ; CHECK-LABEL: load_i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %ptr unordered, align 8
+  %v = load atomic i64, ptr %ptr unordered, align 8
   ret i64 %v
 }
 
-define void @store_i64(i64* %ptr, i64 %v) {
+define void @store_i64(ptr %ptr, i64 %v) {
 ; CHECK-LABEL: store_i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, (%rdi)
 ; CHECK-NEXT:    retq
-  store atomic i64 %v, i64* %ptr unordered, align 8
+  store atomic i64 %v, ptr %ptr unordered, align 8
   ret void
 }
 
@@ -106,7 +106,7 @@ define void @store_i64(i64* %ptr, i64 %v) {
 ;; Start w/some clearly illegal ones.
 
 ; Must use a full width op, not a byte op
-define void @narrow_writeback_or(i64* %ptr) {
+define void @narrow_writeback_or(ptr %ptr) {
 ; CHECK-O0-LABEL: narrow_writeback_or:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -118,14 +118,14 @@ define void @narrow_writeback_or(i64* %ptr) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    orq $7, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %ptr unordered, align 8
+  %v = load atomic i64, ptr %ptr unordered, align 8
   %v.new = or i64 %v, 7
-  store atomic i64 %v.new, i64* %ptr unordered, align 8
+  store atomic i64 %v.new, ptr %ptr unordered, align 8
   ret void
 }
 
 ; Must use a full width op, not a byte op
-define void @narrow_writeback_and(i64* %ptr) {
+define void @narrow_writeback_and(ptr %ptr) {
 ; CHECK-O0-LABEL: narrow_writeback_and:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -140,14 +140,14 @@ define void @narrow_writeback_and(i64* %ptr) {
 ; CHECK-O3-NEXT:    movl $4294967040, %eax # imm = 0xFFFFFF00
 ; CHECK-O3-NEXT:    andq %rax, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %ptr unordered, align 8
+  %v = load atomic i64, ptr %ptr unordered, align 8
   %v.new = and i64 %v, 4294967040 ;; 0xFFFF_FF00
-  store atomic i64 %v.new, i64* %ptr unordered, align 8
+  store atomic i64 %v.new, ptr %ptr unordered, align 8
   ret void
 }
 
 ; Must use a full width op, not a byte op
-define void @narrow_writeback_xor(i64* %ptr) {
+define void @narrow_writeback_xor(ptr %ptr) {
 ; CHECK-O0-LABEL: narrow_writeback_xor:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -159,9 +159,9 @@ define void @narrow_writeback_xor(i64* %ptr) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    xorq $7, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %ptr unordered, align 8
+  %v = load atomic i64, ptr %ptr unordered, align 8
   %v.new = xor i64 %v, 7
-  store atomic i64 %v.new, i64* %ptr unordered, align 8
+  store atomic i64 %v.new, ptr %ptr unordered, align 8
   ret void
 }
 
@@ -176,60 +176,60 @@ define void @narrow_writeback_xor(i64* %ptr) {
 ;; approach to incremental improvement.
 
 ; Legal if wider type is also atomic (TODO)
-define void @widen_store(i32* %p0, i32 %v1, i32 %v2) {
+define void @widen_store(ptr %p0, i32 %v1, i32 %v2) {
 ; CHECK-LABEL: widen_store:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, (%rdi)
 ; CHECK-NEXT:    movl %edx, 4(%rdi)
 ; CHECK-NEXT:    retq
-  %p1 = getelementptr i32, i32* %p0, i64 1
-  store atomic i32 %v1, i32* %p0 unordered, align 8
-  store atomic i32 %v2, i32* %p1 unordered, align 4
+  %p1 = getelementptr i32, ptr %p0, i64 1
+  store atomic i32 %v1, ptr %p0 unordered, align 8
+  store atomic i32 %v2, ptr %p1 unordered, align 4
   ret void
 }
 
 ; This one is *NOT* legal to widen.  With weaker alignment,
 ; the wider type might cross a cache line and violate the
 ; atomicity requirement.
-define void @widen_store_unaligned(i32* %p0, i32 %v1, i32 %v2) {
+define void @widen_store_unaligned(ptr %p0, i32 %v1, i32 %v2) {
 ; CHECK-LABEL: widen_store_unaligned:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, (%rdi)
 ; CHECK-NEXT:    movl %edx, 4(%rdi)
 ; CHECK-NEXT:    retq
-  %p1 = getelementptr i32, i32* %p0, i64 1
-  store atomic i32 %v1, i32* %p0 unordered, align 4
-  store atomic i32 %v2, i32* %p1 unordered, align 4
+  %p1 = getelementptr i32, ptr %p0, i64 1
+  store atomic i32 %v1, ptr %p0 unordered, align 4
+  store atomic i32 %v2, ptr %p1 unordered, align 4
   ret void
 }
 
 ; Legal if wider type is also atomic (TODO)
-define void @widen_broadcast(i32* %p0, i32 %v) {
+define void @widen_broadcast(ptr %p0, i32 %v) {
 ; CHECK-LABEL: widen_broadcast:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, (%rdi)
 ; CHECK-NEXT:    movl %esi, 4(%rdi)
 ; CHECK-NEXT:    retq
-  %p1 = getelementptr i32, i32* %p0, i64 1
-  store atomic i32 %v, i32* %p0 unordered, align 8
-  store atomic i32 %v, i32* %p1 unordered, align 4
+  %p1 = getelementptr i32, ptr %p0, i64 1
+  store atomic i32 %v, ptr %p0 unordered, align 8
+  store atomic i32 %v, ptr %p1 unordered, align 4
   ret void
 }
 
 ; Not legal to widen due to alignment restriction
-define void @widen_broadcast_unaligned(i32* %p0, i32 %v) {
+define void @widen_broadcast_unaligned(ptr %p0, i32 %v) {
 ; CHECK-LABEL: widen_broadcast_unaligned:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, (%rdi)
 ; CHECK-NEXT:    movl %esi, 4(%rdi)
 ; CHECK-NEXT:    retq
-  %p1 = getelementptr i32, i32* %p0, i64 1
-  store atomic i32 %v, i32* %p0 unordered, align 4
-  store atomic i32 %v, i32* %p1 unordered, align 4
+  %p1 = getelementptr i32, ptr %p0, i64 1
+  store atomic i32 %v, ptr %p0 unordered, align 4
+  store atomic i32 %v, ptr %p1 unordered, align 4
   ret void
 }
 
-define i128 @load_i128(i128* %ptr) {
+define i128 @load_i128(ptr %ptr) {
 ; CHECK-O0-LABEL: load_i128:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    pushq %rbx
@@ -258,11 +258,11 @@ define i128 @load_i128(i128* %ptr) {
 ; CHECK-O3-NEXT:    popq %rbx
 ; CHECK-O3-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i128, i128* %ptr unordered, align 16
+  %v = load atomic i128, ptr %ptr unordered, align 16
   ret i128 %v
 }
 
-define void @store_i128(i128* %ptr, i128 %v) {
+define void @store_i128(ptr %ptr, i128 %v) {
 ; CHECK-O0-LABEL: store_i128:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    pushq %rbx
@@ -311,11 +311,11 @@ define void @store_i128(i128* %ptr, i128 %v) {
 ; CHECK-O3-NEXT:    popq %rbx
 ; CHECK-O3-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-O3-NEXT:    retq
-  store atomic i128 %v, i128* %ptr unordered, align 16
+  store atomic i128 %v, ptr %ptr unordered, align 16
   ret void
 }
 
-define i256 @load_i256(i256* %ptr) {
+define i256 @load_i256(ptr %ptr) {
 ; CHECK-O0-LABEL: load_i256:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    subq $56, %rsp
@@ -362,25 +362,23 @@ define i256 @load_i256(i256* %ptr) {
 ; CHECK-O3-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-O3-NEXT:    vzeroupper
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i256, i256* %ptr unordered, align 16
+  %v = load atomic i256, ptr %ptr unordered, align 16
   ret i256 %v
 }
 
-define void @store_i256(i256* %ptr, i256 %v) {
+define void @store_i256(ptr %ptr, i256 %v) {
 ; CHECK-O0-LABEL: store_i256:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    subq $40, %rsp
 ; CHECK-O0-NEXT:    .cfi_def_cfa_offset 48
-; CHECK-O0-NEXT:    movq %rdx, %rax
-; CHECK-O0-NEXT:    movq %rsi, (%rsp) # 8-byte Spill
+; CHECK-O0-NEXT:    movq %rsi, %rax
 ; CHECK-O0-NEXT:    movq %rdi, %rsi
-; CHECK-O0-NEXT:    movq (%rsp), %rdi # 8-byte Reload
-; CHECK-O0-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
-; CHECK-O0-NEXT:    movq %rdi, {{[0-9]+}}(%rsp)
 ; CHECK-O0-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
+; CHECK-O0-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
 ; CHECK-O0-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
 ; CHECK-O0-NEXT:    movq %r8, {{[0-9]+}}(%rsp)
 ; CHECK-O0-NEXT:    movl $32, %edi
+; CHECK-O0-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
 ; CHECK-O0-NEXT:    xorl %ecx, %ecx
 ; CHECK-O0-NEXT:    callq __atomic_store@PLT
 ; CHECK-O0-NEXT:    addq $40, %rsp
@@ -404,12 +402,12 @@ define void @store_i256(i256* %ptr, i256 %v) {
 ; CHECK-O3-NEXT:    addq $40, %rsp
 ; CHECK-O3-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-O3-NEXT:    retq
-  store atomic i256 %v, i256* %ptr unordered, align 16
+  store atomic i256 %v, ptr %ptr unordered, align 16
   ret void
 }
 
 ; Legal if wider type is also atomic (TODO)
-define void @vec_store(i32* %p0, <2 x i32> %vec) {
+define void @vec_store(ptr %p0, <2 x i32> %vec) {
 ; CHECK-O0-CUR-LABEL: vec_store:
 ; CHECK-O0-CUR:       # %bb.0:
 ; CHECK-O0-CUR-NEXT:    vmovd %xmm0, %ecx
@@ -439,14 +437,14 @@ define void @vec_store(i32* %p0, <2 x i32> %vec) {
 ; CHECK-O3-EX-NEXT:    retq
   %v1 = extractelement <2 x i32> %vec, i32 0
   %v2 = extractelement <2 x i32> %vec, i32 1
-  %p1 = getelementptr i32, i32* %p0, i64 1
-  store atomic i32 %v1, i32* %p0 unordered, align 8
-  store atomic i32 %v2, i32* %p1 unordered, align 4
+  %p1 = getelementptr i32, ptr %p0, i64 1
+  store atomic i32 %v1, ptr %p0 unordered, align 8
+  store atomic i32 %v2, ptr %p1 unordered, align 4
   ret void
 }
 
 ; Not legal to widen due to alignment restriction
-define void @vec_store_unaligned(i32* %p0, <2 x i32> %vec) {
+define void @vec_store_unaligned(ptr %p0, <2 x i32> %vec) {
 ; CHECK-O0-CUR-LABEL: vec_store_unaligned:
 ; CHECK-O0-CUR:       # %bb.0:
 ; CHECK-O0-CUR-NEXT:    vmovd %xmm0, %ecx
@@ -476,9 +474,9 @@ define void @vec_store_unaligned(i32* %p0, <2 x i32> %vec) {
 ; CHECK-O3-EX-NEXT:    retq
   %v1 = extractelement <2 x i32> %vec, i32 0
   %v2 = extractelement <2 x i32> %vec, i32 1
-  %p1 = getelementptr i32, i32* %p0, i64 1
-  store atomic i32 %v1, i32* %p0 unordered, align 4
-  store atomic i32 %v2, i32* %p1 unordered, align 4
+  %p1 = getelementptr i32, ptr %p0, i64 1
+  store atomic i32 %v1, ptr %p0 unordered, align 4
+  store atomic i32 %v2, ptr %p1 unordered, align 4
   ret void
 }
 
@@ -486,7 +484,7 @@ define void @vec_store_unaligned(i32* %p0, <2 x i32> %vec) {
 
 ; Legal if wider type is also atomic (TODO)
 ; Also, can avoid register move from xmm to eax (TODO)
-define void @widen_broadcast2(i32* %p0, <2 x i32> %vec) {
+define void @widen_broadcast2(ptr %p0, <2 x i32> %vec) {
 ; CHECK-O0-CUR-LABEL: widen_broadcast2:
 ; CHECK-O0-CUR:       # %bb.0:
 ; CHECK-O0-CUR-NEXT:    vmovd %xmm0, %eax
@@ -513,14 +511,14 @@ define void @widen_broadcast2(i32* %p0, <2 x i32> %vec) {
 ; CHECK-O3-EX-NEXT:    vmovss %xmm0, 4(%rdi)
 ; CHECK-O3-EX-NEXT:    retq
   %v1 = extractelement <2 x i32> %vec, i32 0
-  %p1 = getelementptr i32, i32* %p0, i64 1
-  store atomic i32 %v1, i32* %p0 unordered, align 8
-  store atomic i32 %v1, i32* %p1 unordered, align 4
+  %p1 = getelementptr i32, ptr %p0, i64 1
+  store atomic i32 %v1, ptr %p0 unordered, align 8
+  store atomic i32 %v1, ptr %p1 unordered, align 4
   ret void
 }
 
 ; Not legal to widen due to alignment restriction
-define void @widen_broadcast2_unaligned(i32* %p0, <2 x i32> %vec) {
+define void @widen_broadcast2_unaligned(ptr %p0, <2 x i32> %vec) {
 ; CHECK-O0-CUR-LABEL: widen_broadcast2_unaligned:
 ; CHECK-O0-CUR:       # %bb.0:
 ; CHECK-O0-CUR-NEXT:    vmovd %xmm0, %eax
@@ -547,35 +545,35 @@ define void @widen_broadcast2_unaligned(i32* %p0, <2 x i32> %vec) {
 ; CHECK-O3-EX-NEXT:    vmovss %xmm0, 4(%rdi)
 ; CHECK-O3-EX-NEXT:    retq
   %v1 = extractelement <2 x i32> %vec, i32 0
-  %p1 = getelementptr i32, i32* %p0, i64 1
-  store atomic i32 %v1, i32* %p0 unordered, align 4
-  store atomic i32 %v1, i32* %p1 unordered, align 4
+  %p1 = getelementptr i32, ptr %p0, i64 1
+  store atomic i32 %v1, ptr %p0 unordered, align 4
+  store atomic i32 %v1, ptr %p1 unordered, align 4
   ret void
 }
 
 ; Legal if wider type is also atomic (TODO)
-define void @widen_zero_init(i32* %p0, i32 %v1, i32 %v2) {
+define void @widen_zero_init(ptr %p0, i32 %v1, i32 %v2) {
 ; CHECK-LABEL: widen_zero_init:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl $0, (%rdi)
 ; CHECK-NEXT:    movl $0, 4(%rdi)
 ; CHECK-NEXT:    retq
-  %p1 = getelementptr i32, i32* %p0, i64 1
-  store atomic i32 0, i32* %p0 unordered, align 8
-  store atomic i32 0, i32* %p1 unordered, align 4
+  %p1 = getelementptr i32, ptr %p0, i64 1
+  store atomic i32 0, ptr %p0 unordered, align 8
+  store atomic i32 0, ptr %p1 unordered, align 4
   ret void
 }
 
 ; Not legal to widen due to alignment restriction
-define void @widen_zero_init_unaligned(i32* %p0, i32 %v1, i32 %v2) {
+define void @widen_zero_init_unaligned(ptr %p0, i32 %v1, i32 %v2) {
 ; CHECK-LABEL: widen_zero_init_unaligned:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl $0, (%rdi)
 ; CHECK-NEXT:    movl $0, 4(%rdi)
 ; CHECK-NEXT:    retq
-  %p1 = getelementptr i32, i32* %p0, i64 1
-  store atomic i32 0, i32* %p0 unordered, align 4
-  store atomic i32 0, i32* %p1 unordered, align 4
+  %p1 = getelementptr i32, ptr %p0, i64 1
+  store atomic i32 0, ptr %p0 unordered, align 4
+  store atomic i32 0, ptr %p1 unordered, align 4
   ret void
 }
 
@@ -583,29 +581,29 @@ define void @widen_zero_init_unaligned(i32* %p0, i32 %v1, i32 %v2) {
 ;; on x86, so these are simply checking optimization quality.
 
 ; Legal, as expected
-define i64 @load_fold_add1(i64* %p) {
+define i64 @load_fold_add1(ptr %p) {
 ; CHECK-LABEL: load_fold_add1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    addq $15, %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = add i64 %v, 15
   ret i64 %ret
 }
 
-define i64 @load_fold_add2(i64* %p, i64 %v2) {
+define i64 @load_fold_add2(ptr %p, i64 %v2) {
 ; CHECK-LABEL: load_fold_add2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
 ; CHECK-NEXT:    addq (%rdi), %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = add i64 %v, %v2
   ret i64 %ret
 }
 
-define i64 @load_fold_add3(i64* %p1, i64* %p2) {
+define i64 @load_fold_add3(ptr %p1, ptr %p2) {
 ; CHECK-O0-LABEL: load_fold_add3:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -623,14 +621,14 @@ define i64 @load_fold_add3(i64* %p1, i64* %p2) {
 ; CHECK-O3-EX-NEXT:    movq (%rdi), %rax
 ; CHECK-O3-EX-NEXT:    addq (%rsi), %rax
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = add i64 %v, %v2
   ret i64 %ret
 }
 
 ; Legal, as expected
-define i64 @load_fold_sub1(i64* %p) {
+define i64 @load_fold_sub1(ptr %p) {
 ; CHECK-O0-LABEL: load_fold_sub1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -642,36 +640,36 @@ define i64 @load_fold_sub1(i64* %p) {
 ; CHECK-O3-NEXT:    movq (%rdi), %rax
 ; CHECK-O3-NEXT:    addq $-15, %rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = sub i64 %v, 15
   ret i64 %ret
 }
 
-define i64 @load_fold_sub2(i64* %p, i64 %v2) {
+define i64 @load_fold_sub2(ptr %p, i64 %v2) {
 ; CHECK-LABEL: load_fold_sub2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    subq %rsi, %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = sub i64 %v, %v2
   ret i64 %ret
 }
 
-define i64 @load_fold_sub3(i64* %p1, i64* %p2) {
+define i64 @load_fold_sub3(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: load_fold_sub3:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    subq (%rsi), %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = sub i64 %v, %v2
   ret i64 %ret
 }
 
 ; Legal, as expected
-define i64 @load_fold_mul1(i64* %p) {
+define i64 @load_fold_mul1(ptr %p) {
 ; CHECK-O0-LABEL: load_fold_mul1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    imulq $15, (%rdi), %rax
@@ -683,23 +681,23 @@ define i64 @load_fold_mul1(i64* %p) {
 ; CHECK-O3-NEXT:    leaq (%rax,%rax,4), %rax
 ; CHECK-O3-NEXT:    leaq (%rax,%rax,2), %rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = mul i64 %v, 15
   ret i64 %ret
 }
 
-define i64 @load_fold_mul2(i64* %p, i64 %v2) {
+define i64 @load_fold_mul2(ptr %p, i64 %v2) {
 ; CHECK-LABEL: load_fold_mul2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
 ; CHECK-NEXT:    imulq (%rdi), %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = mul i64 %v, %v2
   ret i64 %ret
 }
 
-define i64 @load_fold_mul3(i64* %p1, i64* %p2) {
+define i64 @load_fold_mul3(ptr %p1, ptr %p2) {
 ; CHECK-O0-LABEL: load_fold_mul3:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -717,14 +715,14 @@ define i64 @load_fold_mul3(i64* %p1, i64* %p2) {
 ; CHECK-O3-EX-NEXT:    movq (%rdi), %rax
 ; CHECK-O3-EX-NEXT:    imulq (%rsi), %rax
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = mul i64 %v, %v2
   ret i64 %ret
 }
 
 ; Legal to fold (TODO)
-define i64 @load_fold_sdiv1(i64* %p) {
+define i64 @load_fold_sdiv1(ptr %p) {
 ; CHECK-O0-LABEL: load_fold_sdiv1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -746,13 +744,13 @@ define i64 @load_fold_sdiv1(i64* %p) {
 ; CHECK-O3-NEXT:    addq %rax, %rcx
 ; CHECK-O3-NEXT:    movq %rcx, %rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = sdiv i64 %v, 15
   ret i64 %ret
 }
 
 ; Legal to fold (TODO)
-define i64 @load_fold_sdiv2(i64* %p, i64 %v2) {
+define i64 @load_fold_sdiv2(ptr %p, i64 %v2) {
 ; CHECK-O0-LABEL: load_fold_sdiv2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -777,12 +775,12 @@ define i64 @load_fold_sdiv2(i64* %p, i64 %v2) {
 ; CHECK-O3-NEXT:    divl %esi
 ; CHECK-O3-NEXT:    # kill: def $eax killed $eax def $rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = sdiv i64 %v, %v2
   ret i64 %ret
 }
 
-define i64 @load_fold_sdiv3(i64* %p1, i64* %p2) {
+define i64 @load_fold_sdiv3(ptr %p1, ptr %p2) {
 ; CHECK-O0-LABEL: load_fold_sdiv3:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -808,14 +806,14 @@ define i64 @load_fold_sdiv3(i64* %p1, i64* %p2) {
 ; CHECK-O3-NEXT:    divl %ecx
 ; CHECK-O3-NEXT:    # kill: def $eax killed $eax def $rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = sdiv i64 %v, %v2
   ret i64 %ret
 }
 
 ; Legal to fold (TODO)
-define i64 @load_fold_udiv1(i64* %p) {
+define i64 @load_fold_udiv1(ptr %p) {
 ; CHECK-O0-LABEL: load_fold_udiv1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -839,12 +837,12 @@ define i64 @load_fold_udiv1(i64* %p) {
 ; CHECK-O3-EX-NEXT:    mulxq (%rdi), %rax, %rax
 ; CHECK-O3-EX-NEXT:    shrq $3, %rax
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = udiv i64 %v, 15
   ret i64 %ret
 }
 
-define i64 @load_fold_udiv2(i64* %p, i64 %v2) {
+define i64 @load_fold_udiv2(ptr %p, i64 %v2) {
 ; CHECK-O0-LABEL: load_fold_udiv2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -870,12 +868,12 @@ define i64 @load_fold_udiv2(i64* %p, i64 %v2) {
 ; CHECK-O3-NEXT:    divl %esi
 ; CHECK-O3-NEXT:    # kill: def $eax killed $eax def $rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = udiv i64 %v, %v2
   ret i64 %ret
 }
 
-define i64 @load_fold_udiv3(i64* %p1, i64* %p2) {
+define i64 @load_fold_udiv3(ptr %p1, ptr %p2) {
 ; CHECK-O0-LABEL: load_fold_udiv3:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -902,14 +900,14 @@ define i64 @load_fold_udiv3(i64* %p1, i64* %p2) {
 ; CHECK-O3-NEXT:    divl %ecx
 ; CHECK-O3-NEXT:    # kill: def $eax killed $eax def $rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = udiv i64 %v, %v2
   ret i64 %ret
 }
 
 ; Legal to fold (TODO)
-define i64 @load_fold_srem1(i64* %p) {
+define i64 @load_fold_srem1(ptr %p) {
 ; CHECK-O0-LABEL: load_fold_srem1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -935,13 +933,13 @@ define i64 @load_fold_srem1(i64* %p) {
 ; CHECK-O3-NEXT:    subq %rax, %rcx
 ; CHECK-O3-NEXT:    movq %rcx, %rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = srem i64 %v, 15
   ret i64 %ret
 }
 
 ; Legal, as expected
-define i64 @load_fold_srem2(i64* %p, i64 %v2) {
+define i64 @load_fold_srem2(ptr %p, i64 %v2) {
 ; CHECK-O0-LABEL: load_fold_srem2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -968,12 +966,12 @@ define i64 @load_fold_srem2(i64* %p, i64 %v2) {
 ; CHECK-O3-NEXT:    divl %esi
 ; CHECK-O3-NEXT:    movl %edx, %eax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = srem i64 %v, %v2
   ret i64 %ret
 }
 
-define i64 @load_fold_srem3(i64* %p1, i64* %p2) {
+define i64 @load_fold_srem3(ptr %p1, ptr %p2) {
 ; CHECK-O0-LABEL: load_fold_srem3:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1001,14 +999,14 @@ define i64 @load_fold_srem3(i64* %p1, i64* %p2) {
 ; CHECK-O3-NEXT:    divl %ecx
 ; CHECK-O3-NEXT:    movl %edx, %eax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = srem i64 %v, %v2
   ret i64 %ret
 }
 
 ; Legal to fold (TODO)
-define i64 @load_fold_urem1(i64* %p) {
+define i64 @load_fold_urem1(ptr %p) {
 ; CHECK-O0-LABEL: load_fold_urem1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1030,13 +1028,13 @@ define i64 @load_fold_urem1(i64* %p) {
 ; CHECK-O3-NEXT:    leaq (%rcx,%rcx,2), %rcx
 ; CHECK-O3-NEXT:    subq %rcx, %rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = urem i64 %v, 15
   ret i64 %ret
 }
 
 ; Legal, as expected
-define i64 @load_fold_urem2(i64* %p, i64 %v2) {
+define i64 @load_fold_urem2(ptr %p, i64 %v2) {
 ; CHECK-O0-LABEL: load_fold_urem2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1064,12 +1062,12 @@ define i64 @load_fold_urem2(i64* %p, i64 %v2) {
 ; CHECK-O3-NEXT:    divl %esi
 ; CHECK-O3-NEXT:    movl %edx, %eax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = urem i64 %v, %v2
   ret i64 %ret
 }
 
-define i64 @load_fold_urem3(i64* %p1, i64* %p2) {
+define i64 @load_fold_urem3(ptr %p1, ptr %p2) {
 ; CHECK-O0-LABEL: load_fold_urem3:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1098,25 +1096,25 @@ define i64 @load_fold_urem3(i64* %p1, i64* %p2) {
 ; CHECK-O3-NEXT:    divl %ecx
 ; CHECK-O3-NEXT:    movl %edx, %eax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = urem i64 %v, %v2
   ret i64 %ret
 }
 
 ; Legal, as expected
-define i64 @load_fold_shl1(i64* %p) {
+define i64 @load_fold_shl1(ptr %p) {
 ; CHECK-LABEL: load_fold_shl1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    shlq $15, %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = shl i64 %v, 15
   ret i64 %ret
 }
 
-define i64 @load_fold_shl2(i64* %p, i64 %v2) {
+define i64 @load_fold_shl2(ptr %p, i64 %v2) {
 ; CHECK-O0-LABEL: load_fold_shl2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq %rsi, %rcx
@@ -1129,12 +1127,12 @@ define i64 @load_fold_shl2(i64* %p, i64 %v2) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    shlxq %rsi, (%rdi), %rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = shl i64 %v, %v2
   ret i64 %ret
 }
 
-define i64 @load_fold_shl3(i64* %p1, i64* %p2) {
+define i64 @load_fold_shl3(ptr %p1, ptr %p2) {
 ; CHECK-O0-LABEL: load_fold_shl3:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1148,25 +1146,25 @@ define i64 @load_fold_shl3(i64* %p1, i64* %p2) {
 ; CHECK-O3-NEXT:    movq (%rsi), %rax
 ; CHECK-O3-NEXT:    shlxq %rax, (%rdi), %rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = shl i64 %v, %v2
   ret i64 %ret
 }
 
 ; Legal, as expected
-define i64 @load_fold_lshr1(i64* %p) {
+define i64 @load_fold_lshr1(ptr %p) {
 ; CHECK-LABEL: load_fold_lshr1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    shrq $15, %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = lshr i64 %v, 15
   ret i64 %ret
 }
 
-define i64 @load_fold_lshr2(i64* %p, i64 %v2) {
+define i64 @load_fold_lshr2(ptr %p, i64 %v2) {
 ; CHECK-O0-LABEL: load_fold_lshr2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq %rsi, %rcx
@@ -1179,12 +1177,12 @@ define i64 @load_fold_lshr2(i64* %p, i64 %v2) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    shrxq %rsi, (%rdi), %rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = lshr i64 %v, %v2
   ret i64 %ret
 }
 
-define i64 @load_fold_lshr3(i64* %p1, i64* %p2) {
+define i64 @load_fold_lshr3(ptr %p1, ptr %p2) {
 ; CHECK-O0-LABEL: load_fold_lshr3:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1198,25 +1196,25 @@ define i64 @load_fold_lshr3(i64* %p1, i64* %p2) {
 ; CHECK-O3-NEXT:    movq (%rsi), %rax
 ; CHECK-O3-NEXT:    shrxq %rax, (%rdi), %rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = lshr i64 %v, %v2
   ret i64 %ret
 }
 
 ; Legal, as expected
-define i64 @load_fold_ashr1(i64* %p) {
+define i64 @load_fold_ashr1(ptr %p) {
 ; CHECK-LABEL: load_fold_ashr1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    sarq $15, %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = ashr i64 %v, 15
   ret i64 %ret
 }
 
-define i64 @load_fold_ashr2(i64* %p, i64 %v2) {
+define i64 @load_fold_ashr2(ptr %p, i64 %v2) {
 ; CHECK-O0-LABEL: load_fold_ashr2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq %rsi, %rcx
@@ -1229,12 +1227,12 @@ define i64 @load_fold_ashr2(i64* %p, i64 %v2) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    sarxq %rsi, (%rdi), %rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = ashr i64 %v, %v2
   ret i64 %ret
 }
 
-define i64 @load_fold_ashr3(i64* %p1, i64* %p2) {
+define i64 @load_fold_ashr3(ptr %p1, ptr %p2) {
 ; CHECK-O0-LABEL: load_fold_ashr3:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1248,14 +1246,14 @@ define i64 @load_fold_ashr3(i64* %p1, i64* %p2) {
 ; CHECK-O3-NEXT:    movq (%rsi), %rax
 ; CHECK-O3-NEXT:    sarxq %rax, (%rdi), %rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = ashr i64 %v, %v2
   ret i64 %ret
 }
 
 ; Legal, as expected
-define i64 @load_fold_and1(i64* %p) {
+define i64 @load_fold_and1(ptr %p) {
 ; CHECK-O0-LABEL: load_fold_and1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1267,23 +1265,23 @@ define i64 @load_fold_and1(i64* %p) {
 ; CHECK-O3-NEXT:    movq (%rdi), %rax
 ; CHECK-O3-NEXT:    andl $15, %eax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = and i64 %v, 15
   ret i64 %ret
 }
 
-define i64 @load_fold_and2(i64* %p, i64 %v2) {
+define i64 @load_fold_and2(ptr %p, i64 %v2) {
 ; CHECK-LABEL: load_fold_and2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
 ; CHECK-NEXT:    andq (%rdi), %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = and i64 %v, %v2
   ret i64 %ret
 }
 
-define i64 @load_fold_and3(i64* %p1, i64* %p2) {
+define i64 @load_fold_and3(ptr %p1, ptr %p2) {
 ; CHECK-O0-LABEL: load_fold_and3:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1301,36 +1299,36 @@ define i64 @load_fold_and3(i64* %p1, i64* %p2) {
 ; CHECK-O3-EX-NEXT:    movq (%rdi), %rax
 ; CHECK-O3-EX-NEXT:    andq (%rsi), %rax
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = and i64 %v, %v2
   ret i64 %ret
 }
 
 ; Legal, as expected
-define i64 @load_fold_or1(i64* %p) {
+define i64 @load_fold_or1(ptr %p) {
 ; CHECK-LABEL: load_fold_or1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    orq $15, %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = or i64 %v, 15
   ret i64 %ret
 }
 
-define i64 @load_fold_or2(i64* %p, i64 %v2) {
+define i64 @load_fold_or2(ptr %p, i64 %v2) {
 ; CHECK-LABEL: load_fold_or2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
 ; CHECK-NEXT:    orq (%rdi), %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = or i64 %v, %v2
   ret i64 %ret
 }
 
-define i64 @load_fold_or3(i64* %p1, i64* %p2) {
+define i64 @load_fold_or3(ptr %p1, ptr %p2) {
 ; CHECK-O0-LABEL: load_fold_or3:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1348,36 +1346,36 @@ define i64 @load_fold_or3(i64* %p1, i64* %p2) {
 ; CHECK-O3-EX-NEXT:    movq (%rdi), %rax
 ; CHECK-O3-EX-NEXT:    orq (%rsi), %rax
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = or i64 %v, %v2
   ret i64 %ret
 }
 
 ; Legal, as expected
-define i64 @load_fold_xor1(i64* %p) {
+define i64 @load_fold_xor1(ptr %p) {
 ; CHECK-LABEL: load_fold_xor1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    xorq $15, %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = xor i64 %v, 15
   ret i64 %ret
 }
 
-define i64 @load_fold_xor2(i64* %p, i64 %v2) {
+define i64 @load_fold_xor2(ptr %p, i64 %v2) {
 ; CHECK-LABEL: load_fold_xor2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
 ; CHECK-NEXT:    xorq (%rdi), %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = xor i64 %v, %v2
   ret i64 %ret
 }
 
-define i64 @load_fold_xor3(i64* %p1, i64* %p2) {
+define i64 @load_fold_xor3(ptr %p1, ptr %p2) {
 ; CHECK-O0-LABEL: load_fold_xor3:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1395,13 +1393,13 @@ define i64 @load_fold_xor3(i64* %p1, i64* %p2) {
 ; CHECK-O3-EX-NEXT:    movq (%rdi), %rax
 ; CHECK-O3-EX-NEXT:    xorq (%rsi), %rax
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = xor i64 %v, %v2
   ret i64 %ret
 }
 
-define i1 @load_fold_icmp1(i64* %p) {
+define i1 @load_fold_icmp1(ptr %p) {
 ; CHECK-O0-LABEL: load_fold_icmp1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1414,12 +1412,12 @@ define i1 @load_fold_icmp1(i64* %p) {
 ; CHECK-O3-NEXT:    cmpq $15, (%rdi)
 ; CHECK-O3-NEXT:    sete %al
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = icmp eq i64 %v, 15
   ret i1 %ret
 }
 
-define i1 @load_fold_icmp2(i64* %p, i64 %v2) {
+define i1 @load_fold_icmp2(ptr %p, i64 %v2) {
 ; CHECK-O0-LABEL: load_fold_icmp2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1432,12 +1430,12 @@ define i1 @load_fold_icmp2(i64* %p, i64 %v2) {
 ; CHECK-O3-NEXT:    cmpq %rsi, (%rdi)
 ; CHECK-O3-NEXT:    sete %al
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = icmp eq i64 %v, %v2
   ret i1 %ret
 }
 
-define i1 @load_fold_icmp3(i64* %p1, i64* %p2) {
+define i1 @load_fold_icmp3(ptr %p1, ptr %p2) {
 ; CHECK-O0-LABEL: load_fold_icmp3:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1459,8 +1457,8 @@ define i1 @load_fold_icmp3(i64* %p1, i64* %p2) {
 ; CHECK-O3-EX-NEXT:    cmpq (%rsi), %rax
 ; CHECK-O3-EX-NEXT:    sete %al
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i64, i64* %p1 unordered, align 8
-  %v2 = load atomic i64, i64* %p2 unordered, align 8
+  %v = load atomic i64, ptr %p1 unordered, align 8
+  %v2 = load atomic i64, ptr %p2 unordered, align 8
   %ret = icmp eq i64 %v, %v2
   ret i1 %ret
 }
@@ -1472,7 +1470,7 @@ define i1 @load_fold_icmp3(i64* %p1, i64* %p2) {
 ;; required not to narrow the store though!
 
 ; Legal, as expected
-define void @rmw_fold_add1(i64* %p, i64 %v) {
+define void @rmw_fold_add1(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_add1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1484,14 +1482,14 @@ define void @rmw_fold_add1(i64* %p, i64 %v) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    addq $15, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = add i64 %prev, 15
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_add2(i64* %p, i64 %v) {
+define void @rmw_fold_add2(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_add2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1503,14 +1501,14 @@ define void @rmw_fold_add2(i64* %p, i64 %v) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    addq %rsi, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = add i64 %prev, %v
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_sub1(i64* %p, i64 %v) {
+define void @rmw_fold_sub1(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_sub1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1522,14 +1520,14 @@ define void @rmw_fold_sub1(i64* %p, i64 %v) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    addq $-15, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = sub i64 %prev, 15
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_sub2(i64* %p, i64 %v) {
+define void @rmw_fold_sub2(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_sub2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1541,14 +1539,14 @@ define void @rmw_fold_sub2(i64* %p, i64 %v) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    subq %rsi, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = sub i64 %prev, %v
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_mul1(i64* %p, i64 %v) {
+define void @rmw_fold_mul1(ptr %p, i64 %v) {
 ; CHECK-LABEL: rmw_fold_mul1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
@@ -1556,14 +1554,14 @@ define void @rmw_fold_mul1(i64* %p, i64 %v) {
 ; CHECK-NEXT:    leaq (%rax,%rax,2), %rax
 ; CHECK-NEXT:    movq %rax, (%rdi)
 ; CHECK-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = mul i64 %prev, 15
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal to fold (TODO)
-define void @rmw_fold_mul2(i64* %p, i64 %v) {
+define void @rmw_fold_mul2(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_mul2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1576,14 +1574,14 @@ define void @rmw_fold_mul2(i64* %p, i64 %v) {
 ; CHECK-O3-NEXT:    imulq (%rdi), %rsi
 ; CHECK-O3-NEXT:    movq %rsi, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = mul i64 %prev, %v
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_sdiv1(i64* %p, i64 %v) {
+define void @rmw_fold_sdiv1(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_sdiv1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rcx
@@ -1612,14 +1610,14 @@ define void @rmw_fold_sdiv1(i64* %p, i64 %v) {
 ; CHECK-O3-NEXT:    addq %rax, %rdx
 ; CHECK-O3-NEXT:    movq %rdx, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = sdiv i64 %prev, 15
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_sdiv2(i64* %p, i64 %v) {
+define void @rmw_fold_sdiv2(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_sdiv2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1647,14 +1645,14 @@ define void @rmw_fold_sdiv2(i64* %p, i64 %v) {
 ; CHECK-O3-NEXT:    # kill: def $eax killed $eax def $rax
 ; CHECK-O3-NEXT:    movq %rax, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = sdiv i64 %prev, %v
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_udiv1(i64* %p, i64 %v) {
+define void @rmw_fold_udiv1(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_udiv1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rdx
@@ -1680,14 +1678,14 @@ define void @rmw_fold_udiv1(i64* %p, i64 %v) {
 ; CHECK-O3-EX-NEXT:    shrq $3, %rax
 ; CHECK-O3-EX-NEXT:    movq %rax, (%rdi)
 ; CHECK-O3-EX-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = udiv i64 %prev, 15
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_udiv2(i64* %p, i64 %v) {
+define void @rmw_fold_udiv2(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_udiv2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1716,14 +1714,14 @@ define void @rmw_fold_udiv2(i64* %p, i64 %v) {
 ; CHECK-O3-NEXT:    # kill: def $eax killed $eax def $rax
 ; CHECK-O3-NEXT:    movq %rax, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = udiv i64 %prev, %v
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_srem1(i64* %p, i64 %v) {
+define void @rmw_fold_srem1(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_srem1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1759,14 +1757,14 @@ define void @rmw_fold_srem1(i64* %p, i64 %v) {
 ; CHECK-O3-NEXT:    subq %rax, %rcx
 ; CHECK-O3-NEXT:    movq %rcx, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = srem i64 %prev, 15
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_srem2(i64* %p, i64 %v) {
+define void @rmw_fold_srem2(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_srem2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1794,14 +1792,14 @@ define void @rmw_fold_srem2(i64* %p, i64 %v) {
 ; CHECK-O3-NEXT:    # kill: def $edx killed $edx def $rdx
 ; CHECK-O3-NEXT:    movq %rdx, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = srem i64 %prev, %v
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_urem1(i64* %p, i64 %v) {
+define void @rmw_fold_urem1(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_urem1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1826,14 +1824,14 @@ define void @rmw_fold_urem1(i64* %p, i64 %v) {
 ; CHECK-O3-NEXT:    subq %rax, %rdx
 ; CHECK-O3-NEXT:    movq %rdx, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = urem i64 %prev, 15
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_urem2(i64* %p, i64 %v) {
+define void @rmw_fold_urem2(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_urem2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1862,14 +1860,14 @@ define void @rmw_fold_urem2(i64* %p, i64 %v) {
 ; CHECK-O3-NEXT:    # kill: def $edx killed $edx def $rdx
 ; CHECK-O3-NEXT:    movq %rdx, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = urem i64 %prev, %v
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal to fold (TODO)
-define void @rmw_fold_shl1(i64* %p, i64 %v) {
+define void @rmw_fold_shl1(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_shl1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1888,14 +1886,14 @@ define void @rmw_fold_shl1(i64* %p, i64 %v) {
 ; CHECK-O3-EX:       # %bb.0:
 ; CHECK-O3-EX-NEXT:    shlq $15, (%rdi)
 ; CHECK-O3-EX-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = shl i64 %prev, 15
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal to fold (TODO)
-define void @rmw_fold_shl2(i64* %p, i64 %v) {
+define void @rmw_fold_shl2(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_shl2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1918,14 +1916,14 @@ define void @rmw_fold_shl2(i64* %p, i64 %v) {
 ; CHECK-O3-EX-NEXT:    # kill: def $cl killed $cl killed $rcx
 ; CHECK-O3-EX-NEXT:    shlq %cl, (%rdi)
 ; CHECK-O3-EX-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = shl i64 %prev, %v
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal to fold (TODO)
-define void @rmw_fold_lshr1(i64* %p, i64 %v) {
+define void @rmw_fold_lshr1(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_lshr1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1944,14 +1942,14 @@ define void @rmw_fold_lshr1(i64* %p, i64 %v) {
 ; CHECK-O3-EX:       # %bb.0:
 ; CHECK-O3-EX-NEXT:    shrq $15, (%rdi)
 ; CHECK-O3-EX-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = lshr i64 %prev, 15
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal to fold (TODO)
-define void @rmw_fold_lshr2(i64* %p, i64 %v) {
+define void @rmw_fold_lshr2(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_lshr2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -1974,14 +1972,14 @@ define void @rmw_fold_lshr2(i64* %p, i64 %v) {
 ; CHECK-O3-EX-NEXT:    # kill: def $cl killed $cl killed $rcx
 ; CHECK-O3-EX-NEXT:    shrq %cl, (%rdi)
 ; CHECK-O3-EX-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = lshr i64 %prev, %v
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal to fold (TODO)
-define void @rmw_fold_ashr1(i64* %p, i64 %v) {
+define void @rmw_fold_ashr1(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_ashr1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -2000,14 +1998,14 @@ define void @rmw_fold_ashr1(i64* %p, i64 %v) {
 ; CHECK-O3-EX:       # %bb.0:
 ; CHECK-O3-EX-NEXT:    sarq $15, (%rdi)
 ; CHECK-O3-EX-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = ashr i64 %prev, 15
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal to fold (TODO)
-define void @rmw_fold_ashr2(i64* %p, i64 %v) {
+define void @rmw_fold_ashr2(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_ashr2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -2030,14 +2028,14 @@ define void @rmw_fold_ashr2(i64* %p, i64 %v) {
 ; CHECK-O3-EX-NEXT:    # kill: def $cl killed $cl killed $rcx
 ; CHECK-O3-EX-NEXT:    sarq %cl, (%rdi)
 ; CHECK-O3-EX-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = ashr i64 %prev, %v
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_and1(i64* %p, i64 %v) {
+define void @rmw_fold_and1(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_and1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -2051,14 +2049,14 @@ define void @rmw_fold_and1(i64* %p, i64 %v) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    andq $15, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = and i64 %prev, 15
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_and2(i64* %p, i64 %v) {
+define void @rmw_fold_and2(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_and2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -2070,14 +2068,14 @@ define void @rmw_fold_and2(i64* %p, i64 %v) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    andq %rsi, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = and i64 %prev, %v
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_or1(i64* %p, i64 %v) {
+define void @rmw_fold_or1(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_or1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -2089,14 +2087,14 @@ define void @rmw_fold_or1(i64* %p, i64 %v) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    orq $15, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = or i64 %prev, 15
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_or2(i64* %p, i64 %v) {
+define void @rmw_fold_or2(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_or2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -2108,14 +2106,14 @@ define void @rmw_fold_or2(i64* %p, i64 %v) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    orq %rsi, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = or i64 %prev, %v
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_xor1(i64* %p, i64 %v) {
+define void @rmw_fold_xor1(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_xor1:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -2127,14 +2125,14 @@ define void @rmw_fold_xor1(i64* %p, i64 %v) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    xorq $15, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = xor i64 %prev, 15
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal, as expected
-define void @rmw_fold_xor2(i64* %p, i64 %v) {
+define void @rmw_fold_xor2(ptr %p, i64 %v) {
 ; CHECK-O0-LABEL: rmw_fold_xor2:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -2146,9 +2144,9 @@ define void @rmw_fold_xor2(i64* %p, i64 %v) {
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    xorq %rsi, (%rdi)
 ; CHECK-O3-NEXT:    retq
-  %prev = load atomic i64, i64* %p unordered, align 8
+  %prev = load atomic i64, ptr %p unordered, align 8
   %val = xor i64 %prev, %v
-  store atomic i64 %val, i64* %p unordered, align 8
+  store atomic i64 %val, ptr %p unordered, align 8
   ret void
 }
 
@@ -2156,19 +2154,19 @@ define void @rmw_fold_xor2(i64* %p, i64 %v) {
 ;; be folded against the memory operation.
 
 ; Legal to reduce the load width (TODO)
-define i32 @fold_trunc(i64* %p) {
+define i32 @fold_trunc(ptr %p) {
 ; CHECK-LABEL: fold_trunc:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    # kill: def $eax killed $eax killed $rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %ret = trunc i64 %v to i32
   ret i32 %ret
 }
 
 ; Legal to reduce the load width and fold the load (TODO)
-define i32 @fold_trunc_add(i64* %p, i32 %v2) {
+define i32 @fold_trunc_add(ptr %p, i32 %v2) {
 ; CHECK-O0-LABEL: fold_trunc_add:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -2182,14 +2180,14 @@ define i32 @fold_trunc_add(i64* %p, i32 %v2) {
 ; CHECK-O3-NEXT:    addl %esi, %eax
 ; CHECK-O3-NEXT:    # kill: def $eax killed $eax killed $rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %trunc = trunc i64 %v to i32
   %ret = add i32 %trunc, %v2
   ret i32 %ret
 }
 
 ; Legal to reduce the load width and fold the load (TODO)
-define i32 @fold_trunc_and(i64* %p, i32 %v2) {
+define i32 @fold_trunc_and(ptr %p, i32 %v2) {
 ; CHECK-O0-LABEL: fold_trunc_and:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -2203,14 +2201,14 @@ define i32 @fold_trunc_and(i64* %p, i32 %v2) {
 ; CHECK-O3-NEXT:    andl %esi, %eax
 ; CHECK-O3-NEXT:    # kill: def $eax killed $eax killed $rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %trunc = trunc i64 %v to i32
   %ret = and i32 %trunc, %v2
   ret i32 %ret
 }
 
 ; Legal to reduce the load width and fold the load (TODO)
-define i32 @fold_trunc_or(i64* %p, i32 %v2) {
+define i32 @fold_trunc_or(ptr %p, i32 %v2) {
 ; CHECK-O0-LABEL: fold_trunc_or:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -2224,7 +2222,7 @@ define i32 @fold_trunc_or(i64* %p, i32 %v2) {
 ; CHECK-O3-NEXT:    orl %esi, %eax
 ; CHECK-O3-NEXT:    # kill: def $eax killed $eax killed $rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %trunc = trunc i64 %v to i32
   %ret = or i32 %trunc, %v2
   ret i32 %ret
@@ -2232,7 +2230,7 @@ define i32 @fold_trunc_or(i64* %p, i32 %v2) {
 
 ; It's tempting to split the wide load into two smaller byte loads
 ; to reduce memory traffic, but this would be illegal for an atomic load
-define i32 @split_load(i64* %p) {
+define i32 @split_load(ptr %p) {
 ; CHECK-O0-LABEL: split_load:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rcx
@@ -2251,7 +2249,7 @@ define i32 @split_load(i64* %p) {
 ; CHECK-O3-NEXT:    orl %eax, %ecx
 ; CHECK-O3-NEXT:    movzbl %cl, %eax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   %b1 = trunc i64 %v to i8
   %v.shift = lshr i64 %v, 32
   %b2 = trunc i64 %v.shift to i8
@@ -2266,61 +2264,61 @@ define i32 @split_load(i64* %p) {
 @Zero = constant i64 0
 
 ; TODO: should return constant
-define i64 @constant_folding(i64* %p) {
+define i64 @constant_folding(ptr %p) {
 ; CHECK-LABEL: constant_folding:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   ret i64 %v
 }
 
 ; Legal to forward and fold (TODO)
-define i64 @load_forwarding(i64* %p) {
+define i64 @load_forwarding(ptr %p) {
 ; CHECK-LABEL: load_forwarding:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    orq (%rdi), %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
-  %v2 = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
+  %v2 = load atomic i64, ptr %p unordered, align 8
   %ret = or i64 %v, %v2
   ret i64 %ret
 }
 
 ; Legal to forward (TODO)
-define i64 @store_forward(i64* %p, i64 %v) {
+define i64 @store_forward(ptr %p, i64 %v) {
 ; CHECK-LABEL: store_forward:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rsi, (%rdi)
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    retq
-  store atomic i64 %v, i64* %p unordered, align 8
-  %ret = load atomic i64, i64* %p unordered, align 8
+  store atomic i64 %v, ptr %p unordered, align 8
+  %ret = load atomic i64, ptr %p unordered, align 8
   ret i64 %ret
 }
 
 ; Legal to kill (TODO)
-define void @dead_writeback(i64* %p) {
+define void @dead_writeback(ptr %p) {
 ; CHECK-LABEL: dead_writeback:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    movq %rax, (%rdi)
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
-  store atomic i64 %v, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
+  store atomic i64 %v, ptr %p unordered, align 8
   ret void
 }
 
 ; Legal to kill (TODO)
-define void @dead_store(i64* %p, i64 %v) {
+define void @dead_store(ptr %p, i64 %v) {
 ; CHECK-LABEL: dead_store:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq $0, (%rdi)
 ; CHECK-NEXT:    movq %rsi, (%rdi)
 ; CHECK-NEXT:    retq
-  store atomic i64 0, i64* %p unordered, align 8
-  store atomic i64 %v, i64* %p unordered, align 8
+  store atomic i64 0, ptr %p unordered, align 8
+  store atomic i64 %v, ptr %p unordered, align 8
   ret void
 }
 
@@ -2331,41 +2329,41 @@ define void @dead_store(i64* %p, i64 %v) {
 ;; If that were to happen, please rewrite the test to ensure load movement
 ;; isn't violated.
 
-define i64 @nofold_fence(i64* %p) {
+define i64 @nofold_fence(ptr %p) {
 ; CHECK-LABEL: nofold_fence:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    mfence
 ; CHECK-NEXT:    addq $15, %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   fence seq_cst
   %ret = add i64 %v, 15
   ret i64 %ret
 }
 
-define i64 @nofold_fence_acquire(i64* %p) {
+define i64 @nofold_fence_acquire(ptr %p) {
 ; CHECK-LABEL: nofold_fence_acquire:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    #MEMBARRIER
 ; CHECK-NEXT:    addq $15, %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   fence acquire
   %ret = add i64 %v, 15
   ret i64 %ret
 }
 
 
-define i64 @nofold_stfence(i64* %p) {
+define i64 @nofold_stfence(ptr %p) {
 ; CHECK-LABEL: nofold_stfence:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    #MEMBARRIER
 ; CHECK-NEXT:    addq $15, %rax
 ; CHECK-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8
+  %v = load atomic i64, ptr %p unordered, align 8
   fence syncscope("singlethread") seq_cst
   %ret = add i64 %v, 15
   ret i64 %ret
@@ -2387,12 +2385,12 @@ define i64 @fold_constant(i64 %arg) {
 ; CHECK-O3-NEXT:    movq %rdi, %rax
 ; CHECK-O3-NEXT:    addq Constant(%rip), %rax
 ; CHECK-O3-NEXT:    retq
-  %v = load atomic i64, i64* @Constant unordered, align 8
+  %v = load atomic i64, ptr @Constant unordered, align 8
   %ret = add i64 %v, %arg
   ret i64 %ret
 }
 
-define i64 @fold_constant_clobber(i64* %p, i64 %arg) {
+define i64 @fold_constant_clobber(ptr %p, i64 %arg) {
 ; CHECK-O0-LABEL: fold_constant_clobber:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq Constant(%rip), %rax
@@ -2413,8 +2411,8 @@ define i64 @fold_constant_clobber(i64* %p, i64 %arg) {
 ; CHECK-O3-EX-NEXT:    addq Constant(%rip), %rax
 ; CHECK-O3-EX-NEXT:    movq $5, (%rdi)
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i64, i64* @Constant unordered, align 8
-  store i64 5, i64* %p
+  %v = load atomic i64, ptr @Constant unordered, align 8
+  store i64 5, ptr %p
   %ret = add i64 %v, %arg
   ret i64 %ret
 }
@@ -2440,13 +2438,13 @@ define i64 @fold_constant_fence(i64 %arg) {
 ; CHECK-O3-EX-NEXT:    addq Constant(%rip), %rax
 ; CHECK-O3-EX-NEXT:    mfence
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i64, i64* @Constant unordered, align 8
+  %v = load atomic i64, ptr @Constant unordered, align 8
   fence seq_cst
   %ret = add i64 %v, %arg
   ret i64 %ret
 }
 
-define i64 @fold_invariant_clobber(i64* dereferenceable(8) %p, i64 %arg) {
+define i64 @fold_invariant_clobber(ptr dereferenceable(8) %p, i64 %arg) {
 ; CHECK-O0-LABEL: fold_invariant_clobber:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -2467,14 +2465,14 @@ define i64 @fold_invariant_clobber(i64* dereferenceable(8) %p, i64 %arg) {
 ; CHECK-O3-EX-NEXT:    addq (%rdi), %rax
 ; CHECK-O3-EX-NEXT:    movq $5, (%rdi)
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8, !invariant.load !{}
-  store i64 5, i64* %p
+  %v = load atomic i64, ptr %p unordered, align 8, !invariant.load !{}
+  store i64 5, ptr %p
   %ret = add i64 %v, %arg
   ret i64 %ret
 }
 
 
-define i64 @fold_invariant_fence(i64* dereferenceable(8) %p, i64 %arg) {
+define i64 @fold_invariant_fence(ptr dereferenceable(8) %p, i64 %arg) {
 ; CHECK-O0-LABEL: fold_invariant_fence:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movq (%rdi), %rax
@@ -2495,7 +2493,7 @@ define i64 @fold_invariant_fence(i64* dereferenceable(8) %p, i64 %arg) {
 ; CHECK-O3-EX-NEXT:    addq (%rdi), %rax
 ; CHECK-O3-EX-NEXT:    mfence
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i64, i64* %p unordered, align 8, !invariant.load !{}
+  %v = load atomic i64, ptr %p unordered, align 8, !invariant.load !{}
   fence seq_cst
   %ret = add i64 %v, %arg
   ret i64 %ret
@@ -2504,7 +2502,7 @@ define i64 @fold_invariant_fence(i64* dereferenceable(8) %p, i64 %arg) {
 
 ; Exercise a few cases involving any extend idioms
 
-define i16 @load_i8_anyext_i16(i8* %ptr) {
+define i16 @load_i8_anyext_i16(ptr %ptr) {
 ; CHECK-O0-CUR-LABEL: load_i8_anyext_i16:
 ; CHECK-O0-CUR:       # %bb.0:
 ; CHECK-O0-CUR-NEXT:    movb (%rdi), %al
@@ -2531,13 +2529,13 @@ define i16 @load_i8_anyext_i16(i8* %ptr) {
 ; CHECK-O3-EX-NEXT:    vmovd %xmm0, %eax
 ; CHECK-O3-EX-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i8, i8* %ptr unordered, align 2
+  %v = load atomic i8, ptr %ptr unordered, align 2
   %vec = insertelement <2 x i8> undef, i8 %v, i32 0
   %res = bitcast <2 x i8> %vec to i16
   ret i16 %res
 }
 
-define i32 @load_i8_anyext_i32(i8* %ptr) {
+define i32 @load_i8_anyext_i32(ptr %ptr) {
 ; CHECK-O0-CUR-LABEL: load_i8_anyext_i32:
 ; CHECK-O0-CUR:       # %bb.0:
 ; CHECK-O0-CUR-NEXT:    movb (%rdi), %al
@@ -2560,13 +2558,13 @@ define i32 @load_i8_anyext_i32(i8* %ptr) {
 ; CHECK-O3-EX-NEXT:    vpbroadcastb (%rdi), %xmm0
 ; CHECK-O3-EX-NEXT:    vmovd %xmm0, %eax
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i8, i8* %ptr unordered, align 4
+  %v = load atomic i8, ptr %ptr unordered, align 4
   %vec = insertelement <4 x i8> undef, i8 %v, i32 0
   %res = bitcast <4 x i8> %vec to i32
   ret i32 %res
 }
 
-define i32 @load_i16_anyext_i32(i16* %ptr) {
+define i32 @load_i16_anyext_i32(ptr %ptr) {
 ; CHECK-O0-CUR-LABEL: load_i16_anyext_i32:
 ; CHECK-O0-CUR:       # %bb.0:
 ; CHECK-O0-CUR-NEXT:    movw (%rdi), %cx
@@ -2590,13 +2588,13 @@ define i32 @load_i16_anyext_i32(i16* %ptr) {
 ; CHECK-O3-EX-NEXT:    vpbroadcastw (%rdi), %xmm0
 ; CHECK-O3-EX-NEXT:    vmovd %xmm0, %eax
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i16, i16* %ptr unordered, align 4
+  %v = load atomic i16, ptr %ptr unordered, align 4
   %vec = insertelement <2 x i16> undef, i16 %v, i64 0
   %res = bitcast <2 x i16> %vec to i32
   ret i32 %res
 }
 
-define i64 @load_i16_anyext_i64(i16* %ptr) {
+define i64 @load_i16_anyext_i64(ptr %ptr) {
 ; CHECK-O0-CUR-LABEL: load_i16_anyext_i64:
 ; CHECK-O0-CUR:       # %bb.0:
 ; CHECK-O0-CUR-NEXT:    movw (%rdi), %cx
@@ -2624,14 +2622,14 @@ define i64 @load_i16_anyext_i64(i16* %ptr) {
 ; CHECK-O3-EX-NEXT:    vpbroadcastw (%rdi), %xmm0
 ; CHECK-O3-EX-NEXT:    vmovq %xmm0, %rax
 ; CHECK-O3-EX-NEXT:    retq
-  %v = load atomic i16, i16* %ptr unordered, align 8
+  %v = load atomic i16, ptr %ptr unordered, align 8
   %vec = insertelement <4 x i16> undef, i16 %v, i64 0
   %res = bitcast <4 x i16> %vec to i64
   ret i64 %res
 }
 
 ; TODO: Would be legal to combine for legal atomic wider types
-define i16 @load_combine(i8* %p) {
+define i16 @load_combine(ptr %p) {
 ; CHECK-O0-LABEL: load_combine:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movb (%rdi), %al
@@ -2652,9 +2650,9 @@ define i16 @load_combine(i8* %p) {
 ; CHECK-O3-NEXT:    orl %ecx, %eax
 ; CHECK-O3-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-O3-NEXT:    retq
-  %v1 = load atomic i8, i8* %p unordered, align 2
-  %p2 = getelementptr i8, i8* %p, i64 1
-  %v2 = load atomic i8, i8* %p2 unordered, align 1
+  %v1 = load atomic i8, ptr %p unordered, align 2
+  %p2 = getelementptr i8, ptr %p, i64 1
+  %v2 = load atomic i8, ptr %p2 unordered, align 1
   %v1.ext = zext i8 %v1 to i16
   %v2.ext = zext i8 %v2 to i16
   %v2.sht = shl i16 %v2.ext, 8
@@ -2662,7 +2660,7 @@ define i16 @load_combine(i8* %p) {
   ret i16 %res
 }
 
-define i1 @fold_cmp_over_fence(i32* %p, i32 %v1) {
+define i1 @fold_cmp_over_fence(ptr %p, i32 %v1) {
 ; CHECK-O0-LABEL: fold_cmp_over_fence:
 ; CHECK-O0:       # %bb.0:
 ; CHECK-O0-NEXT:    movl (%rdi), %eax
@@ -2701,7 +2699,7 @@ define i1 @fold_cmp_over_fence(i32* %p, i32 %v1) {
 ; CHECK-O3-EX-NEXT:  .LBB116_2: # %untaken
 ; CHECK-O3-EX-NEXT:    xorl %eax, %eax
 ; CHECK-O3-EX-NEXT:    retq
-  %v2 = load atomic i32, i32* %p unordered, align 4
+  %v2 = load atomic i32, ptr %p unordered, align 4
   fence seq_cst
   %cmp = icmp eq i32 %v1, %v2
   br i1 %cmp, label %taken, label %untaken

diff  --git a/llvm/test/CodeGen/X86/atomic64.ll b/llvm/test/CodeGen/X86/atomic64.ll
index e76093eb4e983..8f4da356e06cb 100644
--- a/llvm/test/CodeGen/X86/atomic64.ll
+++ b/llvm/test/CodeGen/X86/atomic64.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 < %s -O0 -mtriple=x86_64-- -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X64
-; RUN: llc -opaque-pointers=0 < %s -O0 -mtriple=i386-- -mcpu=i486 -verify-machineinstrs | FileCheck %s --check-prefix I486
+; RUN: llc < %s -O0 -mtriple=x86_64-- -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X64
+; RUN: llc < %s -O0 -mtriple=i386-- -mcpu=i486 -verify-machineinstrs | FileCheck %s --check-prefix I486
 
 @sc64 = external dso_local global i64
 @fsc64 = external dso_local global double
-@psc64 = external dso_local global i8*
+@psc64 = external dso_local global ptr
 
 define void @atomic_fetch_add64() nounwind {
 ; X64-LABEL: atomic_fetch_add64:
@@ -47,10 +47,10 @@ define void @atomic_fetch_add64() nounwind {
 ; I486-NEXT:    addl $16, %esp
 ; I486-NEXT:    retl
 entry:
-  %t1 = atomicrmw add  i64* @sc64, i64 1 acquire
-  %t2 = atomicrmw add  i64* @sc64, i64 3 acquire
-  %t3 = atomicrmw add  i64* @sc64, i64 5 acquire
-  %t4 = atomicrmw add  i64* @sc64, i64 %t3 acquire
+  %t1 = atomicrmw add  ptr @sc64, i64 1 acquire
+  %t2 = atomicrmw add  ptr @sc64, i64 3 acquire
+  %t3 = atomicrmw add  ptr @sc64, i64 5 acquire
+  %t4 = atomicrmw add  ptr @sc64, i64 %t3 acquire
   ret void
 }
 
@@ -94,10 +94,10 @@ define void @atomic_fetch_sub64() nounwind {
 ; I486-NEXT:    calll __atomic_fetch_sub_8@PLT
 ; I486-NEXT:    addl $16, %esp
 ; I486-NEXT:    retl
-  %t1 = atomicrmw sub  i64* @sc64, i64 1 acquire
-  %t2 = atomicrmw sub  i64* @sc64, i64 3 acquire
-  %t3 = atomicrmw sub  i64* @sc64, i64 5 acquire
-  %t4 = atomicrmw sub  i64* @sc64, i64 %t3 acquire
+  %t1 = atomicrmw sub  ptr @sc64, i64 1 acquire
+  %t2 = atomicrmw sub  ptr @sc64, i64 3 acquire
+  %t3 = atomicrmw sub  ptr @sc64, i64 5 acquire
+  %t4 = atomicrmw sub  ptr @sc64, i64 %t3 acquire
   ret void
 }
 
@@ -149,9 +149,9 @@ define void @atomic_fetch_and64() nounwind {
 ; I486-NEXT:    calll __atomic_fetch_and_8@PLT
 ; I486-NEXT:    addl $16, %esp
 ; I486-NEXT:    retl
-  %t1 = atomicrmw and  i64* @sc64, i64 3 acquire
-  %t2 = atomicrmw and  i64* @sc64, i64 5 acquire
-  %t3 = atomicrmw and  i64* @sc64, i64 %t2 acquire
+  %t1 = atomicrmw and  ptr @sc64, i64 3 acquire
+  %t2 = atomicrmw and  ptr @sc64, i64 5 acquire
+  %t3 = atomicrmw and  ptr @sc64, i64 %t2 acquire
   ret void
 }
 
@@ -202,9 +202,9 @@ define void @atomic_fetch_or64() nounwind {
 ; I486-NEXT:    calll __atomic_fetch_or_8@PLT
 ; I486-NEXT:    addl $16, %esp
 ; I486-NEXT:    retl
-  %t1 = atomicrmw or   i64* @sc64, i64 3 acquire
-  %t2 = atomicrmw or   i64* @sc64, i64 5 acquire
-  %t3 = atomicrmw or   i64* @sc64, i64 %t2 acquire
+  %t1 = atomicrmw or   ptr @sc64, i64 3 acquire
+  %t2 = atomicrmw or   ptr @sc64, i64 5 acquire
+  %t3 = atomicrmw or   ptr @sc64, i64 %t2 acquire
   ret void
 }
 
@@ -255,9 +255,9 @@ define void @atomic_fetch_xor64() nounwind {
 ; I486-NEXT:    calll __atomic_fetch_xor_8@PLT
 ; I486-NEXT:    addl $16, %esp
 ; I486-NEXT:    retl
-  %t1 = atomicrmw xor  i64* @sc64, i64 3 acquire
-  %t2 = atomicrmw xor  i64* @sc64, i64 5 acquire
-  %t3 = atomicrmw xor  i64* @sc64, i64 %t2 acquire
+  %t1 = atomicrmw xor  ptr @sc64, i64 3 acquire
+  %t2 = atomicrmw xor  ptr @sc64, i64 5 acquire
+  %t3 = atomicrmw xor  ptr @sc64, i64 %t2 acquire
   ret void
 }
 
@@ -296,7 +296,7 @@ define void @atomic_fetch_nand64(i64 %x) nounwind {
 ; I486-NEXT:    calll __atomic_fetch_nand_8@PLT
 ; I486-NEXT:    addl $16, %esp
 ; I486-NEXT:    retl
-  %t1 = atomicrmw nand i64* @sc64, i64 %x acquire
+  %t1 = atomicrmw nand ptr @sc64, i64 %x acquire
   ret void
 }
 
@@ -387,7 +387,7 @@ define void @atomic_fetch_max64(i64 %x) nounwind {
 ; I486-NEXT:    popl %esi
 ; I486-NEXT:    popl %ebp
 ; I486-NEXT:    retl
-  %t1 = atomicrmw max  i64* @sc64, i64 %x acquire
+  %t1 = atomicrmw max  ptr @sc64, i64 %x acquire
 
   ret void
 }
@@ -479,7 +479,7 @@ define void @atomic_fetch_min64(i64 %x) nounwind {
 ; I486-NEXT:    popl %esi
 ; I486-NEXT:    popl %ebp
 ; I486-NEXT:    retl
-  %t1 = atomicrmw min  i64* @sc64, i64 %x acquire
+  %t1 = atomicrmw min  ptr @sc64, i64 %x acquire
 
   ret void
 }
@@ -571,7 +571,7 @@ define void @atomic_fetch_umax64(i64 %x) nounwind {
 ; I486-NEXT:    popl %esi
 ; I486-NEXT:    popl %ebp
 ; I486-NEXT:    retl
-  %t1 = atomicrmw umax i64* @sc64, i64 %x acquire
+  %t1 = atomicrmw umax ptr @sc64, i64 %x acquire
 
   ret void
 }
@@ -663,7 +663,7 @@ define void @atomic_fetch_umin64(i64 %x) nounwind {
 ; I486-NEXT:    popl %esi
 ; I486-NEXT:    popl %ebp
 ; I486-NEXT:    retl
-  %t1 = atomicrmw umin i64* @sc64, i64 %x acquire
+  %t1 = atomicrmw umin ptr @sc64, i64 %x acquire
 
   ret void
 }
@@ -683,10 +683,10 @@ define void @atomic_fetch_cmpxchg64() nounwind {
 ; I486-NEXT:    movl %esp, %ebp
 ; I486-NEXT:    andl $-8, %esp
 ; I486-NEXT:    subl $32, %esp
-; I486-NEXT:    leal {{[0-9]+}}(%esp), %ecx
 ; I486-NEXT:    movl $0, {{[0-9]+}}(%esp)
 ; I486-NEXT:    movl $0, {{[0-9]+}}(%esp)
 ; I486-NEXT:    movl %esp, %eax
+; I486-NEXT:    leal {{[0-9]+}}(%esp), %ecx
 ; I486-NEXT:    movl %ecx, 4(%eax)
 ; I486-NEXT:    movl $2, 20(%eax)
 ; I486-NEXT:    movl $2, 16(%eax)
@@ -697,7 +697,7 @@ define void @atomic_fetch_cmpxchg64() nounwind {
 ; I486-NEXT:    movl %ebp, %esp
 ; I486-NEXT:    popl %ebp
 ; I486-NEXT:    retl
-  %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
+  %t1 = cmpxchg ptr @sc64, i64 0, i64 1 acquire acquire
   ret void
 }
 
@@ -720,7 +720,7 @@ define void @atomic_fetch_store64(i64 %x) nounwind {
 ; I486-NEXT:    calll __atomic_store_8@PLT
 ; I486-NEXT:    addl $16, %esp
 ; I486-NEXT:    retl
-  store atomic i64 %x, i64* @sc64 release, align 8
+  store atomic i64 %x, ptr @sc64 release, align 8
   ret void
 }
 
@@ -743,7 +743,7 @@ define void @atomic_fetch_swap64(i64 %x) nounwind {
 ; I486-NEXT:    calll __atomic_exchange_8@PLT
 ; I486-NEXT:    addl $16, %esp
 ; I486-NEXT:    retl
-  %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
+  %t1 = atomicrmw xchg ptr @sc64, i64 %x acquire
   ret void
 }
 
@@ -773,11 +773,11 @@ define void @atomic_fetch_swapf64(double %x) nounwind {
 ; I486-NEXT:    movl %ebp, %esp
 ; I486-NEXT:    popl %ebp
 ; I486-NEXT:    retl
-  %t1 = atomicrmw xchg double* @fsc64, double %x acquire
+  %t1 = atomicrmw xchg ptr @fsc64, double %x acquire
   ret void
 }
 
-define void @atomic_fetch_swapptr(i8* %x) nounwind {
+define void @atomic_fetch_swapptr(ptr %x) nounwind {
 ; X64-LABEL: atomic_fetch_swapptr:
 ; X64:       # %bb.0:
 ; X64-NEXT:    xchgq %rdi, psc64(%rip)
@@ -788,6 +788,6 @@ define void @atomic_fetch_swapptr(i8* %x) nounwind {
 ; I486-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; I486-NEXT:    xchgl %eax, psc64
 ; I486-NEXT:    retl
-  %t1 = atomicrmw xchg i8** @psc64, i8* %x acquire
+  %t1 = atomicrmw xchg ptr @psc64, ptr %x acquire
   ret void
 }

diff  --git a/llvm/test/CodeGen/X86/catchpad-lifetime.ll b/llvm/test/CodeGen/X86/catchpad-lifetime.ll
index c16942de45b84..bbcbb636c3749 100644
--- a/llvm/test/CodeGen/X86/catchpad-lifetime.ll
+++ b/llvm/test/CodeGen/X86/catchpad-lifetime.ll
@@ -1,5 +1,5 @@
-; RUN: llc -opaque-pointers=0 -mtriple=x86_64-windows-msvc < %s | FileCheck %s --check-prefix=X64
-; RUN: llc -opaque-pointers=0 -mtriple=i686-windows-msvc < %s | FileCheck %s --check-prefix=X86
+; RUN: llc -mtriple=x86_64-windows-msvc < %s | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=i686-windows-msvc < %s | FileCheck %s --check-prefix=X86
 
 declare void @throw()
 
@@ -7,11 +7,11 @@ declare i32 @__CxxFrameHandler3(...)
 
 declare void @llvm.trap()
 
-define void @test1() personality i32 (...)* @__CxxFrameHandler3 {
+define void @test1() personality ptr @__CxxFrameHandler3 {
 entry:
-  %alloca2 = alloca i8*, align 4
-  %alloca1 = alloca i8*, align 4
-  store volatile i8* null, i8** %alloca1
+  %alloca2 = alloca ptr, align 4
+  %alloca1 = alloca ptr, align 4
+  store volatile ptr null, ptr %alloca1
   invoke void @throw()
           to label %unreachable unwind label %catch.dispatch
 
@@ -25,7 +25,7 @@ entry:
 ; X86: pushl   %ebx
 ; X86: pushl   %edi
 ; X86: pushl   %esi
-; X86: subl    $24, %esp
+; X86: subl    $20, %esp
 
 ; X86: movl  $0, -32(%ebp)
 ; X86: calll _throw
@@ -34,14 +34,12 @@ catch.dispatch:                                   ; preds = %entry
   %cs = catchswitch within none [label %catch.pad] unwind to caller
 
 catch.pad:                                        ; preds = %catch.dispatch
-  %cp = catchpad within %cs [i8* null, i32 0, i8** %alloca1]
-  %v = load volatile i8*, i8** %alloca1
-  store volatile i8* null, i8** %alloca1
-  %bc1 = bitcast i8** %alloca1 to i8*
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %bc1)
-  %bc2 = bitcast i8** %alloca2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %bc2)
-  store volatile i8* null, i8** %alloca1
+  %cp = catchpad within %cs [ptr null, i32 0, ptr %alloca1]
+  %v = load volatile ptr, ptr %alloca1
+  store volatile ptr null, ptr %alloca1
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %alloca1)
+  call void @llvm.lifetime.start.p0(i64 4, ptr %alloca2)
+  store volatile ptr null, ptr %alloca1
   call void @llvm.trap()
   unreachable
 
@@ -60,18 +58,18 @@ unreachable:                                      ; preds = %entry
 }
 
 ; X64-LABEL: $cppxdata$test1:
-; X64: .long   56                      # CatchObjOffset
+; X64: .long   40                      # CatchObjOffset
 
 ; -20 is difference between the end of the EH reg node stack object and the
 ; catch object at EBP -32.
 ; X86-LABEL: L__ehtable$test1:
 ; X86: .long   -20                      # CatchObjOffset
 
-define void @test2() personality i32 (...)* @__CxxFrameHandler3 {
+define void @test2() personality ptr @__CxxFrameHandler3 {
 entry:
-  %alloca2 = alloca i8*, align 4
-  %alloca1 = alloca i8*, align 4
-  store volatile i8* null, i8** %alloca1
+  %alloca2 = alloca ptr, align 4
+  %alloca1 = alloca ptr, align 4
+  store volatile ptr null, ptr %alloca1
   invoke void @throw()
           to label %unreachable unwind label %catch.dispatch
 
@@ -88,13 +86,11 @@ catch.dispatch:                                   ; preds = %entry
   %cs = catchswitch within none [label %catch.pad] unwind to caller
 
 catch.pad:                                        ; preds = %catch.dispatch
-  %cp = catchpad within %cs [i8* null, i32 0, i8** null]
-  store volatile i8* null, i8** %alloca1
-  %bc1 = bitcast i8** %alloca1 to i8*
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %bc1)
-  %bc2 = bitcast i8** %alloca2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %bc2)
-  store volatile i8* null, i8** %alloca1
+  %cp = catchpad within %cs [ptr null, i32 0, ptr null]
+  store volatile ptr null, ptr %alloca1
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %alloca1)
+  call void @llvm.lifetime.start.p0(i64 4, ptr %alloca2)
+  store volatile ptr null, ptr %alloca1
   call void @llvm.trap()
   unreachable
 
@@ -122,9 +118,9 @@ unreachable:                                      ; preds = %entry
 
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
 
 attributes #0 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/X86/cfguard-checks.ll b/llvm/test/CodeGen/X86/cfguard-checks.ll
index a1dd2e670d91c..d3c1875fb4773 100644
--- a/llvm/test/CodeGen/X86/cfguard-checks.ll
+++ b/llvm/test/CodeGen/X86/cfguard-checks.ll
@@ -1,7 +1,7 @@
-; RUN: llc -opaque-pointers=0 < %s -mtriple=i686-pc-windows-msvc | FileCheck %s -check-prefix=X32
-; RUN: llc -opaque-pointers=0 < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s -check-prefixes=X64,X64_MSVC
-; RUN: llc -opaque-pointers=0 < %s -mtriple=i686-w64-windows-gnu | FileCheck %s -check-prefixes=X32,X32_MINGW
-; RUN: llc -opaque-pointers=0 < %s -mtriple=x86_64-w64-windows-gnu | FileCheck %s -check-prefixes=X64,X64_MINGW
+; RUN: llc < %s -mtriple=i686-pc-windows-msvc | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s -check-prefixes=X64,X64_MSVC
+; RUN: llc < %s -mtriple=i686-w64-windows-gnu | FileCheck %s -check-prefixes=X32,X32_MINGW
+; RUN: llc < %s -mtriple=x86_64-w64-windows-gnu | FileCheck %s -check-prefixes=X64,X64_MINGW
 ; Control Flow Guard is currently only available on Windows
 
 ; Test that Control Flow Guard checks are correctly added when required.
@@ -13,9 +13,9 @@ declare i32 @target_func()
 ; Test that Control Flow Guard checks are not added on calls with the "guard_nocf" attribute.
 define i32 @func_guard_nocf() {
 entry:
-  %func_ptr = alloca i32 ()*, align 8
-  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
-  %0 = load i32 ()*, i32 ()** %func_ptr, align 8
+  %func_ptr = alloca ptr, align 8
+  store ptr @target_func, ptr %func_ptr, align 8
+  %0 = load ptr, ptr %func_ptr, align 8
   %1 = call i32 %0() #0
   ret i32 %1
 
@@ -36,9 +36,9 @@ attributes #0 = { "guard_nocf" }
 ; FIXME Ideally these checks should be added as a single call instruction, as in the optimized case.
 define i32 @func_optnone_cf() #1 {
 entry:
-  %func_ptr = alloca i32 ()*, align 8
-  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
-  %0 = load i32 ()*, i32 ()** %func_ptr, align 8
+  %func_ptr = alloca ptr, align 8
+  store ptr @target_func, ptr %func_ptr, align 8
+  %0 = load ptr, ptr %func_ptr, align 8
   %1 = call i32 %0()
   ret i32 %1
 
@@ -47,8 +47,7 @@ entry:
 	; X32: 	     leal  _target_func, %eax
 	; X32: 	     movl  %eax, (%esp)
 	; X32: 	     movl  (%esp), %ecx
-	; X32: 	     movl ___guard_check_icall_fptr, %eax
-	; X32: 	     calll *%eax
+	; X32: 	     calll *___guard_check_icall_fptr
 	; X32-NEXT:  calll *%ecx
 
   ; On x86_64, __guard_dispatch_icall_fptr tail calls the function, so there should be only one call instruction.
@@ -64,9 +63,9 @@ attributes #1 = { noinline optnone }
 ; Test that Control Flow Guard checks are correctly added in optimized code (common case).
 define i32 @func_cf() {
 entry:
-  %func_ptr = alloca i32 ()*, align 8
-  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
-  %0 = load i32 ()*, i32 ()** %func_ptr, align 8
+  %func_ptr = alloca ptr, align 8
+  store ptr @target_func, ptr %func_ptr, align 8
+  %0 = load ptr, ptr %func_ptr, align 8
   %1 = call i32 %0()
   ret i32 %1
 
@@ -86,20 +85,20 @@ entry:
 
 
 ; Test that Control Flow Guard checks are correctly added on invoke instructions.
-define i32 @func_cf_invoke() personality i8* bitcast (void ()* @h to i8*) {
+define i32 @func_cf_invoke() personality ptr @h {
 entry:
   %0 = alloca i32, align 4
-  %func_ptr = alloca i32 ()*, align 8
-  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
-  %1 = load i32 ()*, i32 ()** %func_ptr, align 8
+  %func_ptr = alloca ptr, align 8
+  store ptr @target_func, ptr %func_ptr, align 8
+  %1 = load ptr, ptr %func_ptr, align 8
   %2 = invoke i32 %1()
           to label %invoke.cont unwind label %lpad
 invoke.cont:                                      ; preds = %entry
   ret i32 %2
 
 lpad:                                             ; preds = %entry
-  %tmp = landingpad { i8*, i32 }
-          catch i8* null
+  %tmp = landingpad { ptr, i32 }
+          catch ptr null
   ret i32 -1
 
   ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
@@ -129,9 +128,9 @@ declare double @target_func_doubles(double, double, double, double)
 
 define double @func_cf_doubles() {
 entry:
-  %func_ptr = alloca double (double, double, double, double)*, align 8
-  store double (double, double, double, double)* @target_func_doubles, double (double, double, double, double)** %func_ptr, align 8
-  %0 = load double (double, double, double, double)*, double (double, double, double, double)** %func_ptr, align 8
+  %func_ptr = alloca ptr, align 8
+  store ptr @target_func_doubles, ptr %func_ptr, align 8
+  %0 = load ptr, ptr %func_ptr, align 8
   %1 = call double %0(double 1.000000e+00, double 2.000000e+00, double 3.000000e+00, double 4.000000e+00)
   ret double %1
 
@@ -163,9 +162,9 @@ entry:
 ; Test that Control Flow Guard checks are correctly added for tail calls.
 define i32 @func_cf_tail() {
 entry:
-  %func_ptr = alloca i32 ()*, align 8
-  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
-  %0 = load i32 ()*, i32 ()** %func_ptr, align 8
+  %func_ptr = alloca ptr, align 8
+  store ptr @target_func, ptr %func_ptr, align 8
+  %0 = load ptr, ptr %func_ptr, align 8
   %1 = musttail call i32 %0()
   ret i32 %1
 
@@ -184,18 +183,17 @@ entry:
 }
 
 
-%struct.Foo = type { i32 (%struct.Foo*)** }
+%struct.Foo = type { ptr }
 
 ; Test that Control Flow Guard checks are correctly added for variadic musttail
 ; calls. These are used for MS C++ ABI virtual member pointer thunks.
 ; PR44049
-define i32 @vmptr_thunk(%struct.Foo* inreg %p) {
+define i32 @vmptr_thunk(ptr inreg %p) {
 entry:
-  %vptr.addr = getelementptr inbounds %struct.Foo, %struct.Foo* %p, i32 0, i32 0
-  %vptr = load i32 (%struct.Foo*)**, i32 (%struct.Foo*)*** %vptr.addr
-  %slot = getelementptr inbounds i32 (%struct.Foo*)*, i32 (%struct.Foo*)** %vptr, i32 1
-  %vmethod = load i32 (%struct.Foo*)*, i32 (%struct.Foo*)** %slot
-  %rv = musttail call i32 %vmethod(%struct.Foo* inreg %p)
+  %vptr = load ptr, ptr %p
+  %slot = getelementptr inbounds ptr, ptr %vptr, i32 1
+  %vmethod = load ptr, ptr %slot
+  %rv = musttail call i32 %vmethod(ptr inreg %p)
   ret i32 %rv
 
   ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
@@ -224,10 +222,10 @@ entry:
 define i32 @func_cf_setjmp() {
   %1 = alloca i32, align 4
   %2 = alloca i32, align 4
-  store i32 0, i32* %1, align 4
-  store i32 -1, i32* %2, align 4
-  %3 = call i8* @llvm.frameaddress(i32 0)
-  %4 = call i32 @_setjmp(i8* bitcast ([16 x %struct._SETJMP_FLOAT128]* @buf1 to i8*), i8* %3) #2
+  store i32 0, ptr %1, align 4
+  store i32 -1, ptr %2, align 4
+  %3 = call ptr @llvm.frameaddress(i32 0)
+  %4 = call i32 @_setjmp(ptr @buf1, ptr %3) #2
 
   ; X32-LABEL: func_cf_setjmp
   ; X32:       calll __setjmp
@@ -237,8 +235,8 @@ define i32 @func_cf_setjmp() {
   ; X64:       callq _setjmp
   ; X64-NEXT:  $cfgsj_func_cf_setjmp0:
 
-  %5 = call i8* @llvm.frameaddress(i32 0)
-  %6 = call i32 @_setjmp(i8* bitcast ([16 x %struct._SETJMP_FLOAT128]* @buf1 to i8*), i8* %5) #2
+  %5 = call ptr @llvm.frameaddress(i32 0)
+  %6 = call i32 @_setjmp(ptr @buf1, ptr %5) #2
 
   ; X32:       calll __setjmp
   ; X32-NEXT:  $cfgsj_func_cf_setjmp1:
@@ -246,8 +244,8 @@ define i32 @func_cf_setjmp() {
   ; X64:       callq _setjmp
   ; X64-NEXT:  $cfgsj_func_cf_setjmp1:
 
-  store i32 1, i32* %2, align 4
-  %7 = load i32, i32* %2, align 4
+  store i32 1, ptr %2, align 4
+  %7 = load i32, ptr %2, align 4
   ret i32 %7
 
   ; X32:       .section .gljmp$y,"dr"
@@ -259,10 +257,10 @@ define i32 @func_cf_setjmp() {
   ; X64-NEXT:  .symidx $cfgsj_func_cf_setjmp1
 }
 
-declare i8* @llvm.frameaddress(i32)
+declare ptr @llvm.frameaddress(i32)
 
 ; Function Attrs: returns_twice
-declare dso_local i32 @_setjmp(i8*, i8*) #2
+declare dso_local i32 @_setjmp(ptr, ptr) #2
 
 attributes #2 = { returns_twice }
 

diff  --git a/llvm/test/CodeGen/X86/codegen-prepare-cast.ll b/llvm/test/CodeGen/X86/codegen-prepare-cast.ll
index fc213c223147f..c55d53258beba 100644
--- a/llvm/test/CodeGen/X86/codegen-prepare-cast.ll
+++ b/llvm/test/CodeGen/X86/codegen-prepare-cast.ll
@@ -1,25 +1,25 @@
-; RUN: llc -opaque-pointers=0 < %s
+; RUN: llc < %s
 ; PR4297
-; RUN: opt -opaque-pointers=0 -S < %s -codegenprepare | FileCheck %s
+; RUN: opt -S < %s -codegenprepare | FileCheck %s
 
 target datalayout =
 "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
 target triple = "x86_64-unknown-linux-gnu"
-        %"byte[]" = type { i64, i8* }
-        %"char[][]" = type { i64, %"byte[]"* }
-@.str = external dso_local constant [7 x i8]              ; <[7 x i8]*> [#uses=1]
+        %"byte[]" = type { i64, ptr }
+        %"char[][]" = type { i64, ptr }
+@.str = external dso_local constant [7 x i8]              ; <ptr> [#uses=1]
 
 ; CHECK-LABEL: @_Dmain
-; CHECK: load i8, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0)
+; CHECK: load i8, ptr %tmp4
 ; CHECK: ret
 define fastcc i32 @_Dmain(%"char[][]" %unnamed) {
 entry:
-        %tmp = getelementptr [7 x i8], [7 x i8]* @.str, i32 0, i32 0              ; <i8*> [#uses=1]
+        %tmp = getelementptr [7 x i8], ptr @.str, i32 0, i32 0              ; <ptr> [#uses=1]
         br i1 undef, label %foreachbody, label %foreachend
 
 foreachbody:            ; preds = %entry
-        %tmp4 = getelementptr i8, i8* %tmp, i32 undef               ; <i8*> [#uses=1]
-        %tmp5 = load i8, i8* %tmp4          ; <i8> [#uses=0]
+        %tmp4 = getelementptr i8, ptr %tmp, i32 undef               ; <ptr> [#uses=1]
+        %tmp5 = load i8, ptr %tmp4          ; <i8> [#uses=0]
         unreachable
 
 foreachend:             ; preds = %entry

diff  --git a/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll b/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
index ec2926b31b938..dead31a8ba013 100644
--- a/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
+++ b/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
 
 %struct.i = type { i32, i24 }
 %struct.m = type { %struct.i }
@@ -16,7 +16,7 @@
 @x2 = local_unnamed_addr global i32 0, align 4
 @x3 = local_unnamed_addr global i32 0, align 4
 @x4 = local_unnamed_addr global i32 0, align 4
-@x5 = local_unnamed_addr global double* null, align 8
+@x5 = local_unnamed_addr global ptr null, align 8
 
 ; Check that the compiler does not crash.
 ; Test for PR30775
@@ -58,14 +58,14 @@ define void @_Z1nv() local_unnamed_addr {
 ; CHECK-NEXT:    movw %si, (%rax)
 ; CHECK-NEXT:    retq
 entry:
-  %bf.load = load i32, i32* bitcast (i24* getelementptr inbounds (%struct.m, %struct.m* @k, i64 0, i32 0, i32 1) to i32*), align 4
-  %0 = load i16, i16* @c, align 2
+  %bf.load = load i32, ptr getelementptr inbounds (%struct.m, ptr @k, i64 0, i32 0, i32 1), align 4
+  %0 = load i16, ptr @c, align 2
   %conv = sext i16 %0 to i32
-  %1 = load i16, i16* @b, align 2
+  %1 = load i16, ptr @b, align 2
   %conv1 = sext i16 %1 to i32
-  %2 = load i32, i32* @a, align 4
+  %2 = load i32, ptr @a, align 4
   %tobool = icmp ne i32 %2, 0
-  %bf.load3 = load i32, i32* getelementptr inbounds (%struct.i, %struct.i* @l, i64 0, i32 0), align 4
+  %bf.load3 = load i32, ptr @l, align 4
   %bf.shl = shl i32 %bf.load3, 7
   %bf.ashr = ashr exact i32 %bf.shl, 7
   %bf.clear = shl i32 %bf.load, 1
@@ -78,7 +78,7 @@ entry:
   %phitmp = icmp eq i32 %bf.ashr, 0
   %.phitmp = or i1 %phitmp, %tobool29
   %conv37 = zext i1 %.phitmp to i16
-  store i16 %conv37, i16* @e, align 2
+  store i16 %conv37, ptr @e, align 2
   %bf.clear39 = and i32 %bf.load, 65535
   %factor53 = shl nuw nsw i32 %bf.clear39, 1
   %add46 = add nsw i32 %factor53, %conv
@@ -88,7 +88,7 @@ entry:
   %add51 = add nuw nsw i32 %add48.lobit.not, %bf.clear39
   %shr = ashr i32 %2, %add51
   %conv52 = trunc i32 %shr to i16
-  store i16 %conv52, i16* @b, align 2
+  store i16 %conv52, ptr @b, align 2
   ret void
 }
 
@@ -115,113 +115,114 @@ define void @_Z2x6v() local_unnamed_addr {
 ; CHECK-NEXT:    .cfi_offset %r15, -24
 ; CHECK-NEXT:    .cfi_offset %rbp, -16
 ; CHECK-NEXT:    movq x1@GOTPCREL(%rip), %rax
-; CHECK-NEXT:    movl (%rax), %ebx
-; CHECK-NEXT:    andl $511, %ebx # imm = 0x1FF
-; CHECK-NEXT:    leaq 1(%rbx), %rax
+; CHECK-NEXT:    movl (%rax), %esi
+; CHECK-NEXT:    andl $511, %esi # imm = 0x1FF
+; CHECK-NEXT:    leaq 1(%rsi), %rax
 ; CHECK-NEXT:    movq x4@GOTPCREL(%rip), %rcx
 ; CHECK-NEXT:    movl %eax, (%rcx)
 ; CHECK-NEXT:    movq x3@GOTPCREL(%rip), %rcx
-; CHECK-NEXT:    movl (%rcx), %ecx
-; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    movl (%rcx), %edx
+; CHECK-NEXT:    testl %edx, %edx
 ; CHECK-NEXT:    je .LBB1_18
 ; CHECK-NEXT:  # %bb.1: # %for.cond1thread-pre-split.lr.ph
-; CHECK-NEXT:    movq x5@GOTPCREL(%rip), %rdx
-; CHECK-NEXT:    movq (%rdx), %rsi
-; CHECK-NEXT:    movl %ecx, %edx
-; CHECK-NEXT:    notl %edx
-; CHECK-NEXT:    leaq 8(,%rdx,8), %rdi
-; CHECK-NEXT:    imulq %rax, %rdi
-; CHECK-NEXT:    addq %rsi, %rdi
-; CHECK-NEXT:    movq x2@GOTPCREL(%rip), %r8
-; CHECK-NEXT:    movl (%r8), %edx
-; CHECK-NEXT:    leal 8(,%rbx,8), %eax
-; CHECK-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT:    leaq 8(%rsi), %rax
-; CHECK-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT:    leaq 32(%rsi), %r11
-; CHECK-NEXT:    leaq 8(,%rbx,8), %rbx
-; CHECK-NEXT:    xorl %r14d, %r14d
-; CHECK-NEXT:    movq x0@GOTPCREL(%rip), %r15
-; CHECK-NEXT:    movq %rsi, %r12
+; CHECK-NEXT:    movq x5@GOTPCREL(%rip), %rcx
+; CHECK-NEXT:    movq (%rcx), %rdi
+; CHECK-NEXT:    movl %edx, %ecx
+; CHECK-NEXT:    notl %ecx
+; CHECK-NEXT:    leaq 8(,%rcx,8), %rcx
+; CHECK-NEXT:    imulq %rax, %rcx
+; CHECK-NEXT:    addq %rdi, %rcx
+; CHECK-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movq x2@GOTPCREL(%rip), %r9
+; CHECK-NEXT:    movl (%r9), %ecx
+; CHECK-NEXT:    leal 8(,%rsi,8), %r8d
+; CHECK-NEXT:    movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    leaq 8(%rdi), %r8
+; CHECK-NEXT:    movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    leaq 32(%rdi), %rbx
+; CHECK-NEXT:    leaq 8(,%rsi,8), %r14
+; CHECK-NEXT:    xorl %r15d, %r15d
+; CHECK-NEXT:    movq x0@GOTPCREL(%rip), %r12
+; CHECK-NEXT:    movq %rdi, %r13
 ; CHECK-NEXT:    jmp .LBB1_2
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB1_15: # %for.cond1.for.inc3_crit_edge
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT:    movl %edx, (%r8)
+; CHECK-NEXT:    movl %ecx, (%r9)
 ; CHECK-NEXT:  .LBB1_16: # %for.inc3
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT:    addq %rbx, %r12
-; CHECK-NEXT:    incq %r14
-; CHECK-NEXT:    addq %rbx, %r11
-; CHECK-NEXT:    incl %ecx
+; CHECK-NEXT:    incq %r15
+; CHECK-NEXT:    addq %r14, %rbx
+; CHECK-NEXT:    incl %edx
+; CHECK-NEXT:    leaq (%r13,%rax,8), %r13
 ; CHECK-NEXT:    je .LBB1_17
 ; CHECK-NEXT:  .LBB1_2: # %for.cond1thread-pre-split
 ; CHECK-NEXT:    # =>This Loop Header: Depth=1
 ; CHECK-NEXT:    # Child Loop BB1_12 Depth 2
 ; CHECK-NEXT:    # Child Loop BB1_14 Depth 2
-; CHECK-NEXT:    testl %edx, %edx
+; CHECK-NEXT:    testl %ecx, %ecx
 ; CHECK-NEXT:    jns .LBB1_16
 ; CHECK-NEXT:  # %bb.3: # %for.body2.preheader
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT:    movslq %edx, %r13
-; CHECK-NEXT:    testq %r13, %r13
-; CHECK-NEXT:    movq $-1, %rbp
-; CHECK-NEXT:    cmovnsq %r13, %rbp
-; CHECK-NEXT:    subq %r13, %rbp
-; CHECK-NEXT:    incq %rbp
-; CHECK-NEXT:    cmpq $4, %rbp
+; CHECK-NEXT:    movslq %ecx, %rbp
+; CHECK-NEXT:    testq %rbp, %rbp
+; CHECK-NEXT:    movq $-1, %rsi
+; CHECK-NEXT:    cmovnsq %rbp, %rsi
+; CHECK-NEXT:    subq %rbp, %rsi
+; CHECK-NEXT:    incq %rsi
+; CHECK-NEXT:    cmpq $4, %rsi
 ; CHECK-NEXT:    jb .LBB1_14
 ; CHECK-NEXT:  # %bb.4: # %min.iters.checked
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT:    movq %rbp, %rdx
-; CHECK-NEXT:    andq $-4, %rdx
+; CHECK-NEXT:    movq %rsi, %rcx
+; CHECK-NEXT:    andq $-4, %rcx
 ; CHECK-NEXT:    je .LBB1_14
 ; CHECK-NEXT:  # %bb.5: # %vector.memcheck
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; CHECK-NEXT:    imulq %r14, %rax
-; CHECK-NEXT:    leaq (%rsi,%rax), %r10
-; CHECK-NEXT:    leaq (%r10,%r13,8), %r9
-; CHECK-NEXT:    testq %r13, %r13
-; CHECK-NEXT:    movq $-1, %r10
-; CHECK-NEXT:    cmovnsq %r13, %r10
-; CHECK-NEXT:    cmpq %r15, %r9
+; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; CHECK-NEXT:    imulq %r15, %r10
+; CHECK-NEXT:    leaq (%rdi,%r10), %r11
+; CHECK-NEXT:    leaq (%r11,%rbp,8), %r8
+; CHECK-NEXT:    testq %rbp, %rbp
+; CHECK-NEXT:    movq $-1, %r11
+; CHECK-NEXT:    cmovnsq %rbp, %r11
+; CHECK-NEXT:    cmpq %r12, %r8
 ; CHECK-NEXT:    jae .LBB1_7
 ; CHECK-NEXT:  # %bb.6: # %vector.memcheck
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT:    addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; CHECK-NEXT:    leaq (%rax,%r10,8), %rax
-; CHECK-NEXT:    cmpq %r15, %rax
+; CHECK-NEXT:    addq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; CHECK-NEXT:    leaq (%r10,%r11,8), %r8
+; CHECK-NEXT:    cmpq %r12, %r8
 ; CHECK-NEXT:    ja .LBB1_14
 ; CHECK-NEXT:  .LBB1_7: # %vector.body.preheader
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT:    leaq -4(%rdx), %r9
-; CHECK-NEXT:    movq %r9, %rax
-; CHECK-NEXT:    shrq $2, %rax
-; CHECK-NEXT:    btl $2, %r9d
+; CHECK-NEXT:    leaq -4(%rcx), %r8
+; CHECK-NEXT:    movq %r8, %r11
+; CHECK-NEXT:    shrq $2, %r11
+; CHECK-NEXT:    btl $2, %r8d
 ; CHECK-NEXT:    jb .LBB1_8
 ; CHECK-NEXT:  # %bb.9: # %vector.body.prol.preheader
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; CHECK-NEXT:    movdqu %xmm0, (%r12,%r13,8)
-; CHECK-NEXT:    movdqu %xmm0, 16(%r12,%r13,8)
+; CHECK-NEXT:    movdqu %xmm0, (%r13,%rbp,8)
+; CHECK-NEXT:    movdqu %xmm0, 16(%r13,%rbp,8)
 ; CHECK-NEXT:    movl $4, %r10d
-; CHECK-NEXT:    testq %rax, %rax
+; CHECK-NEXT:    testq %r11, %r11
 ; CHECK-NEXT:    jne .LBB1_11
 ; CHECK-NEXT:    jmp .LBB1_13
 ; CHECK-NEXT:  .LBB1_8: # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    xorl %r10d, %r10d
-; CHECK-NEXT:    testq %rax, %rax
+; CHECK-NEXT:    testq %r11, %r11
 ; CHECK-NEXT:    je .LBB1_13
 ; CHECK-NEXT:  .LBB1_11: # %vector.body.preheader.new
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; CHECK-NEXT:    movq %r10, %rax
-; CHECK-NEXT:    subq %rdx, %rax
-; CHECK-NEXT:    addq %r13, %r10
-; CHECK-NEXT:    leaq (%r11,%r10,8), %r10
+; CHECK-NEXT:    movq %r10, %r11
+; CHECK-NEXT:    subq %rcx, %r11
+; CHECK-NEXT:    addq %rbp, %r10
+; CHECK-NEXT:    leaq (%rbx,%r10,8), %r10
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB1_12: # %vector.body
 ; CHECK-NEXT:    # Parent Loop BB1_2 Depth=1
@@ -231,28 +232,29 @@ define void @_Z2x6v() local_unnamed_addr {
 ; CHECK-NEXT:    movdqu %xmm0, (%r10)
 ; CHECK-NEXT:    movdqu %xmm0, 16(%r10)
 ; CHECK-NEXT:    addq $64, %r10
-; CHECK-NEXT:    addq $8, %rax
+; CHECK-NEXT:    addq $8, %r11
 ; CHECK-NEXT:    jne .LBB1_12
 ; CHECK-NEXT:  .LBB1_13: # %middle.block
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT:    addq %rdx, %r13
-; CHECK-NEXT:    cmpq %rdx, %rbp
-; CHECK-NEXT:    movq %r13, %rdx
+; CHECK-NEXT:    addq %rcx, %rbp
+; CHECK-NEXT:    cmpq %rcx, %rsi
+; CHECK-NEXT:    movq %rbp, %rcx
 ; CHECK-NEXT:    je .LBB1_15
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB1_14: # %for.body2
 ; CHECK-NEXT:    # Parent Loop BB1_2 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
-; CHECK-NEXT:    movq (%r15), %rax
-; CHECK-NEXT:    movq %rax, (%r12,%r13,8)
-; CHECK-NEXT:    leaq 1(%r13), %rdx
-; CHECK-NEXT:    cmpq $-1, %r13
-; CHECK-NEXT:    movq %rdx, %r13
+; CHECK-NEXT:    movq (%r12), %rcx
+; CHECK-NEXT:    movq %rcx, (%r13,%rbp,8)
+; CHECK-NEXT:    leaq 1(%rbp), %rcx
+; CHECK-NEXT:    cmpq $-1, %rbp
+; CHECK-NEXT:    movq %rcx, %rbp
 ; CHECK-NEXT:    jl .LBB1_14
 ; CHECK-NEXT:    jmp .LBB1_15
 ; CHECK-NEXT:  .LBB1_17: # %for.cond.for.end5_crit_edge
 ; CHECK-NEXT:    movq x5@GOTPCREL(%rip), %rax
-; CHECK-NEXT:    movq %rdi, (%rax)
+; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; CHECK-NEXT:    movq %rcx, (%rax)
 ; CHECK-NEXT:    movq x3@GOTPCREL(%rip), %rax
 ; CHECK-NEXT:    movl $0, (%rax)
 ; CHECK-NEXT:  .LBB1_18: # %for.end5
@@ -270,40 +272,38 @@ define void @_Z2x6v() local_unnamed_addr {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    retq
 entry:
-  %0 = load i32, i32* @x1, align 4
+  %0 = load i32, ptr @x1, align 4
   %and = and i32 %0, 511
   %add = add nuw nsw i32 %and, 1
-  store i32 %add, i32* @x4, align 4
-  %.pr = load i32, i32* @x3, align 4
+  store i32 %add, ptr @x4, align 4
+  %.pr = load i32, ptr @x3, align 4
   %tobool8 = icmp eq i32 %.pr, 0
   br i1 %tobool8, label %for.end5, label %for.cond1thread-pre-split.lr.ph
 
 for.cond1thread-pre-split.lr.ph:                  ; preds = %entry
   %idx.ext13 = zext i32 %add to i64
-  %x5.promoted = load double*, double** @x5, align 8
-  %x5.promoted9 = bitcast double* %x5.promoted to i8*
+  %x5.promoted = load ptr, ptr @x5, align 8
   %1 = xor i32 %.pr, -1
   %2 = zext i32 %1 to i64
   %3 = shl nuw nsw i64 %2, 3
   %4 = add nuw nsw i64 %3, 8
   %5 = mul nuw nsw i64 %4, %idx.ext13
-  %uglygep = getelementptr i8, i8* %x5.promoted9, i64 %5
-  %.pr6.pre = load i32, i32* @x2, align 4
+  %uglygep = getelementptr i8, ptr %x5.promoted, i64 %5
+  %.pr6.pre = load i32, ptr @x2, align 4
   %6 = shl nuw nsw i32 %and, 3
   %addconv = add nuw nsw i32 %6, 8
   %7 = zext i32 %addconv to i64
-  %scevgep15 = getelementptr double, double* %x5.promoted, i64 1
-  %scevgep1516 = bitcast double* %scevgep15 to i8*
+  %scevgep15 = getelementptr double, ptr %x5.promoted, i64 1
   br label %for.cond1thread-pre-split
 
 for.cond1thread-pre-split:                        ; preds = %for.cond1thread-pre-split.lr.ph, %for.inc3
   %indvar = phi i64 [ 0, %for.cond1thread-pre-split.lr.ph ], [ %indvar.next, %for.inc3 ]
   %.pr6 = phi i32 [ %.pr6.pre, %for.cond1thread-pre-split.lr.ph ], [ %.pr611, %for.inc3 ]
-  %8 = phi double* [ %x5.promoted, %for.cond1thread-pre-split.lr.ph ], [ %add.ptr, %for.inc3 ]
+  %8 = phi ptr [ %x5.promoted, %for.cond1thread-pre-split.lr.ph ], [ %add.ptr, %for.inc3 ]
   %9 = phi i32 [ %.pr, %for.cond1thread-pre-split.lr.ph ], [ %inc4, %for.inc3 ]
   %10 = mul i64 %7, %indvar
-  %uglygep14 = getelementptr i8, i8* %x5.promoted9, i64 %10
-  %uglygep17 = getelementptr i8, i8* %scevgep1516, i64 %10
+  %uglygep14 = getelementptr i8, ptr %x5.promoted, i64 %10
+  %uglygep17 = getelementptr i8, ptr %scevgep15, i64 %10
   %cmp7 = icmp slt i32 %.pr6, 0
   br i1 %cmp7, label %for.body2.preheader, label %for.inc3
 
@@ -324,13 +324,13 @@ min.iters.checked:                                ; preds = %for.body2.preheader
 
 vector.memcheck:                                  ; preds = %min.iters.checked
   %16 = shl nsw i64 %11, 3
-  %scevgep = getelementptr i8, i8* %uglygep14, i64 %16
+  %scevgep = getelementptr i8, ptr %uglygep14, i64 %16
   %17 = icmp sgt i64 %11, -1
   %smax18 = select i1 %17, i64 %11, i64 -1
   %18 = shl nsw i64 %smax18, 3
-  %scevgep19 = getelementptr i8, i8* %uglygep17, i64 %18
-  %bound0 = icmp ult i8* %scevgep, bitcast (double* @x0 to i8*)
-  %bound1 = icmp ugt i8* %scevgep19, bitcast (double* @x0 to i8*)
+  %scevgep19 = getelementptr i8, ptr %uglygep17, i64 %18
+  %bound0 = icmp ult ptr %scevgep, @x0
+  %bound1 = icmp ugt ptr %scevgep19, @x0
   %memcheck.conflict = and i1 %bound0, %bound1
   %ind.end = add nsw i64 %11, %n.vec
   br i1 %memcheck.conflict, label %for.body2.preheader21, label %vector.body.preheader
@@ -346,17 +346,15 @@ vector.body.prol.preheader:                       ; preds = %vector.body.prehead
   br label %vector.body.prol
 
 vector.body.prol:                                 ; preds = %vector.body.prol.preheader
-  %22 = load i64, i64* bitcast (double* @x0 to i64*), align 8
+  %22 = load i64, ptr @x0, align 8
   %23 = insertelement <2 x i64> undef, i64 %22, i32 0
   %24 = shufflevector <2 x i64> %23, <2 x i64> undef, <2 x i32> zeroinitializer
   %25 = insertelement <2 x i64> undef, i64 %22, i32 0
   %26 = shufflevector <2 x i64> %25, <2 x i64> undef, <2 x i32> zeroinitializer
-  %27 = getelementptr inbounds double, double* %8, i64 %11
-  %28 = bitcast double* %27 to <2 x i64>*
-  store <2 x i64> %24, <2 x i64>* %28, align 8
-  %29 = getelementptr double, double* %27, i64 2
-  %30 = bitcast double* %29 to <2 x i64>*
-  store <2 x i64> %26, <2 x i64>* %30, align 8
+  %27 = getelementptr inbounds double, ptr %8, i64 %11
+  store <2 x i64> %24, ptr %27, align 8
+  %28 = getelementptr double, ptr %27, i64 2
+  store <2 x i64> %26, ptr %28, align 8
   br label %vector.body.prol.loopexit.unr-lcssa
 
 vector.body.prol.loopexit.unr-lcssa:              ; preds = %vector.body.preheader, %vector.body.prol
@@ -364,42 +362,38 @@ vector.body.prol.loopexit.unr-lcssa:              ; preds = %vector.body.prehead
   br label %vector.body.prol.loopexit
 
 vector.body.prol.loopexit:                        ; preds = %vector.body.prol.loopexit.unr-lcssa
-  %31 = icmp eq i64 %20, 0
-  br i1 %31, label %middle.block, label %vector.body.preheader.new
+  %29 = icmp eq i64 %20, 0
+  br i1 %29, label %middle.block, label %vector.body.preheader.new
 
 vector.body.preheader.new:                        ; preds = %vector.body.prol.loopexit
-  %32 = load i64, i64* bitcast (double* @x0 to i64*), align 8
-  %33 = insertelement <2 x i64> undef, i64 %32, i32 0
+  %30 = load i64, ptr @x0, align 8
+  %31 = insertelement <2 x i64> undef, i64 %30, i32 0
+  %32 = shufflevector <2 x i64> %31, <2 x i64> undef, <2 x i32> zeroinitializer
+  %33 = insertelement <2 x i64> undef, i64 %30, i32 0
   %34 = shufflevector <2 x i64> %33, <2 x i64> undef, <2 x i32> zeroinitializer
-  %35 = insertelement <2 x i64> undef, i64 %32, i32 0
-  %36 = shufflevector <2 x i64> %35, <2 x i64> undef, <2 x i32> zeroinitializer
-  %37 = load i64, i64* bitcast (double* @x0 to i64*), align 8
-  %38 = insertelement <2 x i64> undef, i64 %37, i32 0
+  %35 = load i64, ptr @x0, align 8
+  %36 = insertelement <2 x i64> undef, i64 %35, i32 0
+  %37 = shufflevector <2 x i64> %36, <2 x i64> undef, <2 x i32> zeroinitializer
+  %38 = insertelement <2 x i64> undef, i64 %35, i32 0
   %39 = shufflevector <2 x i64> %38, <2 x i64> undef, <2 x i32> zeroinitializer
-  %40 = insertelement <2 x i64> undef, i64 %37, i32 0
-  %41 = shufflevector <2 x i64> %40, <2 x i64> undef, <2 x i32> zeroinitializer
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.body.preheader.new
   %index = phi i64 [ %index.unr.ph, %vector.body.preheader.new ], [ %index.next.1, %vector.body ]
-  %42 = add i64 %11, %index
-  %43 = getelementptr inbounds double, double* %8, i64 %42
-  %44 = bitcast double* %43 to <2 x i64>*
-  store <2 x i64> %34, <2 x i64>* %44, align 8
-  %45 = getelementptr double, double* %43, i64 2
-  %46 = bitcast double* %45 to <2 x i64>*
-  store <2 x i64> %36, <2 x i64>* %46, align 8
+  %40 = add i64 %11, %index
+  %41 = getelementptr inbounds double, ptr %8, i64 %40
+  store <2 x i64> %32, ptr %41, align 8
+  %42 = getelementptr double, ptr %41, i64 2
+  store <2 x i64> %34, ptr %42, align 8
   %index.next = add i64 %index, 4
-  %47 = add i64 %11, %index.next
-  %48 = getelementptr inbounds double, double* %8, i64 %47
-  %49 = bitcast double* %48 to <2 x i64>*
-  store <2 x i64> %39, <2 x i64>* %49, align 8
-  %50 = getelementptr double, double* %48, i64 2
-  %51 = bitcast double* %50 to <2 x i64>*
-  store <2 x i64> %41, <2 x i64>* %51, align 8
+  %43 = add i64 %11, %index.next
+  %44 = getelementptr inbounds double, ptr %8, i64 %43
+  store <2 x i64> %37, ptr %44, align 8
+  %45 = getelementptr double, ptr %44, i64 2
+  store <2 x i64> %39, ptr %45, align 8
   %index.next.1 = add i64 %index, 8
-  %52 = icmp eq i64 %index.next.1, %n.vec
-  br i1 %52, label %middle.block.unr-lcssa, label %vector.body
+  %46 = icmp eq i64 %index.next.1, %n.vec
+  br i1 %46, label %middle.block.unr-lcssa, label %vector.body
 
 middle.block.unr-lcssa:                           ; preds = %vector.body
   br label %middle.block
@@ -414,10 +408,9 @@ for.body2.preheader21:                            ; preds = %middle.block, %vect
 
 for.body2:                                        ; preds = %for.body2.preheader21, %for.body2
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body2 ], [ %indvars.iv.ph, %for.body2.preheader21 ]
-  %53 = load i64, i64* bitcast (double* @x0 to i64*), align 8
-  %arrayidx = getelementptr inbounds double, double* %8, i64 %indvars.iv
-  %54 = bitcast double* %arrayidx to i64*
-  store i64 %53, i64* %54, align 8
+  %47 = load i64, ptr @x0, align 8
+  %arrayidx = getelementptr inbounds double, ptr %8, i64 %indvars.iv
+  store i64 %47, ptr %arrayidx, align 8
   %indvars.iv.next = add nsw i64 %indvars.iv, 1
   %cmp = icmp slt i64 %indvars.iv, -1
   br i1 %cmp, label %for.body2, label %for.cond1.for.inc3_crit_edge.loopexit
@@ -427,21 +420,21 @@ for.cond1.for.inc3_crit_edge.loopexit:            ; preds = %for.body2
 
 for.cond1.for.inc3_crit_edge:                     ; preds = %for.cond1.for.inc3_crit_edge.loopexit, %middle.block
   %indvars.iv.next.lcssa = phi i64 [ %ind.end, %middle.block ], [ %indvars.iv.next, %for.cond1.for.inc3_crit_edge.loopexit ]
-  %55 = trunc i64 %indvars.iv.next.lcssa to i32
-  store i32 %55, i32* @x2, align 4
+  %48 = trunc i64 %indvars.iv.next.lcssa to i32
+  store i32 %48, ptr @x2, align 4
   br label %for.inc3
 
 for.inc3:                                         ; preds = %for.cond1.for.inc3_crit_edge, %for.cond1thread-pre-split
-  %.pr611 = phi i32 [ %55, %for.cond1.for.inc3_crit_edge ], [ %.pr6, %for.cond1thread-pre-split ]
+  %.pr611 = phi i32 [ %48, %for.cond1.for.inc3_crit_edge ], [ %.pr6, %for.cond1thread-pre-split ]
   %inc4 = add nsw i32 %9, 1
-  %add.ptr = getelementptr inbounds double, double* %8, i64 %idx.ext13
+  %add.ptr = getelementptr inbounds double, ptr %8, i64 %idx.ext13
   %tobool = icmp eq i32 %inc4, 0
   %indvar.next = add i64 %indvar, 1
   br i1 %tobool, label %for.cond.for.end5_crit_edge, label %for.cond1thread-pre-split
 
 for.cond.for.end5_crit_edge:                      ; preds = %for.inc3
-  store i8* %uglygep, i8** bitcast (double** @x5 to i8**), align 8
-  store i32 0, i32* @x3, align 4
+  store ptr %uglygep, ptr @x5, align 8
+  store i32 0, ptr @x3, align 4
   br label %for.end5
 
 for.end5:                                         ; preds = %for.cond.for.end5_crit_edge, %entry

diff  --git a/llvm/test/CodeGen/X86/fold-sext-trunc.ll b/llvm/test/CodeGen/X86/fold-sext-trunc.ll
index 1d320f3c68e0c..c29ca2204a6ad 100644
--- a/llvm/test/CodeGen/X86/fold-sext-trunc.ll
+++ b/llvm/test/CodeGen/X86/fold-sext-trunc.ll
@@ -1,5 +1,5 @@
-; RUN: llc -opaque-pointers=0 < %s -mtriple=x86_64-- | FileCheck %s
-; RUN: llc -opaque-pointers=0 < %s -O0 -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -stop-after livedebugvalues -o - | FileCheck %s -check-prefix=MIR
+; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s
+; RUN: llc < %s -O0 -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -stop-after livedebugvalues -o - | FileCheck %s -check-prefix=MIR
 ; PR4050
 
 %0 = type { i64 }
@@ -12,9 +12,9 @@ declare void @func_28(i64, i64)
 ; CHECK: movslq  g_10+4(%rip), %rdi
 define void @int322(i32 %foo) !dbg !5 {
 entry:
-  %val = load i64, i64* getelementptr (%0, %0* bitcast (%struct.S1* @g_10 to %0*), i32 0, i32 0), !dbg !16
-  %0 = load i32, i32* getelementptr inbounds (%struct.S1, %struct.S1* @g_10, i32 0, i32 1), align 4, !dbg !17
-; MIR: renamable {{\$r[a-z]+}} = MOVSX64rm32 {{.*}}, @g_10 + 4,{{.*}} debug-location !17 :: (dereferenceable load (s32) from `i64* getelementptr (%0, %0* bitcast (%struct.S1* @g_10 to %0*), i32 0, i32 0)` + 4)
+  %val = load i64, ptr @g_10, !dbg !16
+  %0 = load i32, ptr getelementptr inbounds (%struct.S1, ptr @g_10, i32 0, i32 1), align 4, !dbg !17
+; MIR: renamable {{\$r[a-z]+}} = MOVSX64rm32 {{.*}}, @g_10 + 4,{{.*}} debug-location !17 :: (dereferenceable load (s32) from @g_10 + 4)
   %1 = sext i32 %0 to i64, !dbg !18
   %tmp4.i = lshr i64 %val, 32, !dbg !19
   %tmp5.i = trunc i64 %tmp4.i to i32, !dbg !20

diff  --git a/llvm/test/CodeGen/X86/pr32610.ll b/llvm/test/CodeGen/X86/pr32610.ll
index c2a8252d3b9e7..dc11ba8466ae5 100644
--- a/llvm/test/CodeGen/X86/pr32610.ll
+++ b/llvm/test/CodeGen/X86/pr32610.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 -mtriple=i386-apple-macosx10.13.0 -o - %s | FileCheck %s
+; RUN: llc -mtriple=i386-apple-macosx10.13.0 -o - %s | FileCheck %s
 
 target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
 
@@ -15,18 +15,18 @@ define void @pr32610(i32 %a0, i32 %a1) #0 {
 ; CHECK-NEXT:    pushl %esi
 ; CHECK-NEXT:    movl 8(%ebp), %edx
 ; CHECK-NEXT:    movl L_b$non_lazy_ptr, %eax
+; CHECK-NEXT:    movl (%eax), %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    cmpl (%eax), %edx
+; CHECK-NEXT:    cmpl %eax, %edx
 ; CHECK-NEXT:    sete %cl
 ; CHECK-NEXT:    xorl %esi, %esi
 ; CHECK-NEXT:    incl %esi
 ; CHECK-NEXT:    cmpl $0, 12(%ebp)
 ; CHECK-NEXT:    cmovel %esi, %ecx
-; CHECK-NEXT:    cmpl (%eax), %edx
+; CHECK-NEXT:    cmpl %eax, %edx
 ; CHECK-NEXT:    cmovnel %esi, %ecx
 ; CHECK-NEXT:    movl L_c$non_lazy_ptr, %edx
 ; CHECK-NEXT:    movl %ecx, (%edx)
-; CHECK-NEXT:    movl (%eax), %eax
 ; CHECK-NEXT:    testl %eax, %eax
 ; CHECK-NEXT:    movl $2, %ecx
 ; CHECK-NEXT:    cmovnel %eax, %ecx
@@ -36,17 +36,17 @@ define void @pr32610(i32 %a0, i32 %a1) #0 {
 ; CHECK-NEXT:    popl %ebp
 ; CHECK-NEXT:    retl
 entry:
-  %0 = load i32, i32* getelementptr ([1 x i32], [1 x i32]* @b, i32 0, i32 undef), align 4, !tbaa !1
+  %0 = load i32, ptr getelementptr ([1 x i32], ptr @b, i32 0, i32 undef), align 4, !tbaa !1
   %cmp = icmp eq i32 %a0, %0
   %conv = zext i1 %cmp to i32
   %tobool1.i = icmp ne i32 %a1, 0
   %or.cond.i = and i1 %cmp, %tobool1.i
   %cond.i = select i1 %or.cond.i, i32 %conv, i32 1
-  store i32 %cond.i, i32* @c, align 4, !tbaa !1
-  %1 = load i32, i32* getelementptr inbounds ([1 x i32], [1 x i32]* @b, i32 0, i32 0), align 4
+  store i32 %cond.i, ptr @c, align 4, !tbaa !1
+  %1 = load i32, ptr @b, align 4
   %tobool = icmp ne i32 %1, 0
   %2 = select i1 %tobool, i32 %1, i32 2
-  store i32 %2, i32* @d, align 4, !tbaa !1
+  store i32 %2, ptr @d, align 4, !tbaa !1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/sink-gep-before-mem-inst.ll b/llvm/test/CodeGen/X86/sink-gep-before-mem-inst.ll
index 19bfc37f86306..98df3ce467e9f 100644
--- a/llvm/test/CodeGen/X86/sink-gep-before-mem-inst.ll
+++ b/llvm/test/CodeGen/X86/sink-gep-before-mem-inst.ll
@@ -1,20 +1,18 @@
-; RUN: opt -opaque-pointers=0 < %s -S -codegenprepare -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+; RUN: opt < %s -S -codegenprepare -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
 
-define i64 @test.after(i8 addrspace(1)* readonly align 8) {
+define i64 @test.after(ptr addrspace(1) readonly align 8) {
 ; CHECK-LABEL: test.after
 ; CHECK: sunkaddr
 entry:
-  %.0 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 8
-  %addr = bitcast i8 addrspace(1)* %.0 to i32 addrspace(1)*
+  %.0 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 8
   br label %header
 
 header:
-  %addr.in.loop = phi i32 addrspace(1)* [ %addr, %entry ], [ %addr.after, %header ]
+  %addr.in.loop = phi ptr addrspace(1) [ %.0, %entry ], [ %.1, %header ]
   %local_2_ = phi i64 [ 0, %entry ], [ %.9, %header ]
-  %.7 = load i32, i32 addrspace(1)* %addr.in.loop, align 8
+  %.7 = load i32, ptr addrspace(1) %addr.in.loop, align 8
   fence acquire
-  %.1 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 8
-  %addr.after = bitcast i8 addrspace(1)* %.1 to i32 addrspace(1)*
+  %.1 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 8
   %.8 = sext i32 %.7 to i64
   %.9 = add i64 %local_2_, %.8
   %not. = icmp sgt i64 %.9, 999

diff  --git a/llvm/test/CodeGen/X86/stack-protector-no-return.ll b/llvm/test/CodeGen/X86/stack-protector-no-return.ll
index 8f60629e62a88..cfebf0080a6d6 100644
--- a/llvm/test/CodeGen/X86/stack-protector-no-return.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-no-return.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 %s -mtriple=x86_64-unknown-linux-gnu -o - -verify-dom-info | FileCheck %s
-; RUN: llc -opaque-pointers=0 %s -mtriple=x86_64-unknown-linux-gnu -disable-check-noreturn-call=true -o - -verify-dom-info | FileCheck --check-prefix=DISNOTET %s
+; RUN: llc %s -mtriple=x86_64-unknown-linux-gnu -o - -verify-dom-info | FileCheck %s
+; RUN: llc %s -mtriple=x86_64-unknown-linux-gnu -disable-check-noreturn-call=true -o - -verify-dom-info | FileCheck --check-prefix=DISNOTET %s
 
 ; Function Attrs: sspreq
-define void @_Z7catchesv() #0 personality i8* null {
+define void @_Z7catchesv() #0 personality ptr null {
 ; CHECK-LABEL: _Z7catchesv:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushq %rax
@@ -74,7 +74,7 @@ define void @_Z7catchesv() #0 personality i8* null {
 ; DISNOTET-NEXT:    .cfi_def_cfa_offset 16
 ; DISNOTET-NEXT:    callq __stack_chk_fail at PLT
 entry:
-  %call = invoke i64 null(i32 0, i8* null, i64 0)
+  %call = invoke i64 null(i32 0, ptr null, i64 0)
           to label %invoke.cont unwind label %lpad1
 
 invoke.cont:                                      ; preds = %entry
@@ -85,13 +85,13 @@ invoke.cont2:                                     ; preds = %invoke.cont
   unreachable
 
 lpad1:                                            ; preds = %invoke.cont, %entry
-  %0 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
   ret void
 }
 
 ; uselistorder directives
-uselistorder i8* null, { 1, 0 }
+uselistorder ptr null, { 5, 4, 3, 2, 1, 0 }
 
 attributes #0 = { sspreq }
 attributes #1 = { noreturn }

diff  --git a/llvm/test/CodeGen/X86/tailcall-extract.ll b/llvm/test/CodeGen/X86/tailcall-extract.ll
index 2d3207e2a9469..c3597a8e5b99e 100644
--- a/llvm/test/CodeGen/X86/tailcall-extract.ll
+++ b/llvm/test/CodeGen/X86/tailcall-extract.ll
@@ -1,5 +1,5 @@
-; RUN: llc -opaque-pointers=0 -mtriple=x86_64-linux < %s | FileCheck %s
-; RUN: opt -opaque-pointers=0 -codegenprepare -S -mtriple=x86_64-linux < %s | FileCheck %s --check-prefix OPT
+; RUN: llc -mtriple=x86_64-linux < %s | FileCheck %s
+; RUN: opt -codegenprepare -S -mtriple=x86_64-linux < %s | FileCheck %s --check-prefix OPT
 
 
 ; The exit block containing extractvalue can be duplicated into the BB
@@ -11,18 +11,16 @@
 
 ; OPT-LABEL:   test1
 ; OPT:         if.then.i:
-; OPT-NEXT:    tail call { i8*, i64 } @bar
+; OPT-NEXT:    tail call { ptr, i64 } @bar
 ; OPT-NEXT:    extractvalue
-; OPT-NEXT:    bitcast
 ; OPT-NEXT:    ret
 ;
 ; OPT:         if.end.i:
-; OPT-NEXT:    tail call { i8*, i64 } @foo
+; OPT-NEXT:    tail call { ptr, i64 } @foo
 ; OPT-NEXT:    extractvalue
-; OPT-NEXT:    bitcast
 ; OPT-NEXT:    ret
 
-define i64* @test1(i64 %size) {
+define ptr @test1(i64 %size) {
 entry:
   %cmp.i.i = icmp ugt i64 %size, 16384
   %add.i.i = add i64 %size, 7
@@ -32,18 +30,17 @@ entry:
   %cmp.i = or i1 %cmp.i.i, %cmp1.i
   br i1 %cmp.i, label %if.end.i, label %if.then.i
   if.then.i:                                        ; preds = %entry
-  %call1.i = tail call { i8*, i64 } @bar(i64 %size)
+  %call1.i = tail call { ptr, i64 } @bar(i64 %size)
   br label %exit
 
 if.end.i:                                         ; preds = %entry
-  %call2.i = tail call { i8*, i64 } @foo(i64 %size)
+  %call2.i = tail call { ptr, i64 } @foo(i64 %size)
   br label %exit
 
 exit:
-  %call1.i.sink = phi { i8*, i64 } [ %call1.i, %if.then.i ], [ %call2.i, %if.end.i ]
-  %ev = extractvalue { i8*, i64 } %call1.i.sink, 0
-  %result = bitcast i8* %ev to i64*
-  ret i64* %result
+  %call1.i.sink = phi { ptr, i64 } [ %call1.i, %if.then.i ], [ %call2.i, %if.end.i ]
+  %ev = extractvalue { ptr, i64 } %call1.i.sink, 0
+  ret ptr %ev
 }
 
 
@@ -56,11 +53,11 @@ exit:
 
 ; OPT-LABEL:   test2
 ; OPT:         if.then.i:
-; OPT-NEXT:    tail call { i8*, i64 } @bar
+; OPT-NEXT:    tail call { ptr, i64 } @bar
 ; OPT-NEXT:    br label %exit
 ;
 ; OPT:         if.end.i:
-; OPT-NEXT:    tail call { i8*, i64 } @foo
+; OPT-NEXT:    tail call { ptr, i64 } @foo
 ; OPT-NEXT:    br label %exit
 ;
 ; OPT:         exit:
@@ -78,16 +75,16 @@ entry:
   %cmp.i = or i1 %cmp.i.i, %cmp1.i
   br i1 %cmp.i, label %if.end.i, label %if.then.i
   if.then.i:                                        ; preds = %entry
-  %call1.i = tail call { i8*, i64 } @bar(i64 %size)
+  %call1.i = tail call { ptr, i64 } @bar(i64 %size)
   br label %exit
 
 if.end.i:                                         ; preds = %entry
-  %call2.i = tail call { i8*, i64 } @foo(i64 %size)
+  %call2.i = tail call { ptr, i64 } @foo(i64 %size)
   br label %exit
 
 exit:
-  %call1.i.sink = phi { i8*, i64 } [ %call1.i, %if.then.i ], [ %call2.i, %if.end.i ]
-  %ev = extractvalue { i8*, i64 } %call1.i.sink, 1
+  %call1.i.sink = phi { ptr, i64 } [ %call1.i, %if.then.i ], [ %call2.i, %if.end.i ]
+  %ev = extractvalue { ptr, i64 } %call1.i.sink, 1
   ret i64 %ev
 }
 
@@ -101,18 +98,16 @@ exit:
 
 ; OPT-LABEL:   test3
 ; OPT:         if.then.i:
-; OPT-NEXT:    tail call { { i8*, i64 }, i64 } @baz
+; OPT-NEXT:    tail call { { ptr, i64 }, i64 } @baz
 ; OPT-NEXT:    extractvalue
-; OPT-NEXT:    bitcast
 ; OPT-NEXT:    ret
 ;
 ; OPT:         if.end.i:
-; OPT-NEXT:    tail call { { i8*, i64 }, i64 } @qux
+; OPT-NEXT:    tail call { { ptr, i64 }, i64 } @qux
 ; OPT-NEXT:    extractvalue
-; OPT-NEXT:    bitcast
 ; OPT-NEXT:    ret
 
-define i64* @test3(i64 %size) {
+define ptr @test3(i64 %size) {
 entry:
   %cmp.i.i = icmp ugt i64 %size, 16384
   %add.i.i = add i64 %size, 7
@@ -123,18 +118,17 @@ entry:
   br i1 %cmp.i, label %if.end.i, label %if.then.i
 
 if.then.i:                                        ; preds = %entry
-  %call1.i = tail call { {i8*, i64}, i64 } @baz(i64 %size)
+  %call1.i = tail call { {ptr, i64}, i64 } @baz(i64 %size)
   br label %exit
 
 if.end.i:                                         ; preds = %entry
-  %call2.i = tail call { {i8*, i64}, i64 } @qux(i64 %size)
+  %call2.i = tail call { {ptr, i64}, i64 } @qux(i64 %size)
   br label %exit
 
 exit:
-  %call1.i.sink = phi { {i8*, i64}, i64 } [ %call1.i, %if.then.i ], [ %call2.i, %if.end.i ]
-  %ev = extractvalue { {i8*, i64}, i64 } %call1.i.sink, 0, 0
-  %result = bitcast i8* %ev to i64*
-  ret i64* %result
+  %call1.i.sink = phi { {ptr, i64}, i64 } [ %call1.i, %if.then.i ], [ %call2.i, %if.end.i ]
+  %ev = extractvalue { {ptr, i64}, i64 } %call1.i.sink, 0, 0
+  ret ptr %ev
 }
 
 
@@ -147,11 +141,11 @@ exit:
 
 ; OPT-LABEL:   test4
 ; OPT:         if.then.i:
-; OPT-NEXT:    tail call { { i8*, i64 }, i64 } @baz
+; OPT-NEXT:    tail call { { ptr, i64 }, i64 } @baz
 ; OPT-NEXT:    br label %exit
 ;
 ; OPT:         if.end.i:
-; OPT-NEXT:    tail call { { i8*, i64 }, i64 } @qux
+; OPT-NEXT:    tail call { { ptr, i64 }, i64 } @qux
 ; OPT-NEXT:    br label %exit
 ;
 ; OPT:         exit:
@@ -170,21 +164,21 @@ entry:
   br i1 %cmp.i, label %if.end.i, label %if.then.i
 
 if.then.i:                                        ; preds = %entry
-  %call1.i = tail call { {i8*, i64}, i64 } @baz(i64 %size)
+  %call1.i = tail call { {ptr, i64}, i64 } @baz(i64 %size)
   br label %exit
 
 if.end.i:                                         ; preds = %entry
-  %call2.i = tail call { {i8*, i64}, i64 } @qux(i64 %size)
+  %call2.i = tail call { {ptr, i64}, i64 } @qux(i64 %size)
   br label %exit
 
 exit:
-  %call1.i.sink = phi { {i8*, i64}, i64 } [ %call1.i, %if.then.i ], [ %call2.i, %if.end.i ]
-  %ev = extractvalue { {i8*, i64}, i64 } %call1.i.sink, 0, 1
+  %call1.i.sink = phi { {ptr, i64}, i64 } [ %call1.i, %if.then.i ], [ %call2.i, %if.end.i ]
+  %ev = extractvalue { {ptr, i64}, i64 } %call1.i.sink, 0, 1
   ret i64 %ev
 }
 
 
-declare dso_local { i8*, i64 } @foo(i64)
-declare dso_local { i8*, i64 } @bar(i64)
-declare dso_local { {i8*, i64}, i64 } @baz(i64)
-declare dso_local { {i8*, i64}, i64 } @qux(i64)
+declare dso_local { ptr, i64 } @foo(i64)
+declare dso_local { ptr, i64 } @bar(i64)
+declare dso_local { {ptr, i64}, i64 } @baz(i64)
+declare dso_local { {ptr, i64}, i64 } @qux(i64)

diff  --git a/llvm/test/CodeGen/X86/tls-loads-control.ll b/llvm/test/CodeGen/X86/tls-loads-control.ll
index 78cd99a3183ff..8d9bf61c53fa5 100644
--- a/llvm/test/CodeGen/X86/tls-loads-control.ll
+++ b/llvm/test/CodeGen/X86/tls-loads-control.ll
@@ -1,5 +1,5 @@
-; RUN: llc -opaque-pointers=0 -mtriple=x86_64-unknown-unknown -O2 --relocation-model=pic --tls-load-hoist=true --stop-after=tlshoist -o - %s | FileCheck %s
-; RUN: llc -opaque-pointers=0 -mtriple=x86_64-unknown-unknown -O2 --relocation-model=pic --stop-after=tlshoist -o - %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-unknown-unknown -O2 --relocation-model=pic --tls-load-hoist=true --stop-after=tlshoist -o - %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-unknown-unknown -O2 --relocation-model=pic --stop-after=tlshoist -o - %s | FileCheck %s
 
 ; This test come from compiling clang/test/CodeGen/intel/tls_loads.cpp with:
 ; (clang tls_loads.cpp -fPIC -ftls-model=global-dynamic -O2 -S -emit-llvm)
@@ -47,11 +47,11 @@ define noundef i32 @_Z2f1i(i32 noundef %c) local_unnamed_addr #0 {
 ; CHECK:      entry:
 ; CHECK-NEXT:   %call = tail call noundef i32 @_Z5gfuncv()
 ; CHECK-NEXT:   %phi.cmp = icmp eq i32 %call, 0
-; CHECK-NEXT:   %tls_bitcast1 = bitcast i32* @thl_x to i32*
+; CHECK-NEXT:   %tls_bitcast1 = bitcast ptr @thl_x to ptr
 ; CHECK-NEXT:   br i1 %phi.cmp, label %while.end11, label %while.body4.preheader
 
 ; CHECK:      while.body4.preheader:
-; CHECK-NEXT:   %tls_bitcast = bitcast i32* @thl_x2 to i32*
+; CHECK-NEXT:   %tls_bitcast = bitcast ptr @thl_x2 to ptr
 ; CHECK-NEXT:   br label %while.body4
 
 ; CHECK:      while.body4:
@@ -71,16 +71,16 @@ define noundef i32 @_Z2f1i(i32 noundef %c) local_unnamed_addr #0 {
 ; CHECK:      while.body8:
 ; CHECK-NEXT:   %c.addr.219 = phi i32 [ %dec, %while.body8 ], [ %call5, %while.body8.preheader ]
 ; CHECK-NEXT:   %dec = add i32 %c.addr.219, -1
-; CHECK-NEXT:   %0 = load i32, i32* %tls_bitcast, align 4
+; CHECK-NEXT:   %0 = load i32, ptr %tls_bitcast, align 4
 ; CHECK-NEXT:   %call9 = tail call noundef i32 @_Z6gfunc2i(i32 noundef %0)
-; CHECK-NEXT:   %1 = load i32, i32* %tls_bitcast1, align 4
+; CHECK-NEXT:   %1 = load i32, ptr %tls_bitcast1, align 4
 ; CHECK-NEXT:   %add = add nsw i32 %1, %call9
-; CHECK-NEXT:   store i32 %add, i32* %tls_bitcast1, align 4
+; CHECK-NEXT:   store i32 %add, ptr %tls_bitcast1, align 4
 ; CHECK-NEXT:   %tobool7.not = icmp eq i32 %dec, 0
 ; CHECK-NEXT:   br i1 %tobool7.not, label %while.body4.backedge.loopexit, label %while.body8, !llvm.loop !4
 
 ; CHECK:      while.end11:
-; CHECK-NEXT:   %2 = load i32, i32* %tls_bitcast1, align 4
+; CHECK-NEXT:   %2 = load i32, ptr %tls_bitcast1, align 4
 ; CHECK-NEXT:   ret i32 %2
 
 entry:
@@ -99,16 +99,16 @@ while.body4.backedge:                             ; preds = %while.body8, %while
 while.body8:                                      ; preds = %while.body4, %while.body8
   %c.addr.219 = phi i32 [ %dec, %while.body8 ], [ %call5, %while.body4 ]
   %dec = add nsw i32 %c.addr.219, -1
-  %0 = load i32, i32* @thl_x2, align 4
+  %0 = load i32, ptr @thl_x2, align 4
   %call9 = tail call noundef i32 @_Z6gfunc2i(i32 noundef %0)
-  %1 = load i32, i32* @thl_x, align 4
+  %1 = load i32, ptr @thl_x, align 4
   %add = add nsw i32 %1, %call9
-  store i32 %add, i32* @thl_x, align 4
+  store i32 %add, ptr @thl_x, align 4
   %tobool7.not = icmp eq i32 %dec, 0
   br i1 %tobool7.not, label %while.body4.backedge, label %while.body8, !llvm.loop !4
 
 while.end11:                                      ; preds = %entry
-  %2 = load i32, i32* @thl_x, align 4
+  %2 = load i32, ptr @thl_x, align 4
   ret i32 %2
 }
 
@@ -134,30 +134,30 @@ define noundef i32 @_Z2f2i(i32 noundef %c) local_unnamed_addr #0 {
 ; CHECK-NEXT:   %call = tail call noundef i32 @_Z5gfuncv()
 ; CHECK-NEXT:   %add = add nsw i32 %call, %c
 ; CHECK-NEXT:   %tobool.not12 = icmp eq i32 %add, 0
-; CHECK-NEXT:   %tls_bitcast = bitcast i32* @thl_x to i32*
+; CHECK-NEXT:   %tls_bitcast = bitcast ptr @thl_x to ptr
 ; CHECK-NEXT:   br i1 %tobool.not12, label %while.end, label %while.body.preheader
 
 ; CHECK:      while.body.preheader:
-; CHECK-NEXT:   %tls_bitcast1 = bitcast i8* @_ZZ2f2iE2st.0 to i8*
-; CHECK-NEXT:   %tls_bitcast2 = bitcast i32* @_ZZ2f2iE2st.1 to i32*
+; CHECK-NEXT:   %tls_bitcast1 = bitcast ptr @_ZZ2f2iE2st.0 to ptr
+; CHECK-NEXT:   %tls_bitcast2 = bitcast ptr @_ZZ2f2iE2st.1 to ptr
 ; CHECK-NEXT:   br label %while.body
 
 ; CHECK:      while.body:
 ; CHECK-NEXT:   %c.addr.013 = phi i32 [ %dec, %while.body ], [ %add, %while.body.preheader ]
 ; CHECK-NEXT:   %dec = add i32 %c.addr.013, -1
 ; CHECK-NEXT:   %call1 = tail call noundef i32 @_Z5gfuncv()
-; CHECK-NEXT:   %0 = load i32, i32* %tls_bitcast, align 4
+; CHECK-NEXT:   %0 = load i32, ptr %tls_bitcast, align 4
 ; CHECK-NEXT:   %add2 = add nsw i32 %0, %call1
-; CHECK-NEXT:   store i32 %add2, i32* %tls_bitcast, align 4
+; CHECK-NEXT:   store i32 %add2, ptr %tls_bitcast, align 4
 ; CHECK-NEXT:   %call3 = tail call noundef i32 @_Z5gfuncv()
-; CHECK-NEXT:   %1 = load i8, i8* %tls_bitcast1, align 4
+; CHECK-NEXT:   %1 = load i8, ptr %tls_bitcast1, align 4
 ; CHECK-NEXT:   %2 = trunc i32 %call3 to i8
 ; CHECK-NEXT:   %conv7 = add i8 %1, %2
-; CHECK-NEXT:   store i8 %conv7, i8* %tls_bitcast1, align 4
+; CHECK-NEXT:   store i8 %conv7, ptr %tls_bitcast1, align 4
 ; CHECK-NEXT:   %call8 = tail call noundef i32 @_Z5gfuncv()
-; CHECK-NEXT:   %3 = load i32, i32* %tls_bitcast2, align 4
+; CHECK-NEXT:   %3 = load i32, ptr %tls_bitcast2, align 4
 ; CHECK-NEXT:   %add9 = add nsw i32 %3, %call8
-; CHECK-NEXT:   store i32 %add9, i32* %tls_bitcast2, align 4
+; CHECK-NEXT:   store i32 %add9, ptr %tls_bitcast2, align 4
 ; CHECK-NEXT:   %tobool.not = icmp eq i32 %dec, 0
 ; CHECK-NEXT:   br i1 %tobool.not, label %while.end.loopexit, label %while.body
 
@@ -165,7 +165,7 @@ define noundef i32 @_Z2f2i(i32 noundef %c) local_unnamed_addr #0 {
 ; CHECK-NEXT:   br label %while.end
 
 ; CHECK:      while.end:
-; CHECK-NEXT:   %4 = load i32, i32* %tls_bitcast, align 4
+; CHECK-NEXT:   %4 = load i32, ptr %tls_bitcast, align 4
 ; CHECK-NEXT:   ret i32 %4
 entry:
   %call = tail call noundef i32 @_Z5gfuncv()
@@ -177,23 +177,23 @@ while.body:                                       ; preds = %entry, %while.body
   %c.addr.013 = phi i32 [ %dec, %while.body ], [ %add, %entry ]
   %dec = add nsw i32 %c.addr.013, -1
   %call1 = tail call noundef i32 @_Z5gfuncv()
-  %0 = load i32, i32* @thl_x, align 4
+  %0 = load i32, ptr @thl_x, align 4
   %add2 = add nsw i32 %0, %call1
-  store i32 %add2, i32* @thl_x, align 4
+  store i32 %add2, ptr @thl_x, align 4
   %call3 = tail call noundef i32 @_Z5gfuncv()
-  %1 = load i8, i8* @_ZZ2f2iE2st.0, align 4
+  %1 = load i8, ptr @_ZZ2f2iE2st.0, align 4
   %2 = trunc i32 %call3 to i8
   %conv7 = add i8 %1, %2
-  store i8 %conv7, i8* @_ZZ2f2iE2st.0, align 4
+  store i8 %conv7, ptr @_ZZ2f2iE2st.0, align 4
   %call8 = tail call noundef i32 @_Z5gfuncv()
-  %3 = load i32, i32* @_ZZ2f2iE2st.1, align 4
+  %3 = load i32, ptr @_ZZ2f2iE2st.1, align 4
   %add9 = add nsw i32 %3, %call8
-  store i32 %add9, i32* @_ZZ2f2iE2st.1, align 4
+  store i32 %add9, ptr @_ZZ2f2iE2st.1, align 4
   %tobool.not = icmp eq i32 %dec, 0
   br i1 %tobool.not, label %while.end, label %while.body
 
 while.end:                                        ; preds = %while.body, %entry
-  %4 = load i32, i32* @thl_x, align 4
+  %4 = load i32, ptr @thl_x, align 4
   ret i32 %4
 }
 
@@ -209,28 +209,28 @@ while.end:                                        ; preds = %while.body, %entry
 define noundef i32 @_Z2f3i(i32 noundef %c) local_unnamed_addr #0 {
 ; CHECK-LABEL: _Z2f3i
 ; CHECK:      entry:
-; CHECK-NEXT:   %tls_bitcast = bitcast i32* @thl_x to i32*
-; CHECK-NEXT:   %0 = load i32, i32* %tls_bitcast, align 4
+; CHECK-NEXT:   %tls_bitcast = bitcast ptr @thl_x to ptr
+; CHECK-NEXT:   %0 = load i32, ptr %tls_bitcast, align 4
 ; CHECK-NEXT:   %call = tail call noundef i32 @_Z6gfunc2i(i32 noundef %0)
-; CHECK-NEXT:   %1 = load i32, i32* %tls_bitcast, align 4
+; CHECK-NEXT:   %1 = load i32, ptr %tls_bitcast, align 4
 ; CHECK-NEXT:   %call1 = tail call noundef i32 @_Z6gfunc2i(i32 noundef %1)
 ; CHECK-NEXT:   ret i32 1
 entry:
-  %0 = load i32, i32* @thl_x, align 4
+  %0 = load i32, ptr @thl_x, align 4
   %call = tail call noundef i32 @_Z6gfunc2i(i32 noundef %0)
-  %1 = load i32, i32* @thl_x, align 4
+  %1 = load i32, ptr @thl_x, align 4
   %call1 = tail call noundef i32 @_Z6gfunc2i(i32 noundef %1)
   ret i32 1
 }
 
 ; Function Attrs: uwtable
-define weak_odr hidden noundef i32* @_ZTW5thl_x() local_unnamed_addr #2 comdat {
-  ret i32* @thl_x
+define weak_odr hidden noundef ptr @_ZTW5thl_x() local_unnamed_addr #2 comdat {
+  ret ptr @thl_x
 }
 
 ; Function Attrs: uwtable
-define weak_odr hidden noundef i32* @_ZTW6thl_x2() local_unnamed_addr #2 comdat {
-  ret i32* @thl_x2
+define weak_odr hidden noundef ptr @_ZTW6thl_x2() local_unnamed_addr #2 comdat {
+  ret ptr @thl_x2
 }
 
 attributes #0 = { mustprogress uwtable "tls-load-hoist" "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" }

diff  --git a/llvm/test/CodeGen/X86/tls-loads-control2.ll b/llvm/test/CodeGen/X86/tls-loads-control2.ll
index 3f5810db29a5e..fb0f1d2d7398d 100644
--- a/llvm/test/CodeGen/X86/tls-loads-control2.ll
+++ b/llvm/test/CodeGen/X86/tls-loads-control2.ll
@@ -1,5 +1,5 @@
-; RUN: opt -opaque-pointers=0 -S -mtriple=x86_64-unknown-unknown -passes=tlshoist --relocation-model=pic --tls-load-hoist=true -o - %s | FileCheck %s --check-prefix=HOIST0
-; RUN: opt -opaque-pointers=0 -S -mtriple=x86_64-unknown-unknown -passes=tlshoist --relocation-model=pic -o - %s | FileCheck %s --check-prefix=HOIST2
+; RUN: opt -S -mtriple=x86_64-unknown-unknown -passes=tlshoist --relocation-model=pic --tls-load-hoist=true -o - %s | FileCheck %s --check-prefix=HOIST0
+; RUN: opt -S -mtriple=x86_64-unknown-unknown -passes=tlshoist --relocation-model=pic -o - %s | FileCheck %s --check-prefix=HOIST2
 
 $_ZTW5thl_x = comdat any
 
@@ -8,35 +8,35 @@ $_ZTW5thl_x = comdat any
 ; Function Attrs: mustprogress uwtable
 define i32 @_Z2f1i(i32 %c) local_unnamed_addr #0 {
 entry:
-  %0 = load i32, i32* @thl_x, align 4
+  %0 = load i32, ptr @thl_x, align 4
   %call = tail call i32 @_Z5gfunci(i32 %0)
-  %1 = load i32, i32* @thl_x, align 4
+  %1 = load i32, ptr @thl_x, align 4
   %call1 = tail call i32 @_Z5gfunci(i32 %1)
   ret i32 1
 }
 
 ;HOIST0-LABEL: _Z2f1i
 ;HOIST0:     entry:
-;HOIST0-NEXT:  %tls_bitcast = bitcast i32* @thl_x to i32*
-;HOIST0-NEXT:  %0 = load i32, i32* %tls_bitcast, align 4
+;HOIST0-NEXT:  %tls_bitcast = bitcast ptr @thl_x to ptr
+;HOIST0-NEXT:  %0 = load i32, ptr %tls_bitcast, align 4
 ;HOIST0-NEXT:  %call = tail call i32 @_Z5gfunci(i32 %0)
-;HOIST0-NEXT:  %1 = load i32, i32* %tls_bitcast, align 4
+;HOIST0-NEXT:  %1 = load i32, ptr %tls_bitcast, align 4
 ;HOIST0-NEXT:  %call1 = tail call i32 @_Z5gfunci(i32 %1)
 ;HOIST0-NEXT:  ret i32 1
 
 ;HOIST2-LABEL: _Z2f1i
 ;HOIST2:     entry:
-;HOIST2-NEXT:  %0 = load i32, i32* @thl_x, align 4
+;HOIST2-NEXT:  %0 = load i32, ptr @thl_x, align 4
 ;HOIST2-NEXT:  %call = tail call i32 @_Z5gfunci(i32 %0)
-;HOIST2-NEXT:  %1 = load i32, i32* @thl_x, align 4
+;HOIST2-NEXT:  %1 = load i32, ptr @thl_x, align 4
 ;HOIST2-NEXT:  %call1 = tail call i32 @_Z5gfunci(i32 %1)
 ;HOIST2-NEXT:  ret i32 1
 
 declare i32 @_Z5gfunci(i32) local_unnamed_addr #1
 
 ; Function Attrs: uwtable
-define weak_odr hidden i32* @_ZTW5thl_x() local_unnamed_addr #2 comdat {
-  ret i32* @thl_x
+define weak_odr hidden ptr @_ZTW5thl_x() local_unnamed_addr #2 comdat {
+  ret ptr @thl_x
 }
 
 attributes #0 = { mustprogress uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" }


        

