[llvm] ab232c9 - [PowerPC] Convert tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 4 03:11:35 PDT 2023


Author: Nikita Popov
Date: 2023-04-04T12:11:26+02:00
New Revision: ab232c9ddf617530b958b7fbddf99b5d5d708897

URL: https://github.com/llvm/llvm-project/commit/ab232c9ddf617530b958b7fbddf99b5d5d708897
DIFF: https://github.com/llvm/llvm-project/commit/ab232c9ddf617530b958b7fbddf99b5d5d708897.diff

LOG: [PowerPC] Convert tests to opaque pointers (NFC)
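
The conversion drops the -opaque-pointers=0 flag from the RUN lines and rewrites typed pointer types (i8*, i64*, double*) to the opaque ptr type; the intermediate bitcasts become unnecessary and are removed, so the unnamed SSA values that follow them are renumbered. A minimal sketch of the pattern, using an illustrative function @example that is not taken from these tests:

; Typed-pointer form: loading an i64 through an i8* needs a bitcast.
define i64 @example(i8* %p) {
  %q = bitcast i8* %p to i64*
  %v = load i64, i64* %q, align 8
  ret i64 %v
}

; Opaque-pointer form: the load uses %p directly and the bitcast is gone.
define i64 @example(ptr %p) {
  %v = load i64, ptr %p, align 8
  ret i64 %v
}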

Added: 
    

Modified: 
    llvm/test/CodeGen/PowerPC/common-chain.ll
    llvm/test/CodeGen/PowerPC/loop-instr-form-prepare.ll
    llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/PowerPC/common-chain.ll b/llvm/test/CodeGen/PowerPC/common-chain.ll
index fc2d9ac9ad53..ea8a72e7d11e 100644
--- a/llvm/test/CodeGen/PowerPC/common-chain.ll
+++ b/llvm/test/CodeGen/PowerPC/common-chain.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 -ppc-asm-full-reg-names -verify-machineinstrs -ppc-formprep-chain-commoning \
+; RUN: llc -ppc-asm-full-reg-names -verify-machineinstrs -ppc-formprep-chain-commoning \
 ; RUN:   -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 < %s | FileCheck %s
 
 ; addresses:
@@ -32,7 +32,7 @@
 ;   return sum;
 ; }
 ;
-define i64 @two_chain_same_offset_succ(i8* %p, i64 %offset, i64 %base1, i64 %n) {
+define i64 @two_chain_same_offset_succ(ptr %p, i64 %offset, i64 %base1, i64 %n) {
 ; CHECK-LABEL: two_chain_same_offset_succ:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpdi r6, 0
@@ -80,24 +80,20 @@ for.body:                                         ; preds = %entry, %for.body
   %i.047 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
   %add = add i64 %i.047, %base1
   %add.ptr9.idx = add i64 %add, %offset
-  %add.ptr9 = getelementptr inbounds i8, i8* %p, i64 %add.ptr9.idx
-  %0 = bitcast i8* %add.ptr9 to i64*
-  %1 = load i64, i64* %0, align 8
+  %add.ptr9 = getelementptr inbounds i8, ptr %p, i64 %add.ptr9.idx
+  %0 = load i64, ptr %add.ptr9, align 8
   %add.ptr10.idx = add i64 %add, %mul
-  %add.ptr10 = getelementptr inbounds i8, i8* %p, i64 %add.ptr10.idx
-  %2 = bitcast i8* %add.ptr10 to i64*
-  %3 = load i64, i64* %2, align 8
+  %add.ptr10 = getelementptr inbounds i8, ptr %p, i64 %add.ptr10.idx
+  %1 = load i64, ptr %add.ptr10, align 8
   %add.ptr11.idx = add i64 %add, %mul2
-  %add.ptr11 = getelementptr inbounds i8, i8* %p, i64 %add.ptr11.idx
-  %4 = bitcast i8* %add.ptr11 to i64*
-  %5 = load i64, i64* %4, align 8
+  %add.ptr11 = getelementptr inbounds i8, ptr %p, i64 %add.ptr11.idx
+  %2 = load i64, ptr %add.ptr11, align 8
   %add.ptr12.idx = add i64 %add, %mul4
-  %add.ptr12 = getelementptr inbounds i8, i8* %p, i64 %add.ptr12.idx
-  %6 = bitcast i8* %add.ptr12 to i64*
-  %7 = load i64, i64* %6, align 8
-  %mul13 = mul i64 %3, %1
-  %mul14 = mul i64 %mul13, %5
-  %mul15 = mul i64 %mul14, %7
+  %add.ptr12 = getelementptr inbounds i8, ptr %p, i64 %add.ptr12.idx
+  %3 = load i64, ptr %add.ptr12, align 8
+  %mul13 = mul i64 %1, %0
+  %mul14 = mul i64 %mul13, %2
+  %mul15 = mul i64 %mul14, %3
   %add16 = add i64 %mul15, %sum.048
   %inc = add nuw nsw i64 %i.047, 1
   %exitcond.not = icmp eq i64 %inc, %n
@@ -137,7 +133,7 @@ for.body:                                         ; preds = %entry, %for.body
 ;   return sum;
 ; }
 ;
-define i64 @not_perfect_chain_all_same_offset_fail(i8* %p, i64 %offset, i64 %base1, i64 %n) {
+define i64 @not_perfect_chain_all_same_offset_fail(ptr %p, i64 %offset, i64 %base1, i64 %n) {
 ; CHECK-LABEL: not_perfect_chain_all_same_offset_fail:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpdi r6, 0
@@ -189,29 +185,24 @@ for.body:                                         ; preds = %entry, %for.body
   %i.059 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
   %add = add i64 %i.059, %base1
   %add.ptr12.idx = add i64 %add, %offset
-  %add.ptr12 = getelementptr inbounds i8, i8* %p, i64 %add.ptr12.idx
-  %0 = bitcast i8* %add.ptr12 to i64*
-  %1 = load i64, i64* %0, align 8
+  %add.ptr12 = getelementptr inbounds i8, ptr %p, i64 %add.ptr12.idx
+  %0 = load i64, ptr %add.ptr12, align 8
   %add.ptr13.idx = add i64 %add, %mul
-  %add.ptr13 = getelementptr inbounds i8, i8* %p, i64 %add.ptr13.idx
-  %2 = bitcast i8* %add.ptr13 to i64*
-  %3 = load i64, i64* %2, align 8
+  %add.ptr13 = getelementptr inbounds i8, ptr %p, i64 %add.ptr13.idx
+  %1 = load i64, ptr %add.ptr13, align 8
   %add.ptr14.idx = add i64 %add, %mul2
-  %add.ptr14 = getelementptr inbounds i8, i8* %p, i64 %add.ptr14.idx
-  %4 = bitcast i8* %add.ptr14 to i64*
-  %5 = load i64, i64* %4, align 8
+  %add.ptr14 = getelementptr inbounds i8, ptr %p, i64 %add.ptr14.idx
+  %2 = load i64, ptr %add.ptr14, align 8
   %add.ptr15.idx = add i64 %add, %mul4
-  %add.ptr15 = getelementptr inbounds i8, i8* %p, i64 %add.ptr15.idx
-  %6 = bitcast i8* %add.ptr15 to i64*
-  %7 = load i64, i64* %6, align 8
+  %add.ptr15 = getelementptr inbounds i8, ptr %p, i64 %add.ptr15.idx
+  %3 = load i64, ptr %add.ptr15, align 8
   %add.ptr16.idx = add i64 %add, %mul6
-  %add.ptr16 = getelementptr inbounds i8, i8* %p, i64 %add.ptr16.idx
-  %8 = bitcast i8* %add.ptr16 to i64*
-  %9 = load i64, i64* %8, align 8
-  %mul17 = mul i64 %3, %1
-  %mul18 = mul i64 %mul17, %5
-  %mul19 = mul i64 %mul18, %7
-  %mul20 = mul i64 %mul19, %9
+  %add.ptr16 = getelementptr inbounds i8, ptr %p, i64 %add.ptr16.idx
+  %4 = load i64, ptr %add.ptr16, align 8
+  %mul17 = mul i64 %1, %0
+  %mul18 = mul i64 %mul17, %2
+  %mul19 = mul i64 %mul18, %3
+  %mul20 = mul i64 %mul19, %4
   %add21 = add i64 %mul20, %sum.060
   %inc = add nuw nsw i64 %i.059, 1
   %exitcond.not = icmp eq i64 %inc, %n
@@ -242,7 +233,7 @@ for.body:                                         ; preds = %entry, %for.body
 ;   return sum;
 ; }
 ;
-define i64 @no_enough_elements_fail(i8* %p, i64 %offset, i64 %base1, i64 %n) {
+define i64 @no_enough_elements_fail(ptr %p, i64 %offset, i64 %base1, i64 %n) {
 ; CHECK-LABEL: no_enough_elements_fail:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpdi r6, 0
@@ -282,19 +273,16 @@ for.body:                                         ; preds = %entry, %for.body
   %sum.034 = phi i64 [ %add10, %for.body ], [ 0, %entry ]
   %i.033 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
   %add.ptr5.idx = add i64 %i.033, %base1
-  %add.ptr5 = getelementptr inbounds i8, i8* %p, i64 %add.ptr5.idx
-  %0 = bitcast i8* %add.ptr5 to i64*
-  %1 = load i64, i64* %0, align 8
+  %add.ptr5 = getelementptr inbounds i8, ptr %p, i64 %add.ptr5.idx
+  %0 = load i64, ptr %add.ptr5, align 8
   %add.ptr6.idx = add i64 %add.ptr5.idx, %mul
-  %add.ptr6 = getelementptr inbounds i8, i8* %p, i64 %add.ptr6.idx
-  %2 = bitcast i8* %add.ptr6 to i64*
-  %3 = load i64, i64* %2, align 8
+  %add.ptr6 = getelementptr inbounds i8, ptr %p, i64 %add.ptr6.idx
+  %1 = load i64, ptr %add.ptr6, align 8
   %add.ptr7.idx = add i64 %add.ptr5.idx, %mul1
-  %add.ptr7 = getelementptr inbounds i8, i8* %p, i64 %add.ptr7.idx
-  %4 = bitcast i8* %add.ptr7 to i64*
-  %5 = load i64, i64* %4, align 8
-  %mul8 = mul i64 %3, %1
-  %mul9 = mul i64 %mul8, %5
+  %add.ptr7 = getelementptr inbounds i8, ptr %p, i64 %add.ptr7.idx
+  %2 = load i64, ptr %add.ptr7, align 8
+  %mul8 = mul i64 %1, %0
+  %mul9 = mul i64 %mul8, %2
   %add10 = add i64 %mul9, %sum.034
   %inc = add nuw nsw i64 %i.033, 1
   %exitcond.not = icmp eq i64 %inc, %n
@@ -330,7 +318,7 @@ for.body:                                         ; preds = %entry, %for.body
 ;   return sum;
 ; }
 ;
-define i64 @no_reuseable_offset_fail(i8* %p, i64 %offset, i64 %base1, i64 %n) {
+define i64 @no_reuseable_offset_fail(ptr %p, i64 %offset, i64 %base1, i64 %n) {
 ; CHECK-LABEL: no_reuseable_offset_fail:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpdi r6, 0
@@ -375,24 +363,20 @@ for.body:                                         ; preds = %entry, %for.body
   %sum.046 = phi i64 [ %add15, %for.body ], [ 0, %entry ]
   %i.045 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
   %add.ptr8.idx = add i64 %i.045, %base1
-  %add.ptr8 = getelementptr inbounds i8, i8* %p, i64 %add.ptr8.idx
-  %0 = bitcast i8* %add.ptr8 to i64*
-  %1 = load i64, i64* %0, align 8
+  %add.ptr8 = getelementptr inbounds i8, ptr %p, i64 %add.ptr8.idx
+  %0 = load i64, ptr %add.ptr8, align 8
   %add.ptr9.idx = add i64 %add.ptr8.idx, %mul
-  %add.ptr9 = getelementptr inbounds i8, i8* %p, i64 %add.ptr9.idx
-  %2 = bitcast i8* %add.ptr9 to i64*
-  %3 = load i64, i64* %2, align 8
+  %add.ptr9 = getelementptr inbounds i8, ptr %p, i64 %add.ptr9.idx
+  %1 = load i64, ptr %add.ptr9, align 8
   %add.ptr10.idx = add i64 %add.ptr8.idx, %mul1
-  %add.ptr10 = getelementptr inbounds i8, i8* %p, i64 %add.ptr10.idx
-  %4 = bitcast i8* %add.ptr10 to i64*
-  %5 = load i64, i64* %4, align 8
+  %add.ptr10 = getelementptr inbounds i8, ptr %p, i64 %add.ptr10.idx
+  %2 = load i64, ptr %add.ptr10, align 8
   %add.ptr11.idx = add i64 %add.ptr8.idx, %mul3
-  %add.ptr11 = getelementptr inbounds i8, i8* %p, i64 %add.ptr11.idx
-  %6 = bitcast i8* %add.ptr11 to i64*
-  %7 = load i64, i64* %6, align 8
-  %mul12 = mul i64 %3, %1
-  %mul13 = mul i64 %mul12, %5
-  %mul14 = mul i64 %mul13, %7
+  %add.ptr11 = getelementptr inbounds i8, ptr %p, i64 %add.ptr11.idx
+  %3 = load i64, ptr %add.ptr11, align 8
+  %mul12 = mul i64 %1, %0
+  %mul13 = mul i64 %mul12, %2
+  %mul14 = mul i64 %mul13, %3
   %add15 = add i64 %mul14, %sum.046
   %inc = add nuw nsw i64 %i.045, 1
   %exitcond.not = icmp eq i64 %inc, %n
@@ -437,7 +421,7 @@ for.body:                                         ; preds = %entry, %for.body
 ;   return sum;
 ; }
 ;
-define i64 @not_same_offset_fail(i8* %p, i64 %offset, i64 %base1, i64 %n) {
+define i64 @not_same_offset_fail(ptr %p, i64 %offset, i64 %base1, i64 %n) {
 ; CHECK-LABEL: not_same_offset_fail:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpdi r6, 0
@@ -497,34 +481,28 @@ for.body:                                         ; preds = %entry, %for.body
   %i.071 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
   %add = add i64 %i.071, %base1
   %add.ptr15.idx = add i64 %add, %offset
-  %add.ptr15 = getelementptr inbounds i8, i8* %p, i64 %add.ptr15.idx
-  %0 = bitcast i8* %add.ptr15 to i64*
-  %1 = load i64, i64* %0, align 8
+  %add.ptr15 = getelementptr inbounds i8, ptr %p, i64 %add.ptr15.idx
+  %0 = load i64, ptr %add.ptr15, align 8
   %add.ptr16.idx = add i64 %add, %mul
-  %add.ptr16 = getelementptr inbounds i8, i8* %p, i64 %add.ptr16.idx
-  %2 = bitcast i8* %add.ptr16 to i64*
-  %3 = load i64, i64* %2, align 8
+  %add.ptr16 = getelementptr inbounds i8, ptr %p, i64 %add.ptr16.idx
+  %1 = load i64, ptr %add.ptr16, align 8
   %add.ptr17.idx = add i64 %add, %mul2
-  %add.ptr17 = getelementptr inbounds i8, i8* %p, i64 %add.ptr17.idx
-  %4 = bitcast i8* %add.ptr17 to i64*
-  %5 = load i64, i64* %4, align 8
+  %add.ptr17 = getelementptr inbounds i8, ptr %p, i64 %add.ptr17.idx
+  %2 = load i64, ptr %add.ptr17, align 8
   %add.ptr18.idx = add i64 %add, %mul4
-  %add.ptr18 = getelementptr inbounds i8, i8* %p, i64 %add.ptr18.idx
-  %6 = bitcast i8* %add.ptr18 to i64*
-  %7 = load i64, i64* %6, align 8
+  %add.ptr18 = getelementptr inbounds i8, ptr %p, i64 %add.ptr18.idx
+  %3 = load i64, ptr %add.ptr18, align 8
   %add.ptr19.idx = add i64 %add, %mul6
-  %add.ptr19 = getelementptr inbounds i8, i8* %p, i64 %add.ptr19.idx
-  %8 = bitcast i8* %add.ptr19 to i64*
-  %9 = load i64, i64* %8, align 8
+  %add.ptr19 = getelementptr inbounds i8, ptr %p, i64 %add.ptr19.idx
+  %4 = load i64, ptr %add.ptr19, align 8
   %add.ptr20.idx = add i64 %add, %mul8
-  %add.ptr20 = getelementptr inbounds i8, i8* %p, i64 %add.ptr20.idx
-  %10 = bitcast i8* %add.ptr20 to i64*
-  %11 = load i64, i64* %10, align 8
-  %mul21 = mul i64 %3, %1
-  %mul22 = mul i64 %mul21, %5
-  %mul23 = mul i64 %mul22, %7
-  %mul24 = mul i64 %mul23, %9
-  %mul25 = mul i64 %mul24, %11
+  %add.ptr20 = getelementptr inbounds i8, ptr %p, i64 %add.ptr20.idx
+  %5 = load i64, ptr %add.ptr20, align 8
+  %mul21 = mul i64 %1, %0
+  %mul22 = mul i64 %mul21, %2
+  %mul23 = mul i64 %mul22, %3
+  %mul24 = mul i64 %mul23, %4
+  %mul25 = mul i64 %mul24, %5
   %add26 = add i64 %mul25, %sum.072
   %inc = add nuw nsw i64 %i.071, 1
   %exitcond.not = icmp eq i64 %inc, %n
@@ -561,7 +539,7 @@ for.body:                                         ; preds = %entry, %for.body
 ;   return sum;
 ; }
 ;
-define i64 @two_chain_different_offsets_succ(i8* %p, i64 %offset, i64 %base1, i64 %n) {
+define i64 @two_chain_different_offsets_succ(ptr %p, i64 %offset, i64 %base1, i64 %n) {
 ; CHECK-LABEL: two_chain_different_offsets_succ:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpdi r6, 0
@@ -609,24 +587,20 @@ for.body:                                         ; preds = %entry, %for.body
   %i.047 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
   %add = add i64 %i.047, %base1
   %add.ptr9.idx = add i64 %add, %offset
-  %add.ptr9 = getelementptr inbounds i8, i8* %p, i64 %add.ptr9.idx
-  %0 = bitcast i8* %add.ptr9 to i64*
-  %1 = load i64, i64* %0, align 8
+  %add.ptr9 = getelementptr inbounds i8, ptr %p, i64 %add.ptr9.idx
+  %0 = load i64, ptr %add.ptr9, align 8
   %add.ptr10.idx = add i64 %add, %mul
-  %add.ptr10 = getelementptr inbounds i8, i8* %p, i64 %add.ptr10.idx
-  %2 = bitcast i8* %add.ptr10 to i64*
-  %3 = load i64, i64* %2, align 8
+  %add.ptr10 = getelementptr inbounds i8, ptr %p, i64 %add.ptr10.idx
+  %1 = load i64, ptr %add.ptr10, align 8
   %add.ptr11.idx = add i64 %add, %mul2
-  %add.ptr11 = getelementptr inbounds i8, i8* %p, i64 %add.ptr11.idx
-  %4 = bitcast i8* %add.ptr11 to i64*
-  %5 = load i64, i64* %4, align 8
+  %add.ptr11 = getelementptr inbounds i8, ptr %p, i64 %add.ptr11.idx
+  %2 = load i64, ptr %add.ptr11, align 8
   %add.ptr12.idx = add i64 %add, %mul4
-  %add.ptr12 = getelementptr inbounds i8, i8* %p, i64 %add.ptr12.idx
-  %6 = bitcast i8* %add.ptr12 to i64*
-  %7 = load i64, i64* %6, align 8
-  %mul13 = mul i64 %3, %1
-  %mul14 = mul i64 %mul13, %5
-  %mul15 = mul i64 %mul14, %7
+  %add.ptr12 = getelementptr inbounds i8, ptr %p, i64 %add.ptr12.idx
+  %3 = load i64, ptr %add.ptr12, align 8
+  %mul13 = mul i64 %1, %0
+  %mul14 = mul i64 %mul13, %2
+  %mul15 = mul i64 %mul14, %3
   %add16 = add i64 %mul15, %sum.048
   %inc = add nuw nsw i64 %i.047, 1
   %exitcond.not = icmp eq i64 %inc, %n
@@ -663,7 +637,7 @@ for.body:                                         ; preds = %entry, %for.body
 ;   return sum;
 ; }
 ;
-define i64 @two_chain_two_bases_succ(i8* %p, i64 %offset, i64 %base1, i64 %base2, i64 %n) {
+define i64 @two_chain_two_bases_succ(ptr %p, i64 %offset, i64 %base1, i64 %base2, i64 %n) {
 ; CHECK-LABEL: two_chain_two_bases_succ:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpdi r7, 0
@@ -708,26 +682,22 @@ for.body:                                         ; preds = %entry, %for.body
   %i.045 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
   %add = add i64 %i.045, %base1
   %add.ptr8.idx = add i64 %add, %offset
-  %add.ptr8 = getelementptr inbounds i8, i8* %p, i64 %add.ptr8.idx
-  %0 = bitcast i8* %add.ptr8 to i64*
-  %1 = load i64, i64* %0, align 8
+  %add.ptr8 = getelementptr inbounds i8, ptr %p, i64 %add.ptr8.idx
+  %0 = load i64, ptr %add.ptr8, align 8
   %add1 = add i64 %i.045, %mul
   %add.ptr9.idx = add i64 %add1, %base1
-  %add.ptr9 = getelementptr inbounds i8, i8* %p, i64 %add.ptr9.idx
-  %2 = bitcast i8* %add.ptr9 to i64*
-  %3 = load i64, i64* %2, align 8
+  %add.ptr9 = getelementptr inbounds i8, ptr %p, i64 %add.ptr9.idx
+  %1 = load i64, ptr %add.ptr9, align 8
   %add2 = add i64 %i.045, %base2
   %add.ptr10.idx = add i64 %add2, %offset
-  %add.ptr10 = getelementptr inbounds i8, i8* %p, i64 %add.ptr10.idx
-  %4 = bitcast i8* %add.ptr10 to i64*
-  %5 = load i64, i64* %4, align 8
+  %add.ptr10 = getelementptr inbounds i8, ptr %p, i64 %add.ptr10.idx
+  %2 = load i64, ptr %add.ptr10, align 8
   %add.ptr11.idx = add i64 %add2, %mul
-  %add.ptr11 = getelementptr inbounds i8, i8* %p, i64 %add.ptr11.idx
-  %6 = bitcast i8* %add.ptr11 to i64*
-  %7 = load i64, i64* %6, align 8
-  %mul12 = mul i64 %3, %1
-  %mul13 = mul i64 %mul12, %5
-  %mul14 = mul i64 %mul13, %7
+  %add.ptr11 = getelementptr inbounds i8, ptr %p, i64 %add.ptr11.idx
+  %3 = load i64, ptr %add.ptr11, align 8
+  %mul12 = mul i64 %1, %0
+  %mul13 = mul i64 %mul12, %2
+  %mul14 = mul i64 %mul13, %3
   %add15 = add i64 %mul14, %sum.046
   %inc = add nuw nsw i64 %i.045, 1
   %exitcond.not = icmp eq i64 %inc, %n
@@ -748,7 +718,7 @@ for.body:                                         ; preds = %entry, %for.body
 ;   return 0;
 ; }
 ;
-define signext i32 @spill_reduce_succ(double* %input1, double* %input2, double* %output, i64 %m, i64 %inc1, i64 %inc2, i64 %inc3, i64 %inc4, i64 %inc) {
+define signext i32 @spill_reduce_succ(ptr %input1, ptr %input2, ptr %output, i64 %m, i64 %inc1, i64 %inc2, i64 %inc3, i64 %inc4, i64 %inc) {
 ; CHECK-LABEL: spill_reduce_succ:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpdi r6, 0
@@ -771,14 +741,15 @@ define signext i32 @spill_reduce_succ(double* %input1, double* %input2, double*
 ; CHECK-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
 ; CHECK-NEXT:    std r31, -8(r1) # 8-byte Folded Spill
 ; CHECK-NEXT:    std r2, -152(r1) # 8-byte Folded Spill
-; CHECK-NEXT:    std r9, -160(r1) # 8-byte Folded Spill
+; CHECK-NEXT:    std r9, -184(r1) # 8-byte Folded Spill
 ; CHECK-NEXT:    std r8, -176(r1) # 8-byte Folded Spill
 ; CHECK-NEXT:    std r7, -168(r1) # 8-byte Folded Spill
+; CHECK-NEXT:    std r3, -160(r1) # 8-byte Folded Spill
 ; CHECK-NEXT:    ble cr0, .LBB7_7
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    sldi r6, r6, 2
 ; CHECK-NEXT:    li r7, 1
-; CHECK-NEXT:    mr r12, r10
+; CHECK-NEXT:    mr r30, r10
 ; CHECK-NEXT:    cmpdi r6, 1
 ; CHECK-NEXT:    iselgt r7, r6, r7
 ; CHECK-NEXT:    addi r8, r7, -1
@@ -786,193 +757,176 @@ define signext i32 @spill_reduce_succ(double* %input1, double* %input2, double*
 ; CHECK-NEXT:    cmpldi r8, 3
 ; CHECK-NEXT:    blt cr0, .LBB7_4
 ; CHECK-NEXT:  # %bb.2: # %for.body.preheader.new
-; CHECK-NEXT:    rldicl r7, r7, 62, 2
-; CHECK-NEXT:    sldi r10, r12, 2
-; CHECK-NEXT:    ld r2, -168(r1) # 8-byte Folded Reload
-; CHECK-NEXT:    rldicl r7, r7, 2, 1
-; CHECK-NEXT:    std r7, -184(r1) # 8-byte Folded Spill
-; CHECK-NEXT:    ld r7, -160(r1) # 8-byte Folded Reload
-; CHECK-NEXT:    add r8, r7, r10
-; CHECK-NEXT:    mr r22, r7
-; CHECK-NEXT:    mr r7, r4
-; CHECK-NEXT:    mr r4, r3
-; CHECK-NEXT:    ld r3, -176(r1) # 8-byte Folded Reload
-; CHECK-NEXT:    sldi r8, r8, 3
+; CHECK-NEXT:    ld r14, -168(r1) # 8-byte Folded Reload
+; CHECK-NEXT:    mulli r24, r30, 24
+; CHECK-NEXT:    ld r16, -184(r1) # 8-byte Folded Reload
+; CHECK-NEXT:    ld r15, -176(r1) # 8-byte Folded Reload
+; CHECK-NEXT:    ld r3, -160(r1) # 8-byte Folded Reload
+; CHECK-NEXT:    rldicl r0, r7, 62, 2
+; CHECK-NEXT:    sldi r11, r30, 5
+; CHECK-NEXT:    sldi r19, r30, 4
+; CHECK-NEXT:    sldi r7, r14, 3
+; CHECK-NEXT:    add r14, r30, r14
+; CHECK-NEXT:    sldi r10, r16, 3
+; CHECK-NEXT:    sldi r12, r15, 3
+; CHECK-NEXT:    add r16, r30, r16
+; CHECK-NEXT:    add r15, r30, r15
+; CHECK-NEXT:    add r27, r11, r7
+; CHECK-NEXT:    add r22, r24, r7
+; CHECK-NEXT:    add r17, r19, r7
+; CHECK-NEXT:    sldi r2, r14, 3
+; CHECK-NEXT:    add r26, r24, r10
+; CHECK-NEXT:    add r25, r24, r12
+; CHECK-NEXT:    add r21, r19, r10
+; CHECK-NEXT:    add r20, r19, r12
+; CHECK-NEXT:    add r8, r11, r10
+; CHECK-NEXT:    sldi r16, r16, 3
+; CHECK-NEXT:    add r29, r5, r27
+; CHECK-NEXT:    add r28, r4, r27
+; CHECK-NEXT:    add r27, r3, r27
+; CHECK-NEXT:    add r24, r5, r22
+; CHECK-NEXT:    add r23, r4, r22
+; CHECK-NEXT:    add r22, r3, r22
+; CHECK-NEXT:    add r19, r5, r17
+; CHECK-NEXT:    add r18, r4, r17
+; CHECK-NEXT:    add r17, r3, r17
+; CHECK-NEXT:    add r14, r5, r2
+; CHECK-NEXT:    add r31, r4, r2
+; CHECK-NEXT:    add r2, r3, r2
 ; CHECK-NEXT:    add r9, r5, r8
-; CHECK-NEXT:    add r8, r3, r10
-; CHECK-NEXT:    add r10, r2, r10
-; CHECK-NEXT:    sldi r10, r10, 3
-; CHECK-NEXT:    sldi r8, r8, 3
-; CHECK-NEXT:    add r30, r5, r10
-; CHECK-NEXT:    add r29, r7, r10
-; CHECK-NEXT:    add r28, r4, r10
-; CHECK-NEXT:    sldi r10, r12, 1
+; CHECK-NEXT:    add r8, r11, r12
+; CHECK-NEXT:    add r26, r5, r26
+; CHECK-NEXT:    add r25, r5, r25
+; CHECK-NEXT:    add r21, r5, r21
+; CHECK-NEXT:    add r20, r5, r20
+; CHECK-NEXT:    add r16, r5, r16
 ; CHECK-NEXT:    add r8, r5, r8
-; CHECK-NEXT:    add r11, r12, r10
-; CHECK-NEXT:    add r0, r22, r11
-; CHECK-NEXT:    sldi r0, r0, 3
-; CHECK-NEXT:    add r27, r5, r0
-; CHECK-NEXT:    add r0, r3, r11
-; CHECK-NEXT:    add r11, r2, r11
-; CHECK-NEXT:    sldi r11, r11, 3
-; CHECK-NEXT:    sldi r0, r0, 3
-; CHECK-NEXT:    add r25, r5, r11
-; CHECK-NEXT:    add r24, r7, r11
-; CHECK-NEXT:    add r23, r4, r11
-; CHECK-NEXT:    add r11, r22, r10
-; CHECK-NEXT:    add r26, r5, r0
-; CHECK-NEXT:    mr r0, r22
-; CHECK-NEXT:    sldi r11, r11, 3
-; CHECK-NEXT:    add r22, r5, r11
-; CHECK-NEXT:    add r11, r3, r10
-; CHECK-NEXT:    add r10, r2, r10
-; CHECK-NEXT:    sldi r10, r10, 3
-; CHECK-NEXT:    sldi r11, r11, 3
-; CHECK-NEXT:    add r20, r5, r10
-; CHECK-NEXT:    add r19, r7, r10
-; CHECK-NEXT:    add r18, r4, r10
-; CHECK-NEXT:    add r10, r12, r0
-; CHECK-NEXT:    add r21, r5, r11
-; CHECK-NEXT:    sldi r11, r2, 3
-; CHECK-NEXT:    sldi r10, r10, 3
-; CHECK-NEXT:    add r17, r5, r10
-; CHECK-NEXT:    add r10, r12, r3
-; CHECK-NEXT:    sldi r10, r10, 3
-; CHECK-NEXT:    add r16, r5, r10
-; CHECK-NEXT:    add r10, r12, r2
-; CHECK-NEXT:    sldi r10, r10, 3
-; CHECK-NEXT:    add r15, r5, r10
-; CHECK-NEXT:    add r14, r7, r10
-; CHECK-NEXT:    add r31, r4, r10
-; CHECK-NEXT:    sldi r10, r3, 3
-; CHECK-NEXT:    mr r3, r4
-; CHECK-NEXT:    mr r4, r7
-; CHECK-NEXT:    ld r7, -160(r1) # 8-byte Folded Reload
-; CHECK-NEXT:    sub r0, r10, r11
-; CHECK-NEXT:    sldi r10, r7, 3
-; CHECK-NEXT:    ld r7, -184(r1) # 8-byte Folded Reload
-; CHECK-NEXT:    sub r2, r10, r11
-; CHECK-NEXT:    li r11, 0
-; CHECK-NEXT:    mr r10, r12
-; CHECK-NEXT:    addi r7, r7, -4
-; CHECK-NEXT:    rldicl r7, r7, 62, 2
-; CHECK-NEXT:    addi r7, r7, 1
-; CHECK-NEXT:    mtctr r7
-; CHECK-NEXT:    sldi r7, r12, 5
+; CHECK-NEXT:    rldicl r3, r0, 2, 1
+; CHECK-NEXT:    addi r3, r3, -4
+; CHECK-NEXT:    sub r0, r12, r7
+; CHECK-NEXT:    sub r12, r10, r7
+; CHECK-NEXT:    li r7, 0
+; CHECK-NEXT:    mr r10, r30
+; CHECK-NEXT:    sldi r15, r15, 3
+; CHECK-NEXT:    add r15, r5, r15
+; CHECK-NEXT:    rldicl r3, r3, 62, 2
+; CHECK-NEXT:    addi r3, r3, 1
+; CHECK-NEXT:    mtctr r3
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB7_3: # %for.body
 ; CHECK-NEXT:    #
-; CHECK-NEXT:    lfd f0, 0(r31)
-; CHECK-NEXT:    lfd f1, 0(r14)
-; CHECK-NEXT:    add r10, r10, r12
-; CHECK-NEXT:    add r10, r10, r12
+; CHECK-NEXT:    lfd f0, 0(r2)
+; CHECK-NEXT:    lfd f1, 0(r31)
+; CHECK-NEXT:    add r3, r10, r30
+; CHECK-NEXT:    add r3, r3, r30
 ; CHECK-NEXT:    xsmuldp f0, f0, f1
-; CHECK-NEXT:    lfd f1, 0(r15)
-; CHECK-NEXT:    add r10, r10, r12
-; CHECK-NEXT:    add r10, r10, r12
+; CHECK-NEXT:    lfd f1, 0(r14)
+; CHECK-NEXT:    add r3, r3, r30
+; CHECK-NEXT:    add r10, r3, r30
 ; CHECK-NEXT:    xsadddp f0, f1, f0
-; CHECK-NEXT:    stfd f0, 0(r15)
-; CHECK-NEXT:    add r15, r15, r7
-; CHECK-NEXT:    lfdx f0, r31, r0
-; CHECK-NEXT:    lfdx f1, r14, r0
+; CHECK-NEXT:    stfd f0, 0(r14)
+; CHECK-NEXT:    add r14, r14, r11
+; CHECK-NEXT:    lfdx f0, r2, r0
+; CHECK-NEXT:    lfdx f1, r31, r0
 ; CHECK-NEXT:    xsmuldp f0, f0, f1
-; CHECK-NEXT:    lfdx f1, r16, r11
+; CHECK-NEXT:    lfdx f1, r15, r7
 ; CHECK-NEXT:    xsadddp f0, f1, f0
-; CHECK-NEXT:    stfdx f0, r16, r11
-; CHECK-NEXT:    lfdx f0, r31, r2
-; CHECK-NEXT:    lfdx f1, r14, r2
-; CHECK-NEXT:    add r31, r31, r7
-; CHECK-NEXT:    add r14, r14, r7
+; CHECK-NEXT:    stfdx f0, r15, r7
+; CHECK-NEXT:    lfdx f0, r2, r12
+; CHECK-NEXT:    lfdx f1, r31, r12
+; CHECK-NEXT:    add r2, r2, r11
+; CHECK-NEXT:    add r31, r31, r11
 ; CHECK-NEXT:    xsmuldp f0, f0, f1
-; CHECK-NEXT:    lfdx f1, r17, r11
+; CHECK-NEXT:    lfdx f1, r16, r7
 ; CHECK-NEXT:    xsadddp f0, f1, f0
-; CHECK-NEXT:    stfdx f0, r17, r11
-; CHECK-NEXT:    lfd f0, 0(r18)
-; CHECK-NEXT:    lfd f1, 0(r19)
+; CHECK-NEXT:    stfdx f0, r16, r7
+; CHECK-NEXT:    lfd f0, 0(r17)
+; CHECK-NEXT:    lfd f1, 0(r18)
 ; CHECK-NEXT:    xsmuldp f0, f0, f1
-; CHECK-NEXT:    lfdx f1, r20, r11
+; CHECK-NEXT:    lfdx f1, r19, r7
 ; CHECK-NEXT:    xsadddp f0, f1, f0
-; CHECK-NEXT:    stfdx f0, r20, r11
-; CHECK-NEXT:    lfdx f0, r18, r0
-; CHECK-NEXT:    lfdx f1, r19, r0
+; CHECK-NEXT:    stfdx f0, r19, r7
+; CHECK-NEXT:    lfdx f0, r17, r0
+; CHECK-NEXT:    lfdx f1, r18, r0
 ; CHECK-NEXT:    xsmuldp f0, f0, f1
-; CHECK-NEXT:    lfdx f1, r21, r11
+; CHECK-NEXT:    lfdx f1, r20, r7
 ; CHECK-NEXT:    xsadddp f0, f1, f0
-; CHECK-NEXT:    stfdx f0, r21, r11
-; CHECK-NEXT:    lfdx f0, r18, r2
-; CHECK-NEXT:    lfdx f1, r19, r2
-; CHECK-NEXT:    add r18, r18, r7
-; CHECK-NEXT:    add r19, r19, r7
+; CHECK-NEXT:    stfdx f0, r20, r7
+; CHECK-NEXT:    lfdx f0, r17, r12
+; CHECK-NEXT:    lfdx f1, r18, r12
+; CHECK-NEXT:    add r17, r17, r11
+; CHECK-NEXT:    add r18, r18, r11
 ; CHECK-NEXT:    xsmuldp f0, f0, f1
-; CHECK-NEXT:    lfdx f1, r22, r11
+; CHECK-NEXT:    lfdx f1, r21, r7
 ; CHECK-NEXT:    xsadddp f0, f1, f0
-; CHECK-NEXT:    stfdx f0, r22, r11
-; CHECK-NEXT:    lfd f0, 0(r23)
-; CHECK-NEXT:    lfd f1, 0(r24)
+; CHECK-NEXT:    stfdx f0, r21, r7
+; CHECK-NEXT:    lfd f0, 0(r22)
+; CHECK-NEXT:    lfd f1, 0(r23)
 ; CHECK-NEXT:    xsmuldp f0, f0, f1
-; CHECK-NEXT:    lfdx f1, r25, r11
+; CHECK-NEXT:    lfdx f1, r24, r7
 ; CHECK-NEXT:    xsadddp f0, f1, f0
-; CHECK-NEXT:    stfdx f0, r25, r11
-; CHECK-NEXT:    lfdx f0, r23, r0
-; CHECK-NEXT:    lfdx f1, r24, r0
+; CHECK-NEXT:    stfdx f0, r24, r7
+; CHECK-NEXT:    lfdx f0, r22, r0
+; CHECK-NEXT:    lfdx f1, r23, r0
 ; CHECK-NEXT:    xsmuldp f0, f0, f1
-; CHECK-NEXT:    lfdx f1, r26, r11
+; CHECK-NEXT:    lfdx f1, r25, r7
 ; CHECK-NEXT:    xsadddp f0, f1, f0
-; CHECK-NEXT:    stfdx f0, r26, r11
-; CHECK-NEXT:    lfdx f0, r23, r2
-; CHECK-NEXT:    lfdx f1, r24, r2
-; CHECK-NEXT:    add r23, r23, r7
-; CHECK-NEXT:    add r24, r24, r7
+; CHECK-NEXT:    stfdx f0, r25, r7
+; CHECK-NEXT:    lfdx f0, r22, r12
+; CHECK-NEXT:    lfdx f1, r23, r12
+; CHECK-NEXT:    add r22, r22, r11
+; CHECK-NEXT:    add r23, r23, r11
 ; CHECK-NEXT:    xsmuldp f0, f0, f1
-; CHECK-NEXT:    lfdx f1, r27, r11
+; CHECK-NEXT:    lfdx f1, r26, r7
 ; CHECK-NEXT:    xsadddp f0, f1, f0
-; CHECK-NEXT:    stfdx f0, r27, r11
-; CHECK-NEXT:    lfd f0, 0(r28)
-; CHECK-NEXT:    lfd f1, 0(r29)
+; CHECK-NEXT:    stfdx f0, r26, r7
+; CHECK-NEXT:    lfd f0, 0(r27)
+; CHECK-NEXT:    lfd f1, 0(r28)
 ; CHECK-NEXT:    xsmuldp f0, f0, f1
-; CHECK-NEXT:    lfdx f1, r30, r11
+; CHECK-NEXT:    lfdx f1, r29, r7
 ; CHECK-NEXT:    xsadddp f0, f1, f0
-; CHECK-NEXT:    stfdx f0, r30, r11
-; CHECK-NEXT:    lfdx f0, r28, r0
-; CHECK-NEXT:    lfdx f1, r29, r0
+; CHECK-NEXT:    stfdx f0, r29, r7
+; CHECK-NEXT:    lfdx f0, r27, r0
+; CHECK-NEXT:    lfdx f1, r28, r0
 ; CHECK-NEXT:    xsmuldp f0, f0, f1
-; CHECK-NEXT:    lfdx f1, r8, r11
+; CHECK-NEXT:    lfdx f1, r8, r7
 ; CHECK-NEXT:    xsadddp f0, f1, f0
-; CHECK-NEXT:    stfdx f0, r8, r11
-; CHECK-NEXT:    lfdx f0, r28, r2
-; CHECK-NEXT:    lfdx f1, r29, r2
-; CHECK-NEXT:    add r28, r28, r7
-; CHECK-NEXT:    add r29, r29, r7
+; CHECK-NEXT:    stfdx f0, r8, r7
+; CHECK-NEXT:    lfdx f0, r27, r12
+; CHECK-NEXT:    lfdx f1, r28, r12
+; CHECK-NEXT:    add r27, r27, r11
+; CHECK-NEXT:    add r28, r28, r11
 ; CHECK-NEXT:    xsmuldp f0, f0, f1
-; CHECK-NEXT:    lfdx f1, r9, r11
+; CHECK-NEXT:    lfdx f1, r9, r7
 ; CHECK-NEXT:    xsadddp f0, f1, f0
-; CHECK-NEXT:    stfdx f0, r9, r11
-; CHECK-NEXT:    add r11, r11, r7
+; CHECK-NEXT:    stfdx f0, r9, r7
+; CHECK-NEXT:    add r7, r7, r11
 ; CHECK-NEXT:    bdnz .LBB7_3
 ; CHECK-NEXT:  .LBB7_4: # %for.cond.cleanup.loopexit.unr-lcssa
 ; CHECK-NEXT:    cmpldi r6, 0
 ; CHECK-NEXT:    beq cr0, .LBB7_7
 ; CHECK-NEXT:  # %bb.5: # %for.body.epil.preheader
-; CHECK-NEXT:    sldi r8, r12, 3
-; CHECK-NEXT:    ld r12, -176(r1) # 8-byte Folded Reload
-; CHECK-NEXT:    ld r7, -160(r1) # 8-byte Folded Reload
-; CHECK-NEXT:    add r12, r10, r12
-; CHECK-NEXT:    add r7, r10, r7
-; CHECK-NEXT:    sldi r0, r12, 3
-; CHECK-NEXT:    sldi r11, r7, 3
-; CHECK-NEXT:    add r12, r5, r0
-; CHECK-NEXT:    add r30, r4, r0
-; CHECK-NEXT:    add r29, r3, r0
-; CHECK-NEXT:    ld r0, -168(r1) # 8-byte Folded Reload
-; CHECK-NEXT:    add r7, r5, r11
-; CHECK-NEXT:    add r9, r4, r11
-; CHECK-NEXT:    add r11, r3, r11
-; CHECK-NEXT:    add r10, r10, r0
-; CHECK-NEXT:    sldi r10, r10, 3
-; CHECK-NEXT:    add r5, r5, r10
-; CHECK-NEXT:    add r4, r4, r10
-; CHECK-NEXT:    add r3, r3, r10
+; CHECK-NEXT:    ld r3, -184(r1) # 8-byte Folded Reload
+; CHECK-NEXT:    ld r0, -160(r1) # 8-byte Folded Reload
+; CHECK-NEXT:    sldi r8, r30, 3
+; CHECK-NEXT:    add r3, r10, r3
+; CHECK-NEXT:    sldi r3, r3, 3
+; CHECK-NEXT:    add r7, r5, r3
+; CHECK-NEXT:    add r9, r4, r3
+; CHECK-NEXT:    add r11, r0, r3
+; CHECK-NEXT:    ld r3, -176(r1) # 8-byte Folded Reload
+; CHECK-NEXT:    add r3, r10, r3
+; CHECK-NEXT:    sldi r3, r3, 3
+; CHECK-NEXT:    add r12, r5, r3
+; CHECK-NEXT:    add r30, r4, r3
+; CHECK-NEXT:    add r29, r0, r3
+; CHECK-NEXT:    ld r3, -168(r1) # 8-byte Folded Reload
+; CHECK-NEXT:    add r3, r10, r3
 ; CHECK-NEXT:    li r10, 0
+; CHECK-NEXT:    sldi r3, r3, 3
+; CHECK-NEXT:    add r5, r5, r3
+; CHECK-NEXT:    add r4, r4, r3
+; CHECK-NEXT:    add r3, r0, r3
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB7_6: # %for.body.epil
 ; CHECK-NEXT:    #
@@ -1046,35 +1000,35 @@ for.body.epil:                                    ; preds = %for.cond.cleanup.lo
   %inc.addr.050.epil = phi i64 [ %add23.epil, %for.body.epil ], [ %inc.addr.050.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
   %epil.iter = phi i64 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
   %add.epil = add nsw i64 %inc.addr.050.epil, %inc1
-  %arrayidx.epil = getelementptr inbounds double, double* %input1, i64 %add.epil
-  %3 = load double, double* %arrayidx.epil, align 8
-  %arrayidx2.epil = getelementptr inbounds double, double* %input2, i64 %add.epil
-  %4 = load double, double* %arrayidx2.epil, align 8
+  %arrayidx.epil = getelementptr inbounds double, ptr %input1, i64 %add.epil
+  %3 = load double, ptr %arrayidx.epil, align 8
+  %arrayidx2.epil = getelementptr inbounds double, ptr %input2, i64 %add.epil
+  %4 = load double, ptr %arrayidx2.epil, align 8
   %mul3.epil = fmul double %3, %4
-  %arrayidx5.epil = getelementptr inbounds double, double* %output, i64 %add.epil
-  %5 = load double, double* %arrayidx5.epil, align 8
+  %arrayidx5.epil = getelementptr inbounds double, ptr %output, i64 %add.epil
+  %5 = load double, ptr %arrayidx5.epil, align 8
   %add6.epil = fadd double %5, %mul3.epil
-  store double %add6.epil, double* %arrayidx5.epil, align 8
+  store double %add6.epil, ptr %arrayidx5.epil, align 8
   %add7.epil = add nsw i64 %inc.addr.050.epil, %inc2
-  %arrayidx8.epil = getelementptr inbounds double, double* %input1, i64 %add7.epil
-  %6 = load double, double* %arrayidx8.epil, align 8
-  %arrayidx10.epil = getelementptr inbounds double, double* %input2, i64 %add7.epil
-  %7 = load double, double* %arrayidx10.epil, align 8
+  %arrayidx8.epil = getelementptr inbounds double, ptr %input1, i64 %add7.epil
+  %6 = load double, ptr %arrayidx8.epil, align 8
+  %arrayidx10.epil = getelementptr inbounds double, ptr %input2, i64 %add7.epil
+  %7 = load double, ptr %arrayidx10.epil, align 8
   %mul11.epil = fmul double %6, %7
-  %arrayidx13.epil = getelementptr inbounds double, double* %output, i64 %add7.epil
-  %8 = load double, double* %arrayidx13.epil, align 8
+  %arrayidx13.epil = getelementptr inbounds double, ptr %output, i64 %add7.epil
+  %8 = load double, ptr %arrayidx13.epil, align 8
   %add14.epil = fadd double %8, %mul11.epil
-  store double %add14.epil, double* %arrayidx13.epil, align 8
+  store double %add14.epil, ptr %arrayidx13.epil, align 8
   %add15.epil = add nsw i64 %inc.addr.050.epil, %inc3
-  %arrayidx16.epil = getelementptr inbounds double, double* %input1, i64 %add15.epil
-  %9 = load double, double* %arrayidx16.epil, align 8
-  %arrayidx18.epil = getelementptr inbounds double, double* %input2, i64 %add15.epil
-  %10 = load double, double* %arrayidx18.epil, align 8
+  %arrayidx16.epil = getelementptr inbounds double, ptr %input1, i64 %add15.epil
+  %9 = load double, ptr %arrayidx16.epil, align 8
+  %arrayidx18.epil = getelementptr inbounds double, ptr %input2, i64 %add15.epil
+  %10 = load double, ptr %arrayidx18.epil, align 8
   %mul19.epil = fmul double %9, %10
-  %arrayidx21.epil = getelementptr inbounds double, double* %output, i64 %add15.epil
-  %11 = load double, double* %arrayidx21.epil, align 8
+  %arrayidx21.epil = getelementptr inbounds double, ptr %output, i64 %add15.epil
+  %11 = load double, ptr %arrayidx21.epil, align 8
   %add22.epil = fadd double %11, %mul19.epil
-  store double %add22.epil, double* %arrayidx21.epil, align 8
+  store double %add22.epil, ptr %arrayidx21.epil, align 8
   %add23.epil = add nsw i64 %inc.addr.050.epil, %inc4
   %epil.iter.sub = add nsw i64 %epil.iter, -1
   %epil.iter.cmp.not = icmp eq i64 %epil.iter.sub, 0
@@ -1087,128 +1041,128 @@ for.body:                                         ; preds = %for.body, %for.body
   %inc.addr.050 = phi i64 [ %inc4, %for.body.preheader.new ], [ %add23.3, %for.body ]
   %niter = phi i64 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
   %add = add nsw i64 %inc.addr.050, %inc1
-  %arrayidx = getelementptr inbounds double, double* %input1, i64 %add
-  %12 = load double, double* %arrayidx, align 8
-  %arrayidx2 = getelementptr inbounds double, double* %input2, i64 %add
-  %13 = load double, double* %arrayidx2, align 8
+  %arrayidx = getelementptr inbounds double, ptr %input1, i64 %add
+  %12 = load double, ptr %arrayidx, align 8
+  %arrayidx2 = getelementptr inbounds double, ptr %input2, i64 %add
+  %13 = load double, ptr %arrayidx2, align 8
   %mul3 = fmul double %12, %13
-  %arrayidx5 = getelementptr inbounds double, double* %output, i64 %add
-  %14 = load double, double* %arrayidx5, align 8
+  %arrayidx5 = getelementptr inbounds double, ptr %output, i64 %add
+  %14 = load double, ptr %arrayidx5, align 8
   %add6 = fadd double %14, %mul3
-  store double %add6, double* %arrayidx5, align 8
+  store double %add6, ptr %arrayidx5, align 8
   %add7 = add nsw i64 %inc.addr.050, %inc2
-  %arrayidx8 = getelementptr inbounds double, double* %input1, i64 %add7
-  %15 = load double, double* %arrayidx8, align 8
-  %arrayidx10 = getelementptr inbounds double, double* %input2, i64 %add7
-  %16 = load double, double* %arrayidx10, align 8
+  %arrayidx8 = getelementptr inbounds double, ptr %input1, i64 %add7
+  %15 = load double, ptr %arrayidx8, align 8
+  %arrayidx10 = getelementptr inbounds double, ptr %input2, i64 %add7
+  %16 = load double, ptr %arrayidx10, align 8
   %mul11 = fmul double %15, %16
-  %arrayidx13 = getelementptr inbounds double, double* %output, i64 %add7
-  %17 = load double, double* %arrayidx13, align 8
+  %arrayidx13 = getelementptr inbounds double, ptr %output, i64 %add7
+  %17 = load double, ptr %arrayidx13, align 8
   %add14 = fadd double %17, %mul11
-  store double %add14, double* %arrayidx13, align 8
+  store double %add14, ptr %arrayidx13, align 8
   %add15 = add nsw i64 %inc.addr.050, %inc3
-  %arrayidx16 = getelementptr inbounds double, double* %input1, i64 %add15
-  %18 = load double, double* %arrayidx16, align 8
-  %arrayidx18 = getelementptr inbounds double, double* %input2, i64 %add15
-  %19 = load double, double* %arrayidx18, align 8
+  %arrayidx16 = getelementptr inbounds double, ptr %input1, i64 %add15
+  %18 = load double, ptr %arrayidx16, align 8
+  %arrayidx18 = getelementptr inbounds double, ptr %input2, i64 %add15
+  %19 = load double, ptr %arrayidx18, align 8
   %mul19 = fmul double %18, %19
-  %arrayidx21 = getelementptr inbounds double, double* %output, i64 %add15
-  %20 = load double, double* %arrayidx21, align 8
+  %arrayidx21 = getelementptr inbounds double, ptr %output, i64 %add15
+  %20 = load double, ptr %arrayidx21, align 8
   %add22 = fadd double %20, %mul19
-  store double %add22, double* %arrayidx21, align 8
+  store double %add22, ptr %arrayidx21, align 8
   %add23 = add nsw i64 %inc.addr.050, %inc4
   %add.1 = add nsw i64 %add23, %inc1
-  %arrayidx.1 = getelementptr inbounds double, double* %input1, i64 %add.1
-  %21 = load double, double* %arrayidx.1, align 8
-  %arrayidx2.1 = getelementptr inbounds double, double* %input2, i64 %add.1
-  %22 = load double, double* %arrayidx2.1, align 8
+  %arrayidx.1 = getelementptr inbounds double, ptr %input1, i64 %add.1
+  %21 = load double, ptr %arrayidx.1, align 8
+  %arrayidx2.1 = getelementptr inbounds double, ptr %input2, i64 %add.1
+  %22 = load double, ptr %arrayidx2.1, align 8
   %mul3.1 = fmul double %21, %22
-  %arrayidx5.1 = getelementptr inbounds double, double* %output, i64 %add.1
-  %23 = load double, double* %arrayidx5.1, align 8
+  %arrayidx5.1 = getelementptr inbounds double, ptr %output, i64 %add.1
+  %23 = load double, ptr %arrayidx5.1, align 8
   %add6.1 = fadd double %23, %mul3.1
-  store double %add6.1, double* %arrayidx5.1, align 8
+  store double %add6.1, ptr %arrayidx5.1, align 8
   %add7.1 = add nsw i64 %add23, %inc2
-  %arrayidx8.1 = getelementptr inbounds double, double* %input1, i64 %add7.1
-  %24 = load double, double* %arrayidx8.1, align 8
-  %arrayidx10.1 = getelementptr inbounds double, double* %input2, i64 %add7.1
-  %25 = load double, double* %arrayidx10.1, align 8
+  %arrayidx8.1 = getelementptr inbounds double, ptr %input1, i64 %add7.1
+  %24 = load double, ptr %arrayidx8.1, align 8
+  %arrayidx10.1 = getelementptr inbounds double, ptr %input2, i64 %add7.1
+  %25 = load double, ptr %arrayidx10.1, align 8
   %mul11.1 = fmul double %24, %25
-  %arrayidx13.1 = getelementptr inbounds double, double* %output, i64 %add7.1
-  %26 = load double, double* %arrayidx13.1, align 8
+  %arrayidx13.1 = getelementptr inbounds double, ptr %output, i64 %add7.1
+  %26 = load double, ptr %arrayidx13.1, align 8
   %add14.1 = fadd double %26, %mul11.1
-  store double %add14.1, double* %arrayidx13.1, align 8
+  store double %add14.1, ptr %arrayidx13.1, align 8
   %add15.1 = add nsw i64 %add23, %inc3
-  %arrayidx16.1 = getelementptr inbounds double, double* %input1, i64 %add15.1
-  %27 = load double, double* %arrayidx16.1, align 8
-  %arrayidx18.1 = getelementptr inbounds double, double* %input2, i64 %add15.1
-  %28 = load double, double* %arrayidx18.1, align 8
+  %arrayidx16.1 = getelementptr inbounds double, ptr %input1, i64 %add15.1
+  %27 = load double, ptr %arrayidx16.1, align 8
+  %arrayidx18.1 = getelementptr inbounds double, ptr %input2, i64 %add15.1
+  %28 = load double, ptr %arrayidx18.1, align 8
   %mul19.1 = fmul double %27, %28
-  %arrayidx21.1 = getelementptr inbounds double, double* %output, i64 %add15.1
-  %29 = load double, double* %arrayidx21.1, align 8
+  %arrayidx21.1 = getelementptr inbounds double, ptr %output, i64 %add15.1
+  %29 = load double, ptr %arrayidx21.1, align 8
   %add22.1 = fadd double %29, %mul19.1
-  store double %add22.1, double* %arrayidx21.1, align 8
+  store double %add22.1, ptr %arrayidx21.1, align 8
   %add23.1 = add nsw i64 %add23, %inc4
   %add.2 = add nsw i64 %add23.1, %inc1
-  %arrayidx.2 = getelementptr inbounds double, double* %input1, i64 %add.2
-  %30 = load double, double* %arrayidx.2, align 8
-  %arrayidx2.2 = getelementptr inbounds double, double* %input2, i64 %add.2
-  %31 = load double, double* %arrayidx2.2, align 8
+  %arrayidx.2 = getelementptr inbounds double, ptr %input1, i64 %add.2
+  %30 = load double, ptr %arrayidx.2, align 8
+  %arrayidx2.2 = getelementptr inbounds double, ptr %input2, i64 %add.2
+  %31 = load double, ptr %arrayidx2.2, align 8
   %mul3.2 = fmul double %30, %31
-  %arrayidx5.2 = getelementptr inbounds double, double* %output, i64 %add.2
-  %32 = load double, double* %arrayidx5.2, align 8
+  %arrayidx5.2 = getelementptr inbounds double, ptr %output, i64 %add.2
+  %32 = load double, ptr %arrayidx5.2, align 8
   %add6.2 = fadd double %32, %mul3.2
-  store double %add6.2, double* %arrayidx5.2, align 8
+  store double %add6.2, ptr %arrayidx5.2, align 8
   %add7.2 = add nsw i64 %add23.1, %inc2
-  %arrayidx8.2 = getelementptr inbounds double, double* %input1, i64 %add7.2
-  %33 = load double, double* %arrayidx8.2, align 8
-  %arrayidx10.2 = getelementptr inbounds double, double* %input2, i64 %add7.2
-  %34 = load double, double* %arrayidx10.2, align 8
+  %arrayidx8.2 = getelementptr inbounds double, ptr %input1, i64 %add7.2
+  %33 = load double, ptr %arrayidx8.2, align 8
+  %arrayidx10.2 = getelementptr inbounds double, ptr %input2, i64 %add7.2
+  %34 = load double, ptr %arrayidx10.2, align 8
   %mul11.2 = fmul double %33, %34
-  %arrayidx13.2 = getelementptr inbounds double, double* %output, i64 %add7.2
-  %35 = load double, double* %arrayidx13.2, align 8
+  %arrayidx13.2 = getelementptr inbounds double, ptr %output, i64 %add7.2
+  %35 = load double, ptr %arrayidx13.2, align 8
   %add14.2 = fadd double %35, %mul11.2
-  store double %add14.2, double* %arrayidx13.2, align 8
+  store double %add14.2, ptr %arrayidx13.2, align 8
   %add15.2 = add nsw i64 %add23.1, %inc3
-  %arrayidx16.2 = getelementptr inbounds double, double* %input1, i64 %add15.2
-  %36 = load double, double* %arrayidx16.2, align 8
-  %arrayidx18.2 = getelementptr inbounds double, double* %input2, i64 %add15.2
-  %37 = load double, double* %arrayidx18.2, align 8
+  %arrayidx16.2 = getelementptr inbounds double, ptr %input1, i64 %add15.2
+  %36 = load double, ptr %arrayidx16.2, align 8
+  %arrayidx18.2 = getelementptr inbounds double, ptr %input2, i64 %add15.2
+  %37 = load double, ptr %arrayidx18.2, align 8
   %mul19.2 = fmul double %36, %37
-  %arrayidx21.2 = getelementptr inbounds double, double* %output, i64 %add15.2
-  %38 = load double, double* %arrayidx21.2, align 8
+  %arrayidx21.2 = getelementptr inbounds double, ptr %output, i64 %add15.2
+  %38 = load double, ptr %arrayidx21.2, align 8
   %add22.2 = fadd double %38, %mul19.2
-  store double %add22.2, double* %arrayidx21.2, align 8
+  store double %add22.2, ptr %arrayidx21.2, align 8
   %add23.2 = add nsw i64 %add23.1, %inc4
   %add.3 = add nsw i64 %add23.2, %inc1
-  %arrayidx.3 = getelementptr inbounds double, double* %input1, i64 %add.3
-  %39 = load double, double* %arrayidx.3, align 8
-  %arrayidx2.3 = getelementptr inbounds double, double* %input2, i64 %add.3
-  %40 = load double, double* %arrayidx2.3, align 8
+  %arrayidx.3 = getelementptr inbounds double, ptr %input1, i64 %add.3
+  %39 = load double, ptr %arrayidx.3, align 8
+  %arrayidx2.3 = getelementptr inbounds double, ptr %input2, i64 %add.3
+  %40 = load double, ptr %arrayidx2.3, align 8
   %mul3.3 = fmul double %39, %40
-  %arrayidx5.3 = getelementptr inbounds double, double* %output, i64 %add.3
-  %41 = load double, double* %arrayidx5.3, align 8
+  %arrayidx5.3 = getelementptr inbounds double, ptr %output, i64 %add.3
+  %41 = load double, ptr %arrayidx5.3, align 8
   %add6.3 = fadd double %41, %mul3.3
-  store double %add6.3, double* %arrayidx5.3, align 8
+  store double %add6.3, ptr %arrayidx5.3, align 8
   %add7.3 = add nsw i64 %add23.2, %inc2
-  %arrayidx8.3 = getelementptr inbounds double, double* %input1, i64 %add7.3
-  %42 = load double, double* %arrayidx8.3, align 8
-  %arrayidx10.3 = getelementptr inbounds double, double* %input2, i64 %add7.3
-  %43 = load double, double* %arrayidx10.3, align 8
+  %arrayidx8.3 = getelementptr inbounds double, ptr %input1, i64 %add7.3
+  %42 = load double, ptr %arrayidx8.3, align 8
+  %arrayidx10.3 = getelementptr inbounds double, ptr %input2, i64 %add7.3
+  %43 = load double, ptr %arrayidx10.3, align 8
   %mul11.3 = fmul double %42, %43
-  %arrayidx13.3 = getelementptr inbounds double, double* %output, i64 %add7.3
-  %44 = load double, double* %arrayidx13.3, align 8
+  %arrayidx13.3 = getelementptr inbounds double, ptr %output, i64 %add7.3
+  %44 = load double, ptr %arrayidx13.3, align 8
   %add14.3 = fadd double %44, %mul11.3
-  store double %add14.3, double* %arrayidx13.3, align 8
+  store double %add14.3, ptr %arrayidx13.3, align 8
   %add15.3 = add nsw i64 %add23.2, %inc3
-  %arrayidx16.3 = getelementptr inbounds double, double* %input1, i64 %add15.3
-  %45 = load double, double* %arrayidx16.3, align 8
-  %arrayidx18.3 = getelementptr inbounds double, double* %input2, i64 %add15.3
-  %46 = load double, double* %arrayidx18.3, align 8
+  %arrayidx16.3 = getelementptr inbounds double, ptr %input1, i64 %add15.3
+  %45 = load double, ptr %arrayidx16.3, align 8
+  %arrayidx18.3 = getelementptr inbounds double, ptr %input2, i64 %add15.3
+  %46 = load double, ptr %arrayidx18.3, align 8
   %mul19.3 = fmul double %45, %46
-  %arrayidx21.3 = getelementptr inbounds double, double* %output, i64 %add15.3
-  %47 = load double, double* %arrayidx21.3, align 8
+  %arrayidx21.3 = getelementptr inbounds double, ptr %output, i64 %add15.3
+  %47 = load double, ptr %arrayidx21.3, align 8
   %add22.3 = fadd double %47, %mul19.3
-  store double %add22.3, double* %arrayidx21.3, align 8
+  store double %add22.3, ptr %arrayidx21.3, align 8
   %add23.3 = add nsw i64 %add23.2, %inc4
   %niter.nsub.3 = add i64 %niter, -4
   %niter.ncmp.3.not = icmp eq i64 %niter.nsub.3, 0

diff --git a/llvm/test/CodeGen/PowerPC/loop-instr-form-prepare.ll b/llvm/test/CodeGen/PowerPC/loop-instr-form-prepare.ll
index 3a4fe398adf6..769b358131e9 100644
--- a/llvm/test/CodeGen/PowerPC/loop-instr-form-prepare.ll
+++ b/llvm/test/CodeGen/PowerPC/loop-instr-form-prepare.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 -ppc-asm-full-reg-names -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 < %s | FileCheck %s
+; RUN: llc -ppc-asm-full-reg-names -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 < %s | FileCheck %s
 
 ; test_no_prep:
 ; unsigned long test_no_prep(char *p, int count) {
@@ -18,7 +18,7 @@
 ;   return res + count;
 ; }
 
-define i64 @test_no_prep(i8* %arg, i32 signext %arg1) {
+define i64 @test_no_prep(ptr %arg, i32 signext %arg1) {
 ; CHECK-LABEL: test_no_prep:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    cmplwi r4, 0
@@ -59,19 +59,15 @@ bb:
 bb3:                                              ; preds = %bb3, %bb
   %i4 = phi i64 [ %i23, %bb3 ], [ 0, %bb ]
   %i5 = phi i64 [ %i22, %bb3 ], [ 0, %bb ]
-  %i6 = getelementptr inbounds i8, i8* %arg, i64 %i4
-  %i7 = getelementptr inbounds i8, i8* %i6, i64 4001
-  %i8 = bitcast i8* %i7 to i64*
-  %i9 = load i64, i64* %i8, align 8
-  %i10 = getelementptr inbounds i8, i8* %i6, i64 4002
-  %i11 = bitcast i8* %i10 to i64*
-  %i12 = load i64, i64* %i11, align 8
-  %i13 = getelementptr inbounds i8, i8* %i6, i64 4003
-  %i14 = bitcast i8* %i13 to i64*
-  %i15 = load i64, i64* %i14, align 8
-  %i16 = getelementptr inbounds i8, i8* %i6, i64 4004
-  %i17 = bitcast i8* %i16 to i64*
-  %i18 = load i64, i64* %i17, align 8
+  %i6 = getelementptr inbounds i8, ptr %arg, i64 %i4
+  %i7 = getelementptr inbounds i8, ptr %i6, i64 4001
+  %i9 = load i64, ptr %i7, align 8
+  %i10 = getelementptr inbounds i8, ptr %i6, i64 4002
+  %i12 = load i64, ptr %i10, align 8
+  %i13 = getelementptr inbounds i8, ptr %i6, i64 4003
+  %i15 = load i64, ptr %i13, align 8
+  %i16 = getelementptr inbounds i8, ptr %i6, i64 4004
+  %i18 = load i64, ptr %i16, align 8
   %i19 = mul i64 %i12, %i9
   %i20 = mul i64 %i19, %i15
   %i21 = mul i64 %i20, %i18
@@ -103,7 +99,7 @@ bb25:                                             ; preds = %bb3, %bb
 ;   return res + count;
 ; }
 
-define i64 @test_ds_prep(i8* %arg, i32 signext %arg1) {
+define i64 @test_ds_prep(ptr %arg, i32 signext %arg1) {
 ; CHECK-LABEL: test_ds_prep:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    cmplwi r4, 0
@@ -142,19 +138,15 @@ bb:
 bb3:                                              ; preds = %bb3, %bb
   %i4 = phi i64 [ %i23, %bb3 ], [ 0, %bb ]
   %i5 = phi i64 [ %i22, %bb3 ], [ 0, %bb ]
-  %i6 = getelementptr inbounds i8, i8* %arg, i64 %i4
-  %i7 = getelementptr inbounds i8, i8* %i6, i64 4001
-  %i8 = bitcast i8* %i7 to i64*
-  %i9 = load i64, i64* %i8, align 8
-  %i10 = getelementptr inbounds i8, i8* %i6, i64 4002
-  %i11 = bitcast i8* %i10 to i64*
-  %i12 = load i64, i64* %i11, align 8
-  %i13 = getelementptr inbounds i8, i8* %i6, i64 4003
-  %i14 = bitcast i8* %i13 to i64*
-  %i15 = load i64, i64* %i14, align 8
-  %i16 = getelementptr inbounds i8, i8* %i6, i64 4006
-  %i17 = bitcast i8* %i16 to i64*
-  %i18 = load i64, i64* %i17, align 8
+  %i6 = getelementptr inbounds i8, ptr %arg, i64 %i4
+  %i7 = getelementptr inbounds i8, ptr %i6, i64 4001
+  %i9 = load i64, ptr %i7, align 8
+  %i10 = getelementptr inbounds i8, ptr %i6, i64 4002
+  %i12 = load i64, ptr %i10, align 8
+  %i13 = getelementptr inbounds i8, ptr %i6, i64 4003
+  %i15 = load i64, ptr %i13, align 8
+  %i16 = getelementptr inbounds i8, ptr %i6, i64 4006
+  %i18 = load i64, ptr %i16, align 8
   %i19 = mul i64 %i12, %i9
   %i20 = mul i64 %i19, %i15
   %i21 = mul i64 %i20, %i18
@@ -196,7 +188,7 @@ bb25:                                             ; preds = %bb3, %bb
 ;  return res + count;
 ;}
 
-define i64 @test_max_number_reminder(i8* %arg, i32 signext %arg1) {
+define i64 @test_max_number_reminder(ptr %arg, i32 signext %arg1) {
 ; CHECK-LABEL: test_max_number_reminder:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    cmplwi r4, 0
@@ -260,34 +252,25 @@ bb:
 bb3:                                              ; preds = %bb3, %bb
   %i4 = phi i64 [ %i43, %bb3 ], [ 0, %bb ]
   %i5 = phi i64 [ %i42, %bb3 ], [ 0, %bb ]
-  %i6 = getelementptr inbounds i8, i8* %arg, i64 %i4
-  %i7 = getelementptr inbounds i8, i8* %i6, i64 4001
-  %i8 = bitcast i8* %i7 to i64*
-  %i9 = load i64, i64* %i8, align 8
-  %i10 = getelementptr inbounds i8, i8* %i6, i64 4002
-  %i11 = bitcast i8* %i10 to i64*
-  %i12 = load i64, i64* %i11, align 8
-  %i13 = getelementptr inbounds i8, i8* %i6, i64 4003
-  %i14 = bitcast i8* %i13 to i64*
-  %i15 = load i64, i64* %i14, align 8
-  %i16 = getelementptr inbounds i8, i8* %i6, i64 4005
-  %i17 = bitcast i8* %i16 to i64*
-  %i18 = load i64, i64* %i17, align 8
-  %i19 = getelementptr inbounds i8, i8* %i6, i64 4006
-  %i20 = bitcast i8* %i19 to i64*
-  %i21 = load i64, i64* %i20, align 8
-  %i22 = getelementptr inbounds i8, i8* %i6, i64 4007
-  %i23 = bitcast i8* %i22 to i64*
-  %i24 = load i64, i64* %i23, align 8
-  %i25 = getelementptr inbounds i8, i8* %i6, i64 4014
-  %i26 = bitcast i8* %i25 to i64*
-  %i27 = load i64, i64* %i26, align 8
-  %i28 = getelementptr inbounds i8, i8* %i6, i64 4010
-  %i29 = bitcast i8* %i28 to i64*
-  %i30 = load i64, i64* %i29, align 8
-  %i31 = getelementptr inbounds i8, i8* %i6, i64 4011
-  %i32 = bitcast i8* %i31 to i64*
-  %i33 = load i64, i64* %i32, align 8
+  %i6 = getelementptr inbounds i8, ptr %arg, i64 %i4
+  %i7 = getelementptr inbounds i8, ptr %i6, i64 4001
+  %i9 = load i64, ptr %i7, align 8
+  %i10 = getelementptr inbounds i8, ptr %i6, i64 4002
+  %i12 = load i64, ptr %i10, align 8
+  %i13 = getelementptr inbounds i8, ptr %i6, i64 4003
+  %i15 = load i64, ptr %i13, align 8
+  %i16 = getelementptr inbounds i8, ptr %i6, i64 4005
+  %i18 = load i64, ptr %i16, align 8
+  %i19 = getelementptr inbounds i8, ptr %i6, i64 4006
+  %i21 = load i64, ptr %i19, align 8
+  %i22 = getelementptr inbounds i8, ptr %i6, i64 4007
+  %i24 = load i64, ptr %i22, align 8
+  %i25 = getelementptr inbounds i8, ptr %i6, i64 4014
+  %i27 = load i64, ptr %i25, align 8
+  %i28 = getelementptr inbounds i8, ptr %i6, i64 4010
+  %i30 = load i64, ptr %i28, align 8
+  %i31 = getelementptr inbounds i8, ptr %i6, i64 4011
+  %i33 = load i64, ptr %i31, align 8
   %i34 = mul i64 %i12, %i9
   %i35 = mul i64 %i34, %i15
   %i36 = mul i64 %i35, %i18
@@ -324,7 +307,7 @@ bb45:                                             ; preds = %bb3, %bb
 ;   return res + count;
 ; }
 
-define dso_local i64 @test_update_ds_prep_interact(i8* %arg, i32 signext %arg1) {
+define dso_local i64 @test_update_ds_prep_interact(ptr %arg, i32 signext %arg1) {
 ; CHECK-LABEL: test_update_ds_prep_interact:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    cmplwi r4, 0
@@ -363,19 +346,15 @@ bb3:                                              ; preds = %bb3, %bb
   %i4 = phi i64 [ %i24, %bb3 ], [ 0, %bb ]
   %i5 = phi i64 [ %i23, %bb3 ], [ 0, %bb ]
   %i6 = shl i64 %i4, 2
-  %i7 = getelementptr inbounds i8, i8* %arg, i64 %i6
-  %i8 = getelementptr inbounds i8, i8* %i7, i64 4001
-  %i9 = bitcast i8* %i8 to i64*
-  %i10 = load i64, i64* %i9, align 8
-  %i11 = getelementptr inbounds i8, i8* %i7, i64 4002
-  %i12 = bitcast i8* %i11 to i64*
-  %i13 = load i64, i64* %i12, align 8
-  %i14 = getelementptr inbounds i8, i8* %i7, i64 4003
-  %i15 = bitcast i8* %i14 to i64*
-  %i16 = load i64, i64* %i15, align 8
-  %i17 = getelementptr inbounds i8, i8* %i7, i64 4006
-  %i18 = bitcast i8* %i17 to i64*
-  %i19 = load i64, i64* %i18, align 8
+  %i7 = getelementptr inbounds i8, ptr %arg, i64 %i6
+  %i8 = getelementptr inbounds i8, ptr %i7, i64 4001
+  %i10 = load i64, ptr %i8, align 8
+  %i11 = getelementptr inbounds i8, ptr %i7, i64 4002
+  %i13 = load i64, ptr %i11, align 8
+  %i14 = getelementptr inbounds i8, ptr %i7, i64 4003
+  %i16 = load i64, ptr %i14, align 8
+  %i17 = getelementptr inbounds i8, ptr %i7, i64 4006
+  %i19 = load i64, ptr %i17, align 8
   %i20 = mul i64 %i13, %i10
   %i21 = mul i64 %i20, %i16
   %i22 = mul i64 %i21, %i19
@@ -407,7 +386,7 @@ bb26:                                             ; preds = %bb3, %bb
 ;   return res + count;
 ; }
 
-define i64 @test_update_ds_prep_nointeract(i8* %arg, i32 signext %arg1) {
+define i64 @test_update_ds_prep_nointeract(ptr %arg, i32 signext %arg1) {
 ; CHECK-LABEL: test_update_ds_prep_nointeract:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    cmplwi r4, 0
@@ -447,18 +426,15 @@ bb:
 bb3:                                              ; preds = %bb3, %bb
   %i4 = phi i64 [ %i23, %bb3 ], [ 0, %bb ]
   %i5 = phi i64 [ %i22, %bb3 ], [ 0, %bb ]
-  %i6 = getelementptr inbounds i8, i8* %arg, i64 %i4
-  %i7 = getelementptr inbounds i8, i8* %i6, i64 4001
-  %i8 = load i8, i8* %i7, align 1
-  %i9 = getelementptr inbounds i8, i8* %i6, i64 4002
-  %i10 = bitcast i8* %i9 to i64*
-  %i11 = load i64, i64* %i10, align 8
-  %i12 = getelementptr inbounds i8, i8* %i6, i64 4003
-  %i13 = bitcast i8* %i12 to i64*
-  %i14 = load i64, i64* %i13, align 8
-  %i15 = getelementptr inbounds i8, i8* %i6, i64 4007
-  %i16 = bitcast i8* %i15 to i64*
-  %i17 = load i64, i64* %i16, align 8
+  %i6 = getelementptr inbounds i8, ptr %arg, i64 %i4
+  %i7 = getelementptr inbounds i8, ptr %i6, i64 4001
+  %i8 = load i8, ptr %i7, align 1
+  %i9 = getelementptr inbounds i8, ptr %i6, i64 4002
+  %i11 = load i64, ptr %i9, align 8
+  %i12 = getelementptr inbounds i8, ptr %i6, i64 4003
+  %i14 = load i64, ptr %i12, align 8
+  %i15 = getelementptr inbounds i8, ptr %i6, i64 4007
+  %i17 = load i64, ptr %i15, align 8
   %i18 = zext i8 %i8 to i64
   %i19 = mul i64 %i11, %i18
   %i20 = mul i64 %i19, %i14
@@ -495,7 +471,7 @@ bb25:                                             ; preds = %bb3, %bb
 ;   return res + count;
 ; }
 
-define dso_local i64 @test_ds_multiple_chains(i8* %arg, i8* %arg1, i32 signext %arg2) {
+define dso_local i64 @test_ds_multiple_chains(ptr %arg, ptr %arg1, i32 signext %arg2) {
 ; CHECK-LABEL: test_ds_multiple_chains:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    cmplwi r5, 0
@@ -546,32 +522,24 @@ bb:
 bb4:                                              ; preds = %bb4, %bb
   %i5 = phi i64 [ %i41, %bb4 ], [ 0, %bb ]
   %i6 = phi i64 [ %i40, %bb4 ], [ 0, %bb ]
-  %i7 = getelementptr inbounds i8, i8* %arg, i64 %i5
-  %i8 = getelementptr inbounds i8, i8* %i7, i64 4001
-  %i9 = bitcast i8* %i8 to i64*
-  %i10 = load i64, i64* %i9, align 8
-  %i11 = getelementptr inbounds i8, i8* %i7, i64 4010
-  %i12 = bitcast i8* %i11 to i64*
-  %i13 = load i64, i64* %i12, align 8
-  %i14 = getelementptr inbounds i8, i8* %i7, i64 4005
-  %i15 = bitcast i8* %i14 to i64*
-  %i16 = load i64, i64* %i15, align 8
-  %i17 = getelementptr inbounds i8, i8* %i7, i64 4009
-  %i18 = bitcast i8* %i17 to i64*
-  %i19 = load i64, i64* %i18, align 8
-  %i20 = getelementptr inbounds i8, i8* %arg1, i64 %i5
-  %i21 = getelementptr inbounds i8, i8* %i20, i64 4001
-  %i22 = bitcast i8* %i21 to i64*
-  %i23 = load i64, i64* %i22, align 8
-  %i24 = getelementptr inbounds i8, i8* %i20, i64 4010
-  %i25 = bitcast i8* %i24 to i64*
-  %i26 = load i64, i64* %i25, align 8
-  %i27 = getelementptr inbounds i8, i8* %i20, i64 4005
-  %i28 = bitcast i8* %i27 to i64*
-  %i29 = load i64, i64* %i28, align 8
-  %i30 = getelementptr inbounds i8, i8* %i20, i64 4009
-  %i31 = bitcast i8* %i30 to i64*
-  %i32 = load i64, i64* %i31, align 8
+  %i7 = getelementptr inbounds i8, ptr %arg, i64 %i5
+  %i8 = getelementptr inbounds i8, ptr %i7, i64 4001
+  %i10 = load i64, ptr %i8, align 8
+  %i11 = getelementptr inbounds i8, ptr %i7, i64 4010
+  %i13 = load i64, ptr %i11, align 8
+  %i14 = getelementptr inbounds i8, ptr %i7, i64 4005
+  %i16 = load i64, ptr %i14, align 8
+  %i17 = getelementptr inbounds i8, ptr %i7, i64 4009
+  %i19 = load i64, ptr %i17, align 8
+  %i20 = getelementptr inbounds i8, ptr %arg1, i64 %i5
+  %i21 = getelementptr inbounds i8, ptr %i20, i64 4001
+  %i23 = load i64, ptr %i21, align 8
+  %i24 = getelementptr inbounds i8, ptr %i20, i64 4010
+  %i26 = load i64, ptr %i24, align 8
+  %i27 = getelementptr inbounds i8, ptr %i20, i64 4005
+  %i29 = load i64, ptr %i27, align 8
+  %i30 = getelementptr inbounds i8, ptr %i20, i64 4009
+  %i32 = load i64, ptr %i30, align 8
   %i33 = mul i64 %i13, %i10
   %i34 = mul i64 %i33, %i16
   %i35 = mul i64 %i34, %i19
@@ -621,13 +589,12 @@ bb43:                                             ; preds = %bb4, %bb
 ;  return res;
 ;}
 
- at arr = external local_unnamed_addr global i8*, align 8
+ at arr = external local_unnamed_addr global ptr, align 8
 
-define i64 @test_ds_cross_basic_blocks(i8* %arg, i32 signext %arg1) {
+define i64 @test_ds_cross_basic_blocks(ptr %arg, i32 signext %arg1) {
 ; CHECK-LABEL: test_ds_cross_basic_blocks:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    cmplwi r4, 0
-; CHECK-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
 ; CHECK-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
 ; CHECK-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
 ; CHECK-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
@@ -638,44 +605,44 @@ define i64 @test_ds_cross_basic_blocks(i8* %arg, i32 signext %arg1) {
 ; CHECK-NEXT:    li r7, 1
 ; CHECK-NEXT:    addi r6, r3, 4009
 ; CHECK-NEXT:    ld r5, .LC0 at toc@l(r5)
-; CHECK-NEXT:    iselgt r4, r4, r7
-; CHECK-NEXT:    li r3, 0
-; CHECK-NEXT:    li r8, -7
-; CHECK-NEXT:    li r9, -6
+; CHECK-NEXT:    iselgt r3, r4, r7
+; CHECK-NEXT:    li r4, -7
+; CHECK-NEXT:    li r8, -6
+; CHECK-NEXT:    li r9, 1
 ; CHECK-NEXT:    li r10, 1
 ; CHECK-NEXT:    li r11, 1
 ; CHECK-NEXT:    li r12, 1
 ; CHECK-NEXT:    li r30, 1
 ; CHECK-NEXT:    ld r5, 0(r5)
-; CHECK-NEXT:    mtctr r4
-; CHECK-NEXT:    li r4, -9
-; CHECK-NEXT:    li r29, 1
+; CHECK-NEXT:    mtctr r3
+; CHECK-NEXT:    li r3, 0
 ; CHECK-NEXT:    addi r5, r5, -1
 ; CHECK-NEXT:    b .LBB6_4
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB6_2: # %bb18
 ; CHECK-NEXT:    #
-; CHECK-NEXT:    ldx r0, r6, r4
-; CHECK-NEXT:    add r29, r0, r29
-; CHECK-NEXT:    ld r0, -8(r6)
+; CHECK-NEXT:    addi r29, r6, -9
+; CHECK-NEXT:    ld r0, 0(r29)
 ; CHECK-NEXT:    add r30, r0, r30
+; CHECK-NEXT:    ld r0, -8(r6)
+; CHECK-NEXT:    add r12, r0, r12
 ; CHECK-NEXT:  .LBB6_3: # %bb49
 ; CHECK-NEXT:    #
-; CHECK-NEXT:    mulld r0, r30, r29
+; CHECK-NEXT:    mulld r0, r12, r30
 ; CHECK-NEXT:    addi r6, r6, 1
-; CHECK-NEXT:    mulld r0, r0, r12
 ; CHECK-NEXT:    mulld r0, r0, r11
 ; CHECK-NEXT:    mulld r0, r0, r10
+; CHECK-NEXT:    mulld r0, r0, r9
 ; CHECK-NEXT:    maddld r3, r0, r7, r3
 ; CHECK-NEXT:    bdz .LBB6_9
 ; CHECK-NEXT:  .LBB6_4: # %bb5
 ; CHECK-NEXT:    #
 ; CHECK-NEXT:    lbzu r0, 1(r5)
-; CHECK-NEXT:    mulli r28, r0, 171
-; CHECK-NEXT:    rlwinm r27, r28, 24, 8, 30
-; CHECK-NEXT:    srwi r28, r28, 9
-; CHECK-NEXT:    add r28, r28, r27
-; CHECK-NEXT:    sub r0, r0, r28
+; CHECK-NEXT:    mulli r29, r0, 171
+; CHECK-NEXT:    rlwinm r28, r29, 24, 8, 30
+; CHECK-NEXT:    srwi r29, r29, 9
+; CHECK-NEXT:    add r29, r29, r28
+; CHECK-NEXT:    sub r0, r0, r29
 ; CHECK-NEXT:    clrlwi r0, r0, 24
 ; CHECK-NEXT:    cmplwi r0, 1
 ; CHECK-NEXT:    beq cr0, .LBB6_2
@@ -685,16 +652,16 @@ define i64 @test_ds_cross_basic_blocks(i8* %arg, i32 signext %arg1) {
 ; CHECK-NEXT:    bne cr0, .LBB6_7
 ; CHECK-NEXT:  # %bb.6: # %bb31
 ; CHECK-NEXT:    #
-; CHECK-NEXT:    ldx r0, r6, r8
-; CHECK-NEXT:    add r12, r0, r12
-; CHECK-NEXT:    ld r0, -4(r6)
+; CHECK-NEXT:    ldx r0, r6, r4
 ; CHECK-NEXT:    add r11, r0, r11
+; CHECK-NEXT:    ld r0, -4(r6)
+; CHECK-NEXT:    add r10, r0, r10
 ; CHECK-NEXT:    b .LBB6_3
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB6_7: # %bb40
 ; CHECK-NEXT:    #
-; CHECK-NEXT:    ldx r0, r6, r9
-; CHECK-NEXT:    add r10, r0, r10
+; CHECK-NEXT:    ldx r0, r6, r8
+; CHECK-NEXT:    add r9, r0, r9
 ; CHECK-NEXT:    ld r0, 0(r6)
 ; CHECK-NEXT:    add r7, r0, r7
 ; CHECK-NEXT:    b .LBB6_3
@@ -704,7 +671,6 @@ define i64 @test_ds_cross_basic_blocks(i8* %arg, i32 signext %arg1) {
 ; CHECK-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
 ; CHECK-NEXT:    blr
 bb:
   %i = sext i32 %arg1 to i64
@@ -712,7 +678,7 @@ bb:
   br i1 %i2, label %bb64, label %bb3
 
 bb3:                                              ; preds = %bb
-  %i4 = load i8*, i8** @arr, align 8
+  %i4 = load ptr, ptr @arr, align 8
   br label %bb5
 
 bb5:                                              ; preds = %bb49, %bb3
@@ -724,48 +690,42 @@ bb5:                                              ; preds = %bb49, %bb3
   %i11 = phi i64 [ 1, %bb3 ], [ %i50, %bb49 ]
   %i12 = phi i64 [ 0, %bb3 ], [ %i62, %bb49 ]
   %i13 = phi i64 [ 0, %bb3 ], [ %i61, %bb49 ]
-  %i14 = getelementptr inbounds i8, i8* %i4, i64 %i12
-  %i15 = load i8, i8* %i14, align 1
+  %i14 = getelementptr inbounds i8, ptr %i4, i64 %i12
+  %i15 = load i8, ptr %i14, align 1
   %i16 = urem i8 %i15, 3
   %i17 = icmp eq i8 %i16, 1
   br i1 %i17, label %bb18, label %bb28
 
 bb18:                                             ; preds = %bb5
-  %i19 = getelementptr inbounds i8, i8* %arg, i64 %i12
-  %i20 = getelementptr inbounds i8, i8* %i19, i64 4000
-  %i21 = bitcast i8* %i20 to i64*
-  %i22 = load i64, i64* %i21, align 8
+  %i19 = getelementptr inbounds i8, ptr %arg, i64 %i12
+  %i20 = getelementptr inbounds i8, ptr %i19, i64 4000
+  %i22 = load i64, ptr %i20, align 8
   %i23 = add i64 %i22, %i11
-  %i24 = getelementptr inbounds i8, i8* %i19, i64 4001
-  %i25 = bitcast i8* %i24 to i64*
-  %i26 = load i64, i64* %i25, align 8
+  %i24 = getelementptr inbounds i8, ptr %i19, i64 4001
+  %i26 = load i64, ptr %i24, align 8
   %i27 = add i64 %i26, %i10
   br label %bb49
 
 bb28:                                             ; preds = %bb5
   %i29 = icmp eq i8 %i16, 2
-  %i30 = getelementptr inbounds i8, i8* %arg, i64 %i12
+  %i30 = getelementptr inbounds i8, ptr %arg, i64 %i12
   br i1 %i29, label %bb31, label %bb40
 
 bb31:                                             ; preds = %bb28
-  %i32 = getelementptr inbounds i8, i8* %i30, i64 4002
-  %i33 = bitcast i8* %i32 to i64*
-  %i34 = load i64, i64* %i33, align 8
+  %i32 = getelementptr inbounds i8, ptr %i30, i64 4002
+  %i34 = load i64, ptr %i32, align 8
   %i35 = add i64 %i34, %i9
-  %i36 = getelementptr inbounds i8, i8* %i30, i64 4005
-  %i37 = bitcast i8* %i36 to i64*
-  %i38 = load i64, i64* %i37, align 8
+  %i36 = getelementptr inbounds i8, ptr %i30, i64 4005
+  %i38 = load i64, ptr %i36, align 8
   %i39 = add i64 %i38, %i8
   br label %bb49
 
 bb40:                                             ; preds = %bb28
-  %i41 = getelementptr inbounds i8, i8* %i30, i64 4003
-  %i42 = bitcast i8* %i41 to i64*
-  %i43 = load i64, i64* %i42, align 8
+  %i41 = getelementptr inbounds i8, ptr %i30, i64 4003
+  %i43 = load i64, ptr %i41, align 8
   %i44 = add i64 %i43, %i7
-  %i45 = getelementptr inbounds i8, i8* %i30, i64 4009
-  %i46 = bitcast i8* %i45 to i64*
-  %i47 = load i64, i64* %i46, align 8
+  %i45 = getelementptr inbounds i8, ptr %i30, i64 4009
+  %i47 = load i64, ptr %i45, align 8
   %i48 = add i64 %i47, %i6
   br label %bb49
 
@@ -809,7 +769,7 @@ bb64:                                             ; preds = %bb49, %bb
 ;  return res;
 ;}
 
-define float @test_ds_float(i8* %arg, i32 signext %arg1) {
+define float @test_ds_float(ptr %arg, i32 signext %arg1) {
 ; CHECK-LABEL: test_ds_float:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    cmpwi r4, 0
@@ -849,19 +809,15 @@ bb2:                                              ; preds = %bb
 bb4:                                              ; preds = %bb4, %bb2
   %i5 = phi i64 [ 0, %bb2 ], [ %i24, %bb4 ]
   %i6 = phi float [ 0.000000e+00, %bb2 ], [ %i23, %bb4 ]
-  %i7 = getelementptr inbounds i8, i8* %arg, i64 %i5
-  %i8 = getelementptr inbounds i8, i8* %i7, i64 4001
-  %i9 = bitcast i8* %i8 to float*
-  %i10 = load float, float* %i9, align 4
-  %i11 = getelementptr inbounds i8, i8* %i7, i64 4002
-  %i12 = bitcast i8* %i11 to float*
-  %i13 = load float, float* %i12, align 4
-  %i14 = getelementptr inbounds i8, i8* %i7, i64 4022
-  %i15 = bitcast i8* %i14 to float*
-  %i16 = load float, float* %i15, align 4
-  %i17 = getelementptr inbounds i8, i8* %i7, i64 4062
-  %i18 = bitcast i8* %i17 to float*
-  %i19 = load float, float* %i18, align 4
+  %i7 = getelementptr inbounds i8, ptr %arg, i64 %i5
+  %i8 = getelementptr inbounds i8, ptr %i7, i64 4001
+  %i10 = load float, ptr %i8, align 4
+  %i11 = getelementptr inbounds i8, ptr %i7, i64 4002
+  %i13 = load float, ptr %i11, align 4
+  %i14 = getelementptr inbounds i8, ptr %i7, i64 4022
+  %i16 = load float, ptr %i14, align 4
+  %i17 = getelementptr inbounds i8, ptr %i7, i64 4062
+  %i19 = load float, ptr %i17, align 4
   %i20 = fmul float %i10, %i13
   %i21 = fmul float %i20, %i16
   %i22 = fmul float %i21, %i19
@@ -893,7 +849,7 @@ bb26:                                             ; preds = %bb4, %bb
 ;  return res;
 ;}
 
-define float @test_ds_combine_float_int(i8* %arg, i32 signext %arg1) {
+define float @test_ds_combine_float_int(ptr %arg, i32 signext %arg1) {
 ; CHECK-LABEL: test_ds_combine_float_int:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    cmpwi r4, 0
@@ -934,19 +890,15 @@ bb2:                                              ; preds = %bb
 bb4:                                              ; preds = %bb4, %bb2
   %i5 = phi i64 [ 0, %bb2 ], [ %i25, %bb4 ]
   %i6 = phi float [ 0.000000e+00, %bb2 ], [ %i24, %bb4 ]
-  %i7 = getelementptr inbounds i8, i8* %arg, i64 %i5
-  %i8 = getelementptr inbounds i8, i8* %i7, i64 4001
-  %i9 = bitcast i8* %i8 to float*
-  %i10 = load float, float* %i9, align 4
-  %i11 = getelementptr inbounds i8, i8* %i7, i64 4002
-  %i12 = bitcast i8* %i11 to i64*
-  %i13 = load i64, i64* %i12, align 8
-  %i14 = getelementptr inbounds i8, i8* %i7, i64 4022
-  %i15 = bitcast i8* %i14 to float*
-  %i16 = load float, float* %i15, align 4
-  %i17 = getelementptr inbounds i8, i8* %i7, i64 4062
-  %i18 = bitcast i8* %i17 to float*
-  %i19 = load float, float* %i18, align 4
+  %i7 = getelementptr inbounds i8, ptr %arg, i64 %i5
+  %i8 = getelementptr inbounds i8, ptr %i7, i64 4001
+  %i10 = load float, ptr %i8, align 4
+  %i11 = getelementptr inbounds i8, ptr %i7, i64 4002
+  %i13 = load i64, ptr %i11, align 8
+  %i14 = getelementptr inbounds i8, ptr %i7, i64 4022
+  %i16 = load float, ptr %i14, align 4
+  %i17 = getelementptr inbounds i8, ptr %i7, i64 4062
+  %i19 = load float, ptr %i17, align 4
   %i20 = uitofp i64 %i13 to float
   %i21 = fmul float %i10, %i20
   %i22 = fmul float %i16, %i21
@@ -978,7 +930,7 @@ bb27:                                             ; preds = %bb4, %bb
 ;   return res + count;
 ; }
 
-define i64 @test_ds_lwa_prep(i8* %arg, i32 signext %arg1) {
+define i64 @test_ds_lwa_prep(ptr %arg, i32 signext %arg1) {
 ; CHECK-LABEL: test_ds_lwa_prep:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    cmpwi r4, 0
@@ -1014,22 +966,18 @@ bb:
 bb3:                                              ; preds = %bb3, %bb
   %i4 = phi i64 [ %i27, %bb3 ], [ 0, %bb ]
   %i5 = phi i64 [ %i26, %bb3 ], [ 0, %bb ]
-  %i6 = getelementptr inbounds i8, i8* %arg, i64 %i4
-  %i7 = getelementptr inbounds i8, i8* %i6, i64 1
-  %i8 = bitcast i8* %i7 to i32*
-  %i9 = load i32, i32* %i8, align 4
+  %i6 = getelementptr inbounds i8, ptr %arg, i64 %i4
+  %i7 = getelementptr inbounds i8, ptr %i6, i64 1
+  %i9 = load i32, ptr %i7, align 4
   %i10 = sext i32 %i9 to i64
-  %i11 = getelementptr inbounds i8, i8* %i6, i64 2
-  %i12 = bitcast i8* %i11 to i32*
-  %i13 = load i32, i32* %i12, align 4
+  %i11 = getelementptr inbounds i8, ptr %i6, i64 2
+  %i13 = load i32, ptr %i11, align 4
   %i14 = sext i32 %i13 to i64
-  %i15 = getelementptr inbounds i8, i8* %i6, i64 6
-  %i16 = bitcast i8* %i15 to i32*
-  %i17 = load i32, i32* %i16, align 4
+  %i15 = getelementptr inbounds i8, ptr %i6, i64 6
+  %i17 = load i32, ptr %i15, align 4
   %i18 = sext i32 %i17 to i64
-  %i19 = getelementptr inbounds i8, i8* %i6, i64 10
-  %i20 = bitcast i8* %i19 to i32*
-  %i21 = load i32, i32* %i20, align 4
+  %i19 = getelementptr inbounds i8, ptr %i6, i64 10
+  %i21 = load i32, ptr %i19, align 4
   %i22 = sext i32 %i21 to i64
   %i23 = mul nsw i64 %i14, %i10
   %i24 = mul nsw i64 %i23, %i18

diff --git a/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll b/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll
index 7c2464a1ec60..94beed98d245 100644
--- a/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll
+++ b/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 -verify-machineinstrs -mcpu=pwr10 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr10 < %s | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-p:64:64-n32:64-v256:256:256-v512:512:512"
 target triple = "powerpc64le-unknown-linux-gnu"
@@ -7,7 +7,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
 %_elem_type_of_a = type <{ double }>
 %_elem_type_of_x = type <{ double }>
 
-define void @foo(i32* %.m, i32* %.n, [0 x %_elem_type_of_a]* %.a, [0 x %_elem_type_of_x]* %.x, i32* %.l, <2 x double>* %.vy01, <2 x double>* %.vy02, <2 x double>* %.vy03, <2 x double>* %.vy04, <2 x double>* %.vy05, <2 x double>* %.vy06, <2 x double>* %.vy07, <2 x double>* %.vy08, <2 x double>* %.vy09, <2 x double>* %.vy0a, <2 x double>* %.vy0b, <2 x double>* %.vy0c, <2 x double>* %.vy21, <2 x double>* %.vy22, <2 x double>* %.vy23, <2 x double>* %.vy24, <2 x double>* %.vy25, <2 x double>* %.vy26, <2 x double>* %.vy27, <2 x double>* %.vy28, <2 x double>* %.vy29, <2 x double>* %.vy2a, <2 x double>* %.vy2b, <2 x double>* %.vy2c) {
+define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.vy02, ptr %.vy03, ptr %.vy04, ptr %.vy05, ptr %.vy06, ptr %.vy07, ptr %.vy08, ptr %.vy09, ptr %.vy0a, ptr %.vy0b, ptr %.vy0c, ptr %.vy21, ptr %.vy22, ptr %.vy23, ptr %.vy24, ptr %.vy25, ptr %.vy26, ptr %.vy27, ptr %.vy28, ptr %.vy29, ptr %.vy2a, ptr %.vy2b, ptr %.vy2c) {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stdu 1, -592(1)
@@ -92,122 +92,120 @@ define void @foo(i32* %.m, i32* %.n, [0 x %_elem_type_of_a]* %.a, [0 x %_elem_ty
 ; CHECK-NEXT:    cmpwi 3, 1
 ; CHECK-NEXT:    blt 0, .LBB0_7
 ; CHECK-NEXT:  # %bb.2: # %_loop_1_do_.preheader
-; CHECK-NEXT:    mr 21, 5
+; CHECK-NEXT:    mr 22, 5
 ; CHECK-NEXT:    ld 5, 848(1)
-; CHECK-NEXT:    lwa 0, 0(7)
+; CHECK-NEXT:    ld 28, 824(1)
+; CHECK-NEXT:    mr 11, 7
+; CHECK-NEXT:    mr 18, 6
 ; CHECK-NEXT:    addi 3, 3, 1
-; CHECK-NEXT:    mr 14, 7
-; CHECK-NEXT:    mr 22, 6
-; CHECK-NEXT:    mr 11, 10
-; CHECK-NEXT:    ld 18, 736(1)
-; CHECK-NEXT:    ld 17, 728(1)
-; CHECK-NEXT:    std 8, 32(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 9, 40(1) # 8-byte Folded Spill
-; CHECK-NEXT:    cmpldi 3, 9
-; CHECK-NEXT:    ld 19, 744(1)
-; CHECK-NEXT:    ld 20, 752(1)
+; CHECK-NEXT:    std 8, 48(1) # 8-byte Folded Spill
+; CHECK-NEXT:    std 9, 56(1) # 8-byte Folded Spill
+; CHECK-NEXT:    ld 6, 712(1)
+; CHECK-NEXT:    ld 23, 688(1)
+; CHECK-NEXT:    std 10, 64(1) # 8-byte Folded Spill
+; CHECK-NEXT:    std 6, 72(1) # 8-byte Folded Spill
 ; CHECK-NEXT:    std 5, 200(1) # 8-byte Folded Spill
 ; CHECK-NEXT:    ld 5, 840(1)
-; CHECK-NEXT:    std 17, 80(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 18, 88(1) # 8-byte Folded Spill
-; CHECK-NEXT:    lxv 36, 0(18)
-; CHECK-NEXT:    std 19, 96(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 20, 104(1) # 8-byte Folded Spill
-; CHECK-NEXT:    lxv 13, 0(19)
-; CHECK-NEXT:    lxv 12, 0(20)
-; CHECK-NEXT:    ld 30, 832(1)
-; CHECK-NEXT:    ld 2, 824(1)
-; CHECK-NEXT:    ld 12, 816(1)
-; CHECK-NEXT:    ld 29, 808(1)
-; CHECK-NEXT:    std 2, 176(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 30, 184(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 29, 160(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 12, 168(1) # 8-byte Folded Spill
+; CHECK-NEXT:    cmpldi 3, 9
+; CHECK-NEXT:    ld 19, 768(1)
+; CHECK-NEXT:    ld 2, 760(1)
+; CHECK-NEXT:    lxv 33, 0(6)
+; CHECK-NEXT:    ld 21, 784(1)
+; CHECK-NEXT:    ld 20, 776(1)
+; CHECK-NEXT:    std 2, 112(1) # 8-byte Folded Spill
+; CHECK-NEXT:    std 19, 120(1) # 8-byte Folded Spill
+; CHECK-NEXT:    lxv 10, 0(19)
+; CHECK-NEXT:    ld 7, 728(1)
+; CHECK-NEXT:    std 20, 128(1) # 8-byte Folded Spill
+; CHECK-NEXT:    std 21, 136(1) # 8-byte Folded Spill
+; CHECK-NEXT:    lxv 7, 0(21)
+; CHECK-NEXT:    ld 27, 816(1)
+; CHECK-NEXT:    std 27, 168(1) # 8-byte Folded Spill
 ; CHECK-NEXT:    std 5, 192(1) # 8-byte Folded Spill
-; CHECK-NEXT:    li 5, 9
-; CHECK-NEXT:    ld 28, 800(1)
-; CHECK-NEXT:    ld 27, 792(1)
-; CHECK-NEXT:    ld 26, 784(1)
-; CHECK-NEXT:    ld 25, 776(1)
-; CHECK-NEXT:    ld 24, 768(1)
-; CHECK-NEXT:    ld 23, 760(1)
-; CHECK-NEXT:    ld 16, 720(1)
-; CHECK-NEXT:    ld 15, 712(1)
-; CHECK-NEXT:    ld 6, 704(1)
-; CHECK-NEXT:    ld 7, 696(1)
-; CHECK-NEXT:    ld 10, 688(1)
-; CHECK-NEXT:    lxv 43, 0(8)
-; CHECK-NEXT:    std 11, 48(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 6, 56(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 27, 144(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 28, 152(1) # 8-byte Folded Spill
-; CHECK-NEXT:    mr 8, 7
-; CHECK-NEXT:    std 25, 128(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 26, 136(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 15, 64(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 16, 72(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 23, 112(1) # 8-byte Folded Spill
-; CHECK-NEXT:    std 24, 120(1) # 8-byte Folded Spill
-; CHECK-NEXT:    iselgt 3, 3, 5
-; CHECK-NEXT:    sldi 5, 0, 3
-; CHECK-NEXT:    add 5, 5, 21
+; CHECK-NEXT:    ld 5, 832(1)
+; CHECK-NEXT:    ld 26, 808(1)
+; CHECK-NEXT:    ld 25, 800(1)
+; CHECK-NEXT:    ld 24, 792(1)
+; CHECK-NEXT:    ld 17, 752(1)
+; CHECK-NEXT:    ld 16, 744(1)
+; CHECK-NEXT:    ld 15, 736(1)
+; CHECK-NEXT:    ld 29, 704(1)
+; CHECK-NEXT:    ld 30, 720(1)
 ; CHECK-NEXT:    lxv 42, 0(9)
-; CHECK-NEXT:    lxv 41, 0(11)
-; CHECK-NEXT:    lxv 40, 0(10)
-; CHECK-NEXT:    lxv 39, 0(7)
-; CHECK-NEXT:    mulli 11, 0, 48
-; CHECK-NEXT:    addi 14, 5, 32
-; CHECK-NEXT:    sldi 5, 0, 4
+; CHECK-NEXT:    std 7, 80(1) # 8-byte Folded Spill
+; CHECK-NEXT:    std 15, 88(1) # 8-byte Folded Spill
+; CHECK-NEXT:    lxv 37, 0(7)
+; CHECK-NEXT:    lxv 43, 0(8)
+; CHECK-NEXT:    mr 8, 29
+; CHECK-NEXT:    std 26, 160(1) # 8-byte Folded Spill
+; CHECK-NEXT:    std 16, 96(1) # 8-byte Folded Spill
+; CHECK-NEXT:    std 17, 104(1) # 8-byte Folded Spill
+; CHECK-NEXT:    std 24, 144(1) # 8-byte Folded Spill
+; CHECK-NEXT:    std 25, 152(1) # 8-byte Folded Spill
+; CHECK-NEXT:    std 5, 184(1) # 8-byte Folded Spill
+; CHECK-NEXT:    std 28, 176(1) # 8-byte Folded Spill
+; CHECK-NEXT:    ld 5, 696(1)
+; CHECK-NEXT:    lxv 41, 0(10)
+; CHECK-NEXT:    lxv 40, 0(23)
+; CHECK-NEXT:    lxv 38, 0(29)
+; CHECK-NEXT:    lxv 32, 0(30)
+; CHECK-NEXT:    lxv 36, 0(15)
+; CHECK-NEXT:    lxv 13, 0(16)
+; CHECK-NEXT:    lxv 12, 0(17)
+; CHECK-NEXT:    lxv 11, 0(2)
+; CHECK-NEXT:    lxv 9, 0(20)
+; CHECK-NEXT:    lxv 5, 0(24)
+; CHECK-NEXT:    lxv 4, 0(25)
+; CHECK-NEXT:    mr 10, 30
+; CHECK-NEXT:    std 5, 32(1) # 8-byte Folded Spill
+; CHECK-NEXT:    std 23, 40(1) # 8-byte Folded Spill
+; CHECK-NEXT:    lwa 5, 0(11)
+; CHECK-NEXT:    li 11, 9
+; CHECK-NEXT:    ld 7, 184(1) # 8-byte Folded Reload
+; CHECK-NEXT:    lxv 2, 0(26)
+; CHECK-NEXT:    lxv 0, 0(27)
+; CHECK-NEXT:    lxv 1, 0(28)
+; CHECK-NEXT:    li 28, 1
+; CHECK-NEXT:    li 27, 0
+; CHECK-NEXT:    ld 9, 32(1) # 8-byte Folded Reload
+; CHECK-NEXT:    lxv 3, 0(7)
+; CHECK-NEXT:    iselgt 3, 3, 11
 ; CHECK-NEXT:    addi 3, 3, -2
-; CHECK-NEXT:    lxv 38, 0(6)
-; CHECK-NEXT:    lxv 33, 0(15)
-; CHECK-NEXT:    lxv 32, 0(16)
-; CHECK-NEXT:    lxv 37, 0(17)
-; CHECK-NEXT:    add 5, 5, 21
-; CHECK-NEXT:    lxv 11, 0(23)
-; CHECK-NEXT:    lxv 10, 0(24)
-; CHECK-NEXT:    lxv 8, 0(25)
-; CHECK-NEXT:    lxv 6, 0(26)
-; CHECK-NEXT:    rldicl 3, 3, 61, 3
-; CHECK-NEXT:    li 26, 0
-; CHECK-NEXT:    mr 25, 21
-; CHECK-NEXT:    addi 31, 5, 32
-; CHECK-NEXT:    mulli 5, 0, 40
-; CHECK-NEXT:    lxv 5, 0(27)
-; CHECK-NEXT:    lxv 3, 0(28)
-; CHECK-NEXT:    lxv 1, 0(29)
-; CHECK-NEXT:    lxv 0, 0(12)
-; CHECK-NEXT:    mulli 28, 0, 6
-; CHECK-NEXT:    addi 3, 3, 1
-; CHECK-NEXT:    li 27, 1
-; CHECK-NEXT:    add 18, 21, 5
-; CHECK-NEXT:    sldi 5, 0, 5
-; CHECK-NEXT:    lxv 2, 0(2)
-; CHECK-NEXT:    lxv 4, 0(30)
-; CHECK-NEXT:    sldi 2, 0, 1
-; CHECK-NEXT:    add 19, 21, 5
-; CHECK-NEXT:    mulli 5, 0, 24
-; CHECK-NEXT:    add 20, 21, 5
+; CHECK-NEXT:    mulli 6, 5, 40
+; CHECK-NEXT:    sldi 0, 5, 4
+; CHECK-NEXT:    extswsli 14, 5, 3
+; CHECK-NEXT:    rldicl 12, 3, 61, 3
+; CHECK-NEXT:    lxv 39, 0(9)
+; CHECK-NEXT:    add 31, 14, 22
+; CHECK-NEXT:    add 11, 0, 22
+; CHECK-NEXT:    mr 26, 22
+; CHECK-NEXT:    addi 3, 11, 32
+; CHECK-NEXT:    addi 11, 12, 1
+; CHECK-NEXT:    mulli 12, 5, 48
+; CHECK-NEXT:    addi 31, 31, 32
+; CHECK-NEXT:    add 19, 22, 6
+; CHECK-NEXT:    sldi 6, 5, 5
+; CHECK-NEXT:    mulli 5, 5, 24
+; CHECK-NEXT:    add 20, 22, 6
+; CHECK-NEXT:    add 21, 22, 5
 ; CHECK-NEXT:    ld 5, 192(1) # 8-byte Folded Reload
-; CHECK-NEXT:    lxv 9, 0(5)
+; CHECK-NEXT:    lxv 8, 0(5)
 ; CHECK-NEXT:    ld 5, 200(1) # 8-byte Folded Reload
-; CHECK-NEXT:    lxv 7, 0(5)
+; CHECK-NEXT:    lxv 6, 0(5)
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_3: # %_loop_2_do_.lr.ph
 ; CHECK-NEXT:    # =>This Loop Header: Depth=1
 ; CHECK-NEXT:    # Child Loop BB0_4 Depth 2
-; CHECK-NEXT:    maddld 5, 28, 26, 2
-; CHECK-NEXT:    mr 6, 22
-; CHECK-NEXT:    mr 29, 20
-; CHECK-NEXT:    mr 30, 19
-; CHECK-NEXT:    mr 12, 18
-; CHECK-NEXT:    mtctr 3
-; CHECK-NEXT:    sldi 5, 5, 3
-; CHECK-NEXT:    add 24, 21, 5
-; CHECK-NEXT:    maddld 5, 28, 26, 0
-; CHECK-NEXT:    sldi 5, 5, 3
-; CHECK-NEXT:    add 23, 21, 5
-; CHECK-NEXT:    mr 5, 25
+; CHECK-NEXT:    maddld 5, 12, 27, 0
+; CHECK-NEXT:    mr 6, 18
+; CHECK-NEXT:    mr 29, 21
+; CHECK-NEXT:    mr 30, 20
+; CHECK-NEXT:    mr 2, 19
+; CHECK-NEXT:    mtctr 11
+; CHECK-NEXT:    add 25, 22, 5
+; CHECK-NEXT:    maddld 5, 12, 27, 14
+; CHECK-NEXT:    add 24, 22, 5
+; CHECK-NEXT:    mr 5, 26
 ; CHECK-NEXT:    .p2align 5
 ; CHECK-NEXT:  .LBB0_4: # %_loop_2_do_
 ; CHECK-NEXT:    # Parent Loop BB0_3 Depth=1
@@ -215,19 +213,19 @@ define void @foo(i32* %.m, i32* %.n, [0 x %_elem_type_of_a]* %.a, [0 x %_elem_ty
 ; CHECK-NEXT:    lxvp 34, 0(6)
 ; CHECK-NEXT:    lxvp 44, 0(5)
 ; CHECK-NEXT:    xvmaddadp 43, 45, 35
-; CHECK-NEXT:    lxvp 46, 0(23)
+; CHECK-NEXT:    lxvp 46, 0(24)
 ; CHECK-NEXT:    xvmaddadp 42, 47, 35
-; CHECK-NEXT:    lxvp 48, 0(24)
+; CHECK-NEXT:    lxvp 48, 0(25)
 ; CHECK-NEXT:    lxvp 50, 0(29)
 ; CHECK-NEXT:    lxvp 62, 0(30)
-; CHECK-NEXT:    lxvp 60, 0(12)
+; CHECK-NEXT:    lxvp 60, 0(2)
 ; CHECK-NEXT:    lxvp 58, 32(6)
 ; CHECK-NEXT:    lxvp 56, 32(5)
-; CHECK-NEXT:    lxvp 54, 32(23)
-; CHECK-NEXT:    lxvp 52, 32(24)
+; CHECK-NEXT:    lxvp 54, 32(24)
+; CHECK-NEXT:    lxvp 52, 32(25)
 ; CHECK-NEXT:    lxvp 30, 32(29)
 ; CHECK-NEXT:    lxvp 28, 32(30)
-; CHECK-NEXT:    lxvp 26, 32(12)
+; CHECK-NEXT:    lxvp 26, 32(2)
 ; CHECK-NEXT:    xvmaddadp 41, 49, 35
 ; CHECK-NEXT:    xvmaddadp 40, 51, 35
 ; CHECK-NEXT:    xvmaddadp 39, 63, 35
@@ -240,52 +238,51 @@ define void @foo(i32* %.m, i32* %.n, [0 x %_elem_type_of_a]* %.a, [0 x %_elem_ty
 ; CHECK-NEXT:    xvmaddadp 12, 60, 34
 ; CHECK-NEXT:    xvmaddadp 11, 57, 59
 ; CHECK-NEXT:    xvmaddadp 10, 55, 59
-; CHECK-NEXT:    xvmaddadp 8, 53, 59
-; CHECK-NEXT:    xvmaddadp 6, 31, 59
+; CHECK-NEXT:    xvmaddadp 9, 53, 59
+; CHECK-NEXT:    xvmaddadp 7, 31, 59
 ; CHECK-NEXT:    xvmaddadp 5, 29, 59
-; CHECK-NEXT:    xvmaddadp 3, 27, 59
-; CHECK-NEXT:    xvmaddadp 1, 56, 58
+; CHECK-NEXT:    xvmaddadp 4, 27, 59
+; CHECK-NEXT:    xvmaddadp 2, 56, 58
 ; CHECK-NEXT:    xvmaddadp 0, 54, 58
-; CHECK-NEXT:    xvmaddadp 2, 52, 58
-; CHECK-NEXT:    xvmaddadp 4, 30, 58
-; CHECK-NEXT:    xvmaddadp 9, 28, 58
-; CHECK-NEXT:    xvmaddadp 7, 26, 58
+; CHECK-NEXT:    xvmaddadp 1, 52, 58
+; CHECK-NEXT:    xvmaddadp 3, 30, 58
+; CHECK-NEXT:    xvmaddadp 8, 28, 58
+; CHECK-NEXT:    xvmaddadp 6, 26, 58
 ; CHECK-NEXT:    addi 6, 6, 64
 ; CHECK-NEXT:    addi 5, 5, 64
-; CHECK-NEXT:    addi 23, 23, 64
 ; CHECK-NEXT:    addi 24, 24, 64
+; CHECK-NEXT:    addi 25, 25, 64
 ; CHECK-NEXT:    addi 29, 29, 64
 ; CHECK-NEXT:    addi 30, 30, 64
-; CHECK-NEXT:    addi 12, 12, 64
+; CHECK-NEXT:    addi 2, 2, 64
 ; CHECK-NEXT:    bdnz .LBB0_4
 ; CHECK-NEXT:  # %bb.5: # %_loop_2_endl_
 ; CHECK-NEXT:    #
-; CHECK-NEXT:    addi 27, 27, 6
-; CHECK-NEXT:    add 25, 25, 11
-; CHECK-NEXT:    add 14, 14, 11
-; CHECK-NEXT:    add 18, 18, 11
-; CHECK-NEXT:    add 31, 31, 11
-; CHECK-NEXT:    add 19, 19, 11
-; CHECK-NEXT:    add 20, 20, 11
-; CHECK-NEXT:    addi 26, 26, 1
-; CHECK-NEXT:    cmpld 27, 4
+; CHECK-NEXT:    addi 28, 28, 6
+; CHECK-NEXT:    add 26, 26, 12
+; CHECK-NEXT:    add 31, 31, 12
+; CHECK-NEXT:    add 19, 19, 12
+; CHECK-NEXT:    add 3, 3, 12
+; CHECK-NEXT:    add 20, 20, 12
+; CHECK-NEXT:    add 21, 21, 12
+; CHECK-NEXT:    addi 27, 27, 1
+; CHECK-NEXT:    cmpld 28, 4
 ; CHECK-NEXT:    ble 0, .LBB0_3
 ; CHECK-NEXT:  # %bb.6: # %_loop_1_loopHeader_._return_bb_crit_edge.loopexit
-; CHECK-NEXT:    ld 3, 32(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 43, 0(3)
-; CHECK-NEXT:    ld 3, 40(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 42, 0(3)
 ; CHECK-NEXT:    ld 3, 48(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 41, 0(3)
+; CHECK-NEXT:    stxv 43, 0(3)
 ; CHECK-NEXT:    ld 3, 56(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 40, 0(10)
-; CHECK-NEXT:    stxv 39, 0(8)
-; CHECK-NEXT:    stxv 38, 0(3)
+; CHECK-NEXT:    stxv 42, 0(3)
 ; CHECK-NEXT:    ld 3, 64(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 33, 0(3)
+; CHECK-NEXT:    stxv 41, 0(3)
+; CHECK-NEXT:    ld 3, 40(1) # 8-byte Folded Reload
+; CHECK-NEXT:    stxv 40, 0(3)
 ; CHECK-NEXT:    ld 3, 72(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 32, 0(3)
+; CHECK-NEXT:    stxv 39, 0(9)
+; CHECK-NEXT:    stxv 38, 0(8)
+; CHECK-NEXT:    stxv 33, 0(3)
 ; CHECK-NEXT:    ld 3, 80(1) # 8-byte Folded Reload
+; CHECK-NEXT:    stxv 32, 0(10)
 ; CHECK-NEXT:    stxv 37, 0(3)
 ; CHECK-NEXT:    ld 3, 88(1) # 8-byte Folded Reload
 ; CHECK-NEXT:    stxv 36, 0(3)
@@ -298,25 +295,25 @@ define void @foo(i32* %.m, i32* %.n, [0 x %_elem_type_of_a]* %.a, [0 x %_elem_ty
 ; CHECK-NEXT:    ld 3, 120(1) # 8-byte Folded Reload
 ; CHECK-NEXT:    stxv 10, 0(3)
 ; CHECK-NEXT:    ld 3, 128(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 8, 0(3)
+; CHECK-NEXT:    stxv 9, 0(3)
 ; CHECK-NEXT:    ld 3, 136(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 6, 0(3)
+; CHECK-NEXT:    stxv 7, 0(3)
 ; CHECK-NEXT:    ld 3, 144(1) # 8-byte Folded Reload
 ; CHECK-NEXT:    stxv 5, 0(3)
 ; CHECK-NEXT:    ld 3, 152(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 3, 0(3)
+; CHECK-NEXT:    stxv 4, 0(3)
 ; CHECK-NEXT:    ld 3, 160(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 1, 0(3)
+; CHECK-NEXT:    stxv 2, 0(3)
 ; CHECK-NEXT:    ld 3, 168(1) # 8-byte Folded Reload
 ; CHECK-NEXT:    stxv 0, 0(3)
 ; CHECK-NEXT:    ld 3, 176(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 2, 0(3)
+; CHECK-NEXT:    stxv 1, 0(3)
 ; CHECK-NEXT:    ld 3, 184(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 4, 0(3)
+; CHECK-NEXT:    stxv 3, 0(3)
 ; CHECK-NEXT:    ld 3, 192(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 9, 0(3)
+; CHECK-NEXT:    stxv 8, 0(3)
 ; CHECK-NEXT:    ld 3, 200(1) # 8-byte Folded Reload
-; CHECK-NEXT:    stxv 7, 0(3)
+; CHECK-NEXT:    stxv 6, 0(3)
 ; CHECK-NEXT:  .LBB0_7: # %_return_bb
 ; CHECK-NEXT:    lxv 63, 384(1) # 16-byte Folded Reload
 ; CHECK-NEXT:    lxv 62, 368(1) # 16-byte Folded Reload
@@ -357,47 +354,46 @@ define void @foo(i32* %.m, i32* %.n, [0 x %_elem_type_of_a]* %.a, [0 x %_elem_ty
 ; CHECK-NEXT:    addi 1, 1, 592
 ; CHECK-NEXT:    blr
 entry:
-  %_val_l_ = load i32, i32* %.l, align 4
+  %_val_l_ = load i32, ptr %.l, align 4
   %_conv = sext i32 %_val_l_ to i64
   %_mult_tmp = shl nsw i64 %_conv, 3
   %_sub_tmp4 = sub nuw nsw i64 -8, %_mult_tmp
-  %_val_n_ = load i32, i32* %.n, align 4
+  %_val_n_ = load i32, ptr %.n, align 4
   %_leq_tmp.not116 = icmp slt i32 %_val_n_, 1
   br i1 %_leq_tmp.not116, label %_return_bb, label %_loop_1_do_.lr.ph
 
 _loop_1_do_.lr.ph:                                ; preds = %entry
-  %_val_m_ = load i32, i32* %.m, align 4
+  %_val_m_ = load i32, ptr %.m, align 4
   %_leq_tmp6.not114 = icmp slt i32 %_val_m_, 1
   br i1 %_leq_tmp6.not114, label %_return_bb, label %_loop_1_do_.preheader
 
 _loop_1_do_.preheader:                            ; preds = %_loop_1_do_.lr.ph
-  %x_rvo_based_addr_112 = getelementptr inbounds [0 x %_elem_type_of_x], [0 x %_elem_type_of_x]* %.x, i64 0, i64 -1
-  %a_byte_ptr_ = bitcast [0 x %_elem_type_of_a]* %.a to i8*
-  %a_rvo_based_addr_ = getelementptr inbounds i8, i8* %a_byte_ptr_, i64 %_sub_tmp4
-  %.vy01.promoted = load <2 x double>, <2 x double>* %.vy01, align 16
-  %.vy02.promoted = load <2 x double>, <2 x double>* %.vy02, align 16
-  %.vy03.promoted = load <2 x double>, <2 x double>* %.vy03, align 16
-  %.vy04.promoted = load <2 x double>, <2 x double>* %.vy04, align 16
-  %.vy05.promoted = load <2 x double>, <2 x double>* %.vy05, align 16
-  %.vy06.promoted = load <2 x double>, <2 x double>* %.vy06, align 16
-  %.vy07.promoted = load <2 x double>, <2 x double>* %.vy07, align 16
-  %.vy08.promoted = load <2 x double>, <2 x double>* %.vy08, align 16
-  %.vy09.promoted = load <2 x double>, <2 x double>* %.vy09, align 16
-  %.vy0a.promoted = load <2 x double>, <2 x double>* %.vy0a, align 16
-  %.vy0b.promoted = load <2 x double>, <2 x double>* %.vy0b, align 16
-  %.vy0c.promoted = load <2 x double>, <2 x double>* %.vy0c, align 16
-  %.vy21.promoted = load <2 x double>, <2 x double>* %.vy21, align 16
-  %.vy22.promoted = load <2 x double>, <2 x double>* %.vy22, align 16
-  %.vy23.promoted = load <2 x double>, <2 x double>* %.vy23, align 16
-  %.vy24.promoted = load <2 x double>, <2 x double>* %.vy24, align 16
-  %.vy25.promoted = load <2 x double>, <2 x double>* %.vy25, align 16
-  %.vy26.promoted = load <2 x double>, <2 x double>* %.vy26, align 16
-  %.vy27.promoted = load <2 x double>, <2 x double>* %.vy27, align 16
-  %.vy28.promoted = load <2 x double>, <2 x double>* %.vy28, align 16
-  %.vy29.promoted = load <2 x double>, <2 x double>* %.vy29, align 16
-  %.vy2a.promoted = load <2 x double>, <2 x double>* %.vy2a, align 16
-  %.vy2b.promoted = load <2 x double>, <2 x double>* %.vy2b, align 16
-  %.vy2c.promoted = load <2 x double>, <2 x double>* %.vy2c, align 16
+  %x_rvo_based_addr_112 = getelementptr inbounds [0 x %_elem_type_of_x], ptr %.x, i64 0, i64 -1
+  %a_rvo_based_addr_ = getelementptr inbounds i8, ptr %.a, i64 %_sub_tmp4
+  %.vy01.promoted = load <2 x double>, ptr %.vy01, align 16
+  %.vy02.promoted = load <2 x double>, ptr %.vy02, align 16
+  %.vy03.promoted = load <2 x double>, ptr %.vy03, align 16
+  %.vy04.promoted = load <2 x double>, ptr %.vy04, align 16
+  %.vy05.promoted = load <2 x double>, ptr %.vy05, align 16
+  %.vy06.promoted = load <2 x double>, ptr %.vy06, align 16
+  %.vy07.promoted = load <2 x double>, ptr %.vy07, align 16
+  %.vy08.promoted = load <2 x double>, ptr %.vy08, align 16
+  %.vy09.promoted = load <2 x double>, ptr %.vy09, align 16
+  %.vy0a.promoted = load <2 x double>, ptr %.vy0a, align 16
+  %.vy0b.promoted = load <2 x double>, ptr %.vy0b, align 16
+  %.vy0c.promoted = load <2 x double>, ptr %.vy0c, align 16
+  %.vy21.promoted = load <2 x double>, ptr %.vy21, align 16
+  %.vy22.promoted = load <2 x double>, ptr %.vy22, align 16
+  %.vy23.promoted = load <2 x double>, ptr %.vy23, align 16
+  %.vy24.promoted = load <2 x double>, ptr %.vy24, align 16
+  %.vy25.promoted = load <2 x double>, ptr %.vy25, align 16
+  %.vy26.promoted = load <2 x double>, ptr %.vy26, align 16
+  %.vy27.promoted = load <2 x double>, ptr %.vy27, align 16
+  %.vy28.promoted = load <2 x double>, ptr %.vy28, align 16
+  %.vy29.promoted = load <2 x double>, ptr %.vy29, align 16
+  %.vy2a.promoted = load <2 x double>, ptr %.vy2a, align 16
+  %.vy2b.promoted = load <2 x double>, ptr %.vy2b, align 16
+  %.vy2c.promoted = load <2 x double>, ptr %.vy2c, align 16
   %i = zext i32 %_val_m_ to i64
   %i1 = zext i32 %_val_n_ to i64
   br label %_loop_2_do_.lr.ph
@@ -429,22 +425,22 @@ _loop_2_do_.lr.ph:                                ; preds = %_loop_2_endl_, %_lo
   %i24 = phi <2 x double> [ %i73, %_loop_2_endl_ ], [ %.vy02.promoted, %_loop_1_do_.preheader ]
   %i25 = phi <2 x double> [ %i71, %_loop_2_endl_ ], [ %.vy01.promoted, %_loop_1_do_.preheader ]
   %_ix_x_len10 = mul i64 %_mult_tmp, %indvars.iv212
-  %a_ix_dim_0_ = getelementptr inbounds i8, i8* %a_rvo_based_addr_, i64 %_ix_x_len10
+  %a_ix_dim_0_ = getelementptr inbounds i8, ptr %a_rvo_based_addr_, i64 %_ix_x_len10
   %i26 = add nuw nsw i64 %indvars.iv212, 1
   %_ix_x_len24 = mul i64 %_mult_tmp, %i26
-  %a_ix_dim_0_25 = getelementptr inbounds i8, i8* %a_rvo_based_addr_, i64 %_ix_x_len24
+  %a_ix_dim_0_25 = getelementptr inbounds i8, ptr %a_rvo_based_addr_, i64 %_ix_x_len24
   %i27 = add nuw nsw i64 %indvars.iv212, 2
   %_ix_x_len40 = mul i64 %_mult_tmp, %i27
-  %a_ix_dim_0_41 = getelementptr inbounds i8, i8* %a_rvo_based_addr_, i64 %_ix_x_len40
+  %a_ix_dim_0_41 = getelementptr inbounds i8, ptr %a_rvo_based_addr_, i64 %_ix_x_len40
   %i28 = add nuw nsw i64 %indvars.iv212, 3
   %_ix_x_len56 = mul i64 %_mult_tmp, %i28
-  %a_ix_dim_0_57 = getelementptr inbounds i8, i8* %a_rvo_based_addr_, i64 %_ix_x_len56
+  %a_ix_dim_0_57 = getelementptr inbounds i8, ptr %a_rvo_based_addr_, i64 %_ix_x_len56
   %i29 = add nuw nsw i64 %indvars.iv212, 4
   %_ix_x_len72 = mul i64 %_mult_tmp, %i29
-  %a_ix_dim_0_73 = getelementptr inbounds i8, i8* %a_rvo_based_addr_, i64 %_ix_x_len72
+  %a_ix_dim_0_73 = getelementptr inbounds i8, ptr %a_rvo_based_addr_, i64 %_ix_x_len72
   %i30 = add nuw nsw i64 %indvars.iv212, 5
   %_ix_x_len88 = mul i64 %_mult_tmp, %i30
-  %a_ix_dim_0_89 = getelementptr inbounds i8, i8* %a_rvo_based_addr_, i64 %_ix_x_len88
+  %a_ix_dim_0_89 = getelementptr inbounds i8, ptr %a_rvo_based_addr_, i64 %_ix_x_len88
   br label %_loop_2_do_
 
 _loop_2_do_:                                      ; preds = %_loop_2_do_, %_loop_2_do_.lr.ph
@@ -474,21 +470,20 @@ _loop_2_do_:                                      ; preds = %_loop_2_do_, %_loop
   %i53 = phi <2 x double> [ %i24, %_loop_2_do_.lr.ph ], [ %i73, %_loop_2_do_ ]
   %i54 = phi <2 x double> [ %i25, %_loop_2_do_.lr.ph ], [ %i71, %_loop_2_do_ ]
   %_ix_x_len = shl nuw nsw i64 %indvars.iv, 3
-  %x_ix_dim_0_113 = getelementptr inbounds %_elem_type_of_x, %_elem_type_of_x* %x_rvo_based_addr_112, i64 %indvars.iv
-  %x_ix_dim_0_ = bitcast %_elem_type_of_x* %x_ix_dim_0_113 to i8*
-  %i55 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull %x_ix_dim_0_)
-  %a_ix_dim_1_ = getelementptr inbounds i8, i8* %a_ix_dim_0_, i64 %_ix_x_len
-  %i56 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull %a_ix_dim_1_)
-  %a_ix_dim_1_29 = getelementptr inbounds i8, i8* %a_ix_dim_0_25, i64 %_ix_x_len
-  %i57 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull %a_ix_dim_1_29)
-  %a_ix_dim_1_45 = getelementptr inbounds i8, i8* %a_ix_dim_0_41, i64 %_ix_x_len
-  %i58 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull %a_ix_dim_1_45)
-  %a_ix_dim_1_61 = getelementptr inbounds i8, i8* %a_ix_dim_0_57, i64 %_ix_x_len
-  %i59 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull %a_ix_dim_1_61)
-  %a_ix_dim_1_77 = getelementptr inbounds i8, i8* %a_ix_dim_0_73, i64 %_ix_x_len
-  %i60 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull %a_ix_dim_1_77)
-  %a_ix_dim_1_93 = getelementptr inbounds i8, i8* %a_ix_dim_0_89, i64 %_ix_x_len
-  %i61 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull %a_ix_dim_1_93)
+  %x_ix_dim_0_113 = getelementptr inbounds %_elem_type_of_x, ptr %x_rvo_based_addr_112, i64 %indvars.iv
+  %i55 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull %x_ix_dim_0_113)
+  %a_ix_dim_1_ = getelementptr inbounds i8, ptr %a_ix_dim_0_, i64 %_ix_x_len
+  %i56 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull %a_ix_dim_1_)
+  %a_ix_dim_1_29 = getelementptr inbounds i8, ptr %a_ix_dim_0_25, i64 %_ix_x_len
+  %i57 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull %a_ix_dim_1_29)
+  %a_ix_dim_1_45 = getelementptr inbounds i8, ptr %a_ix_dim_0_41, i64 %_ix_x_len
+  %i58 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull %a_ix_dim_1_45)
+  %a_ix_dim_1_61 = getelementptr inbounds i8, ptr %a_ix_dim_0_57, i64 %_ix_x_len
+  %i59 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull %a_ix_dim_1_61)
+  %a_ix_dim_1_77 = getelementptr inbounds i8, ptr %a_ix_dim_0_73, i64 %_ix_x_len
+  %i60 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull %a_ix_dim_1_77)
+  %a_ix_dim_1_93 = getelementptr inbounds i8, ptr %a_ix_dim_0_89, i64 %_ix_x_len
+  %i61 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull %a_ix_dim_1_93)
   %i62 = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> %i55)
   %.fca.0.extract35 = extractvalue { <16 x i8>, <16 x i8> } %i62, 0
   %.fca.1.extract36 = extractvalue { <16 x i8>, <16 x i8> } %i62, 1
@@ -523,21 +518,20 @@ _loop_2_do_:                                      ; preds = %_loop_2_do_, %_loop
   %i79 = tail call contract <2 x double> @llvm.fma.v2f64(<2 x double> %i78, <2 x double> %i70, <2 x double> %i50)
   %i80 = bitcast <16 x i8> %.fca.0.extract to <2 x double>
   %i81 = tail call contract <2 x double> @llvm.fma.v2f64(<2 x double> %i80, <2 x double> %i70, <2 x double> %i49)
-  %i82 = getelementptr %_elem_type_of_x, %_elem_type_of_x* %x_ix_dim_0_113, i64 4
-  %i83 = bitcast %_elem_type_of_x* %i82 to i8*
-  %i84 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %i83)
-  %i85 = getelementptr i8, i8* %a_ix_dim_1_, i64 32
-  %i86 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %i85)
-  %i87 = getelementptr i8, i8* %a_ix_dim_1_29, i64 32
-  %i88 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %i87)
-  %i89 = getelementptr i8, i8* %a_ix_dim_1_45, i64 32
-  %i90 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %i89)
-  %i91 = getelementptr i8, i8* %a_ix_dim_1_61, i64 32
-  %i92 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %i91)
-  %i93 = getelementptr i8, i8* %a_ix_dim_1_77, i64 32
-  %i94 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %i93)
-  %i95 = getelementptr i8, i8* %a_ix_dim_1_93, i64 32
-  %i96 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %i95)
+  %i82 = getelementptr %_elem_type_of_x, ptr %x_ix_dim_0_113, i64 4
+  %i84 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %i82)
+  %i85 = getelementptr i8, ptr %a_ix_dim_1_, i64 32
+  %i86 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %i85)
+  %i87 = getelementptr i8, ptr %a_ix_dim_1_29, i64 32
+  %i88 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %i87)
+  %i89 = getelementptr i8, ptr %a_ix_dim_1_45, i64 32
+  %i90 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %i89)
+  %i91 = getelementptr i8, ptr %a_ix_dim_1_61, i64 32
+  %i92 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %i91)
+  %i93 = getelementptr i8, ptr %a_ix_dim_1_77, i64 32
+  %i94 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %i93)
+  %i95 = getelementptr i8, ptr %a_ix_dim_1_93, i64 32
+  %i96 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %i95)
   %i97 = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> %i84)
   %.fca.0.extract37 = extractvalue { <16 x i8>, <16 x i8> } %i97, 0
   %.fca.1.extract39 = extractvalue { <16 x i8>, <16 x i8> } %i97, 1
@@ -608,37 +602,37 @@ _loop_2_endl_:                                    ; preds = %_loop_2_do_
   br i1 %_leq_tmp.not, label %_loop_1_loopHeader_._return_bb_crit_edge.loopexit, label %_loop_2_do_.lr.ph
 
 _loop_1_loopHeader_._return_bb_crit_edge.loopexit: ; preds = %_loop_2_endl_
-  store <2 x double> %i71, <2 x double>* %.vy01, align 16
-  store <2 x double> %i73, <2 x double>* %.vy02, align 16
-  store <2 x double> %i75, <2 x double>* %.vy03, align 16
-  store <2 x double> %i77, <2 x double>* %.vy04, align 16
-  store <2 x double> %i79, <2 x double>* %.vy05, align 16
-  store <2 x double> %i81, <2 x double>* %.vy06, align 16
-  store <2 x double> %i106, <2 x double>* %.vy07, align 16
-  store <2 x double> %i108, <2 x double>* %.vy08, align 16
-  store <2 x double> %i110, <2 x double>* %.vy09, align 16
-  store <2 x double> %i112, <2 x double>* %.vy0a, align 16
-  store <2 x double> %i114, <2 x double>* %.vy0b, align 16
-  store <2 x double> %i116, <2 x double>* %.vy0c, align 16
-  store <2 x double> %i119, <2 x double>* %.vy21, align 16
-  store <2 x double> %i121, <2 x double>* %.vy22, align 16
-  store <2 x double> %i123, <2 x double>* %.vy23, align 16
-  store <2 x double> %i125, <2 x double>* %.vy24, align 16
-  store <2 x double> %i127, <2 x double>* %.vy25, align 16
-  store <2 x double> %i129, <2 x double>* %.vy26, align 16
-  store <2 x double> %i132, <2 x double>* %.vy27, align 16
-  store <2 x double> %i134, <2 x double>* %.vy28, align 16
-  store <2 x double> %i136, <2 x double>* %.vy29, align 16
-  store <2 x double> %i138, <2 x double>* %.vy2a, align 16
-  store <2 x double> %i140, <2 x double>* %.vy2b, align 16
-  store <2 x double> %i142, <2 x double>* %.vy2c, align 16
+  store <2 x double> %i71, ptr %.vy01, align 16
+  store <2 x double> %i73, ptr %.vy02, align 16
+  store <2 x double> %i75, ptr %.vy03, align 16
+  store <2 x double> %i77, ptr %.vy04, align 16
+  store <2 x double> %i79, ptr %.vy05, align 16
+  store <2 x double> %i81, ptr %.vy06, align 16
+  store <2 x double> %i106, ptr %.vy07, align 16
+  store <2 x double> %i108, ptr %.vy08, align 16
+  store <2 x double> %i110, ptr %.vy09, align 16
+  store <2 x double> %i112, ptr %.vy0a, align 16
+  store <2 x double> %i114, ptr %.vy0b, align 16
+  store <2 x double> %i116, ptr %.vy0c, align 16
+  store <2 x double> %i119, ptr %.vy21, align 16
+  store <2 x double> %i121, ptr %.vy22, align 16
+  store <2 x double> %i123, ptr %.vy23, align 16
+  store <2 x double> %i125, ptr %.vy24, align 16
+  store <2 x double> %i127, ptr %.vy25, align 16
+  store <2 x double> %i129, ptr %.vy26, align 16
+  store <2 x double> %i132, ptr %.vy27, align 16
+  store <2 x double> %i134, ptr %.vy28, align 16
+  store <2 x double> %i136, ptr %.vy29, align 16
+  store <2 x double> %i138, ptr %.vy2a, align 16
+  store <2 x double> %i140, ptr %.vy2b, align 16
+  store <2 x double> %i142, ptr %.vy2c, align 16
   br label %_return_bb
 
 _return_bb:                                       ; preds = %_loop_1_loopHeader_._return_bb_crit_edge.loopexit, %_loop_1_do_.lr.ph, %entry
   ret void
 }
 
-declare <256 x i1> @llvm.ppc.vsx.lxvp(i8*)
+declare <256 x i1> @llvm.ppc.vsx.lxvp(ptr)
 declare { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1>)
 declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
 