[llvm] r230786 - [opaque pointer type] Add textual IR support for explicit type parameter to getelementptr instruction

David Blaikie dblaikie at gmail.com
Fri Feb 27 11:29:18 PST 2015
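
For context, this commit migrates the textual IR so that getelementptr names the
pointee type as an explicit first argument instead of deriving it from the pointer
operand, as part of the opaque pointer type work. A minimal sketch of the syntax
change (the %p, %s and %idx names here are illustrative only, not taken from the
tests below):

  ; old form: pointee type implied by the i32* / [4 x i32]* operand
  %q  = getelementptr inbounds i32* %p, i64 %idx
  %e  = getelementptr inbounds [4 x i32]* %s, i64 0, i64 %idx

  ; new form: pointee type stated explicitly, followed by the pointer operand
  %q  = getelementptr inbounds i32, i32* %p, i64 %idx
  %e  = getelementptr inbounds [4 x i32], [4 x i32]* %s, i64 0, i64 %idx

The mechanical rewrites in the test diffs below apply exactly this transformation,
including to the CHECK lines that match getelementptr output.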


Modified: llvm/trunk/test/Transforms/LoopVectorize/tbaa-nodep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/tbaa-nodep.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/tbaa-nodep.ll (original)
+++ llvm/trunk/test/Transforms/LoopVectorize/tbaa-nodep.ll Fri Feb 27 13:29:02 2015
@@ -10,10 +10,10 @@ entry:
 
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
+  %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
   %0 = load float* %arrayidx, align 4, !tbaa !0
   %conv = fptosi float %0 to i32
-  %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+  %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
   store i32 %conv, i32* %arrayidx2, align 4, !tbaa !4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 1600
@@ -51,13 +51,13 @@ entry:
 
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
+  %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
   %0 = load float* %arrayidx, align 4, !tbaa !0
-  %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+  %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
   %1 = load i32* %arrayidx2, align 4, !tbaa !4
   %conv = sitofp i32 %1 to float
   %mul = fmul float %0, %conv
-  %arrayidx4 = getelementptr inbounds float* %c, i64 %indvars.iv
+  %arrayidx4 = getelementptr inbounds float, float* %c, i64 %indvars.iv
   store float %mul, float* %arrayidx4, align 4, !tbaa !0
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 1600

Modified: llvm/trunk/test/Transforms/LoopVectorize/undef-inst-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/undef-inst-bug.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/undef-inst-bug.ll (original)
+++ llvm/trunk/test/Transforms/LoopVectorize/undef-inst-bug.ll Fri Feb 27 13:29:02 2015
@@ -24,7 +24,7 @@ for.body:
   ; Loop invariant anchored in loop.
   %idxprom21 = zext i32 undef to i64
 
-  %arrayidx23 = getelementptr inbounds [100 x [100 x i32]]* undef, i64 0, i64 %idxprom21, i64 %indvars.iv17
+  %arrayidx23 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* undef, i64 0, i64 %idxprom21, i64 %indvars.iv17
   store i32 undef, i32* %arrayidx23, align 4
   %indvars.next= add i64 %indvars.iv17, -1
   %0 = trunc i64 %indvars.next to i32

Modified: llvm/trunk/test/Transforms/LoopVectorize/unroll_novec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/unroll_novec.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/unroll_novec.ll (original)
+++ llvm/trunk/test/Transforms/LoopVectorize/unroll_novec.ll Fri Feb 27 13:29:02 2015
@@ -33,7 +33,7 @@ define void @inc(i32 %n) nounwind uwtabl
 
 .lr.ph:                                           ; preds = %0, %.lr.ph
   %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
-  %2 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+  %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
   %3 = load i32* %2, align 4
   %4 = trunc i64 %indvars.iv to i32
   %5 = add nsw i32 %3, %4

Modified: llvm/trunk/test/Transforms/LoopVectorize/unsized-pointee-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/unsized-pointee-crash.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/unsized-pointee-crash.ll (original)
+++ llvm/trunk/test/Transforms/LoopVectorize/unsized-pointee-crash.ll Fri Feb 27 13:29:02 2015
@@ -11,10 +11,10 @@ for.body:
   %b.05 = phi i32 (...)* [ undef, %entry ], [ %1, %for.body ]
   %a.04 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %0 = bitcast i32 (...)* %b.05 to i8*
-  %add.ptr = getelementptr i8* %0, i64 1
+  %add.ptr = getelementptr i8, i8* %0, i64 1
   %1 = bitcast i8* %add.ptr to i32 (...)*
 ; CHECK:      %[[cst:.*]] = bitcast i32 (...)* {{.*}} to i8*
-; CHECK-NEXT: %[[gep:.*]] = getelementptr i8* %[[cst]], i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr i8, i8* %[[cst]], i64 1
   %inc = add nsw i32 %a.04, 1
   %exitcond = icmp eq i32 %a.04, 63
   br i1 %exitcond, label %for.end, label %for.body

Modified: llvm/trunk/test/Transforms/LoopVectorize/value-ptr-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/value-ptr-bug.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/value-ptr-bug.ll (original)
+++ llvm/trunk/test/Transforms/LoopVectorize/value-ptr-bug.ll Fri Feb 27 13:29:02 2015
@@ -19,8 +19,8 @@ entry:
 loop:
   %inc = phi i32 [ %sub267, %entry ], [ %add, %loop]
   %ext.inc = sext i32 %inc to i64
-  %add.ptr265 = getelementptr inbounds i32* %ptr265, i64 %ext.inc
-  %add.ptr266 = getelementptr inbounds i32* %ptr266, i64 %ext.inc
+  %add.ptr265 = getelementptr inbounds i32, i32* %ptr265, i64 %ext.inc
+  %add.ptr266 = getelementptr inbounds i32, i32* %ptr266, i64 %ext.inc
   %add = add i32 %inc, 9
   %cmp = icmp slt i32 %add, 140
   br i1 %cmp, label %block1, label %loop
@@ -37,9 +37,9 @@ do.body272:
   %row_width.5 = phi i32 [ %sub267.lcssa, %block1 ], [ %dec, %do.body272 ]
   %sp.4 = phi i8* [ %tmp30, %block1 ], [ %incdec.ptr273, %do.body272 ]
   %dp.addr.4 = phi i8* [ %tmp29, %block1 ], [ %incdec.ptr274, %do.body272 ]
-  %incdec.ptr273 = getelementptr inbounds i8* %sp.4, i64 1
+  %incdec.ptr273 = getelementptr inbounds i8, i8* %sp.4, i64 1
   %tmp31 = load i8* %sp.4, align 1
-  %incdec.ptr274 = getelementptr inbounds i8* %dp.addr.4, i64 1
+  %incdec.ptr274 = getelementptr inbounds i8, i8* %dp.addr.4, i64 1
   store i8 %tmp31, i8* %dp.addr.4, align 1
   %dec = add i32 %row_width.5, -1
   %cmp276 = icmp eq i32 %dec, 0

Modified: llvm/trunk/test/Transforms/LoopVectorize/vect.omp.persistence.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/vect.omp.persistence.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/vect.omp.persistence.ll (original)
+++ llvm/trunk/test/Transforms/LoopVectorize/vect.omp.persistence.ll Fri Feb 27 13:29:02 2015
@@ -45,13 +45,13 @@ for.header:
 for.body:
 
   %0 = add nsw i64 %indvars.iv, -5
-  %arrayidx = getelementptr inbounds float* %a, i64 %0
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %0
   %1 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
   %2 = add nsw i64 %indvars.iv, 2
-  %arrayidx2 = getelementptr inbounds float* %a, i64 %2
+  %arrayidx2 = getelementptr inbounds float, float* %a, i64 %2
   %3 = load float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
   %mul = fmul float %1, %3
-  %arrayidx4 = getelementptr inbounds float* %a, i64 %indvars.iv
+  %arrayidx4 = getelementptr inbounds float, float* %a, i64 %indvars.iv
   store float %mul, float* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !1
 
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1

Modified: llvm/trunk/test/Transforms/LoopVectorize/vect.stats.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/vect.stats.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/vect.stats.ll (original)
+++ llvm/trunk/test/Transforms/LoopVectorize/vect.stats.ll Fri Feb 27 13:29:02 2015
@@ -20,7 +20,7 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %indvars.iv2 = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
-  %arrayidx = getelementptr inbounds float* %a, i64 %indvars.iv2
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv2
   %0 = load float* %arrayidx, align 4
   %mul = fmul float %0, %0
   store float %mul, float* %arrayidx, align 4
@@ -42,13 +42,13 @@ entry:
 for.body:                                         ; preds = %entry, %for.body
   %indvars.iv2 = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
   %0 = add nsw i64 %indvars.iv2, -5
-  %arrayidx = getelementptr inbounds float* %a, i64 %0
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %0
   %1 = load float* %arrayidx, align 4
   %2 = add nsw i64 %indvars.iv2, 2
-  %arrayidx2 = getelementptr inbounds float* %a, i64 %2
+  %arrayidx2 = getelementptr inbounds float, float* %a, i64 %2
   %3 = load float* %arrayidx2, align 4
   %mul = fmul float %1, %3
-  %arrayidx4 = getelementptr inbounds float* %a, i64 %indvars.iv2
+  %arrayidx4 = getelementptr inbounds float, float* %a, i64 %indvars.iv2
   store float %mul, float* %arrayidx4, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv2, 1
   %cmp2 = icmp sgt i64 %indvars.iv.next, %size

Modified: llvm/trunk/test/Transforms/LoopVectorize/vectorize-once.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/vectorize-once.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/vectorize-once.ll (original)
+++ llvm/trunk/test/Transforms/LoopVectorize/vectorize-once.ll Fri Feb 27 13:29:02 2015
@@ -22,7 +22,7 @@ target triple = "x86_64-apple-macosx10.8
 define i32 @_Z4foo1Pii(i32* %A, i32 %n) #0 {
 entry:
   %idx.ext = sext i32 %n to i64
-  %add.ptr = getelementptr inbounds i32* %A, i64 %idx.ext
+  %add.ptr = getelementptr inbounds i32, i32* %A, i64 %idx.ext
   %cmp3.i = icmp eq i32 %n, 0
   br i1 %cmp3.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i
 
@@ -31,7 +31,7 @@ for.body.i:
   %__first.addr.04.i = phi i32* [ %incdec.ptr.i, %for.body.i ], [ %A, %entry ]
   %0 = load i32* %__first.addr.04.i, align 4
   %add.i = add nsw i32 %0, %__init.addr.05.i
-  %incdec.ptr.i = getelementptr inbounds i32* %__first.addr.04.i, i64 1
+  %incdec.ptr.i = getelementptr inbounds i32, i32* %__first.addr.04.i, i64 1
   %cmp.i = icmp eq i32* %incdec.ptr.i, %add.ptr
   br i1 %cmp.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i
 
@@ -48,7 +48,7 @@ _ZSt10accumulateIPiiET0_T_S2_S1_.exit:
 define i32 @_Z4foo2Pii(i32* %A, i32 %n) #0 {
 entry:
   %idx.ext = sext i32 %n to i64
-  %add.ptr = getelementptr inbounds i32* %A, i64 %idx.ext
+  %add.ptr = getelementptr inbounds i32, i32* %A, i64 %idx.ext
   %cmp3.i = icmp eq i32 %n, 0
   br i1 %cmp3.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i
 
@@ -57,7 +57,7 @@ for.body.i:
   %__first.addr.04.i = phi i32* [ %incdec.ptr.i, %for.body.i ], [ %A, %entry ]
   %0 = load i32* %__first.addr.04.i, align 4
   %add.i = add nsw i32 %0, %__init.addr.05.i
-  %incdec.ptr.i = getelementptr inbounds i32* %__first.addr.04.i, i64 1
+  %incdec.ptr.i = getelementptr inbounds i32, i32* %__first.addr.04.i, i64 1
   %cmp.i = icmp eq i32* %incdec.ptr.i, %add.ptr
   br i1 %cmp.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i, !llvm.loop !0
 

Modified: llvm/trunk/test/Transforms/LoopVectorize/version-mem-access.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/version-mem-access.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/version-mem-access.ll (original)
+++ llvm/trunk/test/Transforms/LoopVectorize/version-mem-access.ll Fri Feb 27 13:29:02 2015
@@ -34,14 +34,14 @@ for.body:
   %iv.trunc = trunc i64 %indvars.iv to i32
   %mul = mul i32 %iv.trunc, %BStride
   %mul64 = zext i32 %mul to i64
-  %arrayidx = getelementptr inbounds i32* %B, i64 %mul64
+  %arrayidx = getelementptr inbounds i32, i32* %B, i64 %mul64
   %0 = load i32* %arrayidx, align 4
   %mul2 = mul nsw i64 %indvars.iv, %CStride
-  %arrayidx3 = getelementptr inbounds i32* %C, i64 %mul2
+  %arrayidx3 = getelementptr inbounds i32, i32* %C, i64 %mul2
   %1 = load i32* %arrayidx3, align 4
   %mul4 = mul nsw i32 %1, %0
   %mul3 = mul nsw i64 %indvars.iv, %AStride
-  %arrayidx7 = getelementptr inbounds i32* %A, i64 %mul3
+  %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %mul3
   store i32 %mul4, i32* %arrayidx7, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -76,9 +76,9 @@ for.body:
   %0 = trunc i64 %indvars.iv to i32
   %mul = mul nsw i32 %0, %conv
   %idxprom = sext i32 %mul to i64
-  %arrayidx = getelementptr inbounds double* %x, i64 %idxprom
+  %arrayidx = getelementptr inbounds double, double* %x, i64 %idxprom
   %1 = load double* %arrayidx, align 8
-  %arrayidx3 = getelementptr inbounds double* %c, i64 %indvars.iv
+  %arrayidx3 = getelementptr inbounds double, double* %c, i64 %indvars.iv
   store double %1, double* %arrayidx3, align 8
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32

Modified: llvm/trunk/test/Transforms/LoopVectorize/write-only.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/write-only.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/write-only.ll (original)
+++ llvm/trunk/test/Transforms/LoopVectorize/write-only.ll Fri Feb 27 13:29:02 2015
@@ -12,7 +12,7 @@ define i32 @read_mod_write_single_ptr(fl
 
 .lr.ph:                                           ; preds = %0, %.lr.ph
   %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
-  %2 = getelementptr inbounds float* %a, i64 %indvars.iv
+  %2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
   %3 = load float* %2, align 4
   %4 = fmul float %3, 3.000000e+00
   store float %4, float* %2, align 4

Modified: llvm/trunk/test/Transforms/LowerBitSets/simple.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LowerBitSets/simple.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LowerBitSets/simple.ll (original)
+++ llvm/trunk/test/Transforms/LowerBitSets/simple.ll Fri Feb 27 13:29:02 2015
@@ -60,7 +60,7 @@ define i1 @foo(i32* %p) {
   ; CHECK: br i1 [[R6]]
 
   ; CHECK: [[R8:%[^ ]*]] = lshr i32 [[R5]], 5
-  ; CHECK: [[R9:%[^ ]*]] = getelementptr i32* bitcast ([9 x i8]* @bitset1.bits to i32*), i32 [[R8]]
+  ; CHECK: [[R9:%[^ ]*]] = getelementptr i32, i32* bitcast ([9 x i8]* @bitset1.bits to i32*), i32 [[R8]]
   ; CHECK: [[R10:%[^ ]*]] = load i32* [[R9]]
   ; CHECK: [[R11:%[^ ]*]] = and i32 [[R5]], 31
   ; CHECK: [[R12:%[^ ]*]] = shl i32 1, [[R11]]
@@ -106,7 +106,7 @@ define i1 @baz(i32* %p) {
   ; CHECK: br i1 [[T6]]
 
   ; CHECK: [[T8:%[^ ]*]] = lshr i32 [[T5]], 5
-  ; CHECK: [[T9:%[^ ]*]] = getelementptr i32* bitcast ([9 x i8]* @bitset3.bits to i32*), i32 [[T8]]
+  ; CHECK: [[T9:%[^ ]*]] = getelementptr i32, i32* bitcast ([9 x i8]* @bitset3.bits to i32*), i32 [[T8]]
   ; CHECK: [[T10:%[^ ]*]] = load i32* [[T9]]
   ; CHECK: [[T11:%[^ ]*]] = and i32 [[T5]], 31
   ; CHECK: [[T12:%[^ ]*]] = shl i32 1, [[T11]]

Modified: llvm/trunk/test/Transforms/Mem2Reg/2005-06-30-ReadBeforeWrite.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Mem2Reg/2005-06-30-ReadBeforeWrite.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/Mem2Reg/2005-06-30-ReadBeforeWrite.ll (original)
+++ llvm/trunk/test/Transforms/Mem2Reg/2005-06-30-ReadBeforeWrite.ll Fri Feb 27 13:29:02 2015
@@ -32,7 +32,7 @@ then:		; preds = %no_exit
 	%tmp.9 = load i8** %p_addr		; <i8*> [#uses=1]
 	%tmp.10 = load i32* %i		; <i32> [#uses=1]
 	%tmp.11 = sub i32 %tmp.10, 1		; <i32> [#uses=1]
-	%tmp.12 = getelementptr i8* %tmp.9, i32 %tmp.11		; <i8*> [#uses=1]
+	%tmp.12 = getelementptr i8, i8* %tmp.9, i32 %tmp.11		; <i8*> [#uses=1]
 	%tmp.13 = load i32* %out		; <i32> [#uses=1]
 	%tmp.14 = trunc i32 %tmp.13 to i8		; <i8> [#uses=1]
 	store i8 %tmp.14, i8* %tmp.12

Modified: llvm/trunk/test/Transforms/Mem2Reg/ignore-lifetime.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Mem2Reg/ignore-lifetime.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/Mem2Reg/ignore-lifetime.ll (original)
+++ llvm/trunk/test/Transforms/Mem2Reg/ignore-lifetime.ll Fri Feb 27 13:29:02 2015
@@ -18,7 +18,7 @@ define void @test2() {
 ; CHECK: test2
 ; CHECK-NOT: alloca
   %A = alloca {i8, i16}
-  %B = getelementptr {i8, i16}* %A, i32 0, i32 0
+  %B = getelementptr {i8, i16}, {i8, i16}* %A, i32 0, i32 0
   call void @llvm.lifetime.start(i64 2, i8* %B)
   store {i8, i16} zeroinitializer, {i8, i16}* %A
   call void @llvm.lifetime.end(i64 2, i8* %B)

Modified: llvm/trunk/test/Transforms/MemCpyOpt/2008-02-24-MultipleUseofSRet.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/2008-02-24-MultipleUseofSRet.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/2008-02-24-MultipleUseofSRet.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/2008-02-24-MultipleUseofSRet.ll Fri Feb 27 13:29:02 2015
@@ -8,9 +8,9 @@ target triple = "i386-pc-linux-gnu"
 
 define internal fastcc void @initialize(%0* noalias nocapture sret %agg.result) nounwind {
 entry:
-  %agg.result.03 = getelementptr %0* %agg.result, i32 0, i32 0
+  %agg.result.03 = getelementptr %0, %0* %agg.result, i32 0, i32 0
   store x86_fp80 0xK00000000000000000000, x86_fp80* %agg.result.03
-  %agg.result.15 = getelementptr %0* %agg.result, i32 0, i32 1
+  %agg.result.15 = getelementptr %0, %0* %agg.result, i32 0, i32 1
   store x86_fp80 0xK00000000000000000000, x86_fp80* %agg.result.15
   ret void
 }

Modified: llvm/trunk/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll Fri Feb 27 13:29:02 2015
@@ -14,7 +14,7 @@ entry:
   %a_i8 = bitcast %a* %a_var to i8*
   %b_i8 = bitcast %b* %b_var to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b_i8, i8* %a_i8, i32 4, i32 1, i1 false)
-  %tmp1 = getelementptr %b* %b_var, i32 0, i32 0
+  %tmp1 = getelementptr %b, %b* %b_var, i32 0, i32 0
   %tmp2 = load float* %tmp1
   ret float %tmp2
 }

Modified: llvm/trunk/test/Transforms/MemCpyOpt/2011-06-02-CallSlotOverwritten.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/2011-06-02-CallSlotOverwritten.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/2011-06-02-CallSlotOverwritten.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/2011-06-02-CallSlotOverwritten.ll Fri Feb 27 13:29:02 2015
@@ -17,9 +17,9 @@ define i32 @foo() nounwind {
   call void @bar(%struct1* sret %x) nounwind
 ; CHECK: call void @bar(%struct1* sret %x)
 
-  %gepn1 = getelementptr inbounds %struct2* %y, i32 0, i32 0, i32 0
+  %gepn1 = getelementptr inbounds %struct2, %struct2* %y, i32 0, i32 0, i32 0
   store i32 0, i32* %gepn1, align 8
-  %gepn2 = getelementptr inbounds %struct2* %y, i32 0, i32 0, i32 1
+  %gepn2 = getelementptr inbounds %struct2, %struct2* %y, i32 0, i32 0, i32 1
   store i32 0, i32* %gepn2, align 4
 
   %bit1 = bitcast %struct1* %x to i64*
@@ -30,7 +30,7 @@ define i32 @foo() nounwind {
 ; CHECK: %load = load i64* %bit1, align 8
 ; CHECK: store i64 %load, i64* %bit2, align 8
 
-  %gep1 = getelementptr %struct2* %y, i32 0, i32 0, i32 0
+  %gep1 = getelementptr %struct2, %struct2* %y, i32 0, i32 0, i32 0
   %ret = load i32* %gep1
   ret i32 %ret
 }

Modified: llvm/trunk/test/Transforms/MemCpyOpt/align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/align.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/align.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/align.ll Fri Feb 27 13:29:02 2015
@@ -10,13 +10,13 @@ declare void @llvm.memset.p0i8.i64(i8* n
 define void @foo(i32* %p) {
 ; CHECK-LABEL: @foo(
 ; CHECK: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 0, i64 16, i32 4, i1 false)
-  %a0 = getelementptr i32* %p, i64 0
+  %a0 = getelementptr i32, i32* %p, i64 0
   store i32 0, i32* %a0, align 4
-  %a1 = getelementptr i32* %p, i64 1
+  %a1 = getelementptr i32, i32* %p, i64 1
   store i32 0, i32* %a1, align 16
-  %a2 = getelementptr i32* %p, i64 2
+  %a2 = getelementptr i32, i32* %p, i64 2
   store i32 0, i32* %a2, align 4
-  %a3 = getelementptr i32* %p, i64 3
+  %a3 = getelementptr i32, i32* %p, i64 3
   store i32 0, i32* %a3, align 4
   ret void
 }

Modified: llvm/trunk/test/Transforms/MemCpyOpt/atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/atomic.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/atomic.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/atomic.ll Fri Feb 27 13:29:02 2015
@@ -16,9 +16,9 @@ define void @test1() nounwind uwtable ss
   %x = alloca [101 x i32], align 16
   %bc = bitcast [101 x i32]* %x to i8*
   call void @llvm.memset.p0i8.i64(i8* %bc, i8 0, i64 400, i32 16, i1 false)
-  %gep1 = getelementptr inbounds [101 x i32]* %x, i32 0, i32 100
+  %gep1 = getelementptr inbounds [101 x i32], [101 x i32]* %x, i32 0, i32 100
   store atomic i32 0, i32* %gep1 unordered, align 4
-  %gep2 = getelementptr inbounds [101 x i32]* %x, i32 0, i32 0
+  %gep2 = getelementptr inbounds [101 x i32], [101 x i32]* %x, i32 0, i32 0
   call void @otherf(i32* %gep2)
   ret void
 }

Modified: llvm/trunk/test/Transforms/MemCpyOpt/callslot_deref.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/callslot_deref.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/callslot_deref.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/callslot_deref.ll Fri Feb 27 13:29:02 2015
@@ -10,7 +10,7 @@ define void @must_remove_memcpy(i8* noal
 ; CHECK: call void @llvm.memset.p0i8.i64
 ; CHECK-NOT: call void @llvm.memcpy.p0i8.p0i8.i64
   %src = alloca [4096 x i8], align 1
-  %p = getelementptr inbounds [4096 x i8]* %src, i64 0, i64 0
+  %p = getelementptr inbounds [4096 x i8], [4096 x i8]* %src, i64 0, i64 0
   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 4096, i32 1, i1 false)
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %p, i64 4096, i32 1, i1 false) #2
   ret void
@@ -22,7 +22,7 @@ define void @must_not_remove_memcpy(i8*
 ; CHECK: call void @llvm.memset.p0i8.i64
 ; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
   %src = alloca [4096 x i8], align 1
-  %p = getelementptr inbounds [4096 x i8]* %src, i64 0, i64 0
+  %p = getelementptr inbounds [4096 x i8], [4096 x i8]* %src, i64 0, i64 0
   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 4096, i32 1, i1 false)
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %p, i64 4096, i32 1, i1 false) #2
   ret void

Modified: llvm/trunk/test/Transforms/MemCpyOpt/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/crash.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/crash.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/crash.ll Fri Feb 27 13:29:02 2015
@@ -9,37 +9,37 @@ target triple = "armv7-eabi"
 ; PR4882
 define void @test1(%struct.bar* %this) {
 entry:
-  %0 = getelementptr inbounds %struct.bar* %this, i32 0, i32 0, i32 0, i32 0
+  %0 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 0, i32 0, i32 0
   store float 0.000000e+00, float* %0, align 4
-  %1 = getelementptr inbounds %struct.bar* %this, i32 0, i32 0, i32 0, i32 1
+  %1 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 0, i32 0, i32 1
   store float 0.000000e+00, float* %1, align 4
-  %2 = getelementptr inbounds %struct.bar* %this, i32 0, i32 0, i32 0, i32 2
+  %2 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 0, i32 0, i32 2
   store float 0.000000e+00, float* %2, align 4
-  %3 = getelementptr inbounds %struct.bar* %this, i32 0, i32 0, i32 0, i32 3
+  %3 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 0, i32 0, i32 3
   store float 0.000000e+00, float* %3, align 4
-  %4 = getelementptr inbounds %struct.bar* %this, i32 0, i32 1, i32 0, i32 0
+  %4 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 1, i32 0, i32 0
   store float 0.000000e+00, float* %4, align 4
-  %5 = getelementptr inbounds %struct.bar* %this, i32 0, i32 1, i32 0, i32 1
+  %5 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 1, i32 0, i32 1
   store float 0.000000e+00, float* %5, align 4
-  %6 = getelementptr inbounds %struct.bar* %this, i32 0, i32 1, i32 0, i32 2
+  %6 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 1, i32 0, i32 2
   store float 0.000000e+00, float* %6, align 4
-  %7 = getelementptr inbounds %struct.bar* %this, i32 0, i32 1, i32 0, i32 3
+  %7 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 1, i32 0, i32 3
   store float 0.000000e+00, float* %7, align 4
-  %8 = getelementptr inbounds %struct.bar* %this, i32 0, i32 3, i32 0, i32 1
+  %8 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 3, i32 0, i32 1
   store float 0.000000e+00, float* %8, align 4
-  %9 = getelementptr inbounds %struct.bar* %this, i32 0, i32 3, i32 0, i32 2
+  %9 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 3, i32 0, i32 2
   store float 0.000000e+00, float* %9, align 4
-  %10 = getelementptr inbounds %struct.bar* %this, i32 0, i32 3, i32 0, i32 3
+  %10 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 3, i32 0, i32 3
   store float 0.000000e+00, float* %10, align 4
-  %11 = getelementptr inbounds %struct.bar* %this, i32 0, i32 4, i32 0, i32 0
+  %11 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 4, i32 0, i32 0
   store float 0.000000e+00, float* %11, align 4
-  %12 = getelementptr inbounds %struct.bar* %this, i32 0, i32 4, i32 0, i32 1
+  %12 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 4, i32 0, i32 1
   store float 0.000000e+00, float* %12, align 4
-  %13 = getelementptr inbounds %struct.bar* %this, i32 0, i32 4, i32 0, i32 2
+  %13 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 4, i32 0, i32 2
   store float 0.000000e+00, float* %13, align 4
-  %14 = getelementptr inbounds %struct.bar* %this, i32 0, i32 4, i32 0, i32 3
+  %14 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 4, i32 0, i32 3
   store float 0.000000e+00, float* %14, align 4
-  %15 = getelementptr inbounds %struct.bar* %this, i32 0, i32 5
+  %15 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 5
   store float 0.000000e+00, float* %15, align 4
   unreachable
 }

Modified: llvm/trunk/test/Transforms/MemCpyOpt/form-memset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/form-memset.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/form-memset.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/form-memset.ll Fri Feb 27 13:29:02 2015
@@ -8,43 +8,43 @@ target triple = "i386-apple-darwin8"
 define void @test1(i8 signext  %c) nounwind  {
 entry:
 	%x = alloca [19 x i8]		; <[19 x i8]*> [#uses=20]
-	%tmp = getelementptr [19 x i8]* %x, i32 0, i32 0		; <i8*> [#uses=1]
+	%tmp = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 0		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp, align 1
-	%tmp5 = getelementptr [19 x i8]* %x, i32 0, i32 1		; <i8*> [#uses=1]
+	%tmp5 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 1		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp5, align 1
-	%tmp9 = getelementptr [19 x i8]* %x, i32 0, i32 2		; <i8*> [#uses=1]
+	%tmp9 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 2		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp9, align 1
-	%tmp13 = getelementptr [19 x i8]* %x, i32 0, i32 3		; <i8*> [#uses=1]
+	%tmp13 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 3		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp13, align 1
-	%tmp17 = getelementptr [19 x i8]* %x, i32 0, i32 4		; <i8*> [#uses=1]
+	%tmp17 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 4		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp17, align 1
-	%tmp21 = getelementptr [19 x i8]* %x, i32 0, i32 5		; <i8*> [#uses=1]
+	%tmp21 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 5		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp21, align 1
-	%tmp25 = getelementptr [19 x i8]* %x, i32 0, i32 6		; <i8*> [#uses=1]
+	%tmp25 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 6		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp25, align 1
-	%tmp29 = getelementptr [19 x i8]* %x, i32 0, i32 7		; <i8*> [#uses=1]
+	%tmp29 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 7		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp29, align 1
-	%tmp33 = getelementptr [19 x i8]* %x, i32 0, i32 8		; <i8*> [#uses=1]
+	%tmp33 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 8		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp33, align 1
-	%tmp37 = getelementptr [19 x i8]* %x, i32 0, i32 9		; <i8*> [#uses=1]
+	%tmp37 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 9		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp37, align 1
-	%tmp41 = getelementptr [19 x i8]* %x, i32 0, i32 10		; <i8*> [#uses=1]
+	%tmp41 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 10		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp41, align 1
-	%tmp45 = getelementptr [19 x i8]* %x, i32 0, i32 11		; <i8*> [#uses=1]
+	%tmp45 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 11		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp45, align 1
-	%tmp49 = getelementptr [19 x i8]* %x, i32 0, i32 12		; <i8*> [#uses=1]
+	%tmp49 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 12		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp49, align 1
-	%tmp53 = getelementptr [19 x i8]* %x, i32 0, i32 13		; <i8*> [#uses=1]
+	%tmp53 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 13		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp53, align 1
-	%tmp57 = getelementptr [19 x i8]* %x, i32 0, i32 14		; <i8*> [#uses=1]
+	%tmp57 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 14		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp57, align 1
-	%tmp61 = getelementptr [19 x i8]* %x, i32 0, i32 15		; <i8*> [#uses=1]
+	%tmp61 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 15		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp61, align 1
-	%tmp65 = getelementptr [19 x i8]* %x, i32 0, i32 16		; <i8*> [#uses=1]
+	%tmp65 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 16		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp65, align 1
-	%tmp69 = getelementptr [19 x i8]* %x, i32 0, i32 17		; <i8*> [#uses=1]
+	%tmp69 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 17		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp69, align 1
-	%tmp73 = getelementptr [19 x i8]* %x, i32 0, i32 18		; <i8*> [#uses=1]
+	%tmp73 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 18		; <i8*> [#uses=1]
 	store i8 %c, i8* %tmp73, align 1
 	%tmp76 = call i32 (...)* @bar( [19 x i8]* %x ) nounwind
 	ret void
@@ -65,88 +65,88 @@ entry:
 	%ref_idx = alloca [8 x i8]		; <[8 x i8]*> [#uses=8]
 	%left_mvd = alloca [8 x %struct.MV]		; <[8 x %struct.MV]*> [#uses=17]
 	%up_mvd = alloca [8 x %struct.MV]		; <[8 x %struct.MV]*> [#uses=17]
-	%tmp20 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 7		; <i8*> [#uses=1]
+	%tmp20 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 7		; <i8*> [#uses=1]
 	store i8 -1, i8* %tmp20, align 1
-	%tmp23 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 6		; <i8*> [#uses=1]
+	%tmp23 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 6		; <i8*> [#uses=1]
 	store i8 -1, i8* %tmp23, align 1
-	%tmp26 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 5		; <i8*> [#uses=1]
+	%tmp26 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 5		; <i8*> [#uses=1]
 	store i8 -1, i8* %tmp26, align 1
-	%tmp29 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 4		; <i8*> [#uses=1]
+	%tmp29 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 4		; <i8*> [#uses=1]
 	store i8 -1, i8* %tmp29, align 1
-	%tmp32 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 3		; <i8*> [#uses=1]
+	%tmp32 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 3		; <i8*> [#uses=1]
 	store i8 -1, i8* %tmp32, align 1
-	%tmp35 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 2		; <i8*> [#uses=1]
+	%tmp35 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 2		; <i8*> [#uses=1]
 	store i8 -1, i8* %tmp35, align 1
-	%tmp38 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 1		; <i8*> [#uses=1]
+	%tmp38 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 1		; <i8*> [#uses=1]
 	store i8 -1, i8* %tmp38, align 1
-	%tmp41 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 0		; <i8*> [#uses=2]
+	%tmp41 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 0		; <i8*> [#uses=2]
 	store i8 -1, i8* %tmp41, align 1
-	%tmp43 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 0		; <i16*> [#uses=1]
+	%tmp43 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp43, align 2
-	%tmp46 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 1		; <i16*> [#uses=1]
+	%tmp46 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp46, align 2
-	%tmp57 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 0		; <i16*> [#uses=1]
+	%tmp57 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp57, align 2
-	%tmp60 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 1		; <i16*> [#uses=1]
+	%tmp60 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp60, align 2
-	%tmp71 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 0		; <i16*> [#uses=1]
+	%tmp71 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp71, align 2
-	%tmp74 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 1		; <i16*> [#uses=1]
+	%tmp74 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp74, align 2
-	%tmp85 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 4, i32 0		; <i16*> [#uses=1]
+	%tmp85 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 4, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp85, align 2
-	%tmp88 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 4, i32 1		; <i16*> [#uses=1]
+	%tmp88 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 4, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp88, align 2
-	%tmp99 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 3, i32 0		; <i16*> [#uses=1]
+	%tmp99 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 3, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp99, align 2
-	%tmp102 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 3, i32 1		; <i16*> [#uses=1]
+	%tmp102 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 3, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp102, align 2
-	%tmp113 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 2, i32 0		; <i16*> [#uses=1]
+	%tmp113 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 2, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp113, align 2
-	%tmp116 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 2, i32 1		; <i16*> [#uses=1]
+	%tmp116 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 2, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp116, align 2
-	%tmp127 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 1, i32 0		; <i16*> [#uses=1]
+	%tmp127 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 1, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp127, align 2
-	%tmp130 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 1, i32 1		; <i16*> [#uses=1]
+	%tmp130 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 1, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp130, align 2
-	%tmp141 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 0, i32 0		; <i16*> [#uses=1]
+	%tmp141 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 0, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp141, align 8
-	%tmp144 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 0, i32 1		; <i16*> [#uses=1]
+	%tmp144 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 0, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp144, align 2
-	%tmp148 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 7, i32 0		; <i16*> [#uses=1]
+	%tmp148 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 7, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp148, align 2
-	%tmp151 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 7, i32 1		; <i16*> [#uses=1]
+	%tmp151 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 7, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp151, align 2
-	%tmp162 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 6, i32 0		; <i16*> [#uses=1]
+	%tmp162 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 6, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp162, align 2
-	%tmp165 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 6, i32 1		; <i16*> [#uses=1]
+	%tmp165 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 6, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp165, align 2
-	%tmp176 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 5, i32 0		; <i16*> [#uses=1]
+	%tmp176 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 5, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp176, align 2
-	%tmp179 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 5, i32 1		; <i16*> [#uses=1]
+	%tmp179 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 5, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp179, align 2
-	%tmp190 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 4, i32 0		; <i16*> [#uses=1]
+	%tmp190 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 4, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp190, align 2
-	%tmp193 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 4, i32 1		; <i16*> [#uses=1]
+	%tmp193 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 4, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp193, align 2
-	%tmp204 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 3, i32 0		; <i16*> [#uses=1]
+	%tmp204 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 3, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp204, align 2
-	%tmp207 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 3, i32 1		; <i16*> [#uses=1]
+	%tmp207 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 3, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp207, align 2
-	%tmp218 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 2, i32 0		; <i16*> [#uses=1]
+	%tmp218 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 2, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp218, align 2
-	%tmp221 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 2, i32 1		; <i16*> [#uses=1]
+	%tmp221 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 2, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp221, align 2
-	%tmp232 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 1, i32 0		; <i16*> [#uses=1]
+	%tmp232 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 1, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp232, align 2
-	%tmp235 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 1, i32 1		; <i16*> [#uses=1]
+	%tmp235 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 1, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp235, align 2
-	%tmp246 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 0, i32 0		; <i16*> [#uses=1]
+	%tmp246 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 0, i32 0		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp246, align 8
-	%tmp249 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 0, i32 1		; <i16*> [#uses=1]
+	%tmp249 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 0, i32 1		; <i16*> [#uses=1]
 	store i16 0, i16* %tmp249, align 2
-	%up_mvd252 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 0		; <%struct.MV*> [#uses=1]
-	%left_mvd253 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 0		; <%struct.MV*> [#uses=1]
+	%up_mvd252 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 0		; <%struct.MV*> [#uses=1]
+	%left_mvd253 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 0		; <%struct.MV*> [#uses=1]
 	call void @foo( %struct.MV* %up_mvd252, %struct.MV* %left_mvd253, i8* %tmp41 ) nounwind 
 	ret void
         
@@ -167,9 +167,9 @@ declare void @foo(%struct.MV*, %struct.M
 ; Store followed by memset.
 define void @test3(i32* nocapture %P) nounwind ssp {
 entry:
-  %arrayidx = getelementptr inbounds i32* %P, i64 1
+  %arrayidx = getelementptr inbounds i32, i32* %P, i64 1
   store i32 0, i32* %arrayidx, align 4
-  %add.ptr = getelementptr inbounds i32* %P, i64 2
+  %add.ptr = getelementptr inbounds i32, i32* %P, i64 2
   %0 = bitcast i32* %add.ptr to i8*
   tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 11, i32 1, i1 false)
   ret void
@@ -182,7 +182,7 @@ entry:
 define void @test4(i32* nocapture %P) nounwind ssp {
 entry:
   store i32 0, i32* %P, align 4
-  %add.ptr = getelementptr inbounds i32* %P, i64 1
+  %add.ptr = getelementptr inbounds i32, i32* %P, i64 1
   %0 = bitcast i32* %add.ptr to i8*
   tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 11, i32 1, i1 false)
   ret void
@@ -196,10 +196,10 @@ declare void @llvm.memset.p0i8.i64(i8* n
 ; Memset followed by store.
 define void @test5(i32* nocapture %P) nounwind ssp {
 entry:
-  %add.ptr = getelementptr inbounds i32* %P, i64 2
+  %add.ptr = getelementptr inbounds i32, i32* %P, i64 2
   %0 = bitcast i32* %add.ptr to i8*
   tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 11, i32 1, i1 false)
-  %arrayidx = getelementptr inbounds i32* %P, i64 1
+  %arrayidx = getelementptr inbounds i32, i32* %P, i64 1
   store i32 0, i32* %arrayidx, align 4
   ret void
 ; CHECK-LABEL: @test5(
@@ -212,7 +212,7 @@ define void @test6(i32* nocapture %P) no
 entry:
   %0 = bitcast i32* %P to i8*
   tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 12, i32 1, i1 false)
-  %add.ptr = getelementptr inbounds i32* %P, i64 3
+  %add.ptr = getelementptr inbounds i32, i32* %P, i64 3
   %1 = bitcast i32* %add.ptr to i8*
   tail call void @llvm.memset.p0i8.i64(i8* %1, i8 0, i64 12, i32 1, i1 false)
   ret void
@@ -224,13 +224,13 @@ entry:
 ; rdar://9892684
 define void @test7(i32* nocapture %c) nounwind optsize {
   store i32 -1, i32* %c, align 4
-  %1 = getelementptr inbounds i32* %c, i32 1
+  %1 = getelementptr inbounds i32, i32* %c, i32 1
   store i32 -1, i32* %1, align 4
-  %2 = getelementptr inbounds i32* %c, i32 2
+  %2 = getelementptr inbounds i32, i32* %c, i32 2
   store i32 -1, i32* %2, align 4
-  %3 = getelementptr inbounds i32* %c, i32 3
+  %3 = getelementptr inbounds i32, i32* %c, i32 3
   store i32 -1, i32* %3, align 4
-  %4 = getelementptr inbounds i32* %c, i32 4
+  %4 = getelementptr inbounds i32, i32* %c, i32 4
   store i32 -1, i32* %4, align 4
 ; CHECK-LABEL: @test7(
 ; CHECK: call void @llvm.memset.p0i8.i64(i8* %5, i8 -1, i64 20, i32 4, i1 false)
@@ -288,10 +288,10 @@ define void @test10(i8* nocapture %P) no
 ; Memset followed by odd store.
 define void @test11(i32* nocapture %P) nounwind ssp {
 entry:
-  %add.ptr = getelementptr inbounds i32* %P, i64 3
+  %add.ptr = getelementptr inbounds i32, i32* %P, i64 3
   %0 = bitcast i32* %add.ptr to i8*
   tail call void @llvm.memset.p0i8.i64(i8* %0, i8 1, i64 11, i32 1, i1 false)
-  %arrayidx = getelementptr inbounds i32* %P, i64 0
+  %arrayidx = getelementptr inbounds i32, i32* %P, i64 0
   %arrayidx.cast = bitcast i32* %arrayidx to i96*
   store i96 310698676526526814092329217, i96* %arrayidx.cast, align 4
   ret void

Modified: llvm/trunk/test/Transforms/MemCpyOpt/loadstore-sret.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/loadstore-sret.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/loadstore-sret.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/loadstore-sret.ll Fri Feb 27 13:29:02 2015
@@ -12,10 +12,10 @@ _ZNSt8auto_ptrIiED1Ev.exit:
   %temp.lvalue = alloca %"class.std::auto_ptr", align 8
 ; CHECK: call void @_Z3barv(%"class.std::auto_ptr"* sret %agg.result)
   call void @_Z3barv(%"class.std::auto_ptr"* sret %temp.lvalue)
-  %tmp.i.i = getelementptr inbounds %"class.std::auto_ptr"* %temp.lvalue, i64 0, i32 0
+  %tmp.i.i = getelementptr inbounds %"class.std::auto_ptr", %"class.std::auto_ptr"* %temp.lvalue, i64 0, i32 0
 ; CHECK-NOT: load
   %tmp2.i.i = load i32** %tmp.i.i, align 8
-  %tmp.i.i4 = getelementptr inbounds %"class.std::auto_ptr"* %agg.result, i64 0, i32 0
+  %tmp.i.i4 = getelementptr inbounds %"class.std::auto_ptr", %"class.std::auto_ptr"* %agg.result, i64 0, i32 0
 ; CHECK-NOT: store
   store i32* %tmp2.i.i, i32** %tmp.i.i4, align 8
 ; CHECK: ret void

Modified: llvm/trunk/test/Transforms/MemCpyOpt/memcpy-to-memset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/memcpy-to-memset.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/memcpy-to-memset.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/memcpy-to-memset.ll Fri Feb 27 13:29:02 2015
@@ -9,7 +9,7 @@ define void @test1() nounwind {
   %arr = alloca [3 x i32], align 4
   %arr_i8 = bitcast [3 x i32]* %arr to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %arr_i8, i8* bitcast ([3 x i32]* @cst to i8*), i64 12, i32 4, i1 false)
-  %arraydecay = getelementptr inbounds [3 x i32]* %arr, i64 0, i64 0
+  %arraydecay = getelementptr inbounds [3 x i32], [3 x i32]* %arr, i64 0, i64 0
   call void @foo(i32* %arraydecay) nounwind
   ret void
 ; CHECK-LABEL: @test1(

Modified: llvm/trunk/test/Transforms/MemCpyOpt/memcpy-undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/memcpy-undef.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/memcpy-undef.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/memcpy-undef.ll Fri Feb 27 13:29:02 2015
@@ -7,12 +7,12 @@ target triple = "x86_64-apple-macosx10.8
 
 define i32 @test1(%struct.foo* nocapture %foobie) nounwind noinline ssp uwtable {
   %bletch.sroa.1 = alloca [7 x i8], align 1
-  %1 = getelementptr inbounds %struct.foo* %foobie, i64 0, i32 0
+  %1 = getelementptr inbounds %struct.foo, %struct.foo* %foobie, i64 0, i32 0
   store i8 98, i8* %1, align 4
-  %2 = getelementptr inbounds %struct.foo* %foobie, i64 0, i32 1, i64 0
-  %3 = getelementptr inbounds [7 x i8]* %bletch.sroa.1, i64 0, i64 0
+  %2 = getelementptr inbounds %struct.foo, %struct.foo* %foobie, i64 0, i32 1, i64 0
+  %3 = getelementptr inbounds [7 x i8], [7 x i8]* %bletch.sroa.1, i64 0, i64 0
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 7, i32 1, i1 false)
-  %4 = getelementptr inbounds %struct.foo* %foobie, i64 0, i32 2
+  %4 = getelementptr inbounds %struct.foo, %struct.foo* %foobie, i64 0, i32 2
   store i32 20, i32* %4, align 4
   ret i32 undef
 

Modified: llvm/trunk/test/Transforms/MemCpyOpt/memcpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/memcpy.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/memcpy.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/memcpy.ll Fri Feb 27 13:29:02 2015
@@ -93,7 +93,7 @@ entry:
   %y = alloca %struct.S, align 16
   %tmp = bitcast %struct.S* %y to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp, i8* bitcast (%struct.S* @sS to i8*), i64 32, i32 16, i1 false)
-  %a = getelementptr %struct.S* %y, i64 0, i32 1, i64 0
+  %a = getelementptr %struct.S, %struct.S* %y, i64 0, i32 1, i64 0
   store i8 4, i8* %a
   call void @test5a(%struct.S* align 16 byval %y)
   ret i32 0
@@ -139,10 +139,10 @@ define void @test8() {
 ; CHECK: test8
 ; CHECK-NOT: memcpy
   %A = tail call i8* @malloc(i32 10)
-  %B = getelementptr inbounds i8* %A, i64 2
+  %B = getelementptr inbounds i8, i8* %A, i64 2
   tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %B, i8* getelementptr inbounds ([7 x i8]* @test8.str, i64 0, i64 0), i32 7, i32 1, i1 false)
   %C = tail call i8* @malloc(i32 10)
-  %D = getelementptr inbounds i8* %C, i64 2
+  %D = getelementptr inbounds i8, i8* %C, i64 2
   tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %D, i8* %B, i32 7, i32 1, i1 false)
   ret void
 ; CHECK: ret void

Modified: llvm/trunk/test/Transforms/MemCpyOpt/memmove.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/memmove.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/memmove.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/memmove.ll Fri Feb 27 13:29:02 2015
@@ -13,7 +13,7 @@ entry:
 
   %malloccall = tail call i8* @malloc(i32 trunc (i64 mul nuw (i64 ptrtoint (i8* getelementptr (i8* null, i32 1) to i64), i64 13) to i32))
   %call3 = bitcast i8* %malloccall to [13 x i8]*
-  %call3.sub = getelementptr inbounds [13 x i8]* %call3, i64 0, i64 0
+  %call3.sub = getelementptr inbounds [13 x i8], [13 x i8]* %call3, i64 0, i64 0
   tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %call3.sub, i8* %src, i64 13, i32 1, i1 false)
   ret i8* %call3.sub
 }
@@ -24,7 +24,7 @@ define void @test2(i8* %P) nounwind {
 entry:
 ; CHECK-LABEL: @test2(
 ; CHECK: call void @llvm.memcpy
-  %add.ptr = getelementptr i8* %P, i64 16
+  %add.ptr = getelementptr i8, i8* %P, i64 16
   tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %P, i8* %add.ptr, i64 16, i32 1, i1 false)
   ret void
 }
@@ -34,7 +34,7 @@ define void @test3(i8* %P) nounwind {
 entry:
 ; CHECK-LABEL: @test3(
 ; CHECK: call void @llvm.memmove
-  %add.ptr = getelementptr i8* %P, i64 16
+  %add.ptr = getelementptr i8, i8* %P, i64 16
   tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %P, i8* %add.ptr, i64 17, i32 1, i1 false)
   ret void
 }

Modified: llvm/trunk/test/Transforms/MemCpyOpt/smaller.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/smaller.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/smaller.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/smaller.ll Fri Feb 27 13:29:02 2015
@@ -21,7 +21,7 @@ entry:
   %agg.tmp = alloca %struct.s, align 4
   store i32 99, i32* getelementptr inbounds (%struct.s* @cell, i32 0, i32 1), align 4
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds (%struct.s* @cell, i32 0, i32 0, i32 0), i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0), i32 11, i32 1, i1 false)
-  %tmp = getelementptr inbounds %struct.s* %agg.tmp, i32 0, i32 0, i32 0
+  %tmp = getelementptr inbounds %struct.s, %struct.s* %agg.tmp, i32 0, i32 0, i32 0
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* getelementptr inbounds (%struct.s* @cell, i32 0, i32 0, i32 0), i32 16, i32 4, i1 false)
   call void @check(%struct.s* byval %agg.tmp)
   ret void

Modified: llvm/trunk/test/Transforms/MemCpyOpt/sret.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MemCpyOpt/sret.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MemCpyOpt/sret.ll (original)
+++ llvm/trunk/test/Transforms/MemCpyOpt/sret.ll Fri Feb 27 13:29:02 2015
@@ -9,12 +9,12 @@ define void @ccosl(%0* noalias sret %agg
 entry:
   %iz = alloca %0
   %memtmp = alloca %0, align 16
-  %tmp1 = getelementptr %0* %z, i32 0, i32 1
+  %tmp1 = getelementptr %0, %0* %z, i32 0, i32 1
   %tmp2 = load x86_fp80* %tmp1, align 16
   %tmp3 = fsub x86_fp80 0xK80000000000000000000, %tmp2
-  %tmp4 = getelementptr %0* %iz, i32 0, i32 1
-  %real = getelementptr %0* %iz, i32 0, i32 0
-  %tmp7 = getelementptr %0* %z, i32 0, i32 0
+  %tmp4 = getelementptr %0, %0* %iz, i32 0, i32 1
+  %real = getelementptr %0, %0* %iz, i32 0, i32 0
+  %tmp7 = getelementptr %0, %0* %z, i32 0, i32 0
   %tmp8 = load x86_fp80* %tmp7, align 16
   store x86_fp80 %tmp3, x86_fp80* %real, align 16
   store x86_fp80 %tmp8, x86_fp80* %tmp4, align 16

Modified: llvm/trunk/test/Transforms/MergeFunc/2011-02-08-RemoveEqual.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MergeFunc/2011-02-08-RemoveEqual.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MergeFunc/2011-02-08-RemoveEqual.ll (original)
+++ llvm/trunk/test/Transforms/MergeFunc/2011-02-08-RemoveEqual.ll Fri Feb 27 13:29:02 2015
@@ -37,12 +37,12 @@ entry:
   store %"struct.kc::impl_casestring__Str"* %_file, %"struct.kc::impl_casestring__Str"** %_file_addr
   store i32 %_line, i32* %_line_addr
   %0 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
-  %1 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %0, i32 0, i32 0
+  %1 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %0, i32 0, i32 0
   call void @_ZN2kc13impl_filelineC2Ev() nounwind
   %2 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
-  %3 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %2, i32 0, i32 0
-  %4 = getelementptr inbounds %"struct.kc::impl_fileline"* %3, i32 0, i32 0
-  %5 = getelementptr inbounds %"struct.kc::impl_abstract_phylum"* %4, i32 0, i32 0
+  %3 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %2, i32 0, i32 0
+  %4 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %3, i32 0, i32 0
+  %5 = getelementptr inbounds %"struct.kc::impl_abstract_phylum", %"struct.kc::impl_abstract_phylum"* %4, i32 0, i32 0
   store i32 (...)** getelementptr inbounds ([13 x i32 (...)*]* @_ZTVN2kc22impl_fileline_FileLineE, i32 0, i32 2), i32 (...)*** %5, align 4
   %6 = load %"struct.kc::impl_casestring__Str"** %_file_addr, align 4
   %7 = icmp eq %"struct.kc::impl_casestring__Str"* %6, null
@@ -63,13 +63,13 @@ bb1:
 
 bb2:                                              ; preds = %bb1, %invcont
   %10 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
-  %11 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %10, i32 0, i32 0
-  %12 = getelementptr inbounds %"struct.kc::impl_fileline"* %11, i32 0, i32 1
+  %11 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %10, i32 0, i32 0
+  %12 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %11, i32 0, i32 1
   %13 = load %"struct.kc::impl_casestring__Str"** %iftmp.99, align 4
   store %"struct.kc::impl_casestring__Str"* %13, %"struct.kc::impl_casestring__Str"** %12, align 4
   %14 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
-  %15 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %14, i32 0, i32 0
-  %16 = getelementptr inbounds %"struct.kc::impl_fileline"* %15, i32 0, i32 2
+  %15 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %14, i32 0, i32 0
+  %16 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %15, i32 0, i32 2
   %17 = load i32* %_line_addr, align 4
   store i32 %17, i32* %16, align 4
   ret void
@@ -106,8 +106,8 @@ entry:
   %"alloca point" = bitcast i32 0 to i32
   store %"struct.kc::impl_fileline"* %this, %"struct.kc::impl_fileline"** %this_addr
   %0 = load %"struct.kc::impl_fileline"** %this_addr, align 4
-  %1 = getelementptr inbounds %"struct.kc::impl_fileline"* %0, i32 0, i32 0
-  %2 = getelementptr inbounds %"struct.kc::impl_abstract_phylum"* %1, i32 0, i32 0
+  %1 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %0, i32 0, i32 0
+  %2 = getelementptr inbounds %"struct.kc::impl_abstract_phylum", %"struct.kc::impl_abstract_phylum"* %1, i32 0, i32 0
   store i32 (...)** getelementptr inbounds ([13 x i32 (...)*]* @_ZTVN2kc13impl_filelineE, i32 0, i32 2), i32 (...)*** %2, align 4
   %3 = trunc i32 0 to i8
   %toBool = icmp ne i8 %3, 0
@@ -131,8 +131,8 @@ entry:
   %"alloca point" = bitcast i32 0 to i32
   store %"struct.kc::impl_fileline"* %this, %"struct.kc::impl_fileline"** %this_addr
   %0 = load %"struct.kc::impl_fileline"** %this_addr, align 4
-  %1 = getelementptr inbounds %"struct.kc::impl_fileline"* %0, i32 0, i32 0
-  %2 = getelementptr inbounds %"struct.kc::impl_abstract_phylum"* %1, i32 0, i32 0
+  %1 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %0, i32 0, i32 0
+  %2 = getelementptr inbounds %"struct.kc::impl_abstract_phylum", %"struct.kc::impl_abstract_phylum"* %1, i32 0, i32 0
   store i32 (...)** getelementptr inbounds ([13 x i32 (...)*]* @_ZTVN2kc13impl_filelineE, i32 0, i32 2), i32 (...)*** %2, align 4
   %3 = trunc i32 0 to i8
   %toBool = icmp ne i8 %3, 0
@@ -163,12 +163,12 @@ entry:
   store %"struct.kc::impl_casestring__Str"* %_file, %"struct.kc::impl_casestring__Str"** %_file_addr
   store i32 %_line, i32* %_line_addr
   %0 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
-  %1 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %0, i32 0, i32 0
+  %1 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %0, i32 0, i32 0
   call void @_ZN2kc13impl_filelineC2Ev() nounwind
   %2 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
-  %3 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %2, i32 0, i32 0
-  %4 = getelementptr inbounds %"struct.kc::impl_fileline"* %3, i32 0, i32 0
-  %5 = getelementptr inbounds %"struct.kc::impl_abstract_phylum"* %4, i32 0, i32 0
+  %3 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %2, i32 0, i32 0
+  %4 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %3, i32 0, i32 0
+  %5 = getelementptr inbounds %"struct.kc::impl_abstract_phylum", %"struct.kc::impl_abstract_phylum"* %4, i32 0, i32 0
   store i32 (...)** getelementptr inbounds ([13 x i32 (...)*]* @_ZTVN2kc22impl_fileline_FileLineE, i32 0, i32 2), i32 (...)*** %5, align 4
   %6 = load %"struct.kc::impl_casestring__Str"** %_file_addr, align 4
   %7 = icmp eq %"struct.kc::impl_casestring__Str"* %6, null
@@ -189,13 +189,13 @@ bb1:
 
 bb2:                                              ; preds = %bb1, %invcont
   %10 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
-  %11 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %10, i32 0, i32 0
-  %12 = getelementptr inbounds %"struct.kc::impl_fileline"* %11, i32 0, i32 1
+  %11 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %10, i32 0, i32 0
+  %12 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %11, i32 0, i32 1
   %13 = load %"struct.kc::impl_casestring__Str"** %iftmp.99, align 4
   store %"struct.kc::impl_casestring__Str"* %13, %"struct.kc::impl_casestring__Str"** %12, align 4
   %14 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
-  %15 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %14, i32 0, i32 0
-  %16 = getelementptr inbounds %"struct.kc::impl_fileline"* %15, i32 0, i32 2
+  %15 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %14, i32 0, i32 0
+  %16 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %15, i32 0, i32 2
   %17 = load i32* %_line_addr, align 4
   store i32 %17, i32* %16, align 4
   ret void

Modified: llvm/trunk/test/Transforms/MergeFunc/address-spaces.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MergeFunc/address-spaces.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MergeFunc/address-spaces.ll (original)
+++ llvm/trunk/test/Transforms/MergeFunc/address-spaces.ll Fri Feb 27 13:29:02 2015
@@ -9,7 +9,7 @@ declare void @foo(i32) nounwind
 define i32 @store_as0(i32* %x) {
 ; CHECK-LABEL: @store_as0(
 ; CHECK: call void @foo(
-  %gep = getelementptr i32* %x, i32 4
+  %gep = getelementptr i32, i32* %x, i32 4
   %y = load i32* %gep
   call void @foo(i32 %y) nounwind
   ret i32 %y
@@ -18,7 +18,7 @@ define i32 @store_as0(i32* %x) {
 define i32 @store_as1(i32 addrspace(1)* %x) {
 ; CHECK-LABEL: @store_as1(
 ; CHECK: call void @foo(
-  %gep = getelementptr i32 addrspace(1)* %x, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %x, i32 4
   %y = load i32 addrspace(1)* %gep
   call void @foo(i32 %y) nounwind
   ret i32 %y
@@ -27,7 +27,7 @@ define i32 @store_as1(i32 addrspace(1)*
 define i32 @store_as2(i32 addrspace(2)* %x) {
 ; CHECK-LABEL: @store_as2(
 ; CHECK: call void @foo(
-  %gep = getelementptr i32 addrspace(2)* %x, i32 4
+  %gep = getelementptr i32, i32 addrspace(2)* %x, i32 4
   %y = load i32 addrspace(2)* %gep
   call void @foo(i32 %y) nounwind
   ret i32 %y

Modified: llvm/trunk/test/Transforms/MergeFunc/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MergeFunc/crash.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MergeFunc/crash.ll (original)
+++ llvm/trunk/test/Transforms/MergeFunc/crash.ll Fri Feb 27 13:29:02 2015
@@ -21,7 +21,7 @@ define internal i32 @func1(i32* %ptr, {
 }
 
 define internal i32 @func10(%.qux.2496* nocapture %this) align 2 {
-  %1 = getelementptr inbounds %.qux.2496* %this, i32 0, i32 1, i32 1
+  %1 = getelementptr inbounds %.qux.2496, %.qux.2496* %this, i32 0, i32 1, i32 1
   %2 = load i32* %1, align 4
   ret i32 %2
 }
@@ -35,12 +35,12 @@ define internal i32* @func33(%.qux.2585*
 }
 
 define internal i32* @func34(%.qux.2585* nocapture %this) align 2 {
-  %1 = getelementptr inbounds %.qux.2585* %this, i32 0
+  %1 = getelementptr inbounds %.qux.2585, %.qux.2585* %this, i32 0
   ret i32* undef
 }
 
 define internal i8* @func35(%.qux.2585* nocapture %this) align 2 {
-  %1 = getelementptr inbounds %.qux.2585* %this, i32 0, i32 2
+  %1 = getelementptr inbounds %.qux.2585, %.qux.2585* %this, i32 0, i32 2
   %2 = load i8** %1, align 4
   ret i8* %2
 }

Modified: llvm/trunk/test/Transforms/MergeFunc/inttoptr-address-space.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MergeFunc/inttoptr-address-space.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MergeFunc/inttoptr-address-space.ll (original)
+++ llvm/trunk/test/Transforms/MergeFunc/inttoptr-address-space.ll Fri Feb 27 13:29:02 2015
@@ -11,7 +11,7 @@ target datalayout = "e-p:32:32:32-p1:16:
 
 define internal i32 @func10(%.qux.2496 addrspace(1)* nocapture %this) align 2 {
 bb:
-  %tmp = getelementptr inbounds %.qux.2496 addrspace(1)* %this, i32 0, i32 1, i32 1
+  %tmp = getelementptr inbounds %.qux.2496, %.qux.2496 addrspace(1)* %this, i32 0, i32 1, i32 1
   %tmp1 = load i32 addrspace(1)* %tmp, align 4
   ret i32 %tmp1
 }
@@ -23,7 +23,7 @@ bb:
 ; CHECK: %[[V2:.+]] = bitcast %.qux.2585 addrspace(1)* %{{.*}} to %.qux.2496 addrspace(1)*
 ; CHECK: %[[V3:.+]] = tail call i32 @func10(%.qux.2496 addrspace(1)* %[[V2]])
 ; CHECK: %{{.*}} = inttoptr i32 %[[V3]] to i8*
-  %tmp = getelementptr inbounds %.qux.2585 addrspace(1)* %this, i32 0, i32 2
+  %tmp = getelementptr inbounds %.qux.2585, %.qux.2585 addrspace(1)* %this, i32 0, i32 2
   %tmp1 = load i8* addrspace(1)* %tmp, align 4
   ret i8* %tmp1
 }

Modified: llvm/trunk/test/Transforms/MergeFunc/inttoptr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MergeFunc/inttoptr.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MergeFunc/inttoptr.ll (original)
+++ llvm/trunk/test/Transforms/MergeFunc/inttoptr.ll Fri Feb 27 13:29:02 2015
@@ -23,7 +23,7 @@ bb2:
 
 define internal i32 @func10(%.qux.2496* nocapture %this) align 2 {
 bb:
-  %tmp = getelementptr inbounds %.qux.2496* %this, i32 0, i32 1, i32 1
+  %tmp = getelementptr inbounds %.qux.2496, %.qux.2496* %this, i32 0, i32 1, i32 1
   %tmp1 = load i32* %tmp, align 4
   ret i32 %tmp1
 }
@@ -40,7 +40,7 @@ bb:
 
 define internal i32* @func34(%.qux.2585* nocapture %this) align 2 {
 bb:
-  %tmp = getelementptr inbounds %.qux.2585* %this, i32 0
+  %tmp = getelementptr inbounds %.qux.2585, %.qux.2585* %this, i32 0
   ret i32* undef
 }
 
@@ -50,7 +50,7 @@ bb:
 ; CHECK: %[[V2:.+]] = bitcast %.qux.2585* %{{.*}} to %.qux.2496*
 ; CHECK: %[[V3:.+]] = tail call i32 @func10(%.qux.2496* %[[V2]])
 ; CHECK: %{{.*}} = inttoptr i32 %[[V3]] to i8*
-  %tmp = getelementptr inbounds %.qux.2585* %this, i32 0, i32 2
+  %tmp = getelementptr inbounds %.qux.2585, %.qux.2585* %this, i32 0, i32 2
   %tmp1 = load i8** %tmp, align 4
   ret i8* %tmp1
 }

Modified: llvm/trunk/test/Transforms/MergeFunc/mergefunc-struct-return.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MergeFunc/mergefunc-struct-return.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MergeFunc/mergefunc-struct-return.ll (original)
+++ llvm/trunk/test/Transforms/MergeFunc/mergefunc-struct-return.ll Fri Feb 27 13:29:02 2015
@@ -13,9 +13,9 @@ declare void @noop()
 define %kv1 @fn1() {
 ; CHECK-LABEL: @fn1(
   %tmp = alloca %kv1
-  %v1 = getelementptr %kv1* %tmp, i32 0, i32 0
+  %v1 = getelementptr %kv1, %kv1* %tmp, i32 0, i32 0
   store i32* null, i32** %v1
-  %v2 = getelementptr %kv1* %tmp, i32 0, i32 0
+  %v2 = getelementptr %kv1, %kv1* %tmp, i32 0, i32 0
   store i32* null, i32** %v2
   call void @noop()
   %v3 = load %kv1* %tmp
@@ -29,9 +29,9 @@ define %kv2 @fn2() {
 ; CHECK: %3 = bitcast i32* %2 to i8*
 ; CHECK: %4 = insertvalue %kv2 undef, i8* %3, 0
   %tmp = alloca %kv2
-  %v1 = getelementptr %kv2* %tmp, i32 0, i32 0
+  %v1 = getelementptr %kv2, %kv2* %tmp, i32 0, i32 0
   store i8* null, i8** %v1
-  %v2 = getelementptr %kv2* %tmp, i32 0, i32 0
+  %v2 = getelementptr %kv2, %kv2* %tmp, i32 0, i32 0
   store i8* null, i8** %v2
   call void @noop()
 

Modified: llvm/trunk/test/Transforms/MergeFunc/vector-GEP-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MergeFunc/vector-GEP-crash.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MergeFunc/vector-GEP-crash.ll (original)
+++ llvm/trunk/test/Transforms/MergeFunc/vector-GEP-crash.ll Fri Feb 27 13:29:02 2015
@@ -2,11 +2,11 @@
 ; This used to cause a crash when comparing the GEPs
 
 define void @foo(<2 x i64*>) {
-  %tmp = getelementptr <2 x i64*> %0, <2 x i64> <i64 0, i64 0>
+  %tmp = getelementptr i64, <2 x i64*> %0, <2 x i64> <i64 0, i64 0>
   ret void
 }
 
 define void @bar(<2 x i64*>) {
-  %tmp = getelementptr <2 x i64*> %0, <2 x i64> <i64 0, i64 0>
+  %tmp = getelementptr i64, <2 x i64*> %0, <2 x i64> <i64 0, i64 0>
   ret void
 }

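The vector-GEP hunks above show the general shape of the updated syntax: the pointee element type is spelled once, ahead of the (possibly vector-of-pointers) pointer operand, and the result is still a vector of pointers. A minimal standalone sketch in the new textual form (an illustrative function, not taken from any test in this patch):

define <2 x i64*> @example(<2 x i64*> %p) {
  ; The element type (i64) now appears explicitly as the first operand;
  ; each lane of %p is advanced by one i64, yielding another <2 x i64*>.
  %q = getelementptr i64, <2 x i64*> %p, <2 x i64> <i64 1, i64 1>
  ret <2 x i64*> %q
}
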
Modified: llvm/trunk/test/Transforms/MetaRenamer/metarenamer.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/MetaRenamer/metarenamer.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/MetaRenamer/metarenamer.ll (original)
+++ llvm/trunk/test/Transforms/MetaRenamer/metarenamer.ll Fri Feb 27 13:29:02 2015
@@ -24,14 +24,14 @@ define i32 @func_3_xxx() nounwind uwtabl
 
 define void @func_4_xxx(%struct.foo_xxx* sret %agg.result) nounwind uwtable ssp {
   %1 = alloca %struct.foo_xxx, align 8
-  %2 = getelementptr inbounds %struct.foo_xxx* %1, i32 0, i32 0
+  %2 = getelementptr inbounds %struct.foo_xxx, %struct.foo_xxx* %1, i32 0, i32 0
   store i32 1, i32* %2, align 4
-  %3 = getelementptr inbounds %struct.foo_xxx* %1, i32 0, i32 1
+  %3 = getelementptr inbounds %struct.foo_xxx, %struct.foo_xxx* %1, i32 0, i32 1
   store float 2.000000e+00, float* %3, align 4
-  %4 = getelementptr inbounds %struct.foo_xxx* %1, i32 0, i32 2
-  %5 = getelementptr inbounds %struct.bar_xxx* %4, i32 0, i32 0
+  %4 = getelementptr inbounds %struct.foo_xxx, %struct.foo_xxx* %1, i32 0, i32 2
+  %5 = getelementptr inbounds %struct.bar_xxx, %struct.bar_xxx* %4, i32 0, i32 0
   store i32 3, i32* %5, align 4
-  %6 = getelementptr inbounds %struct.bar_xxx* %4, i32 0, i32 1
+  %6 = getelementptr inbounds %struct.bar_xxx, %struct.bar_xxx* %4, i32 0, i32 1
   store double 4.000000e+00, double* %6, align 8
   %7 = bitcast %struct.foo_xxx* %agg.result to i8*
   %8 = bitcast %struct.foo_xxx* %1 to i8*

Modified: llvm/trunk/test/Transforms/ObjCARC/allocas.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ObjCARC/allocas.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ObjCARC/allocas.ll (original)
+++ llvm/trunk/test/Transforms/ObjCARC/allocas.ll Fri Feb 27 13:29:02 2015
@@ -74,7 +74,7 @@ entry:
 define void @test1b(i8* %x) {
 entry:
   %A = alloca i8*
-  %gep = getelementptr i8** %A, i32 0
+  %gep = getelementptr i8*, i8** %A, i32 0
   tail call i8* @objc_retain(i8* %x)
   tail call i8* @objc_retain(i8* %x)
   store i8* %x, i8** %gep, align 8
@@ -97,7 +97,7 @@ entry:
 define void @test1c(i8* %x) {
 entry:
   %A = alloca i8*, i32 3
-  %gep = getelementptr i8** %A, i32 2
+  %gep = getelementptr i8*, i8** %A, i32 2
   tail call i8* @objc_retain(i8* %x)
   tail call i8* @objc_retain(i8* %x)
   store i8* %x, i8** %gep, align 8
@@ -131,7 +131,7 @@ use_allocaB:
 
 exit:
   %A = phi i8** [ %allocaA, %use_allocaA ], [ %allocaB, %use_allocaB ]
-  %gep = getelementptr i8** %A, i32 0
+  %gep = getelementptr i8*, i8** %A, i32 0
   tail call i8* @objc_retain(i8* %x)
   tail call i8* @objc_retain(i8* %x)
   store i8* %x, i8** %gep, align 8
@@ -164,7 +164,7 @@ use_allocaB:
 
 exit:
   %A = phi i8** [ %allocaA, %use_allocaA ], [ %allocaB, %use_allocaB ]
-  %gep = getelementptr i8** %A, i32 2
+  %gep = getelementptr i8*, i8** %A, i32 2
   tail call i8* @objc_retain(i8* %x)
   tail call i8* @objc_retain(i8* %x)
   store i8* %x, i8** %gep, align 8
@@ -243,9 +243,9 @@ bb3:
 define void @test2b(i8* %x) {
 entry:
   %A = alloca i8*
-  %gep1 = getelementptr i8** %A, i32 0
+  %gep1 = getelementptr i8*, i8** %A, i32 0
   store i8* %x, i8** %gep1, align 8
-  %gep2 = getelementptr i8** %A, i32 0
+  %gep2 = getelementptr i8*, i8** %A, i32 0
   %y = load i8** %gep2
   br label %bb1
 
@@ -275,9 +275,9 @@ bb3:
 define void @test2c(i8* %x) {
 entry:
   %A = alloca i8*, i32 3
-  %gep1 = getelementptr i8** %A, i32 2
+  %gep1 = getelementptr i8*, i8** %A, i32 2
   store i8* %x, i8** %gep1, align 8
-  %gep2 = getelementptr i8** %A, i32 2
+  %gep2 = getelementptr i8*, i8** %A, i32 2
   %y = load i8** %gep2
   tail call i8* @objc_retain(i8* %x)
   br label %bb1
@@ -311,17 +311,17 @@ entry:
 
 bb1:
   %Abb1 = alloca i8*, i32 3
-  %gepbb11 = getelementptr i8** %Abb1, i32 2
+  %gepbb11 = getelementptr i8*, i8** %Abb1, i32 2
   store i8* %x, i8** %gepbb11, align 8
-  %gepbb12 = getelementptr i8** %Abb1, i32 2
+  %gepbb12 = getelementptr i8*, i8** %Abb1, i32 2
   %ybb1 = load i8** %gepbb12
   br label %bb3
 
 bb2:
   %Abb2 = alloca i8*, i32 4
-  %gepbb21 = getelementptr i8** %Abb2, i32 2
+  %gepbb21 = getelementptr i8*, i8** %Abb2, i32 2
   store i8* %x, i8** %gepbb21, align 8
-  %gepbb22 = getelementptr i8** %Abb2, i32 2
+  %gepbb22 = getelementptr i8*, i8** %Abb2, i32 2
   %ybb2 = load i8** %gepbb22
   br label %bb3
 
@@ -369,44 +369,44 @@ entry:
   %call1 = call i8* @returner()
   %tmp0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call1)
 
-  %objs.begin = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 0
+  %objs.begin = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
   tail call i8* @objc_retain(i8* %call1)
   store i8* %call1, i8** %objs.begin, align 8
-  %objs.elt = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 1
+  %objs.elt = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 1
   tail call i8* @objc_retain(i8* %call1)
   store i8* %call1, i8** %objs.elt
 
   %call2 = call i8* @returner1()
   %call3 = call i8* @returner2()
-  %keys.begin = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 0
+  %keys.begin = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
   tail call i8* @objc_retain(i8* %call2)
   store i8* %call2, i8** %keys.begin, align 8
-  %keys.elt = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 1
+  %keys.elt = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 1
   tail call i8* @objc_retain(i8* %call3)
   store i8* %call3, i8** %keys.elt  
   
-  %gep = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 2
+  %gep = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 2
   br label %arraydestroy.body
 
 arraydestroy.body:
   %arraydestroy.elementPast = phi i8** [ %gep, %entry ], [ %arraydestroy.element, %arraydestroy.body ]
-  %arraydestroy.element = getelementptr inbounds i8** %arraydestroy.elementPast, i64 -1
+  %arraydestroy.element = getelementptr inbounds i8*, i8** %arraydestroy.elementPast, i64 -1
   %destroy_tmp = load i8** %arraydestroy.element, align 8
   call void @objc_release(i8* %destroy_tmp), !clang.imprecise_release !0
-  %objs_ptr = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 0
+  %objs_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
   %arraydestroy.cmp = icmp eq i8** %arraydestroy.element, %objs_ptr
   br i1 %arraydestroy.cmp, label %arraydestroy.done, label %arraydestroy.body
 
 arraydestroy.done:
-  %gep1 = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 2
+  %gep1 = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 2
   br label %arraydestroy.body1
 
 arraydestroy.body1:
   %arraydestroy.elementPast1 = phi i8** [ %gep1, %arraydestroy.done ], [ %arraydestroy.element1, %arraydestroy.body1 ]
-  %arraydestroy.element1 = getelementptr inbounds i8** %arraydestroy.elementPast1, i64 -1
+  %arraydestroy.element1 = getelementptr inbounds i8*, i8** %arraydestroy.elementPast1, i64 -1
   %destroy_tmp1 = load i8** %arraydestroy.element1, align 8
   call void @objc_release(i8* %destroy_tmp1), !clang.imprecise_release !0
-  %keys_ptr = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 0
+  %keys_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
   %arraydestroy.cmp1 = icmp eq i8** %arraydestroy.element1, %keys_ptr
   br i1 %arraydestroy.cmp1, label %arraydestroy.done1, label %arraydestroy.body1
 
@@ -448,44 +448,44 @@ entry:
   %tmp0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call1)
   %tmp1 = tail call i8* @objc_retain(i8* %call1)
 
-  %objs.begin = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 0
+  %objs.begin = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
   tail call i8* @objc_retain(i8* %call1)
   store i8* %call1, i8** %objs.begin, align 8
-  %objs.elt = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 1
+  %objs.elt = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 1
   tail call i8* @objc_retain(i8* %call1)
   store i8* %call1, i8** %objs.elt
 
   %call2 = call i8* @returner1()
   %call3 = call i8* @returner2()
-  %keys.begin = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 0
+  %keys.begin = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
   tail call i8* @objc_retain(i8* %call2)
   store i8* %call2, i8** %keys.begin, align 8
-  %keys.elt = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 1
+  %keys.elt = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 1
   tail call i8* @objc_retain(i8* %call3)
   store i8* %call3, i8** %keys.elt  
   
-  %gep = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 2
+  %gep = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 2
   br label %arraydestroy.body
 
 arraydestroy.body:
   %arraydestroy.elementPast = phi i8** [ %gep, %entry ], [ %arraydestroy.element, %arraydestroy.body ]
-  %arraydestroy.element = getelementptr inbounds i8** %arraydestroy.elementPast, i64 -1
+  %arraydestroy.element = getelementptr inbounds i8*, i8** %arraydestroy.elementPast, i64 -1
   %destroy_tmp = load i8** %arraydestroy.element, align 8
   call void @objc_release(i8* %destroy_tmp), !clang.imprecise_release !0
-  %objs_ptr = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 0
+  %objs_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
   %arraydestroy.cmp = icmp eq i8** %arraydestroy.element, %objs_ptr
   br i1 %arraydestroy.cmp, label %arraydestroy.done, label %arraydestroy.body
 
 arraydestroy.done:
-  %gep1 = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 2
+  %gep1 = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 2
   br label %arraydestroy.body1
 
 arraydestroy.body1:
   %arraydestroy.elementPast1 = phi i8** [ %gep1, %arraydestroy.done ], [ %arraydestroy.element1, %arraydestroy.body1 ]
-  %arraydestroy.element1 = getelementptr inbounds i8** %arraydestroy.elementPast1, i64 -1
+  %arraydestroy.element1 = getelementptr inbounds i8*, i8** %arraydestroy.elementPast1, i64 -1
   %destroy_tmp1 = load i8** %arraydestroy.element1, align 8
   call void @objc_release(i8* %destroy_tmp1), !clang.imprecise_release !0
-  %keys_ptr = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 0
+  %keys_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
   %arraydestroy.cmp1 = icmp eq i8** %arraydestroy.element1, %keys_ptr
   br i1 %arraydestroy.cmp1, label %arraydestroy.done1, label %arraydestroy.body1
 

Modified: llvm/trunk/test/Transforms/ObjCARC/basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ObjCARC/basic.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ObjCARC/basic.ll (original)
+++ llvm/trunk/test/Transforms/ObjCARC/basic.ll Fri Feb 27 13:29:02 2015
@@ -1659,7 +1659,7 @@ true:
 
 done:
   %g = bitcast i8* %p to i8*
-  %h = getelementptr i8* %g, i64 0
+  %h = getelementptr i8, i8* %g, i64 0
   call void @objc_release(i8* %g)
   ret void
 }
@@ -1688,7 +1688,7 @@ true:
 
 done:
   %g = bitcast i8* %p to i8*
-  %h = getelementptr i8* %g, i64 0
+  %h = getelementptr i8, i8* %g, i64 0
   call void @objc_release(i8* %g)
   ret void
 }
@@ -1713,7 +1713,7 @@ true:
 
 done:
   %g = bitcast i8* %p to i8*
-  %h = getelementptr i8* %g, i64 0
+  %h = getelementptr i8, i8* %g, i64 0
   call void @objc_release(i8* %g)
   ret void
 }
@@ -1732,7 +1732,7 @@ true:
 
 done:
   %g = bitcast i8* %p to i8*
-  %h = getelementptr i8* %g, i64 0
+  %h = getelementptr i8, i8* %g, i64 0
   call void @objc_release(i8* %g), !clang.imprecise_release !0
   ret void
 }
@@ -1760,7 +1760,7 @@ true:
 
 done:
   %g = bitcast i8* %p to i8*
-  %h = getelementptr i8* %g, i64 0
+  %h = getelementptr i8, i8* %g, i64 0
   call void @objc_release(i8* %g)
   ret void
 }
@@ -1780,7 +1780,7 @@ true:
 
 done:
   %g = bitcast i8* %p to i8*
-  %h = getelementptr i8* %g, i64 0
+  %h = getelementptr i8, i8* %g, i64 0
   call void @objc_release(i8* %g), !clang.imprecise_release !0
   ret void
 }
@@ -2682,28 +2682,28 @@ invoke.cont:
   tail call void @llvm.dbg.value(metadata {}* %self, i64 0, metadata !0, metadata !{})
   tail call void @llvm.dbg.value(metadata {}* %self, i64 0, metadata !0, metadata !{})
   %ivar = load i64* @"OBJC_IVAR_$_A.myZ", align 8
-  %add.ptr = getelementptr i8* %0, i64 %ivar
+  %add.ptr = getelementptr i8, i8* %0, i64 %ivar
   %tmp1 = bitcast i8* %add.ptr to float*
   %tmp2 = load float* %tmp1, align 4
   %conv = fpext float %tmp2 to double
   %add.ptr.sum = add i64 %ivar, 4
-  %tmp6 = getelementptr inbounds i8* %0, i64 %add.ptr.sum
+  %tmp6 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum
   %2 = bitcast i8* %tmp6 to float*
   %tmp7 = load float* %2, align 4
   %conv8 = fpext float %tmp7 to double
   %add.ptr.sum36 = add i64 %ivar, 8
-  %tmp12 = getelementptr inbounds i8* %0, i64 %add.ptr.sum36
+  %tmp12 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum36
   %arrayidx = bitcast i8* %tmp12 to float*
   %tmp13 = load float* %arrayidx, align 4
   %conv14 = fpext float %tmp13 to double
   %tmp12.sum = add i64 %ivar, 12
-  %arrayidx19 = getelementptr inbounds i8* %0, i64 %tmp12.sum
+  %arrayidx19 = getelementptr inbounds i8, i8* %0, i64 %tmp12.sum
   %3 = bitcast i8* %arrayidx19 to float*
   %tmp20 = load float* %3, align 4
   %conv21 = fpext float %tmp20 to double
   %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([33 x i8]* @.str4, i64 0, i64 0), double %conv, double %conv8, double %conv14, double %conv21)
   %ivar23 = load i64* @"OBJC_IVAR_$_A.myZ", align 8
-  %add.ptr24 = getelementptr i8* %0, i64 %ivar23
+  %add.ptr24 = getelementptr i8, i8* %0, i64 %ivar23
   %4 = bitcast i8* %add.ptr24 to i128*
   %srcval = load i128* %4, align 4
   tail call void @objc_release(i8* %0) nounwind

Modified: llvm/trunk/test/Transforms/ObjCARC/contract-storestrong-ivar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ObjCARC/contract-storestrong-ivar.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ObjCARC/contract-storestrong-ivar.ll (original)
+++ llvm/trunk/test/Transforms/ObjCARC/contract-storestrong-ivar.ll Fri Feb 27 13:29:02 2015
@@ -18,7 +18,7 @@ define hidden void @y(%0* nocapture %sel
 entry:
   %ivar = load i64* @"OBJC_IVAR_$_Controller.preferencesController", align 8
   %tmp = bitcast %0* %self to i8*
-  %add.ptr = getelementptr inbounds i8* %tmp, i64 %ivar
+  %add.ptr = getelementptr inbounds i8, i8* %tmp, i64 %ivar
   %tmp1 = bitcast i8* %add.ptr to %1**
   %tmp2 = load %1** %tmp1, align 8
   %tmp3 = bitcast %1* %preferencesController to i8*

Modified: llvm/trunk/test/Transforms/ObjCARC/escape.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ObjCARC/escape.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ObjCARC/escape.ll (original)
+++ llvm/trunk/test/Transforms/ObjCARC/escape.ll Fri Feb 27 13:29:02 2015
@@ -17,41 +17,41 @@ define void @test0() nounwind {
 entry:
   %weakLogNTimes = alloca %struct.__block_byref_weakLogNTimes, align 8
   %block = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, align 8
-  %byref.isa = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 0
+  %byref.isa = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 0
   store i8* null, i8** %byref.isa, align 8
-  %byref.forwarding = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 1
+  %byref.forwarding = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 1
   store %struct.__block_byref_weakLogNTimes* %weakLogNTimes, %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
-  %byref.flags = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 2
+  %byref.flags = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 2
   store i32 33554432, i32* %byref.flags, align 8
-  %byref.size = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 3
+  %byref.size = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 3
   store i32 48, i32* %byref.size, align 4
-  %tmp1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 4
+  %tmp1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 4
   store i8* bitcast (void (i8*, i8*)* @__Block_byref_object_copy_ to i8*), i8** %tmp1, align 8
-  %tmp2 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 5
+  %tmp2 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 5
   store i8* bitcast (void (i8*)* @__Block_byref_object_dispose_ to i8*), i8** %tmp2, align 8
-  %weakLogNTimes1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 6
+  %weakLogNTimes1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 6
   %tmp3 = bitcast void (...)** %weakLogNTimes1 to i8**
   %tmp4 = call i8* @objc_initWeak(i8** %tmp3, i8* null) nounwind
-  %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
+  %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
   store i8* null, i8** %block.isa, align 8
-  %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
+  %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
   store i32 1107296256, i32* %block.flags, align 8
-  %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 2
+  %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 2
   store i32 0, i32* %block.reserved, align 4
-  %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 3
+  %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 3
   store i8* bitcast (void (i8*, i32)* @__main_block_invoke_0 to i8*), i8** %block.invoke, align 8
-  %block.descriptor = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 4
+  %block.descriptor = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 4
   store %struct.__block_descriptor* null, %struct.__block_descriptor** %block.descriptor, align 8
-  %block.captured = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 5
+  %block.captured = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 5
   %tmp5 = bitcast %struct.__block_byref_weakLogNTimes* %weakLogNTimes to i8*
   store i8* %tmp5, i8** %block.captured, align 8
   %tmp6 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block to i8*
   %tmp7 = call i8* @objc_retainBlock(i8* %tmp6) nounwind, !clang.arc.copy_on_escape !0
   %tmp8 = load %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
-  %weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
+  %weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
   %tmp9 = bitcast void (...)** %weakLogNTimes3 to i8**
   %tmp10 = call i8* @objc_storeWeak(i8** %tmp9, i8* %tmp7) nounwind
-  %tmp11 = getelementptr inbounds i8* %tmp7, i64 16
+  %tmp11 = getelementptr inbounds i8, i8* %tmp7, i64 16
   %tmp12 = bitcast i8* %tmp11 to i8**
   %tmp13 = load i8** %tmp12, align 8
   %tmp14 = bitcast i8* %tmp13 to void (i8*, i32)*
@@ -72,41 +72,41 @@ define void @test1() nounwind {
 entry:
   %weakLogNTimes = alloca %struct.__block_byref_weakLogNTimes, align 8
   %block = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, align 8
-  %byref.isa = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 0
+  %byref.isa = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 0
   store i8* null, i8** %byref.isa, align 8
-  %byref.forwarding = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 1
+  %byref.forwarding = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 1
   store %struct.__block_byref_weakLogNTimes* %weakLogNTimes, %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
-  %byref.flags = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 2
+  %byref.flags = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 2
   store i32 33554432, i32* %byref.flags, align 8
-  %byref.size = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 3
+  %byref.size = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 3
   store i32 48, i32* %byref.size, align 4
-  %tmp1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 4
+  %tmp1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 4
   store i8* bitcast (void (i8*, i8*)* @__Block_byref_object_copy_ to i8*), i8** %tmp1, align 8
-  %tmp2 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 5
+  %tmp2 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 5
   store i8* bitcast (void (i8*)* @__Block_byref_object_dispose_ to i8*), i8** %tmp2, align 8
-  %weakLogNTimes1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 6
+  %weakLogNTimes1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 6
   %tmp3 = bitcast void (...)** %weakLogNTimes1 to i8**
   %tmp4 = call i8* @objc_initWeak(i8** %tmp3, i8* null) nounwind
-  %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
+  %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
   store i8* null, i8** %block.isa, align 8
-  %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
+  %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
   store i32 1107296256, i32* %block.flags, align 8
-  %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 2
+  %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 2
   store i32 0, i32* %block.reserved, align 4
-  %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 3
+  %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 3
   store i8* bitcast (void (i8*, i32)* @__main_block_invoke_0 to i8*), i8** %block.invoke, align 8
-  %block.descriptor = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 4
+  %block.descriptor = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 4
   store %struct.__block_descriptor* null, %struct.__block_descriptor** %block.descriptor, align 8
-  %block.captured = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 5
+  %block.captured = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 5
   %tmp5 = bitcast %struct.__block_byref_weakLogNTimes* %weakLogNTimes to i8*
   store i8* %tmp5, i8** %block.captured, align 8
   %tmp6 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block to i8*
   %tmp7 = call i8* @objc_retainBlock(i8* %tmp6) nounwind, !clang.arc.copy_on_escape !0
   %tmp8 = load %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
-  %weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
+  %weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
   %tmp9 = bitcast void (...)** %weakLogNTimes3 to i8**
   %tmp10 = call i8* @not_really_objc_storeWeak(i8** %tmp9, i8* %tmp7) nounwind
-  %tmp11 = getelementptr inbounds i8* %tmp7, i64 16
+  %tmp11 = getelementptr inbounds i8, i8* %tmp7, i64 16
   %tmp12 = bitcast i8* %tmp11 to i8**
   %tmp13 = load i8** %tmp12, align 8
   %tmp14 = bitcast i8* %tmp13 to void (i8*, i32)*

Modified: llvm/trunk/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll (original)
+++ llvm/trunk/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll Fri Feb 27 13:29:02 2015
@@ -168,7 +168,7 @@ bb57:
   %tmp70 = tail call %14* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %14* (i8*, i8*, %23*, %18*)*)(i8* %tmp69, i8* %tmp68, %23* %tmp67, %18* %tmp47)
   %tmp71 = bitcast %14* %tmp70 to i8*
   ; hack to prevent the optimizer from using objc_retainAutoreleasedReturnValue.
-  %tmp71x = getelementptr i8* %tmp71, i64 1
+  %tmp71x = getelementptr i8, i8* %tmp71, i64 1
   %tmp72 = tail call i8* @objc_retain(i8* %tmp71x) nounwind
   %tmp73 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_402", align 8
   tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8)*)(i8* %tmp72, i8* %tmp73, i8 signext 1)

Modified: llvm/trunk/test/Transforms/ObjCARC/nested.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ObjCARC/nested.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ObjCARC/nested.ll (original)
+++ llvm/trunk/test/Transforms/ObjCARC/nested.ll Fri Feb 27 13:29:02 2015
@@ -43,10 +43,10 @@ entry:
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
 forcoll.loopinit:
-  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
   %mutationsptr = load i64** %mutationsptr.ptr, align 8
   %forcoll.initial-mutations = load i64* %mutationsptr, align 8
-  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
   br label %forcoll.loopbody.outer
 
 forcoll.loopbody.outer:
@@ -68,7 +68,7 @@ forcoll.mutated:
 
 forcoll.notmutated:
   %stateitems = load i8*** %stateitems.ptr, align 8
-  %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
   %3 = load i8** %currentitem.ptr, align 8
   call void @use(i8* %3)
   %4 = add i64 %forcoll.index, 1
@@ -108,10 +108,10 @@ entry:
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
 forcoll.loopinit:
-  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
   %mutationsptr = load i64** %mutationsptr.ptr, align 8
   %forcoll.initial-mutations = load i64* %mutationsptr, align 8
-  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
   br label %forcoll.loopbody.outer
 
 forcoll.loopbody.outer:
@@ -133,7 +133,7 @@ forcoll.mutated:
 
 forcoll.notmutated:
   %stateitems = load i8*** %stateitems.ptr, align 8
-  %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
   %3 = load i8** %currentitem.ptr, align 8
   call void @use(i8* %3)
   %4 = add i64 %forcoll.index, 1
@@ -173,10 +173,10 @@ entry:
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
 forcoll.loopinit:
-  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
   %mutationsptr = load i64** %mutationsptr.ptr, align 8
   %forcoll.initial-mutations = load i64* %mutationsptr, align 8
-  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
   br label %forcoll.loopbody.outer
 
 forcoll.loopbody.outer:
@@ -198,7 +198,7 @@ forcoll.mutated:
 
 forcoll.notmutated:
   %stateitems = load i8*** %stateitems.ptr, align 8
-  %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
   %3 = load i8** %currentitem.ptr, align 8
   call void @use(i8* %3)
   %4 = add i64 %forcoll.index, 1
@@ -239,10 +239,10 @@ entry:
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
 forcoll.loopinit:
-  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
   %mutationsptr = load i64** %mutationsptr.ptr, align 8
   %forcoll.initial-mutations = load i64* %mutationsptr, align 8
-  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
   br label %forcoll.loopbody.outer
 
 forcoll.loopbody.outer:
@@ -264,7 +264,7 @@ forcoll.mutated:
 
 forcoll.notmutated:
   %stateitems = load i8*** %stateitems.ptr, align 8
-  %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
   %3 = load i8** %currentitem.ptr, align 8
   call void @use(i8* %3)
   %4 = add i64 %forcoll.index, 1
@@ -306,10 +306,10 @@ entry:
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
 forcoll.loopinit:
-  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
   %mutationsptr = load i64** %mutationsptr.ptr, align 8
   %forcoll.initial-mutations = load i64* %mutationsptr, align 8
-  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
   br label %forcoll.loopbody.outer
 
 forcoll.loopbody.outer:
@@ -331,7 +331,7 @@ forcoll.mutated:
 
 forcoll.notmutated:
   %stateitems = load i8*** %stateitems.ptr, align 8
-  %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
   %3 = load i8** %currentitem.ptr, align 8
   call void @use(i8* %3)
   %4 = add i64 %forcoll.index, 1
@@ -375,10 +375,10 @@ entry:
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
 forcoll.loopinit:
-  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
   %mutationsptr = load i64** %mutationsptr.ptr, align 8
   %forcoll.initial-mutations = load i64* %mutationsptr, align 8
-  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
   br label %forcoll.loopbody.outer
 
 forcoll.loopbody.outer:
@@ -400,7 +400,7 @@ forcoll.mutated:
 
 forcoll.notmutated:
   %stateitems = load i8*** %stateitems.ptr, align 8
-  %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
   %3 = load i8** %currentitem.ptr, align 8
   call void @use(i8* %3)
   %4 = add i64 %forcoll.index, 1
@@ -441,10 +441,10 @@ entry:
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
 forcoll.loopinit:
-  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
   %mutationsptr = load i64** %mutationsptr.ptr, align 8
   %forcoll.initial-mutations = load i64* %mutationsptr, align 8
-  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
   br label %forcoll.loopbody.outer
 
 forcoll.loopbody.outer:
@@ -466,7 +466,7 @@ forcoll.mutated:
 
 forcoll.notmutated:
   %stateitems = load i8*** %stateitems.ptr, align 8
-  %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
   %3 = load i8** %currentitem.ptr, align 8
   %tobool = icmp eq i8* %3, null
   br i1 %tobool, label %forcoll.next, label %if.then
@@ -518,7 +518,7 @@ entry:
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
 forcoll.loopinit:
-  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
   %mutationsptr = load i64** %mutationsptr.ptr, align 8
   %forcoll.initial-mutations = load i64* %mutationsptr, align 8
   br label %forcoll.loopbody.outer
@@ -585,7 +585,7 @@ entry:
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
 forcoll.loopinit:
-  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
   %mutationsptr = load i64** %mutationsptr.ptr, align 8
   %forcoll.initial-mutations = load i64* %mutationsptr, align 8
   br label %forcoll.loopbody.outer
@@ -652,7 +652,7 @@ entry:
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
 forcoll.loopinit:
-  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
   %mutationsptr = load i64** %mutationsptr.ptr, align 8
   %forcoll.initial-mutations = load i64* %mutationsptr, align 8
   br label %forcoll.loopbody.outer
@@ -720,7 +720,7 @@ entry:
   br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
 
 forcoll.loopinit:
-  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
   %mutationsptr = load i64** %mutationsptr.ptr, align 8
   %forcoll.initial-mutations = load i64* %mutationsptr, align 8
   br label %forcoll.loopbody.outer
@@ -779,16 +779,16 @@ entry:
   %block = alloca <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, align 8
   %block9 = alloca <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, align 8
   %call = call i8* @def(), !clang.arc.no_objc_arc_exceptions !0
-  %foo = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 5
-  %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 0
+  %foo = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 5
+  %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 0
   store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa, align 8
-  %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 1
+  %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 1
   store i32 1107296256, i32* %block.flags, align 8
-  %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 2
+  %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 2
   store i32 0, i32* %block.reserved, align 4
-  %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 3
+  %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 3
   store i8* bitcast (void (i8*)* @__crasher_block_invoke to i8*), i8** %block.invoke, align 8
-  %block.d = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 4
+  %block.d = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 4
   store %struct.__block_d* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_d_tmp to %struct.__block_d*), %struct.__block_d** %block.d, align 8
   %foo2 = tail call i8* @objc_retain(i8* %call) nounwind
   store i8* %foo2, i8** %foo, align 8
@@ -798,16 +798,16 @@ entry:
   call void @objc_release(i8* %foo5) nounwind
   %strongdestroy = load i8** %foo, align 8
   call void @objc_release(i8* %strongdestroy) nounwind, !clang.imprecise_release !0
-  %foo10 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 5
-  %block.isa11 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 0
+  %foo10 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 5
+  %block.isa11 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 0
   store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa11, align 8
-  %block.flags12 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 1
+  %block.flags12 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 1
   store i32 1107296256, i32* %block.flags12, align 8
-  %block.reserved13 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 2
+  %block.reserved13 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 2
   store i32 0, i32* %block.reserved13, align 4
-  %block.invoke14 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 3
+  %block.invoke14 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 3
   store i8* bitcast (void (i8*)* @__crasher_block_invoke1 to i8*), i8** %block.invoke14, align 8
-  %block.d15 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 4
+  %block.d15 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 4
   store %struct.__block_d* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_d_tmp5 to %struct.__block_d*), %struct.__block_d** %block.d15, align 8
   %foo18 = call i8* @objc_retain(i8* %call) nounwind
   store i8* %call, i8** %foo10, align 8

Modified: llvm/trunk/test/Transforms/ObjCARC/retain-block-side-effects.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ObjCARC/retain-block-side-effects.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ObjCARC/retain-block-side-effects.ll (original)
+++ llvm/trunk/test/Transforms/ObjCARC/retain-block-side-effects.ll Fri Feb 27 13:29:02 2015
@@ -7,7 +7,7 @@
 ; CHECK: %tmp16 = call i8* @objc_retainBlock(i8* %tmp15) [[NUW:#[0-9]+]]
 ; CHECK: %tmp17 = bitcast i8* %tmp16 to void ()*
 ; CHECK: %tmp18 = load %struct.__block_byref_repeater** %byref.forwarding, align 8
-; CHECK: %repeater12 = getelementptr inbounds %struct.__block_byref_repeater* %tmp18, i64 0, i32 6
+; CHECK: %repeater12 = getelementptr inbounds %struct.__block_byref_repeater, %struct.__block_byref_repeater* %tmp18, i64 0, i32 6
 ; CHECK: store void ()* %tmp17, void ()** %repeater12, align 8
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@@ -20,17 +20,17 @@ define void @foo() noreturn {
 entry:
   %repeater = alloca %struct.__block_byref_repeater, align 8
   %block = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0*, i8* }>, align 8
-  %byref.forwarding = getelementptr inbounds %struct.__block_byref_repeater* %repeater, i64 0, i32 1
-  %tmp10 = getelementptr inbounds %struct.__block_byref_repeater* %repeater, i64 0, i32 6
+  %byref.forwarding = getelementptr inbounds %struct.__block_byref_repeater, %struct.__block_byref_repeater* %repeater, i64 0, i32 1
+  %tmp10 = getelementptr inbounds %struct.__block_byref_repeater, %struct.__block_byref_repeater* %repeater, i64 0, i32 6
   store void ()* null, void ()** %tmp10, align 8
-  %block.captured11 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0*, i8* }>* %block, i64 0, i32 6
+  %block.captured11 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0*, i8* }>* %block, i64 0, i32 6
   %tmp14 = bitcast %struct.__block_byref_repeater* %repeater to i8*
   store i8* %tmp14, i8** %block.captured11, align 8
   %tmp15 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0*, i8* }>* %block to i8*
   %tmp16 = call i8* @objc_retainBlock(i8* %tmp15) nounwind
   %tmp17 = bitcast i8* %tmp16 to void ()*
   %tmp18 = load %struct.__block_byref_repeater** %byref.forwarding, align 8
-  %repeater12 = getelementptr inbounds %struct.__block_byref_repeater* %tmp18, i64 0, i32 6
+  %repeater12 = getelementptr inbounds %struct.__block_byref_repeater, %struct.__block_byref_repeater* %tmp18, i64 0, i32 6
   %tmp13 = load void ()** %repeater12, align 8
   store void ()* %tmp17, void ()** %repeater12, align 8
   ret void

Modified: llvm/trunk/test/Transforms/ObjCARC/weak-copies.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ObjCARC/weak-copies.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ObjCARC/weak-copies.ll (original)
+++ llvm/trunk/test/Transforms/ObjCARC/weak-copies.ll Fri Feb 27 13:29:02 2015
@@ -50,17 +50,17 @@ entry:
   %block = alloca %1, align 8
   %0 = call i8* @objc_retain(i8* %me) nounwind
   %1 = call i8* @objc_initWeak(i8** %w, i8* %0) nounwind
-  %block.isa = getelementptr inbounds %1* %block, i64 0, i32 0
+  %block.isa = getelementptr inbounds %1, %1* %block, i64 0, i32 0
   store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa, align 8
-  %block.flags = getelementptr inbounds %1* %block, i64 0, i32 1
+  %block.flags = getelementptr inbounds %1, %1* %block, i64 0, i32 1
   store i32 1107296256, i32* %block.flags, align 8
-  %block.reserved = getelementptr inbounds %1* %block, i64 0, i32 2
+  %block.reserved = getelementptr inbounds %1, %1* %block, i64 0, i32 2
   store i32 0, i32* %block.reserved, align 4
-  %block.invoke = getelementptr inbounds %1* %block, i64 0, i32 3
+  %block.invoke = getelementptr inbounds %1, %1* %block, i64 0, i32 3
   store i8* bitcast (void (i8*)* @__qux_block_invoke_0 to i8*), i8** %block.invoke, align 8
-  %block.descriptor = getelementptr inbounds %1* %block, i64 0, i32 4
+  %block.descriptor = getelementptr inbounds %1, %1* %block, i64 0, i32 4
   store %struct.__block_descriptor* bitcast (%0* @__block_descriptor_tmp to %struct.__block_descriptor*), %struct.__block_descriptor** %block.descriptor, align 8
-  %block.captured = getelementptr inbounds %1* %block, i64 0, i32 5
+  %block.captured = getelementptr inbounds %1, %1* %block, i64 0, i32 5
   %2 = call i8* @objc_loadWeak(i8** %w) nounwind
   %3 = call i8* @objc_initWeak(i8** %block.captured, i8* %2) nounwind
   %4 = bitcast %1* %block to void ()*

Modified: llvm/trunk/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll (original)
+++ llvm/trunk/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll Fri Feb 27 13:29:02 2015
@@ -87,7 +87,7 @@ entry:
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
   store %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %this, %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr
   %1 = load %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr, align 8 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
-  %2 = getelementptr inbounds %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %1, i32 0, i32 0 ; <i32*> [#uses=1]
+  %2 = getelementptr inbounds %"struct.boost::details::compressed_pair_imp<empty_t,int,1>", %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %1, i32 0, i32 0 ; <i32*> [#uses=1]
   store i32* %2, i32** %0, align 8
   %3 = load i32** %0, align 8                     ; <i32*> [#uses=1]
   store i32* %3, i32** %retval, align 8
@@ -106,7 +106,7 @@ entry:
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
   store %"struct.boost::compressed_pair<empty_t,int>"* %this, %"struct.boost::compressed_pair<empty_t,int>"** %this_addr
   %1 = load %"struct.boost::compressed_pair<empty_t,int>"** %this_addr, align 8 ; <%"struct.boost::compressed_pair<empty_t,int>"*> [#uses=1]
-  %2 = getelementptr inbounds %"struct.boost::compressed_pair<empty_t,int>"* %1, i32 0, i32 0 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
+  %2 = getelementptr inbounds %"struct.boost::compressed_pair<empty_t,int>", %"struct.boost::compressed_pair<empty_t,int>"* %1, i32 0, i32 0 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
   %3 = call i32* @_ZN5boost7details19compressed_pair_impI7empty_tiLi1EE6secondEv(%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %2) nounwind ; <i32*> [#uses=1]
   store i32* %3, i32** %0, align 8
   %4 = load i32** %0, align 8                     ; <i32*> [#uses=1]
@@ -145,7 +145,7 @@ entry:
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
   store %"struct.boost::compressed_pair<empty_t,int>"* %this, %"struct.boost::compressed_pair<empty_t,int>"** %this_addr
   %1 = load %"struct.boost::compressed_pair<empty_t,int>"** %this_addr, align 8 ; <%"struct.boost::compressed_pair<empty_t,int>"*> [#uses=1]
-  %2 = getelementptr inbounds %"struct.boost::compressed_pair<empty_t,int>"* %1, i32 0, i32 0 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
+  %2 = getelementptr inbounds %"struct.boost::compressed_pair<empty_t,int>", %"struct.boost::compressed_pair<empty_t,int>"* %1, i32 0, i32 0 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
   %3 = call %struct.empty_base_t* @_ZN5boost7details19compressed_pair_impI7empty_tiLi1EE5firstEv(%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %2) nounwind ; <%struct.empty_base_t*> [#uses=1]
   store %struct.empty_base_t* %3, %struct.empty_base_t** %0, align 8
   %4 = load %struct.empty_base_t** %0, align 8    ; <%struct.empty_base_t*> [#uses=1]

Modified: llvm/trunk/test/Transforms/PhaseOrdering/PR6627.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/PhaseOrdering/PR6627.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/PhaseOrdering/PR6627.ll (original)
+++ llvm/trunk/test/Transforms/PhaseOrdering/PR6627.ll Fri Feb 27 13:29:02 2015
@@ -15,21 +15,21 @@ entry:
   br i1 %cmp, label %land.lhs.true, label %if.end
 
 land.lhs.true:                                    ; preds = %entry
-  %arrayidx4 = getelementptr inbounds i8* %arrayidx, i64 1
+  %arrayidx4 = getelementptr inbounds i8, i8* %arrayidx, i64 1
   %tmp5 = load i8* %arrayidx4, align 1
   %conv6 = zext i8 %tmp5 to i32
   %cmp7 = icmp eq i32 %conv6, 69
   br i1 %cmp7, label %land.lhs.true9, label %if.end
 
 land.lhs.true9:                                   ; preds = %land.lhs.true
-  %arrayidx12 = getelementptr inbounds i8* %arrayidx, i64 2
+  %arrayidx12 = getelementptr inbounds i8, i8* %arrayidx, i64 2
   %tmp13 = load i8* %arrayidx12, align 1
   %conv14 = zext i8 %tmp13 to i32
   %cmp15 = icmp eq i32 %conv14, 76
   br i1 %cmp15, label %land.lhs.true17, label %if.end
 
 land.lhs.true17:                                  ; preds = %land.lhs.true9
-  %arrayidx20 = getelementptr inbounds i8* %arrayidx, i64 3
+  %arrayidx20 = getelementptr inbounds i8, i8* %arrayidx, i64 3
   %tmp21 = load i8* %arrayidx20, align 1
   %conv22 = zext i8 %tmp21 to i32
   %cmp23 = icmp eq i32 %conv22, 70
@@ -59,21 +59,21 @@ entry:
   br i1 %cmp, label %land.lhs.true, label %if.end
 
 land.lhs.true:                                    ; preds = %entry
-  %arrayidx4 = getelementptr inbounds i8* %arrayidx, i64 1
+  %arrayidx4 = getelementptr inbounds i8, i8* %arrayidx, i64 1
   %tmp5 = load i8* %arrayidx4, align 1
   %conv6 = zext i8 %tmp5 to i32
   %cmp7 = icmp eq i32 %conv6, 69
   br i1 %cmp7, label %land.lhs.true9, label %if.end
 
 land.lhs.true9:                                   ; preds = %land.lhs.true
-  %arrayidx12 = getelementptr inbounds i8* %arrayidx, i64 2
+  %arrayidx12 = getelementptr inbounds i8, i8* %arrayidx, i64 2
   %tmp13 = load i8* %arrayidx12, align 1
   %conv14 = zext i8 %tmp13 to i32
   %cmp15 = icmp eq i32 %conv14, 76
   br i1 %cmp15, label %land.lhs.true17, label %if.end
 
 land.lhs.true17:                                  ; preds = %land.lhs.true9
-  %arrayidx20 = getelementptr inbounds i8* %arrayidx, i64 3
+  %arrayidx20 = getelementptr inbounds i8, i8* %arrayidx, i64 3
   %tmp21 = load i8* %arrayidx20, align 1
   %conv22 = zext i8 %tmp21 to i32
   %cmp23 = icmp eq i32 %conv22, 70

Modified: llvm/trunk/test/Transforms/PhaseOrdering/basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/PhaseOrdering/basic.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/PhaseOrdering/basic.ll (original)
+++ llvm/trunk/test/Transforms/PhaseOrdering/basic.ll Fri Feb 27 13:29:02 2015
@@ -31,14 +31,14 @@ define void @test1() nounwind ssp {
 define i32 @test2(i32 %a, i32* %p) nounwind uwtable ssp {
 entry:
   %div = udiv i32 %a, 4
-  %arrayidx = getelementptr inbounds i32* %p, i64 0
+  %arrayidx = getelementptr inbounds i32, i32* %p, i64 0
   store i32 %div, i32* %arrayidx, align 4
   %add = add i32 %div, %div
-  %arrayidx1 = getelementptr inbounds i32* %p, i64 1
+  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 1
   store i32 %add, i32* %arrayidx1, align 4
-  %arrayidx2 = getelementptr inbounds i32* %p, i64 1
+  %arrayidx2 = getelementptr inbounds i32, i32* %p, i64 1
   %0 = load i32* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds i32* %p, i64 0
+  %arrayidx3 = getelementptr inbounds i32, i32* %p, i64 0
   %1 = load i32* %arrayidx3, align 4
   %mul = mul i32 2, %1
   %sub = sub i32 %0, %mul

Modified: llvm/trunk/test/Transforms/PhaseOrdering/scev.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/PhaseOrdering/scev.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/PhaseOrdering/scev.ll (original)
+++ llvm/trunk/test/Transforms/PhaseOrdering/scev.ll Fri Feb 27 13:29:02 2015
@@ -21,9 +21,9 @@ for.cond:
 
 for.body:                                         ; preds = %for.cond
   store i32 0, i32* %p.addr.0, align 4
-  %add.ptr = getelementptr inbounds i32* %p.addr.0, i64 %div
+  %add.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 %div
   store i32 1, i32* %add.ptr, align 4
-  %add.ptr1 = getelementptr inbounds i32* %add.ptr, i64 %div
+  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i64 %div
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body
@@ -50,9 +50,9 @@ for.cond:
 
 for.body:                                         ; preds = %for.cond
   store i32 0, i32* %p.addr.0, align 4
-  %add.ptr = getelementptr inbounds i32* %p.addr.0, i64 %div
+  %add.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 %div
   store i32 1, i32* %add.ptr, align 4
-  %add.ptr1 = getelementptr inbounds i32* %add.ptr, i64 %div
+  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i64 %div
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body

Modified: llvm/trunk/test/Transforms/Reassociate/2011-01-26-UseAfterFree.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Reassociate/2011-01-26-UseAfterFree.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/Reassociate/2011-01-26-UseAfterFree.ll (original)
+++ llvm/trunk/test/Transforms/Reassociate/2011-01-26-UseAfterFree.ll Fri Feb 27 13:29:02 2015
@@ -22,7 +22,7 @@ entry:
   unreachable
 
 "8":                                              ; preds = %"4"
-  %8 = getelementptr inbounds i8* undef, i32 %6
+  %8 = getelementptr inbounds i8, i8* undef, i32 %6
   br i1 undef, label %"13", label %"12"
 
 "12":                                             ; preds = %"8", %entry

Modified: llvm/trunk/test/Transforms/Reassociate/looptest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Reassociate/looptest.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/Reassociate/looptest.ll (original)
+++ llvm/trunk/test/Transforms/Reassociate/looptest.ll Fri Feb 27 13:29:02 2015
@@ -33,7 +33,7 @@ bb4:		; preds = %bb4, %bb3
 	%reg117 = phi i32 [ %reg118, %bb4 ], [ 0, %bb3 ]		; <i32> [#uses=2]
 	%reg113 = add i32 %reg115, %reg117		; <i32> [#uses=1]
 	%reg114 = add i32 %reg113, %reg116		; <i32> [#uses=1]
-	%cast227 = getelementptr [4 x i8]* @.LC0, i64 0, i64 0		; <i8*> [#uses=1]
+	%cast227 = getelementptr [4 x i8], [4 x i8]* @.LC0, i64 0, i64 0		; <i8*> [#uses=1]
 	call i32 (i8*, ...)* @printf( i8* %cast227, i32 %reg114 )		; <i32>:0 [#uses=0]
 	%reg118 = add i32 %reg117, 1		; <i32> [#uses=2]
 	%cond224 = icmp ne i32 %reg118, %Num		; <i1> [#uses=1]

Modified: llvm/trunk/test/Transforms/RewriteStatepointsForGC/basics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/RewriteStatepointsForGC/basics.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/RewriteStatepointsForGC/basics.ll (original)
+++ llvm/trunk/test/Transforms/RewriteStatepointsForGC/basics.ll Fri Feb 27 13:29:02 2015
@@ -38,7 +38,7 @@ define i8 @test3(i8 addrspace(1)* %obj)
 ; CHECK-NEXT: load i8 addrspace(1)* %derived.relocated
 ; CHECK-NEXT: load i8 addrspace(1)* %obj.relocated
 entry:
-  %derived = getelementptr i8 addrspace(1)* %obj, i64 10
+  %derived = getelementptr i8, i8 addrspace(1)* %obj, i64 10
   call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @foo, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
 
   %a = load i8 addrspace(1)* %derived

Modified: llvm/trunk/test/Transforms/SCCP/2002-08-30-GetElementPtrTest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SCCP/2002-08-30-GetElementPtrTest.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SCCP/2002-08-30-GetElementPtrTest.ll (original)
+++ llvm/trunk/test/Transforms/SCCP/2002-08-30-GetElementPtrTest.ll Fri Feb 27 13:29:02 2015
@@ -3,7 +3,7 @@
 @G = external global [40 x i32]		; <[40 x i32]*> [#uses=1]
 
 define i32* @test() {
-	%X = getelementptr [40 x i32]* @G, i64 0, i64 0		; <i32*> [#uses=1]
+	%X = getelementptr [40 x i32], [40 x i32]* @G, i64 0, i64 0		; <i32*> [#uses=1]
 	ret i32* %X
 }
 

Modified: llvm/trunk/test/Transforms/SCCP/2003-06-24-OverdefinedPHIValue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SCCP/2003-06-24-OverdefinedPHIValue.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SCCP/2003-06-24-OverdefinedPHIValue.ll (original)
+++ llvm/trunk/test/Transforms/SCCP/2003-06-24-OverdefinedPHIValue.ll Fri Feb 27 13:29:02 2015
@@ -22,9 +22,9 @@ endif:		; preds = %no_exit
 loopexit:		; preds = %endif, %then, %entry
 	%j.1 = phi i32 [ 1, %entry ], [ %j.0, %endif ], [ %i.0, %then ]		; <i32> [#uses=1]
 	%i.1 = phi i32 [ 1, %entry ], [ %inc, %endif ], [ %inc1, %then ]		; <i32> [#uses=1]
-	%tmp.17 = getelementptr i32* %data.1, i64 1		; <i32*> [#uses=1]
+	%tmp.17 = getelementptr i32, i32* %data.1, i64 1		; <i32*> [#uses=1]
 	store i32 %j.1, i32* %tmp.17
-	%tmp.23 = getelementptr i32* %data.1, i64 2		; <i32*> [#uses=1]
+	%tmp.23 = getelementptr i32, i32* %data.1, i64 2		; <i32*> [#uses=1]
 	store i32 %i.1, i32* %tmp.23
 	ret void
 }

Modified: llvm/trunk/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll (original)
+++ llvm/trunk/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll Fri Feb 27 13:29:02 2015
@@ -67,7 +67,7 @@ bb217:		; preds = %cond_true260
 	ret void
 cond_next252:		; preds = %cond_next208, %entry
 	%D.0.0 = phi i32 [ 0, %entry ], [ %tmp229, %cond_next208 ]		; <i32> [#uses=1]
-	%tmp254 = getelementptr i8** null, i32 1		; <i8**> [#uses=1]
+	%tmp254 = getelementptr i8*, i8** null, i32 1		; <i8**> [#uses=1]
 	%tmp256 = load i8** %tmp254		; <i8*> [#uses=1]
 	%tmp258 = load i8* %tmp256		; <i8> [#uses=1]
 	%tmp259 = icmp eq i8 %tmp258, 45		; <i1> [#uses=1]

Modified: llvm/trunk/test/Transforms/SCCP/2006-12-04-PackedType.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SCCP/2006-12-04-PackedType.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SCCP/2006-12-04-PackedType.ll (original)
+++ llvm/trunk/test/Transforms/SCCP/2006-12-04-PackedType.ll Fri Feb 27 13:29:02 2015
@@ -103,13 +103,13 @@ target triple = "powerpc-apple-darwin8"
 
 define void @gldLLVMVecPointRender(%struct.GLDContextRec* %ctx) {
 entry:
-	%tmp.uip = getelementptr %struct.GLDContextRec* %ctx, i32 0, i32 22		; <i32*> [#uses=1]
+	%tmp.uip = getelementptr %struct.GLDContextRec, %struct.GLDContextRec* %ctx, i32 0, i32 22		; <i32*> [#uses=1]
 	%tmp = load i32* %tmp.uip		; <i32> [#uses=3]
 	%tmp91 = lshr i32 %tmp, 5		; <i32> [#uses=1]
 	%tmp92 = trunc i32 %tmp91 to i1		; <i1> [#uses=1]
 	br i1 %tmp92, label %cond_true93, label %cond_next116
 cond_true93:		; preds = %entry
-	%tmp.upgrd.1 = getelementptr %struct.GLDContextRec* %ctx, i32 0, i32 31, i32 14		; <i32*> [#uses=1]
+	%tmp.upgrd.1 = getelementptr %struct.GLDContextRec, %struct.GLDContextRec* %ctx, i32 0, i32 31, i32 14		; <i32*> [#uses=1]
 	%tmp95 = load i32* %tmp.upgrd.1		; <i32> [#uses=1]
 	%tmp95.upgrd.2 = sitofp i32 %tmp95 to float		; <float> [#uses=1]
 	%tmp108 = fmul float undef, %tmp95.upgrd.2		; <float> [#uses=1]

Modified: llvm/trunk/test/Transforms/SCCP/apint-array.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SCCP/apint-array.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SCCP/apint-array.ll (original)
+++ llvm/trunk/test/Transforms/SCCP/apint-array.ll Fri Feb 27 13:29:02 2015
@@ -6,7 +6,7 @@
 define i101 @array()
 {
 Head:
-   %A = getelementptr [6 x i101]* @Y, i32 0, i32 1
+   %A = getelementptr [6 x i101], [6 x i101]* @Y, i32 0, i32 1
 
    %B = load i101* %A
    %C = icmp sge i101 %B, 1
@@ -14,7 +14,7 @@ Head:
 True:
    %D = and i101 %B, 1
    %E = trunc i101 %D to i32
-   %F = getelementptr [6 x i101]* @Y, i32 0, i32 %E
+   %F = getelementptr [6 x i101], [6 x i101]* @Y, i32 0, i32 %E
    %G = load i101* %F
    br label %False
 False:

Modified: llvm/trunk/test/Transforms/SCCP/apint-bigarray.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SCCP/apint-bigarray.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SCCP/apint-bigarray.ll (original)
+++ llvm/trunk/test/Transforms/SCCP/apint-bigarray.ll Fri Feb 27 13:29:02 2015
@@ -3,7 +3,7 @@
 @G =  global [1000000 x i10000] zeroinitializer
 
 define internal i10000* @test(i10000 %Arg) {
-	%X = getelementptr [1000000 x i10000]* @G, i32 0, i32 999
+	%X = getelementptr [1000000 x i10000], [1000000 x i10000]* @G, i32 0, i32 999
         store i10000 %Arg, i10000* %X
 	ret i10000* %X
 }

Modified: llvm/trunk/test/Transforms/SCCP/apint-bigint2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SCCP/apint-bigint2.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SCCP/apint-bigint2.ll (original)
+++ llvm/trunk/test/Transforms/SCCP/apint-bigint2.ll Fri Feb 27 13:29:02 2015
@@ -6,12 +6,12 @@
 define i101 @array()
 {
 Head:
-   %A = getelementptr [6 x i101]* @Y, i32 0, i32 1
+   %A = getelementptr [6 x i101], [6 x i101]* @Y, i32 0, i32 1
    %B = load i101* %A
    %D = and i101 %B, 1
    %DD = or i101 %D, 1
    %E = trunc i101 %DD to i32
-   %F = getelementptr [6 x i101]* @Y, i32 0, i32 %E
+   %F = getelementptr [6 x i101], [6 x i101]* @Y, i32 0, i32 %E
    %G = load i101* %F
  
    ret i101 %G

Modified: llvm/trunk/test/Transforms/SCCP/apint-ipsccp4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SCCP/apint-ipsccp4.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SCCP/apint-ipsccp4.ll (original)
+++ llvm/trunk/test/Transforms/SCCP/apint-ipsccp4.ll Fri Feb 27 13:29:02 2015
@@ -9,13 +9,13 @@
                                      { i212, float } { i212 37, float 2.0 } ]
 
 define internal float @test2() {
-	%A = getelementptr [2 x { i212, float}]* @Y, i32 0, i32 1, i32 1
+	%A = getelementptr [2 x { i212, float}], [2 x { i212, float}]* @Y, i32 0, i32 1, i32 1
 	%B = load float* %A
 	ret float %B
 }
 
 define internal float  @test3() {
-	%A = getelementptr [2 x { i212, float}]* @Y, i32 0, i32 0, i32 1
+	%A = getelementptr [2 x { i212, float}], [2 x { i212, float}]* @Y, i32 0, i32 0, i32 1
 	%B = load float* %A
 	ret float %B
 }

Modified: llvm/trunk/test/Transforms/SCCP/apint-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SCCP/apint-load.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SCCP/apint-load.ll (original)
+++ llvm/trunk/test/Transforms/SCCP/apint-load.ll Fri Feb 27 13:29:02 2015
@@ -12,13 +12,13 @@ define i212 @test1() {
 }
 
 define internal float @test2() {
-	%A = getelementptr [2 x { i212, float}]* @Y, i32 0, i32 1, i32 1
+	%A = getelementptr [2 x { i212, float}], [2 x { i212, float}]* @Y, i32 0, i32 1, i32 1
 	%B = load float* %A
 	ret float %B
 }
 
 define internal i212 @test3() {
-	%A = getelementptr [2 x { i212, float}]* @Y, i32 0, i32 0, i32 0
+	%A = getelementptr [2 x { i212, float}], [2 x { i212, float}]* @Y, i32 0, i32 0, i32 0
 	%B = load i212* %A
 	ret i212 %B
 }

Modified: llvm/trunk/test/Transforms/SCCP/apint-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SCCP/apint-select.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SCCP/apint-select.ll (original)
+++ llvm/trunk/test/Transforms/SCCP/apint-select.ll Fri Feb 27 13:29:02 2015
@@ -3,7 +3,7 @@
 @A = constant i32 10
 
 define i712 @test1() {
-        %P = getelementptr i32* @A, i32 0
+        %P = getelementptr i32, i32* @A, i32 0
         %B = ptrtoint i32* %P to i64
         %BB = and i64 %B, undef
         %C = icmp sge i64 %BB, 0

Modified: llvm/trunk/test/Transforms/SCCP/loadtest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SCCP/loadtest.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SCCP/loadtest.ll (original)
+++ llvm/trunk/test/Transforms/SCCP/loadtest.ll Fri Feb 27 13:29:02 2015
@@ -15,13 +15,13 @@ define i32 @test1() {
 }
 
 define float @test2() {
-	%A = getelementptr [2 x { i32, float }]* @Y, i64 0, i64 1, i32 1		; <float*> [#uses=1]
+	%A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 1, i32 1		; <float*> [#uses=1]
 	%B = load float* %A		; <float> [#uses=1]
 	ret float %B
 }
 
 define i32 @test3() {
-	%A = getelementptr [2 x { i32, float }]* @Y, i64 0, i64 0, i32 0		; <i32*> [#uses=1]
+	%A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 0, i32 0		; <i32*> [#uses=1]
 	%B = load i32* %A
 	ret i32 %B
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/AArch64/commute.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/AArch64/commute.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/AArch64/commute.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/AArch64/commute.ll Fri Feb 27 13:29:02 2015
@@ -6,8 +6,8 @@ target triple = "aarch64--linux-gnu"
 
 define void @test1(%structA* nocapture readonly %J, i32 %xmin, i32 %ymin) {
 ; CHECK-LABEL: test1
-; CHECK: %arrayidx4 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 0
-; CHECK: %arrayidx9 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 1
+; CHECK: %arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
+; CHECK: %arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
 ; CHECK: %3 = bitcast float* %arrayidx4 to <2 x float>*
 ; CHECK: %4 = load <2 x float>* %3, align 4
 ; CHECK: %5 = fsub fast <2 x float> %2, %4
@@ -23,10 +23,10 @@ entry:
 for.body3.lr.ph:
   %conv5 = sitofp i32 %ymin to float
   %conv = sitofp i32 %xmin to float
-  %arrayidx4 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 0
+  %arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
   %0 = load float* %arrayidx4, align 4
   %sub = fsub fast float %conv, %0
-  %arrayidx9 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 1
+  %arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
   %1 = load float* %arrayidx9, align 4
   %sub10 = fsub fast float %conv5, %1
   %mul11 = fmul fast float %sub, %sub
@@ -41,8 +41,8 @@ for.end27:
 
 define void @test2(%structA* nocapture readonly %J, i32 %xmin, i32 %ymin) {
 ; CHECK-LABEL: test2
-; CHECK: %arrayidx4 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 0
-; CHECK: %arrayidx9 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 1
+; CHECK: %arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
+; CHECK: %arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
 ; CHECK: %3 = bitcast float* %arrayidx4 to <2 x float>*
 ; CHECK: %4 = load <2 x float>* %3, align 4
 ; CHECK: %5 = fsub fast <2 x float> %2, %4
@@ -58,10 +58,10 @@ entry:
 for.body3.lr.ph:
   %conv5 = sitofp i32 %ymin to float
   %conv = sitofp i32 %xmin to float
-  %arrayidx4 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 0
+  %arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
   %0 = load float* %arrayidx4, align 4
   %sub = fsub fast float %conv, %0
-  %arrayidx9 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 1
+  %arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
   %1 = load float* %arrayidx9, align 4
   %sub10 = fsub fast float %conv5, %1
   %mul11 = fmul fast float %sub, %sub

Modified: llvm/trunk/test/Transforms/SLPVectorizer/AArch64/load-store-q.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/AArch64/load-store-q.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/AArch64/load-store-q.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/AArch64/load-store-q.ll Fri Feb 27 13:29:02 2015
@@ -13,8 +13,8 @@ target triple = "arm64-apple-ios5.0.0"
 ; CHECK: store double
 ; CHECK: store double
 define void @f(double* %p, double* %q) {
-  %addr2 = getelementptr double* %q, i32 1
-  %addr = getelementptr double* %p, i32 1
+  %addr2 = getelementptr double, double* %q, i32 1
+  %addr = getelementptr double, double* %p, i32 1
   %x = load double* %p
   %y = load double* %addr
   call void @g()
@@ -35,8 +35,8 @@ entry:
 loop:
   %p1 = phi double [0.0, %entry], [%x, %loop]
   %p2 = phi double [0.0, %entry], [%y, %loop]
-  %addr2 = getelementptr double* %q, i32 1
-  %addr = getelementptr double* %p, i32 1
+  %addr2 = getelementptr double, double* %q, i32 1
+  %addr = getelementptr double, double* %p, i32 1
   store double %p1, double* %q
   store double %p2, double* %addr2
 

Modified: llvm/trunk/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll Fri Feb 27 13:29:02 2015
@@ -14,29 +14,29 @@ entry:
   %add = add nsw i32 %1, %0
   %div = sdiv i32 %add, 2
   store i32 %div, i32* %a, align 4
-  %arrayidx3 = getelementptr inbounds i32* %b, i64 1
+  %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 1
   %2 = load i32* %arrayidx3, align 4
-  %arrayidx4 = getelementptr inbounds i32* %c, i64 1
+  %arrayidx4 = getelementptr inbounds i32, i32* %c, i64 1
   %3 = load i32* %arrayidx4, align 4
   %add5 = add nsw i32 %3, %2
   %div6 = sdiv i32 %add5, 2
-  %arrayidx7 = getelementptr inbounds i32* %a, i64 1
+  %arrayidx7 = getelementptr inbounds i32, i32* %a, i64 1
   store i32 %div6, i32* %arrayidx7, align 4
-  %arrayidx8 = getelementptr inbounds i32* %b, i64 2
+  %arrayidx8 = getelementptr inbounds i32, i32* %b, i64 2
   %4 = load i32* %arrayidx8, align 4
-  %arrayidx9 = getelementptr inbounds i32* %c, i64 2
+  %arrayidx9 = getelementptr inbounds i32, i32* %c, i64 2
   %5 = load i32* %arrayidx9, align 4
   %add10 = add nsw i32 %5, %4
   %div11 = sdiv i32 %add10, 2
-  %arrayidx12 = getelementptr inbounds i32* %a, i64 2
+  %arrayidx12 = getelementptr inbounds i32, i32* %a, i64 2
   store i32 %div11, i32* %arrayidx12, align 4
-  %arrayidx13 = getelementptr inbounds i32* %b, i64 3
+  %arrayidx13 = getelementptr inbounds i32, i32* %b, i64 3
   %6 = load i32* %arrayidx13, align 4
-  %arrayidx14 = getelementptr inbounds i32* %c, i64 3
+  %arrayidx14 = getelementptr inbounds i32, i32* %c, i64 3
   %7 = load i32* %arrayidx14, align 4
   %add15 = add nsw i32 %7, %6
   %div16 = sdiv i32 %add15, 2
-  %arrayidx17 = getelementptr inbounds i32* %a, i64 3
+  %arrayidx17 = getelementptr inbounds i32, i32* %a, i64 3
   store i32 %div16, i32* %arrayidx17, align 4
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/ARM/memory.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/ARM/memory.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/ARM/memory.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/ARM/memory.ll Fri Feb 27 13:29:02 2015
@@ -12,9 +12,9 @@ define void @expensive_double_store(doub
 entry:
   %0 = load double* %src, align 8
   store double %0, double* %dst, align 8
-  %arrayidx2 = getelementptr inbounds double* %src, i64 1
+  %arrayidx2 = getelementptr inbounds double, double* %src, i64 1
   %1 = load double* %arrayidx2, align 8
-  %arrayidx3 = getelementptr inbounds double* %dst, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %dst, i64 1
   store double %1, double* %arrayidx3, align 8
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/ARM/sroa.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/ARM/sroa.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/ARM/sroa.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/ARM/sroa.ll Fri Feb 27 13:29:02 2015
@@ -44,9 +44,9 @@ entry:
   %3 = bitcast i64 %b.sroa.3.12.insert.insert to double
   %add = fadd double %0, %2
   %add3 = fadd double %1, %3
-  %re.i.i = getelementptr inbounds %class.Complex* %agg.result, i32 0, i32 0
+  %re.i.i = getelementptr inbounds %class.Complex, %class.Complex* %agg.result, i32 0, i32 0
   store double %add, double* %re.i.i, align 4
-  %im.i.i = getelementptr inbounds %class.Complex* %agg.result, i32 0, i32 1
+  %im.i.i = getelementptr inbounds %class.Complex, %class.Complex* %agg.result, i32 0, i32 1
   store double %add3, double* %im.i.i, align 4
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/R600/simplebb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/R600/simplebb.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/R600/simplebb.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/R600/simplebb.ll Fri Feb 27 13:29:02 2015
@@ -13,13 +13,13 @@ define void @test1_as_3_3_3(double addrs
   %i0 = load double addrspace(3)* %a, align 8
   %i1 = load double addrspace(3)* %b, align 8
   %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double addrspace(3)* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double addrspace(3)* %a, i64 1
   %i3 = load double addrspace(3)* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double addrspace(3)* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double addrspace(3)* %b, i64 1
   %i4 = load double addrspace(3)* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   store double %mul, double addrspace(3)* %c, align 8
-  %arrayidx5 = getelementptr inbounds double addrspace(3)* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double addrspace(3)* %c, i64 1
   store double %mul5, double addrspace(3)* %arrayidx5, align 8
   ret void
 }
@@ -33,13 +33,13 @@ define void @test1_as_3_0_0(double addrs
   %i0 = load double addrspace(3)* %a, align 8
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double addrspace(3)* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double addrspace(3)* %a, i64 1
   %i3 = load double addrspace(3)* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   store double %mul, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %mul5, double* %arrayidx5, align 8
   ret void
 }
@@ -53,13 +53,13 @@ define void @test1_as_0_0_3(double* %a,
   %i0 = load double* %a, align 8
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   store double %mul, double addrspace(3)* %c, align 8
-  %arrayidx5 = getelementptr inbounds double addrspace(3)* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double addrspace(3)* %c, i64 1
   store double %mul5, double addrspace(3)* %arrayidx5, align 8
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/addsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/addsub.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/addsub.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/addsub.ll Fri Feb 27 13:29:02 2015
@@ -264,15 +264,15 @@ define void @reorder_alt_rightsubTree(do
   %4 = load double* %d
   %5 = fsub double %3, %4
   store double %5, double* %c
-  %6 = getelementptr inbounds double* %d, i64 1
+  %6 = getelementptr inbounds double, double* %d, i64 1
   %7 = load double* %6
-  %8 = getelementptr inbounds double* %a, i64 1
+  %8 = getelementptr inbounds double, double* %a, i64 1
   %9 = load double* %8
-  %10 = getelementptr inbounds double* %b, i64 1
+  %10 = getelementptr inbounds double, double* %b, i64 1
   %11 = load double* %10
   %12 = fadd double %9, %11
   %13 = fadd double %7, %12
-  %14 = getelementptr inbounds double* %c, i64 1
+  %14 = getelementptr inbounds double, double* %c, i64 1
   store double %13, double* %14
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/align.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/align.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/align.ll Fri Feb 27 13:29:02 2015
@@ -12,11 +12,11 @@ entry:
   %i0 = load double* %a 
   %i1 = load double* %b 
   %mul = fmul double %i0, %i1
-  %store1 = getelementptr inbounds [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 1
-  %store2 = getelementptr inbounds [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 2
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %store1 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 1
+  %store2 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 2
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
 ; CHECK: store <2 x double> %[[V1:[0-9]+]], <2 x double>* %[[V2:[0-9]+]], align 8
@@ -38,18 +38,18 @@ entry:
 define void @test2(float * %a, float * %b) {
 entry:
   %l0 = load float* %a
-  %a1 = getelementptr inbounds float* %a, i64 1
+  %a1 = getelementptr inbounds float, float* %a, i64 1
   %l1 = load float* %a1
-  %a2 = getelementptr inbounds float* %a, i64 2
+  %a2 = getelementptr inbounds float, float* %a, i64 2
   %l2 = load float* %a2
-  %a3 = getelementptr inbounds float* %a, i64 3
+  %a3 = getelementptr inbounds float, float* %a, i64 3
   %l3 = load float* %a3
   store float %l0, float* %b
-  %b1 = getelementptr inbounds float* %b, i64 1
+  %b1 = getelementptr inbounds float, float* %b, i64 1
   store float %l1, float* %b1
-  %b2 = getelementptr inbounds float* %b, i64 2
+  %b2 = getelementptr inbounds float, float* %b, i64 2
   store float %l2, float* %b2
-  %b3 = getelementptr inbounds float* %b, i64 3
+  %b3 = getelementptr inbounds float, float* %b, i64 3
   store float %l3, float* %b3
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/bad_types.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/bad_types.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/bad_types.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/bad_types.ll Fri Feb 27 13:29:02 2015
@@ -16,7 +16,7 @@ entry:
   %b.cast = bitcast x86_mmx %b to i64
   %a.and = and i64 %a.cast, 42
   %b.and = and i64 %b.cast, 42
-  %gep = getelementptr i64* %ptr, i32 1
+  %gep = getelementptr i64, i64* %ptr, i32 1
   store i64 %a.and, i64* %ptr
   store i64 %b.and, i64* %gep
   ret void

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/barriercall.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/barriercall.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/barriercall.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/barriercall.ll Fri Feb 27 13:29:02 2015
@@ -14,15 +14,15 @@ entry:
   store i32 %add, i32* %A, align 4
   %mul1 = mul nsw i32 %n, 9
   %add2 = add nsw i32 %mul1, 9
-  %arrayidx3 = getelementptr inbounds i32* %A, i64 1
+  %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 1
   store i32 %add2, i32* %arrayidx3, align 4
   %mul4 = shl i32 %n, 3
   %add5 = add nsw i32 %mul4, 9
-  %arrayidx6 = getelementptr inbounds i32* %A, i64 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 2
   store i32 %add5, i32* %arrayidx6, align 4
   %mul7 = mul nsw i32 %n, 10
   %add8 = add nsw i32 %mul7, 9
-  %arrayidx9 = getelementptr inbounds i32* %A, i64 3
+  %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 3
   store i32 %add8, i32* %arrayidx9, align 4
   ret i32 undef
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/call.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/call.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/call.ll Fri Feb 27 13:29:02 2015
@@ -19,14 +19,14 @@ entry:
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
   %call = tail call double @sin(double %mul) nounwind readnone
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   %call5 = tail call double @sin(double %mul5) nounwind readnone
   store double %call, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %call5, double* %arrayidx5, align 8
   ret void
 }
@@ -40,14 +40,14 @@ entry:
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
   %call = tail call double @cos(double %mul) nounwind readnone
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   %call5 = tail call double @cos(double %mul5) nounwind readnone
   store double %call, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %call5, double* %arrayidx5, align 8
   ret void
 }
@@ -61,14 +61,14 @@ entry:
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
   %call = tail call double @pow(double %mul,double %mul) nounwind readnone
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   %call5 = tail call double @pow(double %mul5,double %mul5) nounwind readnone
   store double %call, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %call5, double* %arrayidx5, align 8
   ret void
 }
@@ -83,14 +83,14 @@ entry:
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
   %call = tail call double @exp2(double %mul) nounwind readnone
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   %call5 = tail call double @exp2(double %mul5) nounwind readnone
   store double %call, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %call5, double* %arrayidx5, align 8
   ret void
 }
@@ -106,14 +106,14 @@ entry:
   %i1 = load i64* %b, align 8
   %mul = mul i64 %i0, %i1
   %call = tail call i64 @round(i64 %mul) nounwind readnone
-  %arrayidx3 = getelementptr inbounds i64* %a, i64 1
+  %arrayidx3 = getelementptr inbounds i64, i64* %a, i64 1
   %i3 = load i64* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds i64* %b, i64 1
+  %arrayidx4 = getelementptr inbounds i64, i64* %b, i64 1
   %i4 = load i64* %arrayidx4, align 8
   %mul5 = mul i64 %i3, %i4
   %call5 = tail call i64 @round(i64 %mul5) nounwind readnone
   store i64 %call, i64* %c, align 8
-  %arrayidx5 = getelementptr inbounds i64* %c, i64 1
+  %arrayidx5 = getelementptr inbounds i64, i64* %c, i64 1
   store i64 %call5, i64* %arrayidx5, align 8
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/cast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/cast.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/cast.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/cast.ll Fri Feb 27 13:29:02 2015
@@ -18,20 +18,20 @@ entry:
   %0 = load i8* %B, align 1
   %conv = sext i8 %0 to i32
   store i32 %conv, i32* %A, align 4
-  %arrayidx2 = getelementptr inbounds i8* %B, i64 1
+  %arrayidx2 = getelementptr inbounds i8, i8* %B, i64 1
   %1 = load i8* %arrayidx2, align 1
   %conv3 = sext i8 %1 to i32
-  %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
   store i32 %conv3, i32* %arrayidx4, align 4
-  %arrayidx5 = getelementptr inbounds i8* %B, i64 2
+  %arrayidx5 = getelementptr inbounds i8, i8* %B, i64 2
   %2 = load i8* %arrayidx5, align 1
   %conv6 = sext i8 %2 to i32
-  %arrayidx7 = getelementptr inbounds i32* %A, i64 2
+  %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 2
   store i32 %conv6, i32* %arrayidx7, align 4
-  %arrayidx8 = getelementptr inbounds i8* %B, i64 3
+  %arrayidx8 = getelementptr inbounds i8, i8* %B, i64 3
   %3 = load i8* %arrayidx8, align 1
   %conv9 = sext i8 %3 to i32
-  %arrayidx10 = getelementptr inbounds i32* %A, i64 3
+  %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 3
   store i32 %conv9, i32* %arrayidx10, align 4
   ret i32 undef
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/cmp_sel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/cmp_sel.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/cmp_sel.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/cmp_sel.ll Fri Feb 27 13:29:02 2015
@@ -16,16 +16,16 @@ target triple = "x86_64-apple-macosx10.8
 ;CHECK: ret i32 undef
 define i32 @foo(double* noalias nocapture %A, double* noalias nocapture %B, double %G) {
 entry:
-  %arrayidx = getelementptr inbounds double* %B, i64 10
+  %arrayidx = getelementptr inbounds double, double* %B, i64 10
   %0 = load double* %arrayidx, align 8
   %tobool = fcmp une double %0, 0.000000e+00
   %cond = select i1 %tobool, double %G, double 1.000000e+00
   store double %cond, double* %A, align 8
-  %arrayidx2 = getelementptr inbounds double* %B, i64 11
+  %arrayidx2 = getelementptr inbounds double, double* %B, i64 11
   %1 = load double* %arrayidx2, align 8
   %tobool3 = fcmp une double %1, 0.000000e+00
   %cond7 = select i1 %tobool3, double %G, double 1.000000e+00
-  %arrayidx8 = getelementptr inbounds double* %A, i64 1
+  %arrayidx8 = getelementptr inbounds double, double* %A, i64 1
   store double %cond7, double* %arrayidx8, align 8
   ret i32 undef
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/compare-reduce.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/compare-reduce.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/compare-reduce.ll Fri Feb 27 13:29:02 2015
@@ -21,13 +21,13 @@ entry:
 for.body:                                         ; preds = %for.inc, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
   %0 = shl nsw i64 %indvars.iv, 1
-  %arrayidx = getelementptr inbounds double* %A, i64 %0
+  %arrayidx = getelementptr inbounds double, double* %A, i64 %0
   %1 = load double* %arrayidx, align 8
   %mul1 = fmul double %conv, %1
   %mul2 = fmul double %mul1, 7.000000e+00
   %add = fadd double %mul2, 5.000000e+00
   %2 = or i64 %0, 1
-  %arrayidx6 = getelementptr inbounds double* %A, i64 %2
+  %arrayidx6 = getelementptr inbounds double, double* %A, i64 %2
   %3 = load double* %arrayidx6, align 8
   %mul8 = fmul double %conv, %3
   %mul9 = fmul double %mul8, 4.000000e+00

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/consecutive-access.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/consecutive-access.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/consecutive-access.ll Fri Feb 27 13:29:02 2015
@@ -20,25 +20,25 @@ entry:
   store i32 %u, i32* %u.addr, align 4
   %mul = mul nsw i32 %u, 3
   %idxprom = sext i32 %mul to i64
-  %arrayidx = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom
+  %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
   %0 = load double* %arrayidx, align 8
-  %arrayidx4 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom
+  %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
   %1 = load double* %arrayidx4, align 8
   %add5 = fadd double %0, %1
   store double %add5, double* %arrayidx, align 8
   %add11 = add nsw i32 %mul, 1
   %idxprom12 = sext i32 %add11 to i64
-  %arrayidx13 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom12
+  %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
   %2 = load double* %arrayidx13, align 8
-  %arrayidx17 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom12
+  %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
   %3 = load double* %arrayidx17, align 8
   %add18 = fadd double %2, %3
   store double %add18, double* %arrayidx13, align 8
   %add24 = add nsw i32 %mul, 2
   %idxprom25 = sext i32 %add24 to i64
-  %arrayidx26 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom25
+  %arrayidx26 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom25
   %4 = load double* %arrayidx26, align 8
-  %arrayidx30 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom25
+  %arrayidx30 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom25
   %5 = load double* %arrayidx30, align 8
   %add31 = fadd double %4, %5
   store double %add31, double* %arrayidx26, align 8
@@ -57,17 +57,17 @@ entry:
   store i32 %u, i32* %u.addr, align 4
   %mul = mul nsw i32 %u, 2
   %idxprom = sext i32 %mul to i64
-  %arrayidx = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom
+  %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
   %0 = load double* %arrayidx, align 8
-  %arrayidx4 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom
+  %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
   %1 = load double* %arrayidx4, align 8
   %add5 = fadd double %0, %1
   store double %add5, double* %arrayidx, align 8
   %add11 = add nsw i32 %mul, 1
   %idxprom12 = sext i32 %add11 to i64
-  %arrayidx13 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom12
+  %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
   %2 = load double* %arrayidx13, align 8
-  %arrayidx17 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom12
+  %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
   %3 = load double* %arrayidx17, align 8
   %add18 = fadd double %2, %3
   store double %add18, double* %arrayidx13, align 8
@@ -84,33 +84,33 @@ entry:
   store i32 %u, i32* %u.addr, align 4
   %mul = mul nsw i32 %u, 4
   %idxprom = sext i32 %mul to i64
-  %arrayidx = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom
+  %arrayidx = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom
   %0 = load float* %arrayidx, align 4
-  %arrayidx4 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom
+  %arrayidx4 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom
   %1 = load float* %arrayidx4, align 4
   %add5 = fadd float %0, %1
   store float %add5, float* %arrayidx, align 4
   %add11 = add nsw i32 %mul, 1
   %idxprom12 = sext i32 %add11 to i64
-  %arrayidx13 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom12
+  %arrayidx13 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom12
   %2 = load float* %arrayidx13, align 4
-  %arrayidx17 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom12
+  %arrayidx17 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom12
   %3 = load float* %arrayidx17, align 4
   %add18 = fadd float %2, %3
   store float %add18, float* %arrayidx13, align 4
   %add24 = add nsw i32 %mul, 2
   %idxprom25 = sext i32 %add24 to i64
-  %arrayidx26 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom25
+  %arrayidx26 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom25
   %4 = load float* %arrayidx26, align 4
-  %arrayidx30 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom25
+  %arrayidx30 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom25
   %5 = load float* %arrayidx30, align 4
   %add31 = fadd float %4, %5
   store float %add31, float* %arrayidx26, align 4
   %add37 = add nsw i32 %mul, 3
   %idxprom38 = sext i32 %add37 to i64
-  %arrayidx39 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom38
+  %arrayidx39 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom38
   %6 = load float* %arrayidx39, align 4
-  %arrayidx43 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom38
+  %arrayidx43 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom38
   %7 = load float* %arrayidx43, align 4
   %add44 = fadd float %6, %7
   store float %add44, float* %arrayidx39, align 4
@@ -142,12 +142,12 @@ for.body:
   %1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ]
   %mul = mul nsw i32 %0, 2
   %idxprom = sext i32 %mul to i64
-  %arrayidx = getelementptr inbounds double* %A, i64 %idxprom
+  %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom
   %2 = load double* %arrayidx, align 8
   %mul1 = fmul double 7.000000e+00, %2
   %add = add nsw i32 %mul, 1
   %idxprom3 = sext i32 %add to i64
-  %arrayidx4 = getelementptr inbounds double* %A, i64 %idxprom3
+  %arrayidx4 = getelementptr inbounds double, double* %A, i64 %idxprom3
   %3 = load double* %arrayidx4, align 8
   %mul5 = fmul double 7.000000e+00, %3
   %add6 = fadd double %mul1, %mul5

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll Fri Feb 27 13:29:02 2015
@@ -12,13 +12,13 @@ entry:
   %i0 = load double* %a, align 8
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   store double %mul, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %mul5, double* %arrayidx5, align 8
   %0 = bitcast double* %a to <4 x i32>*
   %1 = load <4 x i32>* %0, align 8

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_7zip.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_7zip.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_7zip.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_7zip.ll Fri Feb 27 13:29:02 2015
@@ -8,8 +8,8 @@ target triple = "x86_64-apple-macosx10.8
 
 define fastcc void @LzmaDec_DecodeReal2(%struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p) {
 entry:
-  %range20.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 4
-  %code21.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 5
+  %range20.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334, %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 4
+  %code21.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334, %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 5
   br label %do.body66.i
 
 do.body66.i:                                      ; preds = %do.cond.i, %entry

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_bullet.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_bullet.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_bullet.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_bullet.ll Fri Feb 27 13:29:02 2015
@@ -13,8 +13,8 @@ if.then:
   ret void
 
 if.else:                                          ; preds = %entry
-  %m_numConstraintRows4 = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* %info, i64 0, i32 0
-  %nub5 = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* %info, i64 0, i32 1
+  %m_numConstraintRows4 = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960", %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* %info, i64 0, i32 0
+  %nub5 = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960", %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* %info, i64 0, i32 1
   br i1 undef, label %land.lhs.true.i.1, label %if.then7.1
 
 land.lhs.true.i.1:                                ; preds = %if.else
@@ -43,8 +43,8 @@ for.inc.1:
 
 define void @_ZN30GIM_TRIANGLE_CALCULATION_CACHE18triangle_collisionERK9btVector3S2_S2_fS2_S2_S2_fR25GIM_TRIANGLE_CONTACT_DATA(%class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this) {
 entry:
-  %arrayidx26 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 1
-  %arrayidx36 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 2
+  %arrayidx26 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332, %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 1
+  %arrayidx36 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332, %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 2
   %0 = load float* %arrayidx36, align 4
   %add587 = fadd float undef, undef
   %sub600 = fsub float %add587, undef

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_bullet3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_bullet3.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_bullet3.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_bullet3.ll Fri Feb 27 13:29:02 2015
@@ -59,9 +59,9 @@ if.end332:
   %dy276.1 = phi float [ undef, %if.then329 ], [ undef, %if.end327 ], [ 0x3F847AE140000000, %if.then291 ]
   %sub334 = fsub float %add294, %dx272.1
   %sub338 = fsub float %add297, %dy276.1
-  %arrayidx.i.i606 = getelementptr inbounds %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* %vertices, i64 0, i32 0, i64 0
+  %arrayidx.i.i606 = getelementptr inbounds %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113, %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* %vertices, i64 0, i32 0, i64 0
   store float %sub334, float* %arrayidx.i.i606, align 4
-  %arrayidx3.i607 = getelementptr inbounds %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* %vertices, i64 0, i32 0, i64 1
+  %arrayidx3.i607 = getelementptr inbounds %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113, %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* %vertices, i64 0, i32 0, i64 1
   store float %sub338, float* %arrayidx3.i607, align 4
   br label %return
 

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll Fri Feb 27 13:29:02 2015
@@ -12,10 +12,10 @@ for.body:
   %acc1.056 = phi float [ 0.000000e+00, %entry ], [ %add13, %for.body ]
   %s1.055 = phi float [ 0.000000e+00, %entry ], [ %cond.i40, %for.body ]
   %s0.054 = phi float [ 0.000000e+00, %entry ], [ %cond.i44, %for.body ]
-  %arrayidx = getelementptr inbounds float* %src, i64 %indvars.iv
+  %arrayidx = getelementptr inbounds float, float* %src, i64 %indvars.iv
   %0 = load float* %arrayidx, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-  %arrayidx2 = getelementptr inbounds float* %dest, i64 %indvars.iv
+  %arrayidx2 = getelementptr inbounds float, float* %dest, i64 %indvars.iv
   store float %acc1.056, float* %arrayidx2, align 4
   %add = fadd float %s0.054, %0
   %add3 = fadd float %s1.055, %0

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll Fri Feb 27 13:29:02 2015
@@ -7,12 +7,12 @@ target triple = "x86_64-apple-macosx10.8
 ; Function Attrs: nounwind ssp uwtable
 define void @_ZSt6uniqueISt15_Deque_iteratorIdRdPdEET_S4_S4_(%"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* nocapture %__last) {
 entry:
-  %_M_cur2.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 0
+  %_M_cur2.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 0
   %0 = load double** %_M_cur2.i.i, align 8
-  %_M_first3.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 1
-  %_M_cur2.i.i81 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 0
+  %_M_first3.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 1
+  %_M_cur2.i.i81 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 0
   %1 = load double** %_M_cur2.i.i81, align 8
-  %_M_first3.i.i83 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 1
+  %_M_first3.i.i83 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 1
   %2 = load double** %_M_first3.i.i83, align 8
   br i1 undef, label %_ZSt13adjacent_findISt15_Deque_iteratorIdRdPdEET_S4_S4_.exit, label %while.cond.i.preheader
 

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_gep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_gep.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_gep.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_gep.ll Fri Feb 27 13:29:02 2015
@@ -9,9 +9,9 @@ target triple = "x86_64-unknown-linux-gn
 define i32 @fn1() {
 entry:
   %0 = load i64** @a, align 8
-  %add.ptr = getelementptr inbounds i64* %0, i64 1
+  %add.ptr = getelementptr inbounds i64, i64* %0, i64 1
   %1 = ptrtoint i64* %add.ptr to i64
-  %arrayidx = getelementptr inbounds i64* %0, i64 2
+  %arrayidx = getelementptr inbounds i64, i64* %0, i64 2
   store i64 %1, i64* %arrayidx, align 8
   %2 = ptrtoint i64* %arrayidx to i64
   store i64 %2, i64* %add.ptr, align 8

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_lencod.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_lencod.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_lencod.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_lencod.ll Fri Feb 27 13:29:02 2015
@@ -68,19 +68,19 @@ if.end103:
 define void @intrapred_luma() {
 entry:
   %conv153 = trunc i32 undef to i16
-  %arrayidx154 = getelementptr inbounds [13 x i16]* undef, i64 0, i64 12
+  %arrayidx154 = getelementptr inbounds [13 x i16], [13 x i16]* undef, i64 0, i64 12
   store i16 %conv153, i16* %arrayidx154, align 8
-  %arrayidx155 = getelementptr inbounds [13 x i16]* undef, i64 0, i64 11
+  %arrayidx155 = getelementptr inbounds [13 x i16], [13 x i16]* undef, i64 0, i64 11
   store i16 %conv153, i16* %arrayidx155, align 2
-  %arrayidx156 = getelementptr inbounds [13 x i16]* undef, i64 0, i64 10
+  %arrayidx156 = getelementptr inbounds [13 x i16], [13 x i16]* undef, i64 0, i64 10
   store i16 %conv153, i16* %arrayidx156, align 4
   ret void
 }
 
 define fastcc void @dct36(double* %inbuf) {
 entry:
-  %arrayidx41 = getelementptr inbounds double* %inbuf, i64 2
-  %arrayidx44 = getelementptr inbounds double* %inbuf, i64 1
+  %arrayidx41 = getelementptr inbounds double, double* %inbuf, i64 2
+  %arrayidx44 = getelementptr inbounds double, double* %inbuf, i64 1
   %0 = load double* %arrayidx44, align 8
   %add46 = fadd double %0, undef
   store double %add46, double* %arrayidx41, align 8

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll Fri Feb 27 13:29:02 2015
@@ -62,11 +62,11 @@ bb:
   %tmp4 = fmul double %tmp3, undef
   %tmp5 = fmul double %tmp3, undef
   %tmp6 = fsub double %tmp5, undef
-  %tmp7 = getelementptr inbounds %struct.hoge* %arg, i64 0, i32 1
+  %tmp7 = getelementptr inbounds %struct.hoge, %struct.hoge* %arg, i64 0, i32 1
   store double %tmp6, double* %tmp7, align 8
   %tmp8 = fmul double %tmp1, undef
   %tmp9 = fsub double %tmp8, undef
-  %tmp10 = getelementptr inbounds %struct.hoge* %arg, i64 0, i32 2
+  %tmp10 = getelementptr inbounds %struct.hoge, %struct.hoge* %arg, i64 0, i32 2
   store double %tmp9, double* %tmp10, align 8
   br i1 undef, label %bb11, label %bb12
 
@@ -86,8 +86,8 @@ bb14:
 
 define void @rc4_crypt(%struct.rc4_state.0.24* nocapture %s) {
 entry:
-  %x1 = getelementptr inbounds %struct.rc4_state.0.24* %s, i64 0, i32 0
-  %y2 = getelementptr inbounds %struct.rc4_state.0.24* %s, i64 0, i32 1
+  %x1 = getelementptr inbounds %struct.rc4_state.0.24, %struct.rc4_state.0.24* %s, i64 0, i32 0
+  %y2 = getelementptr inbounds %struct.rc4_state.0.24, %struct.rc4_state.0.24* %s, i64 0, i32 1
   br i1 undef, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.body, %entry

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll Fri Feb 27 13:29:02 2015
@@ -25,11 +25,11 @@ for.body:
   %p3.addr.0258 = phi double [ %add, %bb1 ], [ %add28, %for.body ]
   %vecinit.i.i237 = insertelement <2 x double> undef, double %t.0259, i32 0
   %x13 = tail call i32 @_xfn(<2 x double> %vecinit.i.i237) #2
-  %arrayidx = getelementptr inbounds [256 x i32]* %tab1, i64 0, i64 %indvars.iv266
+  %arrayidx = getelementptr inbounds [256 x i32], [256 x i32]* %tab1, i64 0, i64 %indvars.iv266
   store i32 %x13, i32* %arrayidx, align 4, !tbaa !4
   %vecinit.i.i = insertelement <2 x double> undef, double %p3.addr.0258, i32 0
   %x14 = tail call i32 @_xfn(<2 x double> %vecinit.i.i) #2
-  %arrayidx26 = getelementptr inbounds [256 x i32]* %tab2, i64 0, i64 %indvars.iv266
+  %arrayidx26 = getelementptr inbounds [256 x i32], [256 x i32]* %tab2, i64 0, i64 %indvars.iv266
   store i32 %x14, i32* %arrayidx26, align 4, !tbaa !4
   %add27 = fadd double %mul19, %t.0259
   %add28 = fadd double %mul21, %p3.addr.0258

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_sim4b1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_sim4b1.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_sim4b1.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_sim4b1.ll Fri Feb 27 13:29:02 2015
@@ -34,13 +34,13 @@ land.rhs.lr.ph:
   unreachable
 
 if.end98:                                         ; preds = %if.then17
-  %from299 = getelementptr inbounds %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 1
+  %from299 = getelementptr inbounds %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171, %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 1
   br i1 undef, label %land.lhs.true167, label %if.then103
 
 if.then103:                                       ; preds = %if.end98
   %.sub100 = select i1 undef, i32 250, i32 undef
   %mul114 = shl nsw i32 %.sub100, 2
-  %from1115 = getelementptr inbounds %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 0
+  %from1115 = getelementptr inbounds %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171, %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 0
   %cond125 = select i1 undef, i32 undef, i32 %mul114
   br label %for.cond.i
 

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_smallpt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_smallpt.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_smallpt.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_smallpt.ll Fri Feb 27 13:29:02 2015
@@ -21,10 +21,10 @@ invoke.cont:
   br i1 undef, label %arrayctor.cont, label %invoke.cont
 
 arrayctor.cont:                                   ; preds = %invoke.cont
-  %agg.tmp99208.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 0, i32 0
-  %agg.tmp99208.sroa.1.8.idx388 = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 0, i32 1
-  %agg.tmp101211.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 1, i32 0
-  %agg.tmp101211.sroa.1.8.idx390 = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 1, i32 1
+  %agg.tmp99208.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416, %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 0, i32 0
+  %agg.tmp99208.sroa.1.8.idx388 = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416, %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 0, i32 1
+  %agg.tmp101211.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416, %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 1, i32 0
+  %agg.tmp101211.sroa.1.8.idx390 = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416, %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 1, i32 1
   br label %for.cond36.preheader
 
 for.cond36.preheader:                             ; preds = %_Z5clampd.exit.1, %arrayctor.cont
@@ -89,9 +89,9 @@ if.then38:
   %add4.i698 = fadd double undef, %add4.i719
   %mul.i.i679 = fmul double undef, %add.i695
   %mul2.i.i680 = fmul double undef, %add4.i698
-  %agg.tmp74663.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601* undef, i64 0, i32 1, i32 0
+  %agg.tmp74663.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601, %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601* undef, i64 0, i32 1, i32 0
   store double %mul.i.i679, double* %agg.tmp74663.sroa.0.0.idx, align 8
-  %agg.tmp74663.sroa.1.8.idx943 = getelementptr inbounds %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601* undef, i64 0, i32 1, i32 1
+  %agg.tmp74663.sroa.1.8.idx943 = getelementptr inbounds %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601, %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601* undef, i64 0, i32 1, i32 1
   store double %mul2.i.i680, double* %agg.tmp74663.sroa.1.8.idx943, align 8
   br label %return
 

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll Fri Feb 27 13:29:02 2015
@@ -17,12 +17,12 @@ target triple = "x86_64-apple-macosx10.9
 
 ;define fastcc void @bar() {
 define void @bar() {
-  %1 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 0
-  %2 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 1
-  %3 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 0
-  %4 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 1
-  %5 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 0
-  %6 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 1
+  %1 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 0
+  %2 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 1
+  %3 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 0
+  %4 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 1
+  %5 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 0
+  %6 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 1
   br label %7
 
 ; <label>:7                                       ; preds = %18, %17, %17, %0

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll Fri Feb 27 13:29:02 2015
@@ -27,7 +27,7 @@ target triple = "x86_64-apple-macosx10.8
 define i32 @foo(double* nocapture %A, float* nocapture %B, i32 %g) {
 entry:
   %0 = load float* %B, align 4
-  %arrayidx1 = getelementptr inbounds float* %B, i64 1
+  %arrayidx1 = getelementptr inbounds float, float* %B, i64 1
   %1 = load float* %arrayidx1, align 4
   %add = fadd float %0, 5.000000e+00
   %add2 = fadd float %1, 8.000000e+00
@@ -44,7 +44,7 @@ if.end:
   %add4 = fadd double %conv, %2
   store double %add4, double* %A, align 8
   %conv5 = fpext float %add2 to double
-  %arrayidx6 = getelementptr inbounds double* %A, i64 1
+  %arrayidx6 = getelementptr inbounds double, double* %A, i64 1
   %3 = load double* %arrayidx6, align 8
   %add7 = fadd double %conv5, %3
   store double %add7, double* %arrayidx6, align 8

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/cse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/cse.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/cse.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/cse.ll Fri Feb 27 13:29:02 2015
@@ -21,23 +21,23 @@ target triple = "i386-apple-macosx10.8.0
 
 define i32 @test(double* nocapture %G) {
 entry:
-  %arrayidx = getelementptr inbounds double* %G, i64 5
+  %arrayidx = getelementptr inbounds double, double* %G, i64 5
   %0 = load double* %arrayidx, align 8
   %mul = fmul double %0, 4.000000e+00
   %add = fadd double %mul, 1.000000e+00
   store double %add, double* %G, align 8
-  %arrayidx2 = getelementptr inbounds double* %G, i64 6
+  %arrayidx2 = getelementptr inbounds double, double* %G, i64 6
   %1 = load double* %arrayidx2, align 8
   %mul3 = fmul double %1, 3.000000e+00
   %add4 = fadd double %mul3, 6.000000e+00
-  %arrayidx5 = getelementptr inbounds double* %G, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %G, i64 1
   store double %add4, double* %arrayidx5, align 8
   %add8 = fadd double %mul, 7.000000e+00
-  %arrayidx9 = getelementptr inbounds double* %G, i64 2
+  %arrayidx9 = getelementptr inbounds double, double* %G, i64 2
   store double %add8, double* %arrayidx9, align 8
   %mul11 = fmul double %1, 4.000000e+00
   %add12 = fadd double %mul11, 8.000000e+00
-  %arrayidx13 = getelementptr inbounds double* %G, i64 3
+  %arrayidx13 = getelementptr inbounds double, double* %G, i64 3
   store double %add12, double* %arrayidx13, align 8
   ret i32 undef
 }
@@ -61,19 +61,19 @@ entry:
   %mul1 = fmul double %conv, %mul
   %add = fadd double %mul1, 6.000000e+00
   store double %add, double* %A, align 8
-  %arrayidx3 = getelementptr inbounds double* %A, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
   %1 = load double* %arrayidx3, align 8
   %mul4 = fmul double %1, 7.700000e+00
   %mul6 = fmul double %conv, %mul4
   %add7 = fadd double %mul6, 2.000000e+00
   store double %add7, double* %arrayidx3, align 8
-  %arrayidx9 = getelementptr inbounds double* %A, i64 2
+  %arrayidx9 = getelementptr inbounds double, double* %A, i64 2
   %2 = load double* %arrayidx9, align 8
   %mul10 = fmul double %2, 7.600000e+00
   %mul12 = fmul double %conv, %mul10
   %add13 = fadd double %mul12, 3.000000e+00
   store double %add13, double* %arrayidx9, align 8
-  %arrayidx15 = getelementptr inbounds double* %A, i64 3
+  %arrayidx15 = getelementptr inbounds double, double* %A, i64 3
   %3 = load double* %arrayidx15, align 8
   %mul16 = fmul double %3, 7.400000e+00
   %mul18 = fmul double %conv, %mul16
@@ -101,7 +101,7 @@ entry:
 ; CHECK: ret
 define i32 @test2(double* nocapture %G, i32 %k) {
   %1 = icmp eq i32 %k, 0
-  %2 = getelementptr inbounds double* %G, i64 5
+  %2 = getelementptr inbounds double, double* %G, i64 5
   %3 = load double* %2, align 8
   %4 = fmul double %3, 4.000000e+00
   br i1 %1, label %12, label %5
@@ -109,23 +109,23 @@ define i32 @test2(double* nocapture %G,
 ; <label>:5                                       ; preds = %0
   %6 = fadd double %4, 1.000000e+00
   store double %6, double* %G, align 8
-  %7 = getelementptr inbounds double* %G, i64 6
+  %7 = getelementptr inbounds double, double* %G, i64 6
   %8 = load double* %7, align 8
   %9 = fmul double %8, 3.000000e+00
   %10 = fadd double %9, 6.000000e+00
-  %11 = getelementptr inbounds double* %G, i64 1
+  %11 = getelementptr inbounds double, double* %G, i64 1
   store double %10, double* %11, align 8
   br label %20
 
 ; <label>:12                                      ; preds = %0
   %13 = fadd double %4, 7.000000e+00
-  %14 = getelementptr inbounds double* %G, i64 2
+  %14 = getelementptr inbounds double, double* %G, i64 2
   store double %13, double* %14, align 8
-  %15 = getelementptr inbounds double* %G, i64 6
+  %15 = getelementptr inbounds double, double* %G, i64 6
   %16 = load double* %15, align 8
   %17 = fmul double %16, 3.000000e+00
   %18 = fadd double %17, 8.000000e+00
-  %19 = getelementptr inbounds double* %G, i64 3
+  %19 = getelementptr inbounds double, double* %G, i64 3
   store double %18, double* %19, align 8
   br label %20
 
@@ -153,19 +153,19 @@ entry:
   %mul1 = fmul double %conv, %mul
   %add = fadd double %mul1, 6.000000e+00
   store double %add, double* %A, align 8
-  %arrayidx3 = getelementptr inbounds double* %A, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
   %1 = load double* %arrayidx3, align 8
   %mul4 = fmul double %1, 7.900000e+00
   %mul6 = fmul double %conv, %mul4
   %add7 = fadd double %mul6, 6.000000e+00
   store double %add7, double* %arrayidx3, align 8
-  %arrayidx9 = getelementptr inbounds double* %A, i64 2
+  %arrayidx9 = getelementptr inbounds double, double* %A, i64 2
   %2 = load double* %arrayidx9, align 8
   %mul10 = fmul double %2, 7.900000e+00
   %mul12 = fmul double %conv, %mul10
   %add13 = fadd double %mul12, 6.000000e+00
   store double %add13, double* %arrayidx9, align 8
-  %arrayidx15 = getelementptr inbounds double* %A, i64 3
+  %arrayidx15 = getelementptr inbounds double, double* %A, i64 3
   %3 = load double* %arrayidx15, align 8
   %mul16 = fmul double %3, 7.900000e+00
   %mul18 = fmul double %conv, %mul16
@@ -193,7 +193,7 @@ entry:
   %conv = sitofp i32 %n to double
   %mul = fmul double %conv, %0
   store double %mul, double* %A, align 8
-  %arrayidx2 = getelementptr inbounds double* %A, i64 1
+  %arrayidx2 = getelementptr inbounds double, double* %A, i64 1
   %1 = load double* %arrayidx2, align 8
   %mul4 = fmul double %conv, %1
   store double %mul4, double* %arrayidx2, align 8
@@ -201,11 +201,11 @@ entry:
   br i1 %cmp, label %return, label %if.end
 
 if.end:                                           ; preds = %entry
-  %arrayidx7 = getelementptr inbounds double* %A, i64 2
+  %arrayidx7 = getelementptr inbounds double, double* %A, i64 2
   %2 = load double* %arrayidx7, align 8
   %mul9 = fmul double %conv, %2
   store double %mul9, double* %arrayidx7, align 8
-  %arrayidx11 = getelementptr inbounds double* %A, i64 3
+  %arrayidx11 = getelementptr inbounds double, double* %A, i64 3
   %3 = load double* %arrayidx11, align 8
   %add = add nsw i32 %n, 4
   %conv12 = sitofp i32 %add to double
@@ -227,13 +227,13 @@ entry:
   br i1 undef, label %if.end13, label %if.end13
 
 sw.epilog7:                                       ; No predecessors!
-  %.in = getelementptr inbounds %class.B.53.55* %this, i64 0, i32 0, i32 1
+  %.in = getelementptr inbounds %class.B.53.55, %class.B.53.55* %this, i64 0, i32 0, i32 1
   %0 = load double* %.in, align 8
   %add = fadd double undef, 0.000000e+00
   %add6 = fadd double %add, %0
   %1 = load double* @a, align 8
   %add8 = fadd double %1, 0.000000e+00
-  %_dy = getelementptr inbounds %class.B.53.55* %this, i64 0, i32 0, i32 2
+  %_dy = getelementptr inbounds %class.B.53.55, %class.B.53.55* %this, i64 0, i32 0, i32 2
   %2 = load double* %_dy, align 8
   %add10 = fadd double %add8, %2
   br i1 undef, label %if.then12, label %if.end13

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/cycle_dup.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/cycle_dup.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/cycle_dup.ll Fri Feb 27 13:29:02 2015
@@ -24,13 +24,13 @@ target triple = "x86_64-apple-macosx10.9
 define i32 @foo(i32* nocapture %A) #0 {
 entry:
   %0 = load i32* %A, align 4
-  %arrayidx1 = getelementptr inbounds i32* %A, i64 1
+  %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 1
   %1 = load i32* %arrayidx1, align 4
-  %arrayidx2 = getelementptr inbounds i32* %A, i64 2
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 2
   %2 = load i32* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds i32* %A, i64 3
+  %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 3
   %3 = load i32* %arrayidx3, align 4
-  %arrayidx4 = getelementptr inbounds i32* %A, i64 13
+  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 13
   %4 = load i32* %arrayidx4, align 4
   %cmp24 = icmp sgt i32 %4, 0
   br i1 %cmp24, label %for.body, label %for.end

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/debug_info.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/debug_info.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/debug_info.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/debug_info.ll Fri Feb 27 13:29:02 2015
@@ -32,18 +32,18 @@ entry:
   br i1 %cmp8, label %for.body.lr.ph, label %for.end, !dbg !23
 
 for.body.lr.ph:                                   ; preds = %entry
-  %arrayidx = getelementptr inbounds double* %A, i64 4, !dbg !24
+  %arrayidx = getelementptr inbounds double, double* %A, i64 4, !dbg !24
   %0 = load double* %arrayidx, align 8, !dbg !24
-  %arrayidx1 = getelementptr inbounds double* %A, i64 5, !dbg !29
+  %arrayidx1 = getelementptr inbounds double, double* %A, i64 5, !dbg !29
   %1 = load double* %arrayidx1, align 8, !dbg !29
   br label %for.end, !dbg !23
 
 for.end:                                          ; preds = %for.body.lr.ph, %entry
   %y1.0.lcssa = phi double [ %1, %for.body.lr.ph ], [ 1.000000e+00, %entry ]
   %y0.0.lcssa = phi double [ %0, %for.body.lr.ph ], [ 0.000000e+00, %entry ]
-  %arrayidx2 = getelementptr inbounds double* %A, i64 8, !dbg !30
+  %arrayidx2 = getelementptr inbounds double, double* %A, i64 8, !dbg !30
   store double %y0.0.lcssa, double* %arrayidx2, align 8, !dbg !30
-  %arrayidx3 = getelementptr inbounds double* %A, i64 9, !dbg !30
+  %arrayidx3 = getelementptr inbounds double, double* %A, i64 9, !dbg !30
   store double %y1.0.lcssa, double* %arrayidx3, align 8, !dbg !30
   ret i32 undef, !dbg !31
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/diamond.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/diamond.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/diamond.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/diamond.ll Fri Feb 27 13:29:02 2015
@@ -22,20 +22,20 @@ entry:
   %mul238 = add i32 %m, %n
   %add = mul i32 %0, %mul238
   store i32 %add, i32* %B, align 4
-  %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
   %1 = load i32* %arrayidx4, align 4
   %add8 = mul i32 %1, %mul238
-  %arrayidx9 = getelementptr inbounds i32* %B, i64 1
+  %arrayidx9 = getelementptr inbounds i32, i32* %B, i64 1
   store i32 %add8, i32* %arrayidx9, align 4
-  %arrayidx10 = getelementptr inbounds i32* %A, i64 2
+  %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 2
   %2 = load i32* %arrayidx10, align 4
   %add14 = mul i32 %2, %mul238
-  %arrayidx15 = getelementptr inbounds i32* %B, i64 2
+  %arrayidx15 = getelementptr inbounds i32, i32* %B, i64 2
   store i32 %add14, i32* %arrayidx15, align 4
-  %arrayidx16 = getelementptr inbounds i32* %A, i64 3
+  %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 3
   %3 = load i32* %arrayidx16, align 4
   %add20 = mul i32 %3, %mul238
-  %arrayidx21 = getelementptr inbounds i32* %B, i64 3
+  %arrayidx21 = getelementptr inbounds i32, i32* %B, i64 3
   store i32 %add20, i32* %arrayidx21, align 4
   ret i32 0
 }
@@ -60,20 +60,20 @@ entry:
   %mul238 = add i32 %m, %n
   %add = mul i32 %0, %mul238
   store i32 %add, i32* %B, align 4
-  %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
   %1 = load i32* %arrayidx4, align 4
   %add8 = mul i32 %1, %mul238
-  %arrayidx9 = getelementptr inbounds i32* %B, i64 1
+  %arrayidx9 = getelementptr inbounds i32, i32* %B, i64 1
   store i32 %add8, i32* %arrayidx9, align 4
-  %arrayidx10 = getelementptr inbounds i32* %A, i64 2
+  %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 2
   %2 = load i32* %arrayidx10, align 4
   %add14 = mul i32 %2, %mul238
-  %arrayidx15 = getelementptr inbounds i32* %B, i64 2
+  %arrayidx15 = getelementptr inbounds i32, i32* %B, i64 2
   store i32 %add14, i32* %arrayidx15, align 4
-  %arrayidx16 = getelementptr inbounds i32* %A, i64 3
+  %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 3
   %3 = load i32* %arrayidx16, align 4
   %add20 = mul i32 %3, %mul238
-  %arrayidx21 = getelementptr inbounds i32* %B, i64 3
+  %arrayidx21 = getelementptr inbounds i32, i32* %B, i64 3
   store i32 %add20, i32* %arrayidx21, align 4
   ret i32 %0  ;<--------- This value has multiple users
 }
@@ -90,20 +90,20 @@ entry:
   %mul238 = add i32 %m, %n
   %add = mul i32 %0, %mul238
   store i32 %add, i32* %B, align 4
-  %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
   %1 = load i32* %arrayidx4, align 4
   %add8 = mul i32 %1, %mul238
-  %arrayidx9 = getelementptr inbounds i32* %B, i64 1
+  %arrayidx9 = getelementptr inbounds i32, i32* %B, i64 1
   store i32 %add8, i32* %arrayidx9, align 4
-  %arrayidx10 = getelementptr inbounds i32* %A, i64 2
+  %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 2
   %2 = load i32* %arrayidx10, align 4
   %add14 = mul i32 %2, %mul238
-  %arrayidx15 = getelementptr inbounds i32* %B, i64 2
+  %arrayidx15 = getelementptr inbounds i32, i32* %B, i64 2
   store i32 %add14, i32* %arrayidx15, align 4
-  %arrayidx16 = getelementptr inbounds i32* %A, i64 3
+  %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 3
   %3 = load i32* %arrayidx16, align 4
   %add20 = mul i32 %3, %mul238
-  %arrayidx21 = getelementptr inbounds i32* %B, i64 3
+  %arrayidx21 = getelementptr inbounds i32, i32* %B, i64 3
   store i32 %add20, i32* %arrayidx21, align 4
   ret i32 %1  ;<--------- This value has multiple users
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/external_user.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/external_user.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/external_user.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/external_user.ll Fri Feb 27 13:29:02 2015
@@ -33,7 +33,7 @@ target triple = "x86_64-apple-macosx10.8
 
 define double @ext_user(double* noalias nocapture %B, double* noalias nocapture %A, i32 %n, i32 %m) {
 entry:
-  %arrayidx = getelementptr inbounds double* %A, i64 1
+  %arrayidx = getelementptr inbounds double, double* %A, i64 1
   %0 = load double* %arrayidx, align 8
   %1 = load double* %A, align 8
   br label %for.body
@@ -54,7 +54,7 @@ for.body:
 
 for.end:                                          ; preds = %for.body
   store double %add5, double* %B, align 8
-  %arrayidx7 = getelementptr inbounds double* %B, i64 1
+  %arrayidx7 = getelementptr inbounds double, double* %B, i64 1
   store double %add4, double* %arrayidx7, align 8
   ret double %mul3
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/extract.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/extract.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/extract.ll Fri Feb 27 13:29:02 2015
@@ -12,8 +12,8 @@ entry:
   %LD = load <2 x double>* undef
   %V0 = extractelement <2 x double> %LD, i32 0
   %V1 = extractelement <2 x double> %LD, i32 1
-  %P0 = getelementptr inbounds double* %ptr, i64 0
-  %P1 = getelementptr inbounds double* %ptr, i64 1
+  %P0 = getelementptr inbounds double, double* %ptr, i64 0
+  %P1 = getelementptr inbounds double, double* %ptr, i64 1
   %A0 = fadd double %V0, 0.0
   %A1 = fadd double %V1, 1.1
   store double %A0, double* %P0, align 4
@@ -30,8 +30,8 @@ entry:
   %LD = load <2 x double>* undef
   %V0 = extractelement <2 x double> %LD, i32 0
   %V1 = extractelement <2 x double> %LD, i32 1
-  %P0 = getelementptr inbounds double* %ptr, i64 1  ; <--- incorrect order
-  %P1 = getelementptr inbounds double* %ptr, i64 0
+  %P0 = getelementptr inbounds double, double* %ptr, i64 1  ; <--- incorrect order
+  %P1 = getelementptr inbounds double, double* %ptr, i64 0
   %A0 = fadd double %V0, 1.2
   %A1 = fadd double %V1, 3.4
   store double %A0, double* %P0, align 4
@@ -48,8 +48,8 @@ entry:
   %LD = load <4 x double>* undef
   %V0 = extractelement <4 x double> %LD, i32 0  ; <--- invalid size.
   %V1 = extractelement <4 x double> %LD, i32 1
-  %P0 = getelementptr inbounds double* %ptr, i64 0
-  %P1 = getelementptr inbounds double* %ptr, i64 1
+  %P0 = getelementptr inbounds double, double* %ptr, i64 0
+  %P1 = getelementptr inbounds double, double* %ptr, i64 1
   %A0 = fadd double %V0, 5.5
   %A1 = fadd double %V1, 6.6
   store double %A0, double* %P0, align 4

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll Fri Feb 27 13:29:02 2015
@@ -8,12 +8,12 @@ target datalayout = "e-m:o-i64:64-f80:12
 define i32 @fn1() {
 entry:
   %0 = load i64** @a, align 8
-  %add.ptr = getelementptr inbounds i64* %0, i64 11
+  %add.ptr = getelementptr inbounds i64, i64* %0, i64 11
   %1 = ptrtoint i64* %add.ptr to i64
   store i64 %1, i64* %add.ptr, align 8
-  %add.ptr1 = getelementptr inbounds i64* %0, i64 56
+  %add.ptr1 = getelementptr inbounds i64, i64* %0, i64 56
   %2 = ptrtoint i64* %add.ptr1 to i64
-  %arrayidx2 = getelementptr inbounds i64* %0, i64 12
+  %arrayidx2 = getelementptr inbounds i64, i64* %0, i64 12
   store i64 %2, i64* %arrayidx2, align 8
   ret i32 undef
 ; CHECK-LABEL: @fn1(
@@ -31,36 +31,36 @@ entry:
   %fp1 = sitofp i32 %add1 to float
   %call1 = tail call float @llvm.powi.f32(float %fp1,i32 %add1) nounwind readnone
 
-  %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+  %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
   %i2 = load i32* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+  %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
   %i3 = load i32* %arrayidx3, align 4
   %add2 = add i32 %i2, %i3
   %fp2 = sitofp i32 %add2 to float
   %call2 = tail call float @llvm.powi.f32(float %fp2,i32 %add1) nounwind readnone
 
-  %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+  %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
   %i4 = load i32* %arrayidx4, align 4
-  %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+  %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
   %i5 = load i32* %arrayidx5, align 4
   %add3 = add i32 %i4, %i5
   %fp3 = sitofp i32 %add3 to float
   %call3 = tail call float @llvm.powi.f32(float %fp3,i32 %add1) nounwind readnone
 
-  %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+  %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
   %i6 = load i32* %arrayidx6, align 4
-  %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+  %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
   %i7 = load i32* %arrayidx7, align 4
   %add4 = add i32 %i6, %i7
   %fp4 = sitofp i32 %add4 to float
   %call4 = tail call float @llvm.powi.f32(float %fp4,i32 %add1) nounwind readnone
 
   store float %call1, float* %c, align 4
-  %arrayidx8 = getelementptr inbounds float* %c, i32 1
+  %arrayidx8 = getelementptr inbounds float, float* %c, i32 1
   store float %call2, float* %arrayidx8, align 4
-  %arrayidx9 = getelementptr inbounds float* %c, i32 2
+  %arrayidx9 = getelementptr inbounds float, float* %c, i32 2
   store float %call3, float* %arrayidx9, align 4
-  %arrayidx10 = getelementptr inbounds float* %c, i32 3
+  %arrayidx10 = getelementptr inbounds float, float* %c, i32 3
   store float %call4, float* %arrayidx10, align 4
   ret void
 

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/extractcost.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/extractcost.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/extractcost.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/extractcost.ll Fri Feb 27 13:29:02 2015
@@ -13,15 +13,15 @@ entry:
   store i32 %add, i32* %A, align 4
   %mul1 = mul nsw i32 %n, 9
   %add2 = add nsw i32 %mul1, 9
-  %arrayidx3 = getelementptr inbounds i32* %A, i64 1
+  %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 1
   store i32 %add2, i32* %arrayidx3, align 4
   %mul4 = shl i32 %n, 3
   %add5 = add nsw i32 %mul4, 9
-  %arrayidx6 = getelementptr inbounds i32* %A, i64 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 2
   store i32 %add5, i32* %arrayidx6, align 4
   %mul7 = mul nsw i32 %n, 10
   %add8 = add nsw i32 %mul7, 9
-  %arrayidx9 = getelementptr inbounds i32* %A, i64 3
+  %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 3
   store i32 %add8, i32* %arrayidx9, align 4
   %externaluse1 = add nsw i32 %add, %m  
   %externaluse2 = mul nsw i32 %add, %m  ; we should add the extract cost only once and the store will be vectorized

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/flag.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/flag.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/flag.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/flag.ll Fri Feb 27 13:29:02 2015
@@ -15,16 +15,16 @@ define i32 @rollable(i32* noalias nocapt
 .lr.ph:                                           ; preds = %0, %.lr.ph
   %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
   %2 = shl i64 %i.019, 2
-  %3 = getelementptr inbounds i32* %in, i64 %2
+  %3 = getelementptr inbounds i32, i32* %in, i64 %2
   %4 = load i32* %3, align 4
   %5 = or i64 %2, 1
-  %6 = getelementptr inbounds i32* %in, i64 %5
+  %6 = getelementptr inbounds i32, i32* %in, i64 %5
   %7 = load i32* %6, align 4
   %8 = or i64 %2, 2
-  %9 = getelementptr inbounds i32* %in, i64 %8
+  %9 = getelementptr inbounds i32, i32* %in, i64 %8
   %10 = load i32* %9, align 4
   %11 = or i64 %2, 3
-  %12 = getelementptr inbounds i32* %in, i64 %11
+  %12 = getelementptr inbounds i32, i32* %in, i64 %11
   %13 = load i32* %12, align 4
   %14 = mul i32 %4, 7
   %15 = add i32 %14, 7
@@ -34,13 +34,13 @@ define i32 @rollable(i32* noalias nocapt
   %19 = add i32 %18, 21
   %20 = mul i32 %13, 7
   %21 = add i32 %20, 28
-  %22 = getelementptr inbounds i32* %out, i64 %2
+  %22 = getelementptr inbounds i32, i32* %out, i64 %2
   store i32 %15, i32* %22, align 4
-  %23 = getelementptr inbounds i32* %out, i64 %5
+  %23 = getelementptr inbounds i32, i32* %out, i64 %5
   store i32 %17, i32* %23, align 4
-  %24 = getelementptr inbounds i32* %out, i64 %8
+  %24 = getelementptr inbounds i32, i32* %out, i64 %8
   store i32 %19, i32* %24, align 4
-  %25 = getelementptr inbounds i32* %out, i64 %11
+  %25 = getelementptr inbounds i32, i32* %out, i64 %11
   store i32 %21, i32* %25, align 4
   %26 = add i64 %i.019, 1
   %exitcond = icmp eq i64 %26, %n

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/gep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/gep.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/gep.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/gep.ll Fri Feb 27 13:29:02 2015
@@ -9,15 +9,15 @@ target datalayout = "e-m:o-i64:64-f80:12
 ; CHECK-LABEL: foo1
 ; CHECK: <2 x i32*>
 define void @foo1 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y) {
-  %1 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 0
+  %1 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 0
   %2 = load i32** %1, align 8
-  %3 = getelementptr inbounds i32* %2, i64 16
-  %4 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 0
+  %3 = getelementptr inbounds i32, i32* %2, i64 16
+  %4 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 0
   store i32* %3, i32** %4, align 8
-  %5 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 1
+  %5 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 1
   %6 = load i32** %5, align 8
-  %7 = getelementptr inbounds i32* %6, i64 16
-  %8 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 1
+  %7 = getelementptr inbounds i32, i32* %6, i64 16
+  %8 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 1
   store i32* %7, i32** %8, align 8
   ret void
 }
@@ -27,15 +27,15 @@ define void @foo1 ({ i32*, i32* }* noali
 ; CHECK-LABEL: foo2
 ; CHECK-NOT: <2 x i32*>
 define void @foo2 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y, i32 %i) {
-  %1 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 0
+  %1 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 0
   %2 = load i32** %1, align 8
-  %3 = getelementptr inbounds i32* %2, i32 %i
-  %4 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 0
+  %3 = getelementptr inbounds i32, i32* %2, i32 %i
+  %4 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 0
   store i32* %3, i32** %4, align 8
-  %5 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 1
+  %5 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 1
   %6 = load i32** %5, align 8
-  %7 = getelementptr inbounds i32* %6, i32 %i
-  %8 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 1
+  %7 = getelementptr inbounds i32, i32* %6, i32 %i
+  %8 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 1
   store i32* %7, i32** %8, align 8
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/hoist.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/hoist.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/hoist.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/hoist.ll Fri Feb 27 13:29:02 2015
@@ -30,22 +30,22 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %i.024 = phi i32 [ 0, %entry ], [ %add10, %for.body ]
-  %arrayidx = getelementptr inbounds i32* %A, i32 %i.024
+  %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.024
   %0 = load i32* %arrayidx, align 4
   %add = add nsw i32 %0, %n
   store i32 %add, i32* %arrayidx, align 4
   %add121 = or i32 %i.024, 1
-  %arrayidx2 = getelementptr inbounds i32* %A, i32 %add121
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %add121
   %1 = load i32* %arrayidx2, align 4
   %add3 = add nsw i32 %1, %k
   store i32 %add3, i32* %arrayidx2, align 4
   %add422 = or i32 %i.024, 2
-  %arrayidx5 = getelementptr inbounds i32* %A, i32 %add422
+  %arrayidx5 = getelementptr inbounds i32, i32* %A, i32 %add422
   %2 = load i32* %arrayidx5, align 4
   %add6 = add nsw i32 %2, %n
   store i32 %add6, i32* %arrayidx5, align 4
   %add723 = or i32 %i.024, 3
-  %arrayidx8 = getelementptr inbounds i32* %A, i32 %add723
+  %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add723
   %3 = load i32* %arrayidx8, align 4
   %add9 = add nsw i32 %3, %k
   store i32 %add9, i32* %arrayidx8, align 4

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/horizontal.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/horizontal.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/horizontal.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/horizontal.ll Fri Feb 27 13:29:02 2015
@@ -32,21 +32,21 @@ for.body:
   %i.033 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
   %sum.032 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add17, %for.body ]
   %mul = shl nsw i64 %i.033, 2
-  %arrayidx = getelementptr inbounds float* %A, i64 %mul
+  %arrayidx = getelementptr inbounds float, float* %A, i64 %mul
   %1 = load float* %arrayidx, align 4
   %mul2 = fmul float %1, 7.000000e+00
   %add28 = or i64 %mul, 1
-  %arrayidx4 = getelementptr inbounds float* %A, i64 %add28
+  %arrayidx4 = getelementptr inbounds float, float* %A, i64 %add28
   %2 = load float* %arrayidx4, align 4
   %mul5 = fmul float %2, 7.000000e+00
   %add6 = fadd fast float %mul2, %mul5
   %add829 = or i64 %mul, 2
-  %arrayidx9 = getelementptr inbounds float* %A, i64 %add829
+  %arrayidx9 = getelementptr inbounds float, float* %A, i64 %add829
   %3 = load float* %arrayidx9, align 4
   %mul10 = fmul float %3, 7.000000e+00
   %add11 = fadd fast float %add6, %mul10
   %add1330 = or i64 %mul, 3
-  %arrayidx14 = getelementptr inbounds float* %A, i64 %add1330
+  %arrayidx14 = getelementptr inbounds float, float* %A, i64 %add1330
   %4 = load float* %arrayidx14, align 4
   %mul15 = fmul float %4, 7.000000e+00
   %add16 = fadd fast float %add11, %mul15
@@ -86,11 +86,11 @@ entry:
 
 for.body.lr.ph:
   %0 = load float* %B, align 4
-  %arrayidx4 = getelementptr inbounds float* %B, i64 1
+  %arrayidx4 = getelementptr inbounds float, float* %B, i64 1
   %1 = load float* %arrayidx4, align 4
-  %arrayidx9 = getelementptr inbounds float* %B, i64 2
+  %arrayidx9 = getelementptr inbounds float, float* %B, i64 2
   %2 = load float* %arrayidx9, align 4
-  %arrayidx15 = getelementptr inbounds float* %B, i64 3
+  %arrayidx15 = getelementptr inbounds float, float* %B, i64 3
   %3 = load float* %arrayidx15, align 4
   %4 = sext i32 %n to i64
   br label %for.body
@@ -99,21 +99,21 @@ for.body:
   %i.040 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
   %sum.039 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %mul21, %for.body ]
   %mul = shl nsw i64 %i.040, 2
-  %arrayidx2 = getelementptr inbounds float* %A, i64 %mul
+  %arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
   %5 = load float* %arrayidx2, align 4
   %mul3 = fmul float %0, %5
   %add35 = or i64 %mul, 1
-  %arrayidx6 = getelementptr inbounds float* %A, i64 %add35
+  %arrayidx6 = getelementptr inbounds float, float* %A, i64 %add35
   %6 = load float* %arrayidx6, align 4
   %mul7 = fmul float %1, %6
   %add8 = fadd fast float %mul3, %mul7
   %add1136 = or i64 %mul, 2
-  %arrayidx12 = getelementptr inbounds float* %A, i64 %add1136
+  %arrayidx12 = getelementptr inbounds float, float* %A, i64 %add1136
   %7 = load float* %arrayidx12, align 4
   %mul13 = fmul float %2, %7
   %add14 = fadd fast float %add8, %mul13
   %add1737 = or i64 %mul, 3
-  %arrayidx18 = getelementptr inbounds float* %A, i64 %add1737
+  %arrayidx18 = getelementptr inbounds float, float* %A, i64 %add1737
   %8 = load float* %arrayidx18, align 4
   %mul19 = fmul float %3, %8
   %add20 = fadd fast float %add14, %mul19
@@ -158,21 +158,21 @@ entry:
 
 for.body.lr.ph:
   %0 = load float* %B, align 4
-  %arrayidx4 = getelementptr inbounds float* %B, i64 1
+  %arrayidx4 = getelementptr inbounds float, float* %B, i64 1
   %1 = load float* %arrayidx4, align 4
-  %arrayidx9 = getelementptr inbounds float* %B, i64 2
+  %arrayidx9 = getelementptr inbounds float, float* %B, i64 2
   %2 = load float* %arrayidx9, align 4
-  %arrayidx15 = getelementptr inbounds float* %B, i64 3
+  %arrayidx15 = getelementptr inbounds float, float* %B, i64 3
   %3 = load float* %arrayidx15, align 4
-  %arrayidx21 = getelementptr inbounds float* %B, i64 4
+  %arrayidx21 = getelementptr inbounds float, float* %B, i64 4
   %4 = load float* %arrayidx21, align 4
-  %arrayidx27 = getelementptr inbounds float* %B, i64 5
+  %arrayidx27 = getelementptr inbounds float, float* %B, i64 5
   %5 = load float* %arrayidx27, align 4
-  %arrayidx33 = getelementptr inbounds float* %B, i64 6
+  %arrayidx33 = getelementptr inbounds float, float* %B, i64 6
   %6 = load float* %arrayidx33, align 4
-  %arrayidx39 = getelementptr inbounds float* %B, i64 7
+  %arrayidx39 = getelementptr inbounds float, float* %B, i64 7
   %7 = load float* %arrayidx39, align 4
-  %arrayidx45 = getelementptr inbounds float* %B, i64 8
+  %arrayidx45 = getelementptr inbounds float, float* %B, i64 8
   %8 = load float* %arrayidx45, align 4
   %9 = sext i32 %n to i64
   br label %for.body
@@ -181,46 +181,46 @@ for.body:
   %i.083 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
   %sum.082 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add51, %for.body ]
   %mul = mul nsw i64 %i.083, 6
-  %arrayidx2 = getelementptr inbounds float* %A, i64 %mul
+  %arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
   %10 = load float* %arrayidx2, align 4
   %mul3 = fmul fast float %0, %10
   %add80 = or i64 %mul, 1
-  %arrayidx6 = getelementptr inbounds float* %A, i64 %add80
+  %arrayidx6 = getelementptr inbounds float, float* %A, i64 %add80
   %11 = load float* %arrayidx6, align 4
   %mul7 = fmul fast float %1, %11
   %add8 = fadd fast float %mul3, %mul7
   %add11 = add nsw i64 %mul, 2
-  %arrayidx12 = getelementptr inbounds float* %A, i64 %add11
+  %arrayidx12 = getelementptr inbounds float, float* %A, i64 %add11
   %12 = load float* %arrayidx12, align 4
   %mul13 = fmul fast float %2, %12
   %add14 = fadd fast float %add8, %mul13
   %add17 = add nsw i64 %mul, 3
-  %arrayidx18 = getelementptr inbounds float* %A, i64 %add17
+  %arrayidx18 = getelementptr inbounds float, float* %A, i64 %add17
   %13 = load float* %arrayidx18, align 4
   %mul19 = fmul fast float %3, %13
   %add20 = fadd fast float %add14, %mul19
   %add23 = add nsw i64 %mul, 4
-  %arrayidx24 = getelementptr inbounds float* %A, i64 %add23
+  %arrayidx24 = getelementptr inbounds float, float* %A, i64 %add23
   %14 = load float* %arrayidx24, align 4
   %mul25 = fmul fast float %4, %14
   %add26 = fadd fast float %add20, %mul25
   %add29 = add nsw i64 %mul, 5
-  %arrayidx30 = getelementptr inbounds float* %A, i64 %add29
+  %arrayidx30 = getelementptr inbounds float, float* %A, i64 %add29
   %15 = load float* %arrayidx30, align 4
   %mul31 = fmul fast float %5, %15
   %add32 = fadd fast float %add26, %mul31
   %add35 = add nsw i64 %mul, 6
-  %arrayidx36 = getelementptr inbounds float* %A, i64 %add35
+  %arrayidx36 = getelementptr inbounds float, float* %A, i64 %add35
   %16 = load float* %arrayidx36, align 4
   %mul37 = fmul fast float %6, %16
   %add38 = fadd fast float %add32, %mul37
   %add41 = add nsw i64 %mul, 7
-  %arrayidx42 = getelementptr inbounds float* %A, i64 %add41
+  %arrayidx42 = getelementptr inbounds float, float* %A, i64 %add41
   %17 = load float* %arrayidx42, align 4
   %mul43 = fmul fast float %7, %17
   %add44 = fadd fast float %add38, %mul43
   %add47 = add nsw i64 %mul, 8
-  %arrayidx48 = getelementptr inbounds float* %A, i64 %add47
+  %arrayidx48 = getelementptr inbounds float, float* %A, i64 %add47
   %18 = load float* %arrayidx48, align 4
   %mul49 = fmul fast float %8, %18
   %add50 = fadd fast float %add44, %mul49
@@ -260,11 +260,11 @@ entry:
 
 for.body.lr.ph:
   %0 = load float* %B, align 4
-  %arrayidx4 = getelementptr inbounds float* %B, i64 1
+  %arrayidx4 = getelementptr inbounds float, float* %B, i64 1
   %1 = load float* %arrayidx4, align 4
-  %arrayidx10 = getelementptr inbounds float* %B, i64 2
+  %arrayidx10 = getelementptr inbounds float, float* %B, i64 2
   %2 = load float* %arrayidx10, align 4
-  %arrayidx16 = getelementptr inbounds float* %B, i64 3
+  %arrayidx16 = getelementptr inbounds float, float* %B, i64 3
   %3 = load float* %arrayidx16, align 4
   %4 = sext i32 %n to i64
   br label %for.body
@@ -273,22 +273,22 @@ for.body:
   %i.043 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
   %sum.042 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add21, %for.body ]
   %mul = shl nsw i64 %i.043, 2
-  %arrayidx2 = getelementptr inbounds float* %A, i64 %mul
+  %arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
   %5 = load float* %arrayidx2, align 4
   %mul3 = fmul fast float %0, %5
   %add = fadd fast float %sum.042, %mul3
   %add638 = or i64 %mul, 1
-  %arrayidx7 = getelementptr inbounds float* %A, i64 %add638
+  %arrayidx7 = getelementptr inbounds float, float* %A, i64 %add638
   %6 = load float* %arrayidx7, align 4
   %mul8 = fmul fast float %1, %6
   %add9 = fadd fast float %add, %mul8
   %add1239 = or i64 %mul, 2
-  %arrayidx13 = getelementptr inbounds float* %A, i64 %add1239
+  %arrayidx13 = getelementptr inbounds float, float* %A, i64 %add1239
   %7 = load float* %arrayidx13, align 4
   %mul14 = fmul fast float %2, %7
   %add15 = fadd fast float %add9, %mul14
   %add1840 = or i64 %mul, 3
-  %arrayidx19 = getelementptr inbounds float* %A, i64 %add1840
+  %arrayidx19 = getelementptr inbounds float, float* %A, i64 %add1840
   %8 = load float* %arrayidx19, align 4
   %mul20 = fmul fast float %3, %8
   %add21 = fadd fast float %add15, %mul20
@@ -326,9 +326,9 @@ entry:
   br i1 %cmp37, label %for.body.lr.ph, label %for.end
 
 for.body.lr.ph:
-  %arrayidx4 = getelementptr inbounds float* %B, i64 1
-  %arrayidx9 = getelementptr inbounds float* %B, i64 2
-  %arrayidx15 = getelementptr inbounds float* %B, i64 3
+  %arrayidx4 = getelementptr inbounds float, float* %B, i64 1
+  %arrayidx9 = getelementptr inbounds float, float* %B, i64 2
+  %arrayidx15 = getelementptr inbounds float, float* %B, i64 3
   %0 = sext i32 %n to i64
   br label %for.body
 
@@ -337,29 +337,29 @@ for.body:
   %C.addr.038 = phi float* [ %C, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
   %1 = load float* %B, align 4
   %mul = shl nsw i64 %i.039, 2
-  %arrayidx2 = getelementptr inbounds float* %A, i64 %mul
+  %arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
   %2 = load float* %arrayidx2, align 4
   %mul3 = fmul fast float %1, %2
   %3 = load float* %arrayidx4, align 4
   %add34 = or i64 %mul, 1
-  %arrayidx6 = getelementptr inbounds float* %A, i64 %add34
+  %arrayidx6 = getelementptr inbounds float, float* %A, i64 %add34
   %4 = load float* %arrayidx6, align 4
   %mul7 = fmul fast float %3, %4
   %add8 = fadd fast float %mul3, %mul7
   %5 = load float* %arrayidx9, align 4
   %add1135 = or i64 %mul, 2
-  %arrayidx12 = getelementptr inbounds float* %A, i64 %add1135
+  %arrayidx12 = getelementptr inbounds float, float* %A, i64 %add1135
   %6 = load float* %arrayidx12, align 4
   %mul13 = fmul fast float %5, %6
   %add14 = fadd fast float %add8, %mul13
   %7 = load float* %arrayidx15, align 4
   %add1736 = or i64 %mul, 3
-  %arrayidx18 = getelementptr inbounds float* %A, i64 %add1736
+  %arrayidx18 = getelementptr inbounds float, float* %A, i64 %add1736
   %8 = load float* %arrayidx18, align 4
   %mul19 = fmul fast float %7, %8
   %add20 = fadd fast float %add14, %mul19
   store float %add20, float* %C.addr.038, align 4
-  %incdec.ptr = getelementptr inbounds float* %C.addr.038, i64 1
+  %incdec.ptr = getelementptr inbounds float, float* %C.addr.038, i64 1
   %inc = add nsw i64 %i.039, 1
   %exitcond = icmp eq i64 %inc, %0
   br i1 %exitcond, label %for.end, label %for.body
@@ -390,7 +390,7 @@ entry:
 
 for.body.lr.ph:
   %0 = load double* %B, align 8
-  %arrayidx4 = getelementptr inbounds double* %B, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %B, i64 1
   %1 = load double* %arrayidx4, align 8
   %2 = sext i32 %n to i64
   br label %for.body
@@ -398,15 +398,15 @@ for.body.lr.ph:
 for.body:
   %i.018 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
   %mul = shl nsw i64 %i.018, 2
-  %arrayidx2 = getelementptr inbounds double* %A, i64 %mul
+  %arrayidx2 = getelementptr inbounds double, double* %A, i64 %mul
   %3 = load double* %arrayidx2, align 8
   %mul3 = fmul fast double %0, %3
   %add16 = or i64 %mul, 1
-  %arrayidx6 = getelementptr inbounds double* %A, i64 %add16
+  %arrayidx6 = getelementptr inbounds double, double* %A, i64 %add16
   %4 = load double* %arrayidx6, align 8
   %mul7 = fmul fast double %1, %4
   %add8 = fadd fast double %mul3, %mul7
-  %arrayidx9 = getelementptr inbounds double* %C, i64 %i.018
+  %arrayidx9 = getelementptr inbounds double, double* %C, i64 %i.018
   store double %add8, double* %arrayidx9, align 8
   %inc = add nsw i64 %i.018, 1
   %exitcond = icmp eq i64 %inc, %2

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/implicitfloat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/implicitfloat.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/implicitfloat.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/implicitfloat.ll Fri Feb 27 13:29:02 2015
@@ -12,13 +12,13 @@ entry:
   %i0 = load double* %a, align 8
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   store double %mul, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %mul5, double* %arrayidx5, align 8
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/in-tree-user.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/in-tree-user.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/in-tree-user.ll Fri Feb 27 13:29:02 2015
@@ -19,14 +19,14 @@ entry:
 for.body:                                         ; preds = %for.inc, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
   %0 = shl nsw i64 %indvars.iv, 1
-  %arrayidx = getelementptr inbounds double* %A, i64 %0
+  %arrayidx = getelementptr inbounds double, double* %A, i64 %0
   %1 = load double* %arrayidx, align 8
   %mul1 = fmul double %conv, %1
   %mul2 = fmul double %mul1, 7.000000e+00
   %add = fadd double %mul2, 5.000000e+00
   %InTreeUser = fadd double %add, %add    ; <------------------ In tree user.
   %2 = or i64 %0, 1
-  %arrayidx6 = getelementptr inbounds double* %A, i64 %2
+  %arrayidx6 = getelementptr inbounds double, double* %A, i64 %2
   %3 = load double* %arrayidx6, align 8
   %mul8 = fmul double %conv, %3
   %mul9 = fmul double %mul8, 4.000000e+00

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/intrinsic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/intrinsic.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/intrinsic.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/intrinsic.ll Fri Feb 27 13:29:02 2015
@@ -17,14 +17,14 @@ entry:
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
   %call = tail call double @llvm.fabs.f64(double %mul) nounwind readnone
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   %call5 = tail call double @llvm.fabs.f64(double %mul5) nounwind readnone
   store double %call, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %call5, double* %arrayidx5, align 8
   ret void
 }
@@ -44,28 +44,28 @@ entry:
   %call0 = tail call float @llvm.copysign.f32(float %0, float %1) nounwind readnone
   store float %call0, float* %c, align 4
 
-  %ix2 = getelementptr inbounds float* %a, i64 1
+  %ix2 = getelementptr inbounds float, float* %a, i64 1
   %2 = load float* %ix2, align 4
-  %ix3 = getelementptr inbounds float* %b, i64 1
+  %ix3 = getelementptr inbounds float, float* %b, i64 1
   %3 = load float* %ix3, align 4
   %call1 = tail call float @llvm.copysign.f32(float %2, float %3) nounwind readnone
-  %c1 = getelementptr inbounds float* %c, i64 1
+  %c1 = getelementptr inbounds float, float* %c, i64 1
   store float %call1, float* %c1, align 4
 
-  %ix4 = getelementptr inbounds float* %a, i64 2
+  %ix4 = getelementptr inbounds float, float* %a, i64 2
   %4 = load float* %ix4, align 4
-  %ix5 = getelementptr inbounds float* %b, i64 2
+  %ix5 = getelementptr inbounds float, float* %b, i64 2
   %5 = load float* %ix5, align 4
   %call2 = tail call float @llvm.copysign.f32(float %4, float %5) nounwind readnone
-  %c2 = getelementptr inbounds float* %c, i64 2
+  %c2 = getelementptr inbounds float, float* %c, i64 2
   store float %call2, float* %c2, align 4
 
-  %ix6 = getelementptr inbounds float* %a, i64 3
+  %ix6 = getelementptr inbounds float, float* %a, i64 3
   %6 = load float* %ix6, align 4
-  %ix7 = getelementptr inbounds float* %b, i64 3
+  %ix7 = getelementptr inbounds float, float* %b, i64 3
   %7 = load float* %ix7, align 4
   %call3 = tail call float @llvm.copysign.f32(float %6, float %7) nounwind readnone
-  %c3 = getelementptr inbounds float* %c, i64 3
+  %c3 = getelementptr inbounds float, float* %c, i64 3
   store float %call3, float* %c3, align 4
 
   ret void
@@ -80,33 +80,33 @@ entry:
   %add1 = add i32 %i0, %i1
   %call1 = tail call i32 @llvm.bswap.i32(i32 %add1) nounwind readnone
 
-  %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+  %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
   %i2 = load i32* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+  %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
   %i3 = load i32* %arrayidx3, align 4
   %add2 = add i32 %i2, %i3
   %call2 = tail call i32 @llvm.bswap.i32(i32 %add2) nounwind readnone
 
-  %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+  %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
   %i4 = load i32* %arrayidx4, align 4
-  %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+  %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
   %i5 = load i32* %arrayidx5, align 4
   %add3 = add i32 %i4, %i5
   %call3 = tail call i32 @llvm.bswap.i32(i32 %add3) nounwind readnone
 
-  %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+  %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
   %i6 = load i32* %arrayidx6, align 4
-  %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+  %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
   %i7 = load i32* %arrayidx7, align 4
   %add4 = add i32 %i6, %i7
   %call4 = tail call i32 @llvm.bswap.i32(i32 %add4) nounwind readnone
 
   store i32 %call1, i32* %c, align 4
-  %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+  %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
   store i32 %call2, i32* %arrayidx8, align 4
-  %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+  %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
   store i32 %call3, i32* %arrayidx9, align 4
-  %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+  %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
   store i32 %call4, i32* %arrayidx10, align 4
   ret void
 
@@ -127,33 +127,33 @@ entry:
   %add1 = add i32 %i0, %i1
   %call1 = tail call i32 @llvm.ctlz.i32(i32 %add1,i1 true) nounwind readnone
 
-  %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+  %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
   %i2 = load i32* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+  %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
   %i3 = load i32* %arrayidx3, align 4
   %add2 = add i32 %i2, %i3
   %call2 = tail call i32 @llvm.ctlz.i32(i32 %add2,i1 true) nounwind readnone
 
-  %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+  %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
   %i4 = load i32* %arrayidx4, align 4
-  %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+  %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
   %i5 = load i32* %arrayidx5, align 4
   %add3 = add i32 %i4, %i5
   %call3 = tail call i32 @llvm.ctlz.i32(i32 %add3,i1 true) nounwind readnone
 
-  %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+  %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
   %i6 = load i32* %arrayidx6, align 4
-  %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+  %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
   %i7 = load i32* %arrayidx7, align 4
   %add4 = add i32 %i6, %i7
   %call4 = tail call i32 @llvm.ctlz.i32(i32 %add4,i1 true) nounwind readnone
 
   store i32 %call1, i32* %c, align 4
-  %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+  %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
   store i32 %call2, i32* %arrayidx8, align 4
-  %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+  %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
   store i32 %call3, i32* %arrayidx9, align 4
-  %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+  %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
   store i32 %call4, i32* %arrayidx10, align 4
   ret void
 
@@ -172,33 +172,33 @@ entry:
   %add1 = add i32 %i0, %i1
   %call1 = tail call i32 @llvm.ctlz.i32(i32 %add1,i1 true) nounwind readnone
 
-  %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+  %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
   %i2 = load i32* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+  %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
   %i3 = load i32* %arrayidx3, align 4
   %add2 = add i32 %i2, %i3
   %call2 = tail call i32 @llvm.ctlz.i32(i32 %add2,i1 false) nounwind readnone
 
-  %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+  %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
   %i4 = load i32* %arrayidx4, align 4
-  %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+  %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
   %i5 = load i32* %arrayidx5, align 4
   %add3 = add i32 %i4, %i5
   %call3 = tail call i32 @llvm.ctlz.i32(i32 %add3,i1 true) nounwind readnone
 
-  %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+  %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
   %i6 = load i32* %arrayidx6, align 4
-  %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+  %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
   %i7 = load i32* %arrayidx7, align 4
   %add4 = add i32 %i6, %i7
   %call4 = tail call i32 @llvm.ctlz.i32(i32 %add4,i1 false) nounwind readnone
 
   store i32 %call1, i32* %c, align 4
-  %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+  %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
   store i32 %call2, i32* %arrayidx8, align 4
-  %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+  %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
   store i32 %call3, i32* %arrayidx9, align 4
-  %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+  %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
   store i32 %call4, i32* %arrayidx10, align 4
   ret void
 
@@ -217,33 +217,33 @@ entry:
   %add1 = add i32 %i0, %i1
   %call1 = tail call i32 @llvm.cttz.i32(i32 %add1,i1 true) nounwind readnone
 
-  %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+  %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
   %i2 = load i32* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+  %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
   %i3 = load i32* %arrayidx3, align 4
   %add2 = add i32 %i2, %i3
   %call2 = tail call i32 @llvm.cttz.i32(i32 %add2,i1 true) nounwind readnone
 
-  %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+  %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
   %i4 = load i32* %arrayidx4, align 4
-  %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+  %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
   %i5 = load i32* %arrayidx5, align 4
   %add3 = add i32 %i4, %i5
   %call3 = tail call i32 @llvm.cttz.i32(i32 %add3,i1 true) nounwind readnone
 
-  %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+  %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
   %i6 = load i32* %arrayidx6, align 4
-  %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+  %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
   %i7 = load i32* %arrayidx7, align 4
   %add4 = add i32 %i6, %i7
   %call4 = tail call i32 @llvm.cttz.i32(i32 %add4,i1 true) nounwind readnone
 
   store i32 %call1, i32* %c, align 4
-  %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+  %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
   store i32 %call2, i32* %arrayidx8, align 4
-  %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+  %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
   store i32 %call3, i32* %arrayidx9, align 4
-  %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+  %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
   store i32 %call4, i32* %arrayidx10, align 4
   ret void
 
@@ -262,33 +262,33 @@ entry:
   %add1 = add i32 %i0, %i1
   %call1 = tail call i32 @llvm.cttz.i32(i32 %add1,i1 true) nounwind readnone
 
-  %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+  %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
   %i2 = load i32* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+  %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
   %i3 = load i32* %arrayidx3, align 4
   %add2 = add i32 %i2, %i3
   %call2 = tail call i32 @llvm.cttz.i32(i32 %add2,i1 false) nounwind readnone
 
-  %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+  %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
   %i4 = load i32* %arrayidx4, align 4
-  %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+  %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
   %i5 = load i32* %arrayidx5, align 4
   %add3 = add i32 %i4, %i5
   %call3 = tail call i32 @llvm.cttz.i32(i32 %add3,i1 true) nounwind readnone
 
-  %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+  %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
   %i6 = load i32* %arrayidx6, align 4
-  %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+  %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
   %i7 = load i32* %arrayidx7, align 4
   %add4 = add i32 %i6, %i7
   %call4 = tail call i32 @llvm.cttz.i32(i32 %add4,i1 false) nounwind readnone
 
   store i32 %call1, i32* %c, align 4
-  %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+  %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
   store i32 %call2, i32* %arrayidx8, align 4
-  %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+  %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
   store i32 %call3, i32* %arrayidx9, align 4
-  %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+  %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
   store i32 %call4, i32* %arrayidx10, align 4
   ret void
 
@@ -305,33 +305,33 @@ entry:
   %add1 = fadd float %i0, %i1
   %call1 = tail call float @llvm.powi.f32(float %add1,i32 %P) nounwind readnone
 
-  %arrayidx2 = getelementptr inbounds float* %a, i32 1
+  %arrayidx2 = getelementptr inbounds float, float* %a, i32 1
   %i2 = load float* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds float* %b, i32 1
+  %arrayidx3 = getelementptr inbounds float, float* %b, i32 1
   %i3 = load float* %arrayidx3, align 4
   %add2 = fadd float %i2, %i3
   %call2 = tail call float @llvm.powi.f32(float %add2,i32 %P) nounwind readnone
 
-  %arrayidx4 = getelementptr inbounds float* %a, i32 2
+  %arrayidx4 = getelementptr inbounds float, float* %a, i32 2
   %i4 = load float* %arrayidx4, align 4
-  %arrayidx5 = getelementptr inbounds float* %b, i32 2
+  %arrayidx5 = getelementptr inbounds float, float* %b, i32 2
   %i5 = load float* %arrayidx5, align 4
   %add3 = fadd float %i4, %i5
   %call3 = tail call float @llvm.powi.f32(float %add3,i32 %P) nounwind readnone
 
-  %arrayidx6 = getelementptr inbounds float* %a, i32 3
+  %arrayidx6 = getelementptr inbounds float, float* %a, i32 3
   %i6 = load float* %arrayidx6, align 4
-  %arrayidx7 = getelementptr inbounds float* %b, i32 3
+  %arrayidx7 = getelementptr inbounds float, float* %b, i32 3
   %i7 = load float* %arrayidx7, align 4
   %add4 = fadd float %i6, %i7
   %call4 = tail call float @llvm.powi.f32(float %add4,i32 %P) nounwind readnone
 
   store float %call1, float* %c, align 4
-  %arrayidx8 = getelementptr inbounds float* %c, i32 1
+  %arrayidx8 = getelementptr inbounds float, float* %c, i32 1
   store float %call2, float* %arrayidx8, align 4
-  %arrayidx9 = getelementptr inbounds float* %c, i32 2
+  %arrayidx9 = getelementptr inbounds float, float* %c, i32 2
   store float %call3, float* %arrayidx9, align 4
-  %arrayidx10 = getelementptr inbounds float* %c, i32 3
+  %arrayidx10 = getelementptr inbounds float, float* %c, i32 3
   store float %call4, float* %arrayidx10, align 4
   ret void
 
@@ -351,33 +351,33 @@ entry:
   %add1 = fadd float %i0, %i1
   %call1 = tail call float @llvm.powi.f32(float %add1,i32 %P) nounwind readnone
 
-  %arrayidx2 = getelementptr inbounds float* %a, i32 1
+  %arrayidx2 = getelementptr inbounds float, float* %a, i32 1
   %i2 = load float* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds float* %b, i32 1
+  %arrayidx3 = getelementptr inbounds float, float* %b, i32 1
   %i3 = load float* %arrayidx3, align 4
   %add2 = fadd float %i2, %i3
   %call2 = tail call float @llvm.powi.f32(float %add2,i32 %Q) nounwind readnone
 
-  %arrayidx4 = getelementptr inbounds float* %a, i32 2
+  %arrayidx4 = getelementptr inbounds float, float* %a, i32 2
   %i4 = load float* %arrayidx4, align 4
-  %arrayidx5 = getelementptr inbounds float* %b, i32 2
+  %arrayidx5 = getelementptr inbounds float, float* %b, i32 2
   %i5 = load float* %arrayidx5, align 4
   %add3 = fadd float %i4, %i5
   %call3 = tail call float @llvm.powi.f32(float %add3,i32 %P) nounwind readnone
 
-  %arrayidx6 = getelementptr inbounds float* %a, i32 3
+  %arrayidx6 = getelementptr inbounds float, float* %a, i32 3
   %i6 = load float* %arrayidx6, align 4
-  %arrayidx7 = getelementptr inbounds float* %b, i32 3
+  %arrayidx7 = getelementptr inbounds float, float* %b, i32 3
   %i7 = load float* %arrayidx7, align 4
   %add4 = fadd float %i6, %i7
   %call4 = tail call float @llvm.powi.f32(float %add4,i32 %Q) nounwind readnone
 
   store float %call1, float* %c, align 4
-  %arrayidx8 = getelementptr inbounds float* %c, i32 1
+  %arrayidx8 = getelementptr inbounds float, float* %c, i32 1
   store float %call2, float* %arrayidx8, align 4
-  %arrayidx9 = getelementptr inbounds float* %c, i32 2
+  %arrayidx9 = getelementptr inbounds float, float* %c, i32 2
   store float %call3, float* %arrayidx9, align 4
-  %arrayidx10 = getelementptr inbounds float* %c, i32 3
+  %arrayidx10 = getelementptr inbounds float, float* %c, i32 3
   store float %call4, float* %arrayidx10, align 4
   ret void
 

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/long_chains.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/long_chains.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/long_chains.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/long_chains.ll Fri Feb 27 13:29:02 2015
@@ -14,7 +14,7 @@ target triple = "x86_64-apple-macosx10.8
 define i32 @test(double* nocapture %A, i8* nocapture %B) {
 entry:
   %0 = load i8* %B, align 1
-  %arrayidx1 = getelementptr inbounds i8* %B, i64 1
+  %arrayidx1 = getelementptr inbounds i8, i8* %B, i64 1
   %1 = load i8* %arrayidx1, align 1
   %add = add i8 %0, 3
   %add4 = add i8 %1, 3
@@ -41,7 +41,7 @@ entry:
   %mul25 = fmul double %add22, %add22
   %add26 = fadd double %mul25, 1.000000e+00
   store double %add24, double* %A, align 8
-  %arrayidx28 = getelementptr inbounds double* %A, i64 1
+  %arrayidx28 = getelementptr inbounds double, double* %A, i64 1
   store double %add26, double* %arrayidx28, align 8
   ret i32 undef
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/loopinvariant.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/loopinvariant.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/loopinvariant.ll Fri Feb 27 13:29:02 2015
@@ -18,42 +18,42 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
-  %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
   %0 = load i32* %arrayidx, align 4
   %add1 = add nsw i32 %0, %n
   store i32 %add1, i32* %arrayidx, align 4
   %1 = or i64 %indvars.iv, 1
-  %arrayidx4 = getelementptr inbounds i32* %A, i64 %1
+  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %1
   %2 = load i32* %arrayidx4, align 4
   %add5 = add nsw i32 %2, %n
   store i32 %add5, i32* %arrayidx4, align 4
   %3 = or i64 %indvars.iv, 2
-  %arrayidx8 = getelementptr inbounds i32* %A, i64 %3
+  %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %3
   %4 = load i32* %arrayidx8, align 4
   %add9 = add nsw i32 %4, %n
   store i32 %add9, i32* %arrayidx8, align 4
   %5 = or i64 %indvars.iv, 3
-  %arrayidx12 = getelementptr inbounds i32* %A, i64 %5
+  %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %5
   %6 = load i32* %arrayidx12, align 4
   %add13 = add nsw i32 %6, %n
   store i32 %add13, i32* %arrayidx12, align 4
   %7 = or i64 %indvars.iv, 4
-  %arrayidx16 = getelementptr inbounds i32* %A, i64 %7
+  %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 %7
   %8 = load i32* %arrayidx16, align 4
   %add17 = add nsw i32 %8, %n
   store i32 %add17, i32* %arrayidx16, align 4
   %9 = or i64 %indvars.iv, 5
-  %arrayidx20 = getelementptr inbounds i32* %A, i64 %9
+  %arrayidx20 = getelementptr inbounds i32, i32* %A, i64 %9
   %10 = load i32* %arrayidx20, align 4
   %add21 = add nsw i32 %10, %n
   store i32 %add21, i32* %arrayidx20, align 4
   %11 = or i64 %indvars.iv, 6
-  %arrayidx24 = getelementptr inbounds i32* %A, i64 %11
+  %arrayidx24 = getelementptr inbounds i32, i32* %A, i64 %11
   %12 = load i32* %arrayidx24, align 4
   %add25 = add nsw i32 %12, %n
   store i32 %add25, i32* %arrayidx24, align 4
   %13 = or i64 %indvars.iv, 7
-  %arrayidx28 = getelementptr inbounds i32* %A, i64 %13
+  %arrayidx28 = getelementptr inbounds i32, i32* %A, i64 %13
   %14 = load i32* %arrayidx28, align 4
   %add29 = add nsw i32 %14, %n
   store i32 %add29, i32* %arrayidx28, align 4

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/metadata.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/metadata.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/metadata.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/metadata.ll Fri Feb 27 13:29:02 2015
@@ -15,13 +15,13 @@ entry:
   %i0 = load double* %a, align 8, !tbaa !4
   %i1 = load double* %b, align 8, !tbaa !4
   %mul = fmul double %i0, %i1, !fpmath !0
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8, !tbaa !4
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8, !tbaa !4
   %mul5 = fmul double %i3, %i4, !fpmath !0
   store double %mul, double* %c, align 8, !tbaa !4
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %mul5, double* %arrayidx5, align 8, !tbaa !4
   ret void
 }
@@ -38,14 +38,14 @@ entry:
   %i0 = load double* %a, align 8, !tbaa !4
   %i1 = load double* %b, align 8, !tbaa !4
   %mul = fmul double %i0, %i1, !fpmath !1
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8, !tbaa !4
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8, !tbaa !4
   %mul5 = fmul double %i3, %i4, !fpmath !1
   %c = bitcast i8* %e to double*
   store double %mul, double* %c, align 8, !tbaa !4
-  %carrayidx5 = getelementptr inbounds i8* %e, i64 8
+  %carrayidx5 = getelementptr inbounds i8, i8* %e, i64 8
   %arrayidx5 = bitcast i8* %carrayidx5 to double*
   store double %mul5, double* %arrayidx5, align 8, !tbaa !4
   ret void

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/multi_block.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/multi_block.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/multi_block.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/multi_block.ll Fri Feb 27 13:29:02 2015
@@ -26,7 +26,7 @@ target triple = "x86_64-apple-macosx10.7
 ;CHECK: ret
 define i32 @bar(double* nocapture %A, i32 %d) {
   %1 = load double* %A, align 8
-  %2 = getelementptr inbounds double* %A, i64 1
+  %2 = getelementptr inbounds double, double* %A, i64 1
   %3 = load double* %2, align 8
   %4 = fptrunc double %1 to float
   %5 = fptrunc double %3 to float
@@ -42,11 +42,11 @@ define i32 @bar(double* nocapture %A, i3
   %11 = fadd float %5, 5.000000e+00
   %12 = fpext float %10 to double
   %13 = fadd double %12, 9.000000e+00
-  %14 = getelementptr inbounds double* %A, i64 8
+  %14 = getelementptr inbounds double, double* %A, i64 8
   store double %13, double* %14, align 8
   %15 = fpext float %11 to double
   %16 = fadd double %15, 5.000000e+00
-  %17 = getelementptr inbounds double* %A, i64 9
+  %17 = getelementptr inbounds double, double* %A, i64 9
   store double %16, double* %17, align 8
   ret i32 undef
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/multi_user.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/multi_user.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/multi_user.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/multi_user.ll Fri Feb 27 13:29:02 2015
@@ -24,22 +24,22 @@ define i32 @foo(i32* nocapture %A, i32 %
   %4 = add nsw i32 %2, %3
   store i32 %4, i32* %A, align 4
   %5 = add nsw i32 %1, 8
-  %6 = getelementptr inbounds i32* %A, i64 1
+  %6 = getelementptr inbounds i32, i32* %A, i64 1
   %7 = load i32* %6, align 4
   %8 = add nsw i32 %5, %7
   store i32 %8, i32* %6, align 4
   %9 = add nsw i32 %1, 9
-  %10 = getelementptr inbounds i32* %A, i64 2
+  %10 = getelementptr inbounds i32, i32* %A, i64 2
   %11 = load i32* %10, align 4
   %12 = add nsw i32 %9, %11
   store i32 %12, i32* %10, align 4
   %13 = add nsw i32 %1, 10
-  %14 = getelementptr inbounds i32* %A, i64 3
+  %14 = getelementptr inbounds i32, i32* %A, i64 3
   %15 = load i32* %14, align 4
   %16 = add nsw i32 %13, %15
   store i32 %16, i32* %14, align 4
   %17 = add nsw i32 %1, 11
-  %18 = getelementptr inbounds i32* %A, i64 4
+  %18 = getelementptr inbounds i32, i32* %A, i64 4
   %19 = load i32* %18, align 4
   %20 = add nsw i32 %17, %19
   store i32 %20, i32* %18, align 4

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/odd_store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/odd_store.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/odd_store.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/odd_store.ll Fri Feb 27 13:29:02 2015
@@ -18,28 +18,28 @@ target triple = "x86_64-apple-macosx10.8
 ;CHECK-NOT: store <3 x i8>
 ;CHECK: ret
 define i32 @foo(i8* noalias nocapture %A, float* noalias nocapture %B, float %T) {
-  %1 = getelementptr inbounds float* %B, i64 10
+  %1 = getelementptr inbounds float, float* %B, i64 10
   %2 = load float* %1, align 4
   %3 = fmul float %2, %T
   %4 = fpext float %3 to double
   %5 = fadd double %4, 4.000000e+00
   %6 = fptosi double %5 to i8
   store i8 %6, i8* %A, align 1
-  %7 = getelementptr inbounds float* %B, i64 11
+  %7 = getelementptr inbounds float, float* %B, i64 11
   %8 = load float* %7, align 4
   %9 = fmul float %8, %T
   %10 = fpext float %9 to double
   %11 = fadd double %10, 5.000000e+00
   %12 = fptosi double %11 to i8
-  %13 = getelementptr inbounds i8* %A, i64 1
+  %13 = getelementptr inbounds i8, i8* %A, i64 1
   store i8 %12, i8* %13, align 1
-  %14 = getelementptr inbounds float* %B, i64 12
+  %14 = getelementptr inbounds float, float* %B, i64 12
   %15 = load float* %14, align 4
   %16 = fmul float %15, %T
   %17 = fpext float %16 to double
   %18 = fadd double %17, 6.000000e+00
   %19 = fptosi double %18 to i8
-  %20 = getelementptr inbounds i8* %A, i64 2
+  %20 = getelementptr inbounds i8, i8* %A, i64 2
   store i8 %19, i8* %20, align 1
   ret i32 undef
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/operandorder.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/operandorder.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/operandorder.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/operandorder.ll Fri Feb 27 13:29:02 2015
@@ -13,12 +13,12 @@ target datalayout = "e-p:32:32:32-i1:8:8
 
 define void @shuffle_operands1(double * noalias %from, double * noalias %to,
                                double %v1, double %v2) {
-  %from_1 = getelementptr double *%from, i64 1
+  %from_1 = getelementptr double, double *%from, i64 1
   %v0_1 = load double * %from
   %v0_2 = load double * %from_1
   %v1_1 = fadd double %v0_1, %v1
   %v1_2 = fadd double %v2, %v0_2
-  %to_2 = getelementptr double * %to, i64 1
+  %to_2 = getelementptr double, double * %to, i64 1
   store double %v1_1, double *%to
   store double %v1_2, double *%to_2
   ret void
@@ -35,12 +35,12 @@ br label %lp
 
 lp:
   %p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
-  %from_1 = getelementptr double *%from, i64 1
+  %from_1 = getelementptr double, double *%from, i64 1
   %v0_1 = load double * %from
   %v0_2 = load double * %from_1
   %v1_1 = fadd double %v0_1, %p
   %v1_2 = fadd double %v0_1, %v0_2
-  %to_2 = getelementptr double * %to, i64 1
+  %to_2 = getelementptr double, double * %to, i64 1
   store double %v1_1, double *%to
   store double %v1_2, double *%to_2
 br i1 undef, label %lp, label %ext
@@ -60,12 +60,12 @@ br label %lp
 
 lp:
   %p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
-  %from_1 = getelementptr double *%from, i64 1
+  %from_1 = getelementptr double, double *%from, i64 1
   %v0_1 = load double * %from
   %v0_2 = load double * %from_1
   %v1_1 = fadd double %p, %v0_1
   %v1_2 = fadd double %v0_2, %v0_1
-  %to_2 = getelementptr double * %to, i64 1
+  %to_2 = getelementptr double, double * %to, i64 1
   store double %v1_1, double *%to
   store double %v1_2, double *%to_2
 br i1 undef, label %lp, label %ext
@@ -85,12 +85,12 @@ br label %lp
 
 lp:
   %p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
-  %from_1 = getelementptr double *%from, i64 1
+  %from_1 = getelementptr double, double *%from, i64 1
   %v0_1 = load double * %from
   %v0_2 = load double * %from_1
   %v1_1 = fadd double %p, %v0_1
   %v1_2 = fadd double %v0_1, %v0_2
-  %to_2 = getelementptr double * %to, i64 1
+  %to_2 = getelementptr double, double * %to, i64 1
   store double %v1_1, double *%to
   store double %v1_2, double *%to_2
 br i1 undef, label %lp, label %ext
@@ -111,12 +111,12 @@ br label %lp
 
 lp:
   %p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
-  %from_1 = getelementptr double *%from, i64 1
+  %from_1 = getelementptr double, double *%from, i64 1
   %v0_1 = load double * %from
   %v0_2 = load double * %from_1
   %v1_1 = fadd double %v0_2, %v0_1
   %v1_2 = fadd double %p, %v0_1
-  %to_2 = getelementptr double * %to, i64 1
+  %to_2 = getelementptr double, double * %to, i64 1
   store double %v1_1, double *%to
   store double %v1_2, double *%to_2
 br i1 undef, label %lp, label %ext
@@ -136,12 +136,12 @@ br label %lp
 
 lp:
   %p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
-  %from_1 = getelementptr double *%from, i64 1
+  %from_1 = getelementptr double, double *%from, i64 1
   %v0_1 = load double * %from
   %v0_2 = load double * %from_1
   %v1_1 = fadd double %v0_1, %v0_2
   %v1_2 = fadd double %p, %v0_1
-  %to_2 = getelementptr double * %to, i64 1
+  %to_2 = getelementptr double, double * %to, i64 1
   store double %v1_1, double *%to
   store double %v1_2, double *%to_2
 br i1 undef, label %lp, label %ext
@@ -162,12 +162,12 @@ br label %lp
 
 lp:
   %p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
-  %from_1 = getelementptr double *%from, i64 1
+  %from_1 = getelementptr double, double *%from, i64 1
   %v0_1 = load double * %from
   %v0_2 = load double * %from_1
   %v1_1 = fadd double %v0_1, %v0_2
   %v1_2 = fadd double %v0_1, %p
-  %to_2 = getelementptr double * %to, i64 1
+  %to_2 = getelementptr double, double * %to, i64 1
   store double %v1_1, double *%to
   store double %v1_2, double *%to_2
 br i1 undef, label %lp, label %ext
@@ -200,28 +200,28 @@ for.body3:
   %1 = phi float [ %0, %for.cond1.preheader ], [ %10, %for.body3 ]
   %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
   %2 = add nsw i64 %indvars.iv, 1
-  %arrayidx = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %2
+  %arrayidx = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %2
   %3 = load float* %arrayidx, align 4
-  %arrayidx5 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv
+  %arrayidx5 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv
   %mul6 = fmul float %3, %1
   store float %mul6, float* %arrayidx5, align 4
   %4 = add nsw i64 %indvars.iv, 2
-  %arrayidx11 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %4
+  %arrayidx11 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %4
   %5 = load float* %arrayidx11, align 4
   %mul15 = fmul float %5, %3
   store float %mul15, float* %arrayidx, align 4
   %6 = add nsw i64 %indvars.iv, 3
-  %arrayidx21 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %6
+  %arrayidx21 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %6
   %7 = load float* %arrayidx21, align 4
   %mul25 = fmul float %7, %5
   store float %mul25, float* %arrayidx11, align 4
   %8 = add nsw i64 %indvars.iv, 4
-  %arrayidx31 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %8
+  %arrayidx31 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %8
   %9 = load float* %arrayidx31, align 4
   %mul35 = fmul float %9, %7
   store float %mul35, float* %arrayidx21, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 5
-  %arrayidx41 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.next
+  %arrayidx41 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.next
   %10 = load float* %arrayidx41, align 4
   %mul45 = fmul float %10, %9
   store float %mul45, float* %arrayidx31, align 4
@@ -245,12 +245,12 @@ define void @load_reorder_double(double*
   %2 = load double* %b
   %3 = fadd double %1, %2
   store double %3, double* %c
-  %4 = getelementptr inbounds double* %b, i64 1
+  %4 = getelementptr inbounds double, double* %b, i64 1
   %5 = load double* %4
-  %6 = getelementptr inbounds double* %a, i64 1
+  %6 = getelementptr inbounds double, double* %a, i64 1
   %7 = load double* %6
   %8 = fadd double %5, %7
-  %9 = getelementptr inbounds double* %c, i64 1
+  %9 = getelementptr inbounds double, double* %c, i64 1
   store double %8, double* %9
   ret void
 }
@@ -269,26 +269,26 @@ define void @load_reorder_float(float* n
   %2 = load float* %b
   %3 = fadd float %1, %2
   store float %3, float* %c
-  %4 = getelementptr inbounds float* %b, i64 1
+  %4 = getelementptr inbounds float, float* %b, i64 1
   %5 = load float* %4
-  %6 = getelementptr inbounds float* %a, i64 1
+  %6 = getelementptr inbounds float, float* %a, i64 1
   %7 = load float* %6
   %8 = fadd float %5, %7
-  %9 = getelementptr inbounds float* %c, i64 1
+  %9 = getelementptr inbounds float, float* %c, i64 1
   store float %8, float* %9
-  %10 = getelementptr inbounds float* %a, i64 2
+  %10 = getelementptr inbounds float, float* %a, i64 2
   %11 = load float* %10
-  %12 = getelementptr inbounds float* %b, i64 2
+  %12 = getelementptr inbounds float, float* %b, i64 2
   %13 = load float* %12
   %14 = fadd float %11, %13
-  %15 = getelementptr inbounds float* %c, i64 2
+  %15 = getelementptr inbounds float, float* %c, i64 2
   store float %14, float* %15
-  %16 = getelementptr inbounds float* %a, i64 3
+  %16 = getelementptr inbounds float, float* %a, i64 3
   %17 = load float* %16
-  %18 = getelementptr inbounds float* %b, i64 3
+  %18 = getelementptr inbounds float, float* %b, i64 3
   %19 = load float* %18
   %20 = fadd float %17, %19
-  %21 = getelementptr inbounds float* %c, i64 3
+  %21 = getelementptr inbounds float, float* %c, i64 3
   store float %20, float* %21
   ret void
 }
@@ -310,35 +310,35 @@ define void @opcode_reorder(float* noali
   %4 = load float* %d
   %5 = fadd float %3, %4
   store float %5, float* %a
-  %6 = getelementptr inbounds float* %d, i64 1
+  %6 = getelementptr inbounds float, float* %d, i64 1
   %7 = load float* %6
-  %8 = getelementptr inbounds float* %b, i64 1
+  %8 = getelementptr inbounds float, float* %b, i64 1
   %9 = load float* %8
-  %10 = getelementptr inbounds float* %c, i64 1
+  %10 = getelementptr inbounds float, float* %c, i64 1
   %11 = load float* %10
   %12 = fadd float %9, %11
   %13 = fadd float %7, %12
-  %14 = getelementptr inbounds float* %a, i64 1
+  %14 = getelementptr inbounds float, float* %a, i64 1
   store float %13, float* %14
-  %15 = getelementptr inbounds float* %b, i64 2
+  %15 = getelementptr inbounds float, float* %b, i64 2
   %16 = load float* %15
-  %17 = getelementptr inbounds float* %c, i64 2
+  %17 = getelementptr inbounds float, float* %c, i64 2
   %18 = load float* %17
   %19 = fadd float %16, %18
-  %20 = getelementptr inbounds float* %d, i64 2
+  %20 = getelementptr inbounds float, float* %d, i64 2
   %21 = load float* %20
   %22 = fadd float %19, %21
-  %23 = getelementptr inbounds float* %a, i64 2
+  %23 = getelementptr inbounds float, float* %a, i64 2
   store float %22, float* %23
-  %24 = getelementptr inbounds float* %b, i64 3
+  %24 = getelementptr inbounds float, float* %b, i64 3
   %25 = load float* %24
-  %26 = getelementptr inbounds float* %c, i64 3
+  %26 = getelementptr inbounds float, float* %c, i64 3
   %27 = load float* %26
   %28 = fadd float %25, %27
-  %29 = getelementptr inbounds float* %d, i64 3
+  %29 = getelementptr inbounds float, float* %d, i64 3
   %30 = load float* %29
   %31 = fadd float %28, %30
-  %32 = getelementptr inbounds float* %a, i64 3
+  %32 = getelementptr inbounds float, float* %a, i64 3
   store float %31, float* %32
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/opt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/opt.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/opt.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/opt.ll Fri Feb 27 13:29:02 2015
@@ -18,13 +18,13 @@ entry:
   %i0 = load double* %a, align 8
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   store double %mul, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %mul5, double* %arrayidx5, align 8
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/phi.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/phi.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/phi.ll Fri Feb 27 13:29:02 2015
@@ -29,9 +29,9 @@ entry:
   br i1 %tobool, label %if.else, label %if.end
 
 if.else:                                          ; preds = %entry
-  %arrayidx = getelementptr inbounds double* %A, i64 10
+  %arrayidx = getelementptr inbounds double, double* %A, i64 10
   %0 = load double* %arrayidx, align 8
-  %arrayidx1 = getelementptr inbounds double* %A, i64 11
+  %arrayidx1 = getelementptr inbounds double, double* %A, i64 11
   %1 = load double* %arrayidx1, align 8
   br label %if.end
 
@@ -39,7 +39,7 @@ if.end:
   %A0.0 = phi double [ %0, %if.else ], [ 3.000000e+00, %entry ]
   %A1.0 = phi double [ %1, %if.else ], [ 5.000000e+00, %entry ]
   store double %A0.0, double* %A, align 8
-  %arrayidx3 = getelementptr inbounds double* %A, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
   store double %A1.0, double* %arrayidx3, align 8
   ret i32 undef
 }
@@ -69,7 +69,7 @@ if.end:
 ;CHECK: ret
 define i32 @foo2(double* noalias nocapture %B, double* noalias nocapture %A, i32 %n, i32 %m) #0 {
 entry:
-  %arrayidx = getelementptr inbounds double* %A, i64 1
+  %arrayidx = getelementptr inbounds double, double* %A, i64 1
   %0 = load double* %arrayidx, align 8
   %1 = load double* %A, align 8
   br label %for.body
@@ -90,7 +90,7 @@ for.body:
 
 for.end:                                          ; preds = %for.body
   store double %add5, double* %B, align 8
-  %arrayidx7 = getelementptr inbounds double* %B, i64 1
+  %arrayidx7 = getelementptr inbounds double, double* %B, i64 1
   store double %add4, double* %arrayidx7, align 8
   ret i32 0
 }
@@ -124,13 +124,13 @@ for.end:
 define float @foo3(float* nocapture readonly %A) #0 {
 entry:
   %0 = load float* %A, align 4
-  %arrayidx1 = getelementptr inbounds float* %A, i64 1
+  %arrayidx1 = getelementptr inbounds float, float* %A, i64 1
   %1 = load float* %arrayidx1, align 4
-  %arrayidx2 = getelementptr inbounds float* %A, i64 2
+  %arrayidx2 = getelementptr inbounds float, float* %A, i64 2
   %2 = load float* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds float* %A, i64 3
+  %arrayidx3 = getelementptr inbounds float, float* %A, i64 3
   %3 = load float* %arrayidx3, align 4
-  %arrayidx4 = getelementptr inbounds float* %A, i64 4
+  %arrayidx4 = getelementptr inbounds float, float* %A, i64 4
   %4 = load float* %arrayidx4, align 4
   br label %for.body
 
@@ -148,17 +148,17 @@ for.body:
   %mul10 = fmul float %5, 8.000000e+00
   %add11 = fadd float %G.053, %mul10
   %7 = add nsw i64 %indvars.iv, 2
-  %arrayidx14 = getelementptr inbounds float* %A, i64 %7
+  %arrayidx14 = getelementptr inbounds float, float* %A, i64 %7
   %8 = load float* %arrayidx14, align 4
   %mul15 = fmul float %8, 9.000000e+00
   %add16 = fadd float %B.054, %mul15
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 3
-  %arrayidx19 = getelementptr inbounds float* %A, i64 %indvars.iv.next
+  %arrayidx19 = getelementptr inbounds float, float* %A, i64 %indvars.iv.next
   %9 = load float* %arrayidx19, align 4
   %mul20 = fmul float %9, 1.000000e+01
   %add21 = fadd float %Y.055, %mul20
   %10 = add nsw i64 %indvars.iv, 4
-  %arrayidx24 = getelementptr inbounds float* %A, i64 %10
+  %arrayidx24 = getelementptr inbounds float, float* %A, i64 %10
   %11 = load float* %arrayidx24, align 4
   %mul25 = fmul float %11, 1.100000e+01
   %add26 = fadd float %P.056, %mul25
@@ -216,7 +216,7 @@ define void @test(x86_fp80* %i1, x86_fp8
 
 entry:
   %i1.0 = load x86_fp80* %i1, align 16
-  %i1.gep1 = getelementptr x86_fp80* %i1, i64 1
+  %i1.gep1 = getelementptr x86_fp80, x86_fp80* %i1, i64 1
   %i1.1 = load x86_fp80* %i1.gep1, align 16
 ; CHECK: load x86_fp80*
 ; CHECK: load x86_fp80*
@@ -225,9 +225,9 @@ entry:
   br i1 undef, label %then, label %end
 
 then:
-  %i2.gep0 = getelementptr inbounds x86_fp80* %i2, i64 0
+  %i2.gep0 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 0
   %i2.0 = load x86_fp80* %i2.gep0, align 16
-  %i2.gep1 = getelementptr inbounds x86_fp80* %i2, i64 1
+  %i2.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 1
   %i2.1 = load x86_fp80* %i2.gep1, align 16
 ; CHECK: load x86_fp80*
 ; CHECK: load x86_fp80*
@@ -242,7 +242,7 @@ end:
 ; CHECK-NOT: extractelement <2 x x86_fp80>
 ; CHECK-NOT: extractelement <2 x x86_fp80>
   store x86_fp80 %phi0, x86_fp80* %o, align 16
-  %o.gep1 = getelementptr inbounds x86_fp80* %o, i64 1
+  %o.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %o, i64 1
   store x86_fp80 %phi1, x86_fp80* %o.gep1, align 16
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll Fri Feb 27 13:29:02 2015
@@ -13,7 +13,7 @@ define void @test(double* %i1, double* %
 
 entry:
   %i1.0 = load double* %i1, align 16
-  %i1.gep1 = getelementptr double* %i1, i64 1
+  %i1.gep1 = getelementptr double, double* %i1, i64 1
   %i1.1 = load double* %i1.gep1, align 16
 ; CHECK: load double*
 ; CHECK: load double*
@@ -22,9 +22,9 @@ entry:
   br i1 undef, label %then, label %end
 
 then:
-  %i2.gep0 = getelementptr inbounds double* %i2, i64 0
+  %i2.gep0 = getelementptr inbounds double, double* %i2, i64 0
   %i2.0 = load double* %i2.gep0, align 16
-  %i2.gep1 = getelementptr inbounds double* %i2, i64 1
+  %i2.gep1 = getelementptr inbounds double, double* %i2, i64 1
   %i2.1 = load double* %i2.gep1, align 16
 ; CHECK: load double*
 ; CHECK: load double*
@@ -39,7 +39,7 @@ end:
 ; CHECK: extractelement <2 x double>
 ; CHECK: extractelement <2 x double>
   store double %phi0, double* %o, align 16
-  %o.gep1 = getelementptr inbounds double* %o, i64 1
+  %o.gep1 = getelementptr inbounds double, double* %o, i64 1
   store double %phi1, double* %o.gep1, align 16
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/powof2div.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/powof2div.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/powof2div.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/powof2div.ll Fri Feb 27 13:29:02 2015
@@ -14,29 +14,29 @@ entry:
   %add = add nsw i32 %1, %0
   %div = sdiv i32 %add, 2
   store i32 %div, i32* %a, align 4
-  %arrayidx3 = getelementptr inbounds i32* %b, i64 1
+  %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 1
   %2 = load i32* %arrayidx3, align 4
-  %arrayidx4 = getelementptr inbounds i32* %c, i64 1
+  %arrayidx4 = getelementptr inbounds i32, i32* %c, i64 1
   %3 = load i32* %arrayidx4, align 4
   %add5 = add nsw i32 %3, %2
   %div6 = sdiv i32 %add5, 2
-  %arrayidx7 = getelementptr inbounds i32* %a, i64 1
+  %arrayidx7 = getelementptr inbounds i32, i32* %a, i64 1
   store i32 %div6, i32* %arrayidx7, align 4
-  %arrayidx8 = getelementptr inbounds i32* %b, i64 2
+  %arrayidx8 = getelementptr inbounds i32, i32* %b, i64 2
   %4 = load i32* %arrayidx8, align 4
-  %arrayidx9 = getelementptr inbounds i32* %c, i64 2
+  %arrayidx9 = getelementptr inbounds i32, i32* %c, i64 2
   %5 = load i32* %arrayidx9, align 4
   %add10 = add nsw i32 %5, %4
   %div11 = sdiv i32 %add10, 2
-  %arrayidx12 = getelementptr inbounds i32* %a, i64 2
+  %arrayidx12 = getelementptr inbounds i32, i32* %a, i64 2
   store i32 %div11, i32* %arrayidx12, align 4
-  %arrayidx13 = getelementptr inbounds i32* %b, i64 3
+  %arrayidx13 = getelementptr inbounds i32, i32* %b, i64 3
   %6 = load i32* %arrayidx13, align 4
-  %arrayidx14 = getelementptr inbounds i32* %c, i64 3
+  %arrayidx14 = getelementptr inbounds i32, i32* %c, i64 3
   %7 = load i32* %arrayidx14, align 4
   %add15 = add nsw i32 %7, %6
   %div16 = sdiv i32 %add15, 2
-  %arrayidx17 = getelementptr inbounds i32* %a, i64 3
+  %arrayidx17 = getelementptr inbounds i32, i32* %a, i64 3
   store i32 %div16, i32* %arrayidx17, align 4
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/pr16899.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/pr16899.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/pr16899.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/pr16899.ll Fri Feb 27 13:29:02 2015
@@ -9,7 +9,7 @@ define i32 @fn1() #0 {
 entry:
   %0 = load i32** @a, align 4, !tbaa !4
   %1 = load i32* %0, align 4, !tbaa !5
-  %arrayidx1 = getelementptr inbounds i32* %0, i32 1
+  %arrayidx1 = getelementptr inbounds i32, i32* %0, i32 1
   %2 = load i32* %arrayidx1, align 4, !tbaa !5
   br label %do.body
 

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/pr19657.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/pr19657.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/pr19657.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/pr19657.ll Fri Feb 27 13:29:02 2015
@@ -12,60 +12,60 @@ define void @foo(double* %x) #0 {
   %1 = alloca double*, align 8
   store double* %x, double** %1, align 8
   %2 = load double** %1, align 8
-  %3 = getelementptr inbounds double* %2, i64 0
+  %3 = getelementptr inbounds double, double* %2, i64 0
   %4 = load double* %3, align 8
   %5 = load double** %1, align 8
-  %6 = getelementptr inbounds double* %5, i64 0
+  %6 = getelementptr inbounds double, double* %5, i64 0
   %7 = load double* %6, align 8
   %8 = fadd double %4, %7
   %9 = load double** %1, align 8
-  %10 = getelementptr inbounds double* %9, i64 0
+  %10 = getelementptr inbounds double, double* %9, i64 0
   %11 = load double* %10, align 8
   %12 = fadd double %8, %11
   %13 = load double** %1, align 8
-  %14 = getelementptr inbounds double* %13, i64 0
+  %14 = getelementptr inbounds double, double* %13, i64 0
   store double %12, double* %14, align 8
   %15 = load double** %1, align 8
-  %16 = getelementptr inbounds double* %15, i64 1
+  %16 = getelementptr inbounds double, double* %15, i64 1
   %17 = load double* %16, align 8
   %18 = load double** %1, align 8
-  %19 = getelementptr inbounds double* %18, i64 1
+  %19 = getelementptr inbounds double, double* %18, i64 1
   %20 = load double* %19, align 8
   %21 = fadd double %17, %20
   %22 = load double** %1, align 8
-  %23 = getelementptr inbounds double* %22, i64 1
+  %23 = getelementptr inbounds double, double* %22, i64 1
   %24 = load double* %23, align 8
   %25 = fadd double %21, %24
   %26 = load double** %1, align 8
-  %27 = getelementptr inbounds double* %26, i64 1
+  %27 = getelementptr inbounds double, double* %26, i64 1
   store double %25, double* %27, align 8
   %28 = load double** %1, align 8
-  %29 = getelementptr inbounds double* %28, i64 2
+  %29 = getelementptr inbounds double, double* %28, i64 2
   %30 = load double* %29, align 8
   %31 = load double** %1, align 8
-  %32 = getelementptr inbounds double* %31, i64 2
+  %32 = getelementptr inbounds double, double* %31, i64 2
   %33 = load double* %32, align 8
   %34 = fadd double %30, %33
   %35 = load double** %1, align 8
-  %36 = getelementptr inbounds double* %35, i64 2
+  %36 = getelementptr inbounds double, double* %35, i64 2
   %37 = load double* %36, align 8
   %38 = fadd double %34, %37
   %39 = load double** %1, align 8
-  %40 = getelementptr inbounds double* %39, i64 2
+  %40 = getelementptr inbounds double, double* %39, i64 2
   store double %38, double* %40, align 8
   %41 = load double** %1, align 8
-  %42 = getelementptr inbounds double* %41, i64 3
+  %42 = getelementptr inbounds double, double* %41, i64 3
   %43 = load double* %42, align 8
   %44 = load double** %1, align 8
-  %45 = getelementptr inbounds double* %44, i64 3
+  %45 = getelementptr inbounds double, double* %44, i64 3
   %46 = load double* %45, align 8
   %47 = fadd double %43, %46
   %48 = load double** %1, align 8
-  %49 = getelementptr inbounds double* %48, i64 3
+  %49 = getelementptr inbounds double, double* %48, i64 3
   %50 = load double* %49, align 8
   %51 = fadd double %47, %50
   %52 = load double** %1, align 8
-  %53 = getelementptr inbounds double* %52, i64 3
+  %53 = getelementptr inbounds double, double* %52, i64 3
   store double %51, double* %53, align 8
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll Fri Feb 27 13:29:02 2015
@@ -10,10 +10,10 @@ target triple = "x86_64-unknown-unknown"
 ; CHECK-LABEL: @exact(
 ; CHECK: lshr exact <4 x i32>
 define void @exact(i32* %x) {
-  %idx1 = getelementptr inbounds i32* %x, i64 0
-  %idx2 = getelementptr inbounds i32* %x, i64 1
-  %idx3 = getelementptr inbounds i32* %x, i64 2
-  %idx4 = getelementptr inbounds i32* %x, i64 3
+  %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+  %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+  %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+  %idx4 = getelementptr inbounds i32, i32* %x, i64 3
 
   %load1 = load i32* %idx1, align 4
   %load2 = load i32* %idx2, align 4
@@ -36,10 +36,10 @@ define void @exact(i32* %x) {
 ; CHECK-LABEL: @not_exact(
 ; CHECK: lshr <4 x i32>
 define void @not_exact(i32* %x) {
-  %idx1 = getelementptr inbounds i32* %x, i64 0
-  %idx2 = getelementptr inbounds i32* %x, i64 1
-  %idx3 = getelementptr inbounds i32* %x, i64 2
-  %idx4 = getelementptr inbounds i32* %x, i64 3
+  %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+  %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+  %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+  %idx4 = getelementptr inbounds i32, i32* %x, i64 3
 
   %load1 = load i32* %idx1, align 4
   %load2 = load i32* %idx2, align 4
@@ -62,10 +62,10 @@ define void @not_exact(i32* %x) {
 ; CHECK-LABEL: @nsw(
 ; CHECK: add nsw <4 x i32>
 define void @nsw(i32* %x) {
-  %idx1 = getelementptr inbounds i32* %x, i64 0
-  %idx2 = getelementptr inbounds i32* %x, i64 1
-  %idx3 = getelementptr inbounds i32* %x, i64 2
-  %idx4 = getelementptr inbounds i32* %x, i64 3
+  %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+  %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+  %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+  %idx4 = getelementptr inbounds i32, i32* %x, i64 3
 
   %load1 = load i32* %idx1, align 4
   %load2 = load i32* %idx2, align 4
@@ -88,10 +88,10 @@ define void @nsw(i32* %x) {
 ; CHECK-LABEL: @not_nsw(
 ; CHECK: add <4 x i32>
 define void @not_nsw(i32* %x) {
-  %idx1 = getelementptr inbounds i32* %x, i64 0
-  %idx2 = getelementptr inbounds i32* %x, i64 1
-  %idx3 = getelementptr inbounds i32* %x, i64 2
-  %idx4 = getelementptr inbounds i32* %x, i64 3
+  %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+  %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+  %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+  %idx4 = getelementptr inbounds i32, i32* %x, i64 3
 
   %load1 = load i32* %idx1, align 4
   %load2 = load i32* %idx2, align 4
@@ -114,10 +114,10 @@ define void @not_nsw(i32* %x) {
 ; CHECK-LABEL: @nuw(
 ; CHECK: add nuw <4 x i32>
 define void @nuw(i32* %x) {
-  %idx1 = getelementptr inbounds i32* %x, i64 0
-  %idx2 = getelementptr inbounds i32* %x, i64 1
-  %idx3 = getelementptr inbounds i32* %x, i64 2
-  %idx4 = getelementptr inbounds i32* %x, i64 3
+  %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+  %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+  %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+  %idx4 = getelementptr inbounds i32, i32* %x, i64 3
 
   %load1 = load i32* %idx1, align 4
   %load2 = load i32* %idx2, align 4
@@ -140,10 +140,10 @@ define void @nuw(i32* %x) {
 ; CHECK-LABEL: @not_nuw(
 ; CHECK: add <4 x i32>
 define void @not_nuw(i32* %x) {
-  %idx1 = getelementptr inbounds i32* %x, i64 0
-  %idx2 = getelementptr inbounds i32* %x, i64 1
-  %idx3 = getelementptr inbounds i32* %x, i64 2
-  %idx4 = getelementptr inbounds i32* %x, i64 3
+  %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+  %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+  %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+  %idx4 = getelementptr inbounds i32, i32* %x, i64 3
 
   %load1 = load i32* %idx1, align 4
   %load2 = load i32* %idx2, align 4
@@ -166,10 +166,10 @@ define void @not_nuw(i32* %x) {
 ; CHECK-LABEL: @nnan(
 ; CHECK: fadd nnan <4 x float>
 define void @nnan(float* %x) {
-  %idx1 = getelementptr inbounds float* %x, i64 0
-  %idx2 = getelementptr inbounds float* %x, i64 1
-  %idx3 = getelementptr inbounds float* %x, i64 2
-  %idx4 = getelementptr inbounds float* %x, i64 3
+  %idx1 = getelementptr inbounds float, float* %x, i64 0
+  %idx2 = getelementptr inbounds float, float* %x, i64 1
+  %idx3 = getelementptr inbounds float, float* %x, i64 2
+  %idx4 = getelementptr inbounds float, float* %x, i64 3
 
   %load1 = load float* %idx1, align 4
   %load2 = load float* %idx2, align 4
@@ -192,10 +192,10 @@ define void @nnan(float* %x) {
 ; CHECK-LABEL: @not_nnan(
 ; CHECK: fadd <4 x float>
 define void @not_nnan(float* %x) {
-  %idx1 = getelementptr inbounds float* %x, i64 0
-  %idx2 = getelementptr inbounds float* %x, i64 1
-  %idx3 = getelementptr inbounds float* %x, i64 2
-  %idx4 = getelementptr inbounds float* %x, i64 3
+  %idx1 = getelementptr inbounds float, float* %x, i64 0
+  %idx2 = getelementptr inbounds float, float* %x, i64 1
+  %idx3 = getelementptr inbounds float, float* %x, i64 2
+  %idx4 = getelementptr inbounds float, float* %x, i64 3
 
   %load1 = load float* %idx1, align 4
   %load2 = load float* %idx2, align 4
@@ -218,10 +218,10 @@ define void @not_nnan(float* %x) {
 ; CHECK-LABEL: @only_fast(
 ; CHECK: fadd fast <4 x float>
 define void @only_fast(float* %x) {
-  %idx1 = getelementptr inbounds float* %x, i64 0
-  %idx2 = getelementptr inbounds float* %x, i64 1
-  %idx3 = getelementptr inbounds float* %x, i64 2
-  %idx4 = getelementptr inbounds float* %x, i64 3
+  %idx1 = getelementptr inbounds float, float* %x, i64 0
+  %idx2 = getelementptr inbounds float, float* %x, i64 1
+  %idx3 = getelementptr inbounds float, float* %x, i64 2
+  %idx4 = getelementptr inbounds float, float* %x, i64 3
 
   %load1 = load float* %idx1, align 4
   %load2 = load float* %idx2, align 4
@@ -244,10 +244,10 @@ define void @only_fast(float* %x) {
 ; CHECK-LABEL: @only_arcp(
 ; CHECK: fadd arcp <4 x float>
 define void @only_arcp(float* %x) {
-  %idx1 = getelementptr inbounds float* %x, i64 0
-  %idx2 = getelementptr inbounds float* %x, i64 1
-  %idx3 = getelementptr inbounds float* %x, i64 2
-  %idx4 = getelementptr inbounds float* %x, i64 3
+  %idx1 = getelementptr inbounds float, float* %x, i64 0
+  %idx2 = getelementptr inbounds float, float* %x, i64 1
+  %idx3 = getelementptr inbounds float, float* %x, i64 2
+  %idx4 = getelementptr inbounds float, float* %x, i64 3
 
   %load1 = load float* %idx1, align 4
   %load2 = load float* %idx2, align 4
@@ -271,10 +271,10 @@ define void @only_arcp(float* %x) {
 ; CHECK: add nsw <4 x i32>
 ; CHECK: sub nsw <4 x i32>
 define void @addsub_all_nsw(i32* %x) {
-  %idx1 = getelementptr inbounds i32* %x, i64 0
-  %idx2 = getelementptr inbounds i32* %x, i64 1
-  %idx3 = getelementptr inbounds i32* %x, i64 2
-  %idx4 = getelementptr inbounds i32* %x, i64 3
+  %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+  %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+  %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+  %idx4 = getelementptr inbounds i32, i32* %x, i64 3
 
   %load1 = load i32* %idx1, align 4
   %load2 = load i32* %idx2, align 4
@@ -298,10 +298,10 @@ define void @addsub_all_nsw(i32* %x) {
 ; CHECK: add nsw <4 x i32>
 ; CHECK: sub <4 x i32>
 define void @addsub_some_nsw(i32* %x) {
-  %idx1 = getelementptr inbounds i32* %x, i64 0
-  %idx2 = getelementptr inbounds i32* %x, i64 1
-  %idx3 = getelementptr inbounds i32* %x, i64 2
-  %idx4 = getelementptr inbounds i32* %x, i64 3
+  %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+  %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+  %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+  %idx4 = getelementptr inbounds i32, i32* %x, i64 3
 
   %load1 = load i32* %idx1, align 4
   %load2 = load i32* %idx2, align 4
@@ -325,10 +325,10 @@ define void @addsub_some_nsw(i32* %x) {
 ; CHECK: add <4 x i32>
 ; CHECK: sub <4 x i32>
 define void @addsub_no_nsw(i32* %x) {
-  %idx1 = getelementptr inbounds i32* %x, i64 0
-  %idx2 = getelementptr inbounds i32* %x, i64 1
-  %idx3 = getelementptr inbounds i32* %x, i64 2
-  %idx4 = getelementptr inbounds i32* %x, i64 3
+  %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+  %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+  %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+  %idx4 = getelementptr inbounds i32, i32* %x, i64 3
 
   %load1 = load i32* %idx1, align 4
   %load2 = load i32* %idx2, align 4

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction.ll Fri Feb 27 13:29:02 2015
@@ -23,11 +23,11 @@ for.body:
   %i.015 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
   %sum.014 = phi double [ %add6, %for.body ], [ 0.000000e+00, %entry ]
   %mul = shl nsw i32 %i.015, 1
-  %arrayidx = getelementptr inbounds double* %A, i32 %mul
+  %arrayidx = getelementptr inbounds double, double* %A, i32 %mul
   %0 = load double* %arrayidx, align 4
   %mul1 = fmul double %0, 7.000000e+00
   %add12 = or i32 %mul, 1
-  %arrayidx3 = getelementptr inbounds double* %A, i32 %add12
+  %arrayidx3 = getelementptr inbounds double, double* %A, i32 %add12
   %1 = load double* %arrayidx3, align 4
   %mul4 = fmul double %1, 7.000000e+00
   %add5 = fadd double %mul1, %mul4

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction2.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction2.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction2.ll Fri Feb 27 13:29:02 2015
@@ -13,12 +13,12 @@ define double @foo(double* nocapture %D)
   %i.02 = phi i32 [ 0, %0 ], [ %10, %1 ]
   %sum.01 = phi double [ 0.000000e+00, %0 ], [ %9, %1 ]
   %2 = shl nsw i32 %i.02, 1
-  %3 = getelementptr inbounds double* %D, i32 %2
+  %3 = getelementptr inbounds double, double* %D, i32 %2
   %4 = load double* %3, align 4
   %A4 = fmul double %4, %4
   %A42 = fmul double %A4, %A4
   %5 = or i32 %2, 1
-  %6 = getelementptr inbounds double* %D, i32 %5
+  %6 = getelementptr inbounds double, double* %D, i32 %5
   %7 = load double* %6, align 4
   %A7 = fmul double %7, %7
   %A72 = fmul double %A7, %A7

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/return.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/return.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/return.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/return.ll Fri Feb 27 13:29:02 2015
@@ -41,12 +41,12 @@ entry:
 define double @return2(double* nocapture readonly %x) {
 entry:
   %x0 = load double* %x, align 4
-  %arrayidx1 = getelementptr inbounds double* %x, i32 2
+  %arrayidx1 = getelementptr inbounds double, double* %x, i32 2
   %x2 = load double* %arrayidx1, align 4
   %add3 = fadd double %x0, %x2
-  %arrayidx2 = getelementptr inbounds double* %x, i32 1
+  %arrayidx2 = getelementptr inbounds double, double* %x, i32 1
   %x1 = load double* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds double* %x, i32 3
+  %arrayidx3 = getelementptr inbounds double, double* %x, i32 3
   %x3 = load double* %arrayidx3, align 4
   %add4 = fadd double %x1, %x3
   %add5 = fadd double %add3, %add4

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/rgb_phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/rgb_phi.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/rgb_phi.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/rgb_phi.ll Fri Feb 27 13:29:02 2015
@@ -34,9 +34,9 @@ target triple = "i386-apple-macosx10.9.0
 define float @foo(float* nocapture readonly %A) {
 entry:
   %0 = load float* %A, align 4
-  %arrayidx1 = getelementptr inbounds float* %A, i64 1
+  %arrayidx1 = getelementptr inbounds float, float* %A, i64 1
   %1 = load float* %arrayidx1, align 4
-  %arrayidx2 = getelementptr inbounds float* %A, i64 2
+  %arrayidx2 = getelementptr inbounds float, float* %A, i64 2
   %2 = load float* %arrayidx2, align 4
   br label %for.body
 
@@ -49,12 +49,12 @@ for.body:
   %mul = fmul float %3, 7.000000e+00
   %add4 = fadd float %R.030, %mul
   %4 = add nsw i64 %indvars.iv, 1
-  %arrayidx7 = getelementptr inbounds float* %A, i64 %4
+  %arrayidx7 = getelementptr inbounds float, float* %A, i64 %4
   %5 = load float* %arrayidx7, align 4
   %mul8 = fmul float %5, 8.000000e+00
   %add9 = fadd float %G.031, %mul8
   %6 = add nsw i64 %indvars.iv, 2
-  %arrayidx12 = getelementptr inbounds float* %A, i64 %6
+  %arrayidx12 = getelementptr inbounds float, float* %A, i64 %6
   %7 = load float* %arrayidx12, align 4
   %mul13 = fmul float %7, 9.000000e+00
   %add14 = fadd float %B.032, %mul13
@@ -64,7 +64,7 @@ for.body:
   br i1 %cmp, label %for.body.for.body_crit_edge, label %for.end
 
 for.body.for.body_crit_edge:                      ; preds = %for.body
-  %arrayidx3.phi.trans.insert = getelementptr inbounds float* %A, i64 %indvars.iv.next
+  %arrayidx3.phi.trans.insert = getelementptr inbounds float, float* %A, i64 %indvars.iv.next
   %.pre = load float* %arrayidx3.phi.trans.insert, align 4
   br label %for.body
 

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/saxpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/saxpy.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/saxpy.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/saxpy.ll Fri Feb 27 13:29:02 2015
@@ -9,34 +9,34 @@ target triple = "x86_64-apple-macosx10.8
 ;CHECK: ret
 
 define void @SAXPY(i32* noalias nocapture %x, i32* noalias nocapture %y, i32 %a, i64 %i) {
-  %1 = getelementptr inbounds i32* %x, i64 %i
+  %1 = getelementptr inbounds i32, i32* %x, i64 %i
   %2 = load i32* %1, align 4
   %3 = mul nsw i32 %2, %a
-  %4 = getelementptr inbounds i32* %y, i64 %i
+  %4 = getelementptr inbounds i32, i32* %y, i64 %i
   %5 = load i32* %4, align 4
   %6 = add nsw i32 %3, %5
   store i32 %6, i32* %1, align 4
   %7 = add i64 %i, 1
-  %8 = getelementptr inbounds i32* %x, i64 %7
+  %8 = getelementptr inbounds i32, i32* %x, i64 %7
   %9 = load i32* %8, align 4
   %10 = mul nsw i32 %9, %a
-  %11 = getelementptr inbounds i32* %y, i64 %7
+  %11 = getelementptr inbounds i32, i32* %y, i64 %7
   %12 = load i32* %11, align 4
   %13 = add nsw i32 %10, %12
   store i32 %13, i32* %8, align 4
   %14 = add i64 %i, 2
-  %15 = getelementptr inbounds i32* %x, i64 %14
+  %15 = getelementptr inbounds i32, i32* %x, i64 %14
   %16 = load i32* %15, align 4
   %17 = mul nsw i32 %16, %a
-  %18 = getelementptr inbounds i32* %y, i64 %14
+  %18 = getelementptr inbounds i32, i32* %y, i64 %14
   %19 = load i32* %18, align 4
   %20 = add nsw i32 %17, %19
   store i32 %20, i32* %15, align 4
   %21 = add i64 %i, 3
-  %22 = getelementptr inbounds i32* %x, i64 %21
+  %22 = getelementptr inbounds i32, i32* %x, i64 %21
   %23 = load i32* %22, align 4
   %24 = mul nsw i32 %23, %a
-  %25 = getelementptr inbounds i32* %y, i64 %21
+  %25 = getelementptr inbounds i32, i32* %y, i64 %21
   %26 = load i32* %25, align 4
   %27 = add nsw i32 %24, %26
   store i32 %27, i32* %22, align 4
@@ -46,14 +46,14 @@ define void @SAXPY(i32* noalias nocaptur
 ; Make sure we don't crash on this one.
 define void @SAXPY_crash(i32* noalias nocapture %x, i32* noalias nocapture %y, i64 %i) {
   %1 = add i64 %i, 1
-  %2 = getelementptr inbounds i32* %x, i64 %1
-  %3 = getelementptr inbounds i32* %y, i64 %1
+  %2 = getelementptr inbounds i32, i32* %x, i64 %1
+  %3 = getelementptr inbounds i32, i32* %y, i64 %1
   %4 = load i32* %3, align 4
   %5 = add nsw i32 undef, %4
   store i32 %5, i32* %2, align 4
   %6 = add i64 %i, 2
-  %7 = getelementptr inbounds i32* %x, i64 %6
-  %8 = getelementptr inbounds i32* %y, i64 %6
+  %7 = getelementptr inbounds i32, i32* %x, i64 %6
+  %8 = getelementptr inbounds i32, i32* %y, i64 %6
   %9 = load i32* %8, align 4
   %10 = add nsw i32 undef, %9
   store i32 %10, i32* %7, align 4

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/scheduling.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/scheduling.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/scheduling.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/scheduling.ll Fri Feb 27 13:29:02 2015
@@ -24,43 +24,43 @@ for.body:
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %a.088 = phi i32 [ 0, %entry ], [ %add52, %for.body ]
   %1 = shl i64 %indvars.iv, 3
-  %arrayidx = getelementptr inbounds i32* %diff, i64 %1
+  %arrayidx = getelementptr inbounds i32, i32* %diff, i64 %1
   %2 = load i32* %arrayidx, align 4
   %3 = or i64 %1, 4
-  %arrayidx2 = getelementptr inbounds i32* %diff, i64 %3
+  %arrayidx2 = getelementptr inbounds i32, i32* %diff, i64 %3
   %4 = load i32* %arrayidx2, align 4
   %add3 = add nsw i32 %4, %2
-  %arrayidx6 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 0
+  %arrayidx6 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 0
   store i32 %add3, i32* %arrayidx6, align 16
   %add10 = add nsw i32 %add3, %a.088
   %5 = or i64 %1, 1
-  %arrayidx13 = getelementptr inbounds i32* %diff, i64 %5
+  %arrayidx13 = getelementptr inbounds i32, i32* %diff, i64 %5
   %6 = load i32* %arrayidx13, align 4
   %7 = or i64 %1, 5
-  %arrayidx16 = getelementptr inbounds i32* %diff, i64 %7
+  %arrayidx16 = getelementptr inbounds i32, i32* %diff, i64 %7
   %8 = load i32* %arrayidx16, align 4
   %add17 = add nsw i32 %8, %6
-  %arrayidx20 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 1
+  %arrayidx20 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 1
   store i32 %add17, i32* %arrayidx20, align 4
   %add24 = add nsw i32 %add10, %add17
   %9 = or i64 %1, 2
-  %arrayidx27 = getelementptr inbounds i32* %diff, i64 %9
+  %arrayidx27 = getelementptr inbounds i32, i32* %diff, i64 %9
   %10 = load i32* %arrayidx27, align 4
   %11 = or i64 %1, 6
-  %arrayidx30 = getelementptr inbounds i32* %diff, i64 %11
+  %arrayidx30 = getelementptr inbounds i32, i32* %diff, i64 %11
   %12 = load i32* %arrayidx30, align 4
   %add31 = add nsw i32 %12, %10
-  %arrayidx34 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 2
+  %arrayidx34 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 2
   store i32 %add31, i32* %arrayidx34, align 8
   %add38 = add nsw i32 %add24, %add31
   %13 = or i64 %1, 3
-  %arrayidx41 = getelementptr inbounds i32* %diff, i64 %13
+  %arrayidx41 = getelementptr inbounds i32, i32* %diff, i64 %13
   %14 = load i32* %arrayidx41, align 4
   %15 = or i64 %1, 7
-  %arrayidx44 = getelementptr inbounds i32* %diff, i64 %15
+  %arrayidx44 = getelementptr inbounds i32, i32* %diff, i64 %15
   %16 = load i32* %arrayidx44, align 4
   %add45 = add nsw i32 %16, %14
-  %arrayidx48 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 3
+  %arrayidx48 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 3
   store i32 %add45, i32* %arrayidx48, align 4
   %add52 = add nsw i32 %add38, %add45
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -68,7 +68,7 @@ for.body:
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body
-  %arraydecay = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 0
+  %arraydecay = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 0
   call void @ff([8 x i32]* %arraydecay) #1
   ret i32 %add52
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/simple-loop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/simple-loop.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/simple-loop.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/simple-loop.ll Fri Feb 27 13:29:02 2015
@@ -11,17 +11,17 @@ define i32 @rollable(i32* noalias nocapt
 .lr.ph:                                           ; preds = %0, %.lr.ph
   %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
   %2 = shl i64 %i.019, 2
-  %3 = getelementptr inbounds i32* %in, i64 %2
+  %3 = getelementptr inbounds i32, i32* %in, i64 %2
 ;CHECK:load <4 x i32>
   %4 = load i32* %3, align 4
   %5 = or i64 %2, 1
-  %6 = getelementptr inbounds i32* %in, i64 %5
+  %6 = getelementptr inbounds i32, i32* %in, i64 %5
   %7 = load i32* %6, align 4
   %8 = or i64 %2, 2
-  %9 = getelementptr inbounds i32* %in, i64 %8
+  %9 = getelementptr inbounds i32, i32* %in, i64 %8
   %10 = load i32* %9, align 4
   %11 = or i64 %2, 3
-  %12 = getelementptr inbounds i32* %in, i64 %11
+  %12 = getelementptr inbounds i32, i32* %in, i64 %11
   %13 = load i32* %12, align 4
 ;CHECK:mul <4 x i32>
   %14 = mul i32 %4, 7
@@ -33,14 +33,14 @@ define i32 @rollable(i32* noalias nocapt
   %19 = add i32 %18, 21
   %20 = mul i32 %13, 7
   %21 = add i32 %20, 28
-  %22 = getelementptr inbounds i32* %out, i64 %2
+  %22 = getelementptr inbounds i32, i32* %out, i64 %2
 ;CHECK:store <4 x i32>
   store i32 %15, i32* %22, align 4
-  %23 = getelementptr inbounds i32* %out, i64 %5
+  %23 = getelementptr inbounds i32, i32* %out, i64 %5
   store i32 %17, i32* %23, align 4
-  %24 = getelementptr inbounds i32* %out, i64 %8
+  %24 = getelementptr inbounds i32, i32* %out, i64 %8
   store i32 %19, i32* %24, align 4
-  %25 = getelementptr inbounds i32* %out, i64 %11
+  %25 = getelementptr inbounds i32, i32* %out, i64 %11
   store i32 %21, i32* %25, align 4
   %26 = add i64 %i.019, 1
   %exitcond = icmp eq i64 %26, %n
@@ -61,16 +61,16 @@ define i32 @unrollable(i32* %in, i32* %o
 .lr.ph:                                           ; preds = %0, %.lr.ph
   %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
   %2 = shl i64 %i.019, 2
-  %3 = getelementptr inbounds i32* %in, i64 %2
+  %3 = getelementptr inbounds i32, i32* %in, i64 %2
   %4 = load i32* %3, align 4
   %5 = or i64 %2, 1
-  %6 = getelementptr inbounds i32* %in, i64 %5
+  %6 = getelementptr inbounds i32, i32* %in, i64 %5
   %7 = load i32* %6, align 4
   %8 = or i64 %2, 2
-  %9 = getelementptr inbounds i32* %in, i64 %8
+  %9 = getelementptr inbounds i32, i32* %in, i64 %8
   %10 = load i32* %9, align 4
   %11 = or i64 %2, 3
-  %12 = getelementptr inbounds i32* %in, i64 %11
+  %12 = getelementptr inbounds i32, i32* %in, i64 %11
   %13 = load i32* %12, align 4
   %14 = mul i32 %4, 7
   %15 = add i32 %14, 7
@@ -80,14 +80,14 @@ define i32 @unrollable(i32* %in, i32* %o
   %19 = add i32 %18, 21
   %20 = mul i32 %13, 7
   %21 = add i32 %20, 28
-  %22 = getelementptr inbounds i32* %out, i64 %2
+  %22 = getelementptr inbounds i32, i32* %out, i64 %2
   store i32 %15, i32* %22, align 4
-  %23 = getelementptr inbounds i32* %out, i64 %5
+  %23 = getelementptr inbounds i32, i32* %out, i64 %5
   store i32 %17, i32* %23, align 4
   %barrier = call i32 @goo(i32 0)                      ; <---------------- memory barrier.
-  %24 = getelementptr inbounds i32* %out, i64 %8
+  %24 = getelementptr inbounds i32, i32* %out, i64 %8
   store i32 %19, i32* %24, align 4
-  %25 = getelementptr inbounds i32* %out, i64 %11
+  %25 = getelementptr inbounds i32, i32* %out, i64 %11
   store i32 %21, i32* %25, align 4
   %26 = add i64 %i.019, 1
   %exitcond = icmp eq i64 %26, %n

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/simplebb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/simplebb.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/simplebb.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/simplebb.ll Fri Feb 27 13:29:02 2015
@@ -12,13 +12,13 @@ entry:
   %i0 = load double* %a, align 8
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   store double %mul, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %mul5, double* %arrayidx5, align 8
   ret void
 }
@@ -32,14 +32,14 @@ entry:
   %i0 = load double* %a, align 8
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   %c = bitcast i8* %e to double*
   store double %mul, double* %c, align 8
-  %carrayidx5 = getelementptr inbounds i8* %e, i64 8
+  %carrayidx5 = getelementptr inbounds i8, i8* %e, i64 8
   %arrayidx5 = bitcast i8* %carrayidx5 to double*
   store double %mul5, double* %arrayidx5, align 8
   ret void
@@ -55,13 +55,13 @@ entry:
   %i0 = load volatile double* %a, align 8
   %i1 = load volatile double* %b, align 8
   %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   store double %mul, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %mul5, double* %arrayidx5, align 8
   ret void
 }
@@ -75,13 +75,13 @@ entry:
   %i0 = load double* %a, align 8
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   store volatile double %mul, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store volatile double %mul5, double* %arrayidx5, align 8
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/tiny-tree.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/tiny-tree.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/tiny-tree.ll Fri Feb 27 13:29:02 2015
@@ -19,12 +19,12 @@ for.body:
   %src.addr.013 = phi double* [ %add.ptr, %for.body ], [ %src, %entry ]
   %0 = load double* %src.addr.013, align 8
   store double %0, double* %dst.addr.014, align 8
-  %arrayidx2 = getelementptr inbounds double* %src.addr.013, i64 1
+  %arrayidx2 = getelementptr inbounds double, double* %src.addr.013, i64 1
   %1 = load double* %arrayidx2, align 8
-  %arrayidx3 = getelementptr inbounds double* %dst.addr.014, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %dst.addr.014, i64 1
   store double %1, double* %arrayidx3, align 8
-  %add.ptr = getelementptr inbounds double* %src.addr.013, i64 %i.015
-  %add.ptr4 = getelementptr inbounds double* %dst.addr.014, i64 %i.015
+  %add.ptr = getelementptr inbounds double, double* %src.addr.013, i64 %i.015
+  %add.ptr4 = getelementptr inbounds double, double* %dst.addr.014, i64 %i.015
   %inc = add i64 %i.015, 1
   %exitcond = icmp eq i64 %inc, %count
   br i1 %exitcond, label %for.end, label %for.body
@@ -49,20 +49,20 @@ for.body:
   %src.addr.021 = phi float* [ %add.ptr, %for.body ], [ %src, %entry ]
   %0 = load float* %src.addr.021, align 4
   store float %0, float* %dst.addr.022, align 4
-  %arrayidx2 = getelementptr inbounds float* %src.addr.021, i64 1
+  %arrayidx2 = getelementptr inbounds float, float* %src.addr.021, i64 1
   %1 = load float* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds float* %dst.addr.022, i64 1
+  %arrayidx3 = getelementptr inbounds float, float* %dst.addr.022, i64 1
   store float %1, float* %arrayidx3, align 4
-  %arrayidx4 = getelementptr inbounds float* %src.addr.021, i64 2
+  %arrayidx4 = getelementptr inbounds float, float* %src.addr.021, i64 2
   %2 = load float* %arrayidx4, align 4
-  %arrayidx5 = getelementptr inbounds float* %dst.addr.022, i64 2
+  %arrayidx5 = getelementptr inbounds float, float* %dst.addr.022, i64 2
   store float %2, float* %arrayidx5, align 4
-  %arrayidx6 = getelementptr inbounds float* %src.addr.021, i64 3
+  %arrayidx6 = getelementptr inbounds float, float* %src.addr.021, i64 3
   %3 = load float* %arrayidx6, align 4
-  %arrayidx7 = getelementptr inbounds float* %dst.addr.022, i64 3
+  %arrayidx7 = getelementptr inbounds float, float* %dst.addr.022, i64 3
   store float %3, float* %arrayidx7, align 4
-  %add.ptr = getelementptr inbounds float* %src.addr.021, i64 %i.023
-  %add.ptr8 = getelementptr inbounds float* %dst.addr.022, i64 %i.023
+  %add.ptr = getelementptr inbounds float, float* %src.addr.021, i64 %i.023
+  %add.ptr8 = getelementptr inbounds float, float* %dst.addr.022, i64 %i.023
   %inc = add i64 %i.023, 1
   %exitcond = icmp eq i64 %inc, %count
   br i1 %exitcond, label %for.end, label %for.body
@@ -87,12 +87,12 @@ for.body:
   %src.addr.013 = phi double* [ %add.ptr, %for.body ], [ %src, %entry ]
   %0 = load double* %src.addr.013, align 8
   store double %0, double* %dst.addr.014, align 8
-  %arrayidx2 = getelementptr inbounds double* %src.addr.013, i64 2
+  %arrayidx2 = getelementptr inbounds double, double* %src.addr.013, i64 2
   %1 = load double* %arrayidx2, align 8
-  %arrayidx3 = getelementptr inbounds double* %dst.addr.014, i64 1 
+  %arrayidx3 = getelementptr inbounds double, double* %dst.addr.014, i64 1 
   store double %1, double* %arrayidx3, align 8
-  %add.ptr = getelementptr inbounds double* %src.addr.013, i64 %i.015
-  %add.ptr4 = getelementptr inbounds double* %dst.addr.014, i64 %i.015
+  %add.ptr = getelementptr inbounds double, double* %src.addr.013, i64 %i.015
+  %add.ptr4 = getelementptr inbounds double, double* %dst.addr.014, i64 %i.015
   %inc = add i64 %i.015, 1
   %exitcond = icmp eq i64 %inc, %count
   br i1 %exitcond, label %for.end, label %for.body
@@ -117,20 +117,20 @@ for.body:
   %src.addr.021 = phi float* [ %add.ptr, %for.body ], [ %src, %entry ]
   %0 = load float* %src.addr.021, align 4
   store float %0, float* %dst.addr.022, align 4
-  %arrayidx2 = getelementptr inbounds float* %src.addr.021, i64 4 
+  %arrayidx2 = getelementptr inbounds float, float* %src.addr.021, i64 4 
   %1 = load float* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds float* %dst.addr.022, i64 1
+  %arrayidx3 = getelementptr inbounds float, float* %dst.addr.022, i64 1
   store float %1, float* %arrayidx3, align 4
-  %arrayidx4 = getelementptr inbounds float* %src.addr.021, i64 2
+  %arrayidx4 = getelementptr inbounds float, float* %src.addr.021, i64 2
   %2 = load float* %arrayidx4, align 4
-  %arrayidx5 = getelementptr inbounds float* %dst.addr.022, i64 2
+  %arrayidx5 = getelementptr inbounds float, float* %dst.addr.022, i64 2
   store float %2, float* %arrayidx5, align 4
-  %arrayidx6 = getelementptr inbounds float* %src.addr.021, i64 3
+  %arrayidx6 = getelementptr inbounds float, float* %src.addr.021, i64 3
   %3 = load float* %arrayidx6, align 4
-  %arrayidx7 = getelementptr inbounds float* %dst.addr.022, i64 3
+  %arrayidx7 = getelementptr inbounds float, float* %dst.addr.022, i64 3
   store float %3, float* %arrayidx7, align 4
-  %add.ptr = getelementptr inbounds float* %src.addr.021, i64 %i.023
-  %add.ptr8 = getelementptr inbounds float* %dst.addr.022, i64 %i.023
+  %add.ptr = getelementptr inbounds float, float* %src.addr.021, i64 %i.023
+  %add.ptr8 = getelementptr inbounds float, float* %dst.addr.022, i64 %i.023
   %inc = add i64 %i.023, 1
   %exitcond = icmp eq i64 %inc, %count
   br i1 %exitcond, label %for.end, label %for.body
@@ -143,13 +143,13 @@ for.end:
 ; CHECK-LABEL: store_splat
 ; CHECK: store <4 x float>
 define void @store_splat(float*, float) {
-  %3 = getelementptr inbounds float* %0, i64 0
+  %3 = getelementptr inbounds float, float* %0, i64 0
   store float %1, float* %3, align 4
-  %4 = getelementptr inbounds float* %0, i64 1
+  %4 = getelementptr inbounds float, float* %0, i64 1
   store float %1, float* %4, align 4
-  %5 = getelementptr inbounds float* %0, i64 2
+  %5 = getelementptr inbounds float, float* %0, i64 2
   store float %1, float* %5, align 4
-  %6 = getelementptr inbounds float* %0, i64 3
+  %6 = getelementptr inbounds float, float* %0, i64 3
   store float %1, float* %6, align 4
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/unreachable.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/unreachable.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/unreachable.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/unreachable.ll Fri Feb 27 13:29:02 2015
@@ -11,15 +11,15 @@ entry:
   br label %bb2
 
 bb1:                                    ; an unreachable block
-  %t3 = getelementptr inbounds i32* %x, i64 4
+  %t3 = getelementptr inbounds i32, i32* %x, i64 4
   %t4 = load i32* %t3, align 4
-  %t5 = getelementptr inbounds i32* %x, i64 5
+  %t5 = getelementptr inbounds i32, i32* %x, i64 5
   %t6 = load i32* %t5, align 4
   %bad = fadd float %bad, 0.000000e+00  ; <- an instruction with self dependency,
                                         ;    but legal in unreachable code
-  %t7 = getelementptr inbounds i32* %x, i64 6
+  %t7 = getelementptr inbounds i32, i32* %x, i64 6
   %t8 = load i32* %t7, align 4
-  %t9 = getelementptr inbounds i32* %x, i64 7
+  %t9 = getelementptr inbounds i32, i32* %x, i64 7
   %t10 = load i32* %t9, align 4
   br label %bb2
 
@@ -29,11 +29,11 @@ bb2:
   %t3.0 = phi i32 [ %t8, %bb1 ], [ 2, %entry ]
   %t4.0 = phi i32 [ %t10, %bb1 ], [ 2, %entry ]
   store i32 %t1.0, i32* %x, align 4
-  %t12 = getelementptr inbounds i32* %x, i64 1
+  %t12 = getelementptr inbounds i32, i32* %x, i64 1
   store i32 %t2.0, i32* %t12, align 4
-  %t13 = getelementptr inbounds i32* %x, i64 2
+  %t13 = getelementptr inbounds i32, i32* %x, i64 2
   store i32 %t3.0, i32* %t13, align 4
-  %t14 = getelementptr inbounds i32* %x, i64 3
+  %t14 = getelementptr inbounds i32, i32* %x, i64 3
   store i32 %t4.0, i32* %t14, align 4
   ret void
 }

Modified: llvm/trunk/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll Fri Feb 27 13:29:02 2015
@@ -11,13 +11,13 @@ entry:
   %i0 = load double* %a, align 8
   %i1 = load double* %b, align 8
   %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
   %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
   %i4 = load double* %arrayidx4, align 8
   %mul5 = fmul double %i3, %i4
   store double %mul, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
   store double %mul5, double* %arrayidx5, align 8
   ret void
 }

Modified: llvm/trunk/test/Transforms/SROA/address-spaces.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SROA/address-spaces.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SROA/address-spaces.ll (original)
+++ llvm/trunk/test/Transforms/SROA/address-spaces.ll Fri Feb 27 13:29:02 2015
@@ -60,7 +60,7 @@ for.end:
   %in = alloca %struct.struct_test_27.0.13, align 8
   %0 = bitcast %struct.struct_test_27.0.13* %in to [5 x i64]*
   store [5 x i64] %in.coerce, [5 x i64]* %0, align 8
-  %scevgep9 = getelementptr %struct.struct_test_27.0.13* %in, i32 0, i32 4, i32 0
+  %scevgep9 = getelementptr %struct.struct_test_27.0.13, %struct.struct_test_27.0.13* %in, i32 0, i32 4, i32 0
   %scevgep910 = bitcast i32* %scevgep9 to i8*
   call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* undef, i8* %scevgep910, i32 16, i32 4, i1 false)
   ret void

Modified: llvm/trunk/test/Transforms/SROA/alignment.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SROA/alignment.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SROA/alignment.ll (original)
+++ llvm/trunk/test/Transforms/SROA/alignment.ll Fri Feb 27 13:29:02 2015
@@ -5,21 +5,21 @@ declare void @llvm.memcpy.p0i8.p0i8.i32(
 
 define void @test1({ i8, i8 }* %a, { i8, i8 }* %b) {
 ; CHECK-LABEL: @test1(
-; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }* %a, i64 0, i32 0
+; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 0
 ; CHECK: %[[a0:.*]] = load i8* %[[gep_a0]], align 16
-; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }* %a, i64 0, i32 1
+; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 1
 ; CHECK: %[[a1:.*]] = load i8* %[[gep_a1]], align 1
-; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }* %b, i64 0, i32 0
+; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 0
 ; CHECK: store i8 %[[a0]], i8* %[[gep_b0]], align 16
-; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }* %b, i64 0, i32 1
+; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 1
 ; CHECK: store i8 %[[a1]], i8* %[[gep_b1]], align 1
 ; CHECK: ret void
 
 entry:
   %alloca = alloca { i8, i8 }, align 16
-  %gep_a = getelementptr { i8, i8 }* %a, i32 0, i32 0
-  %gep_alloca = getelementptr { i8, i8 }* %alloca, i32 0, i32 0
-  %gep_b = getelementptr { i8, i8 }* %b, i32 0, i32 0
+  %gep_a = getelementptr { i8, i8 }, { i8, i8 }* %a, i32 0, i32 0
+  %gep_alloca = getelementptr { i8, i8 }, { i8, i8 }* %alloca, i32 0, i32 0
+  %gep_b = getelementptr { i8, i8 }, { i8, i8 }* %b, i32 0, i32 0
 
   store i8 420, i8* %gep_alloca, align 16
 
@@ -37,10 +37,10 @@ define void @test2() {
 
 entry:
   %a = alloca { i8, i8, i8, i8 }, align 2
-  %gep1 = getelementptr { i8, i8, i8, i8 }* %a, i32 0, i32 1
+  %gep1 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 1
   %cast1 = bitcast i8* %gep1 to i16*
   store volatile i16 0, i16* %cast1
-  %gep2 = getelementptr { i8, i8, i8, i8 }* %a, i32 0, i32 2
+  %gep2 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 2
   %result = load i8* %gep2
   store i8 42, i8* %gep2
   ret void
@@ -79,7 +79,7 @@ entry:
   %a_raw = bitcast { i8*, i8*, i8* }* %a to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a_raw, i8* %x, i32 22, i32 8, i1 false)
   %b_raw = bitcast { i8*, i8*, i8* }* %b to i8*
-  %b_gep = getelementptr i8* %b_raw, i32 6
+  %b_gep = getelementptr i8, i8* %b_raw, i32 6
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b_gep, i8* %x, i32 18, i32 2, i1 false)
   ret void
 }
@@ -101,18 +101,18 @@ define void @test5() {
 
 entry:
   %a = alloca [18 x i8]
-  %raw1 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 0
+  %raw1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 0
   %ptr1 = bitcast i8* %raw1 to double*
   store volatile double 0.0, double* %ptr1, align 1
-  %weird_gep1 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 7
+  %weird_gep1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 7
   %weird_cast1 = bitcast i8* %weird_gep1 to i16*
   %weird_load1 = load volatile i16* %weird_cast1, align 1
 
-  %raw2 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 9
+  %raw2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 9
   %ptr2 = bitcast i8* %raw2 to double*
   %d1 = load double* %ptr1, align 1
   store volatile double %d1, double* %ptr2, align 1
-  %weird_gep2 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 16
+  %weird_gep2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 16
   %weird_cast2 = bitcast i8* %weird_gep2 to i16*
   %weird_load2 = load volatile i16* %weird_cast2, align 1
 
@@ -130,11 +130,11 @@ define void @test6() {
 
 entry:
   %a = alloca [16 x i8]
-  %raw1 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 0
+  %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
   %ptr1 = bitcast i8* %raw1 to double*
   store volatile double 0.0, double* %ptr1, align 1
 
-  %raw2 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 8
+  %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
   %ptr2 = bitcast i8* %raw2 to double*
   %val = load double* %ptr1, align 1
   store volatile double %val, double* %ptr2, align 1
@@ -150,9 +150,9 @@ define void @test7(i8* %out) {
 
 entry:
   %a = alloca [16 x i8]
-  %raw1 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 0
+  %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
   %ptr1 = bitcast i8* %raw1 to double*
-  %raw2 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 8
+  %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
   %ptr2 = bitcast i8* %raw2 to double*
 
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %raw1, i8* %out, i32 16, i32 0, i1 false)

Modified: llvm/trunk/test/Transforms/SROA/basictest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SROA/basictest.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SROA/basictest.ll (original)
+++ llvm/trunk/test/Transforms/SROA/basictest.ll Fri Feb 27 13:29:02 2015
@@ -43,7 +43,7 @@ define i32 @test1() {
 
 entry:
   %X = alloca { i32, float }
-  %Y = getelementptr { i32, float }* %X, i64 0, i32 0
+  %Y = getelementptr { i32, float }, { i32, float }* %X, i64 0, i32 0
   store i32 0, i32* %Y
   %Z = load i32* %Y
   ret i32 %Z
@@ -79,48 +79,48 @@ entry:
 ; CHECK-NEXT: %[[test3_a6:.*]] = alloca [7 x i8]
 ; CHECK-NEXT: %[[test3_a7:.*]] = alloca [85 x i8]
 
-  %b = getelementptr [300 x i8]* %a, i64 0, i64 0
+  %b = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 0
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* %src, i32 300, i32 1, i1 false)
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a1]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a1]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %src, i32 42
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 42
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 42
 ; CHECK-NEXT: %[[test3_r1:.*]] = load i8* %[[gep]]
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 43
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [99 x i8]* %[[test3_a2]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 43
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [99 x i8], [99 x i8]* %[[test3_a2]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 99
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 142
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 142
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 16
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 158
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 158
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a4]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 42
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 200
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 200
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 207
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 207
 ; CHECK-NEXT: %[[test3_r2:.*]] = load i8* %[[gep]]
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 208
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 208
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 215
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8]* %[[test3_a7]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 215
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8], [85 x i8]* %[[test3_a7]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 85
 
   ; Clobber a single element of the array, this should be promotable.
-  %c = getelementptr [300 x i8]* %a, i64 0, i64 42
+  %c = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 42
   store i8 0, i8* %c
 
   ; Make a sequence of overlapping stores to the array. These overlap both in
   ; forward strides and in shrinking accesses.
-  %overlap.1.i8 = getelementptr [300 x i8]* %a, i64 0, i64 142
-  %overlap.2.i8 = getelementptr [300 x i8]* %a, i64 0, i64 143
-  %overlap.3.i8 = getelementptr [300 x i8]* %a, i64 0, i64 144
-  %overlap.4.i8 = getelementptr [300 x i8]* %a, i64 0, i64 145
-  %overlap.5.i8 = getelementptr [300 x i8]* %a, i64 0, i64 146
-  %overlap.6.i8 = getelementptr [300 x i8]* %a, i64 0, i64 147
-  %overlap.7.i8 = getelementptr [300 x i8]* %a, i64 0, i64 148
-  %overlap.8.i8 = getelementptr [300 x i8]* %a, i64 0, i64 149
-  %overlap.9.i8 = getelementptr [300 x i8]* %a, i64 0, i64 150
+  %overlap.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 142
+  %overlap.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 143
+  %overlap.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 144
+  %overlap.4.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 145
+  %overlap.5.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 146
+  %overlap.6.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 147
+  %overlap.7.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 148
+  %overlap.8.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 149
+  %overlap.9.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 150
   %overlap.1.i16 = bitcast i8* %overlap.1.i8 to i16*
   %overlap.1.i32 = bitcast i8* %overlap.1.i8 to i32*
   %overlap.1.i64 = bitcast i8* %overlap.1.i8 to i64*
@@ -133,7 +133,7 @@ entry:
   %overlap.8.i64 = bitcast i8* %overlap.8.i8 to i64*
   %overlap.9.i64 = bitcast i8* %overlap.9.i8 to i64*
   store i8 1, i8* %overlap.1.i8
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 0
 ; CHECK-NEXT: store i8 1, i8* %[[gep]]
   store i16 1, i16* %overlap.1.i16
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i16*
@@ -145,48 +145,48 @@ entry:
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i64*
 ; CHECK-NEXT: store i64 1, i64* %[[bitcast]]
   store i64 2, i64* %overlap.2.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 1
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
 ; CHECK-NEXT: store i64 2, i64* %[[bitcast]]
   store i64 3, i64* %overlap.3.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 2
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
 ; CHECK-NEXT: store i64 3, i64* %[[bitcast]]
   store i64 4, i64* %overlap.4.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 3
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 3
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
 ; CHECK-NEXT: store i64 4, i64* %[[bitcast]]
   store i64 5, i64* %overlap.5.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 4
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 4
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
 ; CHECK-NEXT: store i64 5, i64* %[[bitcast]]
   store i64 6, i64* %overlap.6.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 5
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 5
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
 ; CHECK-NEXT: store i64 6, i64* %[[bitcast]]
   store i64 7, i64* %overlap.7.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 6
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 6
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
 ; CHECK-NEXT: store i64 7, i64* %[[bitcast]]
   store i64 8, i64* %overlap.8.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 7
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 7
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
 ; CHECK-NEXT: store i64 8, i64* %[[bitcast]]
   store i64 9, i64* %overlap.9.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 8
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 8
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
 ; CHECK-NEXT: store i64 9, i64* %[[bitcast]]
 
   ; Make two sequences of overlapping stores with more gaps and irregularities.
-  %overlap2.1.0.i8 = getelementptr [300 x i8]* %a, i64 0, i64 200
-  %overlap2.1.1.i8 = getelementptr [300 x i8]* %a, i64 0, i64 201
-  %overlap2.1.2.i8 = getelementptr [300 x i8]* %a, i64 0, i64 202
-  %overlap2.1.3.i8 = getelementptr [300 x i8]* %a, i64 0, i64 203
-
-  %overlap2.2.0.i8 = getelementptr [300 x i8]* %a, i64 0, i64 208
-  %overlap2.2.1.i8 = getelementptr [300 x i8]* %a, i64 0, i64 209
-  %overlap2.2.2.i8 = getelementptr [300 x i8]* %a, i64 0, i64 210
-  %overlap2.2.3.i8 = getelementptr [300 x i8]* %a, i64 0, i64 211
+  %overlap2.1.0.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 200
+  %overlap2.1.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 201
+  %overlap2.1.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 202
+  %overlap2.1.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 203
+
+  %overlap2.2.0.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 208
+  %overlap2.2.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 209
+  %overlap2.2.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 210
+  %overlap2.2.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 211
 
   %overlap2.1.0.i16 = bitcast i8* %overlap2.1.0.i8 to i16*
   %overlap2.1.0.i32 = bitcast i8* %overlap2.1.0.i8 to i32*
@@ -194,7 +194,7 @@ entry:
   %overlap2.1.2.i32 = bitcast i8* %overlap2.1.2.i8 to i32*
   %overlap2.1.3.i32 = bitcast i8* %overlap2.1.3.i8 to i32*
   store i8 1,  i8*  %overlap2.1.0.i8
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
 ; CHECK-NEXT: store i8 1, i8* %[[gep]]
   store i16 1, i16* %overlap2.1.0.i16
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a5]] to i16*
@@ -203,15 +203,15 @@ entry:
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a5]] to i32*
 ; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
   store i32 2, i32* %overlap2.1.1.i32
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 1
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
 ; CHECK-NEXT: store i32 2, i32* %[[bitcast]]
   store i32 3, i32* %overlap2.1.2.i32
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 2
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
 ; CHECK-NEXT: store i32 3, i32* %[[bitcast]]
   store i32 4, i32* %overlap2.1.3.i32
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 3
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 3
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
 ; CHECK-NEXT: store i32 4, i32* %[[bitcast]]
 
@@ -224,78 +224,78 @@ entry:
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a6]] to i32*
 ; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
   store i8 1,  i8*  %overlap2.2.1.i8
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
 ; CHECK-NEXT: store i8 1, i8* %[[gep]]
   store i16 1, i16* %overlap2.2.1.i16
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
 ; CHECK-NEXT: store i16 1, i16* %[[bitcast]]
   store i32 1, i32* %overlap2.2.1.i32
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
 ; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
   store i32 3, i32* %overlap2.2.2.i32
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 2
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
 ; CHECK-NEXT: store i32 3, i32* %[[bitcast]]
   store i32 4, i32* %overlap2.2.3.i32
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 3
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 3
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
 ; CHECK-NEXT: store i32 4, i32* %[[bitcast]]
 
-  %overlap2.prefix = getelementptr i8* %overlap2.1.1.i8, i64 -4
+  %overlap2.prefix = getelementptr i8, i8* %overlap2.1.1.i8, i64 -4
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.prefix, i8* %src, i32 8, i32 1, i1 false)
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a4]], i64 0, i64 39
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a4]], i64 0, i64 39
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %src, i32 3
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 3
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 3
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 5
 
   ; Bridge between the overlapping areas
   call void @llvm.memset.p0i8.i32(i8* %overlap2.1.2.i8, i8 42, i32 8, i32 1, i1 false)
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 2
 ; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* %[[gep]], i8 42, i32 5
 ; ...promoted i8 store...
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* %[[gep]], i8 42, i32 2
 
   ; Entirely within the second overlap.
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.1.i8, i8* %src, i32 5, i32 1, i1 false)
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep]], i8* %src, i32 5
 
   ; Trailing past the second overlap.
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.2.i8, i8* %src, i32 8, i32 1, i1 false)
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 2
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep]], i8* %src, i32 5
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 5
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8]* %[[test3_a7]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 5
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8], [85 x i8]* %[[test3_a7]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 3
 
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 300, i32 1, i1 false)
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a1]], i64 0, i64 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a1]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[gep]], i32 42
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 42
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 42
 ; CHECK-NEXT: store i8 0, i8* %[[gep]]
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 43
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [99 x i8]* %[[test3_a2]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 43
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [99 x i8], [99 x i8]* %[[test3_a2]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 99
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 142
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 142
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 16
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 158
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 158
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a4]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 42
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 200
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 200
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 207
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 207
 ; CHECK-NEXT: store i8 42, i8* %[[gep]]
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 208
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 208
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 215
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [85 x i8]* %[[test3_a7]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 215
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [85 x i8], [85 x i8]* %[[test3_a7]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 85
 
   ret void
@@ -314,90 +314,90 @@ entry:
 ; CHECK-NEXT: %[[test4_a5:.*]] = alloca [7 x i8]
 ; CHECK-NEXT: %[[test4_a6:.*]] = alloca [40 x i8]
 
-  %b = getelementptr [100 x i8]* %a, i64 0, i64 0
+  %b = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 0
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* %src, i32 100, i32 1, i1 false)
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8]* %[[test4_a1]], i64 0, i64 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8], [20 x i8]* %[[test4_a1]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep]], i8* %src, i32 20
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 20
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 20
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
 ; CHECK-NEXT: %[[test4_r1:.*]] = load i16* %[[bitcast]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 22
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 22
 ; CHECK-NEXT: %[[test4_r2:.*]] = load i8* %[[gep]]
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 23
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a2]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 23
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a2]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 30
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [10 x i8]* %[[test4_a3]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 30
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [10 x i8], [10 x i8]* %[[test4_a3]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 10
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 40
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 40
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
 ; CHECK-NEXT: %[[test4_r3:.*]] = load i16* %[[bitcast]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 42
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 42
 ; CHECK-NEXT: %[[test4_r4:.*]] = load i8* %[[gep]]
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 43
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 43
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 50
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 50
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
 ; CHECK-NEXT: %[[test4_r5:.*]] = load i16* %[[bitcast]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 52
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 52
 ; CHECK-NEXT: %[[test4_r6:.*]] = load i8* %[[gep]]
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 53
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 53
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a5]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 60
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [40 x i8]* %[[test4_a6]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 60
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [40 x i8], [40 x i8]* %[[test4_a6]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 40
 
-  %a.src.1 = getelementptr [100 x i8]* %a, i64 0, i64 20
-  %a.dst.1 = getelementptr [100 x i8]* %a, i64 0, i64 40
+  %a.src.1 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 20
+  %a.dst.1 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 40
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.1, i32 10, i32 1, i1 false)
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a4]], i64 0, i64 0
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a2]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a2]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
 
   ; Clobber a single element of the array, this should be promotable, and be deleted.
-  %c = getelementptr [100 x i8]* %a, i64 0, i64 42
+  %c = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 42
   store i8 0, i8* %c
 
-  %a.src.2 = getelementptr [100 x i8]* %a, i64 0, i64 50
+  %a.src.2 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 50
   call void @llvm.memmove.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.2, i32 10, i32 1, i1 false)
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a4]], i64 0, i64 0
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a5]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
 
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 100, i32 1, i1 false)
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8]* %[[test4_a1]], i64 0, i64 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8], [20 x i8]* %[[test4_a1]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[gep]], i32 20
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 20
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 20
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
 ; CHECK-NEXT: store i16 %[[test4_r1]], i16* %[[bitcast]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 22
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 22
 ; CHECK-NEXT: store i8 %[[test4_r2]], i8* %[[gep]]
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 23
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a2]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 23
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a2]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 30
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [10 x i8]* %[[test4_a3]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 30
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [10 x i8], [10 x i8]* %[[test4_a3]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 10
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 40
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 40
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
 ; CHECK-NEXT: store i16 %[[test4_r5]], i16* %[[bitcast]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 42
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 42
 ; CHECK-NEXT: store i8 %[[test4_r6]], i8* %[[gep]]
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 43
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 43
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 50
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 50
 ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
 ; CHECK-NEXT: store i16 %[[test4_r5]], i16* %[[bitcast]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 52
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 52
 ; CHECK-NEXT: store i8 %[[test4_r6]], i8* %[[gep]]
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 53
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 53
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a5]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 60
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [40 x i8]* %[[test4_a6]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 60
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [40 x i8], [40 x i8]* %[[test4_a6]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 40
 
   ret void
@@ -420,7 +420,7 @@ entry:
   %a = alloca [4 x i8]
   %fptr = bitcast [4 x i8]* %a to float*
   store float 0.0, float* %fptr
-  %ptr = getelementptr [4 x i8]* %a, i32 0, i32 2
+  %ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 2
   %iptr = bitcast i8* %ptr to i16*
   %val = load i16* %iptr
   ret i16 %val
@@ -435,7 +435,7 @@ define i32 @test6() {
 
 entry:
   %a = alloca [4 x i8]
-  %ptr = getelementptr [4 x i8]* %a, i32 0, i32 0
+  %ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 0
   call void @llvm.memset.p0i8.i32(i8* %ptr, i8 42, i32 4, i32 1, i1 true)
   %iptr = bitcast i8* %ptr to i32*
   %val = load i32* %iptr
@@ -455,7 +455,7 @@ define void @test7(i8* %src, i8* %dst) {
 
 entry:
   %a = alloca [4 x i8]
-  %ptr = getelementptr [4 x i8]* %a, i32 0, i32 0
+  %ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 0
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i32 1, i1 true)
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i32 1, i1 true)
   ret void
@@ -471,22 +471,22 @@ entry:
   %new = alloca %S2
 ; CHECK-NOT: alloca
 
-  %s2.next.ptr = getelementptr %S2* %s2, i64 0, i32 1
+  %s2.next.ptr = getelementptr %S2, %S2* %s2, i64 0, i32 1
   %s2.next = load %S2** %s2.next.ptr
-; CHECK:      %[[gep:.*]] = getelementptr %S2* %s2, i64 0, i32 1
+; CHECK:      %[[gep:.*]] = getelementptr %S2, %S2* %s2, i64 0, i32 1
 ; CHECK-NEXT: %[[next:.*]] = load %S2** %[[gep]]
 
-  %s2.next.s1.ptr = getelementptr %S2* %s2.next, i64 0, i32 0
+  %s2.next.s1.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 0
   %s2.next.s1 = load %S1** %s2.next.s1.ptr
-  %new.s1.ptr = getelementptr %S2* %new, i64 0, i32 0
+  %new.s1.ptr = getelementptr %S2, %S2* %new, i64 0, i32 0
   store %S1* %s2.next.s1, %S1** %new.s1.ptr
-  %s2.next.next.ptr = getelementptr %S2* %s2.next, i64 0, i32 1
+  %s2.next.next.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 1
   %s2.next.next = load %S2** %s2.next.next.ptr
-  %new.next.ptr = getelementptr %S2* %new, i64 0, i32 1
+  %new.next.ptr = getelementptr %S2, %S2* %new, i64 0, i32 1
   store %S2* %s2.next.next, %S2** %new.next.ptr
-; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2* %[[next]], i64 0, i32 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2, %S2* %[[next]], i64 0, i32 0
 ; CHECK-NEXT: %[[next_s1:.*]] = load %S1** %[[gep]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2* %[[next]], i64 0, i32 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2, %S2* %[[next]], i64 0, i32 1
 ; CHECK-NEXT: %[[next_next:.*]] = load %S2** %[[gep]]
 
   %new.s1 = load %S1** %new.s1.ptr
@@ -522,14 +522,14 @@ define i64 @test9() {
 
 entry:
   %a = alloca { [3 x i8] }, align 8
-  %gep1 = getelementptr inbounds { [3 x i8] }* %a, i32 0, i32 0, i32 0
+  %gep1 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 0
   store i8 0, i8* %gep1, align 1
-  %gep2 = getelementptr inbounds { [3 x i8] }* %a, i32 0, i32 0, i32 1
+  %gep2 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 1
   store i8 0, i8* %gep2, align 1
-  %gep3 = getelementptr inbounds { [3 x i8] }* %a, i32 0, i32 0, i32 2
+  %gep3 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 2
   store i8 26, i8* %gep3, align 1
   %cast = bitcast { [3 x i8] }* %a to { i64 }*
-  %elt = getelementptr inbounds { i64 }* %cast, i32 0, i32 0
+  %elt = getelementptr inbounds { i64 }, { i64 }* %cast, i32 0, i32 0
   %load = load i64* %elt
   %result = and i64 %load, 16777215
   ret i64 %result
@@ -542,7 +542,7 @@ define %S2* @test10() {
 
 entry:
   %a = alloca [8 x i8]
-  %ptr = getelementptr [8 x i8]* %a, i32 0, i32 0
+  %ptr = getelementptr [8 x i8], [8 x i8]* %a, i32 0, i32 0
   call void @llvm.memset.p0i8.i32(i8* %ptr, i8 0, i32 8, i32 1, i1 false)
   %s2ptrptr = bitcast i8* %ptr to %S2**
   %s2ptr = load %S2** %s2ptrptr
@@ -559,13 +559,13 @@ entry:
   br i1 undef, label %good, label %bad
 
 good:
-  %Y = getelementptr i32* %X, i64 0
+  %Y = getelementptr i32, i32* %X, i64 0
   store i32 0, i32* %Y
   %Z = load i32* %Y
   ret i32 %Z
 
 bad:
-  %Y2 = getelementptr i32* %X, i64 1
+  %Y2 = getelementptr i32, i32* %X, i64 1
   store i32 0, i32* %Y2
   %Z2 = load i32* %Y2
   ret i32 %Z2
@@ -582,11 +582,11 @@ entry:
   %b = alloca [3 x i8]
 ; CHECK-NOT: alloca
 
-  %a0ptr = getelementptr [3 x i8]* %a, i64 0, i32 0
+  %a0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
   store i8 0, i8* %a0ptr
-  %a1ptr = getelementptr [3 x i8]* %a, i64 0, i32 1
+  %a1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
   store i8 0, i8* %a1ptr
-  %a2ptr = getelementptr [3 x i8]* %a, i64 0, i32 2
+  %a2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
   store i8 0, i8* %a2ptr
   %aiptr = bitcast [3 x i8]* %a to i24*
   %ai = load i24* %aiptr
@@ -606,11 +606,11 @@ entry:
 
   %biptr = bitcast [3 x i8]* %b to i24*
   store i24 %ai, i24* %biptr
-  %b0ptr = getelementptr [3 x i8]* %b, i64 0, i32 0
+  %b0ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 0
   %b0 = load i8* %b0ptr
-  %b1ptr = getelementptr [3 x i8]* %b, i64 0, i32 1
+  %b1ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 1
   %b1 = load i8* %b1ptr
-  %b2ptr = getelementptr [3 x i8]* %b, i64 0, i32 2
+  %b2ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 2
   %b2 = load i8* %b2ptr
 ; CHECK-NOT: store
 ; CHECK-NOT: load
@@ -638,14 +638,14 @@ define i32 @test13() {
 
 entry:
   %a = alloca [3 x i8], align 2
-  %b0ptr = getelementptr [3 x i8]* %a, i64 0, i32 0
+  %b0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
   store i8 0, i8* %b0ptr
-  %b1ptr = getelementptr [3 x i8]* %a, i64 0, i32 1
+  %b1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
   store i8 0, i8* %b1ptr
-  %b2ptr = getelementptr [3 x i8]* %a, i64 0, i32 2
+  %b2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
   store i8 0, i8* %b2ptr
   %iptrcast = bitcast [3 x i8]* %a to i16*
-  %iptrgep = getelementptr i16* %iptrcast, i64 1
+  %iptrgep = getelementptr i16, i16* %iptrcast, i64 1
   %i = load i16* %iptrgep
   %ret = zext i16 %i to i32
   ret i32 %ret
@@ -666,20 +666,20 @@ entry:
   %a = alloca %test14.struct
   %p = alloca %test14.struct*
   %0 = bitcast %test14.struct* %a to i8*
-  %1 = getelementptr i8* %0, i64 12
+  %1 = getelementptr i8, i8* %0, i64 12
   %2 = bitcast i8* %1 to %test14.struct*
-  %3 = getelementptr inbounds %test14.struct* %2, i32 0, i32 0
-  %4 = getelementptr inbounds %test14.struct* %a, i32 0, i32 0
+  %3 = getelementptr inbounds %test14.struct, %test14.struct* %2, i32 0, i32 0
+  %4 = getelementptr inbounds %test14.struct, %test14.struct* %a, i32 0, i32 0
   %5 = bitcast [3 x i32]* %3 to i32*
   %6 = bitcast [3 x i32]* %4 to i32*
   %7 = load i32* %6, align 4
   store i32 %7, i32* %5, align 4
-  %8 = getelementptr inbounds i32* %5, i32 1
-  %9 = getelementptr inbounds i32* %6, i32 1
+  %8 = getelementptr inbounds i32, i32* %5, i32 1
+  %9 = getelementptr inbounds i32, i32* %6, i32 1
   %10 = load i32* %9, align 4
   store i32 %10, i32* %8, align 4
-  %11 = getelementptr inbounds i32* %5, i32 2
-  %12 = getelementptr inbounds i32* %6, i32 2
+  %11 = getelementptr inbounds i32, i32* %5, i32 2
+  %12 = getelementptr inbounds i32, i32* %6, i32 2
   %13 = load i32* %12, align 4
   store i32 %13, i32* %11, align 4
   ret void
@@ -707,25 +707,25 @@ loop:
 
   store i64 1879048192, i64* %l0, align 8
   %bc0 = bitcast i64* %l0 to i8*
-  %gep0 = getelementptr i8* %bc0, i64 3
+  %gep0 = getelementptr i8, i8* %bc0, i64 3
   %dead0 = bitcast i8* %gep0 to i64*
 
   store i64 1879048192, i64* %l1, align 8
   %bc1 = bitcast i64* %l1 to i8*
-  %gep1 = getelementptr i8* %bc1, i64 3
-  %dead1 = getelementptr i8* %gep1, i64 1
+  %gep1 = getelementptr i8, i8* %bc1, i64 3
+  %dead1 = getelementptr i8, i8* %gep1, i64 1
 
   store i64 1879048192, i64* %l2, align 8
   %bc2 = bitcast i64* %l2 to i8*
-  %gep2.1 = getelementptr i8* %bc2, i64 1
-  %gep2.2 = getelementptr i8* %bc2, i64 3
+  %gep2.1 = getelementptr i8, i8* %bc2, i64 1
+  %gep2.2 = getelementptr i8, i8* %bc2, i64 3
   ; Note that this select should get visited multiple times due to using two
   ; different GEPs off the same alloca. We should only delete it once.
   %dead2 = select i1 %flag, i8* %gep2.1, i8* %gep2.2
 
   store i64 1879048192, i64* %l3, align 8
   %bc3 = bitcast i64* %l3 to i8*
-  %gep3 = getelementptr i8* %bc3, i64 3
+  %gep3 = getelementptr i8, i8* %bc3, i64 3
 
   br label %loop
 }
@@ -742,7 +742,7 @@ define void @test16(i8* %src, i8* %dst)
 
 entry:
   %a = alloca [3 x i8]
-  %ptr = getelementptr [3 x i8]* %a, i32 0, i32 0
+  %ptr = getelementptr [3 x i8], [3 x i8]* %a, i32 0, i32 0
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i32 1, i1 false)
   %cast = bitcast i8* %ptr to i24*
   store i24 0, i24* %cast
@@ -755,14 +755,14 @@ define void @test17(i8* %src, i8* %dst)
 ; the alloca.
 ; CHECK-LABEL: @test17(
 ; CHECK:      %[[a:.*]] = alloca [3 x i8]
-; CHECK-NEXT: %[[ptr:.*]] = getelementptr [3 x i8]* %[[a]], i32 0, i32 0
+; CHECK-NEXT: %[[ptr:.*]] = getelementptr [3 x i8], [3 x i8]* %[[a]], i32 0, i32 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[ptr]], i8* %src,
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[ptr]],
 ; CHECK-NEXT: ret void
 
 entry:
   %a = alloca [3 x i8]
-  %ptr = getelementptr [3 x i8]* %a, i32 0, i32 0
+  %ptr = getelementptr [3 x i8], [3 x i8]* %a, i32 0, i32 0
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i32 1, i1 true)
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i32 1, i1 true)
   ret void
@@ -774,27 +774,27 @@ define void @test18(i8* %src, i8* %dst,
 ; the variable sized intrinsic.
 ; CHECK-LABEL: @test18(
 ; CHECK:      %[[a:.*]] = alloca [34 x i8]
-; CHECK:      %[[srcgep1:.*]] = getelementptr inbounds i8* %src, i64 4
+; CHECK:      %[[srcgep1:.*]] = getelementptr inbounds i8, i8* %src, i64 4
 ; CHECK-NEXT: %[[srccast1:.*]] = bitcast i8* %[[srcgep1]] to i32*
 ; CHECK-NEXT: %[[srcload:.*]] = load i32* %[[srccast1]]
-; CHECK-NEXT: %[[agep1:.*]] = getelementptr inbounds [34 x i8]* %[[a]], i64 0, i64 0
+; CHECK-NEXT: %[[agep1:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[agep1]], i8* %src, i32 %size,
-; CHECK-NEXT: %[[agep2:.*]] = getelementptr inbounds [34 x i8]* %[[a]], i64 0, i64 0
+; CHECK-NEXT: %[[agep2:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* %[[agep2]], i8 42, i32 %size,
 ; CHECK-NEXT: %[[dstcast1:.*]] = bitcast i8* %dst to i32*
 ; CHECK-NEXT: store i32 42, i32* %[[dstcast1]]
-; CHECK-NEXT: %[[dstgep1:.*]] = getelementptr inbounds i8* %dst, i64 4
+; CHECK-NEXT: %[[dstgep1:.*]] = getelementptr inbounds i8, i8* %dst, i64 4
 ; CHECK-NEXT: %[[dstcast2:.*]] = bitcast i8* %[[dstgep1]] to i32*
 ; CHECK-NEXT: store i32 %[[srcload]], i32* %[[dstcast2]]
-; CHECK-NEXT: %[[agep3:.*]] = getelementptr inbounds [34 x i8]* %[[a]], i64 0, i64 0
+; CHECK-NEXT: %[[agep3:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[agep3]], i32 %size,
 ; CHECK-NEXT: ret void
 
 entry:
   %a = alloca [42 x i8]
-  %ptr = getelementptr [42 x i8]* %a, i32 0, i32 0
+  %ptr = getelementptr [42 x i8], [42 x i8]* %a, i32 0, i32 0
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 8, i32 1, i1 false)
-  %ptr2 = getelementptr [42 x i8]* %a, i32 0, i32 8
+  %ptr2 = getelementptr [42 x i8], [42 x i8]* %a, i32 0, i32 8
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr2, i8* %src, i32 %size, i32 1, i1 false)
   call void @llvm.memset.p0i8.i32(i8* %ptr2, i8 42, i32 %size, i32 1, i1 false)
   %cast = bitcast i8* %ptr to i32*
@@ -820,7 +820,7 @@ entry:
   %cast1 = bitcast %opaque* %x to i8*
   %cast2 = bitcast { i64, i8* }* %a to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast2, i8* %cast1, i32 16, i32 1, i1 false)
-  %gep = getelementptr inbounds { i64, i8* }* %a, i32 0, i32 0
+  %gep = getelementptr inbounds { i64, i8* }, { i64, i8* }* %a, i32 0, i32 0
   %val = load i64* %gep
   ret i32 undef
 }
@@ -836,13 +836,13 @@ define i32 @test20() {
 
 entry:
   %a = alloca [3 x i32]
-  %gep1 = getelementptr [3 x i32]* %a, i32 0, i32 0
+  %gep1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 0
   store i32 1, i32* %gep1
-  %gep2.1 = getelementptr [3 x i32]* %a, i32 0, i32 -2
-  %gep2.2 = getelementptr i32* %gep2.1, i32 3
+  %gep2.1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 -2
+  %gep2.2 = getelementptr i32, i32* %gep2.1, i32 3
   store i32 2, i32* %gep2.2
-  %gep3.1 = getelementptr [3 x i32]* %a, i32 0, i32 14
-  %gep3.2 = getelementptr i32* %gep3.1, i32 -12
+  %gep3.1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 14
+  %gep3.2 = getelementptr i32, i32* %gep3.1, i32 -12
   store i32 3, i32* %gep3.2
 
   %load1 = load i32* %gep1
@@ -865,19 +865,19 @@ define i8 @test21() {
 
 entry:
   %a = alloca [2305843009213693951 x i8]
-  %gep0 = getelementptr [2305843009213693951 x i8]* %a, i64 0, i64 2305843009213693949
+  %gep0 = getelementptr [2305843009213693951 x i8], [2305843009213693951 x i8]* %a, i64 0, i64 2305843009213693949
   store i8 255, i8* %gep0
-  %gep1 = getelementptr [2305843009213693951 x i8]* %a, i64 0, i64 -9223372036854775807
-  %gep2 = getelementptr i8* %gep1, i64 -1
+  %gep1 = getelementptr [2305843009213693951 x i8], [2305843009213693951 x i8]* %a, i64 0, i64 -9223372036854775807
+  %gep2 = getelementptr i8, i8* %gep1, i64 -1
   call void @llvm.memset.p0i8.i64(i8* %gep2, i8 0, i64 18446744073709551615, i32 1, i1 false)
-  %gep3 = getelementptr i8* %gep1, i64 9223372036854775807
-  %gep4 = getelementptr i8* %gep3, i64 9223372036854775807
-  %gep5 = getelementptr i8* %gep4, i64 -6917529027641081857
+  %gep3 = getelementptr i8, i8* %gep1, i64 9223372036854775807
+  %gep4 = getelementptr i8, i8* %gep3, i64 9223372036854775807
+  %gep5 = getelementptr i8, i8* %gep4, i64 -6917529027641081857
   store i8 255, i8* %gep5
   %cast1 = bitcast i8* %gep4 to i32*
   store i32 0, i32* %cast1
   %load = load i8* %gep0
-  %gep6 = getelementptr i8* %gep0, i32 1
+  %gep6 = getelementptr i8, i8* %gep0, i32 1
   %load2 = load i8* %gep6
   %result = or i8 %load, %load2
   ret i8 %result
@@ -918,7 +918,7 @@ if.then:
   br label %if.end
 
 if.end:
-  %gep = getelementptr %PR13916.struct* %a, i32 0, i32 0
+  %gep = getelementptr %PR13916.struct, %PR13916.struct* %a, i32 0, i32 0
   %tmp2 = load i8* %gep
   ret void
 }
@@ -987,8 +987,8 @@ define void @PR14034() {
 
 entry:
   %a = alloca %PR14034.struct
-  %list = getelementptr %PR14034.struct* %a, i32 0, i32 2
-  %prev = getelementptr %PR14034.list* %list, i32 0, i32 1
+  %list = getelementptr %PR14034.struct, %PR14034.struct* %a, i32 0, i32 2
+  %prev = getelementptr %PR14034.list, %PR14034.list* %list, i32 0, i32 1
   store %PR14034.list* undef, %PR14034.list** %prev
   %cast0 = bitcast %PR14034.struct* undef to i8*
   %cast1 = bitcast %PR14034.struct* %a to i8*
@@ -1008,10 +1008,10 @@ entry:
 ; CHECK-NOT: alloca
 
   %wrap1 = insertvalue [1 x { i32 }] undef, i32 %x, 0, 0
-  %gep1 = getelementptr { { [1 x { i32 }] } }* %a1, i32 0, i32 0, i32 0
+  %gep1 = getelementptr { { [1 x { i32 }] } }, { { [1 x { i32 }] } }* %a1, i32 0, i32 0, i32 0
   store [1 x { i32 }] %wrap1, [1 x { i32 }]* %gep1
 
-  %gep2 = getelementptr { { [1 x { i32 }] } }* %a1, i32 0, i32 0
+  %gep2 = getelementptr { { [1 x { i32 }] } }, { { [1 x { i32 }] } }* %a1, i32 0, i32 0
   %ptrcast1 = bitcast { [1 x { i32 }] }* %gep2 to { [1 x { float }] }*
   %load1 = load { [1 x { float }] }* %ptrcast1
   %unwrap1 = extractvalue { [1 x { float }] } %load1, 0, 0
@@ -1019,18 +1019,18 @@ entry:
   %wrap2 = insertvalue { {}, { float }, [0 x i8] } undef, { float } %unwrap1, 1
   store { {}, { float }, [0 x i8] } %wrap2, { {}, { float }, [0 x i8] }* %a2
 
-  %gep3 = getelementptr { {}, { float }, [0 x i8] }* %a2, i32 0, i32 1, i32 0
+  %gep3 = getelementptr { {}, { float }, [0 x i8] }, { {}, { float }, [0 x i8] }* %a2, i32 0, i32 1, i32 0
   %ptrcast2 = bitcast float* %gep3 to <4 x i8>*
   %load3 = load <4 x i8>* %ptrcast2
   %valcast1 = bitcast <4 x i8> %load3 to i32
 
   %wrap3 = insertvalue [1 x [1 x i32]] undef, i32 %valcast1, 0, 0
   %wrap4 = insertvalue { [1 x [1 x i32]], {} } undef, [1 x [1 x i32]] %wrap3, 0
-  %gep4 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1
+  %gep4 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }, { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1
   %ptrcast3 = bitcast { [0 x double], [1 x [1 x <4 x i8>]], {} }* %gep4 to { [1 x [1 x i32]], {} }*
   store { [1 x [1 x i32]], {} } %wrap4, { [1 x [1 x i32]], {} }* %ptrcast3
 
-  %gep5 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1, i32 1, i32 0
+  %gep5 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }, { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1, i32 1, i32 0
   %ptrcast4 = bitcast [1 x <4 x i8>]* %gep5 to { {}, float, {} }*
   %load4 = load { {}, float, {} }* %ptrcast4
   %unwrap2 = extractvalue { {}, float, {} } %load4, 1
@@ -1064,14 +1064,14 @@ entry:
   store i32 0, i32* %X.sroa.0.0.cast2.i, align 8
 
   ; Also use a memset to the middle 32-bits for fun.
-  %X.sroa.0.2.raw_idx2.i = getelementptr inbounds i8* %0, i32 2
+  %X.sroa.0.2.raw_idx2.i = getelementptr inbounds i8, i8* %0, i32 2
   call void @llvm.memset.p0i8.i64(i8* %X.sroa.0.2.raw_idx2.i, i8 0, i64 4, i32 1, i1 false)
 
   ; Or a memset of the whole thing.
   call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 8, i32 1, i1 false)
 
   ; Write to the high 32-bits with a memcpy.
-  %X.sroa.0.4.raw_idx4.i = getelementptr inbounds i8* %0, i32 4
+  %X.sroa.0.4.raw_idx4.i = getelementptr inbounds i8, i8* %0, i32 4
   %d.raw = bitcast double* %d to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %X.sroa.0.4.raw_idx4.i, i8* %d.raw, i32 4, i32 1, i1 false)
 
@@ -1103,17 +1103,17 @@ entry:
   store i64 0, i64* %0
   ; CHECK-NOT: store
 
-  %phi.realp = getelementptr inbounds { float, float }* %phi, i32 0, i32 0
+  %phi.realp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 0
   %phi.real = load float* %phi.realp
-  %phi.imagp = getelementptr inbounds { float, float }* %phi, i32 0, i32 1
+  %phi.imagp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 1
   %phi.imag = load float* %phi.imagp
-  ; CHECK:      %[[realp:.*]] = getelementptr inbounds { float, float }* %phi, i32 0, i32 0
+  ; CHECK:      %[[realp:.*]] = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 0
   ; CHECK-NEXT: %[[real:.*]] = load float* %[[realp]]
-  ; CHECK-NEXT: %[[imagp:.*]] = getelementptr inbounds { float, float }* %phi, i32 0, i32 1
+  ; CHECK-NEXT: %[[imagp:.*]] = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 1
   ; CHECK-NEXT: %[[imag:.*]] = load float* %[[imagp]]
 
-  %real = getelementptr inbounds { float, float }* %retval, i32 0, i32 0
-  %imag = getelementptr inbounds { float, float }* %retval, i32 0, i32 1
+  %real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
+  %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
   store float %phi.real, float* %real
   store float %phi.imag, float* %imag
   ; CHECK-NEXT: %[[real_convert:.*]] = bitcast float %[[real]] to i32
@@ -1141,8 +1141,8 @@ entry:
   %a = alloca { [16 x i8] }, align 8
 ; CHECK: alloca [16 x i8], align 8
 
-  %gep = getelementptr inbounds { [16 x i8] }* %ptr, i64 -1
-; CHECK-NEXT: getelementptr inbounds { [16 x i8] }* %ptr, i64 -1, i32 0, i64 0
+  %gep = getelementptr inbounds { [16 x i8] }, { [16 x i8] }* %ptr, i64 -1
+; CHECK-NEXT: getelementptr inbounds { [16 x i8] }, { [16 x i8] }* %ptr, i64 -1, i32 0, i64 0
 
   %cast1 = bitcast { [16 x i8 ] }* %gep to i8*
   %cast2 = bitcast { [16 x i8 ] }* %a to i8*
@@ -1159,8 +1159,8 @@ entry:
   %a = alloca { [16 x i8] }, align 8
 ; CHECK: alloca [16 x i8], align 8
 
-  %gep = getelementptr inbounds { [16 x i8] } addrspace(1)* %ptr, i64 -1
-; CHECK-NEXT: getelementptr inbounds { [16 x i8] } addrspace(1)* %ptr, i16 -1, i32 0, i16 0
+  %gep = getelementptr inbounds { [16 x i8] }, { [16 x i8] } addrspace(1)* %ptr, i64 -1
+; CHECK-NEXT: getelementptr inbounds { [16 x i8] }, { [16 x i8] } addrspace(1)* %ptr, i16 -1, i32 0, i16 0
 
   %cast1 = bitcast { [16 x i8 ] } addrspace(1)* %gep to i8 addrspace(1)*
   %cast2 = bitcast { [16 x i8 ] }* %a to i8*
@@ -1207,7 +1207,7 @@ entry:
   %a.i8 = bitcast <{ i1 }>* %a to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.i8, i8* %b.i8, i32 1, i32 1, i1 false) nounwind
   %bar = load i8* %a.i8, align 1
-  %a.i1 = getelementptr inbounds <{ i1 }>* %a, i32 0, i32 0
+  %a.i1 = getelementptr inbounds <{ i1 }>, <{ i1 }>* %a, i32 0, i32 0
   %baz = load i1* %a.i1, align 1
 ; CHECK-NEXT: %[[a_cast:.*]] = bitcast i8* %[[a]] to i1*
 ; CHECK-NEXT: {{.*}} = load i1* %[[a_cast]], align 8
@@ -1282,36 +1282,36 @@ entry:
   ]
 
 bb4:
-  %src.gep3 = getelementptr inbounds i8* %src, i32 3
+  %src.gep3 = getelementptr inbounds i8, i8* %src, i32 3
   %src.3 = load i8* %src.gep3
-  %tmp.gep3 = getelementptr inbounds [4 x i8]* %tmp, i32 0, i32 3
+  %tmp.gep3 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 3
   store i8 %src.3, i8* %tmp.gep3
 ; CHECK: store i8
 
   br label %bb3
 
 bb3:
-  %src.gep2 = getelementptr inbounds i8* %src, i32 2
+  %src.gep2 = getelementptr inbounds i8, i8* %src, i32 2
   %src.2 = load i8* %src.gep2
-  %tmp.gep2 = getelementptr inbounds [4 x i8]* %tmp, i32 0, i32 2
+  %tmp.gep2 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 2
   store i8 %src.2, i8* %tmp.gep2
 ; CHECK: store i8
 
   br label %bb2
 
 bb2:
-  %src.gep1 = getelementptr inbounds i8* %src, i32 1
+  %src.gep1 = getelementptr inbounds i8, i8* %src, i32 1
   %src.1 = load i8* %src.gep1
-  %tmp.gep1 = getelementptr inbounds [4 x i8]* %tmp, i32 0, i32 1
+  %tmp.gep1 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 1
   store i8 %src.1, i8* %tmp.gep1
 ; CHECK: store i8
 
   br label %bb1
 
 bb1:
-  %src.gep0 = getelementptr inbounds i8* %src, i32 0
+  %src.gep0 = getelementptr inbounds i8, i8* %src, i32 0
   %src.0 = load i8* %src.gep0
-  %tmp.gep0 = getelementptr inbounds [4 x i8]* %tmp, i32 0, i32 0
+  %tmp.gep0 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 0
   store i8 %src.0, i8* %tmp.gep0
 ; CHECK: store i8
 
@@ -1373,7 +1373,7 @@ entry:
   %b = alloca i32, align 4
   %b.cast = bitcast i32* %b to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b.cast, i8* %a, i32 4, i32 4, i1 true)
-  %b.gep = getelementptr inbounds i8* %b.cast, i32 2
+  %b.gep = getelementptr inbounds i8, i8* %b.cast, i32 2
   load i8* %b.gep, align 2
   unreachable
 }
@@ -1390,9 +1390,9 @@ define void @PR16651.2() {
 
 entry:
   %tv1 = alloca { <2 x float>, <2 x float> }, align 8
-  %0 = getelementptr { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1
+  %0 = getelementptr { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1
   store <2 x float> undef, <2 x float>* %0, align 8
-  %1 = getelementptr inbounds { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1, i64 0
+  %1 = getelementptr inbounds { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1, i64 0
   %cond105.in.i.i = select i1 undef, float* null, float* %1
   %cond105.i.i = load float* %cond105.in.i.i, align 8
   ret void
@@ -1405,8 +1405,8 @@ define void @test23(i32 %x) {
 entry:
   %a = alloca i32, align 4
   store i32 %x, i32* %a, align 4
-  %gep1 = getelementptr inbounds i32* %a, i32 1
-  %gep0 = getelementptr inbounds i32* %a, i32 0
+  %gep1 = getelementptr inbounds i32, i32* %a, i32 1
+  %gep0 = getelementptr inbounds i32, i32* %a, i32 0
   %cast1 = bitcast i32* %gep1 to i8*
   %cast0 = bitcast i32* %gep0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast1, i8* %cast0, i32 4, i32 1, i1 false)
@@ -1419,7 +1419,7 @@ define void @PR18615() {
 ; CHECK: ret void
 entry:
   %f = alloca i8
-  %gep = getelementptr i8* %f, i64 -1
+  %gep = getelementptr i8, i8* %f, i64 -1
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* %gep, i32 1, i32 1, i1 false)
   ret void
 }
@@ -1459,11 +1459,11 @@ entry:
   %a = alloca i64
   %b = alloca i64
   %a.cast = bitcast i64* %a to [2 x float]*
-  %a.gep1 = getelementptr [2 x float]* %a.cast, i32 0, i32 0
-  %a.gep2 = getelementptr [2 x float]* %a.cast, i32 0, i32 1
+  %a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
+  %a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
   %b.cast = bitcast i64* %b to [2 x float]*
-  %b.gep1 = getelementptr [2 x float]* %b.cast, i32 0, i32 0
-  %b.gep2 = getelementptr [2 x float]* %b.cast, i32 0, i32 1
+  %b.gep1 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 0
+  %b.gep2 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 1
   store float 0.0, float* %a.gep1
   store float 1.0, float* %a.gep2
   %v = load i64* %a
@@ -1496,8 +1496,8 @@ define void @test26() {
 entry:
   %a = alloca i64
   %a.cast = bitcast i64* %a to [2 x float]*
-  %a.gep1 = getelementptr [2 x float]* %a.cast, i32 0, i32 0
-  %a.gep2 = getelementptr [2 x float]* %a.cast, i32 0, i32 1
+  %a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
+  %a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
   %v1 = load i64* bitcast ([2 x float]* @complex1 to i64*)
   store i64 %v1, i64* %a
   %f1 = load float* %a.gep1
@@ -1524,9 +1524,9 @@ define float @test27() {
 
 entry:
   %a = alloca [12 x i8]
-  %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
-  %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
-  %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
+  %gep1 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 0
+  %gep2 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 4
+  %gep3 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 8
   %iptr1 = bitcast i8* %gep1 to i64*
   %iptr2 = bitcast i8* %gep2 to i64*
   %fptr1 = bitcast i8* %gep1 to float*
@@ -1589,7 +1589,7 @@ entry:
   store volatile i16 42, i16* %a.cast2
   %load = load i32* %a.cast1
   store i32 %load, i32* %a.cast1
-  %a.gep1 = getelementptr i32* %a.cast1, i32 1
+  %a.gep1 = getelementptr i32, i32* %a.cast1, i32 1
   %a.cast3 = bitcast i32* %a.gep1 to i8*
   store volatile i8 13, i8* %a.cast3
   store i32 %load, i32* %a.gep1

Modified: llvm/trunk/test/Transforms/SROA/big-endian.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SROA/big-endian.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SROA/big-endian.ll (original)
+++ llvm/trunk/test/Transforms/SROA/big-endian.ll Fri Feb 27 13:29:02 2015
@@ -16,11 +16,11 @@ entry:
   %b = alloca [3 x i8]
 ; CHECK-NOT: alloca
 
-  %a0ptr = getelementptr [3 x i8]* %a, i64 0, i32 0
+  %a0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
   store i8 0, i8* %a0ptr
-  %a1ptr = getelementptr [3 x i8]* %a, i64 0, i32 1
+  %a1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
   store i8 0, i8* %a1ptr
-  %a2ptr = getelementptr [3 x i8]* %a, i64 0, i32 2
+  %a2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
   store i8 0, i8* %a2ptr
   %aiptr = bitcast [3 x i8]* %a to i24*
   %ai = load i24* %aiptr
@@ -40,11 +40,11 @@ entry:
 
   %biptr = bitcast [3 x i8]* %b to i24*
   store i24 %ai, i24* %biptr
-  %b0ptr = getelementptr [3 x i8]* %b, i64 0, i32 0
+  %b0ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 0
   %b0 = load i8* %b0ptr
-  %b1ptr = getelementptr [3 x i8]* %b, i64 0, i32 1
+  %b1ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 1
   %b1 = load i8* %b1ptr
-  %b2ptr = getelementptr [3 x i8]* %b, i64 0, i32 2
+  %b2ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 2
   %b2 = load i8* %b2ptr
 ; CHECK-NOT: store
 ; CHECK-NOT: load
@@ -72,10 +72,10 @@ entry:
   %a = alloca [7 x i8]
 ; CHECK-NOT: alloca
 
-  %a0ptr = getelementptr [7 x i8]* %a, i64 0, i32 0
-  %a1ptr = getelementptr [7 x i8]* %a, i64 0, i32 1
-  %a2ptr = getelementptr [7 x i8]* %a, i64 0, i32 2
-  %a3ptr = getelementptr [7 x i8]* %a, i64 0, i32 3
+  %a0ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 0, i32 0
+  %a1ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 0, i32 1
+  %a2ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 0, i32 2
+  %a3ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 0, i32 3
 
 ; CHECK-NOT: store
 ; CHECK-NOT: load

Modified: llvm/trunk/test/Transforms/SROA/fca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SROA/fca.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SROA/fca.ll (original)
+++ llvm/trunk/test/Transforms/SROA/fca.ll Fri Feb 27 13:29:02 2015
@@ -14,9 +14,9 @@ entry:
 
   store { i32, i32 } undef, { i32, i32 }* %a
 
-  %gep1 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 0
+  %gep1 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 0
   store i32 %x, i32* %gep1
-  %gep2 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 1
+  %gep2 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 1
   store i32 %y, i32* %gep2
 
   %result = load { i32, i32 }* %a
@@ -38,9 +38,9 @@ entry:
   %a = alloca { i32, i32 }
   %b = alloca { i32, i32 }
 
-  %gep1 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 0
+  %gep1 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 0
   store i32 %x, i32* %gep1
-  %gep2 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 1
+  %gep2 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 1
   store i32 %y, i32* %gep2
 
   %result = load volatile { i32, i32 }* %a

Modified: llvm/trunk/test/Transforms/SROA/phi-and-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SROA/phi-and-select.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SROA/phi-and-select.ll (original)
+++ llvm/trunk/test/Transforms/SROA/phi-and-select.ll Fri Feb 27 13:29:02 2015
@@ -7,8 +7,8 @@ entry:
 	%a = alloca [2 x i32]
 ; CHECK-NOT: alloca
 
-  %a0 = getelementptr [2 x i32]* %a, i64 0, i32 0
-  %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+  %a0 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 0
+  %a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
 	store i32 0, i32* %a0
 	store i32 1, i32* %a1
 	%v0 = load i32* %a0
@@ -36,8 +36,8 @@ entry:
 	%a = alloca [2 x i32]
 ; CHECK-NOT: alloca
 
-  %a0 = getelementptr [2 x i32]* %a, i64 0, i32 0
-  %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+  %a0 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 0
+  %a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
 	store i32 0, i32* %a0
 	store i32 1, i32* %a1
 	%v0 = load i32* %a0
@@ -62,10 +62,10 @@ entry:
   ; Note that we build redundant GEPs here to ensure that having different GEPs
   ; into the same alloca partation continues to work with PHI speculation. This
   ; was the underlying cause of PR13926.
-  %a0 = getelementptr [2 x i32]* %a, i64 0, i32 0
-  %a0b = getelementptr [2 x i32]* %a, i64 0, i32 0
-  %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
-  %a1b = getelementptr [2 x i32]* %a, i64 0, i32 1
+  %a0 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 0
+  %a0b = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 0
+  %a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
+  %a1b = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
 	store i32 0, i32* %a0
 	store i32 1, i32* %a1
 ; CHECK-NOT: store
@@ -110,8 +110,8 @@ entry:
 	%a = alloca [2 x i32]
 ; CHECK-NOT: alloca
 
-  %a0 = getelementptr [2 x i32]* %a, i64 0, i32 0
-  %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+  %a0 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 0
+  %a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
 	store i32 0, i32* %a0
 	store i32 1, i32* %a1
 	%v0 = load i32* %a0
@@ -134,7 +134,7 @@ entry:
 	%a = alloca [2 x i32]
 ; CHECK-NOT: alloca
 
-  %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+  %a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
 	store i32 1, i32* %a1
 ; CHECK-NOT: store
 
@@ -157,7 +157,7 @@ entry:
   %c = alloca i32
 ; CHECK-NOT: alloca
 
-  %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+  %a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
 	store i32 1, i32* %a1
 
 	%select = select i1 true, i32* %a1, i32* %b
@@ -190,12 +190,12 @@ entry:
   br i1 undef, label %good, label %bad
 
 good:
-  %Y1 = getelementptr i32* %X, i64 0
+  %Y1 = getelementptr i32, i32* %X, i64 0
   store i32 0, i32* %Y1
   br label %exit
 
 bad:
-  %Y2 = getelementptr i32* %X, i64 1
+  %Y2 = getelementptr i32, i32* %X, i64 1
   store i32 0, i32* %Y2
   br label %exit
 
@@ -488,7 +488,7 @@ then:
 
 else:
   %a.raw = bitcast i64* %a to i8*
-  %a.raw.4 = getelementptr i8* %a.raw, i64 4
+  %a.raw.4 = getelementptr i8, i8* %a.raw, i64 4
   %a.raw.4.f = bitcast i8* %a.raw.4 to float*
   br label %end
 ; CHECK: %[[hi_cast:.*]] = bitcast i32 %[[hi]] to float
@@ -516,12 +516,12 @@ entry:
   br i1 %cond, label %then, label %else
 
 then:
-  %0 = getelementptr inbounds [4 x float]* %arr, i64 0, i64 3
+  %0 = getelementptr inbounds [4 x float], [4 x float]* %arr, i64 0, i64 3
   store float 1.000000e+00, float* %0, align 4
   br label %merge
 
 else:
-  %1 = getelementptr inbounds [4 x float]* %arr, i64 0, i64 3
+  %1 = getelementptr inbounds [4 x float], [4 x float]* %arr, i64 0, i64 3
   store float 2.000000e+00, float* %1, align 4
   br label %merge
 
@@ -546,7 +546,7 @@ entry:
   br i1 %cond, label %then, label %else
 
 then:
-  %0 = getelementptr inbounds [4 x float]* %arr, i64 0, i64 3
+  %0 = getelementptr inbounds [4 x float], [4 x float]* %arr, i64 0, i64 3
   store float 1.000000e+00, float* %0, align 4
   br label %then2
 
@@ -556,7 +556,7 @@ then2:
   br label %merge
 
 else:
-  %2 = getelementptr inbounds [4 x float]* %arr, i64 0, i64 3
+  %2 = getelementptr inbounds [4 x float], [4 x float]* %arr, i64 0, i64 3
   store float 3.000000e+00, float* %2, align 4
   br label %merge
 

Modified: llvm/trunk/test/Transforms/SROA/slice-order-independence.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SROA/slice-order-independence.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SROA/slice-order-independence.ll (original)
+++ llvm/trunk/test/Transforms/SROA/slice-order-independence.ll Fri Feb 27 13:29:02 2015
@@ -13,7 +13,7 @@ define void @skipped_inttype_first({ i16
   %2 = bitcast { i16*, i32 }* %0 to i8*
   %3 = bitcast { i16*, i32 }* %arg to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %3, i8* %2, i32 16, i32 8, i1 false)
-  %b = getelementptr inbounds { i16*, i32 }* %arg, i64 0, i32 0
+  %b = getelementptr inbounds { i16*, i32 }, { i16*, i32 }* %arg, i64 0, i32 0
   %pb0 = bitcast i16** %b to i63*
   %b0 = load i63* %pb0
   %pb1 = bitcast i16** %b to i8**
@@ -28,7 +28,7 @@ define void @skipped_inttype_last({ i16*
   %2 = bitcast { i16*, i32 }* %0 to i8*
   %3 = bitcast { i16*, i32 }* %arg to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %3, i8* %2, i32 16, i32 8, i1 false)
-  %b = getelementptr inbounds { i16*, i32 }* %arg, i64 0, i32 0
+  %b = getelementptr inbounds { i16*, i32 }, { i16*, i32 }* %arg, i64 0, i32 0
   %pb1 = bitcast i16** %b to i8**
   %b1 = load i8** %pb1
   %pb0 = bitcast i16** %b to i63*

Modified: llvm/trunk/test/Transforms/SROA/slice-width.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SROA/slice-width.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SROA/slice-width.ll (original)
+++ llvm/trunk/test/Transforms/SROA/slice-width.ll Fri Feb 27 13:29:02 2015
@@ -42,7 +42,7 @@ define void @memcpy_fp80_padding() {
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %x_i8, i8* bitcast (%union.Foo* @foo_copy_source to i8*), i32 32, i32 16, i1 false)
 
   ; Access a slice of the alloca to trigger SROA.
-  %mid_p = getelementptr %union.Foo* %x, i32 0, i32 1
+  %mid_p = getelementptr %union.Foo, %union.Foo* %x, i32 0, i32 1
   %elt = load i64* %mid_p
   store i64 %elt, i64* @i64_sink
   ret void
@@ -61,7 +61,7 @@ define void @memset_fp80_padding() {
   call void @llvm.memset.p0i8.i32(i8* %x_i8, i8 -1, i32 32, i32 16, i1 false)
 
   ; Access a slice of the alloca to trigger SROA.
-  %mid_p = getelementptr %union.Foo* %x, i32 0, i32 1
+  %mid_p = getelementptr %union.Foo, %union.Foo* %x, i32 0, i32 1
   %elt = load i64* %mid_p
   store i64 %elt, i64* @i64_sink
   ret void
@@ -89,7 +89,7 @@ entry:
 
   ; The following block does nothing; but appears to confuse SROA
   %unused1 = bitcast %S.vec3float* %tmp1 to %U.vec3float*
-  %unused2 = getelementptr inbounds %U.vec3float* %unused1, i32 0, i32 0
+  %unused2 = getelementptr inbounds %U.vec3float, %U.vec3float* %unused1, i32 0, i32 0
   %unused3 = load <4 x float>* %unused2, align 1
 
   ; Create a second temporary and copy %tmp1 into it

Modified: llvm/trunk/test/Transforms/SROA/vector-promotion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SROA/vector-promotion.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SROA/vector-promotion.ll (original)
+++ llvm/trunk/test/Transforms/SROA/vector-promotion.ll Fri Feb 27 13:29:02 2015
@@ -9,17 +9,17 @@ entry:
 	%a = alloca [2 x <4 x i32>]
 ; CHECK-NOT: alloca
 
-  %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+  %a.x = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0
   store <4 x i32> %x, <4 x i32>* %a.x
-  %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+  %a.y = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1
   store <4 x i32> %y, <4 x i32>* %a.y
 ; CHECK-NOT: store
 
-  %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+  %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
   %tmp1 = load i32* %a.tmp1
-  %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+  %a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
   %tmp2 = load i32* %a.tmp2
-  %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+  %a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
   %tmp3 = load i32* %a.tmp3
 ; CHECK-NOT: load
 ; CHECK:      extractelement <4 x i32> %x, i32 2
@@ -40,17 +40,17 @@ entry:
 	%a = alloca [2 x <4 x i32>]
 ; CHECK-NOT: alloca
 
-  %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+  %a.x = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0
   store <4 x i32> %x, <4 x i32>* %a.x
-  %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+  %a.y = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1
   store <4 x i32> %y, <4 x i32>* %a.y
 ; CHECK-NOT: store
 
-  %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+  %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
   %tmp1 = load i32* %a.tmp1
-  %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+  %a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
   %tmp2 = load i32* %a.tmp2
-  %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+  %a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
   %a.tmp3.cast = bitcast i32* %a.tmp3 to <2 x i32>*
   %tmp3.vec = load <2 x i32>* %a.tmp3.cast
   %tmp3 = extractelement <2 x i32> %tmp3.vec, i32 0
@@ -74,9 +74,9 @@ entry:
 	%a = alloca [2 x <4 x i32>]
 ; CHECK-NOT: alloca
 
-  %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+  %a.x = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0
   store <4 x i32> %x, <4 x i32>* %a.x
-  %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+  %a.y = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1
   store <4 x i32> %y, <4 x i32>* %a.y
 ; CHECK-NOT: store
 
@@ -84,13 +84,13 @@ entry:
   call void @llvm.memset.p0i8.i32(i8* %a.y.cast, i8 0, i32 16, i32 1, i1 false)
 ; CHECK-NOT: memset
 
-  %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+  %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
   %a.tmp1.cast = bitcast i32* %a.tmp1 to i8*
   call void @llvm.memset.p0i8.i32(i8* %a.tmp1.cast, i8 -1, i32 4, i32 1, i1 false)
   %tmp1 = load i32* %a.tmp1
-  %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+  %a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
   %tmp2 = load i32* %a.tmp2
-  %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+  %a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
   %tmp3 = load i32* %a.tmp3
 ; CHECK-NOT: load
 ; CHECK:      %[[insert:.*]] = insertelement <4 x i32> %x, i32 -1, i32 2
@@ -112,9 +112,9 @@ entry:
 	%a = alloca [2 x <4 x i32>]
 ; CHECK-NOT: alloca
 
-  %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+  %a.x = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0
   store <4 x i32> %x, <4 x i32>* %a.x
-  %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+  %a.y = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1
   store <4 x i32> %y, <4 x i32>* %a.y
 ; CHECK-NOT: store
 
@@ -123,19 +123,19 @@ entry:
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.y.cast, i8* %z.cast, i32 16, i32 1, i1 false)
 ; CHECK-NOT: memcpy
 
-  %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+  %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
   %a.tmp1.cast = bitcast i32* %a.tmp1 to i8*
-  %z.tmp1 = getelementptr inbounds <4 x i32>* %z, i64 0, i64 2
+  %z.tmp1 = getelementptr inbounds <4 x i32>, <4 x i32>* %z, i64 0, i64 2
   %z.tmp1.cast = bitcast i32* %z.tmp1 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.tmp1.cast, i8* %z.tmp1.cast, i32 4, i32 1, i1 false)
   %tmp1 = load i32* %a.tmp1
-  %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+  %a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
   %tmp2 = load i32* %a.tmp2
-  %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+  %a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
   %tmp3 = load i32* %a.tmp3
 ; CHECK-NOT: memcpy
 ; CHECK:      %[[load:.*]] = load <4 x i32>* %z
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds <4 x i32>* %z, i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* %z, i64 0, i64 2
 ; CHECK-NEXT: %[[element_load:.*]] = load i32* %[[gep]]
 ; CHECK-NEXT: %[[insert:.*]] = insertelement <4 x i32> %x, i32 %[[element_load]], i32 2
 ; CHECK-NEXT: extractelement <4 x i32> %[[insert]], i32 2
@@ -159,9 +159,9 @@ entry:
 	%a = alloca [2 x <4 x i32>]
 ; CHECK-NOT: alloca
 
-  %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+  %a.x = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0
   store <4 x i32> %x, <4 x i32>* %a.x
-  %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+  %a.y = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1
   store <4 x i32> %y, <4 x i32>* %a.y
 ; CHECK-NOT: store
 
@@ -170,19 +170,19 @@ entry:
   call void @llvm.memcpy.p0i8.p1i8.i32(i8* %a.y.cast, i8 addrspace(1)* %z.cast, i32 16, i32 1, i1 false)
 ; CHECK-NOT: memcpy
 
-  %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+  %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
   %a.tmp1.cast = bitcast i32* %a.tmp1 to i8*
-  %z.tmp1 = getelementptr inbounds <4 x i32> addrspace(1)* %z, i16 0, i16 2
+  %z.tmp1 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %z, i16 0, i16 2
   %z.tmp1.cast = bitcast i32 addrspace(1)* %z.tmp1 to i8 addrspace(1)*
   call void @llvm.memcpy.p0i8.p1i8.i32(i8* %a.tmp1.cast, i8 addrspace(1)* %z.tmp1.cast, i32 4, i32 1, i1 false)
   %tmp1 = load i32* %a.tmp1
-  %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+  %a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
   %tmp2 = load i32* %a.tmp2
-  %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+  %a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
   %tmp3 = load i32* %a.tmp3
 ; CHECK-NOT: memcpy
 ; CHECK:      %[[load:.*]] = load <4 x i32> addrspace(1)* %z
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds <4 x i32> addrspace(1)* %z, i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %z, i64 0, i64 2
 ; CHECK-NEXT: %[[element_load:.*]] = load i32 addrspace(1)* %[[gep]]
 ; CHECK-NEXT: %[[insert:.*]] = insertelement <4 x i32> %x, i32 %[[element_load]], i32 2
 ; CHECK-NEXT: extractelement <4 x i32> %[[insert]], i32 2
@@ -205,9 +205,9 @@ entry:
 	%a = alloca [2 x <4 x i32>]
 ; CHECK-NOT: alloca
 
-  %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+  %a.x = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0
   store <4 x i32> %x, <4 x i32>* %a.x
-  %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+  %a.y = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1
   store <4 x i32> %y, <4 x i32>* %a.y
 ; CHECK-NOT: store
 
@@ -216,18 +216,18 @@ entry:
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.x.cast, i8* %a.y.cast, i32 16, i32 1, i1 false)
 ; CHECK-NOT: memcpy
 
-  %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+  %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
   %a.tmp1.cast = bitcast i32* %a.tmp1 to i8*
-  %z.tmp1 = getelementptr inbounds <4 x i32>* %z, i64 0, i64 2
+  %z.tmp1 = getelementptr inbounds <4 x i32>, <4 x i32>* %z, i64 0, i64 2
   %z.tmp1.cast = bitcast i32* %z.tmp1 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %z.tmp1.cast, i8* %a.tmp1.cast, i32 4, i32 1, i1 false)
   %tmp1 = load i32* %a.tmp1
-  %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+  %a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
   %tmp2 = load i32* %a.tmp2
-  %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+  %a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
   %tmp3 = load i32* %a.tmp3
 ; CHECK-NOT: memcpy
-; CHECK:      %[[gep:.*]] = getelementptr inbounds <4 x i32>* %z, i64 0, i64 2
+; CHECK:      %[[gep:.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* %z, i64 0, i64 2
 ; CHECK-NEXT: %[[extract:.*]] = extractelement <4 x i32> %y, i32 2
 ; CHECK-NEXT: store i32 %[[extract]], i32* %[[gep]]
 ; CHECK-NEXT: extractelement <4 x i32> %y, i32 2
@@ -250,13 +250,13 @@ define i64 @test6(<4 x i64> %x, <4 x i64
 ; The old scalarrepl pass would wrongly drop the store to the second alloca.
 ; PR13254
   %tmp = alloca { <4 x i64>, <4 x i64> }
-  %p0 = getelementptr inbounds { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 0
+  %p0 = getelementptr inbounds { <4 x i64>, <4 x i64> }, { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 0
   store <4 x i64> %x, <4 x i64>* %p0
 ; CHECK: store <4 x i64> %x,
-  %p1 = getelementptr inbounds { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 1
+  %p1 = getelementptr inbounds { <4 x i64>, <4 x i64> }, { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 1
   store <4 x i64> %y, <4 x i64>* %p1
 ; CHECK: store <4 x i64> %y,
-  %addr = getelementptr inbounds { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 0, i64 %n
+  %addr = getelementptr inbounds { <4 x i64>, <4 x i64> }, { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 0, i64 %n
   %res = load i64* %addr, align 4
   ret i64 %res
 }
@@ -267,23 +267,23 @@ entry:
   %a = alloca <4 x i32>
 ; CHECK-NOT: alloca
 
-  %a.gep0 = getelementptr <4 x i32>* %a, i32 0, i32 0
+  %a.gep0 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 0
   %a.cast0 = bitcast i32* %a.gep0 to <2 x i32>*
   store <2 x i32> <i32 0, i32 0>, <2 x i32>* %a.cast0
 ; CHECK-NOT: store
 ; CHECK:     select <4 x i1> <i1 true, i1 true, i1 false, i1 false> 
 
-  %a.gep1 = getelementptr <4 x i32>* %a, i32 0, i32 1
+  %a.gep1 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 1
   %a.cast1 = bitcast i32* %a.gep1 to <2 x i32>*
   store <2 x i32> <i32 1, i32 1>, <2 x i32>* %a.cast1
 ; CHECK-NEXT: select <4 x i1> <i1 false, i1 true, i1 true, i1 false>
 
-  %a.gep2 = getelementptr <4 x i32>* %a, i32 0, i32 2
+  %a.gep2 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 2
   %a.cast2 = bitcast i32* %a.gep2 to <2 x i32>*
   store <2 x i32> <i32 2, i32 2>, <2 x i32>* %a.cast2
 ; CHECK-NEXT: select <4 x i1> <i1 false, i1 false, i1 true, i1 true>
 
-  %a.gep3 = getelementptr <4 x i32>* %a, i32 0, i32 3
+  %a.gep3 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 3
   store i32 3, i32* %a.gep3
 ; CHECK-NEXT: insertelement <4 x i32>
 
@@ -301,18 +301,18 @@ entry:
   store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32>* %a
 ; CHECK-NOT: store
 
-  %a.gep0 = getelementptr <4 x i32>* %a, i32 0, i32 0
+  %a.gep0 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 0
   %a.cast0 = bitcast i32* %a.gep0 to <2 x i32>*
   %first = load <2 x i32>* %a.cast0
 ; CHECK-NOT: load
 ; CHECK:      %[[extract1:.*]] = shufflevector <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
 
-  %a.gep1 = getelementptr <4 x i32>* %a, i32 0, i32 1
+  %a.gep1 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 1
   %a.cast1 = bitcast i32* %a.gep1 to <2 x i32>*
   %second = load <2 x i32>* %a.cast1
 ; CHECK-NEXT: %[[extract2:.*]] = shufflevector <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> undef, <2 x i32> <i32 1, i32 2>
 
-  %a.gep2 = getelementptr <4 x i32>* %a, i32 0, i32 2
+  %a.gep2 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 2
   %a.cast2 = bitcast i32* %a.gep2 to <2 x i32>*
   %third = load <2 x i32>* %a.cast2
 ; CHECK-NEXT: %[[extract3:.*]] = shufflevector <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
@@ -334,23 +334,23 @@ entry:
   %a = alloca <4 x float>
 ; CHECK-NOT: alloca
 
-  %a.gep0 = getelementptr <4 x float>* %a, i32 0, i32 0
+  %a.gep0 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 0
   %a.cast0 = bitcast float* %a.gep0 to i8*
   call void @llvm.memset.p0i8.i32(i8* %a.cast0, i8 0, i32 8, i32 0, i1 false)
 ; CHECK-NOT: store
 ; CHECK: select <4 x i1> <i1 true, i1 true, i1 false, i1 false>
 
-  %a.gep1 = getelementptr <4 x float>* %a, i32 0, i32 1
+  %a.gep1 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 1
   %a.cast1 = bitcast float* %a.gep1 to i8*
   call void @llvm.memset.p0i8.i32(i8* %a.cast1, i8 1, i32 8, i32 0, i1 false)
 ; CHECK-NEXT: select <4 x i1> <i1 false, i1 true, i1 true, i1 false>
 
-  %a.gep2 = getelementptr <4 x float>* %a, i32 0, i32 2
+  %a.gep2 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 2
   %a.cast2 = bitcast float* %a.gep2 to i8*
   call void @llvm.memset.p0i8.i32(i8* %a.cast2, i8 3, i32 8, i32 0, i1 false)
 ; CHECK-NEXT: select <4 x i1> <i1 false, i1 false, i1 true, i1 true>
 
-  %a.gep3 = getelementptr <4 x float>* %a, i32 0, i32 3
+  %a.gep3 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 3
   %a.cast3 = bitcast float* %a.gep3 to i8*
   call void @llvm.memset.p0i8.i32(i8* %a.cast3, i8 7, i32 4, i32 0, i1 false)
 ; CHECK-NEXT: insertelement <4 x float> 
@@ -367,7 +367,7 @@ entry:
   %a = alloca <4 x float>
 ; CHECK-NOT: alloca
 
-  %a.gep0 = getelementptr <4 x float>* %a, i32 0, i32 0
+  %a.gep0 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 0
   %a.cast0 = bitcast float* %a.gep0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast0, i8* %x, i32 8, i32 0, i1 false)
 ; CHECK:      %[[xptr:.*]] = bitcast i8* %x to <2 x float>*
@@ -375,7 +375,7 @@ entry:
 ; CHECK-NEXT: %[[expand_x:.*]] = shufflevector <2 x float> %[[x]], <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
 ; CHECK-NEXT: select <4 x i1> <i1 true, i1 true, i1 false, i1 false>  
 
-  %a.gep1 = getelementptr <4 x float>* %a, i32 0, i32 1
+  %a.gep1 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 1
   %a.cast1 = bitcast float* %a.gep1 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast1, i8* %y, i32 8, i32 0, i1 false)
 ; CHECK-NEXT: %[[yptr:.*]] = bitcast i8* %y to <2 x float>*
@@ -383,7 +383,7 @@ entry:
 ; CHECK-NEXT: %[[expand_y:.*]] = shufflevector <2 x float> %[[y]], <2 x float> undef, <4 x i32> <i32 undef, i32 0, i32 1, i32 undef>
 ; CHECK-NEXT: select <4 x i1> <i1 false, i1 true, i1 true, i1 false>
 
-  %a.gep2 = getelementptr <4 x float>* %a, i32 0, i32 2
+  %a.gep2 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 2
   %a.cast2 = bitcast float* %a.gep2 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast2, i8* %z, i32 8, i32 0, i1 false)
 ; CHECK-NEXT: %[[zptr:.*]] = bitcast i8* %z to <2 x float>*
@@ -391,7 +391,7 @@ entry:
 ; CHECK-NEXT: %[[expand_z:.*]] = shufflevector <2 x float> %[[z]], <2 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
 ; CHECK-NEXT: select <4 x i1> <i1 false, i1 false, i1 true, i1 true>
 
-  %a.gep3 = getelementptr <4 x float>* %a, i32 0, i32 3
+  %a.gep3 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 3
   %a.cast3 = bitcast float* %a.gep3 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast3, i8* %f, i32 4, i32 0, i1 false)
 ; CHECK-NEXT: %[[fptr:.*]] = bitcast i8* %f to float*
@@ -477,17 +477,17 @@ entry:
   %a.cast = bitcast [2 x i64]* %a to [2 x <2 x i32>]*
 ; CHECK-NOT: alloca
 
-  %a.x = getelementptr inbounds [2 x <2 x i32>]* %a.cast, i64 0, i64 0
+  %a.x = getelementptr inbounds [2 x <2 x i32>], [2 x <2 x i32>]* %a.cast, i64 0, i64 0
   store <2 x i32> %x, <2 x i32>* %a.x
-  %a.y = getelementptr inbounds [2 x <2 x i32>]* %a.cast, i64 0, i64 1
+  %a.y = getelementptr inbounds [2 x <2 x i32>], [2 x <2 x i32>]* %a.cast, i64 0, i64 1
   store <2 x i32> %y, <2 x i32>* %a.y
 ; CHECK-NOT: store
 
-  %a.tmp1 = getelementptr inbounds [2 x <2 x i32>]* %a.cast, i64 0, i64 0, i64 1
+  %a.tmp1 = getelementptr inbounds [2 x <2 x i32>], [2 x <2 x i32>]* %a.cast, i64 0, i64 0, i64 1
   %tmp1 = load i32* %a.tmp1
-  %a.tmp2 = getelementptr inbounds [2 x <2 x i32>]* %a.cast, i64 0, i64 1, i64 1
+  %a.tmp2 = getelementptr inbounds [2 x <2 x i32>], [2 x <2 x i32>]* %a.cast, i64 0, i64 1, i64 1
   %tmp2 = load i32* %a.tmp2
-  %a.tmp3 = getelementptr inbounds [2 x <2 x i32>]* %a.cast, i64 0, i64 1, i64 0
+  %a.tmp3 = getelementptr inbounds [2 x <2 x i32>], [2 x <2 x i32>]* %a.cast, i64 0, i64 1, i64 0
   %tmp3 = load i32* %a.tmp3
 ; CHECK-NOT: load
 ; CHECK:      extractelement <2 x i32> %x, i32 1
@@ -516,7 +516,7 @@ entry:
 ; CHECK-NOT: store
 
   %tmp1 = load i32* %a.i32
-  %a.tmp2 = getelementptr inbounds i32* %a.i32, i64 1
+  %a.tmp2 = getelementptr inbounds i32, i32* %a.i32, i64 1
   %tmp2 = load i32* %a.tmp2
 ; CHECK-NOT: load
 ; CHECK:      extractelement <2 x i32> %x, i32 0
@@ -539,7 +539,7 @@ entry:
 ; CHECK-NOT: alloca
 
   store i32 %x, i32* %a.i32
-  %a.tmp2 = getelementptr inbounds i32* %a.i32, i64 1
+  %a.tmp2 = getelementptr inbounds i32, i32* %a.i32, i64 1
   store i32 %y, i32* %a.tmp2
 ; CHECK-NOT: store
 ; CHECK:      %[[V1:.*]] = insertelement <2 x i32> undef, i32 %x, i32 0
@@ -564,7 +564,7 @@ entry:
 ; CHECK-NOT: alloca
 
   store <4 x i16> %x, <4 x i16>* %a.vec2
-  %a.tmp2 = getelementptr inbounds i32* %a.i32, i64 1
+  %a.tmp2 = getelementptr inbounds i32, i32* %a.i32, i64 1
   store i32 %y, i32* %a.tmp2
 ; CHECK-NOT: store
 ; CHECK:      %[[V1:.*]] = bitcast <4 x i16> %x to <2 x i32>
@@ -590,7 +590,7 @@ entry:
 ; CHECK-NOT: alloca
 
   store <4 x i16> %x, <4 x i16>* %a.vec2
-  %a.tmp2 = getelementptr inbounds i32* %a.i32, i64 1
+  %a.tmp2 = getelementptr inbounds i32, i32* %a.i32, i64 1
   store i32 %y, i32* %a.tmp2
 ; CHECK-NOT: store
 ; CHECK:      %[[V1:.*]] = bitcast i32 %y to <2 x i16>
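
The FileCheck patterns embedded as comments are updated in lockstep with the IR they verify, since they match the asm printer's textual output; the vector-promotion checks above, for example, now expect the two-type spelling (the line below is taken verbatim from the hunks above):

  ; CHECK: %[[gep:.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* %z, i64 0, i64 2

A pattern still written against the old single-type spelling would no longer match once the printer emits the explicit source element type.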

Modified: llvm/trunk/test/Transforms/SampleProfile/branch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SampleProfile/branch.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SampleProfile/branch.ll (original)
+++ llvm/trunk/test/Transforms/SampleProfile/branch.ll Fri Feb 27 13:29:02 2015
@@ -40,7 +40,7 @@ entry:
 ; CHECK: edge entry -> if.end probability is 1 / 2 = 50%
 
 if.end:                                           ; preds = %entry
-  %arrayidx = getelementptr inbounds i8** %argv, i64 1, !dbg !30
+  %arrayidx = getelementptr inbounds i8*, i8** %argv, i64 1, !dbg !30
   %0 = load i8** %arrayidx, align 8, !dbg !30, !tbaa !31
   %call = tail call i32 @atoi(i8* %0) #4, !dbg !30
   tail call void @llvm.dbg.value(metadata i32 %call, i64 0, metadata !17, metadata !{}), !dbg !30

Modified: llvm/trunk/test/Transforms/ScalarRepl/2003-05-29-ArrayFail.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2003-05-29-ArrayFail.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2003-05-29-ArrayFail.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2003-05-29-ArrayFail.ll Fri Feb 27 13:29:02 2015
@@ -5,9 +5,9 @@ target datalayout = "E-p:64:64:64-i1:8:8
 
 define i32 @test() nounwind {
 	%X = alloca [4 x i32]		; <[4 x i32]*> [#uses=1]
-	%Y = getelementptr [4 x i32]* %X, i64 0, i64 0		; <i32*> [#uses=1]
+	%Y = getelementptr [4 x i32], [4 x i32]* %X, i64 0, i64 0		; <i32*> [#uses=1]
         ; Must preserve arrayness!
-	%Z = getelementptr i32* %Y, i64 1		; <i32*> [#uses=1]
+	%Z = getelementptr i32, i32* %Y, i64 1		; <i32*> [#uses=1]
 	%A = load i32* %Z		; <i32> [#uses=1]
 	ret i32 %A
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/2003-09-12-IncorrectPromote.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2003-09-12-IncorrectPromote.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2003-09-12-IncorrectPromote.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2003-09-12-IncorrectPromote.ll Fri Feb 27 13:29:02 2015
@@ -4,8 +4,8 @@
 
 define i8* @test() {
 	%A = alloca [30 x i8]		; <[30 x i8]*> [#uses=1]
-	%B = getelementptr [30 x i8]* %A, i64 0, i64 0		; <i8*> [#uses=2]
-	%C = getelementptr i8* %B, i64 1		; <i8*> [#uses=1]
+	%B = getelementptr [30 x i8], [30 x i8]* %A, i64 0, i64 0		; <i8*> [#uses=2]
+	%C = getelementptr i8, i8* %B, i64 1		; <i8*> [#uses=1]
 	store i8 0, i8* %B
 	ret i8* %C
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/2003-10-29-ArrayProblem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2003-10-29-ArrayProblem.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2003-10-29-ArrayProblem.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2003-10-29-ArrayProblem.ll Fri Feb 27 13:29:02 2015
@@ -7,9 +7,9 @@ declare void @.iter_2(i32 (i8*)*, i8*)
 
 define i32 @main() {
 	%d = alloca %T		; <{ [80 x i8], i32, i32 }*> [#uses=2]
-	%tmp.0 = getelementptr %T* %d, i64 0, i32 2		; <i32*> [#uses=1]
+	%tmp.0 = getelementptr %T, %T* %d, i64 0, i32 2		; <i32*> [#uses=1]
 	store i32 0, i32* %tmp.0
-	%tmp.1 = getelementptr %T* %d, i64 0, i32 0, i64 0		; <i8*> [#uses=1]
+	%tmp.1 = getelementptr %T, %T* %d, i64 0, i32 0, i64 0		; <i8*> [#uses=1]
 	call void @.iter_2( i32 (i8*)* @.callback_1, i8* %tmp.1 )
 	ret i32 0
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/2006-11-07-InvalidArrayPromote.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2006-11-07-InvalidArrayPromote.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2006-11-07-InvalidArrayPromote.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2006-11-07-InvalidArrayPromote.ll Fri Feb 27 13:29:02 2015
@@ -5,13 +5,13 @@ define i32 @func(<4 x float> %v0, <4 x f
 	%vsiidx = alloca [2 x <4 x i32>], align 16		; <[2 x <4 x i32>]*> [#uses=3]
 	%tmp = call <4 x i32> @llvm.x86.sse2.cvttps2dq( <4 x float> %v0 )		; <<4 x i32>> [#uses=2]
 	%tmp.upgrd.1 = bitcast <4 x i32> %tmp to <2 x i64>		; <<2 x i64>> [#uses=0]
-	%tmp.upgrd.2 = getelementptr [2 x <4 x i32>]* %vsiidx, i32 0, i32 0		; <<4 x i32>*> [#uses=1]
+	%tmp.upgrd.2 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %vsiidx, i32 0, i32 0		; <<4 x i32>*> [#uses=1]
 	store <4 x i32> %tmp, <4 x i32>* %tmp.upgrd.2
 	%tmp10 = call <4 x i32> @llvm.x86.sse2.cvttps2dq( <4 x float> %v1 )		; <<4 x i32>> [#uses=2]
 	%tmp10.upgrd.3 = bitcast <4 x i32> %tmp10 to <2 x i64>		; <<2 x i64>> [#uses=0]
-	%tmp14 = getelementptr [2 x <4 x i32>]* %vsiidx, i32 0, i32 1		; <<4 x i32>*> [#uses=1]
+	%tmp14 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %vsiidx, i32 0, i32 1		; <<4 x i32>*> [#uses=1]
 	store <4 x i32> %tmp10, <4 x i32>* %tmp14
-	%tmp15 = getelementptr [2 x <4 x i32>]* %vsiidx, i32 0, i32 0, i32 4		; <i32*> [#uses=1]
+	%tmp15 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %vsiidx, i32 0, i32 0, i32 4		; <i32*> [#uses=1]
 	%tmp.upgrd.4 = load i32* %tmp15		; <i32> [#uses=1]
 	ret i32 %tmp.upgrd.4
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/2007-05-29-MemcpyPreserve.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2007-05-29-MemcpyPreserve.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2007-05-29-MemcpyPreserve.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2007-05-29-MemcpyPreserve.ll Fri Feb 27 13:29:02 2015
@@ -11,12 +11,12 @@ target triple = "i686-apple-darwin8"
 define void @_Z4testP9UnionTypePS0_(%struct.UnionType* %p, %struct.UnionType** %pointerToUnion) {
 entry:
   %tmp = alloca %struct.UnionType, align 8
-  %tmp2 = getelementptr %struct.UnionType* %tmp, i32 0, i32 0, i32 0
-  %tmp13 = getelementptr %struct.UnionType* %p, i32 0, i32 0, i32 0
+  %tmp2 = getelementptr %struct.UnionType, %struct.UnionType* %tmp, i32 0, i32 0, i32 0
+  %tmp13 = getelementptr %struct.UnionType, %struct.UnionType* %p, i32 0, i32 0, i32 0
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* %tmp13, i32 8, i32 0, i1 false)
   %tmp5 = load %struct.UnionType** %pointerToUnion
-  %tmp56 = getelementptr %struct.UnionType* %tmp5, i32 0, i32 0, i32 0
-  %tmp7 = getelementptr %struct.UnionType* %tmp, i32 0, i32 0, i32 0
+  %tmp56 = getelementptr %struct.UnionType, %struct.UnionType* %tmp5, i32 0, i32 0, i32 0
+  %tmp7 = getelementptr %struct.UnionType, %struct.UnionType* %tmp, i32 0, i32 0, i32 0
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp56, i8* %tmp7, i32 8, i32 0, i1 false)
   ret void
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll Fri Feb 27 13:29:02 2015
@@ -10,12 +10,12 @@ entry:
 	%tmp = alloca i32		; <i32*> [#uses=2]
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
 	store i16 %b, i16* %b_addr
-	%tmp1 = getelementptr %struct.S* %s, i32 0, i32 0		; <i16*> [#uses=1]
+	%tmp1 = getelementptr %struct.S, %struct.S* %s, i32 0, i32 0		; <i16*> [#uses=1]
 	%tmp2 = load i16* %b_addr, align 2		; <i16> [#uses=1]
 	store i16 %tmp2, i16* %tmp1, align 2
-	%tmp3 = getelementptr %struct.S* %s, i32 0, i32 0		; <i16*> [#uses=1]
+	%tmp3 = getelementptr %struct.S, %struct.S* %s, i32 0, i32 0		; <i16*> [#uses=1]
 	%tmp34 = bitcast i16* %tmp3 to [2 x i1]*		; <[2 x i1]*> [#uses=1]
-	%tmp5 = getelementptr [2 x i1]* %tmp34, i32 0, i32 1		; <i1*> [#uses=1]
+	%tmp5 = getelementptr [2 x i1], [2 x i1]* %tmp34, i32 0, i32 1		; <i1*> [#uses=1]
 	%tmp6 = load i1* %tmp5, align 1		; <i1> [#uses=1]
 	%tmp67 = zext i1 %tmp6 to i32		; <i32> [#uses=1]
 	store i32 %tmp67, i32* %tmp, align 4

Modified: llvm/trunk/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll Fri Feb 27 13:29:02 2015
@@ -10,8 +10,8 @@ entry:
 	%T3 = bitcast [1 x %struct.T]* %s to i32*
 	store i32 -61184, i32* %T3
 
-	%tmp16 = getelementptr [1 x %struct.T]* %s, i32 0, i32 0		; <%struct.T*> [#uses=1]
-	%tmp17 = getelementptr %struct.T* %tmp16, i32 0, i32 1		; <[3 x i8]*> [#uses=1]
+	%tmp16 = getelementptr [1 x %struct.T], [1 x %struct.T]* %s, i32 0, i32 0		; <%struct.T*> [#uses=1]
+	%tmp17 = getelementptr %struct.T, %struct.T* %tmp16, i32 0, i32 1		; <[3 x i8]*> [#uses=1]
 	%tmp1718 = bitcast [3 x i8]* %tmp17 to i32*		; <i32*> [#uses=1]
 	%tmp19 = load i32* %tmp1718, align 4		; <i32> [#uses=1]
 	%mask = and i32 %tmp19, 16777215		; <i32> [#uses=2]

Modified: llvm/trunk/test/Transforms/ScalarRepl/2008-02-28-SubElementExtractCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2008-02-28-SubElementExtractCrash.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2008-02-28-SubElementExtractCrash.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2008-02-28-SubElementExtractCrash.ll Fri Feb 27 13:29:02 2015
@@ -6,11 +6,11 @@ target triple = "i686-apple-darwin8"
 define i32 @main(i32 %argc, i8** %argv) {
 entry:
 	%c = alloca %struct..0anon		; <%struct..0anon*> [#uses=2]
-	%tmp2 = getelementptr %struct..0anon* %c, i32 0, i32 0		; <<1 x i64>*> [#uses=1]
+	%tmp2 = getelementptr %struct..0anon, %struct..0anon* %c, i32 0, i32 0		; <<1 x i64>*> [#uses=1]
 	store <1 x i64> zeroinitializer, <1 x i64>* %tmp2, align 8
-	%tmp7 = getelementptr %struct..0anon* %c, i32 0, i32 0		; <<1 x i64>*> [#uses=1]
+	%tmp7 = getelementptr %struct..0anon, %struct..0anon* %c, i32 0, i32 0		; <<1 x i64>*> [#uses=1]
 	%tmp78 = bitcast <1 x i64>* %tmp7 to [2 x i32]*		; <[2 x i32]*> [#uses=1]
-	%tmp9 = getelementptr [2 x i32]* %tmp78, i32 0, i32 0		; <i32*> [#uses=1]
+	%tmp9 = getelementptr [2 x i32], [2 x i32]* %tmp78, i32 0, i32 0		; <i32*> [#uses=1]
 	%tmp10 = load i32* %tmp9, align 4		; <i32> [#uses=0]
 	unreachable
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll Fri Feb 27 13:29:02 2015
@@ -14,7 +14,7 @@ define i32 @foo() {
         ; And store it
 	store { i32, i32 } %res2, { i32, i32 }* %target
         ; Actually use %target, so it doesn't get removed altogether
-        %ptr = getelementptr { i32, i32 }* %target, i32 0, i32 0
+        %ptr = getelementptr { i32, i32 }, { i32, i32 }* %target, i32 0, i32 0
         %val = load i32* %ptr
 	ret i32 %val
 }
@@ -27,7 +27,7 @@ define i32 @bar() {
         ; And store it
 	store [ 2 x i32 ] %res2, [ 2 x i32 ]* %target
         ; Actually use %target, so it doesn't get removed altogether
-        %ptr = getelementptr [ 2 x i32 ]* %target, i32 0, i32 0
+        %ptr = getelementptr [ 2 x i32 ], [ 2 x i32 ]* %target, i32 0, i32 0
         %val = load i32* %ptr
 	ret i32 %val
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll Fri Feb 27 13:29:02 2015
@@ -13,7 +13,7 @@ entry:
   %r1 = bitcast %struct.x* %r to i8*
   %s2 = bitcast %struct.x* %s to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %r1, i8* %s2, i32 12, i32 8, i1 false)
-  %1 = getelementptr %struct.x* %r, i32 0, i32 0, i32 1
+  %1 = getelementptr %struct.x, %struct.x* %r, i32 0, i32 0, i32 1
   %2 = load i32* %1, align 4
   ret i32 %2
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/2008-09-22-vector-gep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2008-09-22-vector-gep.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2008-09-22-vector-gep.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2008-09-22-vector-gep.ll Fri Feb 27 13:29:02 2015
@@ -12,11 +12,11 @@ target datalayout = "E-p:64:64:64-i1:8:8
 define void @main(%struct.two* %D, i16 %V) {
 entry:
 	%S = alloca %struct.two
-        %S.2 = getelementptr %struct.two* %S, i32 0, i32 1
+        %S.2 = getelementptr %struct.two, %struct.two* %S, i32 0, i32 1
         store i16 %V, i16* %S.2
         ; This gep is effectively a bitcast to i8*, but is sometimes generated
         ; because the type of the first element in %struct.two is i8.
-	%tmpS = getelementptr %struct.two* %S, i32 0, i32 0, i32 0 
+	%tmpS = getelementptr %struct.two, %struct.two* %S, i32 0, i32 0, i32 0 
 	%tmpD = bitcast %struct.two* %D to i8*
         call void @llvm.memmove.p0i8.p0i8.i32(i8* %tmpD, i8* %tmpS, i32 4, i32 1, i1 false)
         ret void

Modified: llvm/trunk/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll Fri Feb 27 13:29:02 2015
@@ -6,10 +6,10 @@ target triple = "i386-pc-linux-gnu"
 
 define i32 @f(i32 %x, i32 %y) {
        %instance = alloca %pair
-       %first = getelementptr %pair* %instance, i32 0, i32 0
+       %first = getelementptr %pair, %pair* %instance, i32 0, i32 0
        %cast = bitcast [1 x i32]* %first to i32*
        store i32 %x, i32* %cast
-       %second = getelementptr %pair* %instance, i32 0, i32 1
+       %second = getelementptr %pair, %pair* %instance, i32 0, i32 1
        store i32 %y, i32* %second
        %v = load i32* %cast
        ret i32 %v

Modified: llvm/trunk/test/Transforms/ScalarRepl/2009-03-04-MemCpyAlign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2009-03-04-MemCpyAlign.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2009-03-04-MemCpyAlign.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2009-03-04-MemCpyAlign.ll Fri Feb 27 13:29:02 2015
@@ -9,7 +9,7 @@ target datalayout = "E-p:64:64:64-i1:8:8
 define void @f(i8* %p) nounwind {
 entry:
         %s = alloca %struct.st, align 4  ; <%struct.st*> [#uses=2]
-        %0 = getelementptr %struct.st* %s, i32 0, i32 0  ; <i16*> [#uses=1]
+        %0 = getelementptr %struct.st, %struct.st* %s, i32 0, i32 0  ; <i16*> [#uses=1]
         store i16 1, i16* %0, align 4
         %s1 = bitcast %struct.st* %s to i8*  ; <i8*> [#uses=1]
         call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %s1, i32 2, i32 1, i1 false)

Modified: llvm/trunk/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll Fri Feb 27 13:29:02 2015
@@ -25,30 +25,30 @@ entry:
   %tmp2 = alloca %struct.int16x8x2_t
   %0 = alloca %struct.int16x8x2_t
   %"alloca point" = bitcast i32 0 to i32
-  %1 = getelementptr inbounds %struct.int16x8_t* %tmp_addr, i32 0, i32 0
+  %1 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %tmp_addr, i32 0, i32 0
   store <8 x i16> %tmp.0, <8 x i16>* %1
   store %struct.int16x8x2_t* %dst, %struct.int16x8x2_t** %dst_addr
-  %2 = getelementptr inbounds %struct.int16x8_t* %__ax, i32 0, i32 0
-  %3 = getelementptr inbounds %struct.int16x8_t* %tmp_addr, i32 0, i32 0
+  %2 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %__ax, i32 0, i32 0
+  %3 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %tmp_addr, i32 0, i32 0
   %4 = load <8 x i16>* %3, align 16
   store <8 x i16> %4, <8 x i16>* %2, align 16
-  %5 = getelementptr inbounds %struct.int16x8_t* %__bx, i32 0, i32 0
-  %6 = getelementptr inbounds %struct.int16x8_t* %tmp_addr, i32 0, i32 0
+  %5 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %__bx, i32 0, i32 0
+  %6 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %tmp_addr, i32 0, i32 0
   %7 = load <8 x i16>* %6, align 16
   store <8 x i16> %7, <8 x i16>* %5, align 16
-  %8 = getelementptr inbounds %struct.int16x8_t* %__ax, i32 0, i32 0
+  %8 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %__ax, i32 0, i32 0
   %9 = load <8 x i16>* %8, align 16
-  %10 = getelementptr inbounds %struct.int16x8_t* %__bx, i32 0, i32 0
+  %10 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %__bx, i32 0, i32 0
   %11 = load <8 x i16>* %10, align 16
-  %12 = getelementptr inbounds %union..0anon* %__rv, i32 0, i32 0
+  %12 = getelementptr inbounds %union..0anon, %union..0anon* %__rv, i32 0, i32 0
   %13 = bitcast %struct.int16x8x2_t* %12 to %struct.__neon_int16x8x2_t*
   %14 = shufflevector <8 x i16> %9, <8 x i16> %11, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
-  %15 = getelementptr inbounds %struct.__neon_int16x8x2_t* %13, i32 0, i32 0
+  %15 = getelementptr inbounds %struct.__neon_int16x8x2_t, %struct.__neon_int16x8x2_t* %13, i32 0, i32 0
   store <8 x i16> %14, <8 x i16>* %15
   %16 = shufflevector <8 x i16> %9, <8 x i16> %11, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
-  %17 = getelementptr inbounds %struct.__neon_int16x8x2_t* %13, i32 0, i32 1
+  %17 = getelementptr inbounds %struct.__neon_int16x8x2_t, %struct.__neon_int16x8x2_t* %13, i32 0, i32 1
   store <8 x i16> %16, <8 x i16>* %17
-  %18 = getelementptr inbounds %union..0anon* %__rv, i32 0, i32 0
+  %18 = getelementptr inbounds %union..0anon, %union..0anon* %__rv, i32 0, i32 0
   %19 = bitcast %struct.int16x8x2_t* %0 to i8*
   %20 = bitcast %struct.int16x8x2_t* %18 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %19, i8* %20, i32 32, i32 16, i1 false)

Modified: llvm/trunk/test/Transforms/ScalarRepl/2011-05-06-CapturedAlloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2011-05-06-CapturedAlloca.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2011-05-06-CapturedAlloca.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2011-05-06-CapturedAlloca.ll Fri Feb 27 13:29:02 2015
@@ -17,7 +17,7 @@ entry:
   %tmp = bitcast [4 x i32]* %l_10 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp, i8* bitcast ([4 x i32]* @func_1.l_10 to i8*), i64 16, i32 16, i1 false)
 ; CHECK: call void @llvm.memcpy
-  %arrayidx = getelementptr inbounds [4 x i32]* %l_10, i64 0, i64 0
+  %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %l_10, i64 0, i64 0
   %call = call i32* @noop(i32* %arrayidx)
   store i32 0, i32* %call
   ret i32 0

Modified: llvm/trunk/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll Fri Feb 27 13:29:02 2015
@@ -16,11 +16,11 @@ define void @main() uwtable ssp {
 entry:
   %ref.tmp2 = alloca %0, align 16
   %tmpcast = bitcast %0* %ref.tmp2 to %struct.Point_3*
-  %0 = getelementptr %0* %ref.tmp2, i64 0, i32 0
+  %0 = getelementptr %0, %0* %ref.tmp2, i64 0, i32 0
   store <2 x float> zeroinitializer, <2 x float>* %0, align 16
-  %1 = getelementptr inbounds %struct.Point_3* %tmpcast, i64 0, i32 0
-  %base.i.i.i = getelementptr inbounds %struct.PointC3* %1, i64 0, i32 0
-  %arrayidx.i.i.i.i = getelementptr inbounds %struct.array* %base.i.i.i, i64 0, i32 0, i64 0
+  %1 = getelementptr inbounds %struct.Point_3, %struct.Point_3* %tmpcast, i64 0, i32 0
+  %base.i.i.i = getelementptr inbounds %struct.PointC3, %struct.PointC3* %1, i64 0, i32 0
+  %arrayidx.i.i.i.i = getelementptr inbounds %struct.array, %struct.array* %base.i.i.i, i64 0, i32 0, i64 0
   %tmp5.i.i = load float* %arrayidx.i.i.i.i, align 4
   ret void
 }
@@ -33,7 +33,7 @@ define void @test1() uwtable ssp {
 entry:
   %ref.tmp2 = alloca {<2 x float>, float}, align 16
   %tmpcast = bitcast {<2 x float>, float}* %ref.tmp2 to float*
-  %0 = getelementptr {<2 x float>, float}* %ref.tmp2, i64 0, i32 0
+  %0 = getelementptr {<2 x float>, float}, {<2 x float>, float}* %ref.tmp2, i64 0, i32 0
   store <2 x float> zeroinitializer, <2 x float>* %0, align 16
   %tmp5.i.i = load float* %tmpcast, align 4
   ret void
@@ -50,8 +50,8 @@ define float @test2() uwtable ssp {
 entry:
   %ref.tmp2 = alloca {<2 x float>, float}, align 16
   %tmpcast = bitcast {<2 x float>, float}* %ref.tmp2 to float*
-  %tmpcast2 = getelementptr {<2 x float>, float}* %ref.tmp2, i64 0, i32 1
-  %0 = getelementptr {<2 x float>, float}* %ref.tmp2, i64 0, i32 0
+  %tmpcast2 = getelementptr {<2 x float>, float}, {<2 x float>, float}* %ref.tmp2, i64 0, i32 1
+  %0 = getelementptr {<2 x float>, float}, {<2 x float>, float}* %ref.tmp2, i64 0, i32 0
   store <2 x float> zeroinitializer, <2 x float>* %0, align 16
   store float 1.0, float* %tmpcast2, align 4
   %r1 = load float* %tmpcast, align 4
@@ -69,7 +69,7 @@ entry:
   %ai = alloca { <2 x float>, <2 x float> }, align 8
   store { <2 x float>, <2 x float> } {<2 x float> <float 0.0, float 1.0>, <2 x float> <float 2.0, float 3.0>}, { <2 x float>, <2 x float> }* %ai, align 8
   %tmpcast = bitcast { <2 x float>, <2 x float> }* %ai to [4 x float]*
-  %arrayidx = getelementptr inbounds [4 x float]* %tmpcast, i64 0, i64 3
+  %arrayidx = getelementptr inbounds [4 x float], [4 x float]* %tmpcast, i64 0, i64 3
   %f = load float* %arrayidx, align 4
   ret float %f
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/2011-06-17-VectorPartialMemset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2011-06-17-VectorPartialMemset.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2011-06-17-VectorPartialMemset.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2011-06-17-VectorPartialMemset.ll Fri Feb 27 13:29:02 2015
@@ -29,7 +29,7 @@ entry:
   %p = bitcast { <4 x float> }* %a to i8*
   call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 16, i32 16, i1 false)
   %q = bitcast { <4 x float> }* %a to [2 x <2 x float>]*
-  %arrayidx = getelementptr inbounds [2 x <2 x float>]* %q, i32 0, i32 0
+  %arrayidx = getelementptr inbounds [2 x <2 x float>], [2 x <2 x float>]* %q, i32 0, i32 0
   store <2 x float> undef, <2 x float>* %arrayidx, align 8
   ret void
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/2011-10-22-VectorCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2011-10-22-VectorCrash.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2011-10-22-VectorCrash.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2011-10-22-VectorCrash.ll Fri Feb 27 13:29:02 2015
@@ -11,7 +11,7 @@ define void @test() nounwind {
 entry:
   %u = alloca %union.anon, align 16
   %u164 = bitcast %union.anon* %u to [4 x i32]*
-  %arrayidx165 = getelementptr inbounds [4 x i32]* %u164, i32 0, i32 0
+  %arrayidx165 = getelementptr inbounds [4 x i32], [4 x i32]* %u164, i32 0, i32 0
   store i32 undef, i32* %arrayidx165, align 4
   %v186 = bitcast %union.anon* %u to <4 x float>*
   store <4 x float> undef, <4 x float>* %v186, align 16

Modified: llvm/trunk/test/Transforms/ScalarRepl/2011-11-11-EmptyStruct.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/2011-11-11-EmptyStruct.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/2011-11-11-EmptyStruct.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/2011-11-11-EmptyStruct.ll Fri Feb 27 13:29:02 2015
@@ -13,7 +13,7 @@ define double @test() nounwind uwtable s
 entry:
   %retval = alloca %struct.S, align 8
   %ret = alloca %struct.S, align 8
-  %b = getelementptr inbounds %struct.S* %ret, i32 0, i32 1
+  %b = getelementptr inbounds %struct.S, %struct.S* %ret, i32 0, i32 1
   store double 1.000000e+00, double* %b, align 8
   %0 = bitcast %struct.S* %retval to i8*
   %1 = bitcast %struct.S* %ret to i8*

Modified: llvm/trunk/test/Transforms/ScalarRepl/AggregatePromote.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/AggregatePromote.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/AggregatePromote.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/AggregatePromote.ll Fri Feb 27 13:29:02 2015
@@ -18,21 +18,21 @@ define i8 @test2(i64 %X) {
 	%X_addr = alloca i64		; <i64*> [#uses=2]
 	store i64 %X, i64* %X_addr
 	%tmp.0 = bitcast i64* %X_addr to i32*		; <i32*> [#uses=1]
-	%tmp.1 = getelementptr i32* %tmp.0, i32 1		; <i32*> [#uses=1]
+	%tmp.1 = getelementptr i32, i32* %tmp.0, i32 1		; <i32*> [#uses=1]
 	%tmp.2 = bitcast i32* %tmp.1 to i8*		; <i8*> [#uses=1]
-	%tmp.3 = getelementptr i8* %tmp.2, i32 3		; <i8*> [#uses=1]
+	%tmp.3 = getelementptr i8, i8* %tmp.2, i32 3		; <i8*> [#uses=1]
 	%tmp.2.upgrd.1 = load i8* %tmp.3		; <i8> [#uses=1]
 	ret i8 %tmp.2.upgrd.1
 }
 
 define i16 @crafty(i64 %X) {
 	%a = alloca { i64 }		; <{ i64 }*> [#uses=2]
-	%tmp.0 = getelementptr { i64 }* %a, i32 0, i32 0		; <i64*> [#uses=1]
+	%tmp.0 = getelementptr { i64 }, { i64 }* %a, i32 0, i32 0		; <i64*> [#uses=1]
 	store i64 %X, i64* %tmp.0
 	%tmp.3 = bitcast { i64 }* %a to [4 x i16]*		; <[4 x i16]*> [#uses=2]
-	%tmp.4 = getelementptr [4 x i16]* %tmp.3, i32 0, i32 3		; <i16*> [#uses=1]
+	%tmp.4 = getelementptr [4 x i16], [4 x i16]* %tmp.3, i32 0, i32 3		; <i16*> [#uses=1]
 	%tmp.5 = load i16* %tmp.4		; <i16> [#uses=1]
-	%tmp.8 = getelementptr [4 x i16]* %tmp.3, i32 0, i32 2		; <i16*> [#uses=1]
+	%tmp.8 = getelementptr [4 x i16], [4 x i16]* %tmp.3, i32 0, i32 2		; <i16*> [#uses=1]
 	%tmp.9 = load i16* %tmp.8		; <i16> [#uses=1]
 	%tmp.10 = or i16 %tmp.9, %tmp.5		; <i16> [#uses=1]
 	ret i16 %tmp.10
@@ -42,9 +42,9 @@ define i16 @crafty2(i64 %X) {
 	%a = alloca i64		; <i64*> [#uses=2]
 	store i64 %X, i64* %a
 	%tmp.3 = bitcast i64* %a to [4 x i16]*		; <[4 x i16]*> [#uses=2]
-	%tmp.4 = getelementptr [4 x i16]* %tmp.3, i32 0, i32 3		; <i16*> [#uses=1]
+	%tmp.4 = getelementptr [4 x i16], [4 x i16]* %tmp.3, i32 0, i32 3		; <i16*> [#uses=1]
 	%tmp.5 = load i16* %tmp.4		; <i16> [#uses=1]
-	%tmp.8 = getelementptr [4 x i16]* %tmp.3, i32 0, i32 2		; <i16*> [#uses=1]
+	%tmp.8 = getelementptr [4 x i16], [4 x i16]* %tmp.3, i32 0, i32 2		; <i16*> [#uses=1]
 	%tmp.9 = load i16* %tmp.8		; <i16> [#uses=1]
 	%tmp.10 = or i16 %tmp.9, %tmp.5		; <i16> [#uses=1]
 	ret i16 %tmp.10

Modified: llvm/trunk/test/Transforms/ScalarRepl/address-space.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/address-space.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/address-space.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/address-space.ll Fri Feb 27 13:29:02 2015
@@ -13,16 +13,16 @@ target triple = "x86_64-apple-darwin10"
 define void @Test(%struct.anon addrspace(2)* %pPtr) nounwind {
 entry:
   %s = alloca %struct.anon, align 4               ; <%struct.anon*> [#uses=3]
-  %arrayidx = getelementptr inbounds %struct.anon addrspace(2)* %pPtr, i64 0 ; <%struct.anon addrspace(2)*> [#uses=1]
+  %arrayidx = getelementptr inbounds %struct.anon, %struct.anon addrspace(2)* %pPtr, i64 0 ; <%struct.anon addrspace(2)*> [#uses=1]
   %tmp1 = bitcast %struct.anon* %s to i8*         ; <i8*> [#uses=1]
   %tmp2 = bitcast %struct.anon addrspace(2)* %arrayidx to i8 addrspace(2)* ; <i8 addrspace(2)*> [#uses=1]
   call void @llvm.memcpy.p0i8.p2i8.i64(i8* %tmp1, i8 addrspace(2)* %tmp2, i64 4, i32 4, i1 false)
-  %tmp3 = getelementptr inbounds %struct.anon* %s, i32 0, i32 0 ; <[1 x float]*> [#uses=1]
-  %arrayidx4 = getelementptr inbounds [1 x float]* %tmp3, i32 0, i64 0 ; <float*> [#uses=2]
+  %tmp3 = getelementptr inbounds %struct.anon, %struct.anon* %s, i32 0, i32 0 ; <[1 x float]*> [#uses=1]
+  %arrayidx4 = getelementptr inbounds [1 x float], [1 x float]* %tmp3, i32 0, i64 0 ; <float*> [#uses=2]
   %tmp5 = load float* %arrayidx4                  ; <float> [#uses=1]
   %sub = fsub float %tmp5, 5.000000e+00           ; <float> [#uses=1]
   store float %sub, float* %arrayidx4
-  %arrayidx7 = getelementptr inbounds %struct.anon addrspace(2)* %pPtr, i64 0 ; <%struct.anon addrspace(2)*> [#uses=1]
+  %arrayidx7 = getelementptr inbounds %struct.anon, %struct.anon addrspace(2)* %pPtr, i64 0 ; <%struct.anon addrspace(2)*> [#uses=1]
   %tmp8 = bitcast %struct.anon addrspace(2)* %arrayidx7 to i8 addrspace(2)* ; <i8 addrspace(2)*> [#uses=1]
   %tmp9 = bitcast %struct.anon* %s to i8*         ; <i8*> [#uses=1]
   call void @llvm.memcpy.p2i8.p0i8.i64(i8 addrspace(2)* %tmp8, i8* %tmp9, i64 4, i32 4, i1 false)

Modified: llvm/trunk/test/Transforms/ScalarRepl/arraytest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/arraytest.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/arraytest.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/arraytest.ll Fri Feb 27 13:29:02 2015
@@ -3,7 +3,7 @@ target datalayout = "E-p:64:64:64-i1:8:8
 
 define i32 @test() {
 	%X = alloca [4 x i32]		; <[4 x i32]*> [#uses=1]
-	%Y = getelementptr [4 x i32]* %X, i64 0, i64 0		; <i32*> [#uses=2]
+	%Y = getelementptr [4 x i32], [4 x i32]* %X, i64 0, i64 0		; <i32*> [#uses=2]
 	store i32 0, i32* %Y
 	%Z = load i32* %Y		; <i32> [#uses=1]
 	ret i32 %Z

Modified: llvm/trunk/test/Transforms/ScalarRepl/badarray.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/badarray.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/badarray.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/badarray.ll Fri Feb 27 13:29:02 2015
@@ -10,7 +10,7 @@ define i32 @test1() {
 ; CHECK-LABEL: @test1(
 ; CHECK-NOT: = alloca
 	%X = alloca [4 x i32]
-	%Y = getelementptr [4 x i32]* %X, i64 0, i64 6		; <i32*> [#uses=2]
+	%Y = getelementptr [4 x i32], [4 x i32]* %X, i64 0, i64 6		; <i32*> [#uses=2]
 	store i32 0, i32* %Y
 	%Z = load i32* %Y		; <i32> [#uses=1]
 	ret i32 %Z
@@ -38,9 +38,9 @@ entry:
 ; CHECK-NOT: = alloca
 ; CHECK: store i64
   %var_1 = alloca %padded, align 8                ; <%padded*> [#uses=3]
-  %0 = getelementptr inbounds %padded* %var_1, i32 0, i32 0 ; <%base*> [#uses=2]
+  %0 = getelementptr inbounds %padded, %padded* %var_1, i32 0, i32 0 ; <%base*> [#uses=2]
   
-  %p2 = getelementptr inbounds %base* %0, i32 0, i32 1, i32 0 ; <i8*> [#uses=1]
+  %p2 = getelementptr inbounds %base, %base* %0, i32 0, i32 1, i32 0 ; <i8*> [#uses=1]
   store i8 72, i8* %p2, align 1
   
   ; 72 -> a[0].

Modified: llvm/trunk/test/Transforms/ScalarRepl/basictest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/basictest.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/basictest.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/basictest.ll Fri Feb 27 13:29:02 2015
@@ -3,7 +3,7 @@ target datalayout = "E-p:64:64:64-i1:8:8
 
 define i32 @test1() {
 	%X = alloca { i32, float }		; <{ i32, float }*> [#uses=1]
-	%Y = getelementptr { i32, float }* %X, i64 0, i32 0		; <i32*> [#uses=2]
+	%Y = getelementptr { i32, float }, { i32, float }* %X, i64 0, i32 0		; <i32*> [#uses=2]
 	store i32 0, i32* %Y
 	%Z = load i32* %Y		; <i32> [#uses=1]
 	ret i32 %Z

Modified: llvm/trunk/test/Transforms/ScalarRepl/bitfield-sroa.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/bitfield-sroa.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/bitfield-sroa.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/bitfield-sroa.ll Fri Feb 27 13:29:02 2015
@@ -7,7 +7,7 @@ define i8 @foo(i64 %A) {
         %ALL = alloca %t, align 8 
         %tmp59172 = bitcast %t* %ALL to i64*
         store i64 %A, i64* %tmp59172, align 8
-        %C = getelementptr %t* %ALL, i32 0, i32 0, i32 1             
+        %C = getelementptr %t, %t* %ALL, i32 0, i32 0, i32 1             
         %D = bitcast i16* %C to i32*    
         %E = load i32* %D, align 4     
         %F = bitcast %t* %ALL to i8* 

Modified: llvm/trunk/test/Transforms/ScalarRepl/copy-aggregate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/copy-aggregate.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/copy-aggregate.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/copy-aggregate.ll Fri Feb 27 13:29:02 2015
@@ -10,8 +10,8 @@ define i32 @test1(i64 %V) nounwind {
 	%Y = bitcast {{i32,i32}}* %X to i64*
 	store i64 %V, i64* %Y
 
-	%A = getelementptr {{i32,i32}}* %X, i32 0, i32 0, i32 0
-	%B = getelementptr {{i32,i32}}* %X, i32 0, i32 0, i32 1
+	%A = getelementptr {{i32,i32}}, {{i32,i32}}* %X, i32 0, i32 0, i32 0
+	%B = getelementptr {{i32,i32}}, {{i32,i32}}* %X, i32 0, i32 0, i32 1
 	%a = load i32* %A
 	%b = load i32* %B
 	%c = add i32 %a, %b
@@ -26,8 +26,8 @@ define float @test2(i128 %V) nounwind {
 	%Y = bitcast {[4 x float]}* %X to i128*
 	store i128 %V, i128* %Y
 
-	%A = getelementptr {[4 x float]}* %X, i32 0, i32 0, i32 0
-	%B = getelementptr {[4 x float]}* %X, i32 0, i32 0, i32 3
+	%A = getelementptr {[4 x float]}, {[4 x float]}* %X, i32 0, i32 0, i32 0
+	%B = getelementptr {[4 x float]}, {[4 x float]}* %X, i32 0, i32 0, i32 3
 	%a = load float* %A
 	%b = load float* %B
 	%c = fadd float %a, %b
@@ -40,8 +40,8 @@ define i64 @test3(i32 %a, i32 %b) nounwi
 ; CHECK-NOT: alloca
 	%X = alloca {{i32, i32}}
 
-	%A = getelementptr {{i32,i32}}* %X, i32 0, i32 0, i32 0
-	%B = getelementptr {{i32,i32}}* %X, i32 0, i32 0, i32 1
+	%A = getelementptr {{i32,i32}}, {{i32,i32}}* %X, i32 0, i32 0, i32 0
+	%B = getelementptr {{i32,i32}}, {{i32,i32}}* %X, i32 0, i32 0, i32 1
         store i32 %a, i32* %A
         store i32 %b, i32* %B
 
@@ -55,8 +55,8 @@ define i128 @test4(float %a, float %b) n
 ; CHECK: test4
 ; CHECK-NOT: alloca
 	%X = alloca {[4 x float]}
-	%A = getelementptr {[4 x float]}* %X, i32 0, i32 0, i32 0
-	%B = getelementptr {[4 x float]}* %X, i32 0, i32 0, i32 3
+	%A = getelementptr {[4 x float]}, {[4 x float]}* %X, i32 0, i32 0, i32 0
+	%B = getelementptr {[4 x float]}, {[4 x float]}* %X, i32 0, i32 0, i32 3
 	store float %a, float* %A
 	store float %b, float* %B
         

Modified: llvm/trunk/test/Transforms/ScalarRepl/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/crash.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/crash.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/crash.ll Fri Feb 27 13:29:02 2015
@@ -22,13 +22,13 @@ if.end:
 
 define void @test2() {
   %E = alloca { { i32, float, double, i64 }, { i32, float, double, i64 } }        ; <{ { i32, float, double, i64 }, { i32, float, double, i64 } }*> [#uses=1]
-  %tmp.151 = getelementptr { { i32, float, double, i64 }, { i32, float, double, i64 } }* %E, i64 0, i32 1, i32 3          ; <i64*> [#uses=0]
+  %tmp.151 = getelementptr { { i32, float, double, i64 }, { i32, float, double, i64 } }, { { i32, float, double, i64 }, { i32, float, double, i64 } }* %E, i64 0, i32 1, i32 3          ; <i64*> [#uses=0]
   ret void
 }
 
 define i32 @test3() {
         %X = alloca { [4 x i32] }               ; <{ [4 x i32] }*> [#uses=1]
-        %Y = getelementptr { [4 x i32] }* %X, i64 0, i32 0, i64 2               ; <i32*> [#uses=2]
+        %Y = getelementptr { [4 x i32] }, { [4 x i32] }* %X, i64 0, i32 0, i64 2               ; <i32*> [#uses=2]
         store i32 4, i32* %Y
         %Z = load i32* %Y               ; <i32> [#uses=1]
         ret i32 %Z
@@ -128,7 +128,7 @@ entry:
         %tmp.i = load %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"** %this_addr.i          ; <%"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"*> [#uses=1]
         %tmp.i.upgrd.1 = bitcast %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"* %tmp.i to %"struct.__gnu_cxx::bitmap_allocator<char>"*              ; <%"struct.__gnu_cxx::bitmap_allocator<char>"*> [#uses=0]
         %tmp1.i = load %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"** %this_addr.i         ; <%"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"*> [#uses=1]
-        %tmp.i.upgrd.2 = getelementptr %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"* %tmp1.i, i32 0, i32 0         ; <%"struct.__gnu_cxx::bitmap_allocator<char>::_Alloc_block"**> [#uses=0]
+        %tmp.i.upgrd.2 = getelementptr %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>", %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"* %tmp1.i, i32 0, i32 0         ; <%"struct.__gnu_cxx::bitmap_allocator<char>::_Alloc_block"**> [#uses=0]
         unreachable
 }
 
@@ -162,7 +162,7 @@ cond_next:              ; preds = %cond_
 
 cond_next34:            ; preds = %cond_next
         %i.2.reload22 = load i32* null          ; <i32> [#uses=1]
-        %tmp51 = getelementptr %struct.aal_spanbucket_t* %SB, i32 0, i32 2, i32 0, i32 0, i32 %i.2.reload22, i32 1      
+        %tmp51 = getelementptr %struct.aal_spanbucket_t, %struct.aal_spanbucket_t* %SB, i32 0, i32 2, i32 0, i32 0, i32 %i.2.reload22, i32 1      
         ; <i16*> [#uses=0]
         ret void
 
@@ -180,7 +180,7 @@ cond_next79:            ; preds = %cond_
 define void @test8() {
 entry:
         %v = alloca %struct.c37304a__vrec
-        %0 = getelementptr %struct.c37304a__vrec* %v, i32 0, i32 0             
+        %0 = getelementptr %struct.c37304a__vrec, %struct.c37304a__vrec* %v, i32 0, i32 0             
         store i8 8, i8* %0, align 1
         unreachable
 }
@@ -193,7 +193,7 @@ entry:
 define i32 @test9() {
 entry:
         %.compoundliteral = alloca %0           
-        %tmp228 = getelementptr %0* %.compoundliteral, i32 0, i32 7
+        %tmp228 = getelementptr %0, %0* %.compoundliteral, i32 0, i32 7
         %tmp229 = bitcast [0 x i16]* %tmp228 to i8*             
         call void @llvm.memset.p0i8.i64(i8* %tmp229, i8 0, i64 0, i32 2, i1 false)
         unreachable
@@ -207,7 +207,7 @@ declare void @llvm.memset.i64(i8* nocapt
 define void @test10() {
 entry:
         %w = alloca %wrapper, align 8           ; <%wrapper*> [#uses=1]
-        %0 = getelementptr %wrapper* %w, i64 0, i32 0           ; <i1*>
+        %0 = getelementptr %wrapper, %wrapper* %w, i64 0, i32 0           ; <i1*>
         store i1 true, i1* %0
         ret void
 }
@@ -220,7 +220,7 @@ entry:
         %a = alloca %struct.singlebool, align 1         ; <%struct.singlebool*> [#uses=2]
         %storetmp.i = bitcast %struct.singlebool* %a to i1*             ; <i1*> [#uses=1]
         store i1 true, i1* %storetmp.i
-        %tmp = getelementptr %struct.singlebool* %a, i64 0, i32 0               ; <i8*> [#uses=1]
+        %tmp = getelementptr %struct.singlebool, %struct.singlebool* %a, i64 0, i32 0               ; <i8*> [#uses=1]
         %tmp1 = load i8* %tmp           ; <i8> [#uses=1]
         ret i8 %tmp1
 }
@@ -246,7 +246,7 @@ define void @test12() {
 bb4.i:
         %malloccall = tail call i8* @malloc(i32 0)
         %0 = bitcast i8* %malloccall to [0 x %struct.Item]*
-        %.sub.i.c.i = getelementptr [0 x %struct.Item]* %0, i32 0, i32 0                ; <%struct.Item*> [#uses=0]
+        %.sub.i.c.i = getelementptr [0 x %struct.Item], [0 x %struct.Item]* %0, i32 0, i32 0                ; <%struct.Item*> [#uses=0]
         unreachable
 }
 declare noalias i8* @malloc(i32)
@@ -268,13 +268,13 @@ define fastcc void @test() optsize inlin
 entry:
   %alloc.0.0 = alloca <4 x float>, align 16
   %bitcast = bitcast <4 x float>* %alloc.0.0 to [4 x float]*
-  %idx3 = getelementptr inbounds [4 x float]* %bitcast, i32 0, i32 3
+  %idx3 = getelementptr inbounds [4 x float], [4 x float]* %bitcast, i32 0, i32 3
   store float 0.000000e+00, float* %idx3, align 4
   br label %for.body10
 
 for.body10:                                       ; preds = %for.body10, %entry
   %loopidx = phi i32 [ 0, %entry ], [ undef, %for.body10 ]
-  %unusedidx = getelementptr inbounds <4 x float>* %alloc.0.0, i32 0, i32 %loopidx
+  %unusedidx = getelementptr inbounds <4 x float>, <4 x float>* %alloc.0.0, i32 0, i32 %loopidx
   br i1 undef, label %for.end, label %for.body10
 
 for.end:                                          ; preds = %for.body10

Modified: llvm/trunk/test/Transforms/ScalarRepl/inline-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/inline-vector.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/inline-vector.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/inline-vector.ll Fri Feb 27 13:29:02 2015
@@ -42,7 +42,7 @@ for.body:
   br label %for.cond
 
 for.end:                                          ; preds = %for.cond
-  %x = getelementptr inbounds %struct.Vector4* %vector, i32 0, i32 0
+  %x = getelementptr inbounds %struct.Vector4, %struct.Vector4* %vector, i32 0, i32 0
   %tmp5 = load float* %x, align 16
   %conv = fpext float %tmp5 to double
   %call = call i32 (...)* @printf(double %conv) nounwind

Modified: llvm/trunk/test/Transforms/ScalarRepl/lifetime.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/lifetime.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/lifetime.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/lifetime.ll Fri Feb 27 13:29:02 2015
@@ -11,9 +11,9 @@ declare void @llvm.lifetime.end(i64, i8*
 define void @test1() {
 ; CHECK-LABEL: @test1(
   %A = alloca %t1
-  %A1 = getelementptr %t1* %A, i32 0, i32 0
-  %A2 = getelementptr %t1* %A, i32 0, i32 1
-  %A3 = getelementptr %t1* %A, i32 0, i32 2
+  %A1 = getelementptr %t1, %t1* %A, i32 0, i32 0
+  %A2 = getelementptr %t1, %t1* %A, i32 0, i32 1
+  %A3 = getelementptr %t1, %t1* %A, i32 0, i32 2
   %B = bitcast i32* %A1 to i8*
   store i32 0, i32* %A1
   call void @llvm.lifetime.start(i64 -1, i8* %B)
@@ -24,9 +24,9 @@ define void @test1() {
 define void @test2() {
 ; CHECK-LABEL: @test2(
   %A = alloca %t1
-  %A1 = getelementptr %t1* %A, i32 0, i32 0
-  %A2 = getelementptr %t1* %A, i32 0, i32 1
-  %A3 = getelementptr %t1* %A, i32 0, i32 2
+  %A1 = getelementptr %t1, %t1* %A, i32 0, i32 0
+  %A2 = getelementptr %t1, %t1* %A, i32 0, i32 1
+  %A3 = getelementptr %t1, %t1* %A, i32 0, i32 2
   %B = bitcast i32* %A2 to i8*
   store i32 0, i32* %A2
   call void @llvm.lifetime.start(i64 -1, i8* %B)
@@ -38,9 +38,9 @@ define void @test2() {
 define void @test3() {
 ; CHECK-LABEL: @test3(
   %A = alloca %t1
-  %A1 = getelementptr %t1* %A, i32 0, i32 0
-  %A2 = getelementptr %t1* %A, i32 0, i32 1
-  %A3 = getelementptr %t1* %A, i32 0, i32 2
+  %A1 = getelementptr %t1, %t1* %A, i32 0, i32 0
+  %A2 = getelementptr %t1, %t1* %A, i32 0, i32 1
+  %A3 = getelementptr %t1, %t1* %A, i32 0, i32 2
   %B = bitcast i32* %A2 to i8*
   store i32 0, i32* %A2
   call void @llvm.lifetime.start(i64 6, i8* %B)
@@ -52,9 +52,9 @@ define void @test3() {
 define void @test4() {
 ; CHECK-LABEL: @test4(
   %A = alloca %t1
-  %A1 = getelementptr %t1* %A, i32 0, i32 0
-  %A2 = getelementptr %t1* %A, i32 0, i32 1
-  %A3 = getelementptr %t1* %A, i32 0, i32 2
+  %A1 = getelementptr %t1, %t1* %A, i32 0, i32 0
+  %A2 = getelementptr %t1, %t1* %A, i32 0, i32 1
+  %A3 = getelementptr %t1, %t1* %A, i32 0, i32 2
   %B = bitcast i32* %A2 to i8*
   store i32 0, i32* %A2
   call void @llvm.lifetime.start(i64 1, i8* %B)
@@ -72,19 +72,19 @@ define void @test5() {
 ; CHECK: alloca{{.*}}i8
 ; CHECK: alloca{{.*}}i8
 
-  %A21 = getelementptr %t2* %A, i32 0, i32 1, i32 0
-  %A22 = getelementptr %t2* %A, i32 0, i32 1, i32 1
-  %A23 = getelementptr %t2* %A, i32 0, i32 1, i32 2
-  %A24 = getelementptr %t2* %A, i32 0, i32 1, i32 3
+  %A21 = getelementptr %t2, %t2* %A, i32 0, i32 1, i32 0
+  %A22 = getelementptr %t2, %t2* %A, i32 0, i32 1, i32 1
+  %A23 = getelementptr %t2, %t2* %A, i32 0, i32 1, i32 2
+  %A24 = getelementptr %t2, %t2* %A, i32 0, i32 1, i32 3
 ; CHECK-NOT: store i8 1
   store i8 1, i8* %A21
   store i8 2, i8* %A22
   store i8 3, i8* %A23
   store i8 4, i8* %A24
 
-  %A1 = getelementptr %t2* %A, i32 0, i32 0
-  %A2 = getelementptr %t2* %A, i32 0, i32 1, i32 1
-  %A3 = getelementptr %t2* %A, i32 0, i32 2
+  %A1 = getelementptr %t2, %t2* %A, i32 0, i32 0
+  %A2 = getelementptr %t2, %t2* %A, i32 0, i32 1, i32 1
+  %A3 = getelementptr %t2, %t2* %A, i32 0, i32 2
   store i8 0, i8* %A2
   call void @llvm.lifetime.start(i64 5, i8* %A2)
 ; CHECK: llvm.lifetime{{.*}}i64 1
@@ -103,10 +103,10 @@ define void @test6() {
 ; CHECK: alloca i8
 ; CHECK: alloca i8
 
-  %A11 = getelementptr %t3* %A, i32 0, i32 0, i32 0
-  %A12 = getelementptr %t3* %A, i32 0, i32 0, i32 1
-  %A13 = getelementptr %t3* %A, i32 0, i32 0, i32 2
-  %A14 = getelementptr %t3* %A, i32 0, i32 0, i32 3
+  %A11 = getelementptr %t3, %t3* %A, i32 0, i32 0, i32 0
+  %A12 = getelementptr %t3, %t3* %A, i32 0, i32 0, i32 1
+  %A13 = getelementptr %t3, %t3* %A, i32 0, i32 0, i32 2
+  %A14 = getelementptr %t3, %t3* %A, i32 0, i32 0, i32 3
   store i16 11, i16* %A11
   store i16 12, i16* %A12
   store i16 13, i16* %A13
@@ -116,10 +116,10 @@ define void @test6() {
 ; CHECK-NOT: store i16 13
 ; CHECK-NOT: store i16 14
 
-  %A21 = getelementptr %t3* %A, i32 0, i32 1, i32 0
-  %A22 = getelementptr %t3* %A, i32 0, i32 1, i32 1
-  %A23 = getelementptr %t3* %A, i32 0, i32 1, i32 2
-  %A24 = getelementptr %t3* %A, i32 0, i32 1, i32 3
+  %A21 = getelementptr %t3, %t3* %A, i32 0, i32 1, i32 0
+  %A22 = getelementptr %t3, %t3* %A, i32 0, i32 1, i32 1
+  %A23 = getelementptr %t3, %t3* %A, i32 0, i32 1, i32 2
+  %A24 = getelementptr %t3, %t3* %A, i32 0, i32 1, i32 3
   store i8 21, i8* %A21
   store i8 22, i8* %A22
   store i8 23, i8* %A23

Modified: llvm/trunk/test/Transforms/ScalarRepl/load-store-aggregate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/load-store-aggregate.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/load-store-aggregate.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/load-store-aggregate.ll Fri Feb 27 13:29:02 2015
@@ -14,7 +14,7 @@ entry:
         %V = load %struct.foo* %P
         store %struct.foo %V, %struct.foo* %L
 
-	%tmp4 = getelementptr %struct.foo* %L, i32 0, i32 0		; <i32*> [#uses=1]
+	%tmp4 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 0		; <i32*> [#uses=1]
 	%tmp5 = load i32* %tmp4		; <i32> [#uses=1]
 	ret i32 %tmp5
 }
@@ -22,9 +22,9 @@ entry:
 define %struct.foo @test2(i32 %A, i32 %B) {
 entry:
 	%L = alloca %struct.foo, align 8		; <%struct.foo*> [#uses=2]
-        %L.0 = getelementptr %struct.foo* %L, i32 0, i32 0
+        %L.0 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 0
         store i32 %A, i32* %L.0
-        %L.1 = getelementptr %struct.foo* %L, i32 0, i32 1
+        %L.1 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 1
         store i32 %B, i32* %L.1
         %V = load %struct.foo* %L
         ret %struct.foo %V

Modified: llvm/trunk/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll Fri Feb 27 13:29:02 2015
@@ -12,8 +12,8 @@ target triple = "powerpc-apple-darwin8.8
 define i32 @test1(%struct.foo* %P) {
 entry:
 	%L = alloca %struct.foo, align 2		; <%struct.foo*> [#uses=1]
-	%L2 = getelementptr %struct.foo* %L, i32 0, i32 0		; <i8*> [#uses=2]
-	%tmp13 = getelementptr %struct.foo* %P, i32 0, i32 0		; <i8*> [#uses=1]
+	%L2 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 0		; <i8*> [#uses=2]
+	%tmp13 = getelementptr %struct.foo, %struct.foo* %P, i32 0, i32 0		; <i8*> [#uses=1]
 	call void @llvm.memcpy.p0i8.p0i8.i32( i8* %L2, i8* %tmp13, i32 2, i32 1, i1 false)
 	%tmp5 = load i8* %L2		; <i8> [#uses=1]
 	%tmp56 = sext i8 %tmp5 to i32		; <i32> [#uses=1]

Modified: llvm/trunk/test/Transforms/ScalarRepl/memset-aggregate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/memset-aggregate.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/memset-aggregate.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/memset-aggregate.ll Fri Feb 27 13:29:02 2015
@@ -15,7 +15,7 @@ entry:
 	%L2 = bitcast %struct.foo* %L to i8*		; <i8*> [#uses=1]
 	%tmp13 = bitcast %struct.foo* %P to i8*		; <i8*> [#uses=1]
         call void @llvm.memcpy.p0i8.p0i8.i32(i8* %L2, i8* %tmp13, i32 8, i32 4, i1 false)
-	%tmp4 = getelementptr %struct.foo* %L, i32 0, i32 0		; <i32*> [#uses=1]
+	%tmp4 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 0		; <i32*> [#uses=1]
 	%tmp5 = load i32* %tmp4		; <i32> [#uses=1]
 	ret i32 %tmp5
 }
@@ -26,7 +26,7 @@ entry:
 	%L = alloca [4 x %struct.foo], align 16		; <[4 x %struct.foo]*> [#uses=2]
 	%L12 = bitcast [4 x %struct.foo]* %L to i8*		; <i8*> [#uses=1]
         call void @llvm.memset.p0i8.i32(i8* %L12, i8 0, i32 32, i32 16, i1 false)
-	%tmp4 = getelementptr [4 x %struct.foo]* %L, i32 0, i32 0, i32 0		; <i32*> [#uses=1]
+	%tmp4 = getelementptr [4 x %struct.foo], [4 x %struct.foo]* %L, i32 0, i32 0, i32 0		; <i32*> [#uses=1]
 	%tmp5 = load i32* %tmp4		; <i32> [#uses=1]
 	ret i32 %tmp5
 }
@@ -37,11 +37,11 @@ entry:
 	%B = alloca %struct.bar, align 16		; <%struct.bar*> [#uses=4]
 	%B1 = bitcast %struct.bar* %B to i8*		; <i8*> [#uses=1]
 	call void @llvm.memset.p0i8.i32(i8* %B1, i8 1, i32 24, i32 16, i1 false)
-	%tmp3 = getelementptr %struct.bar* %B, i32 0, i32 0, i32 0		; <i32*> [#uses=1]
+	%tmp3 = getelementptr %struct.bar, %struct.bar* %B, i32 0, i32 0, i32 0		; <i32*> [#uses=1]
 	store i32 1, i32* %tmp3
-	%tmp4 = getelementptr %struct.bar* %B, i32 0, i32 2		; <double*> [#uses=1]
+	%tmp4 = getelementptr %struct.bar, %struct.bar* %B, i32 0, i32 2		; <double*> [#uses=1]
 	store double 1.000000e+01, double* %tmp4
-	%tmp6 = getelementptr %struct.bar* %B, i32 0, i32 0, i32 1		; <i32*> [#uses=1]
+	%tmp6 = getelementptr %struct.bar, %struct.bar* %B, i32 0, i32 0, i32 1		; <i32*> [#uses=1]
 	%tmp7 = load i32* %tmp6		; <i32> [#uses=1]
 	ret i32 %tmp7
 }
@@ -52,12 +52,12 @@ entry:
 define i16 @test4() nounwind {
 entry:
 	%A = alloca %struct.f, align 8		; <%struct.f*> [#uses=3]
-	%0 = getelementptr %struct.f* %A, i32 0, i32 0		; <i32*> [#uses=1]
+	%0 = getelementptr %struct.f, %struct.f* %A, i32 0, i32 0		; <i32*> [#uses=1]
 	store i32 1, i32* %0, align 8
-	%1 = getelementptr %struct.f* %A, i32 0, i32 1		; <i32*> [#uses=1]
+	%1 = getelementptr %struct.f, %struct.f* %A, i32 0, i32 1		; <i32*> [#uses=1]
 	%2 = bitcast i32* %1 to i8*		; <i8*> [#uses=1]
 	call void @llvm.memset.p0i8.i32(i8* %2, i8 2, i32 12, i32 4, i1 false)
-	%3 = getelementptr %struct.f* %A, i32 0, i32 2		; <i32*> [#uses=1]
+	%3 = getelementptr %struct.f, %struct.f* %A, i32 0, i32 2		; <i32*> [#uses=1]
 	%4 = load i32* %3, align 8		; <i32> [#uses=1]
 	%retval12 = trunc i32 %4 to i16		; <i16> [#uses=1]
 	ret i16 %retval12

Modified: llvm/trunk/test/Transforms/ScalarRepl/negative-memset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/negative-memset.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/negative-memset.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/negative-memset.ll Fri Feb 27 13:29:02 2015
@@ -12,7 +12,7 @@ entry:
   store i32 0, i32* %retval
   %0 = bitcast [1 x i8]* %buff to i8*
   call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 1, i32 1, i1 false)
-  %arraydecay = getelementptr inbounds [1 x i8]* %buff, i32 0, i32 0
+  %arraydecay = getelementptr inbounds [1 x i8], [1 x i8]* %buff, i32 0, i32 0
   call void @llvm.memset.p0i8.i32(i8* %arraydecay, i8 -1, i32 -8, i32 1, i1 false)	; Negative 8!
   ret i32 0
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/nonzero-first-index.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/nonzero-first-index.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/nonzero-first-index.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/nonzero-first-index.ll Fri Feb 27 13:29:02 2015
@@ -12,8 +12,8 @@ define i32 @test1() {
 ; CHECK-NOT: = i160
 ; CHECK: ret i32 undef
 	%A = alloca %nested
-	%B = getelementptr %nested* %A, i32 0, i32 1, i32 0
-	%C = getelementptr i32* %B, i32 2
+	%B = getelementptr %nested, %nested* %A, i32 0, i32 1, i32 0
+	%C = getelementptr i32, i32* %B, i32 2
 	%D = load i32* %C
 	ret i32 %D
 }
@@ -23,8 +23,8 @@ define i32 @test2() {
 ; CHECK-LABEL: @test2(
 ; CHECK: i160
 	%A = alloca %nested
-	%B = getelementptr %nested* %A, i32 0, i32 1, i32 0
-	%C = getelementptr i32* %B, i32 4
+	%B = getelementptr %nested, %nested* %A, i32 0, i32 1, i32 0
+	%C = getelementptr i32, i32* %B, i32 4
 	%D = load i32* %C
 	ret i32 %D
 }
@@ -36,7 +36,7 @@ define i32 @test3() {
 ; CHECK: ret i32 undef
 	%A = alloca %nested
 	%B = bitcast %nested* %A to i32*
-	%C = getelementptr i32* %B, i32 2
+	%C = getelementptr i32, i32* %B, i32 2
 	%D = load i32* %C
 	ret i32 %D
 }
@@ -47,7 +47,7 @@ define i32 @test4() {
 ; CHECK: i160
 	%A = alloca %nested
 	%B = bitcast %nested* %A to i32*
-	%C = getelementptr i32* %B, i32 -1
+	%C = getelementptr i32, i32* %B, i32 -1
 	%D = load i32* %C
 	ret i32 %D
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/not-a-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/not-a-vector.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/not-a-vector.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/not-a-vector.ll Fri Feb 27 13:29:02 2015
@@ -8,11 +8,11 @@ define double @test(double %A, double %B
 	%C = bitcast [7 x i64]* %ARR to double*
 	store double %A, double* %C
 
-	%D = getelementptr [7 x i64]* %ARR, i32 0, i32 4
+	%D = getelementptr [7 x i64], [7 x i64]* %ARR, i32 0, i32 4
 	%E = bitcast i64* %D to double*
 	store double %B, double* %E
 
-	%F = getelementptr double* %C, i32 4
+	%F = getelementptr double, double* %C, i32 4
 	%G = load double* %F
 	ret double %G
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/phi-cycle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/phi-cycle.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/phi-cycle.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/phi-cycle.ll Fri Feb 27 13:29:02 2015
@@ -11,9 +11,9 @@ target triple = "x86_64-unknown-linux-gn
 define i32 @main(i32 %argc, i8** nocapture %argv) nounwind uwtable {
 entry:
   %f = alloca %struct.foo, align 4
-  %x.i = getelementptr inbounds %struct.foo* %f, i64 0, i32 0
+  %x.i = getelementptr inbounds %struct.foo, %struct.foo* %f, i64 0, i32 0
   store i32 1, i32* %x.i, align 4
-  %y.i = getelementptr inbounds %struct.foo* %f, i64 0, i32 1
+  %y.i = getelementptr inbounds %struct.foo, %struct.foo* %f, i64 0, i32 1
   br label %while.cond.i
 
 ; CHECK: while.cond.i:

Modified: llvm/trunk/test/Transforms/ScalarRepl/phi-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/phi-select.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/phi-select.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/phi-select.ll Fri Feb 27 13:29:02 2015
@@ -13,13 +13,13 @@ define i32 @test1(i32 %x) nounwind readn
 entry:
   %a = alloca %struct.X, align 8                  ; <%struct.X*> [#uses=2]
   %b = alloca %struct.X, align 8                  ; <%struct.X*> [#uses=2]
-  %0 = getelementptr inbounds %struct.X* %a, i64 0, i32 0 ; <i32*> [#uses=1]
+  %0 = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 0 ; <i32*> [#uses=1]
   store i32 1, i32* %0, align 8
-  %1 = getelementptr inbounds %struct.X* %b, i64 0, i32 0 ; <i32*> [#uses=1]
+  %1 = getelementptr inbounds %struct.X, %struct.X* %b, i64 0, i32 0 ; <i32*> [#uses=1]
   store i32 2, i32* %1, align 8
   %2 = icmp eq i32 %x, 0                          ; <i1> [#uses=1]
   %p.0 = select i1 %2, %struct.X* %b, %struct.X* %a ; <%struct.X*> [#uses=1]
-  %3 = getelementptr inbounds %struct.X* %p.0, i64 0, i32 0 ; <i32*> [#uses=1]
+  %3 = getelementptr inbounds %struct.X, %struct.X* %p.0, i64 0, i32 0 ; <i32*> [#uses=1]
   %4 = load i32* %3, align 8                      ; <i32> [#uses=1]
   ret i32 %4
 }
@@ -30,11 +30,11 @@ entry:
 define i32 @test2(i1 %c) {
 entry:
   %A = alloca {i32, i32}
-  %B = getelementptr {i32, i32}* %A, i32 0, i32 0
+  %B = getelementptr {i32, i32}, {i32, i32}* %A, i32 0, i32 0
   store i32 1, i32* %B
   br i1 %c, label %T, label %F
 T:
-  %C = getelementptr {i32, i32}* %A, i32 0, i32 1
+  %C = getelementptr {i32, i32}, {i32, i32}* %A, i32 0, i32 1
   store i32 2, i32* %C
   br label %F
 F:
@@ -49,9 +49,9 @@ F:
 ; rdar://8904039
 define i32 @test3(i1 %c) {
   %A = alloca {i32, i32}
-  %B = getelementptr {i32, i32}* %A, i32 0, i32 0
+  %B = getelementptr {i32, i32}, {i32, i32}* %A, i32 0, i32 0
   store i32 1, i32* %B
-  %C = getelementptr {i32, i32}* %A, i32 0, i32 1
+  %C = getelementptr {i32, i32}, {i32, i32}* %A, i32 0, i32 1
   store i32 2, i32* %C
   
   %X = select i1 %c, i32* %B, i32* %C
@@ -65,9 +65,9 @@ entry:
   %A = alloca %PairTy
   ; CHECK-LABEL: @test4(
   ; CHECK: %A = alloca %PairTy
-  %B = getelementptr %PairTy* %A, i32 0, i32 0
+  %B = getelementptr %PairTy, %PairTy* %A, i32 0, i32 0
   store i32 1, i32* %B
-  %C = getelementptr %PairTy* %A, i32 0, i32 1
+  %C = getelementptr %PairTy, %PairTy* %A, i32 0, i32 1
   store i32 2, i32* %B
   
   %X = select i1 %c, i32* %B, i32* %C

Modified: llvm/trunk/test/Transforms/ScalarRepl/sroa_two.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/sroa_two.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/sroa_two.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/sroa_two.ll Fri Feb 27 13:29:02 2015
@@ -2,11 +2,11 @@
 
 define i32 @test(i32 %X) {
 	%Arr = alloca [2 x i32]		; <[2 x i32]*> [#uses=3]
-	%tmp.0 = getelementptr [2 x i32]* %Arr, i32 0, i32 0		; <i32*> [#uses=1]
+	%tmp.0 = getelementptr [2 x i32], [2 x i32]* %Arr, i32 0, i32 0		; <i32*> [#uses=1]
 	store i32 1, i32* %tmp.0
-	%tmp.1 = getelementptr [2 x i32]* %Arr, i32 0, i32 1		; <i32*> [#uses=1]
+	%tmp.1 = getelementptr [2 x i32], [2 x i32]* %Arr, i32 0, i32 1		; <i32*> [#uses=1]
 	store i32 2, i32* %tmp.1
-	%tmp.3 = getelementptr [2 x i32]* %Arr, i32 0, i32 %X		; <i32*> [#uses=1]
+	%tmp.3 = getelementptr [2 x i32], [2 x i32]* %Arr, i32 0, i32 %X		; <i32*> [#uses=1]
 	%tmp.4 = load i32* %tmp.3		; <i32> [#uses=1]
 	ret i32 %tmp.4
 }

Modified: llvm/trunk/test/Transforms/ScalarRepl/union-pointer.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/union-pointer.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/union-pointer.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/union-pointer.ll Fri Feb 27 13:29:02 2015
@@ -36,7 +36,7 @@ define i8 addrspace(1)* @test_as1_array(
 ; CHECK-NEXT: %2 = inttoptr i16 %1 to i8 addrspace(1)*
 ; CHECK-NEXT: ret i8 addrspace(1)* %2
   %as_ptr_array = alloca [4 x i16 addrspace(1)*]
-  %elem1 = getelementptr [4 x i16 addrspace(1)*]* %as_ptr_array, i32 0, i32 1
+  %elem1 = getelementptr [4 x i16 addrspace(1)*], [4 x i16 addrspace(1)*]* %as_ptr_array, i32 0, i32 1
   store i16 addrspace(1)* %x, i16 addrspace(1)** %elem1
   %elem1.cast = bitcast i16 addrspace(1)** %elem1 to i8 addrspace(1)**
   %tmp = load i8 addrspace(1)** %elem1.cast
@@ -54,16 +54,16 @@ define void @test2(i64 %Op.0) {
 	%tmp.upgrd.2 = call i64 @_Z3foov( )		; <i64> [#uses=1]
 	%tmp1.upgrd.3 = bitcast %struct.Val* %tmp1 to i64*		; <i64*> [#uses=1]
 	store i64 %tmp.upgrd.2, i64* %tmp1.upgrd.3
-	%tmp.upgrd.4 = getelementptr %struct.Val* %tmp, i32 0, i32 0		; <i32**> [#uses=1]
-	%tmp2 = getelementptr %struct.Val* %tmp1, i32 0, i32 0		; <i32**> [#uses=1]
+	%tmp.upgrd.4 = getelementptr %struct.Val, %struct.Val* %tmp, i32 0, i32 0		; <i32**> [#uses=1]
+	%tmp2 = getelementptr %struct.Val, %struct.Val* %tmp1, i32 0, i32 0		; <i32**> [#uses=1]
 	%tmp.upgrd.5 = load i32** %tmp2		; <i32*> [#uses=1]
 	store i32* %tmp.upgrd.5, i32** %tmp.upgrd.4
-	%tmp3 = getelementptr %struct.Val* %tmp, i32 0, i32 1		; <i32*> [#uses=1]
-	%tmp4 = getelementptr %struct.Val* %tmp1, i32 0, i32 1		; <i32*> [#uses=1]
+	%tmp3 = getelementptr %struct.Val, %struct.Val* %tmp, i32 0, i32 1		; <i32*> [#uses=1]
+	%tmp4 = getelementptr %struct.Val, %struct.Val* %tmp1, i32 0, i32 1		; <i32*> [#uses=1]
 	%tmp.upgrd.6 = load i32* %tmp4		; <i32> [#uses=1]
 	store i32 %tmp.upgrd.6, i32* %tmp3
 	%tmp7 = bitcast %struct.Val* %tmp to { i64 }*		; <{ i64 }*> [#uses=1]
-	%tmp8 = getelementptr { i64 }* %tmp7, i32 0, i32 0		; <i64*> [#uses=1]
+	%tmp8 = getelementptr { i64 }, { i64 }* %tmp7, i32 0, i32 0		; <i64*> [#uses=1]
 	%tmp9 = load i64* %tmp8		; <i64> [#uses=1]
 	call void @_Z3bar3ValS_( i64 %Op.0, i64 %tmp9 )
 	ret void

Modified: llvm/trunk/test/Transforms/ScalarRepl/vector_promote.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/vector_promote.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/vector_promote.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/vector_promote.ll Fri Feb 27 13:29:02 2015
@@ -8,7 +8,7 @@ entry:
 	%tmp = load <4 x float>* %F		; <<4 x float>> [#uses=2]
 	%tmp3 = fadd <4 x float> %tmp, %tmp		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp3, <4 x float>* %G
-	%G.upgrd.1 = getelementptr <4 x float>* %G, i32 0, i32 0		; <float*> [#uses=1]
+	%G.upgrd.1 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 0		; <float*> [#uses=1]
 	store float %f, float* %G.upgrd.1
 	%tmp4 = load <4 x float>* %G		; <<4 x float>> [#uses=2]
 	%tmp6 = fadd <4 x float> %tmp4, %tmp4		; <<4 x float>> [#uses=1]
@@ -27,7 +27,7 @@ entry:
 	%tmp = load <4 x float>* %F		; <<4 x float>> [#uses=2]
 	%tmp3 = fadd <4 x float> %tmp, %tmp		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp3, <4 x float>* %G
-	%tmp.upgrd.2 = getelementptr <4 x float>* %G, i32 0, i32 2		; <float*> [#uses=1]
+	%tmp.upgrd.2 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 2		; <float*> [#uses=1]
 	store float %f, float* %tmp.upgrd.2
 	%tmp4 = load <4 x float>* %G		; <<4 x float>> [#uses=2]
 	%tmp6 = fadd <4 x float> %tmp4, %tmp4		; <<4 x float>> [#uses=1]
@@ -46,7 +46,7 @@ entry:
 	%tmp = load <4 x float>* %F		; <<4 x float>> [#uses=2]
 	%tmp3 = fadd <4 x float> %tmp, %tmp		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp3, <4 x float>* %G
-	%tmp.upgrd.3 = getelementptr <4 x float>* %G, i32 0, i32 2		; <float*> [#uses=1]
+	%tmp.upgrd.3 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 2		; <float*> [#uses=1]
 	%tmp.upgrd.4 = load float* %tmp.upgrd.3		; <float> [#uses=1]
 	store float %tmp.upgrd.4, float* %f
 	ret void
@@ -63,7 +63,7 @@ entry:
 	%tmp = load <4 x float>* %F		; <<4 x float>> [#uses=2]
 	%tmp3 = fadd <4 x float> %tmp, %tmp		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp3, <4 x float>* %G
-	%G.upgrd.5 = getelementptr <4 x float>* %G, i32 0, i32 0		; <float*> [#uses=1]
+	%G.upgrd.5 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 0		; <float*> [#uses=1]
 	%tmp.upgrd.6 = load float* %G.upgrd.5		; <float> [#uses=1]
 	store float %tmp.upgrd.6, float* %f
 	ret void
@@ -76,7 +76,7 @@ entry:
 
 define i32 @test5(float %X) {  ;; should turn into bitcast.
 	%X_addr = alloca [4 x float]
-        %X1 = getelementptr [4 x float]* %X_addr, i32 0, i32 2
+        %X1 = getelementptr [4 x float], [4 x float]* %X_addr, i32 0, i32 2
 	store float %X, float* %X1
 	%a = bitcast float* %X1 to i32*
 	%tmp = load i32* %a
@@ -104,7 +104,7 @@ entry:
   %memtmp = alloca %struct.test7, align 16
   %0 = bitcast %struct.test7* %memtmp to <4 x i32>*
   store <4 x i32> zeroinitializer, <4 x i32>* %0, align 16
-  %1 = getelementptr inbounds %struct.test7* %memtmp, i64 0, i32 0, i64 5
+  %1 = getelementptr inbounds %struct.test7, %struct.test7* %memtmp, i64 0, i32 0, i64 5
   store i32 0, i32* %1, align 4
   ret void
 ; CHECK-LABEL: @test7(

Modified: llvm/trunk/test/Transforms/ScalarRepl/volatile.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarRepl/volatile.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/volatile.ll (original)
+++ llvm/trunk/test/Transforms/ScalarRepl/volatile.ll Fri Feb 27 13:29:02 2015
@@ -2,11 +2,11 @@
 
 define i32 @voltest(i32 %T) {
 	%A = alloca {i32, i32}
-	%B = getelementptr {i32,i32}* %A, i32 0, i32 0
+	%B = getelementptr {i32,i32}, {i32,i32}* %A, i32 0, i32 0
 	store volatile i32 %T, i32* %B
 ; CHECK: store volatile
 
-	%C = getelementptr {i32,i32}* %A, i32 0, i32 1
+	%C = getelementptr {i32,i32}, {i32,i32}* %A, i32 0, i32 1
 	%X = load volatile i32* %C
 ; CHECK: load volatile
 	ret i32 %X

Modified: llvm/trunk/test/Transforms/Scalarizer/basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Scalarizer/basic.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/Scalarizer/basic.ll (original)
+++ llvm/trunk/test/Transforms/Scalarizer/basic.ll Fri Feb 27 13:29:02 2015
@@ -19,14 +19,14 @@ define void @f1(<4 x float> %init, <4 x
 ; CHECK:   %acc.i2 = phi float [ %init.i2, %entry ], [ %sel.i2, %loop ]
 ; CHECK:   %acc.i3 = phi float [ %init.i3, %entry ], [ %sel.i3, %loop ]
 ; CHECK:   %nexti = sub i32 %i, 1
-; CHECK:   %ptr = getelementptr <4 x float>* %base, i32 %i
+; CHECK:   %ptr = getelementptr <4 x float>, <4 x float>* %base, i32 %i
 ; CHECK:   %ptr.i0 = bitcast <4 x float>* %ptr to float*
 ; CHECK:   %val.i0 = load float* %ptr.i0, align 16
-; CHECK:   %ptr.i1 = getelementptr float* %ptr.i0, i32 1
+; CHECK:   %ptr.i1 = getelementptr float, float* %ptr.i0, i32 1
 ; CHECK:   %val.i1 = load float* %ptr.i1, align 4
-; CHECK:   %ptr.i2 = getelementptr float* %ptr.i0, i32 2
+; CHECK:   %ptr.i2 = getelementptr float, float* %ptr.i0, i32 2
 ; CHECK:   %val.i2 = load float* %ptr.i2, align 8
-; CHECK:   %ptr.i3 = getelementptr float* %ptr.i0, i32 3
+; CHECK:   %ptr.i3 = getelementptr float, float* %ptr.i0, i32 3
 ; CHECK:   %val.i3 = load float* %ptr.i3, align 4
 ; CHECK:   %add.i0 = fadd float %val.i0, %val.i2
 ; CHECK:   %add.i1 = fadd float %val.i1, %val.i3
@@ -65,7 +65,7 @@ loop:
   %acc = phi <4 x float> [ %init, %entry ], [ %sel, %loop ]
   %nexti = sub i32 %i, 1
 
-  %ptr = getelementptr <4 x float> *%base, i32 %i
+  %ptr = getelementptr <4 x float>, <4 x float> *%base, i32 %i
   %val = load <4 x float> *%ptr
   %dval = bitcast <4 x float> %val to <2 x double>
   %dacc = bitcast <4 x float> %acc to <2 x double>
@@ -105,14 +105,14 @@ define void @f2(<4 x i32> %init, <4 x i8
 ; CHECK:   %acc.i2 = phi i32 [ %init.i2, %entry ], [ %sel.i2, %loop ]
 ; CHECK:   %acc.i3 = phi i32 [ %init.i3, %entry ], [ %sel.i3, %loop ]
 ; CHECK:   %nexti = sub i32 %i, 1
-; CHECK:   %ptr = getelementptr <4 x i8>* %base, i32 %i
+; CHECK:   %ptr = getelementptr <4 x i8>, <4 x i8>* %base, i32 %i
 ; CHECK:   %ptr.i0 = bitcast <4 x i8>* %ptr to i8*
 ; CHECK:   %val.i0 = load i8* %ptr.i0, align 4
-; CHECK:   %ptr.i1 = getelementptr i8* %ptr.i0, i32 1
+; CHECK:   %ptr.i1 = getelementptr i8, i8* %ptr.i0, i32 1
 ; CHECK:   %val.i1 = load i8* %ptr.i1, align 1
-; CHECK:   %ptr.i2 = getelementptr i8* %ptr.i0, i32 2
+; CHECK:   %ptr.i2 = getelementptr i8, i8* %ptr.i0, i32 2
 ; CHECK:   %val.i2 = load i8* %ptr.i2, align 2
-; CHECK:   %ptr.i3 = getelementptr i8* %ptr.i0, i32 3
+; CHECK:   %ptr.i3 = getelementptr i8, i8* %ptr.i0, i32 3
 ; CHECK:   %val.i3 = load i8* %ptr.i3, align 1
 ; CHECK:   %ext.i0 = sext i8 %val.i0 to i32
 ; CHECK:   %ext.i1 = sext i8 %val.i1 to i32
@@ -150,7 +150,7 @@ loop:
   %acc = phi <4 x i32> [ %init, %entry ], [ %sel, %loop ]
   %nexti = sub i32 %i, 1
 
-  %ptr = getelementptr <4 x i8> *%base, i32 %i
+  %ptr = getelementptr <4 x i8>, <4 x i8> *%base, i32 %i
   %val = load <4 x i8> *%ptr
   %ext = sext <4 x i8> %val to <4 x i32>
   %add = add <4 x i32> %ext, %acc
@@ -222,8 +222,8 @@ entry:
 
 loop:
   %index = phi i32 [ 0, %entry ], [ %next_index, %loop ]
-  %this_src = getelementptr <4 x i32> *%src, i32 %index
-  %this_dst = getelementptr <4 x i32> *%dst, i32 %index
+  %this_src = getelementptr <4 x i32>, <4 x i32> *%src, i32 %index
+  %this_dst = getelementptr <4 x i32>, <4 x i32> *%dst, i32 %index
   %val = load <4 x i32> *%this_src, !llvm.mem.parallel_loop_access !3
   %add = add <4 x i32> %val, %val
   store <4 x i32> %add, <4 x i32> *%this_dst, !llvm.mem.parallel_loop_access !3
@@ -272,18 +272,18 @@ define void @f8(<4 x float *> *%dest, <4
                 float *%other) {
 ; CHECK-LABEL: @f8(
 ; CHECK: %dest.i0 = bitcast <4 x float*>* %dest to float**
-; CHECK: %dest.i1 = getelementptr float** %dest.i0, i32 1
-; CHECK: %dest.i2 = getelementptr float** %dest.i0, i32 2
-; CHECK: %dest.i3 = getelementptr float** %dest.i0, i32 3
+; CHECK: %dest.i1 = getelementptr float*, float** %dest.i0, i32 1
+; CHECK: %dest.i2 = getelementptr float*, float** %dest.i0, i32 2
+; CHECK: %dest.i3 = getelementptr float*, float** %dest.i0, i32 3
 ; CHECK: %i0.i1 = extractelement <4 x i32> %i0, i32 1
 ; CHECK: %i0.i3 = extractelement <4 x i32> %i0, i32 3
 ; CHECK: %ptr0.i0 = extractelement <4 x float*> %ptr0, i32 0
-; CHECK: %val.i0 = getelementptr float* %ptr0.i0, i32 100
-; CHECK: %val.i1 = getelementptr float* %other, i32 %i0.i1
+; CHECK: %val.i0 = getelementptr float, float* %ptr0.i0, i32 100
+; CHECK: %val.i1 = getelementptr float, float* %other, i32 %i0.i1
 ; CHECK: %ptr0.i2 = extractelement <4 x float*> %ptr0, i32 2
-; CHECK: %val.i2 = getelementptr float* %ptr0.i2, i32 100
+; CHECK: %val.i2 = getelementptr float, float* %ptr0.i2, i32 100
 ; CHECK: %ptr0.i3 = extractelement <4 x float*> %ptr0, i32 3
-; CHECK: %val.i3 = getelementptr float* %ptr0.i3, i32 %i0.i3
+; CHECK: %val.i3 = getelementptr float, float* %ptr0.i3, i32 %i0.i3
 ; CHECK: store float* %val.i0, float** %dest.i0, align 32
 ; CHECK: store float* %val.i1, float** %dest.i1, align 8
 ; CHECK: store float* %val.i2, float** %dest.i2, align 16
@@ -292,7 +292,7 @@ define void @f8(<4 x float *> *%dest, <4
   %i1 = insertelement <4 x i32> %i0, i32 100, i32 0
   %i2 = insertelement <4 x i32> %i1, i32 100, i32 2
   %ptr1 = insertelement <4 x float *> %ptr0, float *%other, i32 1
-  %val = getelementptr <4 x float *> %ptr1, <4 x i32> %i2
+  %val = getelementptr float, <4 x float *> %ptr1, <4 x i32> %i2
   store <4 x float *> %val, <4 x float *> *%dest
   ret void
 }
@@ -301,16 +301,16 @@ define void @f8(<4 x float *> *%dest, <4
 define void @f9(<4 x float> *%dest, <4 x float> *%src) {
 ; CHECK: @f9(
 ; CHECK: %dest.i0 = bitcast <4 x float>* %dest to float*
-; CHECK: %dest.i1 = getelementptr float* %dest.i0, i32 1
-; CHECK: %dest.i2 = getelementptr float* %dest.i0, i32 2
-; CHECK: %dest.i3 = getelementptr float* %dest.i0, i32 3
+; CHECK: %dest.i1 = getelementptr float, float* %dest.i0, i32 1
+; CHECK: %dest.i2 = getelementptr float, float* %dest.i0, i32 2
+; CHECK: %dest.i3 = getelementptr float, float* %dest.i0, i32 3
 ; CHECK: %src.i0 = bitcast <4 x float>* %src to float*
 ; CHECK: %val.i0 = load float* %src.i0, align 4
-; CHECK: %src.i1 = getelementptr float* %src.i0, i32 1
+; CHECK: %src.i1 = getelementptr float, float* %src.i0, i32 1
 ; CHECK: %val.i1 = load float* %src.i1, align 4
-; CHECK: %src.i2 = getelementptr float* %src.i0, i32 2
+; CHECK: %src.i2 = getelementptr float, float* %src.i0, i32 2
 ; CHECK: %val.i2 = load float* %src.i2, align 4
-; CHECK: %src.i3 = getelementptr float* %src.i0, i32 3
+; CHECK: %src.i3 = getelementptr float, float* %src.i0, i32 3
 ; CHECK: %val.i3 = load float* %src.i3, align 4
 ; CHECK: store float %val.i0, float* %dest.i0, align 8
 ; CHECK: store float %val.i1, float* %dest.i1, align 4
@@ -326,16 +326,16 @@ define void @f9(<4 x float> *%dest, <4 x
 define void @f10(<4 x float> *%dest, <4 x float> *%src) {
 ; CHECK: @f10(
 ; CHECK: %dest.i0 = bitcast <4 x float>* %dest to float*
-; CHECK: %dest.i1 = getelementptr float* %dest.i0, i32 1
-; CHECK: %dest.i2 = getelementptr float* %dest.i0, i32 2
-; CHECK: %dest.i3 = getelementptr float* %dest.i0, i32 3
+; CHECK: %dest.i1 = getelementptr float, float* %dest.i0, i32 1
+; CHECK: %dest.i2 = getelementptr float, float* %dest.i0, i32 2
+; CHECK: %dest.i3 = getelementptr float, float* %dest.i0, i32 3
 ; CHECK: %src.i0 = bitcast <4 x float>* %src to float*
 ; CHECK: %val.i0 = load float* %src.i0, align 1
-; CHECK: %src.i1 = getelementptr float* %src.i0, i32 1
+; CHECK: %src.i1 = getelementptr float, float* %src.i0, i32 1
 ; CHECK: %val.i1 = load float* %src.i1, align 1
-; CHECK: %src.i2 = getelementptr float* %src.i0, i32 2
+; CHECK: %src.i2 = getelementptr float, float* %src.i0, i32 2
 ; CHECK: %val.i2 = load float* %src.i2, align 1
-; CHECK: %src.i3 = getelementptr float* %src.i0, i32 3
+; CHECK: %src.i3 = getelementptr float, float* %src.i0, i32 3
 ; CHECK: %val.i3 = load float* %src.i3, align 1
 ; CHECK: store float %val.i0, float* %dest.i0, align 2
 ; CHECK: store float %val.i1, float* %dest.i1, align 2
@@ -354,7 +354,7 @@ define void @f11(<32 x i1> *%dest, <32 x
 ; CHECK: %val1 = load <32 x i1>* %src1
 ; CHECK: store <32 x i1> %and, <32 x i1>* %dest
 ; CHECK: ret void
-  %src1 = getelementptr <32 x i1> *%src0, i32 1
+  %src1 = getelementptr <32 x i1>, <32 x i1> *%src0, i32 1
   %val0 = load <32 x i1> *%src0
   %val1 = load <32 x i1> *%src1
   %and = and <32 x i1> %val0, %val1
@@ -387,27 +387,27 @@ define void @f13(<4 x float *> *%dest, <
                  float *%other) {
 ; CHECK-LABEL: @f13(
 ; CHECK: %dest.i0 = bitcast <4 x float*>* %dest to float**
-; CHECK: %dest.i1 = getelementptr float** %dest.i0, i32 1
-; CHECK: %dest.i2 = getelementptr float** %dest.i0, i32 2
-; CHECK: %dest.i3 = getelementptr float** %dest.i0, i32 3
+; CHECK: %dest.i1 = getelementptr float*, float** %dest.i0, i32 1
+; CHECK: %dest.i2 = getelementptr float*, float** %dest.i0, i32 2
+; CHECK: %dest.i3 = getelementptr float*, float** %dest.i0, i32 3
 ; CHECK: %i.i0 = extractelement <4 x i32> %i, i32 0
 ; CHECK: %ptr.i0 = extractelement <4 x [4 x float]*> %ptr, i32 0
-; CHECK: %val.i0 = getelementptr inbounds [4 x float]* %ptr.i0, i32 0, i32 %i.i0
+; CHECK: %val.i0 = getelementptr inbounds [4 x float], [4 x float]* %ptr.i0, i32 0, i32 %i.i0
 ; CHECK: %i.i1 = extractelement <4 x i32> %i, i32 1
 ; CHECK: %ptr.i1 = extractelement <4 x [4 x float]*> %ptr, i32 1
-; CHECK: %val.i1 = getelementptr inbounds [4 x float]* %ptr.i1, i32 1, i32 %i.i1
+; CHECK: %val.i1 = getelementptr inbounds [4 x float], [4 x float]* %ptr.i1, i32 1, i32 %i.i1
 ; CHECK: %i.i2 = extractelement <4 x i32> %i, i32 2
 ; CHECK: %ptr.i2 = extractelement <4 x [4 x float]*> %ptr, i32 2
-; CHECK: %val.i2 = getelementptr inbounds [4 x float]* %ptr.i2, i32 2, i32 %i.i2
+; CHECK: %val.i2 = getelementptr inbounds [4 x float], [4 x float]* %ptr.i2, i32 2, i32 %i.i2
 ; CHECK: %i.i3 = extractelement <4 x i32> %i, i32 3
 ; CHECK: %ptr.i3 = extractelement <4 x [4 x float]*> %ptr, i32 3
-; CHECK: %val.i3 = getelementptr inbounds [4 x float]* %ptr.i3, i32 3, i32 %i.i3
+; CHECK: %val.i3 = getelementptr inbounds [4 x float], [4 x float]* %ptr.i3, i32 3, i32 %i.i3
 ; CHECK: store float* %val.i0, float** %dest.i0, align 32
 ; CHECK: store float* %val.i1, float** %dest.i1, align 8
 ; CHECK: store float* %val.i2, float** %dest.i2, align 16
 ; CHECK: store float* %val.i3, float** %dest.i3, align 8
 ; CHECK: ret void
-  %val = getelementptr inbounds <4 x [4 x float] *> %ptr,
+  %val = getelementptr inbounds [4 x float], <4 x [4 x float] *> %ptr,
                                 <4 x i32> <i32 0, i32 1, i32 2, i32 3>,
                                 <4 x i32> %i
   store <4 x float *> %val, <4 x float *> *%dest

Modified: llvm/trunk/test/Transforms/Scalarizer/dbginfo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Scalarizer/dbginfo.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/Scalarizer/dbginfo.ll (original)
+++ llvm/trunk/test/Transforms/Scalarizer/dbginfo.ll Fri Feb 27 13:29:02 2015
@@ -5,17 +5,17 @@ target datalayout = "e-p:64:64:64-i1:8:8
 define void @f1(<4 x i32>* nocapture %a, <4 x i32>* nocapture readonly %b, <4 x i32>* nocapture readonly %c) #0 {
 ; CHECK: @f1(
 ; CHECK: %a.i0 = bitcast <4 x i32>* %a to i32*
-; CHECK: %a.i1 = getelementptr i32* %a.i0, i32 1
-; CHECK: %a.i2 = getelementptr i32* %a.i0, i32 2
-; CHECK: %a.i3 = getelementptr i32* %a.i0, i32 3
+; CHECK: %a.i1 = getelementptr i32, i32* %a.i0, i32 1
+; CHECK: %a.i2 = getelementptr i32, i32* %a.i0, i32 2
+; CHECK: %a.i3 = getelementptr i32, i32* %a.i0, i32 3
 ; CHECK: %c.i0 = bitcast <4 x i32>* %c to i32*
-; CHECK: %c.i1 = getelementptr i32* %c.i0, i32 1
-; CHECK: %c.i2 = getelementptr i32* %c.i0, i32 2
-; CHECK: %c.i3 = getelementptr i32* %c.i0, i32 3
+; CHECK: %c.i1 = getelementptr i32, i32* %c.i0, i32 1
+; CHECK: %c.i2 = getelementptr i32, i32* %c.i0, i32 2
+; CHECK: %c.i3 = getelementptr i32, i32* %c.i0, i32 3
 ; CHECK: %b.i0 = bitcast <4 x i32>* %b to i32*
-; CHECK: %b.i1 = getelementptr i32* %b.i0, i32 1
-; CHECK: %b.i2 = getelementptr i32* %b.i0, i32 2
-; CHECK: %b.i3 = getelementptr i32* %b.i0, i32 3
+; CHECK: %b.i1 = getelementptr i32, i32* %b.i0, i32 1
+; CHECK: %b.i2 = getelementptr i32, i32* %b.i0, i32 2
+; CHECK: %b.i3 = getelementptr i32, i32* %b.i0, i32 3
 ; CHECK: tail call void @llvm.dbg.value(metadata <4 x i32>* %a, i64 0, metadata !{{[0-9]+}}, metadata {{.*}}), !dbg !{{[0-9]+}}
 ; CHECK: tail call void @llvm.dbg.value(metadata <4 x i32>* %b, i64 0, metadata !{{[0-9]+}}, metadata {{.*}}), !dbg !{{[0-9]+}}
 ; CHECK: tail call void @llvm.dbg.value(metadata <4 x i32>* %c, i64 0, metadata !{{[0-9]+}}, metadata {{.*}}), !dbg !{{[0-9]+}}

Modified: llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll (original)
+++ llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll Fri Feb 27 13:29:02 2015
@@ -21,23 +21,23 @@ define void @sum_of_array(i32 %x, i32 %y
 .preheader:
   %0 = sext i32 %y to i64
   %1 = sext i32 %x to i64
-  %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+  %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
   %3 = addrspacecast float addrspace(3)* %2 to float*
   %4 = load float* %3, align 4
   %5 = fadd float %4, 0.000000e+00
   %6 = add i32 %y, 1
   %7 = sext i32 %6 to i64
-  %8 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
+  %8 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
   %9 = addrspacecast float addrspace(3)* %8 to float*
   %10 = load float* %9, align 4
   %11 = fadd float %5, %10
   %12 = add i32 %x, 1
   %13 = sext i32 %12 to i64
-  %14 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
+  %14 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
   %15 = addrspacecast float addrspace(3)* %14 to float*
   %16 = load float* %15, align 4
   %17 = fadd float %11, %16
-  %18 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
+  %18 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
   %19 = addrspacecast float addrspace(3)* %18 to float*
   %20 = load float* %19, align 4
   %21 = fadd float %17, %20
@@ -51,10 +51,10 @@ define void @sum_of_array(i32 %x, i32 %y
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
 
 ; IR-LABEL: @sum_of_array(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 33
 
 ; @sum_of_array2 is very similar to @sum_of_array. The only difference is in
 ; the order of "sext" and "add" when computing the array indices. @sum_of_array
@@ -66,21 +66,21 @@ define void @sum_of_array2(i32 %x, i32 %
 .preheader:
   %0 = sext i32 %y to i64
   %1 = sext i32 %x to i64
-  %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+  %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
   %3 = addrspacecast float addrspace(3)* %2 to float*
   %4 = load float* %3, align 4
   %5 = fadd float %4, 0.000000e+00
   %6 = add i64 %0, 1
-  %7 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
+  %7 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
   %8 = addrspacecast float addrspace(3)* %7 to float*
   %9 = load float* %8, align 4
   %10 = fadd float %5, %9
   %11 = add i64 %1, 1
-  %12 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
+  %12 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
   %13 = addrspacecast float addrspace(3)* %12 to float*
   %14 = load float* %13, align 4
   %15 = fadd float %10, %14
-  %16 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
+  %16 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
   %17 = addrspacecast float addrspace(3)* %16 to float*
   %18 = load float* %17, align 4
   %19 = fadd float %15, %18
@@ -94,10 +94,10 @@ define void @sum_of_array2(i32 %x, i32 %
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
 
 ; IR-LABEL: @sum_of_array2(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 33
 
 
 ; This function loads
@@ -114,23 +114,23 @@ define void @sum_of_array3(i32 %x, i32 %
 .preheader:
   %0 = zext i32 %y to i64
   %1 = zext i32 %x to i64
-  %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+  %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
   %3 = addrspacecast float addrspace(3)* %2 to float*
   %4 = load float* %3, align 4
   %5 = fadd float %4, 0.000000e+00
   %6 = add nuw i32 %y, 1
   %7 = zext i32 %6 to i64
-  %8 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
+  %8 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
   %9 = addrspacecast float addrspace(3)* %8 to float*
   %10 = load float* %9, align 4
   %11 = fadd float %5, %10
   %12 = add nuw i32 %x, 1
   %13 = zext i32 %12 to i64
-  %14 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
+  %14 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
   %15 = addrspacecast float addrspace(3)* %14 to float*
   %16 = load float* %15, align 4
   %17 = fadd float %11, %16
-  %18 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
+  %18 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
   %19 = addrspacecast float addrspace(3)* %18 to float*
   %20 = load float* %19, align 4
   %21 = fadd float %17, %20
@@ -144,10 +144,10 @@ define void @sum_of_array3(i32 %x, i32 %
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
 
 ; IR-LABEL: @sum_of_array3(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 33
 
 
 ; This function loads
@@ -162,21 +162,21 @@ define void @sum_of_array4(i32 %x, i32 %
 .preheader:
   %0 = zext i32 %y to i64
   %1 = zext i32 %x to i64
-  %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+  %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
   %3 = addrspacecast float addrspace(3)* %2 to float*
   %4 = load float* %3, align 4
   %5 = fadd float %4, 0.000000e+00
   %6 = add i64 %0, 1
-  %7 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
+  %7 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
   %8 = addrspacecast float addrspace(3)* %7 to float*
   %9 = load float* %8, align 4
   %10 = fadd float %5, %9
   %11 = add i64 %1, 1
-  %12 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
+  %12 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
   %13 = addrspacecast float addrspace(3)* %12 to float*
   %14 = load float* %13, align 4
   %15 = fadd float %10, %14
-  %16 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
+  %16 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
   %17 = addrspacecast float addrspace(3)* %16 to float*
   %18 = load float* %17, align 4
   %19 = fadd float %15, %18
@@ -190,7 +190,7 @@ define void @sum_of_array4(i32 %x, i32 %
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
 
 ; IR-LABEL: @sum_of_array4(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 33

Modified: llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll (original)
+++ llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll Fri Feb 27 13:29:02 2015
@@ -20,11 +20,11 @@ define double* @struct(i32 %i) {
 entry:
   %add = add nsw i32 %i, 5
   %idxprom = sext i32 %add to i64
-  %p = getelementptr inbounds [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
+  %p = getelementptr inbounds [1024 x %struct.S], [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
   ret double* %p
 }
 ; CHECK-LABEL: @struct(
-; CHECK: getelementptr [1024 x %struct.S]* @struct_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1
+; CHECK: getelementptr [1024 x %struct.S], [1024 x %struct.S]* @struct_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1
 
 ; We should be able to trace into sext(a + b) if a + b is non-negative
 ; (e.g., used as an index of an inbounds GEP) and one of a and b is
@@ -36,15 +36,15 @@ entry:
   %2 = add i32 %j, -2
   ; However, inbound sext(j + -2) != sext(j) + -2, e.g., j = INT_MIN
   %3 = sext i32 %2 to i64
-  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %1, i64 %3
+  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %1, i64 %3
   ret float* %p
 }
 ; CHECK-LABEL: @sext_add(
 ; CHECK-NOT: = add
 ; CHECK: add i32 %j, -2
 ; CHECK: sext
-; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float* %{{[a-zA-Z0-9]+}}, i64 32
+; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, float* %{{[a-zA-Z0-9]+}}, i64 32
 
 ; We should be able to trace into sext/zext if it can be distributed to both
 ; operands, e.g., sext (add nsw a, b) == add nsw (sext a), (sext b)
@@ -60,12 +60,12 @@ define float* @ext_add_no_overflow(i64 %
   %d1 = add nuw i32 %d, 1
   %d2 = zext i32 %d1 to i64
   %j = add i64 %c, %d2       ; j = c + zext(d +nuw 1)
-  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
+  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
   ret float* %p
 }
 ; CHECK-LABEL: @ext_add_no_overflow(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float* [[BASE_PTR]], i64 33
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, float* [[BASE_PTR]], i64 33
 
 ; Verifies we handle nested sext/zext correctly.
 define void @sext_zext(i32 %a, i32 %b, float** %out1, float** %out2) {
@@ -76,7 +76,7 @@ entry:
   %3 = add nsw i32 %b, 2
   %4 = sext i32 %3 to i48
   %5 = zext i48 %4 to i64    ; zext(sext(b +nsw 2)) != zext(sext(b)) + 2
-  %p1 = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %2, i64 %5
+  %p1 = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %2, i64 %5
   store float* %p1, float** %out1
   %6 = add nuw i32 %a, 3
   %7 = zext i32 %6 to i48
@@ -84,15 +84,15 @@ entry:
   %9 = add nsw i32 %b, 4
   %10 = zext i32 %9 to i48
   %11 = sext i48 %10 to i64  ; sext(zext(b +nsw 4)) != zext(b) + 4
-  %p2 = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %8, i64 %11
+  %p2 = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %8, i64 %11
   store float* %p2, float** %out2
   ret void
 }
 ; CHECK-LABEL: @sext_zext(
-; CHECK: [[BASE_PTR_1:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float* [[BASE_PTR_1]], i64 32
-; CHECK: [[BASE_PTR_2:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float* [[BASE_PTR_2]], i64 96
+; CHECK: [[BASE_PTR_1:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, float* [[BASE_PTR_1]], i64 32
+; CHECK: [[BASE_PTR_2:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, float* [[BASE_PTR_2]], i64 96
 
 ; Similar to @ext_add_no_overflow, we should be able to trace into s/zext if
 ; its operand is an OR and the two operands of the OR have no common bits.
@@ -105,12 +105,12 @@ entry:
   %b3.ext = sext i32 %b3 to i64
   %i = add i64 %a, %b2.ext
   %j = add i64 %a, %b3.ext
-  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
+  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
   ret float* %p
 }
 ; CHECK-LABEL: @sext_or(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float* [[BASE_PTR]], i64 32
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, float* [[BASE_PTR]], i64 32
 
 ; The subexpression (b + 5) is used in both "i = a + (b + 5)" and "*out = b +
 ; 5". When extracting the constant offset 5, make sure "*out = b + 5" isn't
@@ -119,13 +119,13 @@ define float* @expr(i64 %a, i64 %b, i64*
 entry:
   %b5 = add i64 %b, 5
   %i = add i64 %b5, %a
-  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 0
+  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 0
   store i64 %b5, i64* %out
   ret float* %p
 }
 ; CHECK-LABEL: @expr(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 0
-; CHECK: getelementptr float* [[BASE_PTR]], i64 160
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 0
+; CHECK: getelementptr float, float* [[BASE_PTR]], i64 160
 ; CHECK: store i64 %b5, i64* %out
 
 ; d + sext(a +nsw (b +nsw (c +nsw 8))) => (d + sext(a) + sext(b) + sext(c)) + 8
@@ -136,26 +136,26 @@ entry:
   %2 = add nsw i32 %a, %1
   %3 = sext i32 %2 to i64
   %i = add i64 %d, %3
-  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
   ret float* %p
 }
 ; CHECK-LABEL: @sext_expr(
 ; CHECK: sext i32
 ; CHECK: sext i32
 ; CHECK: sext i32
-; CHECK: getelementptr float* %{{[a-zA-Z0-9]+}}, i64 8
+; CHECK: getelementptr float, float* %{{[a-zA-Z0-9]+}}, i64 8
 
 ; Verifies we handle "sub" correctly.
 define float* @sub(i64 %i, i64 %j) {
   %i2 = sub i64 %i, 5 ; i - 5
   %j2 = sub i64 5, %j ; 5 - i
-  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i2, i64 %j2
+  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i2, i64 %j2
   ret float* %p
 }
 ; CHECK-LABEL: @sub(
 ; CHECK: %[[j2:[a-zA-Z0-9]+]] = sub i64 0, %j
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
-; CHECK: getelementptr float* [[BASE_PTR]], i64 -155
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
+; CHECK: getelementptr float, float* [[BASE_PTR]], i64 -155
 
 %struct.Packed = type <{ [3 x i32], [8 x i64] }> ; <> means packed
 
@@ -167,13 +167,13 @@ entry:
   %idxprom = sext i32 %add to i64
   %add1 = add nsw i32 %i, 1
   %idxprom2 = sext i32 %add1 to i64
-  %arrayidx3 = getelementptr inbounds [1024 x %struct.Packed]* %s, i64 0, i64 %idxprom2, i32 1, i64 %idxprom
+  %arrayidx3 = getelementptr inbounds [1024 x %struct.Packed], [1024 x %struct.Packed]* %s, i64 0, i64 %idxprom2, i32 1, i64 %idxprom
   ret i64* %arrayidx3
 }
 ; CHECK-LABEL: @packed_struct(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [1024 x %struct.Packed]* %s, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [1024 x %struct.Packed], [1024 x %struct.Packed]* %s, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1, i64 %{{[a-zA-Z0-9]+}}
 ; CHECK: [[CASTED_PTR:%[a-zA-Z0-9]+]] = bitcast i64* [[BASE_PTR]] to i8*
-; CHECK: %uglygep = getelementptr i8* [[CASTED_PTR]], i64 100
+; CHECK: %uglygep = getelementptr i8, i8* [[CASTED_PTR]], i64 100
 ; CHECK: bitcast i8* %uglygep to i64*
 
 ; We shouldn't be able to extract the 8 from "zext(a +nuw (b + 8))",
@@ -183,11 +183,11 @@ entry:
   %0 = add i32 %b, 8
   %1 = add nuw i32 %a, %0
   %i = zext i32 %1 to i64
-  %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
   ret float* %p
 }
 ; CHECK-LABEL: zext_expr(
-; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
 
 ; Per http://llvm.org/docs/LangRef.html#id181, the indices of a off-bound gep
 ; should be considered sign-extended to the pointer size. Therefore,
@@ -200,11 +200,11 @@ entry:
 define float* @i32_add(i32 %a) {
 entry:
   %i = add i32 %a, 8
-  %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i32 %i
+  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i32 %i
   ret float* %p
 }
 ; CHECK-LABEL: @i32_add(
-; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
 ; CHECK-NOT: getelementptr
 
 ; Verifies that we compute the correct constant offset when the index is
@@ -216,23 +216,23 @@ entry:
   %0 = add nsw nuw i1 %a, 1
   %1 = sext i1 %0 to i4
   %2 = zext i4 %1 to i64         ; zext (sext i1 1 to i4) to i64 = 15
-  %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %2
+  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %2
   ret float* %p
 }
 ; CHECK-LABEL: @apint(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float* [[BASE_PTR]], i64 15
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, float* [[BASE_PTR]], i64 15
 
 ; Do not trace into binary operators other than ADD, SUB, and OR.
 define float* @and(i64 %a) {
 entry:
   %0 = shl i64 %a, 2
   %1 = and i64 %0, 1
-  %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %1
+  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %1
   ret float* %p
 }
 ; CHECK-LABEL: @and(
-; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array
+; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array
 ; CHECK-NOT: getelementptr
 
 ; The code that rebuilds an OR expression used to be buggy, and failed on this
@@ -247,9 +247,9 @@ entry:
   ; ((a << 2) + 12) and 1 have no common bits. Therefore,
   ; SeparateConstOffsetFromGEP is able to extract the 12.
   ; TODO(jingyue): We could reassociate the expression to combine 12 and 1.
-  %p = getelementptr float* %ptr, i64 %or
-; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr float* %ptr, i64 [[OR]]
-; CHECK: getelementptr float* [[PTR]], i64 12
+  %p = getelementptr float, float* %ptr, i64 %or
+; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr float, float* %ptr, i64 [[OR]]
+; CHECK: getelementptr float, float* [[PTR]], i64 12
   ret float* %p
 ; CHECK-NEXT: ret
 }
@@ -269,10 +269,10 @@ define %struct2* @sign_mod_unsign(%struc
 entry:
   %arrayidx = add nsw i64 %idx, -2
 ; CHECK-NOT: add
-  %ptr2 = getelementptr inbounds %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
-; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0* %ptr, i64 0, i32 3, i64 %idx, i32 1
+  %ptr2 = getelementptr inbounds %struct0, %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
+; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0, %struct0* %ptr, i64 0, i32 3, i64 %idx, i32 1
 ; CHECK: [[PTR1:%[a-zA-Z0-9]+]] = bitcast %struct2* [[PTR]] to i8*
-; CHECK: getelementptr i8* [[PTR1]], i64 -64
+; CHECK: getelementptr i8, i8* [[PTR1]], i64 -64
 ; CHECK: bitcast
   ret %struct2* %ptr2
 ; CHECK-NEXT: ret

Modified: llvm/trunk/test/Transforms/SimplifyCFG/2005-08-01-PHIUpdateFail.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/2005-08-01-PHIUpdateFail.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/2005-08-01-PHIUpdateFail.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/2005-08-01-PHIUpdateFail.ll Fri Feb 27 13:29:02 2015
@@ -58,7 +58,7 @@ then.2.i:		; preds = %endif.1.i, %then.1
 getfree.exit:		; preds = %endif.1.i, %then.1.i
 	ret void
 endif.1:		; preds = %read_min.exit
-	%tmp.27.i = getelementptr i32* null, i32 0		; <i32*> [#uses=0]
+	%tmp.27.i = getelementptr i32, i32* null, i32 0		; <i32*> [#uses=0]
 	br i1 false, label %loopexit.0.i15, label %no_exit.0.i14
 no_exit.0.i14:		; preds = %endif.1
 	ret void

Modified: llvm/trunk/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll Fri Feb 27 13:29:02 2015
@@ -66,7 +66,7 @@ cond_true20:		; preds = %cond_next13
 	ret i32 1
 cond_next27:		; preds = %cond_next13
 	%tmp29 = load %struct.anon** %num		; <%struct.anon*> [#uses=1]
-	%tmp30 = getelementptr %struct.anon* %tmp29, i32 0, i32 2		; <i32*> [#uses=1]
+	%tmp30 = getelementptr %struct.anon, %struct.anon* %tmp29, i32 0, i32 2		; <i32*> [#uses=1]
 	%tmp31 = load i32* %tmp30		; <i32> [#uses=2]
 	%tmp33 = icmp sge i32 %tmp31, %scale		; <i1> [#uses=1]
 	%max = select i1 %tmp33, i32 %tmp31, i32 %scale		; <i32> [#uses=4]
@@ -75,7 +75,7 @@ cond_next27:		; preds = %cond_next13
 	call void @init_num( %struct.anon** %guess1 )
 	%tmp36 = call %struct.anon* @new_num( i32 1, i32 1 )		; <%struct.anon*> [#uses=2]
 	store %struct.anon* %tmp36, %struct.anon** %point5
-	%tmp.upgrd.3 = getelementptr %struct.anon* %tmp36, i32 0, i32 4, i32 1		; <i8*> [#uses=1]
+	%tmp.upgrd.3 = getelementptr %struct.anon, %struct.anon* %tmp36, i32 0, i32 4, i32 1		; <i8*> [#uses=1]
 	store i8 5, i8* %tmp.upgrd.3
 	%tmp39 = icmp slt i32 %tmp17, 0		; <i1> [#uses=1]
 	br i1 %tmp39, label %cond_true40, label %cond_false43
@@ -87,14 +87,14 @@ cond_true40:		; preds = %cond_next27
 cond_false43:		; preds = %cond_next27
 	call void @int2num( %struct.anon** %guess, i32 10 )
 	%tmp45 = load %struct.anon** %num		; <%struct.anon*> [#uses=1]
-	%tmp46 = getelementptr %struct.anon* %tmp45, i32 0, i32 1		; <i32*> [#uses=1]
+	%tmp46 = getelementptr %struct.anon, %struct.anon* %tmp45, i32 0, i32 1		; <i32*> [#uses=1]
 	%tmp47 = load i32* %tmp46		; <i32> [#uses=1]
 	call void @int2num( %struct.anon** %guess1, i32 %tmp47 )
 	%tmp48 = load %struct.anon** %guess1		; <%struct.anon*> [#uses=1]
 	%tmp49 = load %struct.anon** %point5		; <%struct.anon*> [#uses=1]
 	call void @bc_multiply( %struct.anon* %tmp48, %struct.anon* %tmp49, %struct.anon** %guess1, i32 %max )
 	%tmp51 = load %struct.anon** %guess1		; <%struct.anon*> [#uses=1]
-	%tmp52 = getelementptr %struct.anon* %tmp51, i32 0, i32 2		; <i32*> [#uses=1]
+	%tmp52 = getelementptr %struct.anon, %struct.anon* %tmp51, i32 0, i32 2		; <i32*> [#uses=1]
 	store i32 0, i32* %tmp52
 	%tmp53 = load %struct.anon** %guess		; <%struct.anon*> [#uses=1]
 	%tmp54 = load %struct.anon** %guess1		; <%struct.anon*> [#uses=1]

Modified: llvm/trunk/test/Transforms/SimplifyCFG/2006-08-03-Crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/2006-08-03-Crash.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/2006-08-03-Crash.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/2006-08-03-Crash.ll Fri Feb 27 13:29:02 2015
@@ -43,46 +43,46 @@ bb:		; preds = %entry
 	ret void
 bb145:		; preds = %entry
 	%tmp146 = load %struct.tree_node** null		; <%struct.tree_node*> [#uses=1]
-	%tmp148 = getelementptr %struct.tree_node* %tmp146, i32 0, i32 0, i32 0, i32 1		; <%struct.tree_node**> [#uses=1]
+	%tmp148 = getelementptr %struct.tree_node, %struct.tree_node* %tmp146, i32 0, i32 0, i32 0, i32 1		; <%struct.tree_node**> [#uses=1]
 	%tmp149 = load %struct.tree_node** %tmp148		; <%struct.tree_node*> [#uses=1]
 	%tmp150 = bitcast %struct.tree_node* %tmp149 to %struct.tree_type*		; <%struct.tree_type*> [#uses=1]
-	%tmp151 = getelementptr %struct.tree_type* %tmp150, i32 0, i32 6		; <i16*> [#uses=1]
+	%tmp151 = getelementptr %struct.tree_type, %struct.tree_type* %tmp150, i32 0, i32 6		; <i16*> [#uses=1]
 	%tmp151.upgrd.1 = bitcast i16* %tmp151 to i32*		; <i32*> [#uses=1]
 	%tmp152 = load i32* %tmp151.upgrd.1		; <i32> [#uses=1]
 	%tmp154 = lshr i32 %tmp152, 16		; <i32> [#uses=1]
 	%tmp154.mask = and i32 %tmp154, 127		; <i32> [#uses=1]
 	%gep.upgrd.2 = zext i32 %tmp154.mask to i64		; <i64> [#uses=1]
-	%tmp155 = getelementptr [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.2		; <i8*> [#uses=1]
+	%tmp155 = getelementptr [35 x i8], [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.2		; <i8*> [#uses=1]
 	%tmp156 = load i8* %tmp155		; <i8> [#uses=1]
 	%tmp157 = icmp eq i8 %tmp156, 4		; <i1> [#uses=1]
 	br i1 %tmp157, label %cond_next241, label %cond_true158
 cond_true158:		; preds = %bb145
 	%tmp172 = load %struct.tree_node** null		; <%struct.tree_node*> [#uses=1]
-	%tmp174 = getelementptr %struct.tree_node* %tmp172, i32 0, i32 0, i32 0, i32 1		; <%struct.tree_node**> [#uses=1]
+	%tmp174 = getelementptr %struct.tree_node, %struct.tree_node* %tmp172, i32 0, i32 0, i32 0, i32 1		; <%struct.tree_node**> [#uses=1]
 	%tmp175 = load %struct.tree_node** %tmp174		; <%struct.tree_node*> [#uses=1]
 	%tmp176 = bitcast %struct.tree_node* %tmp175 to %struct.tree_type*		; <%struct.tree_type*> [#uses=1]
-	%tmp177 = getelementptr %struct.tree_type* %tmp176, i32 0, i32 6		; <i16*> [#uses=1]
+	%tmp177 = getelementptr %struct.tree_type, %struct.tree_type* %tmp176, i32 0, i32 6		; <i16*> [#uses=1]
 	%tmp177.upgrd.3 = bitcast i16* %tmp177 to i32*		; <i32*> [#uses=1]
 	%tmp178 = load i32* %tmp177.upgrd.3		; <i32> [#uses=1]
 	%tmp180 = lshr i32 %tmp178, 16		; <i32> [#uses=1]
 	%tmp180.mask = and i32 %tmp180, 127		; <i32> [#uses=1]
 	%gep.upgrd.4 = zext i32 %tmp180.mask to i64		; <i64> [#uses=1]
-	%tmp181 = getelementptr [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.4		; <i8*> [#uses=1]
+	%tmp181 = getelementptr [35 x i8], [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.4		; <i8*> [#uses=1]
 	%tmp182 = load i8* %tmp181		; <i8> [#uses=1]
 	%tmp183 = icmp eq i8 %tmp182, 8		; <i1> [#uses=1]
 	br i1 %tmp183, label %cond_next241, label %cond_true184
 cond_true184:		; preds = %cond_true158
 	%tmp185 = load %struct.tree_node** null		; <%struct.tree_node*> [#uses=1]
-	%tmp187 = getelementptr %struct.tree_node* %tmp185, i32 0, i32 0, i32 0, i32 1		; <%struct.tree_node**> [#uses=1]
+	%tmp187 = getelementptr %struct.tree_node, %struct.tree_node* %tmp185, i32 0, i32 0, i32 0, i32 1		; <%struct.tree_node**> [#uses=1]
 	%tmp188 = load %struct.tree_node** %tmp187		; <%struct.tree_node*> [#uses=1]
 	%tmp189 = bitcast %struct.tree_node* %tmp188 to %struct.tree_type*		; <%struct.tree_type*> [#uses=1]
-	%tmp190 = getelementptr %struct.tree_type* %tmp189, i32 0, i32 6		; <i16*> [#uses=1]
+	%tmp190 = getelementptr %struct.tree_type, %struct.tree_type* %tmp189, i32 0, i32 6		; <i16*> [#uses=1]
 	%tmp190.upgrd.5 = bitcast i16* %tmp190 to i32*		; <i32*> [#uses=1]
 	%tmp191 = load i32* %tmp190.upgrd.5		; <i32> [#uses=1]
 	%tmp193 = lshr i32 %tmp191, 16		; <i32> [#uses=1]
 	%tmp193.mask = and i32 %tmp193, 127		; <i32> [#uses=1]
 	%gep.upgrd.6 = zext i32 %tmp193.mask to i64		; <i64> [#uses=1]
-	%tmp194 = getelementptr [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.6		; <i8*> [#uses=1]
+	%tmp194 = getelementptr [35 x i8], [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.6		; <i8*> [#uses=1]
 	%tmp195 = load i8* %tmp194		; <i8> [#uses=1]
 	%tmp196 = icmp eq i8 %tmp195, 4		; <i1> [#uses=1]
 	br i1 %tmp196, label %cond_next241, label %cond_true197

Modified: llvm/trunk/test/Transforms/SimplifyCFG/2006-12-08-Ptr-ICmp-Branch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/2006-12-08-Ptr-ICmp-Branch.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/2006-12-08-Ptr-ICmp-Branch.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/2006-12-08-Ptr-ICmp-Branch.ll Fri Feb 27 13:29:02 2015
@@ -45,16 +45,16 @@ entry:
 	store %struct.trie_s* %t, %struct.trie_s** %t_addr
 	store %struct.FILE* %f, %struct.FILE** %f_addr
 	store i32 0, i32* %wstate
-	%tmp = getelementptr %struct.charsequence* %cs, i64 0, i32 0		; <i8**> [#uses=1]
-	%tmp1 = getelementptr %struct.charsequence* @C.0.2294, i64 0, i32 0		; <i8**> [#uses=1]
+	%tmp = getelementptr %struct.charsequence, %struct.charsequence* %cs, i64 0, i32 0		; <i8**> [#uses=1]
+	%tmp1 = getelementptr %struct.charsequence, %struct.charsequence* @C.0.2294, i64 0, i32 0		; <i8**> [#uses=1]
 	%tmp.upgrd.5 = load i8** %tmp1		; <i8*> [#uses=1]
 	store i8* %tmp.upgrd.5, i8** %tmp
-	%tmp.upgrd.6 = getelementptr %struct.charsequence* %cs, i64 0, i32 1		; <i32*> [#uses=1]
-	%tmp2 = getelementptr %struct.charsequence* @C.0.2294, i64 0, i32 1		; <i32*> [#uses=1]
+	%tmp.upgrd.6 = getelementptr %struct.charsequence, %struct.charsequence* %cs, i64 0, i32 1		; <i32*> [#uses=1]
+	%tmp2 = getelementptr %struct.charsequence, %struct.charsequence* @C.0.2294, i64 0, i32 1		; <i32*> [#uses=1]
 	%tmp.upgrd.7 = load i32* %tmp2		; <i32> [#uses=1]
 	store i32 %tmp.upgrd.7, i32* %tmp.upgrd.6
-	%tmp3 = getelementptr %struct.charsequence* %cs, i64 0, i32 2		; <i32*> [#uses=1]
-	%tmp4 = getelementptr %struct.charsequence* @C.0.2294, i64 0, i32 2		; <i32*> [#uses=1]
+	%tmp3 = getelementptr %struct.charsequence, %struct.charsequence* %cs, i64 0, i32 2		; <i32*> [#uses=1]
+	%tmp4 = getelementptr %struct.charsequence, %struct.charsequence* @C.0.2294, i64 0, i32 2		; <i32*> [#uses=1]
 	%tmp5 = load i32* %tmp4		; <i32> [#uses=1]
 	store i32 %tmp5, i32* %tmp3
 	br label %bb33

Modified: llvm/trunk/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll Fri Feb 27 13:29:02 2015
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-darwin12.0
 ; CHECK: entry:
 ; CHECK-NEXT: sub i3 %arg, -4
 ; CHECK-NEXT: zext i3 %switch.tableidx to i4
-; CHECK-NEXT: getelementptr inbounds [8 x i64]* @switch.table, i32 0, i4 %switch.tableidx.zext
+; CHECK-NEXT: getelementptr inbounds [8 x i64], [8 x i64]* @switch.table, i32 0, i4 %switch.tableidx.zext
 ; CHECK-NEXT: load i64* %switch.gep
 ; CHECK-NEXT: add i64
 ; CHECK-NEXT: ret i64

Modified: llvm/trunk/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll Fri Feb 27 13:29:02 2015
@@ -8,7 +8,7 @@ target triple = "x86_64-apple-darwin12.0
 ; CHECK: entry:
 ; CHECK-NEXT: sub i2 %0, -2
 ; CHECK-NEXT: zext i2 %switch.tableidx to i3
-; CHECK-NEXT: getelementptr inbounds [4 x i64]* @switch.table, i32 0, i3 %switch.tableidx.zext
+; CHECK-NEXT: getelementptr inbounds [4 x i64], [4 x i64]* @switch.table, i32 0, i3 %switch.tableidx.zext
 ; CHECK-NEXT: load i64* %switch.gep
 ; CHECK-NEXT: ret i64 %switch.load
 define i64 @_TFO6reduce1E5toRawfS0_FT_Si(i2) {

Modified: llvm/trunk/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll Fri Feb 27 13:29:02 2015
@@ -58,7 +58,7 @@ return:
 ; CHECK-NEXT: %0 = icmp ult i32 %switch.tableidx, 7
 ; CHECK-NEXT: br i1 %0, label %switch.lookup, label %return
 ; CHECK: switch.lookup:
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [7 x i32]* @switch.table, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [7 x i32], [7 x i32]* @switch.table, i32 0, i32 %switch.tableidx
 ; CHECK-NEXT: %switch.load = load i32* %switch.gep
 ; CHECK-NEXT: ret i32 %switch.load
 ; CHECK: return:
@@ -97,7 +97,7 @@ sw.epilog:
 ; CHECK-NEXT: %switch.shiftamt = mul i32 %switch.tableidx, 8
 ; CHECK-NEXT: %switch.downshift = lshr i32 89655594, %switch.shiftamt
 ; CHECK-NEXT: %switch.masked = trunc i32 %switch.downshift to i8
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x float]* @switch.table1, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x float], [4 x float]* @switch.table1, i32 0, i32 %switch.tableidx
 ; CHECK-NEXT: %switch.load = load float* %switch.gep
 ; CHECK-NEXT: br label %sw.epilog
 ; CHECK: sw.epilog:
@@ -144,7 +144,7 @@ return:
 ; CHECK-NEXT: %0 = icmp ult i32 %switch.tableidx, 4
 ; CHECK-NEXT: br i1 %0, label %switch.lookup, label %return
 ; CHECK: switch.lookup:
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i8*]* @switch.table2, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i8*], [4 x i8*]* @switch.table2, i32 0, i32 %switch.tableidx
 ; CHECK-NEXT: %switch.load = load i8** %switch.gep
 ; CHECK-NEXT: ret i8* %switch.load
 }
@@ -173,7 +173,7 @@ sw.epilog:
 
 ; CHECK-LABEL: @earlyreturncrash(
 ; CHECK: switch.lookup:
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32]* @switch.table3, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table3, i32 0, i32 %switch.tableidx
 ; CHECK-NEXT: %switch.load = load i32* %switch.gep
 ; CHECK-NEXT: ret i32 %switch.load
 ; CHECK: sw.epilog:
@@ -749,7 +749,7 @@ return:
 
 ; CHECK-LABEL: @cprop(
 ; CHECK: switch.lookup:
-; CHECK: %switch.gep = getelementptr inbounds [7 x i32]* @switch.table5, i32 0, i32 %switch.tableidx
+; CHECK: %switch.gep = getelementptr inbounds [7 x i32], [7 x i32]* @switch.table5, i32 0, i32 %switch.tableidx
 }
 
 define i32 @unreachable_case(i32 %x)  {
@@ -778,7 +778,7 @@ return:
 
 ; CHECK-LABEL: @unreachable_case(
 ; CHECK: switch.lookup:
-; CHECK: getelementptr inbounds [9 x i32]* @switch.table6, i32 0, i32 %switch.tableidx
+; CHECK: getelementptr inbounds [9 x i32], [9 x i32]* @switch.table6, i32 0, i32 %switch.tableidx
 }
 
 define i32 @unreachable_default(i32 %x)  {
@@ -805,7 +805,7 @@ return:
 ; CHECK-NEXT: %switch.tableidx = sub i32 %x, 0
 ; CHECK-NOT: icmp
 ; CHECK-NOT: br 1i
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32]* @switch.table7, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table7, i32 0, i32 %switch.tableidx
 ; CHECK-NEXT: %switch.load = load i32* %switch.gep
 ; CHECK-NEXT: ret i32 %switch.load
 }

Modified: llvm/trunk/test/Transforms/SimplifyCFG/attr-noduplicate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/attr-noduplicate.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/attr-noduplicate.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/attr-noduplicate.ll Fri Feb 27 13:29:02 2015
@@ -8,8 +8,8 @@
 ; CHECK-NOT: call void @barrier
 define void @noduplicate(i32 %cond, i32* %out) {
 entry:
-  %out1 = getelementptr i32* %out, i32 1
-  %out2 = getelementptr i32* %out, i32 2
+  %out1 = getelementptr i32, i32* %out, i32 1
+  %out2 = getelementptr i32, i32* %out, i32 2
   %cmp = icmp eq i32 %cond, 0
   br i1 %cmp, label %if.then, label %if.end
 

Modified: llvm/trunk/test/Transforms/SimplifyCFG/branch-fold-dbg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/branch-fold-dbg.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/branch-fold-dbg.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/branch-fold-dbg.ll Fri Feb 27 13:29:02 2015
@@ -24,7 +24,7 @@ BB2:
 ;CHECK-NEXT: icmp eq
 
 BB3:                                              ; preds = %BB2
-  %6 = getelementptr inbounds [5 x %0]* @0, i32 0, i32 %0, !dbg !6
+  %6 = getelementptr inbounds [5 x %0], [5 x %0]* @0, i32 0, i32 %0, !dbg !6
   call void @llvm.dbg.value(metadata %0* %6, i64 0, metadata !7, metadata !{}), !dbg !12
   %7 = icmp eq %0* %6, null, !dbg !13
   br i1 %7, label %BB5, label %BB4, !dbg !13

Modified: llvm/trunk/test/Transforms/SimplifyCFG/indirectbr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/indirectbr.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/indirectbr.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/indirectbr.ll Fri Feb 27 13:29:02 2015
@@ -192,7 +192,7 @@ escape-string.top:
 
 xlab8x:                                           ; preds = %xlab5x
   %xvaluex = call i32 @xselectorx()
-  %xblkx.x = getelementptr [9 x i8*]* @xblkx.bbs, i32 0, i32 %xvaluex
+  %xblkx.x = getelementptr [9 x i8*], [9 x i8*]* @xblkx.bbs, i32 0, i32 %xvaluex
   %xblkx.load = load i8** %xblkx.x
   indirectbr i8* %xblkx.load, [label %xblkx.begin, label %xblkx.begin3, label %xblkx.begin4, label %xblkx.begin5, label %xblkx.begin6, label %xblkx.begin7, label %xblkx.begin8, label %xblkx.begin9, label %xblkx.end]
 

Modified: llvm/trunk/test/Transforms/SimplifyCFG/multiple-phis.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/multiple-phis.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/multiple-phis.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/multiple-phis.ll Fri Feb 27 13:29:02 2015
@@ -22,7 +22,7 @@ while.body:
   %add = add i32 %low.0, %high.addr.0
   %div = udiv i32 %add, 2
   %idxprom = zext i32 %div to i64
-  %arrayidx = getelementptr inbounds i32* %r, i64 %idxprom
+  %arrayidx = getelementptr inbounds i32, i32* %r, i64 %idxprom
   %0 = load i32* %arrayidx
   %cmp1 = icmp ult i32 %k, %0
   br i1 %cmp1, label %if.then, label %if.else

Modified: llvm/trunk/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll Fri Feb 27 13:29:02 2015
@@ -105,7 +105,7 @@ if.then4:
 
 if.end7:                                          ; preds = %if.else, %if.then4, %if.then
   %x.0 = phi i32* [ %a, %if.then ], [ null, %if.then4 ], [ null, %if.else ]
-  %gep = getelementptr i32* %x.0, i32 10
+  %gep = getelementptr i32, i32* %x.0, i32 10
   %tmp9 = load i32* %gep
   %tmp10 = or i32 %tmp9, 1
   store i32 %tmp10, i32* %gep

Modified: llvm/trunk/test/Transforms/SimplifyCFG/select-gep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/select-gep.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/select-gep.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/select-gep.ll Fri Feb 27 13:29:02 2015
@@ -8,7 +8,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  %incdec.ptr = getelementptr %ST* %x, i32 0, i32 1
+  %incdec.ptr = getelementptr %ST, %ST* %x, i32 0, i32 1
   br label %if.end
 
 if.end:

Modified: llvm/trunk/test/Transforms/SimplifyCFG/speculate-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/speculate-store.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/speculate-store.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/speculate-store.ll Fri Feb 27 13:29:02 2015
@@ -2,14 +2,14 @@
 
 define void @ifconvertstore(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
 entry:
-  %arrayidx = getelementptr inbounds i32* %B, i64 0
+  %arrayidx = getelementptr inbounds i32, i32* %B, i64 0
   %0 = load i32* %arrayidx, align 4
   %add = add nsw i32 %0, %C
-  %arrayidx2 = getelementptr inbounds i32* %A, i64 0
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 0
 
 ; First store to the location.
   store i32 %add, i32* %arrayidx2, align 4
-  %arrayidx4 = getelementptr inbounds i32* %B, i64 1
+  %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 1
   %1 = load i32* %arrayidx4, align 4
   %add5 = add nsw i32 %1, %D
   %cmp6 = icmp sgt i32 %add5, %C
@@ -30,14 +30,14 @@ ret.end:
 
 define void @noifconvertstore1(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
 entry:
-  %arrayidx = getelementptr inbounds i32* %B, i64 0
+  %arrayidx = getelementptr inbounds i32, i32* %B, i64 0
   %0 = load i32* %arrayidx, align 4
   %add = add nsw i32 %0, %C
-  %arrayidx2 = getelementptr inbounds i32* %A, i64 0
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 0
 
 ; Store to a different location.
   store i32 %add, i32* %arrayidx, align 4
-  %arrayidx4 = getelementptr inbounds i32* %B, i64 1
+  %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 1
   %1 = load i32* %arrayidx4, align 4
   %add5 = add nsw i32 %1, %D
   %cmp6 = icmp sgt i32 %add5, %C
@@ -57,15 +57,15 @@ declare void @unknown_fun()
 
 define void @noifconvertstore2(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
 entry:
-  %arrayidx = getelementptr inbounds i32* %B, i64 0
+  %arrayidx = getelementptr inbounds i32, i32* %B, i64 0
   %0 = load i32* %arrayidx, align 4
   %add = add nsw i32 %0, %C
-  %arrayidx2 = getelementptr inbounds i32* %A, i64 0
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 0
 
 ; First store to the location.
   store i32 %add, i32* %arrayidx2, align 4
   call void @unknown_fun()
-  %arrayidx4 = getelementptr inbounds i32* %B, i64 1
+  %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 1
   %1 = load i32* %arrayidx4, align 4
   %add5 = add nsw i32 %1, %D
   %cmp6 = icmp sgt i32 %add5, %C
@@ -83,14 +83,14 @@ ret.end:
 
 define void @noifconvertstore_volatile(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
 entry:
-  %arrayidx = getelementptr inbounds i32* %B, i64 0
+  %arrayidx = getelementptr inbounds i32, i32* %B, i64 0
   %0 = load i32* %arrayidx, align 4
   %add = add nsw i32 %0, %C
-  %arrayidx2 = getelementptr inbounds i32* %A, i64 0
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 0
 
 ; First store to the location.
   store i32 %add, i32* %arrayidx2, align 4
-  %arrayidx4 = getelementptr inbounds i32* %B, i64 1
+  %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 1
   %1 = load i32* %arrayidx4, align 4
   %add5 = add nsw i32 %1, %D
   %cmp6 = icmp sgt i32 %add5, %C

Modified: llvm/trunk/test/Transforms/SimplifyCFG/speculate-with-offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/speculate-with-offset.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/speculate-with-offset.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/speculate-with-offset.ll Fri Feb 27 13:29:02 2015
@@ -9,7 +9,7 @@
 define void @yes(i1 %c) nounwind {
 entry:
   %a = alloca [4 x i64*], align 8
-  %__a.addr = getelementptr [4 x i64*]* %a, i64 0, i64 3
+  %__a.addr = getelementptr [4 x i64*], [4 x i64*]* %a, i64 0, i64 3
   call void @frob(i64** %__a.addr)
   br i1 %c, label %if.then, label %if.end
 
@@ -31,7 +31,7 @@ return:
 define void @no0(i1 %c) nounwind {
 entry:
   %a = alloca [4 x i64*], align 8
-  %__a.addr = getelementptr [4 x i64*]* %a, i64 0, i64 4
+  %__a.addr = getelementptr [4 x i64*], [4 x i64*]* %a, i64 0, i64 4
   call void @frob(i64** %__a.addr)
   br i1 %c, label %if.then, label %if.end
 
@@ -53,7 +53,7 @@ return:
 define void @no1(i1 %c, i64 %n) nounwind {
 entry:
   %a = alloca [4 x i64*], align 8
-  %__a.addr = getelementptr [4 x i64*]* %a, i64 0, i64 %n
+  %__a.addr = getelementptr [4 x i64*], [4 x i64*]* %a, i64 0, i64 %n
   call void @frob(i64** %__a.addr)
   br i1 %c, label %if.then, label %if.end
 
@@ -75,7 +75,7 @@ return:
 define void @no2(i1 %c, i64 %n) nounwind {
 entry:
   %a = alloca [4 x i64*], align 8
-  %__a.addr = getelementptr [4 x i64*]* %a, i64 1, i64 0
+  %__a.addr = getelementptr [4 x i64*], [4 x i64*]* %a, i64 1, i64 0
   call void @frob(i64** %__a.addr)
   br i1 %c, label %if.then, label %if.end
 

Modified: llvm/trunk/test/Transforms/SimplifyCFG/switch_create.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/switch_create.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/switch_create.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/switch_create.ll Fri Feb 27 13:29:02 2015
@@ -154,7 +154,7 @@ lor.end:
 
 define i1 @test6({ i32, i32 }* %I) {
 entry:
-        %tmp.1.i = getelementptr { i32, i32 }* %I, i64 0, i32 1         ; <i32*> [#uses=1]
+        %tmp.1.i = getelementptr { i32, i32 }, { i32, i32 }* %I, i64 0, i32 1         ; <i32*> [#uses=1]
         %tmp.2.i = load i32* %tmp.1.i           ; <i32> [#uses=6]
         %tmp.2 = icmp eq i32 %tmp.2.i, 14               ; <i1> [#uses=1]
         br i1 %tmp.2, label %shortcirc_done.4, label %shortcirc_next.0

Modified: llvm/trunk/test/Transforms/SimplifyCFG/unreachable-blocks.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/unreachable-blocks.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/unreachable-blocks.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/unreachable-blocks.ll Fri Feb 27 13:29:02 2015
@@ -10,7 +10,7 @@ entry:
 
 while_block:                                      ; preds = %and_if_cont2, %and_if_cont
   %newlen = sub i32 %newlen, 1
-  %newptr = getelementptr i8* %newptr, i64 1
+  %newptr = getelementptr i8, i8* %newptr, i64 1
   %test = icmp sgt i32 %newlen, 0
   br i1 %test, label %and_if1, label %and_if_cont2
 

Modified: llvm/trunk/test/Transforms/SimplifyCFG/volatile-phioper.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SimplifyCFG/volatile-phioper.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SimplifyCFG/volatile-phioper.ll (original)
+++ llvm/trunk/test/Transforms/SimplifyCFG/volatile-phioper.ll Fri Feb 27 13:29:02 2015
@@ -29,7 +29,7 @@ while.body:
   %Addr.017 = phi i8* [ %incdec.ptr, %while.body ], [ null, %if.then ], [ null, %entry ]
   %x.016 = phi i8 [ %inc, %while.body ], [ 0, %if.then ], [ 0, %entry ]
   %inc = add i8 %x.016, 1
-  %incdec.ptr = getelementptr inbounds i8* %Addr.017, i64 1
+  %incdec.ptr = getelementptr inbounds i8, i8* %Addr.017, i64 1
   store volatile i8 %x.016, i8* %Addr.017, align 1
   %0 = ptrtoint i8* %incdec.ptr to i64
   %1 = trunc i64 %0 to i32

Modified: llvm/trunk/test/Transforms/Sink/basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Sink/basic.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/Sink/basic.ll (original)
+++ llvm/trunk/test/Transforms/Sink/basic.ll Fri Feb 27 13:29:02 2015
@@ -75,10 +75,10 @@ entry:
   br i1 %1, label %if, label %endif
 
 if:
-  %2 = getelementptr i32* %0, i32 1
+  %2 = getelementptr i32, i32* %0, i32 1
   store i32 0, i32* %0
   store i32 1, i32* %2
-  %3 = getelementptr i32* %0, i32 %b
+  %3 = getelementptr i32, i32* %0, i32 %b
   %4 = load i32* %3
   ret i32 %4
 
@@ -100,10 +100,10 @@ entry:
   br i1 %1, label %if, label %endif
 
 if:
-  %2 = getelementptr i32* %0, i32 1
+  %2 = getelementptr i32, i32* %0, i32 1
   store i32 0, i32* %0
   store i32 1, i32* %2
-  %3 = getelementptr i32* %0, i32 %b
+  %3 = getelementptr i32, i32* %0, i32 %b
   %4 = load i32* %3
   ret i32 %4
 
@@ -131,10 +131,10 @@ if0:
   br i1 %1, label %if, label %endif
 
 if:
-  %2 = getelementptr i32* %0, i32 1
+  %2 = getelementptr i32, i32* %0, i32 1
   store i32 0, i32* %0
   store i32 1, i32* %2
-  %3 = getelementptr i32* %0, i32 %b
+  %3 = getelementptr i32, i32* %0, i32 %b
   %4 = load i32* %3
   ret i32 %4
 

Modified: llvm/trunk/test/Transforms/StructurizeCFG/branch-on-argument.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/StructurizeCFG/branch-on-argument.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/StructurizeCFG/branch-on-argument.ll (original)
+++ llvm/trunk/test/Transforms/StructurizeCFG/branch-on-argument.ll Fri Feb 27 13:29:02 2015
@@ -28,7 +28,7 @@ entry:
 
 for.body:
   %i = phi i32 [0, %entry], [%i.inc, %end.loop]
-  %ptr = getelementptr i32 addrspace(1)* %out, i32 %i
+  %ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %i
   store i32 %i, i32 addrspace(1)* %ptr, align 4
   br i1 %arg, label %mid.loop, label %end.loop
 

Modified: llvm/trunk/test/Transforms/StructurizeCFG/loop-multiple-exits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/StructurizeCFG/loop-multiple-exits.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/StructurizeCFG/loop-multiple-exits.ll (original)
+++ llvm/trunk/test/Transforms/StructurizeCFG/loop-multiple-exits.ll Fri Feb 27 13:29:02 2015
@@ -23,7 +23,7 @@ for.cond:
 
 ; CHECK: for.body:
 for.body:                                         ; preds = %for.cond
-  %arrayidx = getelementptr inbounds i32 addrspace(1)* %out, i32 %i.0
+  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %i.0
   store i32 %i.0, i32 addrspace(1)* %arrayidx, align 4
   %cmp1 = icmp ugt i32 %i.0, %cond_b
 ; CHECK: br i1 %{{[0-9a-zA-Z_]+}}, label %for.inc, label %[[FLOW1:[0-9a-zA-Z_]+]]
@@ -37,7 +37,7 @@ for.body:
 
 for.inc:                                          ; preds = %for.body
   %0 = add i32 %cond_a, %i.0
-  %arrayidx3 = getelementptr inbounds i32 addrspace(1)* %out, i32 %0
+  %arrayidx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %0
   store i32 %i.0, i32 addrspace(1)* %arrayidx3, align 4
   %inc = add i32 %i.0, 1
   br label %for.cond

Modified: llvm/trunk/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll (original)
+++ llvm/trunk/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll Fri Feb 27 13:29:02 2015
@@ -28,10 +28,10 @@ for.body:
 ; CHECK: lor.lhs.false:
 ; CHECK: br label %Flow
 lor.lhs.false:                                    ; preds = %for.body
-  %arrayidx = getelementptr inbounds float* %nr, i64 %indvars.iv
+  %arrayidx = getelementptr inbounds float, float* %nr, i64 %indvars.iv
   %tmp1 = load float* %arrayidx, align 4
   %tmp2 = add nsw i64 %indvars.iv, -1
-  %arrayidx2 = getelementptr inbounds float* %nr, i64 %tmp2
+  %arrayidx2 = getelementptr inbounds float, float* %nr, i64 %tmp2
   %tmp3 = load float* %arrayidx2, align 4
   %cmp3 = fcmp une float %tmp1, %tmp3
   br i1 %cmp3, label %if.then, label %for.body.1
@@ -44,7 +44,7 @@ lor.lhs.false:
 if.then:                                          ; preds = %lor.lhs.false, %for.body
   %sub4 = sub nsw i32 %tmp0, %prev_start.026
   %tmp4 = add nsw i64 %indvars.iv, -1
-  %arrayidx8 = getelementptr inbounds float* %nr, i64 %tmp4
+  %arrayidx8 = getelementptr inbounds float, float* %nr, i64 %tmp4
   %tmp5 = load float* %arrayidx8, align 4
   br i1 %cmp1, label %for.end, label %for.body.1
 
@@ -83,7 +83,7 @@ for.body.6:
 ; CHECK: if.then6.6
 ; CHECK: br label %for.body.backedge
 if.then6.6:                                       ; preds = %for.body.6
-  %arrayidx8.6 = getelementptr inbounds float* %nr, i64 %indvars.iv.next.454
+  %arrayidx8.6 = getelementptr inbounds float, float* %nr, i64 %indvars.iv.next.454
   %tmp29 = load float* %arrayidx8.6, align 4
   br label %for.body.backedge
 

Modified: llvm/trunk/test/Verifier/2002-11-05-GetelementptrPointers.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Verifier/2002-11-05-GetelementptrPointers.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Verifier/2002-11-05-GetelementptrPointers.ll (original)
+++ llvm/trunk/test/Verifier/2002-11-05-GetelementptrPointers.ll Fri Feb 27 13:29:02 2015
@@ -5,6 +5,6 @@
 ; contained WITHIN a structure.
 
 define void @test({i32, i32*} * %X) {
-	getelementptr {i32, i32*} * %X, i32 0, i32 1, i32 0
+	getelementptr {i32, i32*}, {i32, i32*} * %X, i32 0, i32 1, i32 0
 	ret void
 }

Modified: llvm/trunk/test/Verifier/2010-08-07-PointerIntrinsic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Verifier/2010-08-07-PointerIntrinsic.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Verifier/2010-08-07-PointerIntrinsic.ll (original)
+++ llvm/trunk/test/Verifier/2010-08-07-PointerIntrinsic.ll Fri Feb 27 13:29:02 2015
@@ -8,10 +8,10 @@ target triple = "x86-unknown-unknown"
 @bb = global [16 x i8] zeroinitializer, align 1
 define void @x() nounwind {
 L.0:
-	%0 = getelementptr [32 x i8]* @aa, i32 0, i32 4
+	%0 = getelementptr [32 x i8], [32 x i8]* @aa, i32 0, i32 4
 	%1 = bitcast i8* %0 to [16 x i8]*
 	%2 = bitcast [16 x i8]* %1 to [0 x i8]*
-	%3 = getelementptr [16 x i8]* @bb
+	%3 = getelementptr [16 x i8], [16 x i8]* @bb
 	%4 = bitcast [16 x i8]* %3 to [0 x i8]*
 	call void @llvm.memcpy.i32([0 x i8]* %2, [0 x i8]* %4, i32 16, i32 1)
 	br label %return

Modified: llvm/trunk/test/Verifier/bitcast-address-space-through-constant-inttoptr-inside-gep-instruction.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Verifier/bitcast-address-space-through-constant-inttoptr-inside-gep-instruction.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/Verifier/bitcast-address-space-through-constant-inttoptr-inside-gep-instruction.ll (original)
+++ llvm/trunk/test/Verifier/bitcast-address-space-through-constant-inttoptr-inside-gep-instruction.ll Fri Feb 27 13:29:02 2015
@@ -7,7 +7,7 @@ target datalayout = "e-p:32:32:32-p1:16:
 ; Check that we can find inttoptr -> illegal bitcasts when hidden
 ; inside constantexpr pointer operands
 define i32 addrspace(2)* @illegal_bitcast_inttoptr_as_1_to_2_inside_gep() {
-  %cast = getelementptr i32 addrspace(2)* bitcast (i32 addrspace(1)* inttoptr (i32 1234 to i32 addrspace(1)*) to i32 addrspace(2)*), i32 3
+  %cast = getelementptr i32, i32 addrspace(2)* bitcast (i32 addrspace(1)* inttoptr (i32 1234 to i32 addrspace(1)*) to i32 addrspace(2)*), i32 3
   ret i32 addrspace(2)* %cast
 }
 

Modified: llvm/trunk/test/Verifier/inalloca-vararg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Verifier/inalloca-vararg.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
    (empty)

Propchange: llvm/trunk/test/Verifier/inalloca-vararg.ll
------------------------------------------------------------------------------
--- svn:executable (original)
+++ svn:executable (removed)
@@ -1 +0,0 @@
-*

Modified: llvm/trunk/test/tools/gold/slp-vectorize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/gold/slp-vectorize.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/tools/gold/slp-vectorize.ll (original)
+++ llvm/trunk/test/tools/gold/slp-vectorize.ll Fri Feb 27 13:29:02 2015
@@ -14,15 +14,15 @@ define void @f(float* nocapture %x) {
   %tmp = load float* %x, align 4
   %add = fadd float %tmp, 1.000000e+00
   store float %add, float* %x, align 4
-  %arrayidx1 = getelementptr inbounds float* %x, i64 1
+  %arrayidx1 = getelementptr inbounds float, float* %x, i64 1
   %tmp1 = load float* %arrayidx1, align 4
   %add2 = fadd float %tmp1, 1.000000e+00
   store float %add2, float* %arrayidx1, align 4
-  %arrayidx3 = getelementptr inbounds float* %x, i64 2
+  %arrayidx3 = getelementptr inbounds float, float* %x, i64 2
   %tmp2 = load float* %arrayidx3, align 4
   %add4 = fadd float %tmp2, 1.000000e+00
   store float %add4, float* %arrayidx3, align 4
-  %arrayidx5 = getelementptr inbounds float* %x, i64 3
+  %arrayidx5 = getelementptr inbounds float, float* %x, i64 3
   %tmp3 = load float* %arrayidx5, align 4
   %add6 = fadd float %tmp3, 1.000000e+00
   store float %add6, float* %arrayidx5, align 4

Modified: llvm/trunk/test/tools/gold/vectorize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/gold/vectorize.ll?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/test/tools/gold/vectorize.ll (original)
+++ llvm/trunk/test/tools/gold/vectorize.ll Fri Feb 27 13:29:02 2015
@@ -17,7 +17,7 @@ bb:
 
 bb1:
   %i.0 = phi i64 [ 0, %bb ], [ %tmp4, %bb1 ]
-  %tmp = getelementptr inbounds float* %x, i64 %i.0
+  %tmp = getelementptr inbounds float, float* %x, i64 %i.0
   %tmp2 = load float* %tmp, align 4
   %tmp3 = fadd float %tmp2, 1.000000e+00
   store float %tmp3, float* %tmp, align 4

Modified: llvm/trunk/unittests/IR/ConstantsTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/IR/ConstantsTest.cpp?rev=230786&r1=230785&r2=230786&view=diff
==============================================================================
--- llvm/trunk/unittests/IR/ConstantsTest.cpp (original)
+++ llvm/trunk/unittests/IR/ConstantsTest.cpp Fri Feb 27 13:29:02 2015
@@ -248,9 +248,9 @@ TEST(ConstantsTest, AsInstructionsTest)
   // FIXME: getGetElementPtr() actually creates an inbounds ConstantGEP,
   //        not a normal one!
   //CHECK(ConstantExpr::getGetElementPtr(Global, V, false),
-  //      "getelementptr i32** @dummy, i32 1");
+  //      "getelementptr i32*, i32** @dummy, i32 1");
   CHECK(ConstantExpr::getInBoundsGetElementPtr(Global, V),
-        "getelementptr inbounds i32** @dummy, i32 1");
+        "getelementptr inbounds i32*, i32** @dummy, i32 1");
 
   CHECK(ConstantExpr::getExtractElement(P6, One), "extractelement <2 x i16> "
         P6STR ", i32 1");





More information about the llvm-commits mailing list