[llvm] r230794 - [opaque pointer type] Add textual IR support for explicit type parameter to load instruction

David Blaikie dblaikie at gmail.com
Fri Feb 27 13:18:04 PST 2015
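
Every hunk below applies the same mechanical rewrite: the textual form of the
load instruction now takes an explicit type parameter naming the loaded type,
placed before the pointer operand. A minimal before/after sketch (the names
%v and %p are hypothetical, not taken from any test below):

  %v = load i32* %p, align 4        ; old form: result type inferred from the pointer operand
  %v = load i32, i32* %p, align 4   ; new form: loaded type stated explicitly

This is groundwork for opaque pointer types, where the pointee type can no
longer be recovered from the pointer operand.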


Modified: llvm/trunk/test/CodeGen/ARM/thumb_indirect_calls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/thumb_indirect_calls.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/thumb_indirect_calls.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/thumb_indirect_calls.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 ; CHECK-LABEL: foo:
 define void @foo(i32 %x) {
 entry:
-  %0 = load void (i32)** @f, align 4
+  %0 = load void (i32)*, void (i32)** @f, align 4
   tail call void %0(i32 %x)
   ret void
 

Modified: llvm/trunk/test/CodeGen/ARM/tls1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/tls1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/tls1.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/tls1.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@
 
 define i32 @f() {
 entry:
-	%tmp1 = load i32* @i		; <i32> [#uses=1]
+	%tmp1 = load i32, i32* @i		; <i32> [#uses=1]
 	ret i32 %tmp1
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/tls2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/tls2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/tls2.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/tls2.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ define i32 @f() {
 ; CHECK-PIC-LABEL: f:
 ; CHECK-PIC: __tls_get_addr
 entry:
-	%tmp1 = load i32* @i		; <i32> [#uses=1]
+	%tmp1 = load i32, i32* @i		; <i32> [#uses=1]
 	ret i32 %tmp1
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/tls3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/tls3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/tls3.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/tls3.ll Fri Feb 27 15:17:42 2015
@@ -6,6 +6,6 @@
 
 define i32 @main() {
 entry:
-	%tmp2 = load i32* getelementptr (%struct.anon* @teste, i32 0, i32 0), align 8		; <i32> [#uses=1]
+	%tmp2 = load i32, i32* getelementptr (%struct.anon* @teste, i32 0, i32 0), align 8		; <i32> [#uses=1]
 	ret i32 %tmp2
 }
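
The tls3.ll hunk above also shows the new form applied when the pointer
operand is a constant getelementptr expression: only the load gains the
explicit type here, while the getelementptr operator keeps its old
single-operand form (the analogous explicit-type change for getelementptr
landed as a separate, later commit). The same pattern, sketched on a
hypothetical struct:

  %x = load i32, i32* getelementptr (%struct.S* @g, i32 0, i32 0)   ; %struct.S and @g are hypothetical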

Modified: llvm/trunk/test/CodeGen/ARM/trunc_ldr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/trunc_ldr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/trunc_ldr.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/trunc_ldr.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 define i8 @f1(%struct.A* %d) {
 	%tmp2 = getelementptr %struct.A, %struct.A* %d, i32 0, i32 4
 	%tmp23 = bitcast i16* %tmp2 to i32*
-	%tmp4 = load i32* %tmp23
+	%tmp4 = load i32, i32* %tmp23
 	%tmp512 = lshr i32 %tmp4, 24
 	%tmp56 = trunc i32 %tmp512 to i8
 	ret i8 %tmp56
@@ -15,7 +15,7 @@ define i8 @f1(%struct.A* %d) {
 define i32 @f2(%struct.A* %d) {
 	%tmp2 = getelementptr %struct.A, %struct.A* %d, i32 0, i32 4
 	%tmp23 = bitcast i16* %tmp2 to i32*
-	%tmp4 = load i32* %tmp23
+	%tmp4 = load i32, i32* %tmp23
 	%tmp512 = lshr i32 %tmp4, 24
 	%tmp56 = trunc i32 %tmp512 to i8
         %tmp57 = sext i8 %tmp56 to i32

Modified: llvm/trunk/test/CodeGen/ARM/truncstore-dag-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/truncstore-dag-combine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/truncstore-dag-combine.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/truncstore-dag-combine.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 define void @bar(i8* %P, i16* %Q) {
 entry:
 	%P1 = bitcast i8* %P to i16*		; <i16*> [#uses=1]
-	%tmp = load i16* %Q, align 1		; <i16> [#uses=1]
+	%tmp = load i16, i16* %Q, align 1		; <i16> [#uses=1]
 	store i16 %tmp, i16* %P1, align 1
 	ret void
 }
@@ -11,7 +11,7 @@ entry:
 define void @foo(i8* %P, i32* %Q) {
 entry:
 	%P1 = bitcast i8* %P to i32*		; <i32*> [#uses=1]
-	%tmp = load i32* %Q, align 1		; <i32> [#uses=1]
+	%tmp = load i32, i32* %Q, align 1		; <i32> [#uses=1]
 	store i32 %tmp, i32* %P1, align 1
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/twoaddrinstr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/twoaddrinstr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/twoaddrinstr.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/twoaddrinstr.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ define void @PR13378() nounwind {
 ; CHECK-NEXT:   vst1.32
 
 entry:
-  %0 = load <4 x float>* undef, align 4
+  %0 = load <4 x float>, <4 x float>* undef, align 4
   store <4 x float> zeroinitializer, <4 x float>* undef, align 4
   store <4 x float> %0, <4 x float>* undef, align 4
   %1 = insertelement <4 x float> %0, float 1.000000e+00, i32 3

Modified: llvm/trunk/test/CodeGen/ARM/uint64tof64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/uint64tof64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/uint64tof64.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/uint64tof64.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 
 define fastcc void @t() {
 entry:
-	%0 = load i64* null, align 4		; <i64> [#uses=1]
+	%0 = load i64, i64* null, align 4		; <i64> [#uses=1]
 	%1 = uitofp i64 %0 to double		; <double> [#uses=1]
 	%2 = fdiv double 0.000000e+00, %1		; <double> [#uses=1]
 	%3 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* null, i8* getelementptr ([54 x i8]* @"\01LC10", i32 0, i32 0), i64 0, double %2)		; <i32> [#uses=0]

Modified: llvm/trunk/test/CodeGen/ARM/umulo-32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/umulo-32.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/umulo-32.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/umulo-32.ll Fri Feb 27 15:17:42 2015
@@ -28,7 +28,7 @@ store i32 0, i32* %1
 store i32 %argc, i32* %2, align 4
 store i8** %argv, i8*** %3, align 4
 store i32 10, i32* %m_degree, align 4
-%4 = load i32* %m_degree, align 4
+%4 = load i32, i32* %m_degree, align 4
 %5 = call %umul.ty @llvm.umul.with.overflow.i32(i32 %4, i32 8)
 %6 = extractvalue %umul.ty %5, 1
 %7 = extractvalue %umul.ty %5, 0

Modified: llvm/trunk/test/CodeGen/ARM/unaligned_load_store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/unaligned_load_store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/unaligned_load_store.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/unaligned_load_store.ll Fri Feb 27 15:17:42 2015
@@ -28,7 +28,7 @@ entry:
 
   %__src1.i = bitcast i8* %b to i32*              ; <i32*> [#uses=1]
   %__dest2.i = bitcast i8* %a to i32*             ; <i32*> [#uses=1]
-  %tmp.i = load i32* %__src1.i, align 1           ; <i32> [#uses=1]
+  %tmp.i = load i32, i32* %__src1.i, align 1           ; <i32> [#uses=1]
   store i32 %tmp.i, i32* %__dest2.i, align 1
   ret void
 }
@@ -44,7 +44,7 @@ entry:
 ; UNALIGNED-LABEL: hword:
 ; UNALIGNED: vld1.16
 ; UNALIGNED: vst1.16
-  %tmp = load double* %a, align 2
+  %tmp = load double, double* %a, align 2
   store double %tmp, double* %b, align 2
   ret void
 }
@@ -60,7 +60,7 @@ entry:
 ; UNALIGNED-LABEL: byte:
 ; UNALIGNED: vld1.8
 ; UNALIGNED: vst1.8
-  %tmp = load double* %a, align 1
+  %tmp = load double, double* %a, align 1
   store double %tmp, double* %b, align 1
   ret void
 }
@@ -76,7 +76,7 @@ entry:
 ; UNALIGNED: ldr
 ; UNALIGNED-NOT: strb
 ; UNALIGNED: str
-  %tmp = load i32* %a, align 1
+  %tmp = load i32, i32* %a, align 1
   store i32 %tmp, i32* %b, align 1
   ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/unaligned_load_store_vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/unaligned_load_store_vector.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/unaligned_load_store_vector.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/unaligned_load_store_vector.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ entry:
   %vi  = bitcast i8* %pi to <8 x i8>*
   %vo  = bitcast i8* %po to <8 x i8>*
 ;CHECK: vld1.8
-  %v1 = load  <8 x i8>* %vi, align 1
+  %v1 = load  <8 x i8>,  <8 x i8>* %vi, align 1
 ;CHECK: vst1.8
   store <8 x i8> %v1, <8 x i8>* %vo, align 1
   ret void
@@ -29,7 +29,7 @@ entry:
   %vi  = bitcast i8* %pi to <4 x i16>*
   %vo  = bitcast i8* %po to <4 x i16>*
 ;CHECK: vld1.8
-  %v1 = load  <4 x i16>* %vi, align 1
+  %v1 = load  <4 x i16>,  <4 x i16>* %vi, align 1
 ;CHECK: vst1.8
   store <4 x i16> %v1, <4 x i16>* %vo, align 1
   ret void
@@ -47,7 +47,7 @@ entry:
   %vi  = bitcast i8* %pi to <2 x i32>*
   %vo  = bitcast i8* %po to <2 x i32>*
 ;CHECK: vld1.8
-  %v1 = load  <2 x i32>* %vi, align 1
+  %v1 = load  <2 x i32>,  <2 x i32>* %vi, align 1
 ;CHECK: vst1.8
   store <2 x i32> %v1, <2 x i32>* %vo, align 1
   ret void
@@ -65,7 +65,7 @@ entry:
   %vi  = bitcast i8* %pi to <2 x float>*
   %vo  = bitcast i8* %po to <2 x float>*
 ;CHECK: vld1.8
-  %v1 = load  <2 x float>* %vi, align 1
+  %v1 = load  <2 x float>,  <2 x float>* %vi, align 1
 ;CHECK: vst1.8
   store <2 x float> %v1, <2 x float>* %vo, align 1
   ret void
@@ -83,7 +83,7 @@ entry:
   %vi  = bitcast i8* %pi to <16 x i8>*
   %vo  = bitcast i8* %po to <16 x i8>*
 ;CHECK: vld1.8
-  %v1 = load  <16 x i8>* %vi, align 1
+  %v1 = load  <16 x i8>,  <16 x i8>* %vi, align 1
 ;CHECK: vst1.8
   store <16 x i8> %v1, <16 x i8>* %vo, align 1
   ret void
@@ -101,7 +101,7 @@ entry:
   %vi  = bitcast i8* %pi to <8 x i16>*
   %vo  = bitcast i8* %po to <8 x i16>*
 ;CHECK: vld1.8
-  %v1 = load  <8 x i16>* %vi, align 1
+  %v1 = load  <8 x i16>,  <8 x i16>* %vi, align 1
 ;CHECK: vst1.8
   store <8 x i16> %v1, <8 x i16>* %vo, align 1
   ret void
@@ -119,7 +119,7 @@ entry:
   %vi  = bitcast i8* %pi to <4 x i32>*
   %vo  = bitcast i8* %po to <4 x i32>*
 ;CHECK: vld1.8
-  %v1 = load  <4 x i32>* %vi, align 1
+  %v1 = load  <4 x i32>,  <4 x i32>* %vi, align 1
 ;CHECK: vst1.8
   store <4 x i32> %v1, <4 x i32>* %vo, align 1
   ret void
@@ -137,7 +137,7 @@ entry:
   %vi  = bitcast i8* %pi to <2 x i64>*
   %vo  = bitcast i8* %po to <2 x i64>*
 ;CHECK: vld1.8
-  %v1 = load  <2 x i64>* %vi, align 1
+  %v1 = load  <2 x i64>,  <2 x i64>* %vi, align 1
 ;CHECK: vst1.8
   store <2 x i64> %v1, <2 x i64>* %vo, align 1
   ret void
@@ -155,7 +155,7 @@ entry:
   %vi  = bitcast i8* %pi to <4 x float>*
   %vo  = bitcast i8* %po to <4 x float>*
 ;CHECK: vld1.8
-  %v1 = load  <4 x float>* %vi, align 1
+  %v1 = load  <4 x float>,  <4 x float>* %vi, align 1
 ;CHECK: vst1.8
   store <4 x float> %v1, <4 x float>* %vo, align 1
   ret void
@@ -173,7 +173,7 @@ entry:
   %vi  = bitcast i8* %pi to <8 x i8>*
   %vo  = bitcast i8* %po to <8 x i8>*
 ;CHECK: vld1.16
-  %v1 = load  <8 x i8>* %vi, align 2
+  %v1 = load  <8 x i8>,  <8 x i8>* %vi, align 2
 ;CHECK: vst1.16
   store <8 x i8> %v1, <8 x i8>* %vo, align 2
   ret void
@@ -191,7 +191,7 @@ entry:
   %vi  = bitcast i8* %pi to <4 x i16>*
   %vo  = bitcast i8* %po to <4 x i16>*
 ;CHECK: vld1.16
-  %v1 = load  <4 x i16>* %vi, align 2
+  %v1 = load  <4 x i16>,  <4 x i16>* %vi, align 2
 ;CHECK: vst1.16
   store <4 x i16> %v1, <4 x i16>* %vo, align 2
   ret void
@@ -209,7 +209,7 @@ entry:
   %vi  = bitcast i8* %pi to <2 x i32>*
   %vo  = bitcast i8* %po to <2 x i32>*
 ;CHECK: vld1.16
-  %v1 = load  <2 x i32>* %vi, align 2
+  %v1 = load  <2 x i32>,  <2 x i32>* %vi, align 2
 ;CHECK: vst1.16
   store <2 x i32> %v1, <2 x i32>* %vo, align 2
   ret void
@@ -227,7 +227,7 @@ entry:
   %vi  = bitcast i8* %pi to <2 x float>*
   %vo  = bitcast i8* %po to <2 x float>*
 ;CHECK: vld1.16
-  %v1 = load  <2 x float>* %vi, align 2
+  %v1 = load  <2 x float>,  <2 x float>* %vi, align 2
 ;CHECK: vst1.16
   store <2 x float> %v1, <2 x float>* %vo, align 2
   ret void
@@ -245,7 +245,7 @@ entry:
   %vi  = bitcast i8* %pi to <16 x i8>*
   %vo  = bitcast i8* %po to <16 x i8>*
 ;CHECK: vld1.16
-  %v1 = load  <16 x i8>* %vi, align 2
+  %v1 = load  <16 x i8>,  <16 x i8>* %vi, align 2
 ;CHECK: vst1.16
   store <16 x i8> %v1, <16 x i8>* %vo, align 2
   ret void
@@ -263,7 +263,7 @@ entry:
   %vi  = bitcast i8* %pi to <8 x i16>*
   %vo  = bitcast i8* %po to <8 x i16>*
 ;CHECK: vld1.16
-  %v1 = load  <8 x i16>* %vi, align 2
+  %v1 = load  <8 x i16>,  <8 x i16>* %vi, align 2
 ;CHECK: vst1.16
   store <8 x i16> %v1, <8 x i16>* %vo, align 2
   ret void
@@ -281,7 +281,7 @@ entry:
   %vi  = bitcast i8* %pi to <4 x i32>*
   %vo  = bitcast i8* %po to <4 x i32>*
 ;CHECK: vld1.16
-  %v1 = load  <4 x i32>* %vi, align 2
+  %v1 = load  <4 x i32>,  <4 x i32>* %vi, align 2
 ;CHECK: vst1.16
   store <4 x i32> %v1, <4 x i32>* %vo, align 2
   ret void
@@ -299,7 +299,7 @@ entry:
   %vi  = bitcast i8* %pi to <2 x i64>*
   %vo  = bitcast i8* %po to <2 x i64>*
 ;CHECK: vld1.16
-  %v1 = load  <2 x i64>* %vi, align 2
+  %v1 = load  <2 x i64>,  <2 x i64>* %vi, align 2
 ;CHECK: vst1.16
   store <2 x i64> %v1, <2 x i64>* %vo, align 2
   ret void
@@ -317,7 +317,7 @@ entry:
   %vi  = bitcast i8* %pi to <4 x float>*
   %vo  = bitcast i8* %po to <4 x float>*
 ;CHECK: vld1.16
-  %v1 = load  <4 x float>* %vi, align 2
+  %v1 = load  <4 x float>,  <4 x float>* %vi, align 2
 ;CHECK: vst1.16
   store <4 x float> %v1, <4 x float>* %vo, align 2
   ret void
@@ -335,7 +335,7 @@ entry:
   %vi  = bitcast i8* %pi to <8 x i8>*
   %vo  = bitcast i8* %po to <8 x i8>*
 ;CHECK: vldr
-  %v1 = load  <8 x i8>* %vi, align 4
+  %v1 = load  <8 x i8>,  <8 x i8>* %vi, align 4
 ;CHECK: vstr
   store <8 x i8> %v1, <8 x i8>* %vo, align 4
   ret void
@@ -353,7 +353,7 @@ entry:
   %vi  = bitcast i8* %pi to <4 x i16>*
   %vo  = bitcast i8* %po to <4 x i16>*
 ;CHECK: vldr
-  %v1 = load  <4 x i16>* %vi, align 4
+  %v1 = load  <4 x i16>,  <4 x i16>* %vi, align 4
 ;CHECK: vstr
   store <4 x i16> %v1, <4 x i16>* %vo, align 4
   ret void
@@ -371,7 +371,7 @@ entry:
   %vi  = bitcast i8* %pi to <2 x i32>*
   %vo  = bitcast i8* %po to <2 x i32>*
 ;CHECK: vldr
-  %v1 = load  <2 x i32>* %vi, align 4
+  %v1 = load  <2 x i32>,  <2 x i32>* %vi, align 4
 ;CHECK: vstr
   store <2 x i32> %v1, <2 x i32>* %vo, align 4
   ret void
@@ -389,7 +389,7 @@ entry:
   %vi  = bitcast i8* %pi to <2 x float>*
   %vo  = bitcast i8* %po to <2 x float>*
 ;CHECK: vldr
-  %v1 = load  <2 x float>* %vi, align 4
+  %v1 = load  <2 x float>,  <2 x float>* %vi, align 4
 ;CHECK: vstr
   store <2 x float> %v1, <2 x float>* %vo, align 4
   ret void
@@ -407,7 +407,7 @@ entry:
   %vi  = bitcast i8* %pi to <16 x i8>*
   %vo  = bitcast i8* %po to <16 x i8>*
 ;CHECK: vld1.32
-  %v1 = load  <16 x i8>* %vi, align 4
+  %v1 = load  <16 x i8>,  <16 x i8>* %vi, align 4
 ;CHECK: vst1.32
   store <16 x i8> %v1, <16 x i8>* %vo, align 4
   ret void
@@ -425,7 +425,7 @@ entry:
   %vi  = bitcast i8* %pi to <8 x i16>*
   %vo  = bitcast i8* %po to <8 x i16>*
 ;CHECK: vld1.32
-  %v1 = load  <8 x i16>* %vi, align 4
+  %v1 = load  <8 x i16>,  <8 x i16>* %vi, align 4
 ;CHECK: vst1.32
   store <8 x i16> %v1, <8 x i16>* %vo, align 4
   ret void
@@ -443,7 +443,7 @@ entry:
   %vi  = bitcast i8* %pi to <4 x i32>*
   %vo  = bitcast i8* %po to <4 x i32>*
 ;CHECK: vld1.32
-  %v1 = load  <4 x i32>* %vi, align 4
+  %v1 = load  <4 x i32>,  <4 x i32>* %vi, align 4
 ;CHECK: vst1.32
   store <4 x i32> %v1, <4 x i32>* %vo, align 4
   ret void
@@ -461,7 +461,7 @@ entry:
   %vi  = bitcast i8* %pi to <2 x i64>*
   %vo  = bitcast i8* %po to <2 x i64>*
 ;CHECK: vld1.32
-  %v1 = load  <2 x i64>* %vi, align 4
+  %v1 = load  <2 x i64>,  <2 x i64>* %vi, align 4
 ;CHECK: vst1.32
   store <2 x i64> %v1, <2 x i64>* %vo, align 4
   ret void
@@ -479,7 +479,7 @@ entry:
   %vi  = bitcast i8* %pi to <4 x float>*
   %vo  = bitcast i8* %po to <4 x float>*
 ;CHECK: vld1.32
-  %v1 = load  <4 x float>* %vi, align 4
+  %v1 = load  <4 x float>,  <4 x float>* %vi, align 4
 ;CHECK: vst1.32
   store <4 x float> %v1, <4 x float>* %vo, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/undef-sext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/undef-sext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/undef-sext.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/undef-sext.ll Fri Feb 27 15:17:42 2015
@@ -9,6 +9,6 @@ entry:
 ; CHECK: bx lr
   %0 = sext i16 undef to i32
   %1 = getelementptr inbounds i32, i32* %a, i32 %0
-  %2 = load i32* %1, align 4
+  %2 = load i32, i32* %1, align 4
   ret i32 %2
 }

Modified: llvm/trunk/test/CodeGen/ARM/vaba.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vaba.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vaba.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vaba.ll Fri Feb 27 15:17:42 2015
@@ -3,9 +3,9 @@
 define <8 x i8> @vabas8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ;CHECK-LABEL: vabas8:
 ;CHECK: vaba.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
-	%tmp3 = load <8 x i8>* %C
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp3 = load <8 x i8>, <8 x i8>* %C
 	%tmp4 = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp3)
 	%tmp5 = add <8 x i8> %tmp1, %tmp4
 	ret <8 x i8> %tmp5
@@ -14,9 +14,9 @@ define <8 x i8> @vabas8(<8 x i8>* %A, <8
 define <4 x i16> @vabas16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
 ;CHECK-LABEL: vabas16:
 ;CHECK: vaba.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
-	%tmp3 = load <4 x i16>* %C
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp3 = load <4 x i16>, <4 x i16>* %C
 	%tmp4 = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %tmp2, <4 x i16> %tmp3)
 	%tmp5 = add <4 x i16> %tmp1, %tmp4
 	ret <4 x i16> %tmp5
@@ -25,9 +25,9 @@ define <4 x i16> @vabas16(<4 x i16>* %A,
 define <2 x i32> @vabas32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
 ;CHECK-LABEL: vabas32:
 ;CHECK: vaba.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
-	%tmp3 = load <2 x i32>* %C
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp3 = load <2 x i32>, <2 x i32>* %C
 	%tmp4 = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %tmp2, <2 x i32> %tmp3)
 	%tmp5 = add <2 x i32> %tmp1, %tmp4
 	ret <2 x i32> %tmp5
@@ -36,9 +36,9 @@ define <2 x i32> @vabas32(<2 x i32>* %A,
 define <8 x i8> @vabau8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ;CHECK-LABEL: vabau8:
 ;CHECK: vaba.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
-	%tmp3 = load <8 x i8>* %C
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp3 = load <8 x i8>, <8 x i8>* %C
 	%tmp4 = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp3)
 	%tmp5 = add <8 x i8> %tmp1, %tmp4
 	ret <8 x i8> %tmp5
@@ -47,9 +47,9 @@ define <8 x i8> @vabau8(<8 x i8>* %A, <8
 define <4 x i16> @vabau16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
 ;CHECK-LABEL: vabau16:
 ;CHECK: vaba.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
-	%tmp3 = load <4 x i16>* %C
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp3 = load <4 x i16>, <4 x i16>* %C
 	%tmp4 = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %tmp2, <4 x i16> %tmp3)
 	%tmp5 = add <4 x i16> %tmp1, %tmp4
 	ret <4 x i16> %tmp5
@@ -58,9 +58,9 @@ define <4 x i16> @vabau16(<4 x i16>* %A,
 define <2 x i32> @vabau32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
 ;CHECK-LABEL: vabau32:
 ;CHECK: vaba.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
-	%tmp3 = load <2 x i32>* %C
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp3 = load <2 x i32>, <2 x i32>* %C
 	%tmp4 = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %tmp2, <2 x i32> %tmp3)
 	%tmp5 = add <2 x i32> %tmp1, %tmp4
 	ret <2 x i32> %tmp5
@@ -69,9 +69,9 @@ define <2 x i32> @vabau32(<2 x i32>* %A,
 define <16 x i8> @vabaQs8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
 ;CHECK-LABEL: vabaQs8:
 ;CHECK: vaba.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
-	%tmp3 = load <16 x i8>* %C
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp3 = load <16 x i8>, <16 x i8>* %C
 	%tmp4 = call <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8> %tmp2, <16 x i8> %tmp3)
 	%tmp5 = add <16 x i8> %tmp1, %tmp4
 	ret <16 x i8> %tmp5
@@ -80,9 +80,9 @@ define <16 x i8> @vabaQs8(<16 x i8>* %A,
 define <8 x i16> @vabaQs16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
 ;CHECK-LABEL: vabaQs16:
 ;CHECK: vaba.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
-	%tmp3 = load <8 x i16>* %C
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp3 = load <8 x i16>, <8 x i16>* %C
 	%tmp4 = call <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16> %tmp2, <8 x i16> %tmp3)
 	%tmp5 = add <8 x i16> %tmp1, %tmp4
 	ret <8 x i16> %tmp5
@@ -91,9 +91,9 @@ define <8 x i16> @vabaQs16(<8 x i16>* %A
 define <4 x i32> @vabaQs32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
 ;CHECK-LABEL: vabaQs32:
 ;CHECK: vaba.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
-	%tmp3 = load <4 x i32>* %C
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp3 = load <4 x i32>, <4 x i32>* %C
 	%tmp4 = call <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32> %tmp2, <4 x i32> %tmp3)
 	%tmp5 = add <4 x i32> %tmp1, %tmp4
 	ret <4 x i32> %tmp5
@@ -102,9 +102,9 @@ define <4 x i32> @vabaQs32(<4 x i32>* %A
 define <16 x i8> @vabaQu8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
 ;CHECK-LABEL: vabaQu8:
 ;CHECK: vaba.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
-	%tmp3 = load <16 x i8>* %C
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp3 = load <16 x i8>, <16 x i8>* %C
 	%tmp4 = call <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8> %tmp2, <16 x i8> %tmp3)
 	%tmp5 = add <16 x i8> %tmp1, %tmp4
 	ret <16 x i8> %tmp5
@@ -113,9 +113,9 @@ define <16 x i8> @vabaQu8(<16 x i8>* %A,
 define <8 x i16> @vabaQu16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
 ;CHECK-LABEL: vabaQu16:
 ;CHECK: vaba.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
-	%tmp3 = load <8 x i16>* %C
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp3 = load <8 x i16>, <8 x i16>* %C
 	%tmp4 = call <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16> %tmp2, <8 x i16> %tmp3)
 	%tmp5 = add <8 x i16> %tmp1, %tmp4
 	ret <8 x i16> %tmp5
@@ -124,9 +124,9 @@ define <8 x i16> @vabaQu16(<8 x i16>* %A
 define <4 x i32> @vabaQu32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
 ;CHECK-LABEL: vabaQu32:
 ;CHECK: vaba.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
-	%tmp3 = load <4 x i32>* %C
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp3 = load <4 x i32>, <4 x i32>* %C
 	%tmp4 = call <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32> %tmp2, <4 x i32> %tmp3)
 	%tmp5 = add <4 x i32> %tmp1, %tmp4
 	ret <4 x i32> %tmp5
@@ -151,9 +151,9 @@ declare <4 x i32> @llvm.arm.neon.vabdu.v
 define <8 x i16> @vabals8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ;CHECK-LABEL: vabals8:
 ;CHECK: vabal.s8
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i8>* %B
-	%tmp3 = load <8 x i8>* %C
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp3 = load <8 x i8>, <8 x i8>* %C
 	%tmp4 = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp3)
 	%tmp5 = zext <8 x i8> %tmp4 to <8 x i16>
 	%tmp6 = add <8 x i16> %tmp1, %tmp5
@@ -163,9 +163,9 @@ define <8 x i16> @vabals8(<8 x i16>* %A,
 define <4 x i32> @vabals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
 ;CHECK-LABEL: vabals16:
 ;CHECK: vabal.s16
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i16>* %B
-	%tmp3 = load <4 x i16>* %C
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp3 = load <4 x i16>, <4 x i16>* %C
 	%tmp4 = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %tmp2, <4 x i16> %tmp3)
 	%tmp5 = zext <4 x i16> %tmp4 to <4 x i32>
 	%tmp6 = add <4 x i32> %tmp1, %tmp5
@@ -175,9 +175,9 @@ define <4 x i32> @vabals16(<4 x i32>* %A
 define <2 x i64> @vabals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
 ;CHECK-LABEL: vabals32:
 ;CHECK: vabal.s32
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i32>* %B
-	%tmp3 = load <2 x i32>* %C
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp3 = load <2 x i32>, <2 x i32>* %C
 	%tmp4 = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %tmp2, <2 x i32> %tmp3)
 	%tmp5 = zext <2 x i32> %tmp4 to <2 x i64>
 	%tmp6 = add <2 x i64> %tmp1, %tmp5
@@ -187,9 +187,9 @@ define <2 x i64> @vabals32(<2 x i64>* %A
 define <8 x i16> @vabalu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ;CHECK-LABEL: vabalu8:
 ;CHECK: vabal.u8
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i8>* %B
-	%tmp3 = load <8 x i8>* %C
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp3 = load <8 x i8>, <8 x i8>* %C
 	%tmp4 = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp3)
 	%tmp5 = zext <8 x i8> %tmp4 to <8 x i16>
 	%tmp6 = add <8 x i16> %tmp1, %tmp5
@@ -199,9 +199,9 @@ define <8 x i16> @vabalu8(<8 x i16>* %A,
 define <4 x i32> @vabalu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
 ;CHECK-LABEL: vabalu16:
 ;CHECK: vabal.u16
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i16>* %B
-	%tmp3 = load <4 x i16>* %C
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp3 = load <4 x i16>, <4 x i16>* %C
 	%tmp4 = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %tmp2, <4 x i16> %tmp3)
 	%tmp5 = zext <4 x i16> %tmp4 to <4 x i32>
 	%tmp6 = add <4 x i32> %tmp1, %tmp5
@@ -211,9 +211,9 @@ define <4 x i32> @vabalu16(<4 x i32>* %A
 define <2 x i64> @vabalu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
 ;CHECK-LABEL: vabalu32:
 ;CHECK: vabal.u32
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i32>* %B
-	%tmp3 = load <2 x i32>* %C
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp3 = load <2 x i32>, <2 x i32>* %C
 	%tmp4 = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %tmp2, <2 x i32> %tmp3)
 	%tmp5 = zext <2 x i32> %tmp4 to <2 x i64>
 	%tmp6 = add <2 x i64> %tmp1, %tmp5

Modified: llvm/trunk/test/CodeGen/ARM/vabd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vabd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vabd.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vabd.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vabds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vabds8:
 ;CHECK: vabd.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vabds8(<8 x i8>* %A, <8
 define <4 x i16> @vabds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vabds16:
 ;CHECK: vabd.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vabds16(<4 x i16>* %A,
 define <2 x i32> @vabds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vabds32:
 ;CHECK: vabd.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vabds32(<2 x i32>* %A,
 define <8 x i8> @vabdu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vabdu8:
 ;CHECK: vabd.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -39,8 +39,8 @@ define <8 x i8> @vabdu8(<8 x i8>* %A, <8
 define <4 x i16> @vabdu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vabdu16:
 ;CHECK: vabd.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -48,8 +48,8 @@ define <4 x i16> @vabdu16(<4 x i16>* %A,
 define <2 x i32> @vabdu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vabdu32:
 ;CHECK: vabd.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -57,8 +57,8 @@ define <2 x i32> @vabdu32(<2 x i32>* %A,
 define <2 x float> @vabdf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vabdf32:
 ;CHECK: vabd.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.arm.neon.vabds.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -66,8 +66,8 @@ define <2 x float> @vabdf32(<2 x float>*
 define <16 x i8> @vabdQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vabdQs8:
 ;CHECK: vabd.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -75,8 +75,8 @@ define <16 x i8> @vabdQs8(<16 x i8>* %A,
 define <8 x i16> @vabdQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vabdQs16:
 ;CHECK: vabd.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -84,8 +84,8 @@ define <8 x i16> @vabdQs16(<8 x i16>* %A
 define <4 x i32> @vabdQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vabdQs32:
 ;CHECK: vabd.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -93,8 +93,8 @@ define <4 x i32> @vabdQs32(<4 x i32>* %A
 define <16 x i8> @vabdQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vabdQu8:
 ;CHECK: vabd.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -102,8 +102,8 @@ define <16 x i8> @vabdQu8(<16 x i8>* %A,
 define <8 x i16> @vabdQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vabdQu16:
 ;CHECK: vabd.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -111,8 +111,8 @@ define <8 x i16> @vabdQu16(<8 x i16>* %A
 define <4 x i32> @vabdQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vabdQu32:
 ;CHECK: vabd.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -120,8 +120,8 @@ define <4 x i32> @vabdQu32(<4 x i32>* %A
 define <4 x float> @vabdQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vabdQf32:
 ;CHECK: vabd.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.arm.neon.vabds.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
@@ -149,8 +149,8 @@ declare <4 x float> @llvm.arm.neon.vabds
 define <8 x i16> @vabdls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vabdls8:
 ;CHECK: vabdl.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	%tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
@@ -159,8 +159,8 @@ define <8 x i16> @vabdls8(<8 x i8>* %A,
 define <4 x i32> @vabdls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vabdls16:
 ;CHECK: vabdl.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	%tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
@@ -169,8 +169,8 @@ define <4 x i32> @vabdls16(<4 x i16>* %A
 define <2 x i64> @vabdls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vabdls32:
 ;CHECK: vabdl.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	%tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
 	ret <2 x i64> %tmp4
@@ -179,8 +179,8 @@ define <2 x i64> @vabdls32(<2 x i32>* %A
 define <8 x i16> @vabdlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vabdlu8:
 ;CHECK: vabdl.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	%tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
@@ -189,8 +189,8 @@ define <8 x i16> @vabdlu8(<8 x i8>* %A,
 define <4 x i32> @vabdlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vabdlu16:
 ;CHECK: vabdl.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	%tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
@@ -199,8 +199,8 @@ define <4 x i32> @vabdlu16(<4 x i16>* %A
 define <2 x i64> @vabdlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vabdlu32:
 ;CHECK: vabdl.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	%tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
 	ret <2 x i64> %tmp4

Modified: llvm/trunk/test/CodeGen/ARM/vabs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vabs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vabs.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vabs.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 define <8 x i8> @vabss8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vabss8:
 ;CHECK: vabs.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8> %tmp1)
 	ret <8 x i8> %tmp2
 }
@@ -11,7 +11,7 @@ define <8 x i8> @vabss8(<8 x i8>* %A) no
 define <4 x i16> @vabss16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vabss16:
 ;CHECK: vabs.s16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16> %tmp1)
 	ret <4 x i16> %tmp2
 }
@@ -19,7 +19,7 @@ define <4 x i16> @vabss16(<4 x i16>* %A)
 define <2 x i32> @vabss32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vabss32:
 ;CHECK: vabs.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp2
 }
@@ -27,7 +27,7 @@ define <2 x i32> @vabss32(<2 x i32>* %A)
 define <2 x float> @vabsf32(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: vabsf32:
 ;CHECK: vabs.f32
-	%tmp1 = load <2 x float>* %A
+	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp2 = call <2 x float> @llvm.fabs.v2f32(<2 x float> %tmp1)
 	ret <2 x float> %tmp2
 }
@@ -35,7 +35,7 @@ define <2 x float> @vabsf32(<2 x float>*
 define <16 x i8> @vabsQs8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vabsQs8:
 ;CHECK: vabs.s8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8> %tmp1)
 	ret <16 x i8> %tmp2
 }
@@ -43,7 +43,7 @@ define <16 x i8> @vabsQs8(<16 x i8>* %A)
 define <8 x i16> @vabsQs16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vabsQs16:
 ;CHECK: vabs.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %tmp1)
 	ret <8 x i16> %tmp2
 }
@@ -51,7 +51,7 @@ define <8 x i16> @vabsQs16(<8 x i16>* %A
 define <4 x i32> @vabsQs32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vabsQs32:
 ;CHECK: vabs.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vabs.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp2
 }
@@ -59,7 +59,7 @@ define <4 x i32> @vabsQs32(<4 x i32>* %A
 define <4 x float> @vabsQf32(<4 x float>* %A) nounwind {
 ;CHECK-LABEL: vabsQf32:
 ;CHECK: vabs.f32
-	%tmp1 = load <4 x float>* %A
+	%tmp1 = load <4 x float>, <4 x float>* %A
 	%tmp2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %tmp1)
 	ret <4 x float> %tmp2
 }
@@ -77,7 +77,7 @@ declare <4 x float> @llvm.fabs.v4f32(<4
 define <8 x i8> @vqabss8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vqabss8:
 ;CHECK: vqabs.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqabs.v8i8(<8 x i8> %tmp1)
 	ret <8 x i8> %tmp2
 }
@@ -85,7 +85,7 @@ define <8 x i8> @vqabss8(<8 x i8>* %A) n
 define <4 x i16> @vqabss16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqabss16:
 ;CHECK: vqabs.s16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqabs.v4i16(<4 x i16> %tmp1)
 	ret <4 x i16> %tmp2
 }
@@ -93,7 +93,7 @@ define <4 x i16> @vqabss16(<4 x i16>* %A
 define <2 x i32> @vqabss32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqabss32:
 ;CHECK: vqabs.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqabs.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp2
 }
@@ -101,7 +101,7 @@ define <2 x i32> @vqabss32(<2 x i32>* %A
 define <16 x i8> @vqabsQs8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vqabsQs8:
 ;CHECK: vqabs.s8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vqabs.v16i8(<16 x i8> %tmp1)
 	ret <16 x i8> %tmp2
 }
@@ -109,7 +109,7 @@ define <16 x i8> @vqabsQs8(<16 x i8>* %A
 define <8 x i16> @vqabsQs16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqabsQs16:
 ;CHECK: vqabs.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vqabs.v8i16(<8 x i16> %tmp1)
 	ret <8 x i16> %tmp2
 }
@@ -117,7 +117,7 @@ define <8 x i16> @vqabsQs16(<8 x i16>* %
 define <4 x i32> @vqabsQs32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqabsQs32:
 ;CHECK: vqabs.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vqabs.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/vadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vadd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vadd.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vadd.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vaddi8:
 ;CHECK: vadd.i8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = add <8 x i8> %tmp1, %tmp2
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vaddi8(<8 x i8>* %A, <8
 define <4 x i16> @vaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vaddi16:
 ;CHECK: vadd.i16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = add <4 x i16> %tmp1, %tmp2
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vaddi16(<4 x i16>* %A,
 define <2 x i32> @vaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vaddi32:
 ;CHECK: vadd.i32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = add <2 x i32> %tmp1, %tmp2
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vaddi32(<2 x i32>* %A,
 define <1 x i64> @vaddi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vaddi64:
 ;CHECK: vadd.i64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = add <1 x i64> %tmp1, %tmp2
 	ret <1 x i64> %tmp3
 }
@@ -39,8 +39,8 @@ define <1 x i64> @vaddi64(<1 x i64>* %A,
 define <2 x float> @vaddf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vaddf32:
 ;CHECK: vadd.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fadd <2 x float> %tmp1, %tmp2
 	ret <2 x float> %tmp3
 }
@@ -48,8 +48,8 @@ define <2 x float> @vaddf32(<2 x float>*
 define <16 x i8> @vaddQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vaddQi8:
 ;CHECK: vadd.i8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = add <16 x i8> %tmp1, %tmp2
 	ret <16 x i8> %tmp3
 }
@@ -57,8 +57,8 @@ define <16 x i8> @vaddQi8(<16 x i8>* %A,
 define <8 x i16> @vaddQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vaddQi16:
 ;CHECK: vadd.i16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = add <8 x i16> %tmp1, %tmp2
 	ret <8 x i16> %tmp3
 }
@@ -66,8 +66,8 @@ define <8 x i16> @vaddQi16(<8 x i16>* %A
 define <4 x i32> @vaddQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vaddQi32:
 ;CHECK: vadd.i32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = add <4 x i32> %tmp1, %tmp2
 	ret <4 x i32> %tmp3
 }
@@ -75,8 +75,8 @@ define <4 x i32> @vaddQi32(<4 x i32>* %A
 define <2 x i64> @vaddQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vaddQi64:
 ;CHECK: vadd.i64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = add <2 x i64> %tmp1, %tmp2
 	ret <2 x i64> %tmp3
 }
@@ -84,8 +84,8 @@ define <2 x i64> @vaddQi64(<2 x i64>* %A
 define <4 x float> @vaddQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vaddQf32:
 ;CHECK: vadd.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = fadd <4 x float> %tmp1, %tmp2
 	ret <4 x float> %tmp3
 }
@@ -93,8 +93,8 @@ define <4 x float> @vaddQf32(<4 x float>
 define <8 x i8> @vraddhni16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vraddhni16:
 ;CHECK: vraddhn.i16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -102,8 +102,8 @@ define <8 x i8> @vraddhni16(<8 x i16>* %
 define <4 x i16> @vraddhni32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vraddhni32:
 ;CHECK: vraddhn.i32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -111,8 +111,8 @@ define <4 x i16> @vraddhni32(<4 x i32>*
 define <2 x i32> @vraddhni64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vraddhni64:
 ;CHECK: vraddhn.i64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -151,8 +151,8 @@ define <2 x i32> @vaddhni64_natural(<2 x
 define <8 x i16> @vaddls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vaddls8:
 ;CHECK: vaddl.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
 	%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
 	%tmp5 = add <8 x i16> %tmp3, %tmp4
@@ -162,8 +162,8 @@ define <8 x i16> @vaddls8(<8 x i8>* %A,
 define <4 x i32> @vaddls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vaddls16:
 ;CHECK: vaddl.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
 	%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
 	%tmp5 = add <4 x i32> %tmp3, %tmp4
@@ -173,8 +173,8 @@ define <4 x i32> @vaddls16(<4 x i16>* %A
 define <2 x i64> @vaddls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vaddls32:
 ;CHECK: vaddl.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
 	%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
 	%tmp5 = add <2 x i64> %tmp3, %tmp4
@@ -184,8 +184,8 @@ define <2 x i64> @vaddls32(<2 x i32>* %A
 define <8 x i16> @vaddlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vaddlu8:
 ;CHECK: vaddl.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
 	%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
 	%tmp5 = add <8 x i16> %tmp3, %tmp4
@@ -195,8 +195,8 @@ define <8 x i16> @vaddlu8(<8 x i8>* %A,
 define <4 x i32> @vaddlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vaddlu16:
 ;CHECK: vaddl.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
 	%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
 	%tmp5 = add <4 x i32> %tmp3, %tmp4
@@ -206,8 +206,8 @@ define <4 x i32> @vaddlu16(<4 x i16>* %A
 define <2 x i64> @vaddlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vaddlu32:
 ;CHECK: vaddl.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
 	%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
 	%tmp5 = add <2 x i64> %tmp3, %tmp4
@@ -217,8 +217,8 @@ define <2 x i64> @vaddlu32(<2 x i32>* %A
 define <8 x i16> @vaddws8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vaddws8:
 ;CHECK: vaddw.s8
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = sext <8 x i8> %tmp2 to <8 x i16>
 	%tmp4 = add <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
@@ -227,8 +227,8 @@ define <8 x i16> @vaddws8(<8 x i16>* %A,
 define <4 x i32> @vaddws16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vaddws16:
 ;CHECK: vaddw.s16
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = sext <4 x i16> %tmp2 to <4 x i32>
 	%tmp4 = add <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
@@ -237,8 +237,8 @@ define <4 x i32> @vaddws16(<4 x i32>* %A
 define <2 x i64> @vaddws32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vaddws32:
 ;CHECK: vaddw.s32
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = sext <2 x i32> %tmp2 to <2 x i64>
 	%tmp4 = add <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4
@@ -247,8 +247,8 @@ define <2 x i64> @vaddws32(<2 x i64>* %A
 define <8 x i16> @vaddwu8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vaddwu8:
 ;CHECK: vaddw.u8
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = zext <8 x i8> %tmp2 to <8 x i16>
 	%tmp4 = add <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
@@ -257,8 +257,8 @@ define <8 x i16> @vaddwu8(<8 x i16>* %A,
 define <4 x i32> @vaddwu16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vaddwu16:
 ;CHECK: vaddw.u16
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = zext <4 x i16> %tmp2 to <4 x i32>
 	%tmp4 = add <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
@@ -267,8 +267,8 @@ define <4 x i32> @vaddwu16(<4 x i32>* %A
 define <2 x i64> @vaddwu32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vaddwu32:
 ;CHECK: vaddw.u32
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = zext <2 x i32> %tmp2 to <2 x i64>
 	%tmp4 = add <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4

Modified: llvm/trunk/test/CodeGen/ARM/vargs_align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vargs_align.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vargs_align.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vargs_align.ll Fri Feb 27 15:17:42 2015
@@ -8,13 +8,13 @@ entry:
 	%tmp = alloca i32, align 4		; <i32*> [#uses=2]
 	store i32 %a, i32* %a_addr
 	store i32 0, i32* %tmp
-	%tmp1 = load i32* %tmp		; <i32> [#uses=1]
+	%tmp1 = load i32, i32* %tmp		; <i32> [#uses=1]
 	store i32 %tmp1, i32* %retval
 	call void @llvm.va_start(i8* null)
 	br label %return
 
 return:		; preds = %entry
-	%retval2 = load i32* %retval		; <i32> [#uses=1]
+	%retval2 = load i32, i32* %retval		; <i32> [#uses=1]
 	ret i32 %retval2
 ; EABI: add sp, sp, #12
 ; EABI: add sp, sp, #16

Modified: llvm/trunk/test/CodeGen/ARM/vbits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vbits.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vbits.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vbits.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @v_andi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: v_andi8:
 ;CHECK: vand
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = and <8 x i8> %tmp1, %tmp2
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @v_andi8(<8 x i8>* %A, <
 define <4 x i16> @v_andi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: v_andi16:
 ;CHECK: vand
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = and <4 x i16> %tmp1, %tmp2
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @v_andi16(<4 x i16>* %A
 define <2 x i32> @v_andi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: v_andi32:
 ;CHECK: vand
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = and <2 x i32> %tmp1, %tmp2
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @v_andi32(<2 x i32>* %A
 define <1 x i64> @v_andi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: v_andi64:
 ;CHECK: vand
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = and <1 x i64> %tmp1, %tmp2
 	ret <1 x i64> %tmp3
 }
@@ -39,8 +39,8 @@ define <1 x i64> @v_andi64(<1 x i64>* %A
 define <16 x i8> @v_andQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: v_andQi8:
 ;CHECK: vand
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = and <16 x i8> %tmp1, %tmp2
 	ret <16 x i8> %tmp3
 }
@@ -48,8 +48,8 @@ define <16 x i8> @v_andQi8(<16 x i8>* %A
 define <8 x i16> @v_andQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: v_andQi16:
 ;CHECK: vand
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = and <8 x i16> %tmp1, %tmp2
 	ret <8 x i16> %tmp3
 }
@@ -57,8 +57,8 @@ define <8 x i16> @v_andQi16(<8 x i16>* %
 define <4 x i32> @v_andQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: v_andQi32:
 ;CHECK: vand
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = and <4 x i32> %tmp1, %tmp2
 	ret <4 x i32> %tmp3
 }
@@ -66,8 +66,8 @@ define <4 x i32> @v_andQi32(<4 x i32>* %
 define <2 x i64> @v_andQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: v_andQi64:
 ;CHECK: vand
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = and <2 x i64> %tmp1, %tmp2
 	ret <2 x i64> %tmp3
 }
@@ -75,8 +75,8 @@ define <2 x i64> @v_andQi64(<2 x i64>* %
 define <8 x i8> @v_bici8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: v_bici8:
 ;CHECK: vbic
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = xor <8 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
 	%tmp4 = and <8 x i8> %tmp1, %tmp3
 	ret <8 x i8> %tmp4
@@ -85,8 +85,8 @@ define <8 x i8> @v_bici8(<8 x i8>* %A, <
 define <4 x i16> @v_bici16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: v_bici16:
 ;CHECK: vbic
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = xor <4 x i16> %tmp2, < i16 -1, i16 -1, i16 -1, i16 -1 >
 	%tmp4 = and <4 x i16> %tmp1, %tmp3
 	ret <4 x i16> %tmp4
@@ -95,8 +95,8 @@ define <4 x i16> @v_bici16(<4 x i16>* %A
 define <2 x i32> @v_bici32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: v_bici32:
 ;CHECK: vbic
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = xor <2 x i32> %tmp2, < i32 -1, i32 -1 >
 	%tmp4 = and <2 x i32> %tmp1, %tmp3
 	ret <2 x i32> %tmp4
@@ -105,8 +105,8 @@ define <2 x i32> @v_bici32(<2 x i32>* %A
 define <1 x i64> @v_bici64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: v_bici64:
 ;CHECK: vbic
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = xor <1 x i64> %tmp2, < i64 -1 >
 	%tmp4 = and <1 x i64> %tmp1, %tmp3
 	ret <1 x i64> %tmp4
@@ -115,8 +115,8 @@ define <1 x i64> @v_bici64(<1 x i64>* %A
 define <16 x i8> @v_bicQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: v_bicQi8:
 ;CHECK: vbic
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = xor <16 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
 	%tmp4 = and <16 x i8> %tmp1, %tmp3
 	ret <16 x i8> %tmp4
@@ -125,8 +125,8 @@ define <16 x i8> @v_bicQi8(<16 x i8>* %A
 define <8 x i16> @v_bicQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: v_bicQi16:
 ;CHECK: vbic
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = xor <8 x i16> %tmp2, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
 	%tmp4 = and <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
@@ -135,8 +135,8 @@ define <8 x i16> @v_bicQi16(<8 x i16>* %
 define <4 x i32> @v_bicQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: v_bicQi32:
 ;CHECK: vbic
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = xor <4 x i32> %tmp2, < i32 -1, i32 -1, i32 -1, i32 -1 >
 	%tmp4 = and <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
@@ -145,8 +145,8 @@ define <4 x i32> @v_bicQi32(<4 x i32>* %
 define <2 x i64> @v_bicQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: v_bicQi64:
 ;CHECK: vbic
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = xor <2 x i64> %tmp2, < i64 -1, i64 -1 >
 	%tmp4 = and <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4
@@ -155,8 +155,8 @@ define <2 x i64> @v_bicQi64(<2 x i64>* %
 define <8 x i8> @v_eori8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: v_eori8:
 ;CHECK: veor
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = xor <8 x i8> %tmp1, %tmp2
 	ret <8 x i8> %tmp3
 }
@@ -164,8 +164,8 @@ define <8 x i8> @v_eori8(<8 x i8>* %A, <
 define <4 x i16> @v_eori16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: v_eori16:
 ;CHECK: veor
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = xor <4 x i16> %tmp1, %tmp2
 	ret <4 x i16> %tmp3
 }
@@ -173,8 +173,8 @@ define <4 x i16> @v_eori16(<4 x i16>* %A
 define <2 x i32> @v_eori32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: v_eori32:
 ;CHECK: veor
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = xor <2 x i32> %tmp1, %tmp2
 	ret <2 x i32> %tmp3
 }
@@ -182,8 +182,8 @@ define <2 x i32> @v_eori32(<2 x i32>* %A
 define <1 x i64> @v_eori64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: v_eori64:
 ;CHECK: veor
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = xor <1 x i64> %tmp1, %tmp2
 	ret <1 x i64> %tmp3
 }
@@ -191,8 +191,8 @@ define <1 x i64> @v_eori64(<1 x i64>* %A
 define <16 x i8> @v_eorQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: v_eorQi8:
 ;CHECK: veor
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = xor <16 x i8> %tmp1, %tmp2
 	ret <16 x i8> %tmp3
 }
@@ -200,8 +200,8 @@ define <16 x i8> @v_eorQi8(<16 x i8>* %A
 define <8 x i16> @v_eorQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: v_eorQi16:
 ;CHECK: veor
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = xor <8 x i16> %tmp1, %tmp2
 	ret <8 x i16> %tmp3
 }
@@ -209,8 +209,8 @@ define <8 x i16> @v_eorQi16(<8 x i16>* %
 define <4 x i32> @v_eorQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: v_eorQi32:
 ;CHECK: veor
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = xor <4 x i32> %tmp1, %tmp2
 	ret <4 x i32> %tmp3
 }
@@ -218,8 +218,8 @@ define <4 x i32> @v_eorQi32(<4 x i32>* %
 define <2 x i64> @v_eorQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: v_eorQi64:
 ;CHECK: veor
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = xor <2 x i64> %tmp1, %tmp2
 	ret <2 x i64> %tmp3
 }
@@ -227,7 +227,7 @@ define <2 x i64> @v_eorQi64(<2 x i64>* %
 define <8 x i8> @v_mvni8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: v_mvni8:
 ;CHECK: vmvn
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = xor <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
 	ret <8 x i8> %tmp2
 }
@@ -235,7 +235,7 @@ define <8 x i8> @v_mvni8(<8 x i8>* %A) n
 define <4 x i16> @v_mvni16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: v_mvni16:
 ;CHECK: vmvn
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = xor <4 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1 >
 	ret <4 x i16> %tmp2
 }
@@ -243,7 +243,7 @@ define <4 x i16> @v_mvni16(<4 x i16>* %A
 define <2 x i32> @v_mvni32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: v_mvni32:
 ;CHECK: vmvn
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = xor <2 x i32> %tmp1, < i32 -1, i32 -1 >
 	ret <2 x i32> %tmp2
 }
@@ -251,7 +251,7 @@ define <2 x i32> @v_mvni32(<2 x i32>* %A
 define <1 x i64> @v_mvni64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: v_mvni64:
 ;CHECK: vmvn
-	%tmp1 = load <1 x i64>* %A
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = xor <1 x i64> %tmp1, < i64 -1 >
 	ret <1 x i64> %tmp2
 }
@@ -259,7 +259,7 @@ define <1 x i64> @v_mvni64(<1 x i64>* %A
 define <16 x i8> @v_mvnQi8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: v_mvnQi8:
 ;CHECK: vmvn
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = xor <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
 	ret <16 x i8> %tmp2
 }
@@ -267,7 +267,7 @@ define <16 x i8> @v_mvnQi8(<16 x i8>* %A
 define <8 x i16> @v_mvnQi16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: v_mvnQi16:
 ;CHECK: vmvn
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = xor <8 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
 	ret <8 x i16> %tmp2
 }
@@ -275,7 +275,7 @@ define <8 x i16> @v_mvnQi16(<8 x i16>* %
 define <4 x i32> @v_mvnQi32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: v_mvnQi32:
 ;CHECK: vmvn
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = xor <4 x i32> %tmp1, < i32 -1, i32 -1, i32 -1, i32 -1 >
 	ret <4 x i32> %tmp2
 }
@@ -283,7 +283,7 @@ define <4 x i32> @v_mvnQi32(<4 x i32>* %
 define <2 x i64> @v_mvnQi64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: v_mvnQi64:
 ;CHECK: vmvn
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = xor <2 x i64> %tmp1, < i64 -1, i64 -1 >
 	ret <2 x i64> %tmp2
 }
@@ -291,8 +291,8 @@ define <2 x i64> @v_mvnQi64(<2 x i64>* %
 define <8 x i8> @v_orri8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: v_orri8:
 ;CHECK: vorr
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = or <8 x i8> %tmp1, %tmp2
 	ret <8 x i8> %tmp3
 }
@@ -300,8 +300,8 @@ define <8 x i8> @v_orri8(<8 x i8>* %A, <
 define <4 x i16> @v_orri16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: v_orri16:
 ;CHECK: vorr
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = or <4 x i16> %tmp1, %tmp2
 	ret <4 x i16> %tmp3
 }
@@ -309,8 +309,8 @@ define <4 x i16> @v_orri16(<4 x i16>* %A
 define <2 x i32> @v_orri32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: v_orri32:
 ;CHECK: vorr
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = or <2 x i32> %tmp1, %tmp2
 	ret <2 x i32> %tmp3
 }
@@ -318,8 +318,8 @@ define <2 x i32> @v_orri32(<2 x i32>* %A
 define <1 x i64> @v_orri64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: v_orri64:
 ;CHECK: vorr
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = or <1 x i64> %tmp1, %tmp2
 	ret <1 x i64> %tmp3
 }
@@ -327,8 +327,8 @@ define <1 x i64> @v_orri64(<1 x i64>* %A
 define <16 x i8> @v_orrQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: v_orrQi8:
 ;CHECK: vorr
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = or <16 x i8> %tmp1, %tmp2
 	ret <16 x i8> %tmp3
 }
@@ -336,8 +336,8 @@ define <16 x i8> @v_orrQi8(<16 x i8>* %A
 define <8 x i16> @v_orrQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: v_orrQi16:
 ;CHECK: vorr
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = or <8 x i16> %tmp1, %tmp2
 	ret <8 x i16> %tmp3
 }
@@ -345,8 +345,8 @@ define <8 x i16> @v_orrQi16(<8 x i16>* %
 define <4 x i32> @v_orrQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: v_orrQi32:
 ;CHECK: vorr
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = or <4 x i32> %tmp1, %tmp2
 	ret <4 x i32> %tmp3
 }
@@ -354,8 +354,8 @@ define <4 x i32> @v_orrQi32(<4 x i32>* %
 define <2 x i64> @v_orrQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: v_orrQi64:
 ;CHECK: vorr
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = or <2 x i64> %tmp1, %tmp2
 	ret <2 x i64> %tmp3
 }
@@ -363,8 +363,8 @@ define <2 x i64> @v_orrQi64(<2 x i64>* %
 define <8 x i8> @v_orni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: v_orni8:
 ;CHECK: vorn
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = xor <8 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
 	%tmp4 = or <8 x i8> %tmp1, %tmp3
 	ret <8 x i8> %tmp4
@@ -373,8 +373,8 @@ define <8 x i8> @v_orni8(<8 x i8>* %A, <
 define <4 x i16> @v_orni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: v_orni16:
 ;CHECK: vorn
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = xor <4 x i16> %tmp2, < i16 -1, i16 -1, i16 -1, i16 -1 >
 	%tmp4 = or <4 x i16> %tmp1, %tmp3
 	ret <4 x i16> %tmp4
@@ -383,8 +383,8 @@ define <4 x i16> @v_orni16(<4 x i16>* %A
 define <2 x i32> @v_orni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: v_orni32:
 ;CHECK: vorn
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = xor <2 x i32> %tmp2, < i32 -1, i32 -1 >
 	%tmp4 = or <2 x i32> %tmp1, %tmp3
 	ret <2 x i32> %tmp4
@@ -393,8 +393,8 @@ define <2 x i32> @v_orni32(<2 x i32>* %A
 define <1 x i64> @v_orni64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: v_orni64:
 ;CHECK: vorn
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = xor <1 x i64> %tmp2, < i64 -1 >
 	%tmp4 = or <1 x i64> %tmp1, %tmp3
 	ret <1 x i64> %tmp4
@@ -403,8 +403,8 @@ define <1 x i64> @v_orni64(<1 x i64>* %A
 define <16 x i8> @v_ornQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: v_ornQi8:
 ;CHECK: vorn
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = xor <16 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
 	%tmp4 = or <16 x i8> %tmp1, %tmp3
 	ret <16 x i8> %tmp4
@@ -413,8 +413,8 @@ define <16 x i8> @v_ornQi8(<16 x i8>* %A
 define <8 x i16> @v_ornQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: v_ornQi16:
 ;CHECK: vorn
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = xor <8 x i16> %tmp2, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
 	%tmp4 = or <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
@@ -423,8 +423,8 @@ define <8 x i16> @v_ornQi16(<8 x i16>* %
 define <4 x i32> @v_ornQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: v_ornQi32:
 ;CHECK: vorn
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = xor <4 x i32> %tmp2, < i32 -1, i32 -1, i32 -1, i32 -1 >
 	%tmp4 = or <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
@@ -433,8 +433,8 @@ define <4 x i32> @v_ornQi32(<4 x i32>* %
 define <2 x i64> @v_ornQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: v_ornQi64:
 ;CHECK: vorn
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = xor <2 x i64> %tmp2, < i64 -1, i64 -1 >
 	%tmp4 = or <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4
@@ -443,8 +443,8 @@ define <2 x i64> @v_ornQi64(<2 x i64>* %
 define <8 x i8> @vtsti8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vtsti8:
 ;CHECK: vtst.8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = and <8 x i8> %tmp1, %tmp2
 	%tmp4 = icmp ne <8 x i8> %tmp3, zeroinitializer
         %tmp5 = sext <8 x i1> %tmp4 to <8 x i8>
@@ -454,8 +454,8 @@ define <8 x i8> @vtsti8(<8 x i8>* %A, <8
 define <4 x i16> @vtsti16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vtsti16:
 ;CHECK: vtst.16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = and <4 x i16> %tmp1, %tmp2
 	%tmp4 = icmp ne <4 x i16> %tmp3, zeroinitializer
         %tmp5 = sext <4 x i1> %tmp4 to <4 x i16>
@@ -465,8 +465,8 @@ define <4 x i16> @vtsti16(<4 x i16>* %A,
 define <2 x i32> @vtsti32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vtsti32:
 ;CHECK: vtst.32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = and <2 x i32> %tmp1, %tmp2
 	%tmp4 = icmp ne <2 x i32> %tmp3, zeroinitializer
         %tmp5 = sext <2 x i1> %tmp4 to <2 x i32>
@@ -476,8 +476,8 @@ define <2 x i32> @vtsti32(<2 x i32>* %A,
 define <16 x i8> @vtstQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vtstQi8:
 ;CHECK: vtst.8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = and <16 x i8> %tmp1, %tmp2
 	%tmp4 = icmp ne <16 x i8> %tmp3, zeroinitializer
         %tmp5 = sext <16 x i1> %tmp4 to <16 x i8>
@@ -487,8 +487,8 @@ define <16 x i8> @vtstQi8(<16 x i8>* %A,
 define <8 x i16> @vtstQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vtstQi16:
 ;CHECK: vtst.16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = and <8 x i16> %tmp1, %tmp2
 	%tmp4 = icmp ne <8 x i16> %tmp3, zeroinitializer
         %tmp5 = sext <8 x i1> %tmp4 to <8 x i16>
@@ -498,8 +498,8 @@ define <8 x i16> @vtstQi16(<8 x i16>* %A
 define <4 x i32> @vtstQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vtstQi32:
 ;CHECK: vtst.32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = and <4 x i32> %tmp1, %tmp2
 	%tmp4 = icmp ne <4 x i32> %tmp3, zeroinitializer
         %tmp5 = sext <4 x i1> %tmp4 to <4 x i32>
@@ -511,7 +511,7 @@ define <8 x i8> @v_orrimm(<8 x i8>* %A)
 ; CHECK-NOT: vmov
 ; CHECK-NOT: vmvn
 ; CHECK: vorr
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp3 = or <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1>
 	ret <8 x i8> %tmp3
 }
@@ -521,7 +521,7 @@ define <16 x i8> @v_orrimmQ(<16 x i8>* %
 ; CHECK-NOT: vmov
 ; CHECK-NOT: vmvn
 ; CHECK: vorr
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp3 = or <16 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1>
 	ret <16 x i8> %tmp3
 }
@@ -531,7 +531,7 @@ define <8 x i8> @v_bicimm(<8 x i8>* %A)
 ; CHECK-NOT: vmov
 ; CHECK-NOT: vmvn
 ; CHECK: vbic
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp3 = and <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0 >
 	ret <8 x i8> %tmp3
 }
@@ -541,7 +541,7 @@ define <16 x i8> @v_bicimmQ(<16 x i8>* %
 ; CHECK-NOT: vmov
 ; CHECK-NOT: vmvn
 ; CHECK: vbic
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp3 = and <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0 >
 	ret <16 x i8> %tmp3
 }
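
The hunks in this file and the files that follow all apply the same mechanical
rewrite: the load instruction gains an explicit result-type operand in front of
the pointer operand. As a minimal illustrative sketch (the value names %v and
%p below are hypothetical, not taken from any test in this patch), the two
textual forms side by side:

	; illustrative only: assumes %p is some in-scope <4 x i32>* value
	; old form -- result type inferred from the pointee type of %p:
	%v = load <4 x i32>* %p
	; new form -- result type stated explicitly, then the pointer operand:
	%v = load <4 x i32>, <4 x i32>* %p

The instruction's semantics are unchanged; only the textual IR syntax differs.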

Modified: llvm/trunk/test/CodeGen/ARM/vbsl-constant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vbsl-constant.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vbsl-constant.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vbsl-constant.ll Fri Feb 27 15:17:42 2015
@@ -5,9 +5,9 @@ define <8 x i8> @v_bsli8(<8 x i8>* %A, <
 ;CHECK: vldr
 ;CHECK: vldr
 ;CHECK: vbsl
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
-	%tmp3 = load <8 x i8>* %C
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp3 = load <8 x i8>, <8 x i8>* %C
 	%tmp4 = and <8 x i8> %tmp1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
 	%tmp6 = and <8 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4>
 	%tmp7 = or <8 x i8> %tmp4, %tmp6
@@ -19,9 +19,9 @@ define <4 x i16> @v_bsli16(<4 x i16>* %A
 ;CHECK: vldr
 ;CHECK: vldr
 ;CHECK: vbsl
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
-	%tmp3 = load <4 x i16>* %C
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp3 = load <4 x i16>, <4 x i16>* %C
 	%tmp4 = and <4 x i16> %tmp1, <i16 3, i16 3, i16 3, i16 3>
 	%tmp6 = and <4 x i16> %tmp3, <i16 -4, i16 -4, i16 -4, i16 -4>
 	%tmp7 = or <4 x i16> %tmp4, %tmp6
@@ -33,9 +33,9 @@ define <2 x i32> @v_bsli32(<2 x i32>* %A
 ;CHECK: vldr
 ;CHECK: vldr
 ;CHECK: vbsl
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
-	%tmp3 = load <2 x i32>* %C
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp3 = load <2 x i32>, <2 x i32>* %C
 	%tmp4 = and <2 x i32> %tmp1, <i32 3, i32 3>
 	%tmp6 = and <2 x i32> %tmp3, <i32 -4, i32 -4>
 	%tmp7 = or <2 x i32> %tmp4, %tmp6
@@ -48,9 +48,9 @@ define <1 x i64> @v_bsli64(<1 x i64>* %A
 ;CHECK: vldr
 ;CHECK: vldr
 ;CHECK: vbsl
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
-	%tmp3 = load <1 x i64>* %C
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
+	%tmp3 = load <1 x i64>, <1 x i64>* %C
 	%tmp4 = and <1 x i64> %tmp1, <i64 3>
 	%tmp6 = and <1 x i64> %tmp3, <i64 -4>
 	%tmp7 = or <1 x i64> %tmp4, %tmp6
@@ -62,9 +62,9 @@ define <16 x i8> @v_bslQi8(<16 x i8>* %A
 ;CHECK: vld1.32
 ;CHECK: vld1.32
 ;CHECK: vbsl
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
-	%tmp3 = load <16 x i8>* %C
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp3 = load <16 x i8>, <16 x i8>* %C
 	%tmp4 = and <16 x i8> %tmp1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
 	%tmp6 = and <16 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4>
 	%tmp7 = or <16 x i8> %tmp4, %tmp6
@@ -76,9 +76,9 @@ define <8 x i16> @v_bslQi16(<8 x i16>* %
 ;CHECK: vld1.32
 ;CHECK: vld1.32
 ;CHECK: vbsl
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
-	%tmp3 = load <8 x i16>* %C
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp3 = load <8 x i16>, <8 x i16>* %C
 	%tmp4 = and <8 x i16> %tmp1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
 	%tmp6 = and <8 x i16> %tmp3, <i16 -4, i16 -4, i16 -4, i16 -4, i16 -4, i16 -4, i16 -4, i16 -4>
 	%tmp7 = or <8 x i16> %tmp4, %tmp6
@@ -90,9 +90,9 @@ define <4 x i32> @v_bslQi32(<4 x i32>* %
 ;CHECK: vld1.32
 ;CHECK: vld1.32
 ;CHECK: vbsl
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
-	%tmp3 = load <4 x i32>* %C
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp3 = load <4 x i32>, <4 x i32>* %C
 	%tmp4 = and <4 x i32> %tmp1, <i32 3, i32 3, i32 3, i32 3>
 	%tmp6 = and <4 x i32> %tmp3, <i32 -4, i32 -4, i32 -4, i32 -4>
 	%tmp7 = or <4 x i32> %tmp4, %tmp6
@@ -105,9 +105,9 @@ define <2 x i64> @v_bslQi64(<2 x i64>* %
 ;CHECK: vld1.32
 ;CHECK: vld1.64
 ;CHECK: vbsl
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
-	%tmp3 = load <2 x i64>* %C
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
+	%tmp3 = load <2 x i64>, <2 x i64>* %C
 	%tmp4 = and <2 x i64> %tmp1, <i64 3, i64 3>
 	%tmp6 = and <2 x i64> %tmp3, <i64 -4, i64 -4>
 	%tmp7 = or <2 x i64> %tmp4, %tmp6

Modified: llvm/trunk/test/CodeGen/ARM/vbsl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vbsl.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vbsl.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vbsl.ll Fri Feb 27 15:17:42 2015
@@ -5,9 +5,9 @@
 define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ;CHECK-LABEL: v_bsli8:
 ;CHECK: vbsl
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
-	%tmp3 = load <8 x i8>* %C
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp3 = load <8 x i8>, <8 x i8>* %C
 	%tmp4 = and <8 x i8> %tmp1, %tmp2
 	%tmp5 = xor <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
 	%tmp6 = and <8 x i8> %tmp5, %tmp3
@@ -18,9 +18,9 @@ define <8 x i8> @v_bsli8(<8 x i8>* %A, <
 define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
 ;CHECK-LABEL: v_bsli16:
 ;CHECK: vbsl
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
-	%tmp3 = load <4 x i16>* %C
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp3 = load <4 x i16>, <4 x i16>* %C
 	%tmp4 = and <4 x i16> %tmp1, %tmp2
 	%tmp5 = xor <4 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1 >
 	%tmp6 = and <4 x i16> %tmp5, %tmp3
@@ -31,9 +31,9 @@ define <4 x i16> @v_bsli16(<4 x i16>* %A
 define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
 ;CHECK-LABEL: v_bsli32:
 ;CHECK: vbsl
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
-	%tmp3 = load <2 x i32>* %C
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp3 = load <2 x i32>, <2 x i32>* %C
 	%tmp4 = and <2 x i32> %tmp1, %tmp2
 	%tmp5 = xor <2 x i32> %tmp1, < i32 -1, i32 -1 >
 	%tmp6 = and <2 x i32> %tmp5, %tmp3
@@ -44,9 +44,9 @@ define <2 x i32> @v_bsli32(<2 x i32>* %A
 define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind {
 ;CHECK-LABEL: v_bsli64:
 ;CHECK: vbsl
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
-	%tmp3 = load <1 x i64>* %C
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
+	%tmp3 = load <1 x i64>, <1 x i64>* %C
 	%tmp4 = and <1 x i64> %tmp1, %tmp2
 	%tmp5 = xor <1 x i64> %tmp1, < i64 -1 >
 	%tmp6 = and <1 x i64> %tmp5, %tmp3
@@ -57,9 +57,9 @@ define <1 x i64> @v_bsli64(<1 x i64>* %A
 define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
 ;CHECK-LABEL: v_bslQi8:
 ;CHECK: vbsl
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
-	%tmp3 = load <16 x i8>* %C
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp3 = load <16 x i8>, <16 x i8>* %C
 	%tmp4 = and <16 x i8> %tmp1, %tmp2
 	%tmp5 = xor <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
 	%tmp6 = and <16 x i8> %tmp5, %tmp3
@@ -70,9 +70,9 @@ define <16 x i8> @v_bslQi8(<16 x i8>* %A
 define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
 ;CHECK-LABEL: v_bslQi16:
 ;CHECK: vbsl
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
-	%tmp3 = load <8 x i16>* %C
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp3 = load <8 x i16>, <8 x i16>* %C
 	%tmp4 = and <8 x i16> %tmp1, %tmp2
 	%tmp5 = xor <8 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
 	%tmp6 = and <8 x i16> %tmp5, %tmp3
@@ -83,9 +83,9 @@ define <8 x i16> @v_bslQi16(<8 x i16>* %
 define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
 ;CHECK-LABEL: v_bslQi32:
 ;CHECK: vbsl
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
-	%tmp3 = load <4 x i32>* %C
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp3 = load <4 x i32>, <4 x i32>* %C
 	%tmp4 = and <4 x i32> %tmp1, %tmp2
 	%tmp5 = xor <4 x i32> %tmp1, < i32 -1, i32 -1, i32 -1, i32 -1 >
 	%tmp6 = and <4 x i32> %tmp5, %tmp3
@@ -96,9 +96,9 @@ define <4 x i32> @v_bslQi32(<4 x i32>* %
 define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwind {
 ;CHECK-LABEL: v_bslQi64:
 ;CHECK: vbsl
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
-	%tmp3 = load <2 x i64>* %C
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
+	%tmp3 = load <2 x i64>, <2 x i64>* %C
 	%tmp4 = and <2 x i64> %tmp1, %tmp2
 	%tmp5 = xor <2 x i64> %tmp1, < i64 -1, i64 -1 >
 	%tmp6 = and <2 x i64> %tmp5, %tmp3

Modified: llvm/trunk/test/CodeGen/ARM/vceq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vceq.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vceq.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vceq.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vceqi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vceqi8:
 ;CHECK: vceq.i8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = icmp eq <8 x i8> %tmp1, %tmp2
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
@@ -13,8 +13,8 @@ define <8 x i8> @vceqi8(<8 x i8>* %A, <8
 define <4 x i16> @vceqi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vceqi16:
 ;CHECK: vceq.i16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = icmp eq <4 x i16> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
@@ -23,8 +23,8 @@ define <4 x i16> @vceqi16(<4 x i16>* %A,
 define <2 x i32> @vceqi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vceqi32:
 ;CHECK: vceq.i32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = icmp eq <2 x i32> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -33,8 +33,8 @@ define <2 x i32> @vceqi32(<2 x i32>* %A,
 define <2 x i32> @vceqf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vceqf32:
 ;CHECK: vceq.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp oeq <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -43,8 +43,8 @@ define <2 x i32> @vceqf32(<2 x float>* %
 define <16 x i8> @vceqQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vceqQi8:
 ;CHECK: vceq.i8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = icmp eq <16 x i8> %tmp1, %tmp2
         %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
 	ret <16 x i8> %tmp4
@@ -53,8 +53,8 @@ define <16 x i8> @vceqQi8(<16 x i8>* %A,
 define <8 x i16> @vceqQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vceqQi16:
 ;CHECK: vceq.i16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = icmp eq <8 x i16> %tmp1, %tmp2
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
@@ -63,8 +63,8 @@ define <8 x i16> @vceqQi16(<8 x i16>* %A
 define <4 x i32> @vceqQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vceqQi32:
 ;CHECK: vceq.i32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = icmp eq <4 x i32> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
@@ -73,8 +73,8 @@ define <4 x i32> @vceqQi32(<4 x i32>* %A
 define <4 x i32> @vceqQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vceqQf32:
 ;CHECK: vceq.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = fcmp oeq <4 x float> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
@@ -85,7 +85,7 @@ define <8 x i8> @vceqi8Z(<8 x i8>* %A) n
 ;CHECK-NOT: vmov
 ;CHECK-NOT: vmvn
 ;CHECK: vceq.i8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp3 = icmp eq <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4

Modified: llvm/trunk/test/CodeGen/ARM/vcge.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vcge.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vcge.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vcge.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vcges8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vcges8:
 ;CHECK: vcge.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = icmp sge <8 x i8> %tmp1, %tmp2
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
@@ -13,8 +13,8 @@ define <8 x i8> @vcges8(<8 x i8>* %A, <8
 define <4 x i16> @vcges16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vcges16:
 ;CHECK: vcge.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = icmp sge <4 x i16> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
@@ -23,8 +23,8 @@ define <4 x i16> @vcges16(<4 x i16>* %A,
 define <2 x i32> @vcges32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vcges32:
 ;CHECK: vcge.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = icmp sge <2 x i32> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -33,8 +33,8 @@ define <2 x i32> @vcges32(<2 x i32>* %A,
 define <8 x i8> @vcgeu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vcgeu8:
 ;CHECK: vcge.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = icmp uge <8 x i8> %tmp1, %tmp2
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
@@ -43,8 +43,8 @@ define <8 x i8> @vcgeu8(<8 x i8>* %A, <8
 define <4 x i16> @vcgeu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vcgeu16:
 ;CHECK: vcge.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = icmp uge <4 x i16> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
@@ -53,8 +53,8 @@ define <4 x i16> @vcgeu16(<4 x i16>* %A,
 define <2 x i32> @vcgeu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vcgeu32:
 ;CHECK: vcge.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = icmp uge <2 x i32> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -63,8 +63,8 @@ define <2 x i32> @vcgeu32(<2 x i32>* %A,
 define <2 x i32> @vcgef32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vcgef32:
 ;CHECK: vcge.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp oge <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -73,8 +73,8 @@ define <2 x i32> @vcgef32(<2 x float>* %
 define <16 x i8> @vcgeQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vcgeQs8:
 ;CHECK: vcge.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = icmp sge <16 x i8> %tmp1, %tmp2
         %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
 	ret <16 x i8> %tmp4
@@ -83,8 +83,8 @@ define <16 x i8> @vcgeQs8(<16 x i8>* %A,
 define <8 x i16> @vcgeQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vcgeQs16:
 ;CHECK: vcge.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = icmp sge <8 x i16> %tmp1, %tmp2
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
@@ -93,8 +93,8 @@ define <8 x i16> @vcgeQs16(<8 x i16>* %A
 define <4 x i32> @vcgeQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vcgeQs32:
 ;CHECK: vcge.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = icmp sge <4 x i32> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
@@ -103,8 +103,8 @@ define <4 x i32> @vcgeQs32(<4 x i32>* %A
 define <16 x i8> @vcgeQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vcgeQu8:
 ;CHECK: vcge.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = icmp uge <16 x i8> %tmp1, %tmp2
         %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
 	ret <16 x i8> %tmp4
@@ -113,8 +113,8 @@ define <16 x i8> @vcgeQu8(<16 x i8>* %A,
 define <8 x i16> @vcgeQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vcgeQu16:
 ;CHECK: vcge.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = icmp uge <8 x i16> %tmp1, %tmp2
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
@@ -123,8 +123,8 @@ define <8 x i16> @vcgeQu16(<8 x i16>* %A
 define <4 x i32> @vcgeQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vcgeQu32:
 ;CHECK: vcge.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = icmp uge <4 x i32> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
@@ -133,8 +133,8 @@ define <4 x i32> @vcgeQu32(<4 x i32>* %A
 define <4 x i32> @vcgeQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vcgeQf32:
 ;CHECK: vcge.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = fcmp oge <4 x float> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
@@ -143,8 +143,8 @@ define <4 x i32> @vcgeQf32(<4 x float>*
 define <2 x i32> @vacgef32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vacgef32:
 ;CHECK: vacge.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vacge.v2i32.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -152,8 +152,8 @@ define <2 x i32> @vacgef32(<2 x float>*
 define <4 x i32> @vacgeQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vacgeQf32:
 ;CHECK: vacge.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vacge.v4i32.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -166,7 +166,7 @@ define <8 x i8> @vcgei8Z(<8 x i8>* %A) n
 ;CHECK-NOT: vmov
 ;CHECK-NOT: vmvn
 ;CHECK: vcge.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp3 = icmp sge <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
@@ -177,7 +177,7 @@ define <8 x i8> @vclei8Z(<8 x i8>* %A) n
 ;CHECK-NOT: vmov
 ;CHECK-NOT: vmvn
 ;CHECK: vcle.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp3 = icmp sle <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4

Modified: llvm/trunk/test/CodeGen/ARM/vcgt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vcgt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vcgt.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vcgt.ll Fri Feb 27 15:17:42 2015
@@ -4,8 +4,8 @@
 define <8 x i8> @vcgts8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vcgts8:
 ;CHECK: vcgt.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = icmp sgt <8 x i8> %tmp1, %tmp2
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
@@ -14,8 +14,8 @@ define <8 x i8> @vcgts8(<8 x i8>* %A, <8
 define <4 x i16> @vcgts16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vcgts16:
 ;CHECK: vcgt.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = icmp sgt <4 x i16> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
@@ -24,8 +24,8 @@ define <4 x i16> @vcgts16(<4 x i16>* %A,
 define <2 x i32> @vcgts32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vcgts32:
 ;CHECK: vcgt.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = icmp sgt <2 x i32> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -34,8 +34,8 @@ define <2 x i32> @vcgts32(<2 x i32>* %A,
 define <8 x i8> @vcgtu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vcgtu8:
 ;CHECK: vcgt.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = icmp ugt <8 x i8> %tmp1, %tmp2
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
@@ -44,8 +44,8 @@ define <8 x i8> @vcgtu8(<8 x i8>* %A, <8
 define <4 x i16> @vcgtu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vcgtu16:
 ;CHECK: vcgt.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = icmp ugt <4 x i16> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
@@ -54,8 +54,8 @@ define <4 x i16> @vcgtu16(<4 x i16>* %A,
 define <2 x i32> @vcgtu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vcgtu32:
 ;CHECK: vcgt.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = icmp ugt <2 x i32> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -64,8 +64,8 @@ define <2 x i32> @vcgtu32(<2 x i32>* %A,
 define <2 x i32> @vcgtf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vcgtf32:
 ;CHECK: vcgt.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp ogt <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -74,8 +74,8 @@ define <2 x i32> @vcgtf32(<2 x float>* %
 define <16 x i8> @vcgtQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vcgtQs8:
 ;CHECK: vcgt.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = icmp sgt <16 x i8> %tmp1, %tmp2
         %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
 	ret <16 x i8> %tmp4
@@ -84,8 +84,8 @@ define <16 x i8> @vcgtQs8(<16 x i8>* %A,
 define <8 x i16> @vcgtQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vcgtQs16:
 ;CHECK: vcgt.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = icmp sgt <8 x i16> %tmp1, %tmp2
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
@@ -94,8 +94,8 @@ define <8 x i16> @vcgtQs16(<8 x i16>* %A
 define <4 x i32> @vcgtQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vcgtQs32:
 ;CHECK: vcgt.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = icmp sgt <4 x i32> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
@@ -104,8 +104,8 @@ define <4 x i32> @vcgtQs32(<4 x i32>* %A
 define <16 x i8> @vcgtQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vcgtQu8:
 ;CHECK: vcgt.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = icmp ugt <16 x i8> %tmp1, %tmp2
         %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
 	ret <16 x i8> %tmp4
@@ -114,8 +114,8 @@ define <16 x i8> @vcgtQu8(<16 x i8>* %A,
 define <8 x i16> @vcgtQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vcgtQu16:
 ;CHECK: vcgt.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = icmp ugt <8 x i16> %tmp1, %tmp2
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
@@ -124,8 +124,8 @@ define <8 x i16> @vcgtQu16(<8 x i16>* %A
 define <4 x i32> @vcgtQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vcgtQu32:
 ;CHECK: vcgt.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = icmp ugt <4 x i32> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
@@ -134,8 +134,8 @@ define <4 x i32> @vcgtQu32(<4 x i32>* %A
 define <4 x i32> @vcgtQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vcgtQf32:
 ;CHECK: vcgt.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = fcmp ogt <4 x float> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
@@ -144,8 +144,8 @@ define <4 x i32> @vcgtQf32(<4 x float>*
 define <2 x i32> @vacgtf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vacgtf32:
 ;CHECK: vacgt.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vacgt.v2i32.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -153,8 +153,8 @@ define <2 x i32> @vacgtf32(<2 x float>*
 define <4 x i32> @vacgtQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vacgtQf32:
 ;CHECK: vacgt.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vacgt.v4i32.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -165,8 +165,8 @@ define <4 x i32> @vcgt_zext(<4 x float>*
 ;CHECK: vmov.i32 [[Q0:q[0-9]+]], #0x1
 ;CHECK: vcgt.f32 [[Q1:q[0-9]+]]
 ;CHECK: vand [[Q2:q[0-9]+]], [[Q1]], [[Q0]]
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = fcmp ogt <4 x float> %tmp1, %tmp2
         %tmp4 = zext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
@@ -180,7 +180,7 @@ define <8 x i8> @vcgti8Z(<8 x i8>* %A) n
 ;CHECK-NOT: vmov
 ;CHECK-NOT: vmvn
 ;CHECK: vcgt.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp3 = icmp sgt <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
@@ -191,7 +191,7 @@ define <8 x i8> @vclti8Z(<8 x i8>* %A) n
 ;CHECK-NOT: vmov
 ;CHECK-NOT: vmvn
 ;CHECK: vclt.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp3 = icmp slt <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4

Modified: llvm/trunk/test/CodeGen/ARM/vcnt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vcnt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vcnt.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vcnt.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vcnt8:
 ;CHECK: vcnt.8 {{d[0-9]+}}, {{d[0-9]+}}
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)
 	ret <8 x i8> %tmp2
 }
@@ -12,7 +12,7 @@ define <8 x i8> @vcnt8(<8 x i8>* %A) nou
 define <16 x i8> @vcntQ8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vcntQ8:
 ;CHECK: vcnt.8 {{q[0-9]+}}, {{q[0-9]+}}
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
 	ret <16 x i8> %tmp2
 }
@@ -23,7 +23,7 @@ declare <16 x i8> @llvm.ctpop.v16i8(<16
 define <8 x i8> @vclz8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vclz8:
 ;CHECK: vclz.i8 {{d[0-9]+}}, {{d[0-9]+}}
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)
 	ret <8 x i8> %tmp2
 }
@@ -31,7 +31,7 @@ define <8 x i8> @vclz8(<8 x i8>* %A) nou
 define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vclz16:
 ;CHECK: vclz.i16 {{d[0-9]+}}, {{d[0-9]+}}
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)
 	ret <4 x i16> %tmp2
 }
@@ -39,7 +39,7 @@ define <4 x i16> @vclz16(<4 x i16>* %A)
 define <2 x i32> @vclz32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vclz32:
 ;CHECK: vclz.i32 {{d[0-9]+}}, {{d[0-9]+}}
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)
 	ret <2 x i32> %tmp2
 }
@@ -47,7 +47,7 @@ define <2 x i32> @vclz32(<2 x i32>* %A)
 define <16 x i8> @vclzQ8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vclzQ8:
 ;CHECK: vclz.i8 {{q[0-9]+}}, {{q[0-9]+}}
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %tmp1, i1 0)
 	ret <16 x i8> %tmp2
 }
@@ -55,7 +55,7 @@ define <16 x i8> @vclzQ8(<16 x i8>* %A)
 define <8 x i16> @vclzQ16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vclzQ16:
 ;CHECK: vclz.i16 {{q[0-9]+}}, {{q[0-9]+}}
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %tmp1, i1 0)
 	ret <8 x i16> %tmp2
 }
@@ -63,7 +63,7 @@ define <8 x i16> @vclzQ16(<8 x i16>* %A)
 define <4 x i32> @vclzQ32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vclzQ32:
 ;CHECK: vclz.i32 {{q[0-9]+}}, {{q[0-9]+}}
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %tmp1, i1 0)
 	ret <4 x i32> %tmp2
 }
@@ -79,7 +79,7 @@ declare <4 x i32> @llvm.ctlz.v4i32(<4 x
 define <8 x i8> @vclss8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vclss8:
 ;CHECK: vcls.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vcls.v8i8(<8 x i8> %tmp1)
 	ret <8 x i8> %tmp2
 }
@@ -87,7 +87,7 @@ define <8 x i8> @vclss8(<8 x i8>* %A) no
 define <4 x i16> @vclss16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vclss16:
 ;CHECK: vcls.s16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16> %tmp1)
 	ret <4 x i16> %tmp2
 }
@@ -95,7 +95,7 @@ define <4 x i16> @vclss16(<4 x i16>* %A)
 define <2 x i32> @vclss32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vclss32:
 ;CHECK: vcls.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vcls.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp2
 }
@@ -103,7 +103,7 @@ define <2 x i32> @vclss32(<2 x i32>* %A)
 define <16 x i8> @vclsQs8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vclsQs8:
 ;CHECK: vcls.s8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %tmp1)
 	ret <16 x i8> %tmp2
 }
@@ -111,7 +111,7 @@ define <16 x i8> @vclsQs8(<16 x i8>* %A)
 define <8 x i16> @vclsQs16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vclsQs16:
 ;CHECK: vcls.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16> %tmp1)
 	ret <8 x i16> %tmp2
 }
@@ -119,7 +119,7 @@ define <8 x i16> @vclsQs16(<8 x i16>* %A
 define <4 x i32> @vclsQs32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vclsQs32:
 ;CHECK: vcls.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vcls.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/vcombine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vcombine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vcombine.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vcombine.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@ define <16 x i8> @vcombine8(<8 x i8>* %A
 ; CHECK-LE: vmov r2, r3, d17
 ; CHECK-BE: vmov r1, r0, d16
 ; CHECK-BE: vmov r3, r2, d17
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
 	ret <16 x i8> %tmp3
 }
@@ -19,8 +19,8 @@ define <8 x i16> @vcombine16(<4 x i16>*
 ; CHECK-LE: vmov r2, r3, d17
 ; CHECK-BE: vmov r1, r0, d16
 ; CHECK-BE: vmov r3, r2, d17
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
 	ret <8 x i16> %tmp3
 }
@@ -31,8 +31,8 @@ define <4 x i32> @vcombine32(<2 x i32>*
 ; CHECK-LE: vmov r2, r3, d17
 ; CHECK-BE: vmov r1, r0, d16
 ; CHECK-BE: vmov r3, r2, d17
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
 	ret <4 x i32> %tmp3
 }
@@ -43,8 +43,8 @@ define <4 x float> @vcombinefloat(<2 x f
 ; CHECK-LE: vmov r2, r3, d17
 ; CHECK-BE: vmov r1, r0, d16
 ; CHECK-BE: vmov r3, r2, d17
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
 	ret <4 x float> %tmp3
 }
@@ -55,8 +55,8 @@ define <2 x i64> @vcombine64(<1 x i64>*
 ; CHECK-LE: vmov r2, r3, d17
 ; CHECK-BE: vmov r1, r0, d16
 ; CHECK-BE: vmov r3, r2, d17
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = shufflevector <1 x i64> %tmp1, <1 x i64> %tmp2, <2 x i32> <i32 0, i32 1>
 	ret <2 x i64> %tmp3
 }
@@ -69,7 +69,7 @@ define <4 x i16> @vget_low16(<8 x i16>*
 ; CHECK-NOT: vst
 ; CHECK-LE: vmov r0, r1, d16
 ; CHECK-BE: vmov r1, r0, d16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i16> %tmp2
 }
@@ -79,7 +79,7 @@ define <8 x i8> @vget_high8(<16 x i8>* %
 ; CHECK-NOT: vst
 ; CHECK-LE: vmov r0, r1, d17
 ; CHECK-BE: vmov r1, r0, d16
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
         %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <8 x i8> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/vcvt-cost.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vcvt-cost.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vcvt-cost.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vcvt-cost.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define void @func_cvt5(%T0_5* %loadaddr,
 ; CHECK: vmovl.s8
 ; CHECK: vmovl.s16
 ; CHECK: vmovl.s16
-  %v0 = load %T0_5* %loadaddr
+  %v0 = load %T0_5, %T0_5* %loadaddr
 ; COST: func_cvt5
 ; COST: cost of 3 {{.*}} sext
   %r = sext %T0_5 %v0 to %T1_5
@@ -25,7 +25,7 @@ define void @func_cvt1(%TA0_5* %loadaddr
 ; CHECK: vmovl.u8
 ; CHECK: vmovl.u16
 ; CHECK: vmovl.u16
-  %v0 = load %TA0_5* %loadaddr
+  %v0 = load %TA0_5, %TA0_5* %loadaddr
 ; COST: func_cvt1
 ; COST: cost of 3 {{.*}} zext
   %r = zext %TA0_5 %v0 to %TA1_5
@@ -40,7 +40,7 @@ define void @func_cvt51(%T0_51* %loadadd
 ; CHECK: vmovn.i32
 ; CHECK: vmovn.i32
 ; CHECK: vmovn.i16
-  %v0 = load %T0_51* %loadaddr
+  %v0 = load %T0_51, %T0_51* %loadaddr
 ; COST: func_cvt51
 ; COST: cost of 3 {{.*}} trunc
   %r = trunc %T0_51 %v0 to %T1_51
@@ -56,7 +56,7 @@ define void @func_cvt52(%TT0_5* %loadadd
 ; CHECK: vmovl.s16
 ; CHECK: vmovl.s16
 ; CHECK: vmovl.s16
-  %v0 = load %TT0_5* %loadaddr
+  %v0 = load %TT0_5, %TT0_5* %loadaddr
 ; COST: func_cvt52
 ; COST: cost of 6 {{.*}} sext
   %r = sext %TT0_5 %v0 to %TT1_5
@@ -73,7 +73,7 @@ define void @func_cvt12(%TTA0_5* %loadad
 ; CHECK: vmovl.u16
 ; CHECK: vmovl.u16
 ; CHECK: vmovl.u16
-  %v0 = load %TTA0_5* %loadaddr
+  %v0 = load %TTA0_5, %TTA0_5* %loadaddr
 ; COST: func_cvt12
 ; COST: cost of 6 {{.*}} zext
   %r = zext %TTA0_5 %v0 to %TTA1_5
@@ -91,7 +91,7 @@ define void @func_cvt512(%TT0_51* %loada
 ; CHECK: vmovn.i32
 ; CHECK: vmovn.i16
 ; CHECK: vmovn.i16
-  %v0 = load %TT0_51* %loadaddr
+  %v0 = load %TT0_51, %TT0_51* %loadaddr
 ; COST: func_cvt512
 ; COST: cost of 6 {{.*}} trunc
   %r = trunc %TT0_51 %v0 to %TT1_51
@@ -103,7 +103,7 @@ define void @func_cvt512(%TT0_51* %loada
 define void @sext_v4i16_v4i64(<4 x i16>* %loadaddr, <4 x i64>* %storeaddr) {
 ; CHECK: vmovl.s32
 ; CHECK: vmovl.s32
-  %v0 = load <4 x i16>* %loadaddr
+  %v0 = load <4 x i16>, <4 x i16>* %loadaddr
 ; COST: sext_v4i16_v4i64
 ; COST: cost of 3 {{.*}} sext
   %r = sext <4 x i16> %v0 to <4 x i64>
@@ -115,7 +115,7 @@ define void @sext_v4i16_v4i64(<4 x i16>*
 define void @zext_v4i16_v4i64(<4 x i16>* %loadaddr, <4 x i64>* %storeaddr) {
 ; CHECK: vmovl.u32
 ; CHECK: vmovl.u32
-  %v0 = load <4 x i16>* %loadaddr
+  %v0 = load <4 x i16>, <4 x i16>* %loadaddr
 ; COST: zext_v4i16_v4i64
 ; COST: cost of 3 {{.*}} zext
   %r = zext <4 x i16> %v0 to <4 x i64>
@@ -129,7 +129,7 @@ define void @sext_v8i16_v8i64(<8 x i16>*
 ; CHECK: vmovl.s32
 ; CHECK: vmovl.s32
 ; CHECK: vmovl.s32
-  %v0 = load <8 x i16>* %loadaddr
+  %v0 = load <8 x i16>, <8 x i16>* %loadaddr
 ; COST: sext_v8i16_v8i64
 ; COST: cost of 6 {{.*}} sext
   %r = sext <8 x i16> %v0 to <8 x i64>
@@ -143,7 +143,7 @@ define void @zext_v8i16_v8i64(<8 x i16>*
 ; CHECK: vmovl.u32
 ; CHECK: vmovl.u32
 ; CHECK: vmovl.u32
-  %v0 = load <8 x i16>* %loadaddr
+  %v0 = load <8 x i16>, <8 x i16>* %loadaddr
 ; COST: zext_v8i16_v8i64
 ; COST: cost of 6 {{.*}} zext
   %r = zext <8 x i16> %v0 to <8 x i64>

Modified: llvm/trunk/test/CodeGen/ARM/vcvt-v8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vcvt-v8.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vcvt-v8.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vcvt-v8.ll Fri Feb 27 15:17:42 2015
@@ -2,7 +2,7 @@
 define <4 x i32> @vcvtasq(<4 x float>* %A) {
 ; CHECK: vcvtasq
 ; CHECK: vcvta.s32.f32 q{{[0-9]+}}, q{{[0-9]+}}
-  %tmp1 = load <4 x float>* %A
+  %tmp1 = load <4 x float>, <4 x float>* %A
   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float> %tmp1)
   ret <4 x i32> %tmp2
 }
@@ -10,7 +10,7 @@ define <4 x i32> @vcvtasq(<4 x float>* %
 define <2 x i32> @vcvtasd(<2 x float>* %A) {
 ; CHECK: vcvtasd
 ; CHECK: vcvta.s32.f32 d{{[0-9]+}}, d{{[0-9]+}}
-  %tmp1 = load <2 x float>* %A
+  %tmp1 = load <2 x float>, <2 x float>* %A
   %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtas.v2i32.v2f32(<2 x float> %tmp1)
   ret <2 x i32> %tmp2
 }
@@ -18,7 +18,7 @@ define <2 x i32> @vcvtasd(<2 x float>* %
 define <4 x i32> @vcvtnsq(<4 x float>* %A) {
 ; CHECK: vcvtnsq
 ; CHECK: vcvtn.s32.f32 q{{[0-9]+}}, q{{[0-9]+}}
-  %tmp1 = load <4 x float>* %A
+  %tmp1 = load <4 x float>, <4 x float>* %A
   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtns.v4i32.v4f32(<4 x float> %tmp1)
   ret <4 x i32> %tmp2
 }
@@ -26,7 +26,7 @@ define <4 x i32> @vcvtnsq(<4 x float>* %
 define <2 x i32> @vcvtnsd(<2 x float>* %A) {
 ; CHECK: vcvtnsd
 ; CHECK: vcvtn.s32.f32 d{{[0-9]+}}, d{{[0-9]+}}
-  %tmp1 = load <2 x float>* %A
+  %tmp1 = load <2 x float>, <2 x float>* %A
   %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtns.v2i32.v2f32(<2 x float> %tmp1)
   ret <2 x i32> %tmp2
 }
@@ -34,7 +34,7 @@ define <2 x i32> @vcvtnsd(<2 x float>* %
 define <4 x i32> @vcvtpsq(<4 x float>* %A) {
 ; CHECK: vcvtpsq
 ; CHECK: vcvtp.s32.f32 q{{[0-9]+}}, q{{[0-9]+}}
-  %tmp1 = load <4 x float>* %A
+  %tmp1 = load <4 x float>, <4 x float>* %A
   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtps.v4i32.v4f32(<4 x float> %tmp1)
   ret <4 x i32> %tmp2
 }
@@ -42,7 +42,7 @@ define <4 x i32> @vcvtpsq(<4 x float>* %
 define <2 x i32> @vcvtpsd(<2 x float>* %A) {
 ; CHECK: vcvtpsd
 ; CHECK: vcvtp.s32.f32 d{{[0-9]+}}, d{{[0-9]+}}
-  %tmp1 = load <2 x float>* %A
+  %tmp1 = load <2 x float>, <2 x float>* %A
   %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtps.v2i32.v2f32(<2 x float> %tmp1)
   ret <2 x i32> %tmp2
 }
@@ -50,7 +50,7 @@ define <2 x i32> @vcvtpsd(<2 x float>* %
 define <4 x i32> @vcvtmsq(<4 x float>* %A) {
 ; CHECK: vcvtmsq
 ; CHECK: vcvtm.s32.f32 q{{[0-9]+}}, q{{[0-9]+}}
-  %tmp1 = load <4 x float>* %A
+  %tmp1 = load <4 x float>, <4 x float>* %A
   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtms.v4i32.v4f32(<4 x float> %tmp1)
   ret <4 x i32> %tmp2
 }
@@ -58,7 +58,7 @@ define <4 x i32> @vcvtmsq(<4 x float>* %
 define <2 x i32> @vcvtmsd(<2 x float>* %A) {
 ; CHECK: vcvtmsd
 ; CHECK: vcvtm.s32.f32 d{{[0-9]+}}, d{{[0-9]+}}
-  %tmp1 = load <2 x float>* %A
+  %tmp1 = load <2 x float>, <2 x float>* %A
   %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtms.v2i32.v2f32(<2 x float> %tmp1)
   ret <2 x i32> %tmp2
 }
@@ -66,7 +66,7 @@ define <2 x i32> @vcvtmsd(<2 x float>* %
 define <4 x i32> @vcvtauq(<4 x float>* %A) {
 ; CHECK: vcvtauq
 ; CHECK: vcvta.u32.f32 q{{[0-9]+}}, q{{[0-9]+}}
-  %tmp1 = load <4 x float>* %A
+  %tmp1 = load <4 x float>, <4 x float>* %A
   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtau.v4i32.v4f32(<4 x float> %tmp1)
   ret <4 x i32> %tmp2
 }
@@ -74,7 +74,7 @@ define <4 x i32> @vcvtauq(<4 x float>* %
 define <2 x i32> @vcvtaud(<2 x float>* %A) {
 ; CHECK: vcvtaud
 ; CHECK: vcvta.u32.f32 d{{[0-9]+}}, d{{[0-9]+}}
-  %tmp1 = load <2 x float>* %A
+  %tmp1 = load <2 x float>, <2 x float>* %A
   %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtau.v2i32.v2f32(<2 x float> %tmp1)
   ret <2 x i32> %tmp2
 }
@@ -82,7 +82,7 @@ define <2 x i32> @vcvtaud(<2 x float>* %
 define <4 x i32> @vcvtnuq(<4 x float>* %A) {
 ; CHECK: vcvtnuq
 ; CHECK: vcvtn.u32.f32 q{{[0-9]+}}, q{{[0-9]+}}
-  %tmp1 = load <4 x float>* %A
+  %tmp1 = load <4 x float>, <4 x float>* %A
   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtnu.v4i32.v4f32(<4 x float> %tmp1)
   ret <4 x i32> %tmp2
 }
@@ -90,7 +90,7 @@ define <4 x i32> @vcvtnuq(<4 x float>* %
 define <2 x i32> @vcvtnud(<2 x float>* %A) {
 ; CHECK: vcvtnud
 ; CHECK: vcvtn.u32.f32 d{{[0-9]+}}, d{{[0-9]+}}
-  %tmp1 = load <2 x float>* %A
+  %tmp1 = load <2 x float>, <2 x float>* %A
   %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtnu.v2i32.v2f32(<2 x float> %tmp1)
   ret <2 x i32> %tmp2
 }
@@ -98,7 +98,7 @@ define <2 x i32> @vcvtnud(<2 x float>* %
 define <4 x i32> @vcvtpuq(<4 x float>* %A) {
 ; CHECK: vcvtpuq
 ; CHECK: vcvtp.u32.f32 q{{[0-9]+}}, q{{[0-9]+}}
-  %tmp1 = load <4 x float>* %A
+  %tmp1 = load <4 x float>, <4 x float>* %A
   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtpu.v4i32.v4f32(<4 x float> %tmp1)
   ret <4 x i32> %tmp2
 }
@@ -106,7 +106,7 @@ define <4 x i32> @vcvtpuq(<4 x float>* %
 define <2 x i32> @vcvtpud(<2 x float>* %A) {
 ; CHECK: vcvtpud
 ; CHECK: vcvtp.u32.f32 d{{[0-9]+}}, d{{[0-9]+}}
-  %tmp1 = load <2 x float>* %A
+  %tmp1 = load <2 x float>, <2 x float>* %A
   %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtpu.v2i32.v2f32(<2 x float> %tmp1)
   ret <2 x i32> %tmp2
 }
@@ -114,7 +114,7 @@ define <2 x i32> @vcvtpud(<2 x float>* %
 define <4 x i32> @vcvtmuq(<4 x float>* %A) {
 ; CHECK: vcvtmuq
 ; CHECK: vcvtm.u32.f32 q{{[0-9]+}}, q{{[0-9]+}}
-  %tmp1 = load <4 x float>* %A
+  %tmp1 = load <4 x float>, <4 x float>* %A
   %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtmu.v4i32.v4f32(<4 x float> %tmp1)
   ret <4 x i32> %tmp2
 }
@@ -122,7 +122,7 @@ define <4 x i32> @vcvtmuq(<4 x float>* %
 define <2 x i32> @vcvtmud(<2 x float>* %A) {
 ; CHECK: vcvtmud
 ; CHECK: vcvtm.u32.f32 d{{[0-9]+}}, d{{[0-9]+}}
-  %tmp1 = load <2 x float>* %A
+  %tmp1 = load <2 x float>, <2 x float>* %A
   %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtmu.v2i32.v2f32(<2 x float> %tmp1)
   ret <2 x i32> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/vcvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vcvt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vcvt.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vcvt.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 define <2 x i32> @vcvt_f32tos32(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: vcvt_f32tos32:
 ;CHECK: vcvt.s32.f32
-	%tmp1 = load <2 x float>* %A
+	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
 	ret <2 x i32> %tmp2
 }
@@ -11,7 +11,7 @@ define <2 x i32> @vcvt_f32tos32(<2 x flo
 define <2 x i32> @vcvt_f32tou32(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: vcvt_f32tou32:
 ;CHECK: vcvt.u32.f32
-	%tmp1 = load <2 x float>* %A
+	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
 	ret <2 x i32> %tmp2
 }
@@ -19,7 +19,7 @@ define <2 x i32> @vcvt_f32tou32(<2 x flo
 define <2 x float> @vcvt_s32tof32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vcvt_s32tof32:
 ;CHECK: vcvt.f32.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = sitofp <2 x i32> %tmp1 to <2 x float>
 	ret <2 x float> %tmp2
 }
@@ -27,7 +27,7 @@ define <2 x float> @vcvt_s32tof32(<2 x i
 define <2 x float> @vcvt_u32tof32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vcvt_u32tof32:
 ;CHECK: vcvt.f32.u32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = uitofp <2 x i32> %tmp1 to <2 x float>
 	ret <2 x float> %tmp2
 }
@@ -35,7 +35,7 @@ define <2 x float> @vcvt_u32tof32(<2 x i
 define <4 x i32> @vcvtQ_f32tos32(<4 x float>* %A) nounwind {
 ;CHECK-LABEL: vcvtQ_f32tos32:
 ;CHECK: vcvt.s32.f32
-	%tmp1 = load <4 x float>* %A
+	%tmp1 = load <4 x float>, <4 x float>* %A
 	%tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
 	ret <4 x i32> %tmp2
 }
@@ -43,7 +43,7 @@ define <4 x i32> @vcvtQ_f32tos32(<4 x fl
 define <4 x i32> @vcvtQ_f32tou32(<4 x float>* %A) nounwind {
 ;CHECK-LABEL: vcvtQ_f32tou32:
 ;CHECK: vcvt.u32.f32
-	%tmp1 = load <4 x float>* %A
+	%tmp1 = load <4 x float>, <4 x float>* %A
 	%tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
 	ret <4 x i32> %tmp2
 }
@@ -51,7 +51,7 @@ define <4 x i32> @vcvtQ_f32tou32(<4 x fl
 define <4 x float> @vcvtQ_s32tof32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vcvtQ_s32tof32:
 ;CHECK: vcvt.f32.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = sitofp <4 x i32> %tmp1 to <4 x float>
 	ret <4 x float> %tmp2
 }
@@ -59,7 +59,7 @@ define <4 x float> @vcvtQ_s32tof32(<4 x
 define <4 x float> @vcvtQ_u32tof32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vcvtQ_u32tof32:
 ;CHECK: vcvt.f32.u32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = uitofp <4 x i32> %tmp1 to <4 x float>
 	ret <4 x float> %tmp2
 }
@@ -67,7 +67,7 @@ define <4 x float> @vcvtQ_u32tof32(<4 x
 define <2 x i32> @vcvt_n_f32tos32(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: vcvt_n_f32tos32:
 ;CHECK: vcvt.s32.f32
-	%tmp1 = load <2 x float>* %A
+	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float> %tmp1, i32 1)
 	ret <2 x i32> %tmp2
 }
@@ -75,7 +75,7 @@ define <2 x i32> @vcvt_n_f32tos32(<2 x f
 define <2 x i32> @vcvt_n_f32tou32(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: vcvt_n_f32tou32:
 ;CHECK: vcvt.u32.f32
-	%tmp1 = load <2 x float>* %A
+	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float> %tmp1, i32 1)
 	ret <2 x i32> %tmp2
 }
@@ -83,7 +83,7 @@ define <2 x i32> @vcvt_n_f32tou32(<2 x f
 define <2 x float> @vcvt_n_s32tof32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vcvt_n_s32tof32:
 ;CHECK: vcvt.f32.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x float> @llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32> %tmp1, i32 1)
 	ret <2 x float> %tmp2
 }
@@ -91,7 +91,7 @@ define <2 x float> @vcvt_n_s32tof32(<2 x
 define <2 x float> @vcvt_n_u32tof32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vcvt_n_u32tof32:
 ;CHECK: vcvt.f32.u32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x float> @llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32> %tmp1, i32 1)
 	ret <2 x float> %tmp2
 }
@@ -104,7 +104,7 @@ declare <2 x float> @llvm.arm.neon.vcvtf
 define <4 x i32> @vcvtQ_n_f32tos32(<4 x float>* %A) nounwind {
 ;CHECK-LABEL: vcvtQ_n_f32tos32:
 ;CHECK: vcvt.s32.f32
-	%tmp1 = load <4 x float>* %A
+	%tmp1 = load <4 x float>, <4 x float>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float> %tmp1, i32 1)
 	ret <4 x i32> %tmp2
 }
@@ -112,7 +112,7 @@ define <4 x i32> @vcvtQ_n_f32tos32(<4 x
 define <4 x i32> @vcvtQ_n_f32tou32(<4 x float>* %A) nounwind {
 ;CHECK-LABEL: vcvtQ_n_f32tou32:
 ;CHECK: vcvt.u32.f32
-	%tmp1 = load <4 x float>* %A
+	%tmp1 = load <4 x float>, <4 x float>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float> %tmp1, i32 1)
 	ret <4 x i32> %tmp2
 }
@@ -120,7 +120,7 @@ define <4 x i32> @vcvtQ_n_f32tou32(<4 x
 define <4 x float> @vcvtQ_n_s32tof32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vcvtQ_n_s32tof32:
 ;CHECK: vcvt.f32.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32> %tmp1, i32 1)
 	ret <4 x float> %tmp2
 }
@@ -128,7 +128,7 @@ define <4 x float> @vcvtQ_n_s32tof32(<4
 define <4 x float> @vcvtQ_n_u32tof32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vcvtQ_n_u32tof32:
 ;CHECK: vcvt.f32.u32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32> %tmp1, i32 1)
 	ret <4 x float> %tmp2
 }
@@ -141,7 +141,7 @@ declare <4 x float> @llvm.arm.neon.vcvtf
 define <4 x float> @vcvt_f16tof32(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vcvt_f16tof32:
 ;CHECK: vcvt.f32.f16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16> %tmp1)
 	ret <4 x float> %tmp2
 }
@@ -149,7 +149,7 @@ define <4 x float> @vcvt_f16tof32(<4 x i
 define <4 x i16> @vcvt_f32tof16(<4 x float>* %A) nounwind {
 ;CHECK-LABEL: vcvt_f32tof16:
 ;CHECK: vcvt.f16.f32
-	%tmp1 = load <4 x float>* %A
+	%tmp1 = load <4 x float>, <4 x float>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float> %tmp1)
 	ret <4 x i16> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/vcvt_combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vcvt_combine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vcvt_combine.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vcvt_combine.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 ; CHECK-NOT: vmul
 define void @t0() nounwind {
 entry:
-  %tmp = load float* @in, align 4
+  %tmp = load float, float* @in, align 4
   %vecinit.i = insertelement <2 x float> undef, float %tmp, i32 0
   %vecinit2.i = insertelement <2 x float> %vecinit.i, float %tmp, i32 1
   %mul.i = fmul <2 x float> %vecinit2.i, <float 8.000000e+00, float 8.000000e+00>
@@ -23,7 +23,7 @@ declare void @foo_int32x2_t(<2 x i32>)
 ; CHECK-NOT: vmul
 define void @t1() nounwind {
 entry:
-  %tmp = load float* @in, align 4
+  %tmp = load float, float* @in, align 4
   %vecinit.i = insertelement <2 x float> undef, float %tmp, i32 0
   %vecinit2.i = insertelement <2 x float> %vecinit.i, float %tmp, i32 1
   %mul.i = fmul <2 x float> %vecinit2.i, <float 8.000000e+00, float 8.000000e+00>
@@ -39,7 +39,7 @@ declare void @foo_uint32x2_t(<2 x i32>)
 ; CHECK: vmul
 define void @t2() nounwind {
 entry:
-  %tmp = load float* @in, align 4
+  %tmp = load float, float* @in, align 4
   %vecinit.i = insertelement <2 x float> undef, float %tmp, i32 0
   %vecinit2.i = insertelement <2 x float> %vecinit.i, float %tmp, i32 1
   %mul.i = fmul <2 x float> %vecinit2.i, <float 0x401B333340000000, float 0x401B333340000000>
@@ -53,7 +53,7 @@ entry:
 ; CHECK: vmul
 define void @t3() nounwind {
 entry:
-  %tmp = load float* @in, align 4
+  %tmp = load float, float* @in, align 4
   %vecinit.i = insertelement <2 x float> undef, float %tmp, i32 0
   %vecinit2.i = insertelement <2 x float> %vecinit.i, float %tmp, i32 1
   %mul.i = fmul <2 x float> %vecinit2.i, <float 0x4200000000000000, float 0x4200000000000000>
@@ -67,7 +67,7 @@ entry:
 ; CHECK-NOT: vmul
 define void @t4() nounwind {
 entry:
-  %tmp = load float* @in, align 4
+  %tmp = load float, float* @in, align 4
   %vecinit.i = insertelement <2 x float> undef, float %tmp, i32 0
   %vecinit2.i = insertelement <2 x float> %vecinit.i, float %tmp, i32 1
   %mul.i = fmul <2 x float> %vecinit2.i, <float 0x41F0000000000000, float 0x41F0000000000000>
@@ -81,7 +81,7 @@ entry:
 ; CHECK-NOT: vmul
 define void @t5() nounwind {
 entry:
-  %tmp = load float* @in, align 4
+  %tmp = load float, float* @in, align 4
   %vecinit.i = insertelement <4 x float> undef, float %tmp, i32 0
   %vecinit2.i = insertelement <4 x float> %vecinit.i, float %tmp, i32 1
   %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %tmp, i32 2

Modified: llvm/trunk/test/CodeGen/ARM/vdiv_combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vdiv_combine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vdiv_combine.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vdiv_combine.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ declare void @foo_int32x4_t(<4 x i32>)
 ; CHECK-NOT: {{vdiv|vmul}}
 define void @t1() nounwind {
 entry:
-  %tmp = load i32* @iin, align 4
+  %tmp = load i32, i32* @iin, align 4
   %vecinit.i = insertelement <2 x i32> undef, i32 %tmp, i32 0
   %vecinit2.i = insertelement <2 x i32> %vecinit.i, i32 %tmp, i32 1
   %vcvt.i = sitofp <2 x i32> %vecinit2.i to <2 x float>
@@ -27,7 +27,7 @@ declare void @foo_float32x2_t(<2 x float
 ; CHECK-NOT: {{vdiv|vmul}}
 define void @t2() nounwind {
 entry:
-  %tmp = load i32* @uin, align 4
+  %tmp = load i32, i32* @uin, align 4
   %vecinit.i = insertelement <2 x i32> undef, i32 %tmp, i32 0
   %vecinit2.i = insertelement <2 x i32> %vecinit.i, i32 %tmp, i32 1
   %vcvt.i = uitofp <2 x i32> %vecinit2.i to <2 x float>
@@ -41,7 +41,7 @@ entry:
 ; CHECK: {{vdiv|vmul}}
 define void @t3() nounwind {
 entry:
-  %tmp = load i32* @iin, align 4
+  %tmp = load i32, i32* @iin, align 4
   %vecinit.i = insertelement <2 x i32> undef, i32 %tmp, i32 0
   %vecinit2.i = insertelement <2 x i32> %vecinit.i, i32 %tmp, i32 1
   %vcvt.i = sitofp <2 x i32> %vecinit2.i to <2 x float>
@@ -55,7 +55,7 @@ entry:
 ; CHECK: {{vdiv|vmul}}
 define void @t4() nounwind {
 entry:
-  %tmp = load i32* @iin, align 4
+  %tmp = load i32, i32* @iin, align 4
   %vecinit.i = insertelement <2 x i32> undef, i32 %tmp, i32 0
   %vecinit2.i = insertelement <2 x i32> %vecinit.i, i32 %tmp, i32 1
   %vcvt.i = sitofp <2 x i32> %vecinit2.i to <2 x float>
@@ -69,7 +69,7 @@ entry:
 ; CHECK-NOT: {{vdiv|vmul}}
 define void @t5() nounwind {
 entry:
-  %tmp = load i32* @iin, align 4
+  %tmp = load i32, i32* @iin, align 4
   %vecinit.i = insertelement <2 x i32> undef, i32 %tmp, i32 0
   %vecinit2.i = insertelement <2 x i32> %vecinit.i, i32 %tmp, i32 1
   %vcvt.i = sitofp <2 x i32> %vecinit2.i to <2 x float>
@@ -83,7 +83,7 @@ entry:
 ; CHECK-NOT: {{vdiv|vmul}}
 define void @t6() nounwind {
 entry:
-  %tmp = load i32* @iin, align 4
+  %tmp = load i32, i32* @iin, align 4
   %vecinit.i = insertelement <4 x i32> undef, i32 %tmp, i32 0
   %vecinit2.i = insertelement <4 x i32> %vecinit.i, i32 %tmp, i32 1
   %vecinit4.i = insertelement <4 x i32> %vecinit2.i, i32 %tmp, i32 2

Modified: llvm/trunk/test/CodeGen/ARM/vdup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vdup.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vdup.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vdup.ll Fri Feb 27 15:17:42 2015
@@ -166,7 +166,7 @@ define <4 x float> @v_shuffledupQfloat(f
 define <8 x i8> @vduplane8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vduplane8:
 ;CHECK: vdup.8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
 	ret <8 x i8> %tmp2
 }
@@ -174,7 +174,7 @@ define <8 x i8> @vduplane8(<8 x i8>* %A)
 define <4 x i16> @vduplane16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vduplane16:
 ;CHECK: vdup.16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
 	ret <4 x i16> %tmp2
 }
@@ -182,7 +182,7 @@ define <4 x i16> @vduplane16(<4 x i16>*
 define <2 x i32> @vduplane32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vduplane32:
 ;CHECK: vdup.32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >
 	ret <2 x i32> %tmp2
 }
@@ -190,7 +190,7 @@ define <2 x i32> @vduplane32(<2 x i32>*
 define <2 x float> @vduplanefloat(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: vduplanefloat:
 ;CHECK: vdup.32
-	%tmp1 = load <2 x float>* %A
+	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> < i32 1, i32 1 >
 	ret <2 x float> %tmp2
 }
@@ -198,7 +198,7 @@ define <2 x float> @vduplanefloat(<2 x f
 define <16 x i8> @vduplaneQ8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vduplaneQ8:
 ;CHECK: vdup.8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <16 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
 	ret <16 x i8> %tmp2
 }
@@ -206,7 +206,7 @@ define <16 x i8> @vduplaneQ8(<8 x i8>* %
 define <8 x i16> @vduplaneQ16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vduplaneQ16:
 ;CHECK: vdup.16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
 	ret <8 x i16> %tmp2
 }
@@ -214,7 +214,7 @@ define <8 x i16> @vduplaneQ16(<4 x i16>*
 define <4 x i32> @vduplaneQ32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vduplaneQ32:
 ;CHECK: vdup.32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
 	ret <4 x i32> %tmp2
 }
@@ -222,7 +222,7 @@ define <4 x i32> @vduplaneQ32(<2 x i32>*
 define <4 x float> @vduplaneQfloat(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: vduplaneQfloat:
 ;CHECK: vdup.32
-	%tmp1 = load <2 x float>* %A
+	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
 	ret <4 x float> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/vector-DAGCombine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vector-DAGCombine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vector-DAGCombine.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vector-DAGCombine.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ bb.i19:
 define void @test_illegal_build_vector() nounwind {
 entry:
   store <2 x i64> undef, <2 x i64>* undef, align 16
-  %0 = load <16 x i8>* undef, align 16            ; <<16 x i8>> [#uses=1]
+  %0 = load <16 x i8>, <16 x i8>* undef, align 16            ; <<16 x i8>> [#uses=1]
   %1 = or <16 x i8> zeroinitializer, %0           ; <<16 x i8>> [#uses=1]
   store <16 x i8> %1, <16 x i8>* undef, align 16
   ret void
@@ -63,7 +63,7 @@ bb2:
 ; Test trying to do a ShiftCombine on illegal types.
 ; The vector should be split first.
 define void @lshrIllegalType(<8 x i32>* %A) nounwind {
-       %tmp1 = load <8 x i32>* %A
+       %tmp1 = load <8 x i32>, <8 x i32>* %A
        %tmp2 = lshr <8 x i32> %tmp1, < i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
        store <8 x i32> %tmp2, <8 x i32>* %A
        ret void
@@ -89,7 +89,7 @@ declare void @llvm.arm.neon.vst1.v8i8(i8
 define void @i64_buildvector(i64* %ptr, <2 x i64>* %vp) nounwind {
 ; CHECK: i64_buildvector
 ; CHECK: vldr
-  %t0 = load i64* %ptr, align 4
+  %t0 = load i64, i64* %ptr, align 4
   %t1 = insertelement <2 x i64> undef, i64 %t0, i32 0
   store <2 x i64> %t1, <2 x i64>* %vp
   ret void
@@ -98,8 +98,8 @@ define void @i64_buildvector(i64* %ptr,
 define void @i64_insertelement(i64* %ptr, <2 x i64>* %vp) nounwind {
 ; CHECK: i64_insertelement
 ; CHECK: vldr
-  %t0 = load i64* %ptr, align 4
-  %vec = load <2 x i64>* %vp
+  %t0 = load i64, i64* %ptr, align 4
+  %vec = load <2 x i64>, <2 x i64>* %vp
   %t1 = insertelement <2 x i64> %vec, i64 %t0, i32 0
   store <2 x i64> %t1, <2 x i64>* %vp
   ret void
@@ -108,7 +108,7 @@ define void @i64_insertelement(i64* %ptr
 define void @i64_extractelement(i64* %ptr, <2 x i64>* %vp) nounwind {
 ; CHECK: i64_extractelement
 ; CHECK: vstr
-  %vec = load <2 x i64>* %vp
+  %vec = load <2 x i64>, <2 x i64>* %vp
   %t1 = extractelement <2 x i64> %vec, i32 0
   store i64 %t1, i64* %ptr
   ret void
@@ -116,7 +116,7 @@ define void @i64_extractelement(i64* %pt
 
 ; Test trying to do an AND Combine on illegal types.
 define void @andVec(<3 x i8>* %A) nounwind {
-  %tmp = load <3 x i8>* %A, align 4
+  %tmp = load <3 x i8>, <3 x i8>* %A, align 4
   %and = and <3 x i8> %tmp, <i8 7, i8 7, i8 7>
   store <3 x i8> %and, <3 x i8>* %A
   ret void
@@ -125,7 +125,7 @@ define void @andVec(<3 x i8>* %A) nounwi
 
 ; Test trying to do an OR Combine on illegal types.
 define void @orVec(<3 x i8>* %A) nounwind {
-  %tmp = load <3 x i8>* %A, align 4
+  %tmp = load <3 x i8>, <3 x i8>* %A, align 4
   %or = or <3 x i8> %tmp, <i8 7, i8 7, i8 7>
   store <3 x i8> %or, <3 x i8>* %A
   ret void
@@ -146,7 +146,7 @@ define i16 @foldBuildVectors() {
 ; shuffles.
 ; CHECK-LABEL: reverse_v8i16:
 define void @reverse_v8i16(<8 x i16>* %loadaddr, <8 x i16>* %storeaddr) {
-  %v0 = load <8 x i16>* %loadaddr
+  %v0 = load <8 x i16>, <8 x i16>* %loadaddr
   ; CHECK: vrev64.16
   ; CHECK: vext.16
   %v1 = shufflevector <8 x i16> %v0, <8 x i16> undef,
@@ -159,7 +159,7 @@ define void @reverse_v8i16(<8 x i16>* %l
 ; shuffles.
 ; CHECK-LABEL: reverse_v16i8:
 define void @reverse_v16i8(<16 x i8>* %loadaddr, <16 x i8>* %storeaddr) {
-  %v0 = load <16 x i8>* %loadaddr
+  %v0 = load <16 x i8>, <16 x i8>* %loadaddr
   ; CHECK: vrev64.8
   ; CHECK: vext.8
   %v1 = shufflevector <16 x i8> %v0, <16 x i8> undef,
@@ -180,9 +180,9 @@ define void @reverse_v16i8(<16 x i8>* %l
 define <8 x i16> @t3(i8 zeroext %xf, i8* nocapture %sp0, i8* nocapture %sp1, i32* nocapture %outp) {
 entry:
   %pix_sp0.0.cast = bitcast i8* %sp0 to i32*
-  %pix_sp0.0.copyload = load i32* %pix_sp0.0.cast, align 1
+  %pix_sp0.0.copyload = load i32, i32* %pix_sp0.0.cast, align 1
   %pix_sp1.0.cast = bitcast i8* %sp1 to i32*
-  %pix_sp1.0.copyload = load i32* %pix_sp1.0.cast, align 1
+  %pix_sp1.0.copyload = load i32, i32* %pix_sp1.0.cast, align 1
   %vecinit = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
   %vecinit1 = insertelement <2 x i32> %vecinit, i32 %pix_sp1.0.copyload, i32 1
   %0 = bitcast <2 x i32> %vecinit1 to <8 x i8>
@@ -200,7 +200,7 @@ define <8 x i16> @t4(i8* nocapture %sp0)
 ; CHECK: vld1.32 {{{d[0-9]+}}[0]}, [r0]
 entry:
   %pix_sp0.0.cast = bitcast i8* %sp0 to i32*
-  %pix_sp0.0.copyload = load i32* %pix_sp0.0.cast, align 1
+  %pix_sp0.0.copyload = load i32, i32* %pix_sp0.0.cast, align 1
   %vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
   %0 = bitcast <2 x i32> %vec to <8 x i8>
   %vmull.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %0, <8 x i8> %0)
@@ -219,11 +219,11 @@ entry:
 define <8 x i16> @t5(i8* nocapture %sp0, i8* nocapture %sp1, i8* nocapture %sp2) {
 entry:
   %pix_sp0.0.cast = bitcast i8* %sp0 to i32*
-  %pix_sp0.0.copyload = load i32* %pix_sp0.0.cast, align 1
+  %pix_sp0.0.copyload = load i32, i32* %pix_sp0.0.cast, align 1
   %pix_sp1.0.cast = bitcast i8* %sp1 to i32*
-  %pix_sp1.0.copyload = load i32* %pix_sp1.0.cast, align 1
+  %pix_sp1.0.copyload = load i32, i32* %pix_sp1.0.cast, align 1
   %pix_sp2.0.cast = bitcast i8* %sp2 to i32*
-  %pix_sp2.0.copyload = load i32* %pix_sp2.0.cast, align 1
+  %pix_sp2.0.copyload = load i32, i32* %pix_sp2.0.cast, align 1
   %vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 1
   %vecinit1 = insertelement <2 x i32> %vec, i32 %pix_sp1.0.copyload, i32 0
   %vecinit2 = insertelement <2 x i32> %vec, i32 %pix_sp2.0.copyload, i32 0

Modified: llvm/trunk/test/CodeGen/ARM/vector-extend-narrow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vector-extend-narrow.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vector-extend-narrow.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vector-extend-narrow.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 define float @f(<4 x i16>* nocapture %in) {
   ; CHECK: vldr
   ; CHECK: vmovl.u16
-  %1 = load <4 x i16>* %in
+  %1 = load <4 x i16>, <4 x i16>* %in
   ; CHECK: vcvt.f32.u32
   %2 = uitofp <4 x i16> %1 to <4 x float>
   %3 = extractelement <4 x float> %2, i32 0
@@ -25,7 +25,7 @@ define float @g(<4 x i8>* nocapture %in)
   ; CHECK: vld1
   ; CHECK: vmovl.u8
   ; CHECK: vmovl.u16
-  %1 = load <4 x i8>* %in
+  %1 = load <4 x i8>, <4 x i8>* %in
   ; CHECK: vcvt.f32.u32
   %2 = uitofp <4 x i8> %1 to <4 x float>
   %3 = extractelement <4 x float> %2, i32 0
@@ -58,7 +58,7 @@ define <4 x i8> @i(<4 x i8>* %x) {
   ; CHECK: vrecps
   ; CHECK: vmul
   ; CHECK: vmovn
-  %1 = load <4 x i8>* %x, align 4
+  %1 = load <4 x i8>, <4 x i8>* %x, align 4
   %2 = sdiv <4 x i8> zeroinitializer, %1
   ret <4 x i8> %2
 }
@@ -68,7 +68,7 @@ define <4 x i32> @j(<4 x i8>* %in) nounw
   ; CHECK: vmovl.u8
   ; CHECK: vmovl.u16
   ; CHECK-NOT: vand
-  %1 = load <4 x i8>* %in, align 4
+  %1 = load <4 x i8>, <4 x i8>* %in, align 4
   %2 = zext <4 x i8> %1 to <4 x i32>
   ret <4 x i32> %2
 }

Modified: llvm/trunk/test/CodeGen/ARM/vector-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vector-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vector-load.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vector-load.ll Fri Feb 27 15:17:42 2015
@@ -6,16 +6,16 @@ target triple = "thumbv7s-apple-ios8.0.0
 define <8 x i8> @load_v8i8(<8 x i8>** %ptr) {
 ;CHECK-LABEL: load_v8i8:
 ;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <8 x i8>** %ptr
-	%lA = load <8 x i8>* %A, align 1
+	%A = load <8 x i8>*, <8 x i8>** %ptr
+	%lA = load <8 x i8>, <8 x i8>* %A, align 1
 	ret <8 x i8> %lA
 }
 
 define <8 x i8> @load_v8i8_update(<8 x i8>** %ptr) {
 ;CHECK-LABEL: load_v8i8_update:
 ;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <8 x i8>** %ptr
-	%lA = load <8 x i8>* %A, align 1
+	%A = load <8 x i8>*, <8 x i8>** %ptr
+	%lA = load <8 x i8>, <8 x i8>* %A, align 1
 	%inc = getelementptr <8 x i8>, <8 x i8>* %A, i38 1
         store <8 x i8>* %inc, <8 x i8>** %ptr
 	ret <8 x i8> %lA
@@ -24,16 +24,16 @@ define <8 x i8> @load_v8i8_update(<8 x i
 define <4 x i16> @load_v4i16(<4 x i16>** %ptr) {
 ;CHECK-LABEL: load_v4i16:
 ;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <4 x i16>** %ptr
-	%lA = load <4 x i16>* %A, align 1
+	%A = load <4 x i16>*, <4 x i16>** %ptr
+	%lA = load <4 x i16>, <4 x i16>* %A, align 1
 	ret <4 x i16> %lA
 }
 
 define <4 x i16> @load_v4i16_update(<4 x i16>** %ptr) {
 ;CHECK-LABEL: load_v4i16_update:
 ;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <4 x i16>** %ptr
-	%lA = load <4 x i16>* %A, align 1
+	%A = load <4 x i16>*, <4 x i16>** %ptr
+	%lA = load <4 x i16>, <4 x i16>* %A, align 1
 	%inc = getelementptr <4 x i16>, <4 x i16>* %A, i34 1
         store <4 x i16>* %inc, <4 x i16>** %ptr
 	ret <4 x i16> %lA
@@ -42,16 +42,16 @@ define <4 x i16> @load_v4i16_update(<4 x
 define <2 x i32> @load_v2i32(<2 x i32>** %ptr) {
 ;CHECK-LABEL: load_v2i32:
 ;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <2 x i32>** %ptr
-	%lA = load <2 x i32>* %A, align 1
+	%A = load <2 x i32>*, <2 x i32>** %ptr
+	%lA = load <2 x i32>, <2 x i32>* %A, align 1
 	ret <2 x i32> %lA
 }
 
 define <2 x i32> @load_v2i32_update(<2 x i32>** %ptr) {
 ;CHECK-LABEL: load_v2i32_update:
 ;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <2 x i32>** %ptr
-	%lA = load <2 x i32>* %A, align 1
+	%A = load <2 x i32>*, <2 x i32>** %ptr
+	%lA = load <2 x i32>, <2 x i32>* %A, align 1
 	%inc = getelementptr <2 x i32>, <2 x i32>* %A, i32 1
         store <2 x i32>* %inc, <2 x i32>** %ptr
 	ret <2 x i32> %lA
@@ -60,16 +60,16 @@ define <2 x i32> @load_v2i32_update(<2 x
 define <2 x float> @load_v2f32(<2 x float>** %ptr) {
 ;CHECK-LABEL: load_v2f32:
 ;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <2 x float>** %ptr
-	%lA = load <2 x float>* %A, align 1
+	%A = load <2 x float>*, <2 x float>** %ptr
+	%lA = load <2 x float>, <2 x float>* %A, align 1
 	ret <2 x float> %lA
 }
 
 define <2 x float> @load_v2f32_update(<2 x float>** %ptr) {
 ;CHECK-LABEL: load_v2f32_update:
 ;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <2 x float>** %ptr
-	%lA = load <2 x float>* %A, align 1
+	%A = load <2 x float>*, <2 x float>** %ptr
+	%lA = load <2 x float>, <2 x float>* %A, align 1
 	%inc = getelementptr <2 x float>, <2 x float>* %A, i32 1
         store <2 x float>* %inc, <2 x float>** %ptr
 	ret <2 x float> %lA
@@ -78,16 +78,16 @@ define <2 x float> @load_v2f32_update(<2
 define <1 x i64> @load_v1i64(<1 x i64>** %ptr) {
 ;CHECK-LABEL: load_v1i64:
 ;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <1 x i64>** %ptr
-	%lA = load <1 x i64>* %A, align 1
+	%A = load <1 x i64>*, <1 x i64>** %ptr
+	%lA = load <1 x i64>, <1 x i64>* %A, align 1
 	ret <1 x i64> %lA
 }
 
 define <1 x i64> @load_v1i64_update(<1 x i64>** %ptr) {
 ;CHECK-LABEL: load_v1i64_update:
 ;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <1 x i64>** %ptr
-	%lA = load <1 x i64>* %A, align 1
+	%A = load <1 x i64>*, <1 x i64>** %ptr
+	%lA = load <1 x i64>, <1 x i64>* %A, align 1
 	%inc = getelementptr <1 x i64>, <1 x i64>* %A, i31 1
         store <1 x i64>* %inc, <1 x i64>** %ptr
 	ret <1 x i64> %lA
@@ -96,16 +96,16 @@ define <1 x i64> @load_v1i64_update(<1 x
 define <16 x i8> @load_v16i8(<16 x i8>** %ptr) {
 ;CHECK-LABEL: load_v16i8:
 ;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <16 x i8>** %ptr
-	%lA = load <16 x i8>* %A, align 1
+	%A = load <16 x i8>*, <16 x i8>** %ptr
+	%lA = load <16 x i8>, <16 x i8>* %A, align 1
 	ret <16 x i8> %lA
 }
 
 define <16 x i8> @load_v16i8_update(<16 x i8>** %ptr) {
 ;CHECK-LABEL: load_v16i8_update:
 ;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <16 x i8>** %ptr
-	%lA = load <16 x i8>* %A, align 1
+	%A = load <16 x i8>*, <16 x i8>** %ptr
+	%lA = load <16 x i8>, <16 x i8>* %A, align 1
 	%inc = getelementptr <16 x i8>, <16 x i8>* %A, i316 1
         store <16 x i8>* %inc, <16 x i8>** %ptr
 	ret <16 x i8> %lA
@@ -114,16 +114,16 @@ define <16 x i8> @load_v16i8_update(<16
 define <8 x i16> @load_v8i16(<8 x i16>** %ptr) {
 ;CHECK-LABEL: load_v8i16:
 ;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <8 x i16>** %ptr
-	%lA = load <8 x i16>* %A, align 1
+	%A = load <8 x i16>*, <8 x i16>** %ptr
+	%lA = load <8 x i16>, <8 x i16>* %A, align 1
 	ret <8 x i16> %lA
 }
 
 define <8 x i16> @load_v8i16_update(<8 x i16>** %ptr) {
 ;CHECK-LABEL: load_v8i16_update:
 ;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <8 x i16>** %ptr
-	%lA = load <8 x i16>* %A, align 1
+	%A = load <8 x i16>*, <8 x i16>** %ptr
+	%lA = load <8 x i16>, <8 x i16>* %A, align 1
 	%inc = getelementptr <8 x i16>, <8 x i16>* %A, i38 1
         store <8 x i16>* %inc, <8 x i16>** %ptr
 	ret <8 x i16> %lA
@@ -132,16 +132,16 @@ define <8 x i16> @load_v8i16_update(<8 x
 define <4 x i32> @load_v4i32(<4 x i32>** %ptr) {
 ;CHECK-LABEL: load_v4i32:
 ;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <4 x i32>** %ptr
-	%lA = load <4 x i32>* %A, align 1
+	%A = load <4 x i32>*, <4 x i32>** %ptr
+	%lA = load <4 x i32>, <4 x i32>* %A, align 1
 	ret <4 x i32> %lA
 }
 
 define <4 x i32> @load_v4i32_update(<4 x i32>** %ptr) {
 ;CHECK-LABEL: load_v4i32_update:
 ;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <4 x i32>** %ptr
-	%lA = load <4 x i32>* %A, align 1
+	%A = load <4 x i32>*, <4 x i32>** %ptr
+	%lA = load <4 x i32>, <4 x i32>* %A, align 1
 	%inc = getelementptr <4 x i32>, <4 x i32>* %A, i34 1
         store <4 x i32>* %inc, <4 x i32>** %ptr
 	ret <4 x i32> %lA
@@ -150,16 +150,16 @@ define <4 x i32> @load_v4i32_update(<4 x
 define <4 x float> @load_v4f32(<4 x float>** %ptr) {
 ;CHECK-LABEL: load_v4f32:
 ;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <4 x float>** %ptr
-	%lA = load <4 x float>* %A, align 1
+	%A = load <4 x float>*, <4 x float>** %ptr
+	%lA = load <4 x float>, <4 x float>* %A, align 1
 	ret <4 x float> %lA
 }
 
 define <4 x float> @load_v4f32_update(<4 x float>** %ptr) {
 ;CHECK-LABEL: load_v4f32_update:
 ;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <4 x float>** %ptr
-	%lA = load <4 x float>* %A, align 1
+	%A = load <4 x float>*, <4 x float>** %ptr
+	%lA = load <4 x float>, <4 x float>* %A, align 1
 	%inc = getelementptr <4 x float>, <4 x float>* %A, i34 1
         store <4 x float>* %inc, <4 x float>** %ptr
 	ret <4 x float> %lA
@@ -168,16 +168,16 @@ define <4 x float> @load_v4f32_update(<4
 define <2 x i64> @load_v2i64(<2 x i64>** %ptr) {
 ;CHECK-LABEL: load_v2i64:
 ;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <2 x i64>** %ptr
-	%lA = load <2 x i64>* %A, align 1
+	%A = load <2 x i64>*, <2 x i64>** %ptr
+	%lA = load <2 x i64>, <2 x i64>* %A, align 1
 	ret <2 x i64> %lA
 }
 
 define <2 x i64> @load_v2i64_update(<2 x i64>** %ptr) {
 ;CHECK-LABEL: load_v2i64_update:
 ;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <2 x i64>** %ptr
-	%lA = load <2 x i64>* %A, align 1
+	%A = load <2 x i64>*, <2 x i64>** %ptr
+	%lA = load <2 x i64>, <2 x i64>* %A, align 1
 	%inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
         store <2 x i64>* %inc, <2 x i64>** %ptr
 	ret <2 x i64> %lA
@@ -187,8 +187,8 @@ define <2 x i64> @load_v2i64_update(<2 x
 define <2 x i64> @load_v2i64_update_aligned2(<2 x i64>** %ptr) {
 ;CHECK-LABEL: load_v2i64_update_aligned2:
 ;CHECK: vld1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <2 x i64>** %ptr
-	%lA = load <2 x i64>* %A, align 2
+	%A = load <2 x i64>*, <2 x i64>** %ptr
+	%lA = load <2 x i64>, <2 x i64>* %A, align 2
 	%inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
         store <2 x i64>* %inc, <2 x i64>** %ptr
 	ret <2 x i64> %lA
@@ -197,8 +197,8 @@ define <2 x i64> @load_v2i64_update_alig
 define <2 x i64> @load_v2i64_update_aligned4(<2 x i64>** %ptr) {
 ;CHECK-LABEL: load_v2i64_update_aligned4:
 ;CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <2 x i64>** %ptr
-	%lA = load <2 x i64>* %A, align 4
+	%A = load <2 x i64>*, <2 x i64>** %ptr
+	%lA = load <2 x i64>, <2 x i64>* %A, align 4
 	%inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
         store <2 x i64>* %inc, <2 x i64>** %ptr
 	ret <2 x i64> %lA
@@ -207,8 +207,8 @@ define <2 x i64> @load_v2i64_update_alig
 define <2 x i64> @load_v2i64_update_aligned8(<2 x i64>** %ptr) {
 ;CHECK-LABEL: load_v2i64_update_aligned8:
 ;CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <2 x i64>** %ptr
-	%lA = load <2 x i64>* %A, align 8
+	%A = load <2 x i64>*, <2 x i64>** %ptr
+	%lA = load <2 x i64>, <2 x i64>* %A, align 8
 	%inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
         store <2 x i64>* %inc, <2 x i64>** %ptr
 	ret <2 x i64> %lA
@@ -217,8 +217,8 @@ define <2 x i64> @load_v2i64_update_alig
 define <2 x i64> @load_v2i64_update_aligned16(<2 x i64>** %ptr) {
 ;CHECK-LABEL: load_v2i64_update_aligned16:
 ;CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:128]!
-	%A = load <2 x i64>** %ptr
-	%lA = load <2 x i64>* %A, align 16
+	%A = load <2 x i64>*, <2 x i64>** %ptr
+	%lA = load <2 x i64>, <2 x i64>* %A, align 16
 	%inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
         store <2 x i64>* %inc, <2 x i64>** %ptr
 	ret <2 x i64> %lA
@@ -230,8 +230,8 @@ define <4 x i32> @zextload_v8i8tov8i32(<
 ;CHECK: vld1.32 {{{d[0-9]+}}[0]}, [{{r[0-9]+}}:32]
 ;CHECK: vmovl.u8        {{q[0-9]+}}, {{d[0-9]+}}
 ;CHECK: vmovl.u16       {{q[0-9]+}}, {{d[0-9]+}}
-	%A = load <4 x i8>** %ptr
-	%lA = load <4 x i8>* %A, align 4
+	%A = load <4 x i8>*, <4 x i8>** %ptr
+	%lA = load <4 x i8>, <4 x i8>* %A, align 4
         %zlA = zext <4 x i8> %lA to <4 x i32>
 	ret <4 x i32> %zlA
 }
@@ -244,8 +244,8 @@ define <4 x i32> @zextload_v8i8tov8i32_f
 ;CHECK: str.w   r[[INCREG]], [r0]
 ;CHECK: vmovl.u8        {{q[0-9]+}}, {{d[0-9]+}}
 ;CHECK: vmovl.u16       {{q[0-9]+}}, {{d[0-9]+}}
-	%A = load <4 x i8>** %ptr
-	%lA = load <4 x i8>* %A, align 4
+	%A = load <4 x i8>*, <4 x i8>** %ptr
+	%lA = load <4 x i8>, <4 x i8>* %A, align 4
 	%inc = getelementptr <4 x i8>, <4 x i8>* %A, i38 4
         store <4 x i8>* %inc, <4 x i8>** %ptr
         %zlA = zext <4 x i8> %lA to <4 x i32>

Modified: llvm/trunk/test/CodeGen/ARM/vector-promotion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vector-promotion.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vector-promotion.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vector-promotion.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 ; RUN: llc -mtriple=thumbv7-apple-ios %s -o - -mattr=+neon | FileCheck --check-prefix=ASM %s
 
 ; IR-BOTH-LABEL: @simpleOneInstructionPromotion
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
 ; IR-BOTH-NEXT: [[VECTOR_OR:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[LOAD]], <i32 undef, i32 1>
 ; IR-BOTH-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[VECTOR_OR]], i32 1
 ; IR-BOTH-NEXT: store i32 [[EXTRACT]], i32* %dest
@@ -16,7 +16,7 @@
 ; ASM-NEXT: vst1.32 {[[LOAD]][1]}, [r1:32]
 ; ASM-NEXT: bx
 define void @simpleOneInstructionPromotion(<2 x i32>* %addr1, i32* %dest) {
-  %in1 = load <2 x i32>* %addr1, align 8
+  %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8
   %extract = extractelement <2 x i32> %in1, i32 1
   %out = or i32 %extract, 1
   store i32 %out, i32* %dest, align 4
@@ -24,7 +24,7 @@ define void @simpleOneInstructionPromoti
 }
 
 ; IR-BOTH-LABEL: @unsupportedInstructionForPromotion
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
 ; IR-BOTH-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[LOAD]], i32 0
 ; IR-BOTH-NEXT: [[CMP:%[a-zA-Z_0-9-]+]] = icmp eq i32 [[EXTRACT]], %in2
 ; IR-BOTH-NEXT: store i1 [[CMP]], i1* %dest
@@ -35,7 +35,7 @@ define void @simpleOneInstructionPromoti
 ; ASM: vmov.32 {{r[0-9]+}}, [[LOAD]]
 ; ASM: bx
 define void @unsupportedInstructionForPromotion(<2 x i32>* %addr1, i32 %in2, i1* %dest) {
-  %in1 = load <2 x i32>* %addr1, align 8
+  %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8
   %extract = extractelement <2 x i32> %in1, i32 0
   %out = icmp eq i32 %extract, %in2
   store i1 %out, i1* %dest, align 4
@@ -44,7 +44,7 @@ define void @unsupportedInstructionForPr
 
 
 ; IR-BOTH-LABEL: @unsupportedChainInDifferentBBs
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
 ; IR-BOTH-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[LOAD]], i32 0
 ; IR-BOTH-NEXT: br i1 %bool, label %bb2, label %end
 ; BB2
@@ -58,7 +58,7 @@ define void @unsupportedInstructionForPr
 ; ASM: bx
 define void @unsupportedChainInDifferentBBs(<2 x i32>* %addr1, i32* %dest, i1 %bool) {
 bb1:
-  %in1 = load <2 x i32>* %addr1, align 8
+  %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8
   %extract = extractelement <2 x i32> %in1, i32 0
   br i1 %bool, label %bb2, label %end
 bb2: 
@@ -70,7 +70,7 @@ end:
 }
 
 ; IR-LABEL: @chainOfInstructionsToPromote
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
 ; IR-BOTH-NEXT: [[VECTOR_OR1:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[LOAD]], <i32 1, i32 undef>
 ; IR-BOTH-NEXT: [[VECTOR_OR2:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR1]], <i32 1, i32 undef>
 ; IR-BOTH-NEXT: [[VECTOR_OR3:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR2]], <i32 1, i32 undef>
@@ -87,7 +87,7 @@ end:
 ; ASM-NOT: vmov.32 {{r[0-9]+}}, [[LOAD]]
 ; ASM: bx
 define void @chainOfInstructionsToPromote(<2 x i32>* %addr1, i32* %dest) {
-  %in1 = load <2 x i32>* %addr1, align 8
+  %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8
   %extract = extractelement <2 x i32> %in1, i32 0
   %out1 = or i32 %extract, 1
   %out2 = or i32 %out1, 1
@@ -101,7 +101,7 @@ define void @chainOfInstructionsToPromot
 }
 
 ; IR-BOTH-LABEL: @unsupportedMultiUses
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
 ; IR-BOTH-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[LOAD]], i32 1
 ; IR-BOTH-NEXT: [[OR:%[a-zA-Z_0-9-]+]] = or i32 [[EXTRACT]], 1
 ; IR-BOTH-NEXT: store i32 [[OR]], i32* %dest
@@ -112,7 +112,7 @@ define void @chainOfInstructionsToPromot
 ; ASM: vmov.32 {{r[0-9]+}}, [[LOAD]]
 ; ASM: bx
 define i32 @unsupportedMultiUses(<2 x i32>* %addr1, i32* %dest) {
-  %in1 = load <2 x i32>* %addr1, align 8
+  %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8
   %extract = extractelement <2 x i32> %in1, i32 1
   %out = or i32 %extract, 1
   store i32 %out, i32* %dest, align 4
@@ -122,7 +122,7 @@ define i32 @unsupportedMultiUses(<2 x i3
 ; Check that we promote using a splat constant when this is a division.
 ; The NORMAL mode does not promote anything as divisions are not legal.
 ; IR-BOTH-LABEL: @udivCase
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
 ; Scalar version:
 ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[LOAD]], i32 1
 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = udiv i32 [[EXTRACT]], 7
@@ -133,7 +133,7 @@ define i32 @unsupportedMultiUses(<2 x i3
 ; IR-BOTH-NEXT: store i32 [[RES]], i32* %dest
 ; IR-BOTH-NEXT: ret
 define void @udivCase(<2 x i32>* %addr1, i32* %dest) {
-  %in1 = load <2 x i32>* %addr1, align 8
+  %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8
   %extract = extractelement <2 x i32> %in1, i32 1
   %out = udiv i32 %extract, 7
   store i32 %out, i32* %dest, align 4
@@ -141,7 +141,7 @@ define void @udivCase(<2 x i32>* %addr1,
 }
 
 ; IR-BOTH-LABEL: @uremCase
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
 ; Scalar version:
 ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[LOAD]], i32 1
 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = urem i32 [[EXTRACT]], 7
@@ -152,7 +152,7 @@ define void @udivCase(<2 x i32>* %addr1,
 ; IR-BOTH-NEXT: store i32 [[RES]], i32* %dest
 ; IR-BOTH-NEXT: ret 
 define void @uremCase(<2 x i32>* %addr1, i32* %dest) {
-  %in1 = load <2 x i32>* %addr1, align 8
+  %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8
   %extract = extractelement <2 x i32> %in1, i32 1
   %out = urem i32 %extract, 7
   store i32 %out, i32* %dest, align 4
@@ -160,7 +160,7 @@ define void @uremCase(<2 x i32>* %addr1,
 }
 
 ; IR-BOTH-LABEL: @sdivCase
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
 ; Scalar version:
 ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[LOAD]], i32 1
 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = sdiv i32 [[EXTRACT]], 7
@@ -171,7 +171,7 @@ define void @uremCase(<2 x i32>* %addr1,
 ; IR-BOTH-NEXT: store i32 [[RES]], i32* %dest
 ; IR-BOTH-NEXT: ret 
 define void @sdivCase(<2 x i32>* %addr1, i32* %dest) {
-  %in1 = load <2 x i32>* %addr1, align 8
+  %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8
   %extract = extractelement <2 x i32> %in1, i32 1
   %out = sdiv i32 %extract, 7
   store i32 %out, i32* %dest, align 4
@@ -179,7 +179,7 @@ define void @sdivCase(<2 x i32>* %addr1,
 }
 
 ; IR-BOTH-LABEL: @sremCase
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
 ; Scalar version:
 ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[LOAD]], i32 1
 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = srem i32 [[EXTRACT]], 7
@@ -190,7 +190,7 @@ define void @sdivCase(<2 x i32>* %addr1,
 ; IR-BOTH-NEXT: store i32 [[RES]], i32* %dest
 ; IR-BOTH-NEXT: ret 
 define void @sremCase(<2 x i32>* %addr1, i32* %dest) {
-  %in1 = load <2 x i32>* %addr1, align 8
+  %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8
   %extract = extractelement <2 x i32> %in1, i32 1
   %out = srem i32 %extract, 7
   store i32 %out, i32* %dest, align 4
@@ -198,7 +198,7 @@ define void @sremCase(<2 x i32>* %addr1,
 }
 
 ; IR-BOTH-LABEL: @fdivCase
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x float>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x float>, <2 x float>* %addr1
 ; Scalar version:  
 ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x float> [[LOAD]], i32 1
 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = fdiv float [[EXTRACT]], 7.0
@@ -209,7 +209,7 @@ define void @sremCase(<2 x i32>* %addr1,
 ; IR-BOTH-NEXT: store float [[RES]], float* %dest
 ; IR-BOTH-NEXT: ret
 define void @fdivCase(<2 x float>* %addr1, float* %dest) {
-  %in1 = load <2 x float>* %addr1, align 8   
+  %in1 = load <2 x float>, <2 x float>* %addr1, align 8   
   %extract = extractelement <2 x float> %in1, i32 1
   %out = fdiv float %extract, 7.0
   store float %out, float* %dest, align 4
@@ -217,7 +217,7 @@ define void @fdivCase(<2 x float>* %addr
 }
 
 ; IR-BOTH-LABEL: @fremCase
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x float>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x float>, <2 x float>* %addr1
 ; Scalar version:  
 ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x float> [[LOAD]], i32 1
 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = frem float [[EXTRACT]], 7.0
@@ -228,7 +228,7 @@ define void @fdivCase(<2 x float>* %addr
 ; IR-BOTH-NEXT: store float [[RES]], float* %dest
 ; IR-BOTH-NEXT: ret
 define void @fremCase(<2 x float>* %addr1, float* %dest) {
-  %in1 = load <2 x float>* %addr1, align 8   
+  %in1 = load <2 x float>, <2 x float>* %addr1, align 8   
   %extract = extractelement <2 x float> %in1, i32 1
   %out = frem float %extract, 7.0
   store float %out, float* %dest, align 4
@@ -238,13 +238,13 @@ define void @fremCase(<2 x float>* %addr
 ; Check that we do not promote when we may introduce undefined behavior
 ; like division by zero.
 ; IR-BOTH-LABEL: @undefDivCase
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
 ; IR-BOTH-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[LOAD]], i32 1
 ; IR-BOTH-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = udiv i32 7, [[EXTRACT]]
 ; IR-BOTH-NEXT: store i32 [[RES]], i32* %dest
 ; IR-BOTH-NEXT: ret
 define void @undefDivCase(<2 x i32>* %addr1, i32* %dest) {
-  %in1 = load <2 x i32>* %addr1, align 8
+  %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8
   %extract = extractelement <2 x i32> %in1, i32 1
   %out = udiv i32 7, %extract
   store i32 %out, i32* %dest, align 4
@@ -255,13 +255,13 @@ define void @undefDivCase(<2 x i32>* %ad
 ; Check that we do not promote when we may introduce undefined behavior
 ; like division by zero.
 ; IR-BOTH-LABEL: @undefRemCase
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
 ; IR-BOTH-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[LOAD]], i32 1
 ; IR-BOTH-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = srem i32 7, [[EXTRACT]]
 ; IR-BOTH-NEXT: store i32 [[RES]], i32* %dest
 ; IR-BOTH-NEXT: ret
 define void @undefRemCase(<2 x i32>* %addr1, i32* %dest) {
-  %in1 = load <2 x i32>* %addr1, align 8
+  %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8
   %extract = extractelement <2 x i32> %in1, i32 1
   %out = srem i32 7, %extract
   store i32 %out, i32* %dest, align 4
@@ -271,7 +271,7 @@ define void @undefRemCase(<2 x i32>* %ad
 ; Check that we use an undef mask for undefined behavior if the fast-math
 ; flag is set.
 ; IR-BOTH-LABEL: @undefConstantFRemCaseWithFastMath
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x float>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x float>, <2 x float>* %addr1
 ; Scalar version:  
 ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x float> [[LOAD]], i32 1
 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = frem nnan float [[EXTRACT]], 7.0
@@ -282,7 +282,7 @@ define void @undefRemCase(<2 x i32>* %ad
 ; IR-BOTH-NEXT: store float [[RES]], float* %dest
 ; IR-BOTH-NEXT: ret
 define void @undefConstantFRemCaseWithFastMath(<2 x float>* %addr1, float* %dest) {
-  %in1 = load <2 x float>* %addr1, align 8   
+  %in1 = load <2 x float>, <2 x float>* %addr1, align 8   
   %extract = extractelement <2 x float> %in1, i32 1
   %out = frem nnan float %extract, 7.0
   store float %out, float* %dest, align 4
@@ -292,7 +292,7 @@ define void @undefConstantFRemCaseWithFa
 ; Check that we use an undef mask for undefined behavior if the fast-math
 ; flag is set.
 ; IR-BOTH-LABEL: @undefVectorFRemCaseWithFastMath
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x float>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x float>, <2 x float>* %addr1
 ; Scalar version:  
 ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x float> [[LOAD]], i32 1
 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = frem nnan float 7.000000e+00, [[EXTRACT]]
@@ -303,7 +303,7 @@ define void @undefConstantFRemCaseWithFa
 ; IR-BOTH-NEXT: store float [[RES]], float* %dest
 ; IR-BOTH-NEXT: ret
 define void @undefVectorFRemCaseWithFastMath(<2 x float>* %addr1, float* %dest) {
-  %in1 = load <2 x float>* %addr1, align 8   
+  %in1 = load <2 x float>, <2 x float>* %addr1, align 8   
   %extract = extractelement <2 x float> %in1, i32 1
   %out = frem nnan float 7.0, %extract
   store float %out, float* %dest, align 4
@@ -314,7 +314,7 @@ define void @undefVectorFRemCaseWithFast
 ; This requires the STRESS mode, as floating point values are
 ; not promoted on armv7.
 ; IR-BOTH-LABEL: @simpleOneInstructionPromotionFloat
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x float>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x float>, <2 x float>* %addr1
 ; Scalar version: 
 ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x float> [[LOAD]], i32 1
 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = fadd float [[EXTRACT]], 1.0
@@ -325,7 +325,7 @@ define void @undefVectorFRemCaseWithFast
 ; IR-BOTH-NEXT: store float [[RES]], float* %dest
 ; IR-BOTH-NEXT: ret
 define void @simpleOneInstructionPromotionFloat(<2 x float>* %addr1, float* %dest) {
-  %in1 = load <2 x float>* %addr1, align 8
+  %in1 = load <2 x float>, <2 x float>* %addr1, align 8
   %extract = extractelement <2 x float> %in1, i32 1
   %out = fadd float %extract, 1.0
   store float %out, float* %dest, align 4
@@ -337,7 +337,7 @@ define void @simpleOneInstructionPromoti
 ; This requires the STRESS mode, as variable indices are expensive
 ; to lower.
 ; IR-BOTH-LABEL: @simpleOneInstructionPromotionVariableIdx
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
 ; Scalar version:
 ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[LOAD]], i32 %idx
 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = or i32 [[EXTRACT]], 1
@@ -348,7 +348,7 @@ define void @simpleOneInstructionPromoti
 ; IR-BOTH-NEXT: store i32 [[RES]], i32* %dest
 ; IR-BOTH-NEXT: ret
 define void @simpleOneInstructionPromotionVariableIdx(<2 x i32>* %addr1, i32* %dest, i32 %idx) {
-  %in1 = load <2 x i32>* %addr1, align 8
+  %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8
   %extract = extractelement <2 x i32> %in1, i32 %idx
   %out = or i32 %extract, 1
   store i32 %out, i32* %dest, align 4
@@ -360,7 +360,7 @@ define void @simpleOneInstructionPromoti
 ; as legal or custom, although the actual assembly is better if we were
 ; promoting it.
 ; IR-BOTH-LABEL: @simpleOneInstructionPromotion8x8
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <8 x i8>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <8 x i8>, <8 x i8>* %addr1
 ; Scalar version:  
 ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <8 x i8> [[LOAD]], i32 1
 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = or i8 [[EXTRACT]], 1
@@ -371,7 +371,7 @@ define void @simpleOneInstructionPromoti
 ; IR-BOTH-NEXT: store i8 [[RES]], i8* %dest
 ; IR-BOTH-NEXT: ret
 define void @simpleOneInstructionPromotion8x8(<8 x i8>* %addr1, i8* %dest) {
-  %in1 = load <8 x i8>* %addr1, align 8
+  %in1 = load <8 x i8>, <8 x i8>* %addr1, align 8
   %extract = extractelement <8 x i8> %in1, i32 1
   %out = or i8 %extract, 1
   store i8 %out, i8* %dest, align 4
@@ -381,7 +381,7 @@ define void @simpleOneInstructionPromoti
 ; Check that we optimized the sequence correctly when it can be
 ; lowered on a Q register.
 ; IR-BOTH-LABEL: @simpleOneInstructionPromotion
-; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <4 x i32>* %addr1
+; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <4 x i32>, <4 x i32>* %addr1
 ; IR-BOTH-NEXT: [[VECTOR_OR:%[a-zA-Z_0-9-]+]] = or <4 x i32> [[LOAD]], <i32 undef, i32 1, i32 undef, i32 undef>
 ; IR-BOTH-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <4 x i32> [[VECTOR_OR]], i32 1
 ; IR-BOTH-NEXT: store i32 [[EXTRACT]], i32* %dest
@@ -395,7 +395,7 @@ define void @simpleOneInstructionPromoti
 ; ASM-NEXT: vst1.32 {[[LOAD]][1]}, [r1]
 ; ASM-NEXT: bx
 define void @simpleOneInstructionPromotion4x32(<4 x i32>* %addr1, i32* %dest) {
-  %in1 = load <4 x i32>* %addr1, align 8
+  %in1 = load <4 x i32>, <4 x i32>* %addr1, align 8
   %extract = extractelement <4 x i32> %in1, i32 1
   %out = or i32 %extract, 1
   store i32 %out, i32* %dest, align 1

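Note that every FileCheck pattern spelling out a load changes along with the IR itself. A pattern that pins only the pointer operand would be insensitive to this syntax churn; a hypothetical alternative (not used by these tests):

  ; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load {{.*}}<2 x float>* %addr1

This matches both the one-operand and the two-operand spelling, at the cost of being less strict about the loaded type.
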
Modified: llvm/trunk/test/CodeGen/ARM/vector-spilling.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vector-spilling.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vector-spilling.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vector-spilling.ll Fri Feb 27 15:17:42 2015
@@ -11,16 +11,16 @@ target datalayout = "e-p:32:32:32-i1:8:8
 define void @test(<8 x i64>* %src) #0 {
 entry:
   %0 = getelementptr inbounds <8 x i64>, <8 x i64>* %src, i32 0
-  %1 = load <8 x i64>* %0, align 8
+  %1 = load <8 x i64>, <8 x i64>* %0, align 8
 
   %2 = getelementptr inbounds <8 x i64>, <8 x i64>* %src, i32 1
-  %3 = load <8 x i64>* %2, align 8
+  %3 = load <8 x i64>, <8 x i64>* %2, align 8
 
   %4 = getelementptr inbounds <8 x i64>, <8 x i64>* %src, i32 2
-  %5 = load <8 x i64>* %4, align 8
+  %5 = load <8 x i64>, <8 x i64>* %4, align 8
 
   %6 = getelementptr inbounds <8 x i64>, <8 x i64>* %src, i32 3
-  %7 = load <8 x i64>* %6, align 8
+  %7 = load <8 x i64>, <8 x i64>* %6, align 8
 
   %8 = shufflevector <8 x i64> %1, <8 x i64> %3, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
   %9 = shufflevector <8 x i64> %1, <8 x i64> %3, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>

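The shuffle masks in this test index into the concatenation of the two source vectors, so i32 8 above refers to element 0 of %3; keeping all four <8 x i64> inputs live across the shuffles is what forces the spill. A minimal sketch of the same interleaving on a smaller type (illustrative only):

  %a  = load <2 x i32>, <2 x i32>* %pa
  %b  = load <2 x i32>, <2 x i32>* %pb
  ; indices 0-1 select from %a, indices 2-3 select from %b
  %lo = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
  %hi = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
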
Modified: llvm/trunk/test/CodeGen/ARM/vector-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vector-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vector-store.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vector-store.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ target triple = "thumbv7s-apple-ios8.0.0
 define void @store_v8i8(<8 x i8>** %ptr, <8 x i8> %val) {
 ;CHECK-LABEL: store_v8i8:
 ;CHECK: str r1, [r0]
-	%A = load <8 x i8>** %ptr
+	%A = load <8 x i8>*, <8 x i8>** %ptr
 	store  <8 x i8> %val, <8 x i8>* %A, align 1
 	ret void
 }
@@ -14,7 +14,7 @@ define void @store_v8i8(<8 x i8>** %ptr,
 define void @store_v8i8_update(<8 x i8>** %ptr, <8 x i8> %val) {
 ;CHECK-LABEL: store_v8i8_update:
 ;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <8 x i8>** %ptr
+	%A = load <8 x i8>*, <8 x i8>** %ptr
 	store  <8 x i8> %val, <8 x i8>* %A, align 1
 	%inc = getelementptr <8 x i8>, <8 x i8>* %A, i38 1
         store <8 x i8>* %inc, <8 x i8>** %ptr
@@ -24,7 +24,7 @@ define void @store_v8i8_update(<8 x i8>*
 define void @store_v4i16(<4 x i16>** %ptr, <4 x i16> %val) {
 ;CHECK-LABEL: store_v4i16:
 ;CHECK: str r1, [r0]
-	%A = load <4 x i16>** %ptr
+	%A = load <4 x i16>*, <4 x i16>** %ptr
 	store  <4 x i16> %val, <4 x i16>* %A, align 1
 	ret void
 }
@@ -32,7 +32,7 @@ define void @store_v4i16(<4 x i16>** %pt
 define void @store_v4i16_update(<4 x i16>** %ptr, <4 x i16> %val) {
 ;CHECK-LABEL: store_v4i16_update:
 ;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <4 x i16>** %ptr
+	%A = load <4 x i16>*, <4 x i16>** %ptr
 	store  <4 x i16> %val, <4 x i16>* %A, align 1
 	%inc = getelementptr <4 x i16>, <4 x i16>* %A, i34 1
         store <4 x i16>* %inc, <4 x i16>** %ptr
@@ -42,7 +42,7 @@ define void @store_v4i16_update(<4 x i16
 define void @store_v2i32(<2 x i32>** %ptr, <2 x i32> %val) {
 ;CHECK-LABEL: store_v2i32:
 ;CHECK: str r1, [r0]
-	%A = load <2 x i32>** %ptr
+	%A = load <2 x i32>*, <2 x i32>** %ptr
 	store  <2 x i32> %val, <2 x i32>* %A, align 1
 	ret void
 }
@@ -50,7 +50,7 @@ define void @store_v2i32(<2 x i32>** %pt
 define void @store_v2i32_update(<2 x i32>** %ptr, <2 x i32> %val) {
 ;CHECK-LABEL: store_v2i32_update:
 ;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <2 x i32>** %ptr
+	%A = load <2 x i32>*, <2 x i32>** %ptr
 	store  <2 x i32> %val, <2 x i32>* %A, align 1
 	%inc = getelementptr <2 x i32>, <2 x i32>* %A, i32 1
         store <2 x i32>* %inc, <2 x i32>** %ptr
@@ -60,7 +60,7 @@ define void @store_v2i32_update(<2 x i32
 define void @store_v2f32(<2 x float>** %ptr, <2 x float> %val) {
 ;CHECK-LABEL: store_v2f32:
 ;CHECK: str r1, [r0]
-	%A = load <2 x float>** %ptr
+	%A = load <2 x float>*, <2 x float>** %ptr
 	store  <2 x float> %val, <2 x float>* %A, align 1
 	ret void
 }
@@ -68,7 +68,7 @@ define void @store_v2f32(<2 x float>** %
 define void @store_v2f32_update(<2 x float>** %ptr, <2 x float> %val) {
 ;CHECK-LABEL: store_v2f32_update:
 ;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <2 x float>** %ptr
+	%A = load <2 x float>*, <2 x float>** %ptr
 	store  <2 x float> %val, <2 x float>* %A, align 1
 	%inc = getelementptr <2 x float>, <2 x float>* %A, i32 1
         store <2 x float>* %inc, <2 x float>** %ptr
@@ -78,7 +78,7 @@ define void @store_v2f32_update(<2 x flo
 define void @store_v1i64(<1 x i64>** %ptr, <1 x i64> %val) {
 ;CHECK-LABEL: store_v1i64:
 ;CHECK: str r1, [r0]
-	%A = load <1 x i64>** %ptr
+	%A = load <1 x i64>*, <1 x i64>** %ptr
 	store  <1 x i64> %val, <1 x i64>* %A, align 1
 	ret void
 }
@@ -86,7 +86,7 @@ define void @store_v1i64(<1 x i64>** %pt
 define void @store_v1i64_update(<1 x i64>** %ptr, <1 x i64> %val) {
 ;CHECK-LABEL: store_v1i64_update:
 ;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <1 x i64>** %ptr
+	%A = load <1 x i64>*, <1 x i64>** %ptr
 	store  <1 x i64> %val, <1 x i64>* %A, align 1
 	%inc = getelementptr <1 x i64>, <1 x i64>* %A, i31 1
         store <1 x i64>* %inc, <1 x i64>** %ptr
@@ -96,7 +96,7 @@ define void @store_v1i64_update(<1 x i64
 define void @store_v16i8(<16 x i8>** %ptr, <16 x i8> %val) {
 ;CHECK-LABEL: store_v16i8:
 ;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <16 x i8>** %ptr
+	%A = load <16 x i8>*, <16 x i8>** %ptr
 	store  <16 x i8> %val, <16 x i8>* %A, align 1
 	ret void
 }
@@ -104,7 +104,7 @@ define void @store_v16i8(<16 x i8>** %pt
 define void @store_v16i8_update(<16 x i8>** %ptr, <16 x i8> %val) {
 ;CHECK-LABEL: store_v16i8_update:
 ;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <16 x i8>** %ptr
+	%A = load <16 x i8>*, <16 x i8>** %ptr
 	store  <16 x i8> %val, <16 x i8>* %A, align 1
 	%inc = getelementptr <16 x i8>, <16 x i8>* %A, i316 1
         store <16 x i8>* %inc, <16 x i8>** %ptr
@@ -114,7 +114,7 @@ define void @store_v16i8_update(<16 x i8
 define void @store_v8i16(<8 x i16>** %ptr, <8 x i16> %val) {
 ;CHECK-LABEL: store_v8i16:
 ;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <8 x i16>** %ptr
+	%A = load <8 x i16>*, <8 x i16>** %ptr
 	store  <8 x i16> %val, <8 x i16>* %A, align 1
 	ret void
 }
@@ -122,7 +122,7 @@ define void @store_v8i16(<8 x i16>** %pt
 define void @store_v8i16_update(<8 x i16>** %ptr, <8 x i16> %val) {
 ;CHECK-LABEL: store_v8i16_update:
 ;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <8 x i16>** %ptr
+	%A = load <8 x i16>*, <8 x i16>** %ptr
 	store  <8 x i16> %val, <8 x i16>* %A, align 1
 	%inc = getelementptr <8 x i16>, <8 x i16>* %A, i38 1
         store <8 x i16>* %inc, <8 x i16>** %ptr
@@ -132,7 +132,7 @@ define void @store_v8i16_update(<8 x i16
 define void @store_v4i32(<4 x i32>** %ptr, <4 x i32> %val) {
 ;CHECK-LABEL: store_v4i32:
 ;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <4 x i32>** %ptr
+	%A = load <4 x i32>*, <4 x i32>** %ptr
 	store  <4 x i32> %val, <4 x i32>* %A, align 1
 	ret void
 }
@@ -140,7 +140,7 @@ define void @store_v4i32(<4 x i32>** %pt
 define void @store_v4i32_update(<4 x i32>** %ptr, <4 x i32> %val) {
 ;CHECK-LABEL: store_v4i32_update:
 ;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <4 x i32>** %ptr
+	%A = load <4 x i32>*, <4 x i32>** %ptr
 	store  <4 x i32> %val, <4 x i32>* %A, align 1
 	%inc = getelementptr <4 x i32>, <4 x i32>* %A, i34 1
         store <4 x i32>* %inc, <4 x i32>** %ptr
@@ -150,7 +150,7 @@ define void @store_v4i32_update(<4 x i32
 define void @store_v4f32(<4 x float>** %ptr, <4 x float> %val) {
 ;CHECK-LABEL: store_v4f32:
 ;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <4 x float>** %ptr
+	%A = load <4 x float>*, <4 x float>** %ptr
 	store  <4 x float> %val, <4 x float>* %A, align 1
 	ret void
 }
@@ -158,7 +158,7 @@ define void @store_v4f32(<4 x float>** %
 define void @store_v4f32_update(<4 x float>** %ptr, <4 x float> %val) {
 ;CHECK-LABEL: store_v4f32_update:
 ;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <4 x float>** %ptr
+	%A = load <4 x float>*, <4 x float>** %ptr
 	store  <4 x float> %val, <4 x float>* %A, align 1
 	%inc = getelementptr <4 x float>, <4 x float>* %A, i34 1
         store <4 x float>* %inc, <4 x float>** %ptr
@@ -168,7 +168,7 @@ define void @store_v4f32_update(<4 x flo
 define void @store_v2i64(<2 x i64>** %ptr, <2 x i64> %val) {
 ;CHECK-LABEL: store_v2i64:
 ;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
-	%A = load <2 x i64>** %ptr
+	%A = load <2 x i64>*, <2 x i64>** %ptr
 	store  <2 x i64> %val, <2 x i64>* %A, align 1
 	ret void
 }
@@ -176,7 +176,7 @@ define void @store_v2i64(<2 x i64>** %pt
 define void @store_v2i64_update(<2 x i64>** %ptr, <2 x i64> %val) {
 ;CHECK-LABEL: store_v2i64_update:
 ;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <2 x i64>** %ptr
+	%A = load <2 x i64>*, <2 x i64>** %ptr
 	store  <2 x i64> %val, <2 x i64>* %A, align 1
 	%inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
         store <2 x i64>* %inc, <2 x i64>** %ptr
@@ -186,7 +186,7 @@ define void @store_v2i64_update(<2 x i64
 define void @store_v2i64_update_aligned2(<2 x i64>** %ptr, <2 x i64> %val) {
 ;CHECK-LABEL: store_v2i64_update_aligned2:
 ;CHECK: vst1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <2 x i64>** %ptr
+	%A = load <2 x i64>*, <2 x i64>** %ptr
 	store  <2 x i64> %val, <2 x i64>* %A, align 2
 	%inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
         store <2 x i64>* %inc, <2 x i64>** %ptr
@@ -196,7 +196,7 @@ define void @store_v2i64_update_aligned2
 define void @store_v2i64_update_aligned4(<2 x i64>** %ptr, <2 x i64> %val) {
 ;CHECK-LABEL: store_v2i64_update_aligned4:
 ;CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <2 x i64>** %ptr
+	%A = load <2 x i64>*, <2 x i64>** %ptr
 	store  <2 x i64> %val, <2 x i64>* %A, align 4
 	%inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
         store <2 x i64>* %inc, <2 x i64>** %ptr
@@ -206,7 +206,7 @@ define void @store_v2i64_update_aligned4
 define void @store_v2i64_update_aligned8(<2 x i64>** %ptr, <2 x i64> %val) {
 ;CHECK-LABEL: store_v2i64_update_aligned8:
 ;CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
-	%A = load <2 x i64>** %ptr
+	%A = load <2 x i64>*, <2 x i64>** %ptr
 	store  <2 x i64> %val, <2 x i64>* %A, align 8
 	%inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
         store <2 x i64>* %inc, <2 x i64>** %ptr
@@ -216,7 +216,7 @@ define void @store_v2i64_update_aligned8
 define void @store_v2i64_update_aligned16(<2 x i64>** %ptr, <2 x i64> %val) {
 ;CHECK-LABEL: store_v2i64_update_aligned16:
 ;CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:128]!
-	%A = load <2 x i64>** %ptr
+	%A = load <2 x i64>*, <2 x i64>** %ptr
 	store  <2 x i64> %val, <2 x i64>* %A, align 16
 	%inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
         store <2 x i64>* %inc, <2 x i64>** %ptr
@@ -232,7 +232,7 @@ define void @truncstore_v4i32tov4i8(<4 x
 ;CHECK: vuzp.8  [[VECLO]], {{d[0-9]+}}
 ;CHECK: ldr     r[[PTRREG:[0-9]+]], [r0]
 ;CHECK: vst1.32 {[[VECLO]][0]}, [r[[PTRREG]]:32]
-	%A = load <4 x i8>** %ptr
+	%A = load <4 x i8>*, <4 x i8>** %ptr
         %trunc = trunc <4 x i32> %val to <4 x i8>
 	store  <4 x i8> %trunc, <4 x i8>* %A, align 4
 	ret void
@@ -249,7 +249,7 @@ define void @truncstore_v4i32tov4i8_fake
 ;CHECK: ldr     r[[PTRREG:[0-9]+]], [r0]
 ;CHECK: vst1.32 {[[VECLO]][0]}, [r[[PTRREG]]:32], [[IMM16]]
 ;CHECK: str     r[[PTRREG]], [r0]
-	%A = load <4 x i8>** %ptr
+	%A = load <4 x i8>*, <4 x i8>** %ptr
         %trunc = trunc <4 x i32> %val to <4 x i8>
 	store  <4 x i8> %trunc, <4 x i8>* %A, align 4
 	%inc = getelementptr <4 x i8>, <4 x i8>* %A, i38 4

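In the _update variants the advanced pointer is stored back through %ptr, and it is that second store which lets isel fold the vector store and the increment into a single vst1 with the writeback marker (!). The pattern, reduced to its essentials (a sketch, assuming the same thumbv7s target as the test; register numbers are placeholders):

  %A   = load <2 x i32>*, <2 x i32>** %ptr          ; current destination
  store <2 x i32> %val, <2 x i32>* %A, align 1      ; vst1.8 {d16}, [r?]!
  %inc = getelementptr <2 x i32>, <2 x i32>* %A, i32 1
  store <2 x i32>* %inc, <2 x i32>** %ptr           ; write back the new pointer
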
Modified: llvm/trunk/test/CodeGen/ARM/vext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vext.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vext.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: test_vextd:
 ;CHECK: vext
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @test_vextd(<8 x i8>* %A
 define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: test_vextRd:
 ;CHECK: vext
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
 	ret <8 x i8> %tmp3
 }
@@ -21,8 +21,8 @@ define <8 x i8> @test_vextRd(<8 x i8>* %
 define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: test_vextq:
 ;CHECK: vext
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
 	ret <16 x i8> %tmp3
 }
@@ -30,8 +30,8 @@ define <16 x i8> @test_vextq(<16 x i8>*
 define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: test_vextRq:
 ;CHECK: vext
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
 	ret <16 x i8> %tmp3
 }
@@ -39,8 +39,8 @@ define <16 x i8> @test_vextRq(<16 x i8>*
 define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: test_vextd16:
 ;CHECK: vext
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
 	ret <4 x i16> %tmp3
 }
@@ -48,8 +48,8 @@ define <4 x i16> @test_vextd16(<4 x i16>
 define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: test_vextq32:
 ;CHECK: vext
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
 	ret <4 x i32> %tmp3
 }
@@ -59,8 +59,8 @@ define <4 x i32> @test_vextq32(<4 x i32>
 define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: test_vextd_undef:
 ;CHECK: vext
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 undef, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10>
 	ret <8 x i8> %tmp3
 }
@@ -68,8 +68,8 @@ define <8 x i8> @test_vextd_undef(<8 x i
 define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: test_vextRq_undef:
 ;CHECK: vext
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 undef, i32 undef, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 undef, i32 6>
 	ret <16 x i8> %tmp3
 }
@@ -118,8 +118,8 @@ define <4 x i16> @test_interleaved(<8 x
 ;CHECK: vext.16
 ;CHECK-NOT: vext.16
 ;CHECK: vzip.16
-        %tmp1 = load <8 x i16>* %A
-        %tmp2 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 3, i32 8, i32 5, i32 9>
         ret <4 x i16> %tmp3
 }
@@ -128,8 +128,8 @@ define <4 x i16> @test_interleaved(<8 x
 define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: test_undef:
 ;CHECK: vzip.16
-        %tmp1 = load <8 x i16>* %A
-        %tmp2 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 undef, i32 8, i32 5, i32 9>
         ret <4 x i16> %tmp3
 }
@@ -143,7 +143,7 @@ define <4 x i16> @test_multisource(<32 x
 ;CHECK: vmov.16 [[REG]][1]
 ;CHECK: vmov.16 [[REG]][2]
 ;CHECK: vmov.16 [[REG]][3]
-        %tmp1 = load <32 x i16>* %B
+        %tmp1 = load <32 x i16>, <32 x i16>* %B
         %tmp2 = shufflevector <32 x i16> %tmp1, <32 x i16> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
         ret <4 x i16> %tmp2
 }
@@ -156,7 +156,7 @@ define <4 x i16> @test_largespan(<8 x i1
 ;CHECK: vmov.16 [[REG]][1]
 ;CHECK: vmov.16 [[REG]][2]
 ;CHECK: vmov.16 [[REG]][3]
-        %tmp1 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %B
         %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
         ret <4 x i16> %tmp2
 }
@@ -174,8 +174,8 @@ define <8 x i16> @test_illegal(<8 x i16>
 ;CHECK: vmov.16 [[REG2]][1]
 ;CHECK: vmov.16 [[REG2]][2]
 ;CHECK: vmov.16 [[REG2]][3]
-       %tmp1 = load <8 x i16>* %A
-       %tmp2 = load <8 x i16>* %B
+       %tmp1 = load <8 x i16>, <8 x i16>* %A
+       %tmp2 = load <8 x i16>, <8 x i16>* %B
        %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 7, i32 5, i32 13, i32 3, i32 2, i32 2, i32 9>
        ret <8 x i16> %tmp3
 }
@@ -185,7 +185,7 @@ define <8 x i16> @test_illegal(<8 x i16>
 define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16>* nocapture %dest) nounwind {
 ; CHECK-LABEL: test_elem_mismatch:
 ; CHECK: vstr
-  %tmp0 = load <2 x i64>* %src, align 16
+  %tmp0 = load <2 x i64>, <2 x i64>* %src, align 16
   %tmp1 = bitcast <2 x i64> %tmp0 to <4 x i32>
   %tmp2 = extractelement <4 x i32> %tmp1, i32 0
   %tmp3 = extractelement <4 x i32> %tmp1, i32 2

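The VEXT immediate is simply the starting index of the contiguous window the shuffle mask selects from the concatenated operands. For the first test above (a reading of the CHECK lines, not new behavior; register names are placeholders):

  ; <i32 3, i32 4, ..., i32 10> is a contiguous window starting at
  ; element 3 of %tmp1:%tmp2, i.e. vext.8 d0, d0, d1, #3
  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>

The multisource and largespan tests then pin the negative side: when no such window exists, the backend falls back to element-by-element vmov.
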
Modified: llvm/trunk/test/CodeGen/ARM/vfcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vfcmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vfcmp.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vfcmp.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@ define <2 x i32> @vcunef32(<2 x float>*
 ;CHECK-LABEL: vcunef32:
 ;CHECK: vceq.f32
 ;CHECK-NEXT: vmvn
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp une <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -18,8 +18,8 @@ define <2 x i32> @vcunef32(<2 x float>*
 define <2 x i32> @vcoltf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vcoltf32:
 ;CHECK: vcgt.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp olt <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -29,8 +29,8 @@ define <2 x i32> @vcoltf32(<2 x float>*
 define <2 x i32> @vcolef32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vcolef32:
 ;CHECK: vcge.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp ole <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -41,8 +41,8 @@ define <2 x i32> @vcugef32(<2 x float>*
 ;CHECK-LABEL: vcugef32:
 ;CHECK: vcgt.f32
 ;CHECK-NEXT: vmvn
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp uge <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -53,8 +53,8 @@ define <2 x i32> @vculef32(<2 x float>*
 ;CHECK-LABEL: vculef32:
 ;CHECK: vcgt.f32
 ;CHECK-NEXT: vmvn
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp ule <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -65,8 +65,8 @@ define <2 x i32> @vcugtf32(<2 x float>*
 ;CHECK-LABEL: vcugtf32:
 ;CHECK: vcge.f32
 ;CHECK-NEXT: vmvn
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp ugt <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -77,8 +77,8 @@ define <2 x i32> @vcultf32(<2 x float>*
 ;CHECK-LABEL: vcultf32:
 ;CHECK: vcge.f32
 ;CHECK-NEXT: vmvn
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp ult <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -91,8 +91,8 @@ define <2 x i32> @vcueqf32(<2 x float>*
 ;CHECK-NEXT: vcgt.f32
 ;CHECK-NEXT: vorr
 ;CHECK-NEXT: vmvn
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp ueq <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -104,8 +104,8 @@ define <2 x i32> @vconef32(<2 x float>*
 ;CHECK: vcgt.f32
 ;CHECK-NEXT: vcgt.f32
 ;CHECK-NEXT: vorr
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp one <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -118,8 +118,8 @@ define <2 x i32> @vcunof32(<2 x float>*
 ;CHECK-NEXT: vcgt.f32
 ;CHECK-NEXT: vorr
 ;CHECK-NEXT: vmvn
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp uno <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -131,8 +131,8 @@ define <2 x i32> @vcordf32(<2 x float>*
 ;CHECK: vcge.f32
 ;CHECK-NEXT: vcgt.f32
 ;CHECK-NEXT: vorr
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fcmp ord <2 x float> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4

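NEON provides only vceq/vcge/vcgt on f32; every other predicate in this file is synthesized by swapping operands (olt becomes vcgt with the operands reversed) and, for the unordered forms, by inverting the ordered complement with vmvn. Worked case, matching the vcultf32 CHECK lines: ult(a, b) is !(a >= b), so the lowering is (registers are placeholders)

  vcge.f32 d16, d16, d17   ; ordered a >= b
  vmvn     d16, d16        ; invert: unordered or less-than
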
Modified: llvm/trunk/test/CodeGen/ARM/vfp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vfp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vfp.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vfp.ll Fri Feb 27 15:17:42 2015
@@ -2,8 +2,8 @@
 ; RUN: llc < %s -mtriple=arm-apple-ios -mattr=+vfp2 -disable-post-ra -regalloc=basic | FileCheck %s
 
 define void @test(float* %P, double* %D) {
-	%A = load float* %P		; <float> [#uses=1]
-	%B = load double* %D		; <double> [#uses=1]
+	%A = load float, float* %P		; <float> [#uses=1]
+	%B = load double, double* %D		; <double> [#uses=1]
 	store float %A, float* %P
 	store double %B, double* %D
 	ret void
@@ -15,11 +15,11 @@ declare double @fabs(double)
 
 define void @test_abs(float* %P, double* %D) {
 ;CHECK-LABEL: test_abs:
-	%a = load float* %P		; <float> [#uses=1]
+	%a = load float, float* %P		; <float> [#uses=1]
 ;CHECK: vabs.f32
 	%b = call float @fabsf( float %a ) readnone	; <float> [#uses=1]
 	store float %b, float* %P
-	%A = load double* %D		; <double> [#uses=1]
+	%A = load double, double* %D		; <double> [#uses=1]
 ;CHECK: vabs.f64
 	%B = call double @fabs( double %A ) readnone	; <double> [#uses=1]
 	store double %B, double* %D
@@ -28,10 +28,10 @@ define void @test_abs(float* %P, double*
 
 define void @test_add(float* %P, double* %D) {
 ;CHECK-LABEL: test_add:
-	%a = load float* %P		; <float> [#uses=2]
+	%a = load float, float* %P		; <float> [#uses=2]
 	%b = fadd float %a, %a		; <float> [#uses=1]
 	store float %b, float* %P
-	%A = load double* %D		; <double> [#uses=2]
+	%A = load double, double* %D		; <double> [#uses=2]
 	%B = fadd double %A, %A		; <double> [#uses=1]
 	store double %B, double* %D
 	ret void
@@ -39,11 +39,11 @@ define void @test_add(float* %P, double*
 
 define void @test_ext_round(float* %P, double* %D) {
 ;CHECK-LABEL: test_ext_round:
-	%a = load float* %P		; <float> [#uses=1]
+	%a = load float, float* %P		; <float> [#uses=1]
 ;CHECK: vcvt.f64.f32
 ;CHECK: vcvt.f32.f64
 	%b = fpext float %a to double		; <double> [#uses=1]
-	%A = load double* %D		; <double> [#uses=1]
+	%A = load double, double* %D		; <double> [#uses=1]
 	%B = fptrunc double %A to float		; <float> [#uses=1]
 	store double %b, double* %D
 	store float %B, float* %P
@@ -52,9 +52,9 @@ define void @test_ext_round(float* %P, d
 
 define void @test_fma(float* %P1, float* %P2, float* %P3) {
 ;CHECK-LABEL: test_fma:
-	%a1 = load float* %P1		; <float> [#uses=1]
-	%a2 = load float* %P2		; <float> [#uses=1]
-	%a3 = load float* %P3		; <float> [#uses=1]
+	%a1 = load float, float* %P1		; <float> [#uses=1]
+	%a2 = load float, float* %P2		; <float> [#uses=1]
+	%a3 = load float, float* %P3		; <float> [#uses=1]
 ;CHECK: vnmls.f32
 	%X = fmul float %a1, %a2		; <float> [#uses=1]
 	%Y = fsub float %X, %a3		; <float> [#uses=1]
@@ -64,7 +64,7 @@ define void @test_fma(float* %P1, float*
 
 define i32 @test_ftoi(float* %P1) {
 ;CHECK-LABEL: test_ftoi:
-	%a1 = load float* %P1		; <float> [#uses=1]
+	%a1 = load float, float* %P1		; <float> [#uses=1]
 ;CHECK: vcvt.s32.f32
 	%b1 = fptosi float %a1 to i32		; <i32> [#uses=1]
 	ret i32 %b1
@@ -72,7 +72,7 @@ define i32 @test_ftoi(float* %P1) {
 
 define i32 @test_ftou(float* %P1) {
 ;CHECK-LABEL: test_ftou:
-	%a1 = load float* %P1		; <float> [#uses=1]
+	%a1 = load float, float* %P1		; <float> [#uses=1]
 ;CHECK: vcvt.u32.f32
 	%b1 = fptoui float %a1 to i32		; <i32> [#uses=1]
 	ret i32 %b1
@@ -80,7 +80,7 @@ define i32 @test_ftou(float* %P1) {
 
 define i32 @test_dtoi(double* %P1) {
 ;CHECK-LABEL: test_dtoi:
-	%a1 = load double* %P1		; <double> [#uses=1]
+	%a1 = load double, double* %P1		; <double> [#uses=1]
 ;CHECK: vcvt.s32.f64
 	%b1 = fptosi double %a1 to i32		; <i32> [#uses=1]
 	ret i32 %b1
@@ -88,7 +88,7 @@ define i32 @test_dtoi(double* %P1) {
 
 define i32 @test_dtou(double* %P1) {
 ;CHECK-LABEL: test_dtou:
-	%a1 = load double* %P1		; <double> [#uses=1]
+	%a1 = load double, double* %P1		; <double> [#uses=1]
 ;CHECK: vcvt.u32.f64
 	%b1 = fptoui double %a1 to i32		; <i32> [#uses=1]
 	ret i32 %b1
@@ -113,9 +113,9 @@ define void @test_utod2(double* %P1, i8
 define void @test_cmp(float* %glob, i32 %X) {
 ;CHECK-LABEL: test_cmp:
 entry:
-	%tmp = load float* %glob		; <float> [#uses=2]
+	%tmp = load float, float* %glob		; <float> [#uses=2]
 	%tmp3 = getelementptr float, float* %glob, i32 2		; <float*> [#uses=1]
-	%tmp4 = load float* %tmp3		; <float> [#uses=2]
+	%tmp4 = load float, float* %tmp3		; <float> [#uses=2]
 	%tmp.upgrd.1 = fcmp oeq float %tmp, %tmp4		; <i1> [#uses=1]
 	%tmp5 = fcmp uno float %tmp, %tmp4		; <i1> [#uses=1]
 	%tmp6 = or i1 %tmp.upgrd.1, %tmp5		; <i1> [#uses=1]
@@ -141,7 +141,7 @@ declare i32 @baz(...)
 define void @test_cmpfp0(float* %glob, i32 %X) {
 ;CHECK-LABEL: test_cmpfp0:
 entry:
-	%tmp = load float* %glob		; <float> [#uses=1]
+	%tmp = load float, float* %glob		; <float> [#uses=1]
 ;CHECK: vcmpe.f32
 	%tmp.upgrd.3 = fcmp ogt float %tmp, 0.000000e+00		; <i1> [#uses=1]
 	br i1 %tmp.upgrd.3, label %cond_true, label %cond_false

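The test_fma CHECK line works because VNMLS computes -Sd + Sn*Sm, which is the fmul/fsub pair with %a3 placed in the accumulator register (a reading of the test, not new behavior):

  %X = fmul float %a1, %a2
  %Y = fsub float %X, %a3   ; = -a3 + a1*a2, i.e. vnmls.f32
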
Modified: llvm/trunk/test/CodeGen/ARM/vget_lane.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vget_lane.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vget_lane.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vget_lane.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ target triple = "thumbv7-elf"
 define i32 @vget_lanes8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vget_lanes8:
 ;CHECK: vmov.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = extractelement <8 x i8> %tmp1, i32 1
 	%tmp3 = sext i8 %tmp2 to i32
 	ret i32 %tmp3
@@ -14,7 +14,7 @@ define i32 @vget_lanes8(<8 x i8>* %A) no
 define i32 @vget_lanes16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vget_lanes16:
 ;CHECK: vmov.s16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = extractelement <4 x i16> %tmp1, i32 1
 	%tmp3 = sext i16 %tmp2 to i32
 	ret i32 %tmp3
@@ -23,7 +23,7 @@ define i32 @vget_lanes16(<4 x i16>* %A)
 define i32 @vget_laneu8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vget_laneu8:
 ;CHECK: vmov.u8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = extractelement <8 x i8> %tmp1, i32 1
 	%tmp3 = zext i8 %tmp2 to i32
 	ret i32 %tmp3
@@ -32,7 +32,7 @@ define i32 @vget_laneu8(<8 x i8>* %A) no
 define i32 @vget_laneu16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vget_laneu16:
 ;CHECK: vmov.u16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = extractelement <4 x i16> %tmp1, i32 1
 	%tmp3 = zext i16 %tmp2 to i32
 	ret i32 %tmp3
@@ -42,7 +42,7 @@ define i32 @vget_laneu16(<4 x i16>* %A)
 define i32 @vget_lanei32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vget_lanei32:
 ;CHECK: vmov.32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = add <2 x i32> %tmp1, %tmp1
 	%tmp3 = extractelement <2 x i32> %tmp2, i32 1
 	ret i32 %tmp3
@@ -51,7 +51,7 @@ define i32 @vget_lanei32(<2 x i32>* %A)
 define i32 @vgetQ_lanes8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vgetQ_lanes8:
 ;CHECK: vmov.s8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = extractelement <16 x i8> %tmp1, i32 1
 	%tmp3 = sext i8 %tmp2 to i32
 	ret i32 %tmp3
@@ -60,7 +60,7 @@ define i32 @vgetQ_lanes8(<16 x i8>* %A)
 define i32 @vgetQ_lanes16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vgetQ_lanes16:
 ;CHECK: vmov.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = extractelement <8 x i16> %tmp1, i32 1
 	%tmp3 = sext i16 %tmp2 to i32
 	ret i32 %tmp3
@@ -69,7 +69,7 @@ define i32 @vgetQ_lanes16(<8 x i16>* %A)
 define i32 @vgetQ_laneu8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vgetQ_laneu8:
 ;CHECK: vmov.u8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = extractelement <16 x i8> %tmp1, i32 1
 	%tmp3 = zext i8 %tmp2 to i32
 	ret i32 %tmp3
@@ -78,7 +78,7 @@ define i32 @vgetQ_laneu8(<16 x i8>* %A)
 define i32 @vgetQ_laneu16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vgetQ_laneu16:
 ;CHECK: vmov.u16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = extractelement <8 x i16> %tmp1, i32 1
 	%tmp3 = zext i16 %tmp2 to i32
 	ret i32 %tmp3
@@ -88,7 +88,7 @@ define i32 @vgetQ_laneu16(<8 x i16>* %A)
 define i32 @vgetQ_lanei32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vgetQ_lanei32:
 ;CHECK: vmov.32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = add <4 x i32> %tmp1, %tmp1
 	%tmp3 = extractelement <4 x i32> %tmp2, i32 1
 	ret i32 %tmp3
@@ -100,7 +100,7 @@ entry:
   %arg0_uint16x4_t = alloca <4 x i16>             ; <<4 x i16>*> [#uses=1]
   %out_uint16_t = alloca i16                      ; <i16*> [#uses=1]
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
-  %0 = load <4 x i16>* %arg0_uint16x4_t, align 8  ; <<4 x i16>> [#uses=1]
+  %0 = load <4 x i16>, <4 x i16>* %arg0_uint16x4_t, align 8  ; <<4 x i16>> [#uses=1]
   %1 = extractelement <4 x i16> %0, i32 1         ; <i16> [#uses=1]
   %2 = add i16 %1, %1
   store i16 %2, i16* %out_uint16_t, align 2
@@ -116,7 +116,7 @@ entry:
   %arg0_uint8x8_t = alloca <8 x i8>               ; <<8 x i8>*> [#uses=1]
   %out_uint8_t = alloca i8                        ; <i8*> [#uses=1]
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
-  %0 = load <8 x i8>* %arg0_uint8x8_t, align 8    ; <<8 x i8>> [#uses=1]
+  %0 = load <8 x i8>, <8 x i8>* %arg0_uint8x8_t, align 8    ; <<8 x i8>> [#uses=1]
   %1 = extractelement <8 x i8> %0, i32 1          ; <i8> [#uses=1]
   %2 = add i8 %1, %1
   store i8 %2, i8* %out_uint8_t, align 1
@@ -132,7 +132,7 @@ entry:
   %arg0_uint16x8_t = alloca <8 x i16>             ; <<8 x i16>*> [#uses=1]
   %out_uint16_t = alloca i16                      ; <i16*> [#uses=1]
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
-  %0 = load <8 x i16>* %arg0_uint16x8_t, align 16 ; <<8 x i16>> [#uses=1]
+  %0 = load <8 x i16>, <8 x i16>* %arg0_uint16x8_t, align 16 ; <<8 x i16>> [#uses=1]
   %1 = extractelement <8 x i16> %0, i32 1         ; <i16> [#uses=1]
   %2 = add i16 %1, %1
   store i16 %2, i16* %out_uint16_t, align 2
@@ -148,7 +148,7 @@ entry:
   %arg0_uint8x16_t = alloca <16 x i8>             ; <<16 x i8>*> [#uses=1]
   %out_uint8_t = alloca i8                        ; <i8*> [#uses=1]
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
-  %0 = load <16 x i8>* %arg0_uint8x16_t, align 16 ; <<16 x i8>> [#uses=1]
+  %0 = load <16 x i8>, <16 x i8>* %arg0_uint8x16_t, align 16 ; <<16 x i8>> [#uses=1]
   %1 = extractelement <16 x i8> %0, i32 1         ; <i8> [#uses=1]
   %2 = add i8 %1, %1
   store i8 %2, i8* %out_uint8_t, align 1
@@ -161,7 +161,7 @@ return:
 define <8 x i8> @vset_lane8(<8 x i8>* %A, i8 %B) nounwind {
 ;CHECK-LABEL: vset_lane8:
 ;CHECK: vmov.8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = insertelement <8 x i8> %tmp1, i8 %B, i32 1
 	ret <8 x i8> %tmp2
 }
@@ -169,7 +169,7 @@ define <8 x i8> @vset_lane8(<8 x i8>* %A
 define <4 x i16> @vset_lane16(<4 x i16>* %A, i16 %B) nounwind {
 ;CHECK-LABEL: vset_lane16:
 ;CHECK: vmov.16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = insertelement <4 x i16> %tmp1, i16 %B, i32 1
 	ret <4 x i16> %tmp2
 }
@@ -177,7 +177,7 @@ define <4 x i16> @vset_lane16(<4 x i16>*
 define <2 x i32> @vset_lane32(<2 x i32>* %A, i32 %B) nounwind {
 ;CHECK-LABEL: vset_lane32:
 ;CHECK: vmov.32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = insertelement <2 x i32> %tmp1, i32 %B, i32 1
 	ret <2 x i32> %tmp2
 }
@@ -185,7 +185,7 @@ define <2 x i32> @vset_lane32(<2 x i32>*
 define <16 x i8> @vsetQ_lane8(<16 x i8>* %A, i8 %B) nounwind {
 ;CHECK-LABEL: vsetQ_lane8:
 ;CHECK: vmov.8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = insertelement <16 x i8> %tmp1, i8 %B, i32 1
 	ret <16 x i8> %tmp2
 }
@@ -193,7 +193,7 @@ define <16 x i8> @vsetQ_lane8(<16 x i8>*
 define <8 x i16> @vsetQ_lane16(<8 x i16>* %A, i16 %B) nounwind {
 ;CHECK-LABEL: vsetQ_lane16:
 ;CHECK: vmov.16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = insertelement <8 x i16> %tmp1, i16 %B, i32 1
 	ret <8 x i16> %tmp2
 }
@@ -201,7 +201,7 @@ define <8 x i16> @vsetQ_lane16(<8 x i16>
 define <4 x i32> @vsetQ_lane32(<4 x i32>* %A, i32 %B) nounwind {
 ;CHECK-LABEL: vsetQ_lane32:
 ;CHECK: vmov.32 d{{.*}}[1], r1
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = insertelement <4 x i32> %tmp1, i32 %B, i32 1
 	ret <4 x i32> %tmp2
 }
@@ -219,14 +219,14 @@ entry:
 ; be an immediate constant.  Make sure a variable lane number is handled.
 
 define i32 @vget_variable_lanes8(<8 x i8>* %A, i32 %B) nounwind {
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = extractelement <8 x i8> %tmp1, i32 %B
 	%tmp3 = sext i8 %tmp2 to i32
 	ret i32 %tmp3
 }
 
 define i32 @vgetQ_variable_lanei32(<4 x i32>* %A, i32 %B) nounwind {
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = add <4 x i32> %tmp1, %tmp1
 	%tmp3 = extractelement <4 x i32> %tmp2, i32 %B
 	ret i32 %tmp3

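The signed/unsigned suffix on vmov is driven entirely by the extension that follows the extractelement in the IR; the extract itself carries no signedness. In outline (illustrative):

  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %elt  = extractelement <8 x i8> %tmp1, i32 1
  %s    = sext i8 %elt to i32   ; selected as vmov.s8
  %z    = zext i8 %elt to i32   ; selected as vmov.u8
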
Modified: llvm/trunk/test/CodeGen/ARM/vhadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vhadd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vhadd.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vhadd.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vhadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vhadds8:
 ;CHECK: vhadd.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vhadds8(<8 x i8>* %A, <
 define <4 x i16> @vhadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vhadds16:
 ;CHECK: vhadd.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vhadds16(<4 x i16>* %A
 define <2 x i32> @vhadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vhadds32:
 ;CHECK: vhadd.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vhadds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vhadds32(<2 x i32>* %A
 define <8 x i8> @vhaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vhaddu8:
 ;CHECK: vhadd.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -39,8 +39,8 @@ define <8 x i8> @vhaddu8(<8 x i8>* %A, <
 define <4 x i16> @vhaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vhaddu16:
 ;CHECK: vhadd.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -48,8 +48,8 @@ define <4 x i16> @vhaddu16(<4 x i16>* %A
 define <2 x i32> @vhaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vhaddu32:
 ;CHECK: vhadd.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vhaddu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -57,8 +57,8 @@ define <2 x i32> @vhaddu32(<2 x i32>* %A
 define <16 x i8> @vhaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vhaddQs8:
 ;CHECK: vhadd.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -66,8 +66,8 @@ define <16 x i8> @vhaddQs8(<16 x i8>* %A
 define <8 x i16> @vhaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vhaddQs16:
 ;CHECK: vhadd.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -75,8 +75,8 @@ define <8 x i16> @vhaddQs16(<8 x i16>* %
 define <4 x i32> @vhaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vhaddQs32:
 ;CHECK: vhadd.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vhadds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -84,8 +84,8 @@ define <4 x i32> @vhaddQs32(<4 x i32>* %
 define <16 x i8> @vhaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vhaddQu8:
 ;CHECK: vhadd.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -93,8 +93,8 @@ define <16 x i8> @vhaddQu8(<16 x i8>* %A
 define <8 x i16> @vhaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vhaddQu16:
 ;CHECK: vhadd.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -102,8 +102,8 @@ define <8 x i16> @vhaddQu16(<8 x i16>* %
 define <4 x i32> @vhaddQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vhaddQu32:
 ;CHECK: vhadd.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vhaddu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -127,8 +127,8 @@ declare <4 x i32> @llvm.arm.neon.vhaddu.
 define <8 x i8> @vrhadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vrhadds8:
 ;CHECK: vrhadd.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrhadds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -136,8 +136,8 @@ define <8 x i8> @vrhadds8(<8 x i8>* %A,
 define <4 x i16> @vrhadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vrhadds16:
 ;CHECK: vrhadd.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrhadds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -145,8 +145,8 @@ define <4 x i16> @vrhadds16(<4 x i16>* %
 define <2 x i32> @vrhadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vrhadds32:
 ;CHECK: vrhadd.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrhadds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -154,8 +154,8 @@ define <2 x i32> @vrhadds32(<2 x i32>* %
 define <8 x i8> @vrhaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vrhaddu8:
 ;CHECK: vrhadd.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrhaddu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -163,8 +163,8 @@ define <8 x i8> @vrhaddu8(<8 x i8>* %A,
 define <4 x i16> @vrhaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vrhaddu16:
 ;CHECK: vrhadd.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrhaddu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -172,8 +172,8 @@ define <4 x i16> @vrhaddu16(<4 x i16>* %
 define <2 x i32> @vrhaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vrhaddu32:
 ;CHECK: vrhadd.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrhaddu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -181,8 +181,8 @@ define <2 x i32> @vrhaddu32(<2 x i32>* %
 define <16 x i8> @vrhaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vrhaddQs8:
 ;CHECK: vrhadd.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -190,8 +190,8 @@ define <16 x i8> @vrhaddQs8(<16 x i8>* %
 define <8 x i16> @vrhaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vrhaddQs16:
 ;CHECK: vrhadd.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -199,8 +199,8 @@ define <8 x i16> @vrhaddQs16(<8 x i16>*
 define <4 x i32> @vrhaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vrhaddQs32:
 ;CHECK: vrhadd.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vrhadds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -208,8 +208,8 @@ define <4 x i32> @vrhaddQs32(<4 x i32>*
 define <16 x i8> @vrhaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vrhaddQu8:
 ;CHECK: vrhadd.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -217,8 +217,8 @@ define <16 x i8> @vrhaddQu8(<16 x i8>* %
 define <8 x i16> @vrhaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vrhaddQu16:
 ;CHECK: vrhadd.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -226,8 +226,8 @@ define <8 x i16> @vrhaddQu16(<8 x i16>*
 define <4 x i32> @vrhaddQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vrhaddQu32:
 ;CHECK: vrhadd.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vrhaddu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }

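vhadd computes (a + b) >> 1 in a widened intermediate, so the sum never wraps: vhadd.u8 of 200 and 100 yields 150 even though 200 + 100 overflows i8. The rounding forms tested above (vrhadd) add one before shifting: (a + b + 1) >> 1. With the intrinsic spelling used in these tests (values illustrative):

  ; each lane: (200 + 100) >> 1 = 150, with no i8 overflow
  %r = call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> <i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200>, <8 x i8> <i8 100, i8 100, i8 100, i8 100, i8 100, i8 100, i8 100, i8 100>)
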
Modified: llvm/trunk/test/CodeGen/ARM/vhsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vhsub.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vhsub.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vhsub.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vhsubs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vhsubs8:
 ;CHECK: vhsub.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vhsubs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vhsubs8(<8 x i8>* %A, <
 define <4 x i16> @vhsubs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vhsubs16:
 ;CHECK: vhsub.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vhsubs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vhsubs16(<4 x i16>* %A
 define <2 x i32> @vhsubs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vhsubs32:
 ;CHECK: vhsub.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vhsubs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vhsubs32(<2 x i32>* %A
 define <8 x i8> @vhsubu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vhsubu8:
 ;CHECK: vhsub.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vhsubu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -39,8 +39,8 @@ define <8 x i8> @vhsubu8(<8 x i8>* %A, <
 define <4 x i16> @vhsubu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vhsubu16:
 ;CHECK: vhsub.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vhsubu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -48,8 +48,8 @@ define <4 x i16> @vhsubu16(<4 x i16>* %A
 define <2 x i32> @vhsubu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vhsubu32:
 ;CHECK: vhsub.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vhsubu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -57,8 +57,8 @@ define <2 x i32> @vhsubu32(<2 x i32>* %A
 define <16 x i8> @vhsubQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vhsubQs8:
 ;CHECK: vhsub.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vhsubs.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -66,8 +66,8 @@ define <16 x i8> @vhsubQs8(<16 x i8>* %A
 define <8 x i16> @vhsubQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vhsubQs16:
 ;CHECK: vhsub.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vhsubs.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -75,8 +75,8 @@ define <8 x i16> @vhsubQs16(<8 x i16>* %
 define <4 x i32> @vhsubQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vhsubQs32:
 ;CHECK: vhsub.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vhsubs.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -84,8 +84,8 @@ define <4 x i32> @vhsubQs32(<4 x i32>* %
 define <16 x i8> @vhsubQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vhsubQu8:
 ;CHECK: vhsub.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vhsubu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -93,8 +93,8 @@ define <16 x i8> @vhsubQu8(<16 x i8>* %A
 define <8 x i16> @vhsubQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vhsubQu16:
 ;CHECK: vhsub.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vhsubu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -102,8 +102,8 @@ define <8 x i16> @vhsubQu16(<8 x i16>* %
 define <4 x i32> @vhsubQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vhsubQu32:
 ;CHECK: vhsub.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vhsubu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/ARM/vicmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vicmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vicmp.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vicmp.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ define <8 x i8> @vcnei8(<8 x i8>* %A, <8
 ;CHECK-LABEL: vcnei8:
 ;CHECK: vceq.i8
 ;CHECK-NEXT: vmvn
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = icmp ne <8 x i8> %tmp1, %tmp2
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
@@ -21,8 +21,8 @@ define <4 x i16> @vcnei16(<4 x i16>* %A,
 ;CHECK-LABEL: vcnei16:
 ;CHECK: vceq.i16
 ;CHECK-NEXT: vmvn
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = icmp ne <4 x i16> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
@@ -32,8 +32,8 @@ define <2 x i32> @vcnei32(<2 x i32>* %A,
 ;CHECK-LABEL: vcnei32:
 ;CHECK: vceq.i32
 ;CHECK-NEXT: vmvn
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = icmp ne <2 x i32> %tmp1, %tmp2
         %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
@@ -43,8 +43,8 @@ define <16 x i8> @vcneQi8(<16 x i8>* %A,
 ;CHECK-LABEL: vcneQi8:
 ;CHECK: vceq.i8
 ;CHECK-NEXT: vmvn
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = icmp ne <16 x i8> %tmp1, %tmp2
         %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
 	ret <16 x i8> %tmp4
@@ -54,8 +54,8 @@ define <8 x i16> @vcneQi16(<8 x i16>* %A
 ;CHECK-LABEL: vcneQi16:
 ;CHECK: vceq.i16
 ;CHECK-NEXT: vmvn
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = icmp ne <8 x i16> %tmp1, %tmp2
         %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
@@ -65,8 +65,8 @@ define <4 x i32> @vcneQi32(<4 x i32>* %A
 ;CHECK-LABEL: vcneQi32:
 ;CHECK: vceq.i32
 ;CHECK-NEXT: vmvn
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = icmp ne <4 x i32> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
@@ -75,8 +75,8 @@ define <4 x i32> @vcneQi32(<4 x i32>* %A
 define <16 x i8> @vcltQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vcltQs8:
 ;CHECK: vcgt.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = icmp slt <16 x i8> %tmp1, %tmp2
         %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
 	ret <16 x i8> %tmp4
@@ -85,8 +85,8 @@ define <16 x i8> @vcltQs8(<16 x i8>* %A,
 define <4 x i16> @vcles16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vcles16:
 ;CHECK: vcge.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = icmp sle <4 x i16> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
@@ -95,8 +95,8 @@ define <4 x i16> @vcles16(<4 x i16>* %A,
 define <4 x i16> @vcltu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vcltu16:
 ;CHECK: vcgt.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = icmp ult <4 x i16> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
@@ -105,8 +105,8 @@ define <4 x i16> @vcltu16(<4 x i16>* %A,
 define <4 x i32> @vcleQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vcleQu32:
 ;CHECK: vcge.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = icmp ule <4 x i32> %tmp1, %tmp2
         %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4

Modified: llvm/trunk/test/CodeGen/ARM/vld1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vld1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vld1.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vld1.ll Fri Feb 27 15:17:42 2015
@@ -23,7 +23,7 @@ define <4 x i16> @vld1i16(i16* %A) nounw
 define <4 x i16> @vld1i16_update(i16** %ptr) nounwind {
 ;CHECK-LABEL: vld1i16_update:
 ;CHECK: vld1.16 {d16}, [{{r[0-9]+}}]!
-	%A = load i16** %ptr
+	%A = load i16*, i16** %ptr
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 1)
 	%tmp2 = getelementptr i16, i16* %A, i32 4
@@ -43,7 +43,7 @@ define <2 x i32> @vld1i32(i32* %A) nounw
 define <2 x i32> @vld1i32_update(i32** %ptr, i32 %inc) nounwind {
 ;CHECK-LABEL: vld1i32_update:
 ;CHECK: vld1.32 {d16}, [{{r[0-9]+}}], {{r[0-9]+}}
-	%A = load i32** %ptr
+	%A = load i32*, i32** %ptr
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 1)
 	%tmp2 = getelementptr i32, i32* %A, i32 %inc
@@ -79,7 +79,7 @@ define <16 x i8> @vld1Qi8(i8* %A) nounwi
 define <16 x i8> @vld1Qi8_update(i8** %ptr) nounwind {
 ;CHECK-LABEL: vld1Qi8_update:
 ;CHECK: vld1.8 {d16, d17}, [{{r[0-9]+}}:64]!
-	%A = load i8** %ptr
+	%A = load i8*, i8** %ptr
 	%tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8)
 	%tmp2 = getelementptr i8, i8* %A, i32 16
 	store i8* %tmp2, i8** %ptr

Modified: llvm/trunk/test/CodeGen/ARM/vld2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vld2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vld2.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vld2.ll Fri Feb 27 15:17:42 2015
@@ -60,7 +60,7 @@ define <2 x float> @vld2f(float* %A) nou
 define <2 x float> @vld2f_update(float** %ptr) nounwind {
 ;CHECK-LABEL: vld2f_update:
 ;CHECK: vld2.32 {d16, d17}, [r1]!
-	%A = load float** %ptr
+	%A = load float*, float** %ptr
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32(i8* %tmp0, i32 1)
 	%tmp2 = extractvalue %struct.__neon_float32x2x2_t %tmp1, 0
@@ -98,7 +98,7 @@ define <16 x i8> @vld2Qi8(i8* %A) nounwi
 define <16 x i8> @vld2Qi8_update(i8** %ptr, i32 %inc) nounwind {
 ;CHECK-LABEL: vld2Qi8_update:
 ;CHECK: vld2.8 {d16, d17, d18, d19}, [r2:128], r1
-	%A = load i8** %ptr
+	%A = load i8*, i8** %ptr
 	%tmp1 = call %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8(i8* %A, i32 16)
         %tmp2 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 1

Modified: llvm/trunk/test/CodeGen/ARM/vld3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vld3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vld3.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vld3.ll Fri Feb 27 15:17:42 2015
@@ -38,7 +38,7 @@ define <4 x i16> @vld3i16(i16* %A) nounw
 define <4 x i16> @vld3i16_update(i16** %ptr, i32 %inc) nounwind {
 ;CHECK-LABEL: vld3i16_update:
 ;CHECK: vld3.16 {d16, d17, d18}, [{{r[0-9]+}}], {{r[0-9]+}}
-	%A = load i16** %ptr
+	%A = load i16*, i16** %ptr
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3.v4i16(i8* %tmp0, i32 1)
 	%tmp2 = extractvalue %struct.__neon_int16x4x3_t %tmp1, 0
@@ -137,7 +137,7 @@ define <4 x i32> @vld3Qi32_update(i32**
 ;CHECK-LABEL: vld3Qi32_update:
 ;CHECK: vld3.32 {d16, d18, d20}, [r[[R:[0-9]+]]]!
 ;CHECK: vld3.32 {d17, d19, d21}, [r[[R]]]!
-	%A = load i32** %ptr
+	%A = load i32*, i32** %ptr
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = call %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3.v4i32(i8* %tmp0, i32 1)
 	%tmp2 = extractvalue %struct.__neon_int32x4x3_t %tmp1, 0

Modified: llvm/trunk/test/CodeGen/ARM/vld4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vld4.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vld4.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vld4.ll Fri Feb 27 15:17:42 2015
@@ -26,7 +26,7 @@ define <8 x i8> @vld4i8(i8* %A) nounwind
 define <8 x i8> @vld4i8_update(i8** %ptr, i32 %inc) nounwind {
 ;CHECK-LABEL: vld4i8_update:
 ;CHECK: vld4.8 {d16, d17, d18, d19}, [r2:128], r1
-	%A = load i8** %ptr
+	%A = load i8*, i8** %ptr
 	%tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8* %A, i32 16)
 	%tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0
 	%tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 2
@@ -126,7 +126,7 @@ define <8 x i16> @vld4Qi16_update(i16**
 ;CHECK-LABEL: vld4Qi16_update:
 ;CHECK: vld4.16 {d16, d18, d20, d22}, [r1:64]!
 ;CHECK: vld4.16 {d17, d19, d21, d23}, [r1:64]!
-	%A = load i16** %ptr
+	%A = load i16*, i16** %ptr
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16(i8* %tmp0, i32 8)
 	%tmp2 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 0

Modified: llvm/trunk/test/CodeGen/ARM/vlddup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vlddup.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vlddup.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vlddup.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define <8 x i8> @vld1dupi8(i8* %A) nounw
 ;CHECK-LABEL: vld1dupi8:
 ;Check the (default) alignment value.
 ;CHECK: vld1.8 {d16[]}, [r0]
-	%tmp1 = load i8* %A, align 8
+	%tmp1 = load i8, i8* %A, align 8
 	%tmp2 = insertelement <8 x i8> undef, i8 %tmp1, i32 0
 	%tmp3 = shufflevector <8 x i8> %tmp2, <8 x i8> undef, <8 x i32> zeroinitializer
         ret <8 x i8> %tmp3
@@ -14,7 +14,7 @@ define <4 x i16> @vld1dupi16(i16* %A) no
 ;CHECK-LABEL: vld1dupi16:
 ;Check the alignment value.  Max for this instruction is 16 bits:
 ;CHECK: vld1.16 {d16[]}, [r0:16]
-	%tmp1 = load i16* %A, align 8
+	%tmp1 = load i16, i16* %A, align 8
 	%tmp2 = insertelement <4 x i16> undef, i16 %tmp1, i32 0
 	%tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> undef, <4 x i32> zeroinitializer
         ret <4 x i16> %tmp3
@@ -24,7 +24,7 @@ define <2 x i32> @vld1dupi32(i32* %A) no
 ;CHECK-LABEL: vld1dupi32:
 ;Check the alignment value.  Max for this instruction is 32 bits:
 ;CHECK: vld1.32 {d16[]}, [r0:32]
-	%tmp1 = load i32* %A, align 8
+	%tmp1 = load i32, i32* %A, align 8
 	%tmp2 = insertelement <2 x i32> undef, i32 %tmp1, i32 0
 	%tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> undef, <2 x i32> zeroinitializer
         ret <2 x i32> %tmp3
@@ -33,7 +33,7 @@ define <2 x i32> @vld1dupi32(i32* %A) no
 define <2 x float> @vld1dupf(float* %A) nounwind {
 ;CHECK-LABEL: vld1dupf:
 ;CHECK: vld1.32 {d16[]}, [r0:32]
-	%tmp0 = load float* %A
+	%tmp0 = load float, float* %A
         %tmp1 = insertelement <2 x float> undef, float %tmp0, i32 0
         %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
         ret <2 x float> %tmp2
@@ -43,7 +43,7 @@ define <16 x i8> @vld1dupQi8(i8* %A) nou
 ;CHECK-LABEL: vld1dupQi8:
 ;Check the (default) alignment value.
 ;CHECK: vld1.8 {d16[], d17[]}, [r0]
-	%tmp1 = load i8* %A, align 8
+	%tmp1 = load i8, i8* %A, align 8
 	%tmp2 = insertelement <16 x i8> undef, i8 %tmp1, i32 0
 	%tmp3 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <16 x i32> zeroinitializer
         ret <16 x i8> %tmp3
@@ -52,7 +52,7 @@ define <16 x i8> @vld1dupQi8(i8* %A) nou
 define <4 x float> @vld1dupQf(float* %A) nounwind {
 ;CHECK-LABEL: vld1dupQf:
 ;CHECK: vld1.32 {d16[], d17[]}, [r0:32]
-        %tmp0 = load float* %A
+        %tmp0 = load float, float* %A
         %tmp1 = insertelement <4 x float> undef, float %tmp0, i32 0
         %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
         ret <4 x float> %tmp2
@@ -93,7 +93,7 @@ define <4 x i16> @vld2dupi16(i8* %A) nou
 define <4 x i16> @vld2dupi16_update(i16** %ptr) nounwind {
 ;CHECK-LABEL: vld2dupi16_update:
 ;CHECK: vld2.16 {d16[], d17[]}, [r1]!
-	%A = load i16** %ptr
+	%A = load i16*, i16** %ptr
         %A2 = bitcast i16* %A to i8*
 	%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %A2, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
 	%tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0
@@ -130,7 +130,7 @@ declare %struct.__neon_int2x32x2_t @llvm
 define <8 x i8> @vld3dupi8_update(i8** %ptr, i32 %inc) nounwind {
 ;CHECK-LABEL: vld3dupi8_update:
 ;CHECK: vld3.8 {d16[], d17[], d18[]}, [r2], r1
-	%A = load i8** %ptr
+	%A = load i8*, i8** %ptr
 	%tmp0 = tail call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8* %A, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, i32 0, i32 8)
 	%tmp1 = extractvalue %struct.__neon_int8x8x3_t %tmp0, 0
 	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> zeroinitializer
@@ -171,7 +171,7 @@ declare %struct.__neon_int16x4x3_t @llvm
 define <4 x i16> @vld4dupi16_update(i16** %ptr) nounwind {
 ;CHECK-LABEL: vld4dupi16_update:
 ;CHECK: vld4.16 {d16[], d17[], d18[], d19[]}, [r1]!
-	%A = load i16** %ptr
+	%A = load i16*, i16** %ptr
         %A2 = bitcast i16* %A to i8*
 	%tmp0 = tail call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8* %A2, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 1)
 	%tmp1 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 0

Modified: llvm/trunk/test/CodeGen/ARM/vldlane.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vldlane.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vldlane.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vldlane.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@ define <8 x i8> @vld1lanei8(i8* %A, <8 x
 ;CHECK-LABEL: vld1lanei8:
 ;Check the (default) alignment value.
 ;CHECK: vld1.8 {d16[3]}, [r0]
-	%tmp1 = load <8 x i8>* %B
-	%tmp2 = load i8* %A, align 8
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
+	%tmp2 = load i8, i8* %A, align 8
 	%tmp3 = insertelement <8 x i8> %tmp1, i8 %tmp2, i32 3
         ret <8 x i8> %tmp3
 }
@@ -17,8 +17,8 @@ define <4 x i16> @vld1lanei16(i16* %A, <
 ;CHECK-LABEL: vld1lanei16:
 ;Check the alignment value.  Max for this instruction is 16 bits:
 ;CHECK: vld1.16 {d16[2]}, [r0:16]
-	%tmp1 = load <4 x i16>* %B
-	%tmp2 = load i16* %A, align 8
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
+	%tmp2 = load i16, i16* %A, align 8
 	%tmp3 = insertelement <4 x i16> %tmp1, i16 %tmp2, i32 2
         ret <4 x i16> %tmp3
 }
@@ -27,8 +27,8 @@ define <2 x i32> @vld1lanei32(i32* %A, <
 ;CHECK-LABEL: vld1lanei32:
 ;Check the alignment value.  Max for this instruction is 32 bits:
 ;CHECK: vld1.32 {d16[1]}, [r0:32]
-	%tmp1 = load <2 x i32>* %B
-	%tmp2 = load i32* %A, align 8
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
+	%tmp2 = load i32, i32* %A, align 8
 	%tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
         ret <2 x i32> %tmp3
 }
@@ -37,8 +37,8 @@ define <2 x i32> @vld1lanei32a32(i32* %A
 ;CHECK-LABEL: vld1lanei32a32:
 ;Check the alignment value.  Legal values are none or :32.
 ;CHECK: vld1.32 {d16[1]}, [r0:32]
-	%tmp1 = load <2 x i32>* %B
-	%tmp2 = load i32* %A, align 4
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
+	%tmp2 = load i32, i32* %A, align 4
 	%tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
         ret <2 x i32> %tmp3
 }
@@ -46,8 +46,8 @@ define <2 x i32> @vld1lanei32a32(i32* %A
 define <2 x float> @vld1lanef(float* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vld1lanef:
 ;CHECK: vld1.32 {d16[1]}, [r0:32]
-	%tmp1 = load <2 x float>* %B
-	%tmp2 = load float* %A, align 4
+	%tmp1 = load <2 x float>, <2 x float>* %B
+	%tmp2 = load float, float* %A, align 4
 	%tmp3 = insertelement <2 x float> %tmp1, float %tmp2, i32 1
 	ret <2 x float> %tmp3
 }
@@ -55,8 +55,8 @@ define <2 x float> @vld1lanef(float* %A,
 define <16 x i8> @vld1laneQi8(i8* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vld1laneQi8:
 ;CHECK: vld1.8 {d17[1]}, [r0]
-	%tmp1 = load <16 x i8>* %B
-	%tmp2 = load i8* %A, align 8
+	%tmp1 = load <16 x i8>, <16 x i8>* %B
+	%tmp2 = load i8, i8* %A, align 8
 	%tmp3 = insertelement <16 x i8> %tmp1, i8 %tmp2, i32 9
 	ret <16 x i8> %tmp3
 }
@@ -64,8 +64,8 @@ define <16 x i8> @vld1laneQi8(i8* %A, <1
 define <8 x i16> @vld1laneQi16(i16* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vld1laneQi16:
 ;CHECK: vld1.16 {d17[1]}, [r0:16]
-	%tmp1 = load <8 x i16>* %B
-	%tmp2 = load i16* %A, align 8
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
+	%tmp2 = load i16, i16* %A, align 8
 	%tmp3 = insertelement <8 x i16> %tmp1, i16 %tmp2, i32 5
 	ret <8 x i16> %tmp3
 }
@@ -73,8 +73,8 @@ define <8 x i16> @vld1laneQi16(i16* %A,
 define <4 x i32> @vld1laneQi32(i32* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vld1laneQi32:
 ;CHECK: vld1.32 {d17[1]}, [r0:32]
-	%tmp1 = load <4 x i32>* %B
-	%tmp2 = load i32* %A, align 8
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
+	%tmp2 = load i32, i32* %A, align 8
 	%tmp3 = insertelement <4 x i32> %tmp1, i32 %tmp2, i32 3
 	ret <4 x i32> %tmp3
 }
@@ -82,8 +82,8 @@ define <4 x i32> @vld1laneQi32(i32* %A,
 define <4 x float> @vld1laneQf(float* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vld1laneQf:
 ;CHECK: vld1.32 {d16[0]}, [r0:32]
-	%tmp1 = load <4 x float>* %B
-	%tmp2 = load float* %A
+	%tmp1 = load <4 x float>, <4 x float>* %B
+	%tmp2 = load float, float* %A
 	%tmp3 = insertelement <4 x float> %tmp1, float %tmp2, i32 0
 	ret <4 x float> %tmp3
 }
@@ -101,7 +101,7 @@ define <8 x i8> @vld2lanei8(i8* %A, <8 x
 ;CHECK-LABEL: vld2lanei8:
 ;Check the alignment value.  Max for this instruction is 16 bits:
 ;CHECK: vld2.8 {d16[1], d17[1]}, [r0:16]
-	%tmp1 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	%tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 4)
         %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
@@ -114,7 +114,7 @@ define <4 x i16> @vld2lanei16(i16* %A, <
 ;Check the alignment value.  Max for this instruction is 32 bits:
 ;CHECK: vld2.16 {d16[1], d17[1]}, [r0:32]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
 	%tmp2 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
         %tmp3 = extractvalue %struct.__neon_int16x4x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int16x4x2_t %tmp2, 1
@@ -126,7 +126,7 @@ define <2 x i32> @vld2lanei32(i32* %A, <
 ;CHECK-LABEL: vld2lanei32:
 ;CHECK: vld2.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
 	%tmp2 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 1
@@ -138,9 +138,9 @@ define <2 x i32> @vld2lanei32(i32* %A, <
 define <2 x i32> @vld2lanei32_update(i32** %ptr, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vld2lanei32_update:
 ;CHECK: vld2.32 {d16[1], d17[1]}, [{{r[0-9]+}}]!
-	%A = load i32** %ptr
+	%A = load i32*, i32** %ptr
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
 	%tmp2 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
 	%tmp3 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 0
 	%tmp4 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 1
@@ -154,7 +154,7 @@ define <2 x float> @vld2lanef(float* %A,
 ;CHECK-LABEL: vld2lanef:
 ;CHECK: vld2.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %B
 	%tmp2 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_float32x2x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_float32x2x2_t %tmp2, 1
@@ -167,7 +167,7 @@ define <8 x i16> @vld2laneQi16(i16* %A,
 ;Check the (default) alignment.
 ;CHECK: vld2.16 {d17[1], d19[1]}, [{{r[0-9]+}}]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
 	%tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 5, i32 1)
         %tmp3 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 1
@@ -180,7 +180,7 @@ define <4 x i32> @vld2laneQi32(i32* %A,
 ;Check the alignment value.  Max for this instruction is 64 bits:
 ;CHECK: vld2.32 {d17[0], d19[0]}, [{{r[0-9]+}}:64]
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
 	%tmp2 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 16)
         %tmp3 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 1
@@ -192,7 +192,7 @@ define <4 x float> @vld2laneQf(float* %A
 ;CHECK-LABEL: vld2laneQf:
 ;CHECK: vld2.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %B
 	%tmp2 = call %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_float32x4x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_float32x4x2_t %tmp2, 1
@@ -221,7 +221,7 @@ declare %struct.__neon_float32x4x2_t @ll
 define <8 x i8> @vld3lanei8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vld3lanei8:
 ;CHECK: vld3.8
-	%tmp1 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	%tmp2 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
@@ -236,7 +236,7 @@ define <4 x i16> @vld3lanei16(i16* %A, <
 ;Check the (default) alignment value.  VLD3 does not support alignment.
 ;CHECK: vld3.16 {d{{.*}}[1], d{{.*}}[1], d{{.*}}[1]}, [{{r[0-9]+}}]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
 	%tmp2 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
         %tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 1
@@ -250,7 +250,7 @@ define <2 x i32> @vld3lanei32(i32* %A, <
 ;CHECK-LABEL: vld3lanei32:
 ;CHECK: vld3.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
 	%tmp2 = call %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_int32x2x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int32x2x3_t %tmp2, 1
@@ -264,7 +264,7 @@ define <2 x float> @vld3lanef(float* %A,
 ;CHECK-LABEL: vld3lanef:
 ;CHECK: vld3.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %B
 	%tmp2 = call %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 1
@@ -279,7 +279,7 @@ define <8 x i16> @vld3laneQi16(i16* %A,
 ;Check the (default) alignment value.  VLD3 does not support alignment.
 ;CHECK: vld3.16 {d{{.*}}[1], d{{.*}}[1], d{{.*}}[1]}, [{{r[0-9]+}}]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
 	%tmp2 = call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 8)
         %tmp3 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 1
@@ -293,9 +293,9 @@ define <8 x i16> @vld3laneQi16(i16* %A,
 define <8 x i16> @vld3laneQi16_update(i16** %ptr, <8 x i16>* %B, i32 %inc) nounwind {
 ;CHECK-LABEL: vld3laneQi16_update:
 ;CHECK: vld3.16 {d{{.*}}[1], d{{.*}}[1], d{{.*}}[1]}, [{{r[0-9]+}}], {{r[0-9]+}}
-	%A = load i16** %ptr
+	%A = load i16*, i16** %ptr
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
 	%tmp2 = call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 8)
 	%tmp3 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 0
 	%tmp4 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 1
@@ -311,7 +311,7 @@ define <4 x i32> @vld3laneQi32(i32* %A,
 ;CHECK-LABEL: vld3laneQi32:
 ;CHECK: vld3.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
 	%tmp2 = call %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 3, i32 1)
         %tmp3 = extractvalue %struct.__neon_int32x4x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int32x4x3_t %tmp2, 1
@@ -325,7 +325,7 @@ define <4 x float> @vld3laneQf(float* %A
 ;CHECK-LABEL: vld3laneQf:
 ;CHECK: vld3.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %B
 	%tmp2 = call %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 1
@@ -357,7 +357,7 @@ define <8 x i8> @vld4lanei8(i8* %A, <8 x
 ;CHECK-LABEL: vld4lanei8:
 ;Check the alignment value.  Max for this instruction is 32 bits:
 ;CHECK: vld4.8 {d{{.*}}[1], d{{.*}}[1], d{{.*}}[1], d{{.*}}[1]}, [{{r[0-9]+}}:32]
-	%tmp1 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	%tmp2 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
         %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
@@ -373,8 +373,8 @@ define <8 x i8> @vld4lanei8(i8* %A, <8 x
 define <8 x i8> @vld4lanei8_update(i8** %ptr, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vld4lanei8_update:
 ;CHECK: vld4.8 {d16[1], d17[1], d18[1], d19[1]}, [{{r[0-9]+}}:32]!
-	%A = load i8** %ptr
-	%tmp1 = load <8 x i8>* %B
+	%A = load i8*, i8** %ptr
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	%tmp2 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
 	%tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
 	%tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
@@ -394,7 +394,7 @@ define <4 x i16> @vld4lanei16(i16* %A, <
 ;being loaded is ignored.
 ;CHECK: vld4.16 {d16[1], d17[1], d18[1], d19[1]}, [{{r[0-9]+}}]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
 	%tmp2 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 4)
         %tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 1
@@ -412,7 +412,7 @@ define <2 x i32> @vld4lanei32(i32* %A, <
 ;it is smaller than the total size of the memory being loaded.
 ;CHECK: vld4.32 {d16[1], d17[1], d18[1], d19[1]}, [{{r[0-9]+}}:64]
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
 	%tmp2 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 8)
         %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 1
@@ -428,7 +428,7 @@ define <2 x float> @vld4lanef(float* %A,
 ;CHECK-LABEL: vld4lanef:
 ;CHECK: vld4.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %B
 	%tmp2 = call %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 1
@@ -445,7 +445,7 @@ define <8 x i16> @vld4laneQi16(i16* %A,
 ;Check the alignment value.  Max for this instruction is 64 bits:
 ;CHECK: vld4.16 {d16[1], d18[1], d20[1], d22[1]}, [{{r[0-9]+}}:64]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
 	%tmp2 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 16)
         %tmp3 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 1
@@ -462,7 +462,7 @@ define <4 x i32> @vld4laneQi32(i32* %A,
 ;Check the (default) alignment.
 ;CHECK: vld4.32 {d17[0], d19[0], d21[0], d23[0]}, [{{r[0-9]+}}]
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
 	%tmp2 = call %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 1)
         %tmp3 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 1
@@ -478,7 +478,7 @@ define <4 x float> @vld4laneQf(float* %A
 ;CHECK-LABEL: vld4laneQf:
 ;CHECK: vld4.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %B
 	%tmp2 = call %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 1

Modified: llvm/trunk/test/CodeGen/ARM/vldm-liveness.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vldm-liveness.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vldm-liveness.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vldm-liveness.ll Fri Feb 27 15:17:42 2015
@@ -23,13 +23,13 @@ define arm_aapcs_vfpcc <4 x float> @foo(
 ; CHECK: vldmia r0, {s0, s1}
 ; CHECK: vldr s2, [r0, #16]
    %off0 = getelementptr float, float* %ptr, i32 0
-   %val0 = load float* %off0
+   %val0 = load float, float* %off0
    %off1 = getelementptr float, float* %ptr, i32 1
-   %val1 = load float* %off1
+   %val1 = load float, float* %off1
    %off4 = getelementptr float, float* %ptr, i32 4
-   %val4 = load float* %off4
+   %val4 = load float, float* %off4
    %off2 = getelementptr float, float* %ptr, i32 2
-   %val2 = load float* %off2
+   %val2 = load float, float* %off2
 
    %vec1 = insertelement <4 x float> undef, float %val0, i32 0
    %vec2 = insertelement <4 x float> %vec1, float %val1, i32 1

Modified: llvm/trunk/test/CodeGen/ARM/vldm-sched-a9.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vldm-sched-a9.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vldm-sched-a9.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vldm-sched-a9.ll Fri Feb 27 15:17:42 2015
@@ -13,9 +13,9 @@ entry:
   %arrayidx39 = getelementptr inbounds i64, i64* %src, i32 13
   %vecinit285 = shufflevector <16 x i64> undef, <16 x i64> <i64 15, i64 16, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 16, i32 17>
   store <16 x i64> %vecinit285, <16 x i64>* undef, align 128
-  %0 = load i64* undef, align 8
+  %0 = load i64, i64* undef, align 8
   %vecinit379 = insertelement <16 x i64> undef, i64 %0, i32 9
-  %1 = load i64* undef, align 8
+  %1 = load i64, i64* undef, align 8
   %vecinit419 = insertelement <16 x i64> undef, i64 %1, i32 15
   store <16 x i64> %vecinit419, <16 x i64>* undef, align 128
   %vecinit579 = insertelement <16 x i64> undef, i64 0, i32 4
@@ -23,14 +23,14 @@ entry:
   %vecinit584 = insertelement <16 x i64> %vecinit582, i64 undef, i32 9
   %vecinit586 = insertelement <16 x i64> %vecinit584, i64 0, i32 10
   %vecinit589 = shufflevector <16 x i64> %vecinit586, <16 x i64> <i64 12, i64 13, i64 14, i64 15, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 16, i32 17, i32 18, i32 19, i32 undef>
-  %2 = load i64* undef, align 8
+  %2 = load i64, i64* undef, align 8
   %vecinit591 = insertelement <16 x i64> %vecinit589, i64 %2, i32 15
   store <16 x i64> %vecinit591, <16 x i64>* undef, align 128
   %vecinit694 = shufflevector <16 x i64> undef, <16 x i64> <i64 13, i64 14, i64 15, i64 16, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
   store <16 x i64> %vecinit694, <16 x i64>* undef, align 128
-  %3 = load i64* undef, align 8
+  %3 = load i64, i64* undef, align 8
   %vecinit1331 = insertelement <16 x i64> undef, i64 %3, i32 14
-  %4 = load i64* undef, align 8
+  %4 = load i64, i64* undef, align 8
   %vecinit1468 = insertelement <16 x i64> undef, i64 %4, i32 11
   %vecinit1471 = shufflevector <16 x i64> %vecinit1468, <16 x i64> <i64 13, i64 14, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 undef, i32 undef>
   %vecinit1474 = shufflevector <16 x i64> %vecinit1471, <16 x i64> <i64 15, i64 16, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 16, i32 17>
@@ -45,14 +45,14 @@ entry:
   %vecinit1599 = insertelement <16 x i64> %vecinit1597, i64 undef, i32 8
   %vecinit1601 = insertelement <16 x i64> %vecinit1599, i64 undef, i32 9
   %vecinit1603 = insertelement <16 x i64> %vecinit1601, i64 undef, i32 10
-  %5 = load i64* undef, align 8
+  %5 = load i64, i64* undef, align 8
   %vecinit1605 = insertelement <16 x i64> %vecinit1603, i64 %5, i32 11
   %vecinit1608 = shufflevector <16 x i64> %vecinit1605, <16 x i64> <i64 13, i64 14, i64 15, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 undef>
-  %6 = load i64* undef, align 8
+  %6 = load i64, i64* undef, align 8
   %vecinit1610 = insertelement <16 x i64> %vecinit1608, i64 %6, i32 15
   store <16 x i64> %vecinit1610, <16 x i64>* undef, align 128
   %vecinit2226 = shufflevector <16 x i64> undef, <16 x i64> <i64 6, i64 7, i64 8, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 16, i32 17, i32 18, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %7 = load i64* undef, align 8
+  %7 = load i64, i64* undef, align 8
   %vecinit2228 = insertelement <16 x i64> %vecinit2226, i64 %7, i32 8
   %vecinit2230 = insertelement <16 x i64> %vecinit2228, i64 undef, i32 9
   %vecinit2233 = shufflevector <16 x i64> %vecinit2230, <16 x i64> <i64 11, i64 12, i64 13, i64 14, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef>
@@ -62,7 +62,7 @@ entry:
   %vecinit2249 = shufflevector <16 x i64> %vecinit2246, <16 x i64> <i64 7, i64 8, i64 9, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 16, i32 17, i32 18, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %vecinit2252 = shufflevector <16 x i64> %vecinit2249, <16 x i64> <i64 10, i64 11, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 16, i32 17, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %vecinit2255 = shufflevector <16 x i64> %vecinit2252, <16 x i64> <i64 12, i64 13, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 16, i32 17, i32 undef, i32 undef, i32 undef>
-  %8 = load i64* %arrayidx39, align 8
+  %8 = load i64, i64* %arrayidx39, align 8
   %vecinit2257 = insertelement <16 x i64> %vecinit2255, i64 %8, i32 13
   %vecinit2260 = shufflevector <16 x i64> %vecinit2257, <16 x i64> <i64 15, i64 16, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 16, i32 17>
   store <16 x i64> %vecinit2260, <16 x i64>* null, align 128

Modified: llvm/trunk/test/CodeGen/ARM/vminmax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vminmax.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vminmax.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vminmax.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vmins8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmins8:
 ;CHECK: vmin.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vmins8(<8 x i8>* %A, <8
 define <4 x i16> @vmins16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vmins16:
 ;CHECK: vmin.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vmins16(<4 x i16>* %A,
 define <2 x i32> @vmins32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vmins32:
 ;CHECK: vmin.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vmins32(<2 x i32>* %A,
 define <8 x i8> @vminu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vminu8:
 ;CHECK: vmin.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vminu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -39,8 +39,8 @@ define <8 x i8> @vminu8(<8 x i8>* %A, <8
 define <4 x i16> @vminu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vminu16:
 ;CHECK: vmin.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -48,8 +48,8 @@ define <4 x i16> @vminu16(<4 x i16>* %A,
 define <2 x i32> @vminu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vminu32:
 ;CHECK: vmin.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -57,8 +57,8 @@ define <2 x i32> @vminu32(<2 x i32>* %A,
 define <2 x float> @vminf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vminf32:
 ;CHECK: vmin.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -66,8 +66,8 @@ define <2 x float> @vminf32(<2 x float>*
 define <16 x i8> @vminQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vminQs8:
 ;CHECK: vmin.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -75,8 +75,8 @@ define <16 x i8> @vminQs8(<16 x i8>* %A,
 define <8 x i16> @vminQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vminQs16:
 ;CHECK: vmin.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -84,8 +84,8 @@ define <8 x i16> @vminQs16(<8 x i16>* %A
 define <4 x i32> @vminQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vminQs32:
 ;CHECK: vmin.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -93,8 +93,8 @@ define <4 x i32> @vminQs32(<4 x i32>* %A
 define <16 x i8> @vminQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vminQu8:
 ;CHECK: vmin.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vminu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -102,8 +102,8 @@ define <16 x i8> @vminQu8(<16 x i8>* %A,
 define <8 x i16> @vminQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vminQu16:
 ;CHECK: vmin.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vminu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -111,8 +111,8 @@ define <8 x i16> @vminQu16(<8 x i16>* %A
 define <4 x i32> @vminQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vminQu32:
 ;CHECK: vmin.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -120,8 +120,8 @@ define <4 x i32> @vminQu32(<4 x i32>* %A
 define <4 x float> @vminQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vminQf32:
 ;CHECK: vmin.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
@@ -149,8 +149,8 @@ declare <4 x float> @llvm.arm.neon.vmins
 define <8 x i8> @vmaxs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmaxs8:
 ;CHECK: vmax.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vmaxs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -158,8 +158,8 @@ define <8 x i8> @vmaxs8(<8 x i8>* %A, <8
 define <4 x i16> @vmaxs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vmaxs16:
 ;CHECK: vmax.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -167,8 +167,8 @@ define <4 x i16> @vmaxs16(<4 x i16>* %A,
 define <2 x i32> @vmaxs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vmaxs32:
 ;CHECK: vmax.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -176,8 +176,8 @@ define <2 x i32> @vmaxs32(<2 x i32>* %A,
 define <8 x i8> @vmaxu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmaxu8:
 ;CHECK: vmax.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vmaxu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -185,8 +185,8 @@ define <8 x i8> @vmaxu8(<8 x i8>* %A, <8
 define <4 x i16> @vmaxu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vmaxu16:
 ;CHECK: vmax.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -194,8 +194,8 @@ define <4 x i16> @vmaxu16(<4 x i16>* %A,
 define <2 x i32> @vmaxu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vmaxu32:
 ;CHECK: vmax.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -203,8 +203,8 @@ define <2 x i32> @vmaxu32(<2 x i32>* %A,
 define <2 x float> @vmaxf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vmaxf32:
 ;CHECK: vmax.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -212,8 +212,8 @@ define <2 x float> @vmaxf32(<2 x float>*
 define <16 x i8> @vmaxQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmaxQs8:
 ;CHECK: vmax.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -221,8 +221,8 @@ define <16 x i8> @vmaxQs8(<16 x i8>* %A,
 define <8 x i16> @vmaxQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vmaxQs16:
 ;CHECK: vmax.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -230,8 +230,8 @@ define <8 x i16> @vmaxQs16(<8 x i16>* %A
 define <4 x i32> @vmaxQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vmaxQs32:
 ;CHECK: vmax.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -239,8 +239,8 @@ define <4 x i32> @vmaxQs32(<4 x i32>* %A
 define <16 x i8> @vmaxQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmaxQu8:
 ;CHECK: vmax.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -248,8 +248,8 @@ define <16 x i8> @vmaxQu8(<16 x i8>* %A,
 define <8 x i16> @vmaxQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vmaxQu16:
 ;CHECK: vmax.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -257,8 +257,8 @@ define <8 x i16> @vmaxQu16(<8 x i16>* %A
 define <4 x i32> @vmaxQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vmaxQu32:
 ;CHECK: vmax.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -266,8 +266,8 @@ define <4 x i32> @vmaxQu32(<4 x i32>* %A
 define <4 x float> @vmaxQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vmaxQf32:
 ;CHECK: vmax.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/ARM/vminmaxnm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vminmaxnm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vminmaxnm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vminmaxnm.ll Fri Feb 27 15:17:42 2015
@@ -4,8 +4,8 @@
 define <4 x float> @vmaxnmq(<4 x float>* %A, <4 x float>* %B) nounwind {
 ; CHECK-LABEL: vmaxnmq:
 ; CHECK: vmaxnm.f32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
-  %tmp1 = load <4 x float>* %A
-  %tmp2 = load <4 x float>* %B
+  %tmp1 = load <4 x float>, <4 x float>* %A
+  %tmp2 = load <4 x float>, <4 x float>* %B
   %tmp3 = call <4 x float> @llvm.arm.neon.vmaxnm.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
   ret <4 x float> %tmp3
 }
@@ -13,8 +13,8 @@ define <4 x float> @vmaxnmq(<4 x float>*
 define <2 x float> @vmaxnmd(<2 x float>* %A, <2 x float>* %B) nounwind {
 ; CHECK-LABEL: vmaxnmd:
 ; CHECK: vmaxnm.f32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
-  %tmp1 = load <2 x float>* %A
-  %tmp2 = load <2 x float>* %B
+  %tmp1 = load <2 x float>, <2 x float>* %A
+  %tmp2 = load <2 x float>, <2 x float>* %B
   %tmp3 = call <2 x float> @llvm.arm.neon.vmaxnm.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
   ret <2 x float> %tmp3
 }
@@ -22,8 +22,8 @@ define <2 x float> @vmaxnmd(<2 x float>*
 define <4 x float> @vminnmq(<4 x float>* %A, <4 x float>* %B) nounwind {
 ; CHECK-LABEL: vminnmq:
 ; CHECK: vminnm.f32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
-  %tmp1 = load <4 x float>* %A
-  %tmp2 = load <4 x float>* %B
+  %tmp1 = load <4 x float>, <4 x float>* %A
+  %tmp2 = load <4 x float>, <4 x float>* %B
   %tmp3 = call <4 x float> @llvm.arm.neon.vminnm.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
   ret <4 x float> %tmp3
 }
@@ -31,8 +31,8 @@ define <4 x float> @vminnmq(<4 x float>*
 define <2 x float> @vminnmd(<2 x float>* %A, <2 x float>* %B) nounwind {
 ; CHECK-LABEL: vminnmd:
 ; CHECK: vminnm.f32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
-  %tmp1 = load <2 x float>* %A
-  %tmp2 = load <2 x float>* %B
+  %tmp1 = load <2 x float>, <2 x float>* %A
+  %tmp2 = load <2 x float>, <2 x float>* %B
   %tmp3 = call <2 x float> @llvm.arm.neon.vminnm.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
   ret <2 x float> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/ARM/vmla.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vmla.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vmla.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vmla.ll Fri Feb 27 15:17:42 2015
@@ -3,9 +3,9 @@
 define <8 x i8> @vmlai8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8> * %C) nounwind {
 ;CHECK-LABEL: vmlai8:
 ;CHECK: vmla.i8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
-	%tmp3 = load <8 x i8>* %C
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp3 = load <8 x i8>, <8 x i8>* %C
 	%tmp4 = mul <8 x i8> %tmp2, %tmp3
 	%tmp5 = add <8 x i8> %tmp1, %tmp4
 	ret <8 x i8> %tmp5
@@ -14,9 +14,9 @@ define <8 x i8> @vmlai8(<8 x i8>* %A, <8
 define <4 x i16> @vmlai16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
 ;CHECK-LABEL: vmlai16:
 ;CHECK: vmla.i16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
-	%tmp3 = load <4 x i16>* %C
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp3 = load <4 x i16>, <4 x i16>* %C
 	%tmp4 = mul <4 x i16> %tmp2, %tmp3
 	%tmp5 = add <4 x i16> %tmp1, %tmp4
 	ret <4 x i16> %tmp5
@@ -25,9 +25,9 @@ define <4 x i16> @vmlai16(<4 x i16>* %A,
 define <2 x i32> @vmlai32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
 ;CHECK-LABEL: vmlai32:
 ;CHECK: vmla.i32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
-	%tmp3 = load <2 x i32>* %C
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp3 = load <2 x i32>, <2 x i32>* %C
 	%tmp4 = mul <2 x i32> %tmp2, %tmp3
 	%tmp5 = add <2 x i32> %tmp1, %tmp4
 	ret <2 x i32> %tmp5
@@ -36,9 +36,9 @@ define <2 x i32> @vmlai32(<2 x i32>* %A,
 define <2 x float> @vmlaf32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
 ;CHECK-LABEL: vmlaf32:
 ;CHECK: vmla.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
-	%tmp3 = load <2 x float>* %C
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
+	%tmp3 = load <2 x float>, <2 x float>* %C
 	%tmp4 = fmul <2 x float> %tmp2, %tmp3
 	%tmp5 = fadd <2 x float> %tmp1, %tmp4
 	ret <2 x float> %tmp5
@@ -47,9 +47,9 @@ define <2 x float> @vmlaf32(<2 x float>*
 define <16 x i8> @vmlaQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8> * %C) nounwind {
 ;CHECK-LABEL: vmlaQi8:
 ;CHECK: vmla.i8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
-	%tmp3 = load <16 x i8>* %C
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp3 = load <16 x i8>, <16 x i8>* %C
 	%tmp4 = mul <16 x i8> %tmp2, %tmp3
 	%tmp5 = add <16 x i8> %tmp1, %tmp4
 	ret <16 x i8> %tmp5
@@ -58,9 +58,9 @@ define <16 x i8> @vmlaQi8(<16 x i8>* %A,
 define <8 x i16> @vmlaQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
 ;CHECK-LABEL: vmlaQi16:
 ;CHECK: vmla.i16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
-	%tmp3 = load <8 x i16>* %C
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp3 = load <8 x i16>, <8 x i16>* %C
 	%tmp4 = mul <8 x i16> %tmp2, %tmp3
 	%tmp5 = add <8 x i16> %tmp1, %tmp4
 	ret <8 x i16> %tmp5
@@ -69,9 +69,9 @@ define <8 x i16> @vmlaQi16(<8 x i16>* %A
 define <4 x i32> @vmlaQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
 ;CHECK-LABEL: vmlaQi32:
 ;CHECK: vmla.i32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
-	%tmp3 = load <4 x i32>* %C
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp3 = load <4 x i32>, <4 x i32>* %C
 	%tmp4 = mul <4 x i32> %tmp2, %tmp3
 	%tmp5 = add <4 x i32> %tmp1, %tmp4
 	ret <4 x i32> %tmp5
@@ -80,9 +80,9 @@ define <4 x i32> @vmlaQi32(<4 x i32>* %A
 define <4 x float> @vmlaQf32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
 ;CHECK-LABEL: vmlaQf32:
 ;CHECK: vmla.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
-	%tmp3 = load <4 x float>* %C
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp3 = load <4 x float>, <4 x float>* %C
 	%tmp4 = fmul <4 x float> %tmp2, %tmp3
 	%tmp5 = fadd <4 x float> %tmp1, %tmp4
 	ret <4 x float> %tmp5
@@ -91,9 +91,9 @@ define <4 x float> @vmlaQf32(<4 x float>
 define <8 x i16> @vmlals8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ;CHECK-LABEL: vmlals8:
 ;CHECK: vmlal.s8
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i8>* %B
-	%tmp3 = load <8 x i8>* %C
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp3 = load <8 x i8>, <8 x i8>* %C
 	%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
 	%tmp5 = sext <8 x i8> %tmp3 to <8 x i16>
 	%tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -104,9 +104,9 @@ define <8 x i16> @vmlals8(<8 x i16>* %A,
 define <4 x i32> @vmlals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
 ;CHECK-LABEL: vmlals16:
 ;CHECK: vmlal.s16
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i16>* %B
-	%tmp3 = load <4 x i16>* %C
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp3 = load <4 x i16>, <4 x i16>* %C
 	%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
 	%tmp5 = sext <4 x i16> %tmp3 to <4 x i32>
 	%tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -117,9 +117,9 @@ define <4 x i32> @vmlals16(<4 x i32>* %A
 define <2 x i64> @vmlals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
 ;CHECK-LABEL: vmlals32:
 ;CHECK: vmlal.s32
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i32>* %B
-	%tmp3 = load <2 x i32>* %C
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp3 = load <2 x i32>, <2 x i32>* %C
 	%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
 	%tmp5 = sext <2 x i32> %tmp3 to <2 x i64>
 	%tmp6 = mul <2 x i64> %tmp4, %tmp5
@@ -130,9 +130,9 @@ define <2 x i64> @vmlals32(<2 x i64>* %A
 define <8 x i16> @vmlalu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ;CHECK-LABEL: vmlalu8:
 ;CHECK: vmlal.u8
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i8>* %B
-	%tmp3 = load <8 x i8>* %C
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp3 = load <8 x i8>, <8 x i8>* %C
 	%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
 	%tmp5 = zext <8 x i8> %tmp3 to <8 x i16>
 	%tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -143,9 +143,9 @@ define <8 x i16> @vmlalu8(<8 x i16>* %A,
 define <4 x i32> @vmlalu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
 ;CHECK-LABEL: vmlalu16:
 ;CHECK: vmlal.u16
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i16>* %B
-	%tmp3 = load <4 x i16>* %C
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp3 = load <4 x i16>, <4 x i16>* %C
 	%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
 	%tmp5 = zext <4 x i16> %tmp3 to <4 x i32>
 	%tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -156,9 +156,9 @@ define <4 x i32> @vmlalu16(<4 x i32>* %A
 define <2 x i64> @vmlalu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
 ;CHECK-LABEL: vmlalu32:
 ;CHECK: vmlal.u32
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i32>* %B
-	%tmp3 = load <2 x i32>* %C
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp3 = load <2 x i32>, <2 x i32>* %C
 	%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
 	%tmp5 = zext <2 x i32> %tmp3 to <2 x i64>
 	%tmp6 = mul <2 x i64> %tmp4, %tmp5

Modified: llvm/trunk/test/CodeGen/ARM/vmls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vmls.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vmls.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vmls.ll Fri Feb 27 15:17:42 2015
@@ -3,9 +3,9 @@
 define <8 x i8> @vmlsi8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8> * %C) nounwind {
 ;CHECK-LABEL: vmlsi8:
 ;CHECK: vmls.i8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
-	%tmp3 = load <8 x i8>* %C
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp3 = load <8 x i8>, <8 x i8>* %C
 	%tmp4 = mul <8 x i8> %tmp2, %tmp3
 	%tmp5 = sub <8 x i8> %tmp1, %tmp4
 	ret <8 x i8> %tmp5
@@ -14,9 +14,9 @@ define <8 x i8> @vmlsi8(<8 x i8>* %A, <8
 define <4 x i16> @vmlsi16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
 ;CHECK-LABEL: vmlsi16:
 ;CHECK: vmls.i16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
-	%tmp3 = load <4 x i16>* %C
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp3 = load <4 x i16>, <4 x i16>* %C
 	%tmp4 = mul <4 x i16> %tmp2, %tmp3
 	%tmp5 = sub <4 x i16> %tmp1, %tmp4
 	ret <4 x i16> %tmp5
@@ -25,9 +25,9 @@ define <4 x i16> @vmlsi16(<4 x i16>* %A,
 define <2 x i32> @vmlsi32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
 ;CHECK-LABEL: vmlsi32:
 ;CHECK: vmls.i32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
-	%tmp3 = load <2 x i32>* %C
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp3 = load <2 x i32>, <2 x i32>* %C
 	%tmp4 = mul <2 x i32> %tmp2, %tmp3
 	%tmp5 = sub <2 x i32> %tmp1, %tmp4
 	ret <2 x i32> %tmp5
@@ -36,9 +36,9 @@ define <2 x i32> @vmlsi32(<2 x i32>* %A,
 define <2 x float> @vmlsf32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
 ;CHECK-LABEL: vmlsf32:
 ;CHECK: vmls.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
-	%tmp3 = load <2 x float>* %C
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
+	%tmp3 = load <2 x float>, <2 x float>* %C
 	%tmp4 = fmul <2 x float> %tmp2, %tmp3
 	%tmp5 = fsub <2 x float> %tmp1, %tmp4
 	ret <2 x float> %tmp5
@@ -47,9 +47,9 @@ define <2 x float> @vmlsf32(<2 x float>*
 define <16 x i8> @vmlsQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8> * %C) nounwind {
 ;CHECK-LABEL: vmlsQi8:
 ;CHECK: vmls.i8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
-	%tmp3 = load <16 x i8>* %C
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp3 = load <16 x i8>, <16 x i8>* %C
 	%tmp4 = mul <16 x i8> %tmp2, %tmp3
 	%tmp5 = sub <16 x i8> %tmp1, %tmp4
 	ret <16 x i8> %tmp5
@@ -58,9 +58,9 @@ define <16 x i8> @vmlsQi8(<16 x i8>* %A,
 define <8 x i16> @vmlsQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
 ;CHECK-LABEL: vmlsQi16:
 ;CHECK: vmls.i16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
-	%tmp3 = load <8 x i16>* %C
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp3 = load <8 x i16>, <8 x i16>* %C
 	%tmp4 = mul <8 x i16> %tmp2, %tmp3
 	%tmp5 = sub <8 x i16> %tmp1, %tmp4
 	ret <8 x i16> %tmp5
@@ -69,9 +69,9 @@ define <8 x i16> @vmlsQi16(<8 x i16>* %A
 define <4 x i32> @vmlsQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
 ;CHECK-LABEL: vmlsQi32:
 ;CHECK: vmls.i32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
-	%tmp3 = load <4 x i32>* %C
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp3 = load <4 x i32>, <4 x i32>* %C
 	%tmp4 = mul <4 x i32> %tmp2, %tmp3
 	%tmp5 = sub <4 x i32> %tmp1, %tmp4
 	ret <4 x i32> %tmp5
@@ -80,9 +80,9 @@ define <4 x i32> @vmlsQi32(<4 x i32>* %A
 define <4 x float> @vmlsQf32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
 ;CHECK-LABEL: vmlsQf32:
 ;CHECK: vmls.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
-	%tmp3 = load <4 x float>* %C
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp3 = load <4 x float>, <4 x float>* %C
 	%tmp4 = fmul <4 x float> %tmp2, %tmp3
 	%tmp5 = fsub <4 x float> %tmp1, %tmp4
 	ret <4 x float> %tmp5
@@ -91,9 +91,9 @@ define <4 x float> @vmlsQf32(<4 x float>
 define <8 x i16> @vmlsls8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ;CHECK-LABEL: vmlsls8:
 ;CHECK: vmlsl.s8
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i8>* %B
-	%tmp3 = load <8 x i8>* %C
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp3 = load <8 x i8>, <8 x i8>* %C
 	%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
 	%tmp5 = sext <8 x i8> %tmp3 to <8 x i16>
 	%tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -104,9 +104,9 @@ define <8 x i16> @vmlsls8(<8 x i16>* %A,
 define <4 x i32> @vmlsls16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
 ;CHECK-LABEL: vmlsls16:
 ;CHECK: vmlsl.s16
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i16>* %B
-	%tmp3 = load <4 x i16>* %C
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp3 = load <4 x i16>, <4 x i16>* %C
 	%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
 	%tmp5 = sext <4 x i16> %tmp3 to <4 x i32>
 	%tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -117,9 +117,9 @@ define <4 x i32> @vmlsls16(<4 x i32>* %A
 define <2 x i64> @vmlsls32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
 ;CHECK-LABEL: vmlsls32:
 ;CHECK: vmlsl.s32
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i32>* %B
-	%tmp3 = load <2 x i32>* %C
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp3 = load <2 x i32>, <2 x i32>* %C
 	%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
 	%tmp5 = sext <2 x i32> %tmp3 to <2 x i64>
 	%tmp6 = mul <2 x i64> %tmp4, %tmp5
@@ -130,9 +130,9 @@ define <2 x i64> @vmlsls32(<2 x i64>* %A
 define <8 x i16> @vmlslu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ;CHECK-LABEL: vmlslu8:
 ;CHECK: vmlsl.u8
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i8>* %B
-	%tmp3 = load <8 x i8>* %C
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp3 = load <8 x i8>, <8 x i8>* %C
 	%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
 	%tmp5 = zext <8 x i8> %tmp3 to <8 x i16>
 	%tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -143,9 +143,9 @@ define <8 x i16> @vmlslu8(<8 x i16>* %A,
 define <4 x i32> @vmlslu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
 ;CHECK-LABEL: vmlslu16:
 ;CHECK: vmlsl.u16
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i16>* %B
-	%tmp3 = load <4 x i16>* %C
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp3 = load <4 x i16>, <4 x i16>* %C
 	%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
 	%tmp5 = zext <4 x i16> %tmp3 to <4 x i32>
 	%tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -156,9 +156,9 @@ define <4 x i32> @vmlslu16(<4 x i32>* %A
 define <2 x i64> @vmlslu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
 ;CHECK-LABEL: vmlslu32:
 ;CHECK: vmlsl.u32
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i32>* %B
-	%tmp3 = load <2 x i32>* %C
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp3 = load <2 x i32>, <2 x i32>* %C
 	%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
 	%tmp5 = zext <2 x i32> %tmp3 to <2 x i64>
 	%tmp6 = mul <2 x i64> %tmp4, %tmp5

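A note on the pattern in these hunks: each -/+ pair rewrites a load from the
old one-operand spelling, where the result type was inferred from the pointer
operand, to the new two-operand spelling, where the result type is written
explicitly before the pointer. A minimal sketch of the two forms, using a
hypothetical pointer %v and %p (not taken from any test above):

        ; old spelling: result type inferred from the pointer operand
        ;   %v = load <8 x i8>* %p
        ; new spelling: explicit result type, then the pointer operand
        %v = load <8 x i8>, <8 x i8>* %p

The instruction itself is unchanged; only its textual form differs.
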
Modified: llvm/trunk/test/CodeGen/ARM/vmov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vmov.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vmov.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vmov.ll Fri Feb 27 15:17:42 2015
@@ -191,7 +191,7 @@ entry:
 define <8 x i16> @vmovls8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vmovls8:
 ;CHECK: vmovl.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
 	ret <8 x i16> %tmp2
 }
@@ -199,7 +199,7 @@ define <8 x i16> @vmovls8(<8 x i8>* %A)
 define <4 x i32> @vmovls16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vmovls16:
 ;CHECK: vmovl.s16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
 	ret <4 x i32> %tmp2
 }
@@ -207,7 +207,7 @@ define <4 x i32> @vmovls16(<4 x i16>* %A
 define <2 x i64> @vmovls32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vmovls32:
 ;CHECK: vmovl.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
 	ret <2 x i64> %tmp2
 }
@@ -215,7 +215,7 @@ define <2 x i64> @vmovls32(<2 x i32>* %A
 define <8 x i16> @vmovlu8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vmovlu8:
 ;CHECK: vmovl.u8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
 	ret <8 x i16> %tmp2
 }
@@ -223,7 +223,7 @@ define <8 x i16> @vmovlu8(<8 x i8>* %A)
 define <4 x i32> @vmovlu16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vmovlu16:
 ;CHECK: vmovl.u16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
 	ret <4 x i32> %tmp2
 }
@@ -231,7 +231,7 @@ define <4 x i32> @vmovlu16(<4 x i16>* %A
 define <2 x i64> @vmovlu32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vmovlu32:
 ;CHECK: vmovl.u32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
 	ret <2 x i64> %tmp2
 }
@@ -239,7 +239,7 @@ define <2 x i64> @vmovlu32(<2 x i32>* %A
 define <8 x i8> @vmovni16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vmovni16:
 ;CHECK: vmovn.i16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = trunc <8 x i16> %tmp1 to <8 x i8>
 	ret <8 x i8> %tmp2
 }
@@ -247,7 +247,7 @@ define <8 x i8> @vmovni16(<8 x i16>* %A)
 define <4 x i16> @vmovni32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vmovni32:
 ;CHECK: vmovn.i32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = trunc <4 x i32> %tmp1 to <4 x i16>
 	ret <4 x i16> %tmp2
 }
@@ -255,7 +255,7 @@ define <4 x i16> @vmovni32(<4 x i32>* %A
 define <2 x i32> @vmovni64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vmovni64:
 ;CHECK: vmovn.i64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = trunc <2 x i64> %tmp1 to <2 x i32>
 	ret <2 x i32> %tmp2
 }
@@ -263,7 +263,7 @@ define <2 x i32> @vmovni64(<2 x i64>* %A
 define <8 x i8> @vqmovns16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqmovns16:
 ;CHECK: vqmovn.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqmovns.v8i8(<8 x i16> %tmp1)
 	ret <8 x i8> %tmp2
 }
@@ -271,7 +271,7 @@ define <8 x i8> @vqmovns16(<8 x i16>* %A
 define <4 x i16> @vqmovns32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqmovns32:
 ;CHECK: vqmovn.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqmovns.v4i16(<4 x i32> %tmp1)
 	ret <4 x i16> %tmp2
 }
@@ -279,7 +279,7 @@ define <4 x i16> @vqmovns32(<4 x i32>* %
 define <2 x i32> @vqmovns64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqmovns64:
 ;CHECK: vqmovn.s64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqmovns.v2i32(<2 x i64> %tmp1)
 	ret <2 x i32> %tmp2
 }
@@ -287,7 +287,7 @@ define <2 x i32> @vqmovns64(<2 x i64>* %
 define <8 x i8> @vqmovnu16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqmovnu16:
 ;CHECK: vqmovn.u16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16> %tmp1)
 	ret <8 x i8> %tmp2
 }
@@ -295,7 +295,7 @@ define <8 x i8> @vqmovnu16(<8 x i16>* %A
 define <4 x i16> @vqmovnu32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqmovnu32:
 ;CHECK: vqmovn.u32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqmovnu.v4i16(<4 x i32> %tmp1)
 	ret <4 x i16> %tmp2
 }
@@ -303,7 +303,7 @@ define <4 x i16> @vqmovnu32(<4 x i32>* %
 define <2 x i32> @vqmovnu64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqmovnu64:
 ;CHECK: vqmovn.u64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqmovnu.v2i32(<2 x i64> %tmp1)
 	ret <2 x i32> %tmp2
 }
@@ -311,7 +311,7 @@ define <2 x i32> @vqmovnu64(<2 x i64>* %
 define <8 x i8> @vqmovuns16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqmovuns16:
 ;CHECK: vqmovun.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqmovnsu.v8i8(<8 x i16> %tmp1)
 	ret <8 x i8> %tmp2
 }
@@ -319,7 +319,7 @@ define <8 x i8> @vqmovuns16(<8 x i16>* %
 define <4 x i16> @vqmovuns32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqmovuns32:
 ;CHECK: vqmovun.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqmovnsu.v4i16(<4 x i32> %tmp1)
 	ret <4 x i16> %tmp2
 }
@@ -327,7 +327,7 @@ define <4 x i16> @vqmovuns32(<4 x i32>*
 define <2 x i32> @vqmovuns64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqmovuns64:
 ;CHECK: vqmovun.s64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqmovnsu.v2i32(<2 x i64> %tmp1)
 	ret <2 x i32> %tmp2
 }
@@ -348,7 +348,7 @@ declare <2 x i32> @llvm.arm.neon.vqmovns
 ; Radar 8598391.
 define void @noTruncStore(<4 x i32>* %a, <4 x i16>* %b) nounwind {
 ;CHECK: vmovn
-  %tmp1 = load <4 x i32>* %a, align 16
+  %tmp1 = load <4 x i32>, <4 x i32>* %a, align 16
   %tmp2 = trunc <4 x i32> %tmp1 to <4 x i16>
   store <4 x i16> %tmp2, <4 x i16>* %b, align 8
   ret void
@@ -376,7 +376,7 @@ define void @v_mov_v4f32_undef(<4 x floa
 entry:
 ;CHECK-LABEL: v_mov_v4f32_undef:
 ;CHECK: vmov.f32 q{{.*}}, #1.000000e+00
-  %a = load <4 x float> *%p
+  %a = load <4 x float> , <4 x float> *%p
   %b = fadd <4 x float> %a, <float undef, float 1.0, float 1.0, float 1.0>
   store <4 x float> %b, <4 x float> *%p
   ret void

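Loads that carry trailing attributes keep them unchanged after the pointer
operand; only the type list in front grows. The v_mov_v4f32_undef hunk above
also shows that the conversion preserves the original token spacing
("<4 x float> *%p" becomes "<4 x float> , <4 x float> *%p"). A minimal sketch
of an aligned load in the new spelling, with a hypothetical %v and %p:

        ; align (like any other trailing attribute) is left untouched
        %v = load <4 x i32>, <4 x i32>* %p, align 16
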
Modified: llvm/trunk/test/CodeGen/ARM/vmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vmul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vmul.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vmul.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vmuli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmuli8:
 ;CHECK: vmul.i8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = mul <8 x i8> %tmp1, %tmp2
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vmuli8(<8 x i8>* %A, <8
 define <4 x i16> @vmuli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vmuli16:
 ;CHECK: vmul.i16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = mul <4 x i16> %tmp1, %tmp2
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vmuli16(<4 x i16>* %A,
 define <2 x i32> @vmuli32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vmuli32:
 ;CHECK: vmul.i32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = mul <2 x i32> %tmp1, %tmp2
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vmuli32(<2 x i32>* %A,
 define <2 x float> @vmulf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vmulf32:
 ;CHECK: vmul.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fmul <2 x float> %tmp1, %tmp2
 	ret <2 x float> %tmp3
 }
@@ -39,8 +39,8 @@ define <2 x float> @vmulf32(<2 x float>*
 define <8 x i8> @vmulp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmulp8:
 ;CHECK: vmul.p8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -48,8 +48,8 @@ define <8 x i8> @vmulp8(<8 x i8>* %A, <8
 define <16 x i8> @vmulQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmulQi8:
 ;CHECK: vmul.i8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = mul <16 x i8> %tmp1, %tmp2
 	ret <16 x i8> %tmp3
 }
@@ -57,8 +57,8 @@ define <16 x i8> @vmulQi8(<16 x i8>* %A,
 define <8 x i16> @vmulQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vmulQi16:
 ;CHECK: vmul.i16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = mul <8 x i16> %tmp1, %tmp2
 	ret <8 x i16> %tmp3
 }
@@ -66,8 +66,8 @@ define <8 x i16> @vmulQi16(<8 x i16>* %A
 define <4 x i32> @vmulQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vmulQi32:
 ;CHECK: vmul.i32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = mul <4 x i32> %tmp1, %tmp2
 	ret <4 x i32> %tmp3
 }
@@ -75,8 +75,8 @@ define <4 x i32> @vmulQi32(<4 x i32>* %A
 define <4 x float> @vmulQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vmulQf32:
 ;CHECK: vmul.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = fmul <4 x float> %tmp1, %tmp2
 	ret <4 x float> %tmp3
 }
@@ -84,8 +84,8 @@ define <4 x float> @vmulQf32(<4 x float>
 define <16 x i8> @vmulQp8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmulQp8:
 ;CHECK: vmul.p8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -150,8 +150,8 @@ entry:
 define <8 x i16> @vmulls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmulls8:
 ;CHECK: vmull.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
 	%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
 	%tmp5 = mul <8 x i16> %tmp3, %tmp4
@@ -161,8 +161,8 @@ define <8 x i16> @vmulls8(<8 x i8>* %A,
 define <8 x i16> @vmulls8_int(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmulls8_int:
 ;CHECK: vmull.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -170,8 +170,8 @@ define <8 x i16> @vmulls8_int(<8 x i8>*
 define <4 x i32> @vmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vmulls16:
 ;CHECK: vmull.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
 	%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
 	%tmp5 = mul <4 x i32> %tmp3, %tmp4
@@ -181,8 +181,8 @@ define <4 x i32> @vmulls16(<4 x i16>* %A
 define <4 x i32> @vmulls16_int(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vmulls16_int:
 ;CHECK: vmull.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -190,8 +190,8 @@ define <4 x i32> @vmulls16_int(<4 x i16>
 define <2 x i64> @vmulls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vmulls32:
 ;CHECK: vmull.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
 	%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
 	%tmp5 = mul <2 x i64> %tmp3, %tmp4
@@ -201,8 +201,8 @@ define <2 x i64> @vmulls32(<2 x i32>* %A
 define <2 x i64> @vmulls32_int(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vmulls32_int:
 ;CHECK: vmull.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -210,8 +210,8 @@ define <2 x i64> @vmulls32_int(<2 x i32>
 define <8 x i16> @vmullu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmullu8:
 ;CHECK: vmull.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
 	%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
 	%tmp5 = mul <8 x i16> %tmp3, %tmp4
@@ -221,8 +221,8 @@ define <8 x i16> @vmullu8(<8 x i8>* %A,
 define <8 x i16> @vmullu8_int(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmullu8_int:
 ;CHECK: vmull.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -230,8 +230,8 @@ define <8 x i16> @vmullu8_int(<8 x i8>*
 define <4 x i32> @vmullu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vmullu16:
 ;CHECK: vmull.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
 	%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
 	%tmp5 = mul <4 x i32> %tmp3, %tmp4
@@ -241,8 +241,8 @@ define <4 x i32> @vmullu16(<4 x i16>* %A
 define <4 x i32> @vmullu16_int(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vmullu16_int:
 ;CHECK: vmull.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -250,8 +250,8 @@ define <4 x i32> @vmullu16_int(<4 x i16>
 define <2 x i64> @vmullu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vmullu32:
 ;CHECK: vmull.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
 	%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
 	%tmp5 = mul <2 x i64> %tmp3, %tmp4
@@ -261,8 +261,8 @@ define <2 x i64> @vmullu32(<2 x i32>* %A
 define <2 x i64> @vmullu32_int(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vmullu32_int:
 ;CHECK: vmull.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -270,8 +270,8 @@ define <2 x i64> @vmullu32_int(<2 x i32>
 define <8 x i16> @vmullp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vmullp8:
 ;CHECK: vmull.p8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -560,7 +560,7 @@ for.body33.lr.ph:
 for.body33:                                       ; preds = %for.body33, %for.body33.lr.ph
   %add45 = add i32 undef, undef
   %vld155 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* undef, i32 1)
-  %0 = load i32** undef, align 4
+  %0 = load i32*, i32** undef, align 4
   %shuffle.i250 = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> zeroinitializer
   %1 = bitcast <1 x i64> %shuffle.i250 to <8 x i8>
   %vmovl.i249 = zext <8 x i8> %1 to <8 x i16>
@@ -616,7 +616,7 @@ declare <8 x i8> @llvm.arm.neon.vqmovnu.
 ; PR15970
 define void @no_illegal_types_vmull_sext(<4 x i32> %a) {
 entry:
-  %wide.load283.i = load <4 x i8>* undef, align 1
+  %wide.load283.i = load <4 x i8>, <4 x i8>* undef, align 1
   %0 = sext <4 x i8> %wide.load283.i to <4 x i32>
   %1 = sub nsw <4 x i32> %0, %a
   %2 = mul nsw <4 x i32> %1, %1
@@ -626,7 +626,7 @@ entry:
 }
 define void @no_illegal_types_vmull_zext(<4 x i32> %a) {
 entry:
-  %wide.load283.i = load <4 x i8>* undef, align 1
+  %wide.load283.i = load <4 x i8>, <4 x i8>* undef, align 1
   %0 = zext <4 x i8> %wide.load283.i to <4 x i32>
   %1 = sub nsw <4 x i32> %0, %a
   %2 = mul nsw <4 x i32> %1, %1
@@ -642,8 +642,8 @@ define void @foo(<4 x float> * %a, <4 x
 ;   and used a vector * scalar instruction.
 ; CHECK: vldr  {{s[0-9]+}}, [r2]
 ; CHECK: vmul.f32  q8, q8, d0[0]
-  %tmp = load float* %src, align 4
-  %tmp5 = load <4 x float>* %a, align 4
+  %tmp = load float, float* %src, align 4
+  %tmp5 = load <4 x float>, <4 x float>* %a, align 4
   %tmp6 = insertelement <4 x float> undef, float %tmp, i32 0
   %tmp7 = insertelement <4 x float> %tmp6, float %tmp, i32 1
   %tmp8 = insertelement <4 x float> %tmp7, float %tmp, i32 2

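When the loaded value is itself a pointer, the explicit type parameter is that
pointer type, so the two operands differ by exactly one level of indirection,
as in the "%0 = load i32*, i32** undef" line above. Scalar loads follow the
same rule as vector ones. A sketch with hypothetical operands %pp and %src:

        ; loading a pointer: result type i32*, pointer operand type i32**
        %q = load i32*, i32** %pp, align 4
        ; loading a plain scalar
        %f = load float, float* %src, align 4
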
Modified: llvm/trunk/test/CodeGen/ARM/vneg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vneg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vneg.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vneg.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 define <8 x i8> @vnegs8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vnegs8:
 ;CHECK: vneg.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = sub <8 x i8> zeroinitializer, %tmp1
 	ret <8 x i8> %tmp2
 }
@@ -11,7 +11,7 @@ define <8 x i8> @vnegs8(<8 x i8>* %A) no
 define <4 x i16> @vnegs16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vnegs16:
 ;CHECK: vneg.s16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = sub <4 x i16> zeroinitializer, %tmp1
 	ret <4 x i16> %tmp2
 }
@@ -19,7 +19,7 @@ define <4 x i16> @vnegs16(<4 x i16>* %A)
 define <2 x i32> @vnegs32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vnegs32:
 ;CHECK: vneg.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = sub <2 x i32> zeroinitializer, %tmp1
 	ret <2 x i32> %tmp2
 }
@@ -27,7 +27,7 @@ define <2 x i32> @vnegs32(<2 x i32>* %A)
 define <2 x float> @vnegf32(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: vnegf32:
 ;CHECK: vneg.f32
-	%tmp1 = load <2 x float>* %A
+	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
 	ret <2 x float> %tmp2
 }
@@ -35,7 +35,7 @@ define <2 x float> @vnegf32(<2 x float>*
 define <16 x i8> @vnegQs8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vnegQs8:
 ;CHECK: vneg.s8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = sub <16 x i8> zeroinitializer, %tmp1
 	ret <16 x i8> %tmp2
 }
@@ -43,7 +43,7 @@ define <16 x i8> @vnegQs8(<16 x i8>* %A)
 define <8 x i16> @vnegQs16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vnegQs16:
 ;CHECK: vneg.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = sub <8 x i16> zeroinitializer, %tmp1
 	ret <8 x i16> %tmp2
 }
@@ -51,7 +51,7 @@ define <8 x i16> @vnegQs16(<8 x i16>* %A
 define <4 x i32> @vnegQs32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vnegQs32:
 ;CHECK: vneg.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = sub <4 x i32> zeroinitializer, %tmp1
 	ret <4 x i32> %tmp2
 }
@@ -59,7 +59,7 @@ define <4 x i32> @vnegQs32(<4 x i32>* %A
 define <4 x float> @vnegQf32(<4 x float>* %A) nounwind {
 ;CHECK-LABEL: vnegQf32:
 ;CHECK: vneg.f32
-	%tmp1 = load <4 x float>* %A
+	%tmp1 = load <4 x float>, <4 x float>* %A
 	%tmp2 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp1
 	ret <4 x float> %tmp2
 }
@@ -67,7 +67,7 @@ define <4 x float> @vnegQf32(<4 x float>
 define <8 x i8> @vqnegs8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vqnegs8:
 ;CHECK: vqneg.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8> %tmp1)
 	ret <8 x i8> %tmp2
 }
@@ -75,7 +75,7 @@ define <8 x i8> @vqnegs8(<8 x i8>* %A) n
 define <4 x i16> @vqnegs16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqnegs16:
 ;CHECK: vqneg.s16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16> %tmp1)
 	ret <4 x i16> %tmp2
 }
@@ -83,7 +83,7 @@ define <4 x i16> @vqnegs16(<4 x i16>* %A
 define <2 x i32> @vqnegs32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqnegs32:
 ;CHECK: vqneg.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp2
 }
@@ -91,7 +91,7 @@ define <2 x i32> @vqnegs32(<2 x i32>* %A
 define <16 x i8> @vqnegQs8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vqnegQs8:
 ;CHECK: vqneg.s8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8> %tmp1)
 	ret <16 x i8> %tmp2
 }
@@ -99,7 +99,7 @@ define <16 x i8> @vqnegQs8(<16 x i8>* %A
 define <8 x i16> @vqnegQs16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqnegQs16:
 ;CHECK: vqneg.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16> %tmp1)
 	ret <8 x i16> %tmp2
 }
@@ -107,7 +107,7 @@ define <8 x i16> @vqnegQs16(<8 x i16>* %
 define <4 x i32> @vqnegQs32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqnegQs32:
 ;CHECK: vqneg.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/vpadal.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vpadal.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vpadal.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vpadal.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <4 x i16> @vpadals8(<4 x i16>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vpadals8:
 ;CHECK: vpadal.s8
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -12,8 +12,8 @@ define <4 x i16> @vpadals8(<4 x i16>* %A
 define <2 x i32> @vpadals16(<2 x i32>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vpadals16:
 ;CHECK: vpadal.s16
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -21,8 +21,8 @@ define <2 x i32> @vpadals16(<2 x i32>* %
 define <1 x i64> @vpadals32(<1 x i64>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vpadals32:
 ;CHECK: vpadal.s32
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vpadals.v1i64.v2i32(<1 x i64> %tmp1, <2 x i32> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -30,8 +30,8 @@ define <1 x i64> @vpadals32(<1 x i64>* %
 define <4 x i16> @vpadalu8(<4 x i16>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vpadalu8:
 ;CHECK: vpadal.u8
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -39,8 +39,8 @@ define <4 x i16> @vpadalu8(<4 x i16>* %A
 define <2 x i32> @vpadalu16(<2 x i32>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vpadalu16:
 ;CHECK: vpadal.u16
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -48,8 +48,8 @@ define <2 x i32> @vpadalu16(<2 x i32>* %
 define <1 x i64> @vpadalu32(<1 x i64>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vpadalu32:
 ;CHECK: vpadal.u32
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vpadalu.v1i64.v2i32(<1 x i64> %tmp1, <2 x i32> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -57,8 +57,8 @@ define <1 x i64> @vpadalu32(<1 x i64>* %
 define <8 x i16> @vpadalQs8(<8 x i16>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vpadalQs8:
 ;CHECK: vpadal.s8
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16> %tmp1, <16 x i8> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -66,8 +66,8 @@ define <8 x i16> @vpadalQs8(<8 x i16>* %
 define <4 x i32> @vpadalQs16(<4 x i32>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vpadalQs16:
 ;CHECK: vpadal.s16
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32> %tmp1, <8 x i16> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -75,8 +75,8 @@ define <4 x i32> @vpadalQs16(<4 x i32>*
 define <2 x i64> @vpadalQs32(<2 x i64>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vpadalQs32:
 ;CHECK: vpadal.s32
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -84,8 +84,8 @@ define <2 x i64> @vpadalQs32(<2 x i64>*
 define <8 x i16> @vpadalQu8(<8 x i16>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vpadalQu8:
 ;CHECK: vpadal.u8
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16> %tmp1, <16 x i8> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -93,8 +93,8 @@ define <8 x i16> @vpadalQu8(<8 x i16>* %
 define <4 x i32> @vpadalQu16(<4 x i32>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vpadalQu16:
 ;CHECK: vpadal.u16
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vpadalu.v4i32.v8i16(<4 x i32> %tmp1, <8 x i16> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -102,8 +102,8 @@ define <4 x i32> @vpadalQu16(<4 x i32>*
 define <2 x i64> @vpadalQu32(<2 x i64>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vpadalQu32:
 ;CHECK: vpadal.u32
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
 	ret <2 x i64> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/ARM/vpadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vpadd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vpadd.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vpadd.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vpaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vpaddi8:
 ;CHECK: vpadd.i8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vpadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vpaddi8(<8 x i8>* %A, <
 define <4 x i16> @vpaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vpaddi16:
 ;CHECK: vpadd.i16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vpaddi16(<4 x i16>* %A
 define <2 x i32> @vpaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vpaddi32:
 ;CHECK: vpadd.i32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vpaddi32(<2 x i32>* %A
 define <2 x float> @vpaddf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vpaddf32:
 ;CHECK: vpadd.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -45,7 +45,7 @@ declare <2 x float> @llvm.arm.neon.vpadd
 define <4 x i16> @vpaddls8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vpaddls8:
 ;CHECK: vpaddl.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %tmp1)
 	ret <4 x i16> %tmp2
 }
@@ -53,7 +53,7 @@ define <4 x i16> @vpaddls8(<8 x i8>* %A)
 define <2 x i32> @vpaddls16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vpaddls16:
 ;CHECK: vpaddl.s16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16> %tmp1)
 	ret <2 x i32> %tmp2
 }
@@ -61,7 +61,7 @@ define <2 x i32> @vpaddls16(<4 x i16>* %
 define <1 x i64> @vpaddls32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vpaddls32:
 ;CHECK: vpaddl.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32> %tmp1)
 	ret <1 x i64> %tmp2
 }
@@ -69,7 +69,7 @@ define <1 x i64> @vpaddls32(<2 x i32>* %
 define <4 x i16> @vpaddlu8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vpaddlu8:
 ;CHECK: vpaddl.u8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8> %tmp1)
 	ret <4 x i16> %tmp2
 }
@@ -77,7 +77,7 @@ define <4 x i16> @vpaddlu8(<8 x i8>* %A)
 define <2 x i32> @vpaddlu16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vpaddlu16:
 ;CHECK: vpaddl.u16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %tmp1)
 	ret <2 x i32> %tmp2
 }
@@ -85,7 +85,7 @@ define <2 x i32> @vpaddlu16(<4 x i16>* %
 define <1 x i64> @vpaddlu32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vpaddlu32:
 ;CHECK: vpaddl.u32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32> %tmp1)
 	ret <1 x i64> %tmp2
 }
@@ -93,7 +93,7 @@ define <1 x i64> @vpaddlu32(<2 x i32>* %
 define <8 x i16> @vpaddlQs8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vpaddlQs8:
 ;CHECK: vpaddl.s8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8> %tmp1)
 	ret <8 x i16> %tmp2
 }
@@ -101,7 +101,7 @@ define <8 x i16> @vpaddlQs8(<16 x i8>* %
 define <4 x i32> @vpaddlQs16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vpaddlQs16:
 ;CHECK: vpaddl.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16> %tmp1)
 	ret <4 x i32> %tmp2
 }
@@ -109,7 +109,7 @@ define <4 x i32> @vpaddlQs16(<8 x i16>*
 define <2 x i64> @vpaddlQs32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vpaddlQs32:
 ;CHECK: vpaddl.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32> %tmp1)
 	ret <2 x i64> %tmp2
 }
@@ -117,7 +117,7 @@ define <2 x i64> @vpaddlQs32(<4 x i32>*
 define <8 x i16> @vpaddlQu8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vpaddlQu8:
 ;CHECK: vpaddl.u8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8> %tmp1)
 	ret <8 x i16> %tmp2
 }
@@ -125,7 +125,7 @@ define <8 x i16> @vpaddlQu8(<16 x i8>* %
 define <4 x i32> @vpaddlQu16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vpaddlQu16:
 ;CHECK: vpaddl.u16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %tmp1)
 	ret <4 x i32> %tmp2
 }
@@ -133,7 +133,7 @@ define <4 x i32> @vpaddlQu16(<8 x i16>*
 define <2 x i64> @vpaddlQu32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vpaddlQu32:
 ;CHECK: vpaddl.u32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %tmp1)
 	ret <2 x i64> %tmp2
 }
@@ -143,9 +143,9 @@ define void @addCombineToVPADDL() nounwi
 ; CHECK: vpaddl.s8
   %cbcr = alloca <16 x i8>, align 16
   %X = alloca <8 x i8>, align 8
-  %tmp = load <16 x i8>* %cbcr
+  %tmp = load <16 x i8>, <16 x i8>* %cbcr
   %tmp1 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %tmp2 = load <16 x i8>* %cbcr
+  %tmp2 = load <16 x i8>, <16 x i8>* %cbcr
   %tmp3 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   %add = add <8 x i8> %tmp3, %tmp1
   store <8 x i8> %add, <8 x i8>* %X, align 8

Modified: llvm/trunk/test/CodeGen/ARM/vpminmax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vpminmax.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vpminmax.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vpminmax.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vpmins8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vpmins8:
 ;CHECK: vpmin.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vpmins8(<8 x i8>* %A, <
 define <4 x i16> @vpmins16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vpmins16:
 ;CHECK: vpmin.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vpmins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vpmins16(<4 x i16>* %A
 define <2 x i32> @vpmins32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vpmins32:
 ;CHECK: vpmin.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vpmins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vpmins32(<2 x i32>* %A
 define <8 x i8> @vpminu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vpminu8:
 ;CHECK: vpmin.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -39,8 +39,8 @@ define <8 x i8> @vpminu8(<8 x i8>* %A, <
 define <4 x i16> @vpminu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vpminu16:
 ;CHECK: vpmin.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vpminu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -48,8 +48,8 @@ define <4 x i16> @vpminu16(<4 x i16>* %A
 define <2 x i32> @vpminu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vpminu32:
 ;CHECK: vpmin.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -57,8 +57,8 @@ define <2 x i32> @vpminu32(<2 x i32>* %A
 define <2 x float> @vpminf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vpminf32:
 ;CHECK: vpmin.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -76,8 +76,8 @@ declare <2 x float> @llvm.arm.neon.vpmin
 define <8 x i8> @vpmaxs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vpmaxs8:
 ;CHECK: vpmax.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -85,8 +85,8 @@ define <8 x i8> @vpmaxs8(<8 x i8>* %A, <
 define <4 x i16> @vpmaxs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vpmaxs16:
 ;CHECK: vpmax.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vpmaxs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -94,8 +94,8 @@ define <4 x i16> @vpmaxs16(<4 x i16>* %A
 define <2 x i32> @vpmaxs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vpmaxs32:
 ;CHECK: vpmax.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vpmaxs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -103,8 +103,8 @@ define <2 x i32> @vpmaxs32(<2 x i32>* %A
 define <8 x i8> @vpmaxu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vpmaxu8:
 ;CHECK: vpmax.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -112,8 +112,8 @@ define <8 x i8> @vpmaxu8(<8 x i8>* %A, <
 define <4 x i16> @vpmaxu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vpmaxu16:
 ;CHECK: vpmax.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vpmaxu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -121,8 +121,8 @@ define <4 x i16> @vpmaxu16(<4 x i16>* %A
 define <2 x i32> @vpmaxu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vpmaxu32:
 ;CHECK: vpmax.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vpmaxu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -130,8 +130,8 @@ define <2 x i32> @vpmaxu32(<2 x i32>* %A
 define <2 x float> @vpmaxf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vpmaxf32:
 ;CHECK: vpmax.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.arm.neon.vpmaxs.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/ARM/vqadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vqadd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vqadd.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vqadd.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vqadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqadds8:
 ;CHECK: vqadd.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vqadds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vqadds8(<8 x i8>* %A, <
 define <4 x i16> @vqadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqadds16:
 ;CHECK: vqadd.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vqadds16(<4 x i16>* %A
 define <2 x i32> @vqadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqadds32:
 ;CHECK: vqadd.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vqadds32(<2 x i32>* %A
 define <1 x i64> @vqadds64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqadds64:
 ;CHECK: vqadd.s64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -39,8 +39,8 @@ define <1 x i64> @vqadds64(<1 x i64>* %A
 define <8 x i8> @vqaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqaddu8:
 ;CHECK: vqadd.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vqaddu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -48,8 +48,8 @@ define <8 x i8> @vqaddu8(<8 x i8>* %A, <
 define <4 x i16> @vqaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqaddu16:
 ;CHECK: vqadd.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vqaddu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -57,8 +57,8 @@ define <4 x i16> @vqaddu16(<4 x i16>* %A
 define <2 x i32> @vqaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqaddu32:
 ;CHECK: vqadd.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vqaddu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -66,8 +66,8 @@ define <2 x i32> @vqaddu32(<2 x i32>* %A
 define <1 x i64> @vqaddu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqaddu64:
 ;CHECK: vqadd.u64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vqaddu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -75,8 +75,8 @@ define <1 x i64> @vqaddu64(<1 x i64>* %A
 define <16 x i8> @vqaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqaddQs8:
 ;CHECK: vqadd.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vqadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -84,8 +84,8 @@ define <16 x i8> @vqaddQs8(<16 x i8>* %A
 define <8 x i16> @vqaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqaddQs16:
 ;CHECK: vqadd.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -93,8 +93,8 @@ define <8 x i16> @vqaddQs16(<8 x i16>* %
 define <4 x i32> @vqaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqaddQs32:
 ;CHECK: vqadd.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -102,8 +102,8 @@ define <4 x i32> @vqaddQs32(<4 x i32>* %
 define <2 x i64> @vqaddQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqaddQs64:
 ;CHECK: vqadd.s64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -111,8 +111,8 @@ define <2 x i64> @vqaddQs64(<2 x i64>* %
 define <16 x i8> @vqaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqaddQu8:
 ;CHECK: vqadd.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vqaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -120,8 +120,8 @@ define <16 x i8> @vqaddQu8(<16 x i8>* %A
 define <8 x i16> @vqaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqaddQu16:
 ;CHECK: vqadd.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vqaddu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -129,8 +129,8 @@ define <8 x i16> @vqaddQu16(<8 x i16>* %
 define <4 x i32> @vqaddQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqaddQu32:
 ;CHECK: vqadd.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vqaddu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -138,8 +138,8 @@ define <4 x i32> @vqaddQu32(<4 x i32>* %
 define <2 x i64> @vqaddQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqaddQu64:
 ;CHECK: vqadd.u64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/ARM/vqdmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vqdmul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vqdmul.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vqdmul.ll Fri Feb 27 15:17:42 2015
@@ -5,8 +5,8 @@ target triple = "thumbv7-elf"
 define <4 x i16> @vqdmulhs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqdmulhs16:
 ;CHECK: vqdmulh.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -14,8 +14,8 @@ define <4 x i16> @vqdmulhs16(<4 x i16>*
 define <2 x i32> @vqdmulhs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqdmulhs32:
 ;CHECK: vqdmulh.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -23,8 +23,8 @@ define <2 x i32> @vqdmulhs32(<2 x i32>*
 define <8 x i16> @vqdmulhQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqdmulhQs16:
 ;CHECK: vqdmulh.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -32,8 +32,8 @@ define <8 x i16> @vqdmulhQs16(<8 x i16>*
 define <4 x i32> @vqdmulhQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqdmulhQs32:
 ;CHECK: vqdmulh.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -83,8 +83,8 @@ declare <4 x i32> @llvm.arm.neon.vqdmulh
 define <4 x i16> @vqrdmulhs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqrdmulhs16:
 ;CHECK: vqrdmulh.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -92,8 +92,8 @@ define <4 x i16> @vqrdmulhs16(<4 x i16>*
 define <2 x i32> @vqrdmulhs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqrdmulhs32:
 ;CHECK: vqrdmulh.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -101,8 +101,8 @@ define <2 x i32> @vqrdmulhs32(<2 x i32>*
 define <8 x i16> @vqrdmulhQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqrdmulhQs16:
 ;CHECK: vqrdmulh.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -110,8 +110,8 @@ define <8 x i16> @vqrdmulhQs16(<8 x i16>
 define <4 x i32> @vqrdmulhQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqrdmulhQs32:
 ;CHECK: vqrdmulh.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -161,8 +161,8 @@ declare <4 x i32> @llvm.arm.neon.vqrdmul
 define <4 x i32> @vqdmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqdmulls16:
 ;CHECK: vqdmull.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -170,8 +170,8 @@ define <4 x i32> @vqdmulls16(<4 x i16>*
 define <2 x i64> @vqdmulls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqdmulls32:
 ;CHECK: vqdmull.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -200,9 +200,9 @@ declare <2 x i64>  @llvm.arm.neon.vqdmul
 define <4 x i32> @vqdmlals16_natural(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
 ;CHECK-LABEL: vqdmlals16_natural:
 ;CHECK: vqdmlal.s16
-        %tmp1 = load <4 x i32>* %A
-        %tmp2 = load <4 x i16>* %B
-        %tmp3 = load <4 x i16>* %C
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp3 = load <4 x i16>, <4 x i16>* %C
         %tmp4 = call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %tmp2, <4 x i16> %tmp3)
         %tmp5 = call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp4)
         ret <4 x i32> %tmp5
@@ -211,9 +211,9 @@ define <4 x i32> @vqdmlals16_natural(<4
 define <2 x i64> @vqdmlals32_natural(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
 ;CHECK-LABEL: vqdmlals32_natural:
 ;CHECK: vqdmlal.s32
-        %tmp1 = load <2 x i64>* %A
-        %tmp2 = load <2 x i32>* %B
-        %tmp3 = load <2 x i32>* %C
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp3 = load <2 x i32>, <2 x i32>* %C
         %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp2, <2 x i32> %tmp3)
         %tmp5 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp4)
         ret <2 x i64> %tmp5
@@ -245,9 +245,9 @@ declare <2 x i64>  @llvm.arm.neon.vqadds
 define <4 x i32> @vqdmlsls16_natural(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
 ;CHECK-LABEL: vqdmlsls16_natural:
 ;CHECK: vqdmlsl.s16
-        %tmp1 = load <4 x i32>* %A
-        %tmp2 = load <4 x i16>* %B
-        %tmp3 = load <4 x i16>* %C
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp3 = load <4 x i16>, <4 x i16>* %C
         %tmp4 = call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %tmp2, <4 x i16> %tmp3)
         %tmp5 = call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp4)
         ret <4 x i32> %tmp5
@@ -256,9 +256,9 @@ define <4 x i32> @vqdmlsls16_natural(<4
 define <2 x i64> @vqdmlsls32_natural(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
 ;CHECK-LABEL: vqdmlsls32_natural:
 ;CHECK: vqdmlsl.s32
-        %tmp1 = load <2 x i64>* %A
-        %tmp2 = load <2 x i32>* %B
-        %tmp3 = load <2 x i32>* %C
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp3 = load <2 x i32>, <2 x i32>* %C
         %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp2, <2 x i32> %tmp3)
         %tmp5 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp4)
         ret <2 x i64> %tmp5

Modified: llvm/trunk/test/CodeGen/ARM/vqshl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vqshl.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vqshl.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vqshl.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vqshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqshls8:
 ;CHECK: vqshl.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vqshls8(<8 x i8>* %A, <
 define <4 x i16> @vqshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqshls16:
 ;CHECK: vqshl.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vqshls16(<4 x i16>* %A
 define <2 x i32> @vqshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqshls32:
 ;CHECK: vqshl.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vqshls32(<2 x i32>* %A
 define <1 x i64> @vqshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqshls64:
 ;CHECK: vqshl.s64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -39,8 +39,8 @@ define <1 x i64> @vqshls64(<1 x i64>* %A
 define <8 x i8> @vqshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqshlu8:
 ;CHECK: vqshl.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -48,8 +48,8 @@ define <8 x i8> @vqshlu8(<8 x i8>* %A, <
 define <4 x i16> @vqshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqshlu16:
 ;CHECK: vqshl.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -57,8 +57,8 @@ define <4 x i16> @vqshlu16(<4 x i16>* %A
 define <2 x i32> @vqshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqshlu32:
 ;CHECK: vqshl.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -66,8 +66,8 @@ define <2 x i32> @vqshlu32(<2 x i32>* %A
 define <1 x i64> @vqshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqshlu64:
 ;CHECK: vqshl.u64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -75,8 +75,8 @@ define <1 x i64> @vqshlu64(<1 x i64>* %A
 define <16 x i8> @vqshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqshlQs8:
 ;CHECK: vqshl.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -84,8 +84,8 @@ define <16 x i8> @vqshlQs8(<16 x i8>* %A
 define <8 x i16> @vqshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqshlQs16:
 ;CHECK: vqshl.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -93,8 +93,8 @@ define <8 x i16> @vqshlQs16(<8 x i16>* %
 define <4 x i32> @vqshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqshlQs32:
 ;CHECK: vqshl.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -102,8 +102,8 @@ define <4 x i32> @vqshlQs32(<4 x i32>* %
 define <2 x i64> @vqshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqshlQs64:
 ;CHECK: vqshl.s64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -111,8 +111,8 @@ define <2 x i64> @vqshlQs64(<2 x i64>* %
 define <16 x i8> @vqshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqshlQu8:
 ;CHECK: vqshl.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -120,8 +120,8 @@ define <16 x i8> @vqshlQu8(<16 x i8>* %A
 define <8 x i16> @vqshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqshlQu16:
 ;CHECK: vqshl.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -129,8 +129,8 @@ define <8 x i16> @vqshlQu16(<8 x i16>* %
 define <4 x i32> @vqshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqshlQu32:
 ;CHECK: vqshl.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -138,8 +138,8 @@ define <4 x i32> @vqshlQu32(<4 x i32>* %
 define <2 x i64> @vqshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqshlQu64:
 ;CHECK: vqshl.u64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -147,7 +147,7 @@ define <2 x i64> @vqshlQu64(<2 x i64>* %
 define <8 x i8> @vqshls_n8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vqshls_n8:
 ;CHECK: vqshl.s8{{.*#7}}
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
 	ret <8 x i8> %tmp2
 }
@@ -155,7 +155,7 @@ define <8 x i8> @vqshls_n8(<8 x i8>* %A)
 define <4 x i16> @vqshls_n16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqshls_n16:
 ;CHECK: vqshl.s16{{.*#15}}
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
 	ret <4 x i16> %tmp2
 }
@@ -163,7 +163,7 @@ define <4 x i16> @vqshls_n16(<4 x i16>*
 define <2 x i32> @vqshls_n32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqshls_n32:
 ;CHECK: vqshl.s32{{.*#31}}
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
 	ret <2 x i32> %tmp2
 }
@@ -171,7 +171,7 @@ define <2 x i32> @vqshls_n32(<2 x i32>*
 define <1 x i64> @vqshls_n64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqshls_n64:
 ;CHECK: vqshl.s64{{.*#63}}
-	%tmp1 = load <1 x i64>* %A
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
 	ret <1 x i64> %tmp2
 }
@@ -179,7 +179,7 @@ define <1 x i64> @vqshls_n64(<1 x i64>*
 define <8 x i8> @vqshlu_n8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vqshlu_n8:
 ;CHECK: vqshl.u8{{.*#7}}
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
 	ret <8 x i8> %tmp2
 }
@@ -187,7 +187,7 @@ define <8 x i8> @vqshlu_n8(<8 x i8>* %A)
 define <4 x i16> @vqshlu_n16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqshlu_n16:
 ;CHECK: vqshl.u16{{.*#15}}
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
 	ret <4 x i16> %tmp2
 }
@@ -195,7 +195,7 @@ define <4 x i16> @vqshlu_n16(<4 x i16>*
 define <2 x i32> @vqshlu_n32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqshlu_n32:
 ;CHECK: vqshl.u32{{.*#31}}
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
 	ret <2 x i32> %tmp2
 }
@@ -203,7 +203,7 @@ define <2 x i32> @vqshlu_n32(<2 x i32>*
 define <1 x i64> @vqshlu_n64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqshlu_n64:
 ;CHECK: vqshl.u64{{.*#63}}
-	%tmp1 = load <1 x i64>* %A
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
 	ret <1 x i64> %tmp2
 }
@@ -211,7 +211,7 @@ define <1 x i64> @vqshlu_n64(<1 x i64>*
 define <8 x i8> @vqshlsu_n8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vqshlsu_n8:
 ;CHECK: vqshlu.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftsu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
 	ret <8 x i8> %tmp2
 }
@@ -219,7 +219,7 @@ define <8 x i8> @vqshlsu_n8(<8 x i8>* %A
 define <4 x i16> @vqshlsu_n16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqshlsu_n16:
 ;CHECK: vqshlu.s16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftsu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
 	ret <4 x i16> %tmp2
 }
@@ -227,7 +227,7 @@ define <4 x i16> @vqshlsu_n16(<4 x i16>*
 define <2 x i32> @vqshlsu_n32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqshlsu_n32:
 ;CHECK: vqshlu.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftsu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
 	ret <2 x i32> %tmp2
 }
@@ -235,7 +235,7 @@ define <2 x i32> @vqshlsu_n32(<2 x i32>*
 define <1 x i64> @vqshlsu_n64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqshlsu_n64:
 ;CHECK: vqshlu.s64
-	%tmp1 = load <1 x i64>* %A
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vqshiftsu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
 	ret <1 x i64> %tmp2
 }
@@ -243,7 +243,7 @@ define <1 x i64> @vqshlsu_n64(<1 x i64>*
 define <16 x i8> @vqshlQs_n8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vqshlQs_n8:
 ;CHECK: vqshl.s8{{.*#7}}
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
 	ret <16 x i8> %tmp2
 }
@@ -251,7 +251,7 @@ define <16 x i8> @vqshlQs_n8(<16 x i8>*
 define <8 x i16> @vqshlQs_n16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqshlQs_n16:
 ;CHECK: vqshl.s16{{.*#15}}
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
 	ret <8 x i16> %tmp2
 }
@@ -259,7 +259,7 @@ define <8 x i16> @vqshlQs_n16(<8 x i16>*
 define <4 x i32> @vqshlQs_n32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqshlQs_n32:
 ;CHECK: vqshl.s32{{.*#31}}
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
 	ret <4 x i32> %tmp2
 }
@@ -267,7 +267,7 @@ define <4 x i32> @vqshlQs_n32(<4 x i32>*
 define <2 x i64> @vqshlQs_n64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqshlQs_n64:
 ;CHECK: vqshl.s64{{.*#63}}
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
 	ret <2 x i64> %tmp2
 }
@@ -275,7 +275,7 @@ define <2 x i64> @vqshlQs_n64(<2 x i64>*
 define <16 x i8> @vqshlQu_n8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vqshlQu_n8:
 ;CHECK: vqshl.u8{{.*#7}}
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
 	ret <16 x i8> %tmp2
 }
@@ -283,7 +283,7 @@ define <16 x i8> @vqshlQu_n8(<16 x i8>*
 define <8 x i16> @vqshlQu_n16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqshlQu_n16:
 ;CHECK: vqshl.u16{{.*#15}}
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
 	ret <8 x i16> %tmp2
 }
@@ -291,7 +291,7 @@ define <8 x i16> @vqshlQu_n16(<8 x i16>*
 define <4 x i32> @vqshlQu_n32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqshlQu_n32:
 ;CHECK: vqshl.u32{{.*#31}}
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
 	ret <4 x i32> %tmp2
 }
@@ -299,7 +299,7 @@ define <4 x i32> @vqshlQu_n32(<4 x i32>*
 define <2 x i64> @vqshlQu_n64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqshlQu_n64:
 ;CHECK: vqshl.u64{{.*#63}}
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
 	ret <2 x i64> %tmp2
 }
@@ -307,7 +307,7 @@ define <2 x i64> @vqshlQu_n64(<2 x i64>*
 define <16 x i8> @vqshlQsu_n8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vqshlQsu_n8:
 ;CHECK: vqshlu.s8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vqshiftsu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
 	ret <16 x i8> %tmp2
 }
@@ -315,7 +315,7 @@ define <16 x i8> @vqshlQsu_n8(<16 x i8>*
 define <8 x i16> @vqshlQsu_n16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqshlQsu_n16:
 ;CHECK: vqshlu.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vqshiftsu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
 	ret <8 x i16> %tmp2
 }
@@ -323,7 +323,7 @@ define <8 x i16> @vqshlQsu_n16(<8 x i16>
 define <4 x i32> @vqshlQsu_n32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqshlQsu_n32:
 ;CHECK: vqshlu.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vqshiftsu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
 	ret <4 x i32> %tmp2
 }
@@ -331,7 +331,7 @@ define <4 x i32> @vqshlQsu_n32(<4 x i32>
 define <2 x i64> @vqshlQsu_n64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqshlQsu_n64:
 ;CHECK: vqshlu.s64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
 	ret <2 x i64> %tmp2
 }
@@ -369,8 +369,8 @@ declare <2 x i64> @llvm.arm.neon.vqshift
 define <8 x i8> @vqrshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqrshls8:
 ;CHECK: vqrshl.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vqrshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -378,8 +378,8 @@ define <8 x i8> @vqrshls8(<8 x i8>* %A,
 define <4 x i16> @vqrshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqrshls16:
 ;CHECK: vqrshl.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vqrshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -387,8 +387,8 @@ define <4 x i16> @vqrshls16(<4 x i16>* %
 define <2 x i32> @vqrshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqrshls32:
 ;CHECK: vqrshl.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vqrshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -396,8 +396,8 @@ define <2 x i32> @vqrshls32(<2 x i32>* %
 define <1 x i64> @vqrshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqrshls64:
 ;CHECK: vqrshl.s64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vqrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -405,8 +405,8 @@ define <1 x i64> @vqrshls64(<1 x i64>* %
 define <8 x i8> @vqrshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqrshlu8:
 ;CHECK: vqrshl.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vqrshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -414,8 +414,8 @@ define <8 x i8> @vqrshlu8(<8 x i8>* %A,
 define <4 x i16> @vqrshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqrshlu16:
 ;CHECK: vqrshl.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vqrshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -423,8 +423,8 @@ define <4 x i16> @vqrshlu16(<4 x i16>* %
 define <2 x i32> @vqrshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqrshlu32:
 ;CHECK: vqrshl.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vqrshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -432,8 +432,8 @@ define <2 x i32> @vqrshlu32(<2 x i32>* %
 define <1 x i64> @vqrshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqrshlu64:
 ;CHECK: vqrshl.u64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vqrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -441,8 +441,8 @@ define <1 x i64> @vqrshlu64(<1 x i64>* %
 define <16 x i8> @vqrshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqrshlQs8:
 ;CHECK: vqrshl.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vqrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -450,8 +450,8 @@ define <16 x i8> @vqrshlQs8(<16 x i8>* %
 define <8 x i16> @vqrshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqrshlQs16:
 ;CHECK: vqrshl.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vqrshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -459,8 +459,8 @@ define <8 x i16> @vqrshlQs16(<8 x i16>*
 define <4 x i32> @vqrshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqrshlQs32:
 ;CHECK: vqrshl.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vqrshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -468,8 +468,8 @@ define <4 x i32> @vqrshlQs32(<4 x i32>*
 define <2 x i64> @vqrshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqrshlQs64:
 ;CHECK: vqrshl.s64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vqrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -477,8 +477,8 @@ define <2 x i64> @vqrshlQs64(<2 x i64>*
 define <16 x i8> @vqrshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqrshlQu8:
 ;CHECK: vqrshl.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vqrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -486,8 +486,8 @@ define <16 x i8> @vqrshlQu8(<16 x i8>* %
 define <8 x i16> @vqrshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqrshlQu16:
 ;CHECK: vqrshl.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vqrshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -495,8 +495,8 @@ define <8 x i16> @vqrshlQu16(<8 x i16>*
 define <4 x i32> @vqrshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqrshlQu32:
 ;CHECK: vqrshl.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vqrshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -504,8 +504,8 @@ define <4 x i32> @vqrshlQu32(<4 x i32>*
 define <2 x i64> @vqrshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqrshlQu64:
 ;CHECK: vqrshl.u64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vqrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/ARM/vqshrn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vqshrn.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vqshrn.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vqshrn.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 define <8 x i8> @vqshrns8(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqshrns8:
 ;CHECK: vqshrn.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
 	ret <8 x i8> %tmp2
 }
@@ -11,7 +11,7 @@ define <8 x i8> @vqshrns8(<8 x i16>* %A)
 define <4 x i16> @vqshrns16(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqshrns16:
 ;CHECK: vqshrn.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
 	ret <4 x i16> %tmp2
 }
@@ -19,7 +19,7 @@ define <4 x i16> @vqshrns16(<4 x i32>* %
 define <2 x i32> @vqshrns32(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqshrns32:
 ;CHECK: vqshrn.s64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftns.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
 	ret <2 x i32> %tmp2
 }
@@ -27,7 +27,7 @@ define <2 x i32> @vqshrns32(<2 x i64>* %
 define <8 x i8> @vqshrnu8(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqshrnu8:
 ;CHECK: vqshrn.u16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
 	ret <8 x i8> %tmp2
 }
@@ -35,7 +35,7 @@ define <8 x i8> @vqshrnu8(<8 x i16>* %A)
 define <4 x i16> @vqshrnu16(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqshrnu16:
 ;CHECK: vqshrn.u32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
 	ret <4 x i16> %tmp2
 }
@@ -43,7 +43,7 @@ define <4 x i16> @vqshrnu16(<4 x i32>* %
 define <2 x i32> @vqshrnu32(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqshrnu32:
 ;CHECK: vqshrn.u64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftnu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
 	ret <2 x i32> %tmp2
 }
@@ -51,7 +51,7 @@ define <2 x i32> @vqshrnu32(<2 x i64>* %
 define <8 x i8> @vqshruns8(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqshruns8:
 ;CHECK: vqshrun.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
 	ret <8 x i8> %tmp2
 }
@@ -59,7 +59,7 @@ define <8 x i8> @vqshruns8(<8 x i16>* %A
 define <4 x i16> @vqshruns16(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqshruns16:
 ;CHECK: vqshrun.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
 	ret <4 x i16> %tmp2
 }
@@ -67,7 +67,7 @@ define <4 x i16> @vqshruns16(<4 x i32>*
 define <2 x i32> @vqshruns32(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqshruns32:
 ;CHECK: vqshrun.s64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftnsu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
 	ret <2 x i32> %tmp2
 }
@@ -87,7 +87,7 @@ declare <2 x i32> @llvm.arm.neon.vqshift
 define <8 x i8> @vqrshrns8(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqrshrns8:
 ;CHECK: vqrshrn.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
 	ret <8 x i8> %tmp2
 }
@@ -95,7 +95,7 @@ define <8 x i8> @vqrshrns8(<8 x i16>* %A
 define <4 x i16> @vqrshrns16(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqrshrns16:
 ;CHECK: vqrshrn.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
 	ret <4 x i16> %tmp2
 }
@@ -103,7 +103,7 @@ define <4 x i16> @vqrshrns16(<4 x i32>*
 define <2 x i32> @vqrshrns32(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqrshrns32:
 ;CHECK: vqrshrn.s64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftns.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
 	ret <2 x i32> %tmp2
 }
@@ -111,7 +111,7 @@ define <2 x i32> @vqrshrns32(<2 x i64>*
 define <8 x i8> @vqrshrnu8(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqrshrnu8:
 ;CHECK: vqrshrn.u16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
 	ret <8 x i8> %tmp2
 }
@@ -119,7 +119,7 @@ define <8 x i8> @vqrshrnu8(<8 x i16>* %A
 define <4 x i16> @vqrshrnu16(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqrshrnu16:
 ;CHECK: vqrshrn.u32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
 	ret <4 x i16> %tmp2
 }
@@ -127,7 +127,7 @@ define <4 x i16> @vqrshrnu16(<4 x i32>*
 define <2 x i32> @vqrshrnu32(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqrshrnu32:
 ;CHECK: vqrshrn.u64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftnu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
 	ret <2 x i32> %tmp2
 }
@@ -135,7 +135,7 @@ define <2 x i32> @vqrshrnu32(<2 x i64>*
 define <8 x i8> @vqrshruns8(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vqrshruns8:
 ;CHECK: vqrshrun.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
 	ret <8 x i8> %tmp2
 }
@@ -143,7 +143,7 @@ define <8 x i8> @vqrshruns8(<8 x i16>* %
 define <4 x i16> @vqrshruns16(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vqrshruns16:
 ;CHECK: vqrshrun.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
 	ret <4 x i16> %tmp2
 }
@@ -151,7 +151,7 @@ define <4 x i16> @vqrshruns16(<4 x i32>*
 define <2 x i32> @vqrshruns32(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vqrshruns32:
 ;CHECK: vqrshrun.s64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftnsu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
 	ret <2 x i32> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/vqsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vqsub.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vqsub.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vqsub.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vqsubs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqsubs8:
 ;CHECK: vqsub.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vqsubs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vqsubs8(<8 x i8>* %A, <
 define <4 x i16> @vqsubs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqsubs16:
 ;CHECK: vqsub.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vqsubs16(<4 x i16>* %A
 define <2 x i32> @vqsubs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqsubs32:
 ;CHECK: vqsub.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vqsubs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vqsubs32(<2 x i32>* %A
 define <1 x i64> @vqsubs64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqsubs64:
 ;CHECK: vqsub.s64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vqsubs.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -39,8 +39,8 @@ define <1 x i64> @vqsubs64(<1 x i64>* %A
 define <8 x i8> @vqsubu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqsubu8:
 ;CHECK: vqsub.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vqsubu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -48,8 +48,8 @@ define <8 x i8> @vqsubu8(<8 x i8>* %A, <
 define <4 x i16> @vqsubu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqsubu16:
 ;CHECK: vqsub.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vqsubu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -57,8 +57,8 @@ define <4 x i16> @vqsubu16(<4 x i16>* %A
 define <2 x i32> @vqsubu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqsubu32:
 ;CHECK: vqsub.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vqsubu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -66,8 +66,8 @@ define <2 x i32> @vqsubu32(<2 x i32>* %A
 define <1 x i64> @vqsubu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqsubu64:
 ;CHECK: vqsub.u64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vqsubu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -75,8 +75,8 @@ define <1 x i64> @vqsubu64(<1 x i64>* %A
 define <16 x i8> @vqsubQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqsubQs8:
 ;CHECK: vqsub.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vqsubs.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -84,8 +84,8 @@ define <16 x i8> @vqsubQs8(<16 x i8>* %A
 define <8 x i16> @vqsubQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqsubQs16:
 ;CHECK: vqsub.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -93,8 +93,8 @@ define <8 x i16> @vqsubQs16(<8 x i16>* %
 define <4 x i32> @vqsubQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqsubQs32:
 ;CHECK: vqsub.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -102,8 +102,8 @@ define <4 x i32> @vqsubQs32(<4 x i32>* %
 define <2 x i64> @vqsubQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqsubQs64:
 ;CHECK: vqsub.s64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -111,8 +111,8 @@ define <2 x i64> @vqsubQs64(<2 x i64>* %
 define <16 x i8> @vqsubQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vqsubQu8:
 ;CHECK: vqsub.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vqsubu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -120,8 +120,8 @@ define <16 x i8> @vqsubQu8(<16 x i8>* %A
 define <8 x i16> @vqsubQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vqsubQu16:
 ;CHECK: vqsub.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vqsubu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -129,8 +129,8 @@ define <8 x i16> @vqsubQu16(<8 x i16>* %
 define <4 x i32> @vqsubQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vqsubQu32:
 ;CHECK: vqsub.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vqsubu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -138,8 +138,8 @@ define <4 x i32> @vqsubQu32(<4 x i32>* %
 define <2 x i64> @vqsubQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vqsubQu64:
 ;CHECK: vqsub.u64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/ARM/vrec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vrec.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vrec.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vrec.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 define <2 x i32> @vrecpei32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vrecpei32:
 ;CHECK: vrecpe.u32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vrecpe.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp2
 }
@@ -11,7 +11,7 @@ define <2 x i32> @vrecpei32(<2 x i32>* %
 define <4 x i32> @vrecpeQi32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vrecpeQi32:
 ;CHECK: vrecpe.u32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vrecpe.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp2
 }
@@ -19,7 +19,7 @@ define <4 x i32> @vrecpeQi32(<4 x i32>*
 define <2 x float> @vrecpef32(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: vrecpef32:
 ;CHECK: vrecpe.f32
-	%tmp1 = load <2 x float>* %A
+	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp2 = call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %tmp1)
 	ret <2 x float> %tmp2
 }
@@ -27,7 +27,7 @@ define <2 x float> @vrecpef32(<2 x float
 define <4 x float> @vrecpeQf32(<4 x float>* %A) nounwind {
 ;CHECK-LABEL: vrecpeQf32:
 ;CHECK: vrecpe.f32
-	%tmp1 = load <4 x float>* %A
+	%tmp1 = load <4 x float>, <4 x float>* %A
 	%tmp2 = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %tmp1)
 	ret <4 x float> %tmp2
 }
@@ -41,8 +41,8 @@ declare <4 x float> @llvm.arm.neon.vrecp
 define <2 x float> @vrecpsf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vrecpsf32:
 ;CHECK: vrecps.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.arm.neon.vrecps.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -50,8 +50,8 @@ define <2 x float> @vrecpsf32(<2 x float
 define <4 x float> @vrecpsQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vrecpsQf32:
 ;CHECK: vrecps.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
@@ -62,7 +62,7 @@ declare <4 x float> @llvm.arm.neon.vrecp
 define <2 x i32> @vrsqrtei32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vrsqrtei32:
 ;CHECK: vrsqrte.u32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vrsqrte.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp2
 }
@@ -70,7 +70,7 @@ define <2 x i32> @vrsqrtei32(<2 x i32>*
 define <4 x i32> @vrsqrteQi32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vrsqrteQi32:
 ;CHECK: vrsqrte.u32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vrsqrte.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp2
 }
@@ -78,7 +78,7 @@ define <4 x i32> @vrsqrteQi32(<4 x i32>*
 define <2 x float> @vrsqrtef32(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: vrsqrtef32:
 ;CHECK: vrsqrte.f32
-	%tmp1 = load <2 x float>* %A
+	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp2 = call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %tmp1)
 	ret <2 x float> %tmp2
 }
@@ -86,7 +86,7 @@ define <2 x float> @vrsqrtef32(<2 x floa
 define <4 x float> @vrsqrteQf32(<4 x float>* %A) nounwind {
 ;CHECK-LABEL: vrsqrteQf32:
 ;CHECK: vrsqrte.f32
-	%tmp1 = load <4 x float>* %A
+	%tmp1 = load <4 x float>, <4 x float>* %A
 	%tmp2 = call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %tmp1)
 	ret <4 x float> %tmp2
 }
@@ -100,8 +100,8 @@ declare <4 x float> @llvm.arm.neon.vrsqr
 define <2 x float> @vrsqrtsf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vrsqrtsf32:
 ;CHECK: vrsqrts.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.arm.neon.vrsqrts.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -109,8 +109,8 @@ define <2 x float> @vrsqrtsf32(<2 x floa
 define <4 x float> @vrsqrtsQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vrsqrtsQf32:
 ;CHECK: vrsqrts.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/ARM/vrev.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vrev.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vrev.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vrev.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64D8:
 ;CHECK: vrev64.8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
 	ret <8 x i8> %tmp2
 }
@@ -11,7 +11,7 @@ define <8 x i8> @test_vrev64D8(<8 x i8>*
 define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64D16:
 ;CHECK: vrev64.16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
 	ret <4 x i16> %tmp2
 }
@@ -19,7 +19,7 @@ define <4 x i16> @test_vrev64D16(<4 x i1
 define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64D32:
 ;CHECK: vrev64.32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
 	ret <2 x i32> %tmp2
 }
@@ -27,7 +27,7 @@ define <2 x i32> @test_vrev64D32(<2 x i3
 define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64Df:
 ;CHECK: vrev64.32
-	%tmp1 = load <2 x float>* %A
+	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
 	ret <2 x float> %tmp2
 }
@@ -35,7 +35,7 @@ define <2 x float> @test_vrev64Df(<2 x f
 define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64Q8:
 ;CHECK: vrev64.8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
 	ret <16 x i8> %tmp2
 }
@@ -43,7 +43,7 @@ define <16 x i8> @test_vrev64Q8(<16 x i8
 define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64Q16:
 ;CHECK: vrev64.16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
 	ret <8 x i16> %tmp2
 }
@@ -51,7 +51,7 @@ define <8 x i16> @test_vrev64Q16(<8 x i1
 define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64Q32:
 ;CHECK: vrev64.32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
 	ret <4 x i32> %tmp2
 }
@@ -59,7 +59,7 @@ define <4 x i32> @test_vrev64Q32(<4 x i3
 define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64Qf:
 ;CHECK: vrev64.32
-	%tmp1 = load <4 x float>* %A
+	%tmp1 = load <4 x float>, <4 x float>* %A
 	%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
 	ret <4 x float> %tmp2
 }
@@ -67,7 +67,7 @@ define <4 x float> @test_vrev64Qf(<4 x f
 define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev32D8:
 ;CHECK: vrev32.8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
 	ret <8 x i8> %tmp2
 }
@@ -75,7 +75,7 @@ define <8 x i8> @test_vrev32D8(<8 x i8>*
 define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: test_vrev32D16:
 ;CHECK: vrev32.16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
 	ret <4 x i16> %tmp2
 }
@@ -83,7 +83,7 @@ define <4 x i16> @test_vrev32D16(<4 x i1
 define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev32Q8:
 ;CHECK: vrev32.8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
 	ret <16 x i8> %tmp2
 }
@@ -91,7 +91,7 @@ define <16 x i8> @test_vrev32Q8(<16 x i8
 define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: test_vrev32Q16:
 ;CHECK: vrev32.16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
 	ret <8 x i16> %tmp2
 }
@@ -99,7 +99,7 @@ define <8 x i16> @test_vrev32Q16(<8 x i1
 define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev16D8:
 ;CHECK: vrev16.8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
 	ret <8 x i8> %tmp2
 }
@@ -107,7 +107,7 @@ define <8 x i8> @test_vrev16D8(<8 x i8>*
 define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev16Q8:
 ;CHECK: vrev16.8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
 	ret <16 x i8> %tmp2
 }
@@ -117,7 +117,7 @@ define <16 x i8> @test_vrev16Q8(<16 x i8
 define <8 x i8> @test_vrev64D8_undef(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64D8_undef:
 ;CHECK: vrev64.8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 undef, i32 undef, i32 4, i32 3, i32 2, i32 1, i32 0>
 	ret <8 x i8> %tmp2
 }
@@ -125,7 +125,7 @@ define <8 x i8> @test_vrev64D8_undef(<8
 define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: test_vrev32Q16_undef:
 ;CHECK: vrev32.16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
 	ret <8 x i16> %tmp2
 }
@@ -136,7 +136,7 @@ define void @test_with_vcombine(<4 x flo
 ;CHECK-LABEL: test_with_vcombine:
 ;CHECK-NOT: vext
 ;CHECK: vrev64.32
-  %tmp1 = load <4 x float>* %v, align 16
+  %tmp1 = load <4 x float>, <4 x float>* %v, align 16
   %tmp2 = bitcast <4 x float> %tmp1 to <2 x double>
   %tmp3 = extractelement <2 x double> %tmp2, i32 0
   %tmp4 = bitcast double %tmp3 to <2 x float>
@@ -155,7 +155,7 @@ define void @test_vrev64(<4 x i16>* noca
 ; CHECK: vst1.32
 entry:
   %0 = bitcast <4 x i16>* %source to <8 x i16>*
-  %tmp2 = load <8 x i16>* %0, align 4
+  %tmp2 = load <8 x i16>, <8 x i16>* %0, align 4
   %tmp3 = extractelement <8 x i16> %tmp2, i32 6
   %tmp5 = insertelement <2 x i16> undef, i16 %tmp3, i32 0
   %tmp9 = extractelement <8 x i16> %tmp2, i32 5
@@ -171,7 +171,7 @@ define void @float_vrev64(float* nocaptu
 ; CHECK: vrev64.32
 entry:
   %0 = bitcast float* %source to <4 x float>*
-  %tmp2 = load <4 x float>* %0, align 4
+  %tmp2 = load <4 x float>, <4 x float>* %0, align 4
   %tmp5 = shufflevector <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x float> %tmp2, <4 x i32> <i32 0, i32 7, i32 0, i32 0>
   %arrayidx8 = getelementptr inbounds <4 x float>, <4 x float>* %dest, i32 11
   store <4 x float> %tmp5, <4 x float>* %arrayidx8, align 4

Modified: llvm/trunk/test/CodeGen/ARM/vselect_imax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vselect_imax.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vselect_imax.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vselect_imax.ll Fri Feb 27 15:17:42 2015
@@ -18,8 +18,8 @@ define void @vmax_v4i32(<4 x i32>* %m, <
 ; CHECK-LABEL: func_blend10:
 define void @func_blend10(%T0_10* %loadaddr, %T0_10* %loadaddr2,
                            %T1_10* %blend, %T0_10* %storeaddr) {
-  %v0 = load %T0_10* %loadaddr
-  %v1 = load %T0_10* %loadaddr2
+  %v0 = load %T0_10, %T0_10* %loadaddr
+  %v1 = load %T0_10, %T0_10* %loadaddr2
   %c = icmp slt %T0_10 %v0, %v1
 ; CHECK: vbsl
 ; CHECK: vbsl
@@ -34,8 +34,8 @@ define void @func_blend10(%T0_10* %loada
 ; CHECK-LABEL: func_blend14:
 define void @func_blend14(%T0_14* %loadaddr, %T0_14* %loadaddr2,
                            %T1_14* %blend, %T0_14* %storeaddr) {
-  %v0 = load %T0_14* %loadaddr
-  %v1 = load %T0_14* %loadaddr2
+  %v0 = load %T0_14, %T0_14* %loadaddr
+  %v1 = load %T0_14, %T0_14* %loadaddr2
   %c = icmp slt %T0_14 %v0, %v1
 ; CHECK: vbsl
 ; CHECK: vbsl
@@ -52,8 +52,8 @@ define void @func_blend15(%T0_15* %loada
                            %T1_15* %blend, %T0_15* %storeaddr) {
 ; CHECK: vbsl
 ; CHECK: vbsl
-  %v0 = load %T0_15* %loadaddr
-  %v1 = load %T0_15* %loadaddr2
+  %v0 = load %T0_15, %T0_15* %loadaddr
+  %v1 = load %T0_15, %T0_15* %loadaddr2
   %c = icmp slt %T0_15 %v0, %v1
 ; COST: func_blend15
 ; COST: cost of 82 {{.*}} select
@@ -68,8 +68,8 @@ define void @func_blend18(%T0_18* %loada
                            %T1_18* %blend, %T0_18* %storeaddr) {
 ; CHECK: vbsl
 ; CHECK: vbsl
-  %v0 = load %T0_18* %loadaddr
-  %v1 = load %T0_18* %loadaddr2
+  %v0 = load %T0_18, %T0_18* %loadaddr
+  %v1 = load %T0_18, %T0_18* %loadaddr2
   %c = icmp slt %T0_18 %v0, %v1
 ; COST: func_blend18
 ; COST: cost of 19 {{.*}} select
@@ -86,8 +86,8 @@ define void @func_blend19(%T0_19* %loada
 ; CHECK: vbsl
 ; CHECK: vbsl
 ; CHECK: vbsl
-  %v0 = load %T0_19* %loadaddr
-  %v1 = load %T0_19* %loadaddr2
+  %v0 = load %T0_19, %T0_19* %loadaddr
+  %v1 = load %T0_19, %T0_19* %loadaddr2
   %c = icmp slt %T0_19 %v0, %v1
 ; COST: func_blend19
 ; COST: cost of 50 {{.*}} select
@@ -108,8 +108,8 @@ define void @func_blend20(%T0_20* %loada
 ; CHECK: vbsl
 ; CHECK: vbsl
 ; CHECK: vbsl
-  %v0 = load %T0_20* %loadaddr
-  %v1 = load %T0_20* %loadaddr2
+  %v0 = load %T0_20, %T0_20* %loadaddr
+  %v1 = load %T0_20, %T0_20* %loadaddr2
   %c = icmp slt %T0_20 %v0, %v1
 ; COST: func_blend20
 ; COST: cost of 100 {{.*}} select

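[Note: a minimal standalone sketch, not taken from the patch, illustrating the syntax change every hunk above applies. In the updated textual IR, `load` spells the result (pointee) type as an explicit first operand, followed by the typed pointer operand; the old single-operand form is what each `-` line removes. The function name @load_example is hypothetical.]

	define <8 x i8> @load_example(<8 x i8>* %p) nounwind {
	  %v = load <8 x i8>, <8 x i8>* %p	; new form: result type, then typed pointer
	  %w = shl <8 x i8> %v, %v
	  ret <8 x i8> %w
	}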
Modified: llvm/trunk/test/CodeGen/ARM/vshift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vshift.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vshift.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vshift.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vshls8:
 ;CHECK: vshl.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shl <8 x i8> %tmp1, %tmp2
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vshls8(<8 x i8>* %A, <8
 define <4 x i16> @vshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vshls16:
 ;CHECK: vshl.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = shl <4 x i16> %tmp1, %tmp2
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vshls16(<4 x i16>* %A,
 define <2 x i32> @vshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vshls32:
 ;CHECK: vshl.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = shl <2 x i32> %tmp1, %tmp2
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vshls32(<2 x i32>* %A,
 define <1 x i64> @vshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vshls64:
 ;CHECK: vshl.u64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = shl <1 x i64> %tmp1, %tmp2
 	ret <1 x i64> %tmp3
 }
@@ -39,7 +39,7 @@ define <1 x i64> @vshls64(<1 x i64>* %A,
 define <8 x i8> @vshli8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshli8:
 ;CHECK: vshl.i8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 	ret <8 x i8> %tmp2
 }
@@ -47,7 +47,7 @@ define <8 x i8> @vshli8(<8 x i8>* %A) no
 define <4 x i16> @vshli16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshli16:
 ;CHECK: vshl.i16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = shl <4 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15 >
 	ret <4 x i16> %tmp2
 }
@@ -55,7 +55,7 @@ define <4 x i16> @vshli16(<4 x i16>* %A)
 define <2 x i32> @vshli32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshli32:
 ;CHECK: vshl.i32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = shl <2 x i32> %tmp1, < i32 31, i32 31 >
 	ret <2 x i32> %tmp2
 }
@@ -63,7 +63,7 @@ define <2 x i32> @vshli32(<2 x i32>* %A)
 define <1 x i64> @vshli64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: vshli64:
 ;CHECK: vshl.i64
-	%tmp1 = load <1 x i64>* %A
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = shl <1 x i64> %tmp1, < i64 63 >
 	ret <1 x i64> %tmp2
 }
@@ -71,8 +71,8 @@ define <1 x i64> @vshli64(<1 x i64>* %A)
 define <16 x i8> @vshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vshlQs8:
 ;CHECK: vshl.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = shl <16 x i8> %tmp1, %tmp2
 	ret <16 x i8> %tmp3
 }
@@ -80,8 +80,8 @@ define <16 x i8> @vshlQs8(<16 x i8>* %A,
 define <8 x i16> @vshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vshlQs16:
 ;CHECK: vshl.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = shl <8 x i16> %tmp1, %tmp2
 	ret <8 x i16> %tmp3
 }
@@ -89,8 +89,8 @@ define <8 x i16> @vshlQs16(<8 x i16>* %A
 define <4 x i32> @vshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vshlQs32:
 ;CHECK: vshl.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = shl <4 x i32> %tmp1, %tmp2
 	ret <4 x i32> %tmp3
 }
@@ -98,8 +98,8 @@ define <4 x i32> @vshlQs32(<4 x i32>* %A
 define <2 x i64> @vshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vshlQs64:
 ;CHECK: vshl.u64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = shl <2 x i64> %tmp1, %tmp2
 	ret <2 x i64> %tmp3
 }
@@ -107,7 +107,7 @@ define <2 x i64> @vshlQs64(<2 x i64>* %A
 define <16 x i8> @vshlQi8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshlQi8:
 ;CHECK: vshl.i8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = shl <16 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 	ret <16 x i8> %tmp2
 }
@@ -115,7 +115,7 @@ define <16 x i8> @vshlQi8(<16 x i8>* %A)
 define <8 x i16> @vshlQi16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshlQi16:
 ;CHECK: vshl.i16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = shl <8 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
 	ret <8 x i16> %tmp2
 }
@@ -123,7 +123,7 @@ define <8 x i16> @vshlQi16(<8 x i16>* %A
 define <4 x i32> @vshlQi32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshlQi32:
 ;CHECK: vshl.i32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = shl <4 x i32> %tmp1, < i32 31, i32 31, i32 31, i32 31 >
 	ret <4 x i32> %tmp2
 }
@@ -131,7 +131,7 @@ define <4 x i32> @vshlQi32(<4 x i32>* %A
 define <2 x i64> @vshlQi64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vshlQi64:
 ;CHECK: vshl.i64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = shl <2 x i64> %tmp1, < i64 63, i64 63 >
 	ret <2 x i64> %tmp2
 }
@@ -140,8 +140,8 @@ define <8 x i8> @vlshru8(<8 x i8>* %A, <
 ;CHECK-LABEL: vlshru8:
 ;CHECK: vneg.s8
 ;CHECK: vshl.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = lshr <8 x i8> %tmp1, %tmp2
 	ret <8 x i8> %tmp3
 }
@@ -150,8 +150,8 @@ define <4 x i16> @vlshru16(<4 x i16>* %A
 ;CHECK-LABEL: vlshru16:
 ;CHECK: vneg.s16
 ;CHECK: vshl.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = lshr <4 x i16> %tmp1, %tmp2
 	ret <4 x i16> %tmp3
 }
@@ -160,8 +160,8 @@ define <2 x i32> @vlshru32(<2 x i32>* %A
 ;CHECK-LABEL: vlshru32:
 ;CHECK: vneg.s32
 ;CHECK: vshl.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = lshr <2 x i32> %tmp1, %tmp2
 	ret <2 x i32> %tmp3
 }
@@ -170,8 +170,8 @@ define <1 x i64> @vlshru64(<1 x i64>* %A
 ;CHECK-LABEL: vlshru64:
 ;CHECK: vsub.i64
 ;CHECK: vshl.u64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = lshr <1 x i64> %tmp1, %tmp2
 	ret <1 x i64> %tmp3
 }
@@ -179,7 +179,7 @@ define <1 x i64> @vlshru64(<1 x i64>* %A
 define <8 x i8> @vlshri8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vlshri8:
 ;CHECK: vshr.u8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = lshr <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 	ret <8 x i8> %tmp2
 }
@@ -187,7 +187,7 @@ define <8 x i8> @vlshri8(<8 x i8>* %A) n
 define <4 x i16> @vlshri16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vlshri16:
 ;CHECK: vshr.u16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = lshr <4 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15 >
 	ret <4 x i16> %tmp2
 }
@@ -195,7 +195,7 @@ define <4 x i16> @vlshri16(<4 x i16>* %A
 define <2 x i32> @vlshri32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vlshri32:
 ;CHECK: vshr.u32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = lshr <2 x i32> %tmp1, < i32 31, i32 31 >
 	ret <2 x i32> %tmp2
 }
@@ -203,7 +203,7 @@ define <2 x i32> @vlshri32(<2 x i32>* %A
 define <1 x i64> @vlshri64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: vlshri64:
 ;CHECK: vshr.u64
-	%tmp1 = load <1 x i64>* %A
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = lshr <1 x i64> %tmp1, < i64 63 >
 	ret <1 x i64> %tmp2
 }
@@ -212,8 +212,8 @@ define <16 x i8> @vlshrQu8(<16 x i8>* %A
 ;CHECK-LABEL: vlshrQu8:
 ;CHECK: vneg.s8
 ;CHECK: vshl.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = lshr <16 x i8> %tmp1, %tmp2
 	ret <16 x i8> %tmp3
 }
@@ -222,8 +222,8 @@ define <8 x i16> @vlshrQu16(<8 x i16>* %
 ;CHECK-LABEL: vlshrQu16:
 ;CHECK: vneg.s16
 ;CHECK: vshl.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = lshr <8 x i16> %tmp1, %tmp2
 	ret <8 x i16> %tmp3
 }
@@ -232,8 +232,8 @@ define <4 x i32> @vlshrQu32(<4 x i32>* %
 ;CHECK-LABEL: vlshrQu32:
 ;CHECK: vneg.s32
 ;CHECK: vshl.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = lshr <4 x i32> %tmp1, %tmp2
 	ret <4 x i32> %tmp3
 }
@@ -242,8 +242,8 @@ define <2 x i64> @vlshrQu64(<2 x i64>* %
 ;CHECK-LABEL: vlshrQu64:
 ;CHECK: vsub.i64
 ;CHECK: vshl.u64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = lshr <2 x i64> %tmp1, %tmp2
 	ret <2 x i64> %tmp3
 }
@@ -251,7 +251,7 @@ define <2 x i64> @vlshrQu64(<2 x i64>* %
 define <16 x i8> @vlshrQi8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vlshrQi8:
 ;CHECK: vshr.u8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = lshr <16 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 	ret <16 x i8> %tmp2
 }
@@ -259,7 +259,7 @@ define <16 x i8> @vlshrQi8(<16 x i8>* %A
 define <8 x i16> @vlshrQi16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vlshrQi16:
 ;CHECK: vshr.u16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = lshr <8 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
 	ret <8 x i16> %tmp2
 }
@@ -267,7 +267,7 @@ define <8 x i16> @vlshrQi16(<8 x i16>* %
 define <4 x i32> @vlshrQi32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vlshrQi32:
 ;CHECK: vshr.u32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = lshr <4 x i32> %tmp1, < i32 31, i32 31, i32 31, i32 31 >
 	ret <4 x i32> %tmp2
 }
@@ -275,7 +275,7 @@ define <4 x i32> @vlshrQi32(<4 x i32>* %
 define <2 x i64> @vlshrQi64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vlshrQi64:
 ;CHECK: vshr.u64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = lshr <2 x i64> %tmp1, < i64 63, i64 63 >
 	ret <2 x i64> %tmp2
 }
@@ -291,8 +291,8 @@ define <8 x i8> @vashrs8(<8 x i8>* %A, <
 ;CHECK-LABEL: vashrs8:
 ;CHECK: vneg.s8
 ;CHECK: vshl.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = ashr <8 x i8> %tmp1, %tmp2
 	ret <8 x i8> %tmp3
 }
@@ -301,8 +301,8 @@ define <4 x i16> @vashrs16(<4 x i16>* %A
 ;CHECK-LABEL: vashrs16:
 ;CHECK: vneg.s16
 ;CHECK: vshl.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = ashr <4 x i16> %tmp1, %tmp2
 	ret <4 x i16> %tmp3
 }
@@ -311,8 +311,8 @@ define <2 x i32> @vashrs32(<2 x i32>* %A
 ;CHECK-LABEL: vashrs32:
 ;CHECK: vneg.s32
 ;CHECK: vshl.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = ashr <2 x i32> %tmp1, %tmp2
 	ret <2 x i32> %tmp3
 }
@@ -321,8 +321,8 @@ define <1 x i64> @vashrs64(<1 x i64>* %A
 ;CHECK-LABEL: vashrs64:
 ;CHECK: vsub.i64
 ;CHECK: vshl.s64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = ashr <1 x i64> %tmp1, %tmp2
 	ret <1 x i64> %tmp3
 }
@@ -330,7 +330,7 @@ define <1 x i64> @vashrs64(<1 x i64>* %A
 define <8 x i8> @vashri8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vashri8:
 ;CHECK: vshr.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = ashr <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 	ret <8 x i8> %tmp2
 }
@@ -338,7 +338,7 @@ define <8 x i8> @vashri8(<8 x i8>* %A) n
 define <4 x i16> @vashri16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vashri16:
 ;CHECK: vshr.s16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = ashr <4 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15 >
 	ret <4 x i16> %tmp2
 }
@@ -346,7 +346,7 @@ define <4 x i16> @vashri16(<4 x i16>* %A
 define <2 x i32> @vashri32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vashri32:
 ;CHECK: vshr.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = ashr <2 x i32> %tmp1, < i32 31, i32 31 >
 	ret <2 x i32> %tmp2
 }
@@ -354,7 +354,7 @@ define <2 x i32> @vashri32(<2 x i32>* %A
 define <1 x i64> @vashri64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: vashri64:
 ;CHECK: vshr.s64
-	%tmp1 = load <1 x i64>* %A
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = ashr <1 x i64> %tmp1, < i64 63 >
 	ret <1 x i64> %tmp2
 }
@@ -363,8 +363,8 @@ define <16 x i8> @vashrQs8(<16 x i8>* %A
 ;CHECK-LABEL: vashrQs8:
 ;CHECK: vneg.s8
 ;CHECK: vshl.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = ashr <16 x i8> %tmp1, %tmp2
 	ret <16 x i8> %tmp3
 }
@@ -373,8 +373,8 @@ define <8 x i16> @vashrQs16(<8 x i16>* %
 ;CHECK-LABEL: vashrQs16:
 ;CHECK: vneg.s16
 ;CHECK: vshl.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = ashr <8 x i16> %tmp1, %tmp2
 	ret <8 x i16> %tmp3
 }
@@ -383,8 +383,8 @@ define <4 x i32> @vashrQs32(<4 x i32>* %
 ;CHECK-LABEL: vashrQs32:
 ;CHECK: vneg.s32
 ;CHECK: vshl.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = ashr <4 x i32> %tmp1, %tmp2
 	ret <4 x i32> %tmp3
 }
@@ -393,8 +393,8 @@ define <2 x i64> @vashrQs64(<2 x i64>* %
 ;CHECK-LABEL: vashrQs64:
 ;CHECK: vsub.i64
 ;CHECK: vshl.s64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = ashr <2 x i64> %tmp1, %tmp2
 	ret <2 x i64> %tmp3
 }
@@ -402,7 +402,7 @@ define <2 x i64> @vashrQs64(<2 x i64>* %
 define <16 x i8> @vashrQi8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vashrQi8:
 ;CHECK: vshr.s8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = ashr <16 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 	ret <16 x i8> %tmp2
 }
@@ -410,7 +410,7 @@ define <16 x i8> @vashrQi8(<16 x i8>* %A
 define <8 x i16> @vashrQi16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vashrQi16:
 ;CHECK: vshr.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = ashr <8 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
 	ret <8 x i16> %tmp2
 }
@@ -418,7 +418,7 @@ define <8 x i16> @vashrQi16(<8 x i16>* %
 define <4 x i32> @vashrQi32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vashrQi32:
 ;CHECK: vshr.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = ashr <4 x i32> %tmp1, < i32 31, i32 31, i32 31, i32 31 >
 	ret <4 x i32> %tmp2
 }
@@ -426,7 +426,7 @@ define <4 x i32> @vashrQi32(<4 x i32>* %
 define <2 x i64> @vashrQi64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vashrQi64:
 ;CHECK: vshr.s64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = ashr <2 x i64> %tmp1, < i64 63, i64 63 >
 	ret <2 x i64> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/vshiftins.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vshiftins.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vshiftins.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vshiftins.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vsli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsli8:
 ;CHECK: vsli.8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vsli8(<8 x i8>* %A, <8
 define <4 x i16> @vsli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsli16:
 ;CHECK: vsli.16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vsli16(<4 x i16>* %A,
 define <2 x i32> @vsli32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsli32:
 ;CHECK: vsli.32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> < i32 31, i32 31 >)
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vsli32(<2 x i32>* %A,
 define <1 x i64> @vsli64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vsli64:
 ;CHECK: vsli.64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, <1 x i64> < i64 63 >)
 	ret <1 x i64> %tmp3
 }
@@ -39,8 +39,8 @@ define <1 x i64> @vsli64(<1 x i64>* %A,
 define <16 x i8> @vsliQ8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsliQ8:
 ;CHECK: vsli.8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
 	ret <16 x i8> %tmp3
 }
@@ -48,8 +48,8 @@ define <16 x i8> @vsliQ8(<16 x i8>* %A,
 define <8 x i16> @vsliQ16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsliQ16:
 ;CHECK: vsli.16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
 	ret <8 x i16> %tmp3
 }
@@ -57,8 +57,8 @@ define <8 x i16> @vsliQ16(<8 x i16>* %A,
 define <4 x i32> @vsliQ32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsliQ32:
 ;CHECK: vsli.32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
 	ret <4 x i32> %tmp3
 }
@@ -66,8 +66,8 @@ define <4 x i32> @vsliQ32(<4 x i32>* %A,
 define <2 x i64> @vsliQ64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vsliQ64:
 ;CHECK: vsli.64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, <2 x i64> < i64 63, i64 63 >)
 	ret <2 x i64> %tmp3
 }
@@ -75,8 +75,8 @@ define <2 x i64> @vsliQ64(<2 x i64>* %A,
 define <8 x i8> @vsri8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsri8:
 ;CHECK: vsri.8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <8 x i8> %tmp3
 }
@@ -84,8 +84,8 @@ define <8 x i8> @vsri8(<8 x i8>* %A, <8
 define <4 x i16> @vsri16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsri16:
 ;CHECK: vsri.16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <4 x i16> %tmp3
 }
@@ -93,8 +93,8 @@ define <4 x i16> @vsri16(<4 x i16>* %A,
 define <2 x i32> @vsri32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsri32:
 ;CHECK: vsri.32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
 	ret <2 x i32> %tmp3
 }
@@ -102,8 +102,8 @@ define <2 x i32> @vsri32(<2 x i32>* %A,
 define <1 x i64> @vsri64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vsri64:
 ;CHECK: vsri.64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, <1 x i64> < i64 -64 >)
 	ret <1 x i64> %tmp3
 }
@@ -111,8 +111,8 @@ define <1 x i64> @vsri64(<1 x i64>* %A,
 define <16 x i8> @vsriQ8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsriQ8:
 ;CHECK: vsri.8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <16 x i8> %tmp3
 }
@@ -120,8 +120,8 @@ define <16 x i8> @vsriQ8(<16 x i8>* %A,
 define <8 x i16> @vsriQ16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsriQ16:
 ;CHECK: vsri.16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <8 x i16> %tmp3
 }
@@ -129,8 +129,8 @@ define <8 x i16> @vsriQ16(<8 x i16>* %A,
 define <4 x i32> @vsriQ32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsriQ32:
 ;CHECK: vsri.32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
 	ret <4 x i32> %tmp3
 }
@@ -138,8 +138,8 @@ define <4 x i32> @vsriQ32(<4 x i32>* %A,
 define <2 x i64> @vsriQ64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vsriQ64:
 ;CHECK: vsri.64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
 	ret <2 x i64> %tmp3
 }

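[Note: a hedged, self-contained sketch of the same updated load syntax feeding a NEON shift-insert intrinsic, as in the vshiftins.ll hunks above; the declare line is added so the snippet parses on its own, and @sli_example is a hypothetical name, not a test from this patch.]

	declare <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8>, <8 x i8>, <8 x i8>)

	define <8 x i8> @sli_example(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	  %tmp1 = load <8 x i8>, <8 x i8>* %A	; pointee type now written explicitly
	  %tmp2 = load <8 x i8>, <8 x i8>* %B
	  %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>)
	  ret <8 x i8> %tmp3
	}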
Modified: llvm/trunk/test/CodeGen/ARM/vshl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vshl.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vshl.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vshl.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vshls8:
 ;CHECK: vshl.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vshls8(<8 x i8>* %A, <8
 define <4 x i16> @vshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vshls16:
 ;CHECK: vshl.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vshls16(<4 x i16>* %A,
 define <2 x i32> @vshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vshls32:
 ;CHECK: vshl.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vshls32(<2 x i32>* %A,
 define <1 x i64> @vshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vshls64:
 ;CHECK: vshl.s64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -39,8 +39,8 @@ define <1 x i64> @vshls64(<1 x i64>* %A,
 define <8 x i8> @vshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vshlu8:
 ;CHECK: vshl.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -48,8 +48,8 @@ define <8 x i8> @vshlu8(<8 x i8>* %A, <8
 define <4 x i16> @vshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vshlu16:
 ;CHECK: vshl.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -57,8 +57,8 @@ define <4 x i16> @vshlu16(<4 x i16>* %A,
 define <2 x i32> @vshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vshlu32:
 ;CHECK: vshl.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -66,8 +66,8 @@ define <2 x i32> @vshlu32(<2 x i32>* %A,
 define <1 x i64> @vshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vshlu64:
 ;CHECK: vshl.u64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -75,8 +75,8 @@ define <1 x i64> @vshlu64(<1 x i64>* %A,
 define <16 x i8> @vshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vshlQs8:
 ;CHECK: vshl.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -84,8 +84,8 @@ define <16 x i8> @vshlQs8(<16 x i8>* %A,
 define <8 x i16> @vshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vshlQs16:
 ;CHECK: vshl.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -93,8 +93,8 @@ define <8 x i16> @vshlQs16(<8 x i16>* %A
 define <4 x i32> @vshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vshlQs32:
 ;CHECK: vshl.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -102,8 +102,8 @@ define <4 x i32> @vshlQs32(<4 x i32>* %A
 define <2 x i64> @vshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vshlQs64:
 ;CHECK: vshl.s64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -111,8 +111,8 @@ define <2 x i64> @vshlQs64(<2 x i64>* %A
 define <16 x i8> @vshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vshlQu8:
 ;CHECK: vshl.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -120,8 +120,8 @@ define <16 x i8> @vshlQu8(<16 x i8>* %A,
 define <8 x i16> @vshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vshlQu16:
 ;CHECK: vshl.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -129,8 +129,8 @@ define <8 x i16> @vshlQu16(<8 x i16>* %A
 define <4 x i32> @vshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vshlQu32:
 ;CHECK: vshl.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -138,8 +138,8 @@ define <4 x i32> @vshlQu32(<4 x i32>* %A
 define <2 x i64> @vshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vshlQu64:
 ;CHECK: vshl.u64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -150,7 +150,7 @@ define <2 x i64> @vshlQu64(<2 x i64>* %A
 define <8 x i8> @vshli8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshli8:
 ;CHECK: vshl.i8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
 	ret <8 x i8> %tmp2
 }
@@ -158,7 +158,7 @@ define <8 x i8> @vshli8(<8 x i8>* %A) no
 define <4 x i16> @vshli16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshli16:
 ;CHECK: vshl.i16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
 	ret <4 x i16> %tmp2
 }
@@ -166,7 +166,7 @@ define <4 x i16> @vshli16(<4 x i16>* %A)
 define <2 x i32> @vshli32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshli32:
 ;CHECK: vshl.i32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
 	ret <2 x i32> %tmp2
 }
@@ -174,7 +174,7 @@ define <2 x i32> @vshli32(<2 x i32>* %A)
 define <1 x i64> @vshli64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: vshli64:
 ;CHECK: vshl.i64
-	%tmp1 = load <1 x i64>* %A
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
 	ret <1 x i64> %tmp2
 }
@@ -182,7 +182,7 @@ define <1 x i64> @vshli64(<1 x i64>* %A)
 define <16 x i8> @vshlQi8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshlQi8:
 ;CHECK: vshl.i8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
 	ret <16 x i8> %tmp2
 }
@@ -190,7 +190,7 @@ define <16 x i8> @vshlQi8(<16 x i8>* %A)
 define <8 x i16> @vshlQi16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshlQi16:
 ;CHECK: vshl.i16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
 	ret <8 x i16> %tmp2
 }
@@ -198,7 +198,7 @@ define <8 x i16> @vshlQi16(<8 x i16>* %A
 define <4 x i32> @vshlQi32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshlQi32:
 ;CHECK: vshl.i32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
 	ret <4 x i32> %tmp2
 }
@@ -206,7 +206,7 @@ define <4 x i32> @vshlQi32(<4 x i32>* %A
 define <2 x i64> @vshlQi64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vshlQi64:
 ;CHECK: vshl.i64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
 	ret <2 x i64> %tmp2
 }
@@ -216,7 +216,7 @@ define <2 x i64> @vshlQi64(<2 x i64>* %A
 define <8 x i8> @vshrs8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshrs8:
 ;CHECK: vshr.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <8 x i8> %tmp2
 }
@@ -224,7 +224,7 @@ define <8 x i8> @vshrs8(<8 x i8>* %A) no
 define <4 x i16> @vshrs16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshrs16:
 ;CHECK: vshr.s16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <4 x i16> %tmp2
 }
@@ -232,7 +232,7 @@ define <4 x i16> @vshrs16(<4 x i16>* %A)
 define <2 x i32> @vshrs32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshrs32:
 ;CHECK: vshr.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
 	ret <2 x i32> %tmp2
 }
@@ -240,7 +240,7 @@ define <2 x i32> @vshrs32(<2 x i32>* %A)
 define <1 x i64> @vshrs64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: vshrs64:
 ;CHECK: vshr.s64
-	%tmp1 = load <1 x i64>* %A
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
 	ret <1 x i64> %tmp2
 }
@@ -248,7 +248,7 @@ define <1 x i64> @vshrs64(<1 x i64>* %A)
 define <8 x i8> @vshru8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshru8:
 ;CHECK: vshr.u8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <8 x i8> %tmp2
 }
@@ -256,7 +256,7 @@ define <8 x i8> @vshru8(<8 x i8>* %A) no
 define <4 x i16> @vshru16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshru16:
 ;CHECK: vshr.u16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <4 x i16> %tmp2
 }
@@ -264,7 +264,7 @@ define <4 x i16> @vshru16(<4 x i16>* %A)
 define <2 x i32> @vshru32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshru32:
 ;CHECK: vshr.u32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
 	ret <2 x i32> %tmp2
 }
@@ -272,7 +272,7 @@ define <2 x i32> @vshru32(<2 x i32>* %A)
 define <1 x i64> @vshru64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: vshru64:
 ;CHECK: vshr.u64
-	%tmp1 = load <1 x i64>* %A
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
 	ret <1 x i64> %tmp2
 }
@@ -280,7 +280,7 @@ define <1 x i64> @vshru64(<1 x i64>* %A)
 define <16 x i8> @vshrQs8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshrQs8:
 ;CHECK: vshr.s8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <16 x i8> %tmp2
 }
@@ -288,7 +288,7 @@ define <16 x i8> @vshrQs8(<16 x i8>* %A)
 define <8 x i16> @vshrQs16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshrQs16:
 ;CHECK: vshr.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <8 x i16> %tmp2
 }
@@ -296,7 +296,7 @@ define <8 x i16> @vshrQs16(<8 x i16>* %A
 define <4 x i32> @vshrQs32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshrQs32:
 ;CHECK: vshr.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
 	ret <4 x i32> %tmp2
 }
@@ -304,7 +304,7 @@ define <4 x i32> @vshrQs32(<4 x i32>* %A
 define <2 x i64> @vshrQs64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vshrQs64:
 ;CHECK: vshr.s64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
 	ret <2 x i64> %tmp2
 }
@@ -312,7 +312,7 @@ define <2 x i64> @vshrQs64(<2 x i64>* %A
 define <16 x i8> @vshrQu8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshrQu8:
 ;CHECK: vshr.u8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <16 x i8> %tmp2
 }
@@ -320,7 +320,7 @@ define <16 x i8> @vshrQu8(<16 x i8>* %A)
 define <8 x i16> @vshrQu16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshrQu16:
 ;CHECK: vshr.u16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <8 x i16> %tmp2
 }
@@ -328,7 +328,7 @@ define <8 x i16> @vshrQu16(<8 x i16>* %A
 define <4 x i32> @vshrQu32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshrQu32:
 ;CHECK: vshr.u32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
 	ret <4 x i32> %tmp2
 }
@@ -336,7 +336,7 @@ define <4 x i32> @vshrQu32(<4 x i32>* %A
 define <2 x i64> @vshrQu64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vshrQu64:
 ;CHECK: vshr.u64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
 	ret <2 x i64> %tmp2
 }
@@ -364,8 +364,8 @@ declare <2 x i64> @llvm.arm.neon.vshiftu
 define <8 x i8> @vrshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vrshls8:
 ;CHECK: vrshl.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -373,8 +373,8 @@ define <8 x i8> @vrshls8(<8 x i8>* %A, <
 define <4 x i16> @vrshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vrshls16:
 ;CHECK: vrshl.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -382,8 +382,8 @@ define <4 x i16> @vrshls16(<4 x i16>* %A
 define <2 x i32> @vrshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vrshls32:
 ;CHECK: vrshl.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -391,8 +391,8 @@ define <2 x i32> @vrshls32(<2 x i32>* %A
 define <1 x i64> @vrshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vrshls64:
 ;CHECK: vrshl.s64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -400,8 +400,8 @@ define <1 x i64> @vrshls64(<1 x i64>* %A
 define <8 x i8> @vrshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vrshlu8:
 ;CHECK: vrshl.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -409,8 +409,8 @@ define <8 x i8> @vrshlu8(<8 x i8>* %A, <
 define <4 x i16> @vrshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vrshlu16:
 ;CHECK: vrshl.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -418,8 +418,8 @@ define <4 x i16> @vrshlu16(<4 x i16>* %A
 define <2 x i32> @vrshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vrshlu32:
 ;CHECK: vrshl.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -427,8 +427,8 @@ define <2 x i32> @vrshlu32(<2 x i32>* %A
 define <1 x i64> @vrshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vrshlu64:
 ;CHECK: vrshl.u64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
 	ret <1 x i64> %tmp3
 }
@@ -436,8 +436,8 @@ define <1 x i64> @vrshlu64(<1 x i64>* %A
 define <16 x i8> @vrshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vrshlQs8:
 ;CHECK: vrshl.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -445,8 +445,8 @@ define <16 x i8> @vrshlQs8(<16 x i8>* %A
 define <8 x i16> @vrshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vrshlQs16:
 ;CHECK: vrshl.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -454,8 +454,8 @@ define <8 x i16> @vrshlQs16(<8 x i16>* %
 define <4 x i32> @vrshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vrshlQs32:
 ;CHECK: vrshl.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -463,8 +463,8 @@ define <4 x i32> @vrshlQs32(<4 x i32>* %
 define <2 x i64> @vrshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vrshlQs64:
 ;CHECK: vrshl.s64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -472,8 +472,8 @@ define <2 x i64> @vrshlQs64(<2 x i64>* %
 define <16 x i8> @vrshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vrshlQu8:
 ;CHECK: vrshl.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -481,8 +481,8 @@ define <16 x i8> @vrshlQu8(<16 x i8>* %A
 define <8 x i16> @vrshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vrshlQu16:
 ;CHECK: vrshl.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -490,8 +490,8 @@ define <8 x i16> @vrshlQu16(<8 x i16>* %
 define <4 x i32> @vrshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vrshlQu32:
 ;CHECK: vrshl.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -499,8 +499,8 @@ define <4 x i32> @vrshlQu32(<4 x i32>* %
 define <2 x i64> @vrshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vrshlQu64:
 ;CHECK: vrshl.u64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -508,7 +508,7 @@ define <2 x i64> @vrshlQu64(<2 x i64>* %
 define <8 x i8> @vrshrs8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vrshrs8:
 ;CHECK: vrshr.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <8 x i8> %tmp2
 }
@@ -516,7 +516,7 @@ define <8 x i8> @vrshrs8(<8 x i8>* %A) n
 define <4 x i16> @vrshrs16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vrshrs16:
 ;CHECK: vrshr.s16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <4 x i16> %tmp2
 }
@@ -524,7 +524,7 @@ define <4 x i16> @vrshrs16(<4 x i16>* %A
 define <2 x i32> @vrshrs32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vrshrs32:
 ;CHECK: vrshr.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
 	ret <2 x i32> %tmp2
 }
@@ -532,7 +532,7 @@ define <2 x i32> @vrshrs32(<2 x i32>* %A
 define <1 x i64> @vrshrs64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: vrshrs64:
 ;CHECK: vrshr.s64
-	%tmp1 = load <1 x i64>* %A
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
 	ret <1 x i64> %tmp2
 }
@@ -540,7 +540,7 @@ define <1 x i64> @vrshrs64(<1 x i64>* %A
 define <8 x i8> @vrshru8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vrshru8:
 ;CHECK: vrshr.u8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <8 x i8> %tmp2
 }
@@ -548,7 +548,7 @@ define <8 x i8> @vrshru8(<8 x i8>* %A) n
 define <4 x i16> @vrshru16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vrshru16:
 ;CHECK: vrshr.u16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <4 x i16> %tmp2
 }
@@ -556,7 +556,7 @@ define <4 x i16> @vrshru16(<4 x i16>* %A
 define <2 x i32> @vrshru32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vrshru32:
 ;CHECK: vrshr.u32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
 	ret <2 x i32> %tmp2
 }
@@ -564,7 +564,7 @@ define <2 x i32> @vrshru32(<2 x i32>* %A
 define <1 x i64> @vrshru64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: vrshru64:
 ;CHECK: vrshr.u64
-	%tmp1 = load <1 x i64>* %A
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
 	ret <1 x i64> %tmp2
 }
@@ -572,7 +572,7 @@ define <1 x i64> @vrshru64(<1 x i64>* %A
 define <16 x i8> @vrshrQs8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vrshrQs8:
 ;CHECK: vrshr.s8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <16 x i8> %tmp2
 }
@@ -580,7 +580,7 @@ define <16 x i8> @vrshrQs8(<16 x i8>* %A
 define <8 x i16> @vrshrQs16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vrshrQs16:
 ;CHECK: vrshr.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <8 x i16> %tmp2
 }
@@ -588,7 +588,7 @@ define <8 x i16> @vrshrQs16(<8 x i16>* %
 define <4 x i32> @vrshrQs32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vrshrQs32:
 ;CHECK: vrshr.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
 	ret <4 x i32> %tmp2
 }
@@ -596,7 +596,7 @@ define <4 x i32> @vrshrQs32(<4 x i32>* %
 define <2 x i64> @vrshrQs64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vrshrQs64:
 ;CHECK: vrshr.s64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
 	ret <2 x i64> %tmp2
 }
@@ -604,7 +604,7 @@ define <2 x i64> @vrshrQs64(<2 x i64>* %
 define <16 x i8> @vrshrQu8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vrshrQu8:
 ;CHECK: vrshr.u8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <16 x i8> %tmp2
 }
@@ -612,7 +612,7 @@ define <16 x i8> @vrshrQu8(<16 x i8>* %A
 define <8 x i16> @vrshrQu16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vrshrQu16:
 ;CHECK: vrshr.u16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <8 x i16> %tmp2
 }
@@ -620,7 +620,7 @@ define <8 x i16> @vrshrQu16(<8 x i16>* %
 define <4 x i32> @vrshrQu32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vrshrQu32:
 ;CHECK: vrshr.u32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
 	ret <4 x i32> %tmp2
 }
@@ -628,7 +628,7 @@ define <4 x i32> @vrshrQu32(<4 x i32>* %
 define <2 x i64> @vrshrQu64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vrshrQu64:
 ;CHECK: vrshr.u64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
 	ret <2 x i64> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/vshll.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vshll.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vshll.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vshll.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 define <8 x i16> @vshlls8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshlls8:
 ;CHECK: vshll.s8
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %sext = sext <8 x i8> %tmp1 to <8 x i16>
         %shift = shl <8 x i16> %sext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
         ret <8 x i16> %shift
@@ -12,7 +12,7 @@ define <8 x i16> @vshlls8(<8 x i8>* %A)
 define <4 x i32> @vshlls16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshlls16:
 ;CHECK: vshll.s16
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %sext = sext <4 x i16> %tmp1 to <4 x i32>
         %shift = shl <4 x i32> %sext, <i32 15, i32 15, i32 15, i32 15>
         ret <4 x i32> %shift
@@ -21,7 +21,7 @@ define <4 x i32> @vshlls16(<4 x i16>* %A
 define <2 x i64> @vshlls32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshlls32:
 ;CHECK: vshll.s32
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %sext = sext <2 x i32> %tmp1 to <2 x i64>
         %shift = shl <2 x i64> %sext, <i64 31, i64 31>
         ret <2 x i64> %shift
@@ -30,7 +30,7 @@ define <2 x i64> @vshlls32(<2 x i32>* %A
 define <8 x i16> @vshllu8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshllu8:
 ;CHECK: vshll.u8
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %zext = zext <8 x i8> %tmp1 to <8 x i16>
         %shift = shl <8 x i16> %zext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
         ret <8 x i16> %shift
@@ -39,7 +39,7 @@ define <8 x i16> @vshllu8(<8 x i8>* %A)
 define <4 x i32> @vshllu16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshllu16:
 ;CHECK: vshll.u16
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %zext = zext <4 x i16> %tmp1 to <4 x i32>
         %shift = shl <4 x i32> %zext, <i32 15, i32 15, i32 15, i32 15>
         ret <4 x i32> %shift
@@ -48,7 +48,7 @@ define <4 x i32> @vshllu16(<4 x i16>* %A
 define <2 x i64> @vshllu32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshllu32:
 ;CHECK: vshll.u32
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %zext = zext <2 x i32> %tmp1 to <2 x i64>
         %shift = shl <2 x i64> %zext, <i64 31, i64 31>
         ret <2 x i64> %shift
@@ -59,7 +59,7 @@ define <2 x i64> @vshllu32(<2 x i32>* %A
 define <8 x i16> @vshlli8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshlli8:
 ;CHECK: vshll.i8
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %sext = sext <8 x i8> %tmp1 to <8 x i16>
         %shift = shl <8 x i16> %sext, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
         ret <8 x i16> %shift
@@ -68,7 +68,7 @@ define <8 x i16> @vshlli8(<8 x i8>* %A)
 define <4 x i32> @vshlli16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshlli16:
 ;CHECK: vshll.i16
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %zext = zext <4 x i16> %tmp1 to <4 x i32>
         %shift = shl <4 x i32> %zext, <i32 16, i32 16, i32 16, i32 16>
         ret <4 x i32> %shift
@@ -77,7 +77,7 @@ define <4 x i32> @vshlli16(<4 x i16>* %A
 define <2 x i64> @vshlli32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshlli32:
 ;CHECK: vshll.i32
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %zext = zext <2 x i32> %tmp1 to <2 x i64>
         %shift = shl <2 x i64> %zext, <i64 32, i64 32>
         ret <2 x i64> %shift
@@ -89,7 +89,7 @@ define <8 x i16> @vshllu8_bad(<8 x i8>*
 ; CHECK-LABEL: vshllu8_bad:
 ; CHECK: vmovl.u8
 ; CHECK: vshl.i16
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %zext = zext <8 x i8> %tmp1 to <8 x i16>
         %shift = shl <8 x i16> %zext, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
         ret <8 x i16> %shift
@@ -99,7 +99,7 @@ define <4 x i32> @vshlls16_bad(<4 x i16>
 ; CHECK-LABEL: vshlls16_bad:
 ; CHECK: vmovl.s16
 ; CHECK: vshl.i32
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %sext = sext <4 x i16> %tmp1 to <4 x i32>
         %shift = shl <4 x i32> %sext, <i32 17, i32 17, i32 17, i32 17>
         ret <4 x i32> %shift
@@ -109,7 +109,7 @@ define <2 x i64> @vshllu32_bad(<2 x i32>
 ; CHECK-LABEL: vshllu32_bad:
 ; CHECK: vmovl.u32
 ; CHECK: vshl.i64
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %zext = zext <2 x i32> %tmp1 to <2 x i64>
         %shift = shl <2 x i64> %zext, <i64 33, i64 33>
         ret <2 x i64> %shift

Modified: llvm/trunk/test/CodeGen/ARM/vshrn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vshrn.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vshrn.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vshrn.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 define <8 x i8> @vshrns8(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshrns8:
 ;CHECK: vshrn.i16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp2 = lshr <8 x i16> %tmp1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
         %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
 	ret <8 x i8> %tmp3
@@ -12,7 +12,7 @@ define <8 x i8> @vshrns8(<8 x i16>* %A)
 define <4 x i16> @vshrns16(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshrns16:
 ;CHECK: vshrn.i32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp2 = ashr <4 x i32> %tmp1, <i32 16, i32 16, i32 16, i32 16>
         %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
 	ret <4 x i16> %tmp3
@@ -21,7 +21,7 @@ define <4 x i16> @vshrns16(<4 x i32>* %A
 define <2 x i32> @vshrns32(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vshrns32:
 ;CHECK: vshrn.i64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp2 = ashr <2 x i64> %tmp1, <i64 32, i64 32>
         %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
 	ret <2 x i32> %tmp3
@@ -31,7 +31,7 @@ define <8 x i8> @vshrns8_bad(<8 x i16>*
 ; CHECK-LABEL: vshrns8_bad:
 ; CHECK: vshr.s16
 ; CHECK: vmovn.i16
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp2 = ashr <8 x i16> %tmp1, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
         %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
         ret <8 x i8> %tmp3
@@ -41,7 +41,7 @@ define <4 x i16> @vshrns16_bad(<4 x i32>
 ; CHECK-LABEL: vshrns16_bad:
 ; CHECK: vshr.u32
 ; CHECK: vmovn.i32
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp2 = lshr <4 x i32> %tmp1, <i32 17, i32 17, i32 17, i32 17>
         %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
         ret <4 x i16> %tmp3
@@ -51,7 +51,7 @@ define <2 x i32> @vshrns32_bad(<2 x i64>
 ; CHECK-LABEL: vshrns32_bad:
 ; CHECK: vshr.u64
 ; CHECK: vmovn.i64
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp2 = lshr <2 x i64> %tmp1, <i64 33, i64 33>
         %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
         ret <2 x i32> %tmp3
@@ -60,7 +60,7 @@ define <2 x i32> @vshrns32_bad(<2 x i64>
 define <8 x i8> @vrshrns8(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vrshrns8:
 ;CHECK: vrshrn.i16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vrshiftn.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
 	ret <8 x i8> %tmp2
 }
@@ -68,7 +68,7 @@ define <8 x i8> @vrshrns8(<8 x i16>* %A)
 define <4 x i16> @vrshrns16(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vrshrns16:
 ;CHECK: vrshrn.i32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vrshiftn.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
 	ret <4 x i16> %tmp2
 }
@@ -76,7 +76,7 @@ define <4 x i16> @vrshrns16(<4 x i32>* %
 define <2 x i32> @vrshrns32(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vrshrns32:
 ;CHECK: vrshrn.i64
-	%tmp1 = load <2 x i64>* %A
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vrshiftn.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
 	ret <2 x i32> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/vsra.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vsra.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vsra.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vsra.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsras8:
 ;CHECK: vsra.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = ashr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
     %tmp4 = add <8 x i8> %tmp1, %tmp3
 	ret <8 x i8> %tmp4
@@ -13,8 +13,8 @@ define <8 x i8> @vsras8(<8 x i8>* %A, <8
 define <4 x i16> @vsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsras16:
 ;CHECK: vsra.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = ashr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
         %tmp4 = add <4 x i16> %tmp1, %tmp3
 	ret <4 x i16> %tmp4
@@ -23,8 +23,8 @@ define <4 x i16> @vsras16(<4 x i16>* %A,
 define <2 x i32> @vsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsras32:
 ;CHECK: vsra.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = ashr <2 x i32> %tmp2, < i32 31, i32 31 >
         %tmp4 = add <2 x i32> %tmp1, %tmp3
 	ret <2 x i32> %tmp4
@@ -33,8 +33,8 @@ define <2 x i32> @vsras32(<2 x i32>* %A,
 define <1 x i64> @vsras64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vsras64:
 ;CHECK: vsra.s64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = ashr <1 x i64> %tmp2, < i64 63 >
         %tmp4 = add <1 x i64> %tmp1, %tmp3
 	ret <1 x i64> %tmp4
@@ -43,8 +43,8 @@ define <1 x i64> @vsras64(<1 x i64>* %A,
 define <16 x i8> @vsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsraQs8:
 ;CHECK: vsra.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = ashr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
         %tmp4 = add <16 x i8> %tmp1, %tmp3
 	ret <16 x i8> %tmp4
@@ -53,8 +53,8 @@ define <16 x i8> @vsraQs8(<16 x i8>* %A,
 define <8 x i16> @vsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsraQs16:
 ;CHECK: vsra.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = ashr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
         %tmp4 = add <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
@@ -63,8 +63,8 @@ define <8 x i16> @vsraQs16(<8 x i16>* %A
 define <4 x i32> @vsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsraQs32:
 ;CHECK: vsra.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = ashr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
         %tmp4 = add <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
@@ -73,8 +73,8 @@ define <4 x i32> @vsraQs32(<4 x i32>* %A
 define <2 x i64> @vsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vsraQs64:
 ;CHECK: vsra.s64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = ashr <2 x i64> %tmp2, < i64 63, i64 63 >
         %tmp4 = add <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4
@@ -83,8 +83,8 @@ define <2 x i64> @vsraQs64(<2 x i64>* %A
 define <8 x i8> @vsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsrau8:
 ;CHECK: vsra.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = lshr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
         %tmp4 = add <8 x i8> %tmp1, %tmp3
 	ret <8 x i8> %tmp4
@@ -93,8 +93,8 @@ define <8 x i8> @vsrau8(<8 x i8>* %A, <8
 define <4 x i16> @vsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsrau16:
 ;CHECK: vsra.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = lshr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
         %tmp4 = add <4 x i16> %tmp1, %tmp3
 	ret <4 x i16> %tmp4
@@ -103,8 +103,8 @@ define <4 x i16> @vsrau16(<4 x i16>* %A,
 define <2 x i32> @vsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsrau32:
 ;CHECK: vsra.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = lshr <2 x i32> %tmp2, < i32 31, i32 31 >
         %tmp4 = add <2 x i32> %tmp1, %tmp3
 	ret <2 x i32> %tmp4
@@ -113,8 +113,8 @@ define <2 x i32> @vsrau32(<2 x i32>* %A,
 define <1 x i64> @vsrau64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vsrau64:
 ;CHECK: vsra.u64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = lshr <1 x i64> %tmp2, < i64 63 >
         %tmp4 = add <1 x i64> %tmp1, %tmp3
 	ret <1 x i64> %tmp4
@@ -123,8 +123,8 @@ define <1 x i64> @vsrau64(<1 x i64>* %A,
 define <16 x i8> @vsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsraQu8:
 ;CHECK: vsra.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = lshr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
         %tmp4 = add <16 x i8> %tmp1, %tmp3
 	ret <16 x i8> %tmp4
@@ -133,8 +133,8 @@ define <16 x i8> @vsraQu8(<16 x i8>* %A,
 define <8 x i16> @vsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsraQu16:
 ;CHECK: vsra.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = lshr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
         %tmp4 = add <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
@@ -143,8 +143,8 @@ define <8 x i16> @vsraQu16(<8 x i16>* %A
 define <4 x i32> @vsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsraQu32:
 ;CHECK: vsra.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = lshr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
         %tmp4 = add <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
@@ -153,8 +153,8 @@ define <4 x i32> @vsraQu32(<4 x i32>* %A
 define <2 x i64> @vsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vsraQu64:
 ;CHECK: vsra.u64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = lshr <2 x i64> %tmp2, < i64 63, i64 63 >
         %tmp4 = add <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4
@@ -163,8 +163,8 @@ define <2 x i64> @vsraQu64(<2 x i64>* %A
 define <8 x i8> @vrsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vrsras8:
 ;CHECK: vrsra.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
         %tmp4 = add <8 x i8> %tmp1, %tmp3
 	ret <8 x i8> %tmp4
@@ -173,8 +173,8 @@ define <8 x i8> @vrsras8(<8 x i8>* %A, <
 define <4 x i16> @vrsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vrsras16:
 ;CHECK: vrsra.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
         %tmp4 = add <4 x i16> %tmp1, %tmp3
 	ret <4 x i16> %tmp4
@@ -183,8 +183,8 @@ define <4 x i16> @vrsras16(<4 x i16>* %A
 define <2 x i32> @vrsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vrsras32:
 ;CHECK: vrsra.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
         %tmp4 = add <2 x i32> %tmp1, %tmp3
 	ret <2 x i32> %tmp4
@@ -193,8 +193,8 @@ define <2 x i32> @vrsras32(<2 x i32>* %A
 define <1 x i64> @vrsras64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vrsras64:
 ;CHECK: vrsra.s64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp2, <1 x i64> < i64 -64 >)
         %tmp4 = add <1 x i64> %tmp1, %tmp3
 	ret <1 x i64> %tmp4
@@ -203,8 +203,8 @@ define <1 x i64> @vrsras64(<1 x i64>* %A
 define <8 x i8> @vrsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vrsrau8:
 ;CHECK: vrsra.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
         %tmp4 = add <8 x i8> %tmp1, %tmp3
 	ret <8 x i8> %tmp4
@@ -213,8 +213,8 @@ define <8 x i8> @vrsrau8(<8 x i8>* %A, <
 define <4 x i16> @vrsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vrsrau16:
 ;CHECK: vrsra.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
         %tmp4 = add <4 x i16> %tmp1, %tmp3
 	ret <4 x i16> %tmp4
@@ -223,8 +223,8 @@ define <4 x i16> @vrsrau16(<4 x i16>* %A
 define <2 x i32> @vrsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vrsrau32:
 ;CHECK: vrsra.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
         %tmp4 = add <2 x i32> %tmp1, %tmp3
 	ret <2 x i32> %tmp4
@@ -233,8 +233,8 @@ define <2 x i32> @vrsrau32(<2 x i32>* %A
 define <1 x i64> @vrsrau64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vrsrau64:
 ;CHECK: vrsra.u64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp2, <1 x i64> < i64 -64 >)
         %tmp4 = add <1 x i64> %tmp1, %tmp3
 	ret <1 x i64> %tmp4
@@ -243,8 +243,8 @@ define <1 x i64> @vrsrau64(<1 x i64>* %A
 define <16 x i8> @vrsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vrsraQs8:
 ;CHECK: vrsra.s8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
         %tmp4 = add <16 x i8> %tmp1, %tmp3
 	ret <16 x i8> %tmp4
@@ -253,8 +253,8 @@ define <16 x i8> @vrsraQs8(<16 x i8>* %A
 define <8 x i16> @vrsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vrsraQs16:
 ;CHECK: vrsra.s16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
         %tmp4 = add <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
@@ -263,8 +263,8 @@ define <8 x i16> @vrsraQs16(<8 x i16>* %
 define <4 x i32> @vrsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vrsraQs32:
 ;CHECK: vrsra.s32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
         %tmp4 = add <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
@@ -273,8 +273,8 @@ define <4 x i32> @vrsraQs32(<4 x i32>* %
 define <2 x i64> @vrsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vrsraQs64:
 ;CHECK: vrsra.s64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
         %tmp4 = add <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4
@@ -283,8 +283,8 @@ define <2 x i64> @vrsraQs64(<2 x i64>* %
 define <16 x i8> @vrsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vrsraQu8:
 ;CHECK: vrsra.u8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
         %tmp4 = add <16 x i8> %tmp1, %tmp3
 	ret <16 x i8> %tmp4
@@ -293,8 +293,8 @@ define <16 x i8> @vrsraQu8(<16 x i8>* %A
 define <8 x i16> @vrsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vrsraQu16:
 ;CHECK: vrsra.u16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
         %tmp4 = add <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
@@ -303,8 +303,8 @@ define <8 x i16> @vrsraQu16(<8 x i16>* %
 define <4 x i32> @vrsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vrsraQu32:
 ;CHECK: vrsra.u32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
         %tmp4 = add <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
@@ -313,8 +313,8 @@ define <4 x i32> @vrsraQu32(<4 x i32>* %
 define <2 x i64> @vrsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vrsraQu64:
 ;CHECK: vrsra.u64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
         %tmp4 = add <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4

Modified: llvm/trunk/test/CodeGen/ARM/vst1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vst1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vst1.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vst1.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define void @vst1i8(i8* %A, <8 x i8>* %B
 ;CHECK-LABEL: vst1i8:
 ;Check the alignment value.  Max for this instruction is 64 bits:
 ;CHECK: vst1.8 {d16}, [r0:64]
-	%tmp1 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	call void @llvm.arm.neon.vst1.v8i8(i8* %A, <8 x i8> %tmp1, i32 16)
 	ret void
 }
@@ -13,7 +13,7 @@ define void @vst1i16(i16* %A, <4 x i16>*
 ;CHECK-LABEL: vst1i16:
 ;CHECK: vst1.16
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
 	call void @llvm.arm.neon.vst1.v4i16(i8* %tmp0, <4 x i16> %tmp1, i32 1)
 	ret void
 }
@@ -22,7 +22,7 @@ define void @vst1i32(i32* %A, <2 x i32>*
 ;CHECK-LABEL: vst1i32:
 ;CHECK: vst1.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
 	call void @llvm.arm.neon.vst1.v2i32(i8* %tmp0, <2 x i32> %tmp1, i32 1)
 	ret void
 }
@@ -31,7 +31,7 @@ define void @vst1f(float* %A, <2 x float
 ;CHECK-LABEL: vst1f:
 ;CHECK: vst1.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %B
 	call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
 	ret void
 }
@@ -40,9 +40,9 @@ define void @vst1f(float* %A, <2 x float
 define void @vst1f_update(float** %ptr, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vst1f_update:
 ;CHECK: vst1.32 {d16}, [r1]!
-	%A = load float** %ptr
+	%A = load float*, float** %ptr
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %B
 	call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
 	%tmp2 = getelementptr float, float* %A, i32 2
 	store float* %tmp2, float** %ptr
@@ -53,7 +53,7 @@ define void @vst1i64(i64* %A, <1 x i64>*
 ;CHECK-LABEL: vst1i64:
 ;CHECK: vst1.64
 	%tmp0 = bitcast i64* %A to i8*
-	%tmp1 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %B
 	call void @llvm.arm.neon.vst1.v1i64(i8* %tmp0, <1 x i64> %tmp1, i32 1)
 	ret void
 }
@@ -62,7 +62,7 @@ define void @vst1Qi8(i8* %A, <16 x i8>*
 ;CHECK-LABEL: vst1Qi8:
 ;Check the alignment value.  Max for this instruction is 128 bits:
 ;CHECK: vst1.8 {d16, d17}, [r0:64]
-	%tmp1 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %B
 	call void @llvm.arm.neon.vst1.v16i8(i8* %A, <16 x i8> %tmp1, i32 8)
 	ret void
 }
@@ -72,7 +72,7 @@ define void @vst1Qi16(i16* %A, <8 x i16>
 ;Check the alignment value.  Max for this instruction is 128 bits:
 ;CHECK: vst1.16 {d16, d17}, [r0:128]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
 	call void @llvm.arm.neon.vst1.v8i16(i8* %tmp0, <8 x i16> %tmp1, i32 32)
 	ret void
 }
@@ -81,9 +81,9 @@ define void @vst1Qi16(i16* %A, <8 x i16>
 define void @vst1Qi16_update(i16** %ptr, <8 x i16>* %B, i32 %inc) nounwind {
 ;CHECK-LABEL: vst1Qi16_update:
 ;CHECK: vst1.16 {d16, d17}, [r1:64], r2
-	%A = load i16** %ptr
+	%A = load i16*, i16** %ptr
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
 	call void @llvm.arm.neon.vst1.v8i16(i8* %tmp0, <8 x i16> %tmp1, i32 8)
 	%tmp2 = getelementptr i16, i16* %A, i32 %inc
 	store i16* %tmp2, i16** %ptr
@@ -94,7 +94,7 @@ define void @vst1Qi32(i32* %A, <4 x i32>
 ;CHECK-LABEL: vst1Qi32:
 ;CHECK: vst1.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
 	call void @llvm.arm.neon.vst1.v4i32(i8* %tmp0, <4 x i32> %tmp1, i32 1)
 	ret void
 }
@@ -103,7 +103,7 @@ define void @vst1Qf(float* %A, <4 x floa
 ;CHECK-LABEL: vst1Qf:
 ;CHECK: vst1.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %B
 	call void @llvm.arm.neon.vst1.v4f32(i8* %tmp0, <4 x float> %tmp1, i32 1)
 	ret void
 }
@@ -112,7 +112,7 @@ define void @vst1Qi64(i64* %A, <2 x i64>
 ;CHECK-LABEL: vst1Qi64:
 ;CHECK: vst1.64
 	%tmp0 = bitcast i64* %A to i8*
-	%tmp1 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %B
 	call void @llvm.arm.neon.vst1.v2i64(i8* %tmp0, <2 x i64> %tmp1, i32 1)
 	ret void
 }
@@ -121,7 +121,7 @@ define void @vst1Qf64(double* %A, <2 x d
 ;CHECK-LABEL: vst1Qf64:
 ;CHECK: vst1.64
 	%tmp0 = bitcast double* %A to i8*
-	%tmp1 = load <2 x double>* %B
+	%tmp1 = load <2 x double>, <2 x double>* %B
 	call void @llvm.arm.neon.vst1.v2f64(i8* %tmp0, <2 x double> %tmp1, i32 1)
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/vst2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vst2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vst2.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vst2.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define void @vst2i8(i8* %A, <8 x i8>* %B
 ;CHECK-LABEL: vst2i8:
 ;Check the alignment value.  Max for this instruction is 128 bits:
 ;CHECK: vst2.8 {d16, d17}, [r0:64]
-	%tmp1 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 8)
 	ret void
 }
@@ -13,8 +13,8 @@ define void @vst2i8(i8* %A, <8 x i8>* %B
 define void @vst2i8_update(i8** %ptr, <8 x i8>* %B, i32 %inc) nounwind {
 ;CHECK-LABEL: vst2i8_update:
 ;CHECK: vst2.8 {d16, d17}, [r1], r2
-	%A = load i8** %ptr
-	%tmp1 = load <8 x i8>* %B
+	%A = load i8*, i8** %ptr
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 4)
 	%tmp2 = getelementptr i8, i8* %A, i32 %inc
 	store i8* %tmp2, i8** %ptr
@@ -26,7 +26,7 @@ define void @vst2i16(i16* %A, <4 x i16>*
 ;Check the alignment value.  Max for this instruction is 128 bits:
 ;CHECK: vst2.16 {d16, d17}, [r0:128]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
 	call void @llvm.arm.neon.vst2.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 32)
 	ret void
 }
@@ -35,7 +35,7 @@ define void @vst2i32(i32* %A, <2 x i32>*
 ;CHECK-LABEL: vst2i32:
 ;CHECK: vst2.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
 	call void @llvm.arm.neon.vst2.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
 	ret void
 }
@@ -44,7 +44,7 @@ define void @vst2f(float* %A, <2 x float
 ;CHECK-LABEL: vst2f:
 ;CHECK: vst2.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %B
 	call void @llvm.arm.neon.vst2.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
 	ret void
 }
@@ -54,7 +54,7 @@ define void @vst2i64(i64* %A, <1 x i64>*
 ;Check the alignment value.  Max for this instruction is 128 bits:
 ;CHECK: vst1.64 {d16, d17}, [r0:128]
 	%tmp0 = bitcast i64* %A to i8*
-	%tmp1 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %B
 	call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 32)
 	ret void
 }
@@ -63,9 +63,9 @@ define void @vst2i64(i64* %A, <1 x i64>*
 define void @vst2i64_update(i64** %ptr, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vst2i64_update:
 ;CHECK: vst1.64 {d16, d17}, [r1:64]!
-	%A = load i64** %ptr
+	%A = load i64*, i64** %ptr
 	%tmp0 = bitcast i64* %A to i8*
-	%tmp1 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %B
 	call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 8)
 	%tmp2 = getelementptr i64, i64* %A, i32 2
 	store i64* %tmp2, i64** %ptr
@@ -76,7 +76,7 @@ define void @vst2Qi8(i8* %A, <16 x i8>*
 ;CHECK-LABEL: vst2Qi8:
 ;Check the alignment value.  Max for this instruction is 256 bits:
 ;CHECK: vst2.8 {d16, d17, d18, d19}, [r0:64]
-	%tmp1 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %B
 	call void @llvm.arm.neon.vst2.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 8)
 	ret void
 }
@@ -86,7 +86,7 @@ define void @vst2Qi16(i16* %A, <8 x i16>
 ;Check the alignment value.  Max for this instruction is 256 bits:
 ;CHECK: vst2.16 {d16, d17, d18, d19}, [r0:128]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
 	call void @llvm.arm.neon.vst2.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 16)
 	ret void
 }
@@ -96,7 +96,7 @@ define void @vst2Qi32(i32* %A, <4 x i32>
 ;Check the alignment value.  Max for this instruction is 256 bits:
 ;CHECK: vst2.32 {d16, d17, d18, d19}, [r0:256]
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
 	call void @llvm.arm.neon.vst2.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 64)
 	ret void
 }
@@ -105,7 +105,7 @@ define void @vst2Qf(float* %A, <4 x floa
 ;CHECK-LABEL: vst2Qf:
 ;CHECK: vst2.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %B
 	call void @llvm.arm.neon.vst2.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
 	ret void
 }
@@ -113,7 +113,7 @@ define void @vst2Qf(float* %A, <4 x floa
 define i8* @vst2update(i8* %out, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vst2update:
 ;CHECK: vst2.16 {d16, d17}, [r0]!
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
 	tail call void @llvm.arm.neon.vst2.v4i16(i8* %out, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 2)
 	%t5 = getelementptr inbounds i8, i8* %out, i32 16
 	ret i8* %t5
@@ -122,7 +122,7 @@ define i8* @vst2update(i8* %out, <4 x i1
 define i8* @vst2update2(i8 * %out, <4 x float> * %this) nounwind optsize ssp align 2 {
 ;CHECK-LABEL: vst2update2:
 ;CHECK: vst2.32 {d16, d17, d18, d19}, [r0]!
-  %tmp1 = load <4 x float>* %this
+  %tmp1 = load <4 x float>, <4 x float>* %this
   call void @llvm.arm.neon.vst2.v4f32(i8* %out, <4 x float> %tmp1, <4 x float> %tmp1, i32 4) nounwind
   %tmp2 = getelementptr inbounds i8, i8* %out, i32  32
   ret i8* %tmp2

Modified: llvm/trunk/test/CodeGen/ARM/vst3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vst3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vst3.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vst3.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ define void @vst3i8(i8* %A, <8 x i8>* %B
 ;Check the alignment value.  Max for this instruction is 64 bits:
 ;This test runs at -O0 so do not check for specific register numbers.
 ;CHECK: vst3.8 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}:64]
-	%tmp1 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	call void @llvm.arm.neon.vst3.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 32)
 	ret void
 }
@@ -14,7 +14,7 @@ define void @vst3i16(i16* %A, <4 x i16>*
 ;CHECK-LABEL: vst3i16:
 ;CHECK: vst3.16
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
 	call void @llvm.arm.neon.vst3.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
 	ret void
 }
@@ -23,7 +23,7 @@ define void @vst3i32(i32* %A, <2 x i32>*
 ;CHECK-LABEL: vst3i32:
 ;CHECK: vst3.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
 	call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
 	ret void
 }
@@ -32,9 +32,9 @@ define void @vst3i32(i32* %A, <2 x i32>*
 define void @vst3i32_update(i32** %ptr, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vst3i32_update:
 ;CHECK: vst3.32 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}]!
-	%A = load i32** %ptr
+	%A = load i32*, i32** %ptr
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
 	call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
 	%tmp2 = getelementptr i32, i32* %A, i32 6
 	store i32* %tmp2, i32** %ptr
@@ -45,7 +45,7 @@ define void @vst3f(float* %A, <2 x float
 ;CHECK-LABEL: vst3f:
 ;CHECK: vst3.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %B
 	call void @llvm.arm.neon.vst3.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
 	ret void
 }
@@ -56,7 +56,7 @@ define void @vst3i64(i64* %A, <1 x i64>*
 ;This test runs at -O0 so do not check for specific register numbers.
 ;CHECK: vst1.64 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}:64]
 	%tmp0 = bitcast i64* %A to i8*
-	%tmp1 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %B
 	call void @llvm.arm.neon.vst3.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 16)
 	ret void
 }
@@ -64,9 +64,9 @@ define void @vst3i64(i64* %A, <1 x i64>*
 define void @vst3i64_update(i64** %ptr, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vst3i64_update
 ;CHECK: vst1.64	{d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}]!
-        %A = load i64** %ptr
+        %A = load i64*, i64** %ptr
         %tmp0 = bitcast i64* %A to i8*
-        %tmp1 = load <1 x i64>* %B
+        %tmp1 = load <1 x i64>, <1 x i64>* %B
         call void @llvm.arm.neon.vst3.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 1)
         %tmp2 = getelementptr i64, i64* %A, i32 3
         store i64* %tmp2, i64** %ptr
@@ -79,7 +79,7 @@ define void @vst3Qi8(i8* %A, <16 x i8>*
 ;This test runs at -O0 so do not check for specific register numbers.
 ;CHECK: vst3.8 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}:64]!
 ;CHECK: vst3.8 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}:64]
-	%tmp1 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %B
 	call void @llvm.arm.neon.vst3.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 32)
 	ret void
 }
@@ -89,7 +89,7 @@ define void @vst3Qi16(i16* %A, <8 x i16>
 ;CHECK: vst3.16
 ;CHECK: vst3.16
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
 	call void @llvm.arm.neon.vst3.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
 	ret void
 }
@@ -99,9 +99,9 @@ define void @vst3Qi16_update(i16** %ptr,
 ;CHECK-LABEL: vst3Qi16_update:
 ;CHECK: vst3.16 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}]!
 ;CHECK: vst3.16 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}]!
-	%A = load i16** %ptr
+	%A = load i16*, i16** %ptr
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
 	call void @llvm.arm.neon.vst3.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
 	%tmp2 = getelementptr i16, i16* %A, i32 24
 	store i16* %tmp2, i16** %ptr
@@ -113,7 +113,7 @@ define void @vst3Qi32(i32* %A, <4 x i32>
 ;CHECK: vst3.32
 ;CHECK: vst3.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
 	call void @llvm.arm.neon.vst3.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 1)
 	ret void
 }
@@ -123,7 +123,7 @@ define void @vst3Qf(float* %A, <4 x floa
 ;CHECK: vst3.32
 ;CHECK: vst3.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %B
 	call void @llvm.arm.neon.vst3.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/vst4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vst4.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vst4.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vst4.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define void @vst4i8(i8* %A, <8 x i8>* %B
 ;CHECK-LABEL: vst4i8:
 ;Check the alignment value.  Max for this instruction is 256 bits:
 ;CHECK: vst4.8 {d16, d17, d18, d19}, [r0:64]
-	%tmp1 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 8)
 	ret void
 }
@@ -13,8 +13,8 @@ define void @vst4i8(i8* %A, <8 x i8>* %B
 define void @vst4i8_update(i8** %ptr, <8 x i8>* %B, i32 %inc) nounwind {
 ;CHECK-LABEL: vst4i8_update:
 ;CHECK: vst4.8 {d16, d17, d18, d19}, [r1:128], r2
-	%A = load i8** %ptr
-	%tmp1 = load <8 x i8>* %B
+	%A = load i8*, i8** %ptr
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 16)
 	%tmp2 = getelementptr i8, i8* %A, i32 %inc
 	store i8* %tmp2, i8** %ptr
@@ -26,7 +26,7 @@ define void @vst4i16(i16* %A, <4 x i16>*
 ;Check the alignment value.  Max for this instruction is 256 bits:
 ;CHECK: vst4.16 {d16, d17, d18, d19}, [r0:128]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
 	call void @llvm.arm.neon.vst4.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 16)
 	ret void
 }
@@ -36,7 +36,7 @@ define void @vst4i32(i32* %A, <2 x i32>*
 ;Check the alignment value.  Max for this instruction is 256 bits:
 ;CHECK: vst4.32 {d16, d17, d18, d19}, [r0:256]
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
 	call void @llvm.arm.neon.vst4.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 32)
 	ret void
 }
@@ -45,7 +45,7 @@ define void @vst4f(float* %A, <2 x float
 ;CHECK-LABEL: vst4f:
 ;CHECK: vst4.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %B
 	call void @llvm.arm.neon.vst4.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
 	ret void
 }
@@ -55,7 +55,7 @@ define void @vst4i64(i64* %A, <1 x i64>*
 ;Check the alignment value.  Max for this instruction is 256 bits:
 ;CHECK: vst1.64 {d16, d17, d18, d19}, [r0:256]
 	%tmp0 = bitcast i64* %A to i8*
-	%tmp1 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %B
 	call void @llvm.arm.neon.vst4.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 64)
 	ret void
 }
@@ -63,9 +63,9 @@ define void @vst4i64(i64* %A, <1 x i64>*
 define void @vst4i64_update(i64** %ptr, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vst4i64_update:
 ;CHECK: vst1.64	{d16, d17, d18, d19}, [r1]!
-        %A = load i64** %ptr
+        %A = load i64*, i64** %ptr
         %tmp0 = bitcast i64* %A to i8*
-        %tmp1 = load <1 x i64>* %B
+        %tmp1 = load <1 x i64>, <1 x i64>* %B
         call void @llvm.arm.neon.vst4.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 1)
         %tmp2 = getelementptr i64, i64* %A, i32 4
         store i64* %tmp2, i64** %ptr
@@ -77,7 +77,7 @@ define void @vst4Qi8(i8* %A, <16 x i8>*
 ;Check the alignment value.  Max for this instruction is 256 bits:
 ;CHECK: vst4.8 {d16, d18, d20, d22}, [r0:256]!
 ;CHECK: vst4.8 {d17, d19, d21, d23}, [r0:256]
-	%tmp1 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %B
 	call void @llvm.arm.neon.vst4.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 64)
 	ret void
 }
@@ -88,7 +88,7 @@ define void @vst4Qi16(i16* %A, <8 x i16>
 ;CHECK: vst4.16 {d16, d18, d20, d22}, [r0]!
 ;CHECK: vst4.16 {d17, d19, d21, d23}, [r0]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
 	call void @llvm.arm.neon.vst4.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
 	ret void
 }
@@ -98,7 +98,7 @@ define void @vst4Qi32(i32* %A, <4 x i32>
 ;CHECK: vst4.32
 ;CHECK: vst4.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
 	call void @llvm.arm.neon.vst4.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 1)
 	ret void
 }
@@ -108,7 +108,7 @@ define void @vst4Qf(float* %A, <4 x floa
 ;CHECK: vst4.32
 ;CHECK: vst4.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %B
 	call void @llvm.arm.neon.vst4.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
 	ret void
 }
@@ -118,9 +118,9 @@ define void @vst4Qf_update(float** %ptr,
 ;CHECK-LABEL: vst4Qf_update:
 ;CHECK: vst4.32 {d16, d18, d20, d22}, [r1]!
 ;CHECK: vst4.32 {d17, d19, d21, d23}, [r1]!
-	%A = load float** %ptr
+	%A = load float*, float** %ptr
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %B
 	call void @llvm.arm.neon.vst4.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
 	%tmp2 = getelementptr float, float* %A, i32 16
 	store float* %tmp2, float** %ptr

Modified: llvm/trunk/test/CodeGen/ARM/vstlane.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vstlane.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vstlane.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vstlane.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define void @vst1lanei8(i8* %A, <8 x i8>
 ;CHECK-LABEL: vst1lanei8:
 ;Check the (default) alignment.
 ;CHECK: vst1.8 {d16[3]}, [r0]
-	%tmp1 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
         %tmp2 = extractelement <8 x i8> %tmp1, i32 3
         store i8 %tmp2, i8* %A, align 8
 	ret void
@@ -14,8 +14,8 @@ define void @vst1lanei8(i8* %A, <8 x i8>
 define void @vst1lanei8_update(i8** %ptr, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vst1lanei8_update:
 ;CHECK: vst1.8 {d16[3]}, [{{r[0-9]}}]!
-	%A = load i8** %ptr
-	%tmp1 = load <8 x i8>* %B
+	%A = load i8*, i8** %ptr
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	%tmp2 = extractelement <8 x i8> %tmp1, i32 3
 	store i8 %tmp2, i8* %A, align 8
 	%tmp3 = getelementptr i8, i8* %A, i32 1
@@ -27,7 +27,7 @@ define void @vst1lanei16(i16* %A, <4 x i
 ;CHECK-LABEL: vst1lanei16:
 ;Check the alignment value.  Max for this instruction is 16 bits:
 ;CHECK: vst1.16 {d16[2]}, [r0:16]
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
         %tmp2 = extractelement <4 x i16> %tmp1, i32 2
         store i16 %tmp2, i16* %A, align 8
 	ret void
@@ -37,7 +37,7 @@ define void @vst1lanei32(i32* %A, <2 x i
 ;CHECK-LABEL: vst1lanei32:
 ;Check the alignment value.  Max for this instruction is 32 bits:
 ;CHECK: vst1.32 {d16[1]}, [r0:32]
-	%tmp1 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
         %tmp2 = extractelement <2 x i32> %tmp1, i32 1
         store i32 %tmp2, i32* %A, align 8
 	ret void
@@ -46,7 +46,7 @@ define void @vst1lanei32(i32* %A, <2 x i
 define void @vst1lanef(float* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vst1lanef:
 ;CHECK: vst1.32 {d16[1]}, [r0:32]
-	%tmp1 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %B
         %tmp2 = extractelement <2 x float> %tmp1, i32 1
         store float %tmp2, float* %A
 	ret void
@@ -56,7 +56,7 @@ define void @vst1laneQi8(i8* %A, <16 x i
 ;CHECK-LABEL: vst1laneQi8:
 ; // Can use scalar load. No need to use vectors.
 ; // CHE-CK: vst1.8 {d17[1]}, [r0]
-	%tmp1 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %B
         %tmp2 = extractelement <16 x i8> %tmp1, i32 9
         store i8 %tmp2, i8* %A, align 8
 	ret void
@@ -65,7 +65,7 @@ define void @vst1laneQi8(i8* %A, <16 x i
 define void @vst1laneQi16(i16* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vst1laneQi16:
 ;CHECK: vst1.16 {d17[1]}, [r0:16]
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
         %tmp2 = extractelement <8 x i16> %tmp1, i32 5
         store i16 %tmp2, i16* %A, align 8
 	ret void
@@ -75,7 +75,7 @@ define void @vst1laneQi32(i32* %A, <4 x
 ;CHECK-LABEL: vst1laneQi32:
 ; // Can use scalar load. No need to use vectors.
 ; // CHE-CK: vst1.32 {d17[1]}, [r0:32]
-	%tmp1 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
         %tmp2 = extractelement <4 x i32> %tmp1, i32 3
         store i32 %tmp2, i32* %A, align 8
 	ret void
@@ -86,8 +86,8 @@ define void @vst1laneQi32_update(i32** %
 ;CHECK-LABEL: vst1laneQi32_update:
 ; // Can use scalar load. No need to use vectors.
 ; // CHE-CK: vst1.32 {d17[1]}, [r1:32]!
-	%A = load i32** %ptr
-	%tmp1 = load <4 x i32>* %B
+	%A = load i32*, i32** %ptr
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
 	%tmp2 = extractelement <4 x i32> %tmp1, i32 3
 	store i32 %tmp2, i32* %A, align 8
 	%tmp3 = getelementptr i32, i32* %A, i32 1
@@ -99,7 +99,7 @@ define void @vst1laneQf(float* %A, <4 x
 ;CHECK-LABEL: vst1laneQf:
 ; // Can use scalar load. No need to use vectors.
 ; // CHE-CK: vst1.32 {d17[1]}, [r0]
-	%tmp1 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %B
         %tmp2 = extractelement <4 x float> %tmp1, i32 3
         store float %tmp2, float* %A
 	ret void
@@ -109,7 +109,7 @@ define void @vst2lanei8(i8* %A, <8 x i8>
 ;CHECK-LABEL: vst2lanei8:
 ;Check the alignment value.  Max for this instruction is 16 bits:
 ;CHECK: vst2.8 {d16[1], d17[1]}, [r0:16]
-	%tmp1 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	call void @llvm.arm.neon.vst2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 4)
 	ret void
 }
@@ -119,7 +119,7 @@ define void @vst2lanei16(i16* %A, <4 x i
 ;Check the alignment value.  Max for this instruction is 32 bits:
 ;CHECK: vst2.16 {d16[1], d17[1]}, [r0:32]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
 	call void @llvm.arm.neon.vst2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
 	ret void
 }
@@ -128,9 +128,9 @@ define void @vst2lanei16(i16* %A, <4 x i
 define void @vst2lanei16_update(i16** %ptr, <4 x i16>* %B, i32 %inc) nounwind {
 ;CHECK-LABEL: vst2lanei16_update:
 ;CHECK: vst2.16 {d16[1], d17[1]}, [r1], r2
-	%A = load i16** %ptr
+	%A = load i16*, i16** %ptr
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
 	call void @llvm.arm.neon.vst2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 2)
 	%tmp2 = getelementptr i16, i16* %A, i32 %inc
 	store i16* %tmp2, i16** %ptr
@@ -141,7 +141,7 @@ define void @vst2lanei32(i32* %A, <2 x i
 ;CHECK-LABEL: vst2lanei32:
 ;CHECK: vst2.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
 	call void @llvm.arm.neon.vst2lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
 	ret void
 }
@@ -150,7 +150,7 @@ define void @vst2lanef(float* %A, <2 x f
 ;CHECK-LABEL: vst2lanef:
 ;CHECK: vst2.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %B
 	call void @llvm.arm.neon.vst2lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1, i32 1)
 	ret void
 }
@@ -160,7 +160,7 @@ define void @vst2laneQi16(i16* %A, <8 x
 ;Check the (default) alignment.
 ;CHECK: vst2.16 {d17[1], d19[1]}, [r0]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
 	call void @llvm.arm.neon.vst2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 5, i32 1)
 	ret void
 }
@@ -170,7 +170,7 @@ define void @vst2laneQi32(i32* %A, <4 x
 ;Check the alignment value.  Max for this instruction is 64 bits:
 ;CHECK: vst2.32 {d17[0], d19[0]}, [r0:64]
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
 	call void @llvm.arm.neon.vst2lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 16)
 	ret void
 }
@@ -179,7 +179,7 @@ define void @vst2laneQf(float* %A, <4 x
 ;CHECK-LABEL: vst2laneQf:
 ;CHECK: vst2.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %B
 	call void @llvm.arm.neon.vst2lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, i32 3, i32 1)
 	ret void
 }
@@ -196,7 +196,7 @@ declare void @llvm.arm.neon.vst2lane.v4f
 define void @vst3lanei8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vst3lanei8:
 ;CHECK: vst3.8
-	%tmp1 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	call void @llvm.arm.neon.vst3lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1)
 	ret void
 }
@@ -206,7 +206,7 @@ define void @vst3lanei16(i16* %A, <4 x i
 ;Check the (default) alignment value.  VST3 does not support alignment.
 ;CHECK: vst3.16 {d16[1], d17[1], d18[1]}, [r0]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
 	call void @llvm.arm.neon.vst3lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
 	ret void
 }
@@ -215,7 +215,7 @@ define void @vst3lanei32(i32* %A, <2 x i
 ;CHECK-LABEL: vst3lanei32:
 ;CHECK: vst3.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
 	call void @llvm.arm.neon.vst3lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
 	ret void
 }
@@ -224,7 +224,7 @@ define void @vst3lanef(float* %A, <2 x f
 ;CHECK-LABEL: vst3lanef:
 ;CHECK: vst3.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %B
 	call void @llvm.arm.neon.vst3lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1, i32 1)
 	ret void
 }
@@ -234,7 +234,7 @@ define void @vst3laneQi16(i16* %A, <8 x
 ;Check the (default) alignment value.  VST3 does not support alignment.
 ;CHECK: vst3.16 {d17[2], d19[2], d21[2]}, [r0]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
 	call void @llvm.arm.neon.vst3lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 6, i32 8)
 	ret void
 }
@@ -243,7 +243,7 @@ define void @vst3laneQi32(i32* %A, <4 x
 ;CHECK-LABEL: vst3laneQi32:
 ;CHECK: vst3.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
 	call void @llvm.arm.neon.vst3lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 0, i32 1)
 	ret void
 }
@@ -252,9 +252,9 @@ define void @vst3laneQi32(i32* %A, <4 x
 define void @vst3laneQi32_update(i32** %ptr, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vst3laneQi32_update:
 ;CHECK: vst3.32 {d16[0], d18[0], d20[0]}, [r1]!
-	%A = load i32** %ptr
+	%A = load i32*, i32** %ptr
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
 	call void @llvm.arm.neon.vst3lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 0, i32 1)
 	%tmp2 = getelementptr i32, i32* %A, i32 3
 	store i32* %tmp2, i32** %ptr
@@ -265,7 +265,7 @@ define void @vst3laneQf(float* %A, <4 x
 ;CHECK-LABEL: vst3laneQf:
 ;CHECK: vst3.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %B
 	call void @llvm.arm.neon.vst3lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1, i32 1)
 	ret void
 }
@@ -284,7 +284,7 @@ define void @vst4lanei8(i8* %A, <8 x i8>
 ;CHECK-LABEL: vst4lanei8:
 ;Check the alignment value.  Max for this instruction is 32 bits:
 ;CHECK: vst4.8 {d16[1], d17[1], d18[1], d19[1]}, [r0:32]
-	%tmp1 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	call void @llvm.arm.neon.vst4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
 	ret void
 }
@@ -293,8 +293,8 @@ define void @vst4lanei8(i8* %A, <8 x i8>
 define void @vst4lanei8_update(i8** %ptr, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vst4lanei8_update:
 ;CHECK: vst4.8 {d16[1], d17[1], d18[1], d19[1]}, [r1:32]!
-	%A = load i8** %ptr
-	%tmp1 = load <8 x i8>* %B
+	%A = load i8*, i8** %ptr
+	%tmp1 = load <8 x i8>, <8 x i8>* %B
 	call void @llvm.arm.neon.vst4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
 	%tmp2 = getelementptr i8, i8* %A, i32 4
 	store i8* %tmp2, i8** %ptr
@@ -305,7 +305,7 @@ define void @vst4lanei16(i16* %A, <4 x i
 ;CHECK-LABEL: vst4lanei16:
 ;CHECK: vst4.16
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
 	call void @llvm.arm.neon.vst4lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 1)
 	ret void
 }
@@ -315,7 +315,7 @@ define void @vst4lanei32(i32* %A, <2 x i
 ;Check the alignment value.  Max for this instruction is 128 bits:
 ;CHECK: vst4.32 {d16[1], d17[1], d18[1], d19[1]}, [r0:128]
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %B
 	call void @llvm.arm.neon.vst4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 16)
 	ret void
 }
@@ -324,7 +324,7 @@ define void @vst4lanef(float* %A, <2 x f
 ;CHECK-LABEL: vst4lanef:
 ;CHECK: vst4.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %B
 	call void @llvm.arm.neon.vst4lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1, i32 1)
 	ret void
 }
@@ -334,7 +334,7 @@ define void @vst4laneQi16(i16* %A, <8 x
 ;Check the alignment value.  Max for this instruction is 64 bits:
 ;CHECK: vst4.16 {d17[3], d19[3], d21[3], d23[3]}, [r0:64]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %B
 	call void @llvm.arm.neon.vst4lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 7, i32 16)
 	ret void
 }
@@ -344,7 +344,7 @@ define void @vst4laneQi32(i32* %A, <4 x
 ;Check the (default) alignment.
 ;CHECK: vst4.32 {d17[0], d19[0], d21[0], d23[0]}, [r0]
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %B
 	call void @llvm.arm.neon.vst4lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 1)
 	ret void
 }
@@ -353,7 +353,7 @@ define void @vst4laneQf(float* %A, <4 x
 ;CHECK-LABEL: vst4laneQf:
 ;CHECK: vst4.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %B
 	call void @llvm.arm.neon.vst4lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1, i32 1)
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/vsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vsub.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vsub.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vsub.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vsubi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsubi8:
 ;CHECK: vsub.i8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = sub <8 x i8> %tmp1, %tmp2
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @vsubi8(<8 x i8>* %A, <8
 define <4 x i16> @vsubi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsubi16:
 ;CHECK: vsub.i16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = sub <4 x i16> %tmp1, %tmp2
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @vsubi16(<4 x i16>* %A,
 define <2 x i32> @vsubi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsubi32:
 ;CHECK: vsub.i32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = sub <2 x i32> %tmp1, %tmp2
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @vsubi32(<2 x i32>* %A,
 define <1 x i64> @vsubi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: vsubi64:
 ;CHECK: vsub.i64
-	%tmp1 = load <1 x i64>* %A
-	%tmp2 = load <1 x i64>* %B
+	%tmp1 = load <1 x i64>, <1 x i64>* %A
+	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = sub <1 x i64> %tmp1, %tmp2
 	ret <1 x i64> %tmp3
 }
@@ -39,8 +39,8 @@ define <1 x i64> @vsubi64(<1 x i64>* %A,
 define <2 x float> @vsubf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: vsubf32:
 ;CHECK: vsub.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = fsub <2 x float> %tmp1, %tmp2
 	ret <2 x float> %tmp3
 }
@@ -48,8 +48,8 @@ define <2 x float> @vsubf32(<2 x float>*
 define <16 x i8> @vsubQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsubQi8:
 ;CHECK: vsub.i8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = sub <16 x i8> %tmp1, %tmp2
 	ret <16 x i8> %tmp3
 }
@@ -57,8 +57,8 @@ define <16 x i8> @vsubQi8(<16 x i8>* %A,
 define <8 x i16> @vsubQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsubQi16:
 ;CHECK: vsub.i16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = sub <8 x i16> %tmp1, %tmp2
 	ret <8 x i16> %tmp3
 }
@@ -66,8 +66,8 @@ define <8 x i16> @vsubQi16(<8 x i16>* %A
 define <4 x i32> @vsubQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsubQi32:
 ;CHECK: vsub.i32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = sub <4 x i32> %tmp1, %tmp2
 	ret <4 x i32> %tmp3
 }
@@ -75,8 +75,8 @@ define <4 x i32> @vsubQi32(<4 x i32>* %A
 define <2 x i64> @vsubQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vsubQi64:
 ;CHECK: vsub.i64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = sub <2 x i64> %tmp1, %tmp2
 	ret <2 x i64> %tmp3
 }
@@ -84,8 +84,8 @@ define <2 x i64> @vsubQi64(<2 x i64>* %A
 define <4 x float> @vsubQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: vsubQf32:
 ;CHECK: vsub.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = fsub <4 x float> %tmp1, %tmp2
 	ret <4 x float> %tmp3
 }
@@ -120,8 +120,8 @@ define <2 x i32> @vsubhni64_natural(<2 x
 define <8 x i8> @vrsubhni16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vrsubhni16:
 ;CHECK: vrsubhn.i16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -129,8 +129,8 @@ define <8 x i8> @vrsubhni16(<8 x i16>* %
 define <4 x i16> @vrsubhni32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vrsubhni32:
 ;CHECK: vrsubhn.i32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -138,8 +138,8 @@ define <4 x i16> @vrsubhni32(<4 x i32>*
 define <2 x i32> @vrsubhni64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vrsubhni64:
 ;CHECK: vrsubhn.i64
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -151,8 +151,8 @@ declare <2 x i32> @llvm.arm.neon.vrsubhn
 define <8 x i16> @vsubls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsubls8:
 ;CHECK: vsubl.s8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
 	%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
 	%tmp5 = sub <8 x i16> %tmp3, %tmp4
@@ -162,8 +162,8 @@ define <8 x i16> @vsubls8(<8 x i8>* %A,
 define <4 x i32> @vsubls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsubls16:
 ;CHECK: vsubl.s16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
 	%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
 	%tmp5 = sub <4 x i32> %tmp3, %tmp4
@@ -173,8 +173,8 @@ define <4 x i32> @vsubls16(<4 x i16>* %A
 define <2 x i64> @vsubls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsubls32:
 ;CHECK: vsubl.s32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
 	%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
 	%tmp5 = sub <2 x i64> %tmp3, %tmp4
@@ -184,8 +184,8 @@ define <2 x i64> @vsubls32(<2 x i32>* %A
 define <8 x i16> @vsublu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsublu8:
 ;CHECK: vsubl.u8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
 	%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
 	%tmp5 = sub <8 x i16> %tmp3, %tmp4
@@ -195,8 +195,8 @@ define <8 x i16> @vsublu8(<8 x i8>* %A,
 define <4 x i32> @vsublu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsublu16:
 ;CHECK: vsubl.u16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
 	%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
 	%tmp5 = sub <4 x i32> %tmp3, %tmp4
@@ -206,8 +206,8 @@ define <4 x i32> @vsublu16(<4 x i16>* %A
 define <2 x i64> @vsublu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsublu32:
 ;CHECK: vsubl.u32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
 	%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
 	%tmp5 = sub <2 x i64> %tmp3, %tmp4
@@ -217,8 +217,8 @@ define <2 x i64> @vsublu32(<2 x i32>* %A
 define <8 x i16> @vsubws8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsubws8:
 ;CHECK: vsubw.s8
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = sext <8 x i8> %tmp2 to <8 x i16>
 	%tmp4 = sub <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
@@ -227,8 +227,8 @@ define <8 x i16> @vsubws8(<8 x i16>* %A,
 define <4 x i32> @vsubws16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsubws16:
 ;CHECK: vsubw.s16
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = sext <4 x i16> %tmp2 to <4 x i32>
 	%tmp4 = sub <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
@@ -237,8 +237,8 @@ define <4 x i32> @vsubws16(<4 x i32>* %A
 define <2 x i64> @vsubws32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsubws32:
 ;CHECK: vsubw.s32
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = sext <2 x i32> %tmp2 to <2 x i64>
 	%tmp4 = sub <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4
@@ -247,8 +247,8 @@ define <2 x i64> @vsubws32(<2 x i64>* %A
 define <8 x i16> @vsubwu8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsubwu8:
 ;CHECK: vsubw.u8
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = zext <8 x i8> %tmp2 to <8 x i16>
 	%tmp4 = sub <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
@@ -257,8 +257,8 @@ define <8 x i16> @vsubwu8(<8 x i16>* %A,
 define <4 x i32> @vsubwu16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsubwu16:
 ;CHECK: vsubw.u16
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = zext <4 x i16> %tmp2 to <4 x i32>
 	%tmp4 = sub <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
@@ -267,8 +267,8 @@ define <4 x i32> @vsubwu16(<4 x i32>* %A
 define <2 x i64> @vsubwu32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsubwu32:
 ;CHECK: vsubw.u32
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = zext <2 x i32> %tmp2 to <2 x i64>
 	%tmp4 = sub <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4

Modified: llvm/trunk/test/CodeGen/ARM/vtbl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vtbl.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vtbl.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vtbl.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@
 define <8 x i8> @vtbl1(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vtbl1:
 ;CHECK: vtbl.8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -16,8 +16,8 @@ define <8 x i8> @vtbl1(<8 x i8>* %A, <8
 define <8 x i8> @vtbl2(<8 x i8>* %A, %struct.__neon_int8x8x2_t* %B) nounwind {
 ;CHECK-LABEL: vtbl2:
 ;CHECK: vtbl.8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load %struct.__neon_int8x8x2_t* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load %struct.__neon_int8x8x2_t, %struct.__neon_int8x8x2_t* %B
         %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
 	%tmp5 = call <8 x i8> @llvm.arm.neon.vtbl2(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4)
@@ -27,8 +27,8 @@ define <8 x i8> @vtbl2(<8 x i8>* %A, %st
 define <8 x i8> @vtbl3(<8 x i8>* %A, %struct.__neon_int8x8x3_t* %B) nounwind {
 ;CHECK-LABEL: vtbl3:
 ;CHECK: vtbl.8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load %struct.__neon_int8x8x3_t* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load %struct.__neon_int8x8x3_t, %struct.__neon_int8x8x3_t* %B
         %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 2
@@ -39,8 +39,8 @@ define <8 x i8> @vtbl3(<8 x i8>* %A, %st
 define <8 x i8> @vtbl4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B) nounwind {
 ;CHECK-LABEL: vtbl4:
 ;CHECK: vtbl.8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load %struct.__neon_int8x8x4_t* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load %struct.__neon_int8x8x4_t, %struct.__neon_int8x8x4_t* %B
         %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
@@ -52,9 +52,9 @@ define <8 x i8> @vtbl4(<8 x i8>* %A, %st
 define <8 x i8> @vtbx1(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ;CHECK-LABEL: vtbx1:
 ;CHECK: vtbx.8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
-	%tmp3 = load <8 x i8>* %C
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp3 = load <8 x i8>, <8 x i8>* %C
 	%tmp4 = call <8 x i8> @llvm.arm.neon.vtbx1(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
 	ret <8 x i8> %tmp4
 }
@@ -62,11 +62,11 @@ define <8 x i8> @vtbx1(<8 x i8>* %A, <8
 define <8 x i8> @vtbx2(<8 x i8>* %A, %struct.__neon_int8x8x2_t* %B, <8 x i8>* %C) nounwind {
 ;CHECK-LABEL: vtbx2:
 ;CHECK: vtbx.8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load %struct.__neon_int8x8x2_t* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load %struct.__neon_int8x8x2_t, %struct.__neon_int8x8x2_t* %B
         %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
-	%tmp5 = load <8 x i8>* %C
+	%tmp5 = load <8 x i8>, <8 x i8>* %C
 	%tmp6 = call <8 x i8> @llvm.arm.neon.vtbx2(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5)
 	ret <8 x i8> %tmp6
 }
@@ -74,12 +74,12 @@ define <8 x i8> @vtbx2(<8 x i8>* %A, %st
 define <8 x i8> @vtbx3(<8 x i8>* %A, %struct.__neon_int8x8x3_t* %B, <8 x i8>* %C) nounwind {
 ;CHECK-LABEL: vtbx3:
 ;CHECK: vtbx.8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load %struct.__neon_int8x8x3_t* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load %struct.__neon_int8x8x3_t, %struct.__neon_int8x8x3_t* %B
         %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 2
-	%tmp6 = load <8 x i8>* %C
+	%tmp6 = load <8 x i8>, <8 x i8>* %C
 	%tmp7 = call <8 x i8> @llvm.arm.neon.vtbx3(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6)
 	ret <8 x i8> %tmp7
 }
@@ -87,13 +87,13 @@ define <8 x i8> @vtbx3(<8 x i8>* %A, %st
 define <8 x i8> @vtbx4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B, <8 x i8>* %C) nounwind {
 ;CHECK-LABEL: vtbx4:
 ;CHECK: vtbx.8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load %struct.__neon_int8x8x4_t* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load %struct.__neon_int8x8x4_t, %struct.__neon_int8x8x4_t* %B
         %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
         %tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
-	%tmp7 = load <8 x i8>* %C
+	%tmp7 = load <8 x i8>, <8 x i8>* %C
 	%tmp8 = call <8 x i8> @llvm.arm.neon.vtbx4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7)
 	ret <8 x i8> %tmp8
 }

Modified: llvm/trunk/test/CodeGen/ARM/vtrn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vtrn.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vtrn.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vtrn.ll Fri Feb 27 15:17:42 2015
@@ -4,8 +4,8 @@ define <8 x i8> @vtrni8(<8 x i8>* %A, <8
 ;CHECK-LABEL: vtrni8:
 ;CHECK: vtrn.8
 ;CHECK-NEXT: vadd.i8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -16,8 +16,8 @@ define <4 x i16> @vtrni16(<4 x i16>* %A,
 ;CHECK-LABEL: vtrni16:
 ;CHECK: vtrn.16
 ;CHECK-NEXT: vadd.i16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
 	%tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
         %tmp5 = add <4 x i16> %tmp3, %tmp4
@@ -28,8 +28,8 @@ define <2 x i32> @vtrni32(<2 x i32>* %A,
 ;CHECK-LABEL: vtrni32:
 ;CHECK: vtrn.32
 ;CHECK-NEXT: vadd.i32
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
 	%tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
         %tmp5 = add <2 x i32> %tmp3, %tmp4
@@ -40,8 +40,8 @@ define <2 x float> @vtrnf(<2 x float>* %
 ;CHECK-LABEL: vtrnf:
 ;CHECK: vtrn.32
 ;CHECK-NEXT: vadd.f32
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 0, i32 2>
 	%tmp4 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 1, i32 3>
         %tmp5 = fadd <2 x float> %tmp3, %tmp4
@@ -52,8 +52,8 @@ define <16 x i8> @vtrnQi8(<16 x i8>* %A,
 ;CHECK-LABEL: vtrnQi8:
 ;CHECK: vtrn.8
 ;CHECK-NEXT: vadd.i8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
 	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
         %tmp5 = add <16 x i8> %tmp3, %tmp4
@@ -64,8 +64,8 @@ define <8 x i16> @vtrnQi16(<8 x i16>* %A
 ;CHECK-LABEL: vtrnQi16:
 ;CHECK: vtrn.16
 ;CHECK-NEXT: vadd.i16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
 	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
         %tmp5 = add <8 x i16> %tmp3, %tmp4
@@ -76,8 +76,8 @@ define <4 x i32> @vtrnQi32(<4 x i32>* %A
 ;CHECK-LABEL: vtrnQi32:
 ;CHECK: vtrn.32
 ;CHECK-NEXT: vadd.i32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
 	%tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
         %tmp5 = add <4 x i32> %tmp3, %tmp4
@@ -88,8 +88,8 @@ define <4 x float> @vtrnQf(<4 x float>*
 ;CHECK-LABEL: vtrnQf:
 ;CHECK: vtrn.32
 ;CHECK-NEXT: vadd.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
 	%tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
         %tmp5 = fadd <4 x float> %tmp3, %tmp4
@@ -102,8 +102,8 @@ define <8 x i8> @vtrni8_undef(<8 x i8>*
 ;CHECK-LABEL: vtrni8_undef:
 ;CHECK: vtrn.8
 ;CHECK-NEXT: vadd.i8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 2, i32 10, i32 undef, i32 12, i32 6, i32 14>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 undef, i32 undef, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -114,8 +114,8 @@ define <8 x i16> @vtrnQi16_undef(<8 x i1
 ;CHECK-LABEL: vtrnQi16_undef:
 ;CHECK: vtrn.16
 ;CHECK-NEXT: vadd.i16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14>
 	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
         %tmp5 = add <8 x i16> %tmp3, %tmp4

Modified: llvm/trunk/test/CodeGen/ARM/vuzp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vuzp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vuzp.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vuzp.ll Fri Feb 27 15:17:42 2015
@@ -4,8 +4,8 @@ define <8 x i8> @vuzpi8(<8 x i8>* %A, <8
 ;CHECK-LABEL: vuzpi8:
 ;CHECK: vuzp.8
 ;CHECK-NEXT: vadd.i8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -16,8 +16,8 @@ define <4 x i16> @vuzpi16(<4 x i16>* %A,
 ;CHECK-LABEL: vuzpi16:
 ;CHECK: vuzp.16
 ;CHECK-NEXT: vadd.i16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
 	%tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
         %tmp5 = add <4 x i16> %tmp3, %tmp4
@@ -30,8 +30,8 @@ define <16 x i8> @vuzpQi8(<16 x i8>* %A,
 ;CHECK-LABEL: vuzpQi8:
 ;CHECK: vuzp.8
 ;CHECK-NEXT: vadd.i8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
 	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
         %tmp5 = add <16 x i8> %tmp3, %tmp4
@@ -42,8 +42,8 @@ define <8 x i16> @vuzpQi16(<8 x i16>* %A
 ;CHECK-LABEL: vuzpQi16:
 ;CHECK: vuzp.16
 ;CHECK-NEXT: vadd.i16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
 	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
         %tmp5 = add <8 x i16> %tmp3, %tmp4
@@ -54,8 +54,8 @@ define <4 x i32> @vuzpQi32(<4 x i32>* %A
 ;CHECK-LABEL: vuzpQi32:
 ;CHECK: vuzp.32
 ;CHECK-NEXT: vadd.i32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
 	%tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
         %tmp5 = add <4 x i32> %tmp3, %tmp4
@@ -66,8 +66,8 @@ define <4 x float> @vuzpQf(<4 x float>*
 ;CHECK-LABEL: vuzpQf:
 ;CHECK: vuzp.32
 ;CHECK-NEXT: vadd.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
 	%tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
         %tmp5 = fadd <4 x float> %tmp3, %tmp4
@@ -80,8 +80,8 @@ define <8 x i8> @vuzpi8_undef(<8 x i8>*
 ;CHECK-LABEL: vuzpi8_undef:
 ;CHECK: vuzp.8
 ;CHECK-NEXT: vadd.i8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 8, i32 10, i32 12, i32 14>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 undef, i32 undef, i32 13, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -92,8 +92,8 @@ define <8 x i16> @vuzpQi16_undef(<8 x i1
 ;CHECK-LABEL: vuzpQi16_undef:
 ;CHECK: vuzp.16
 ;CHECK-NEXT: vadd.i16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 undef, i32 4, i32 undef, i32 8, i32 10, i32 12, i32 14>
 	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 undef, i32 undef, i32 11, i32 13, i32 15>
         %tmp5 = add <8 x i16> %tmp3, %tmp4

Modified: llvm/trunk/test/CodeGen/ARM/vzip.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vzip.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vzip.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vzip.ll Fri Feb 27 15:17:42 2015
@@ -4,8 +4,8 @@ define <8 x i8> @vzipi8(<8 x i8>* %A, <8
 ;CHECK-LABEL: vzipi8:
 ;CHECK: vzip.8
 ;CHECK-NEXT: vadd.i8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -16,8 +16,8 @@ define <4 x i16> @vzipi16(<4 x i16>* %A,
 ;CHECK-LABEL: vzipi16:
 ;CHECK: vzip.16
 ;CHECK-NEXT: vadd.i16
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
 	%tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
         %tmp5 = add <4 x i16> %tmp3, %tmp4
@@ -30,8 +30,8 @@ define <16 x i8> @vzipQi8(<16 x i8>* %A,
 ;CHECK-LABEL: vzipQi8:
 ;CHECK: vzip.8
 ;CHECK-NEXT: vadd.i8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
 	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
         %tmp5 = add <16 x i8> %tmp3, %tmp4
@@ -42,8 +42,8 @@ define <8 x i16> @vzipQi16(<8 x i16>* %A
 ;CHECK-LABEL: vzipQi16:
 ;CHECK: vzip.16
 ;CHECK-NEXT: vadd.i16
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
 	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
         %tmp5 = add <8 x i16> %tmp3, %tmp4
@@ -54,8 +54,8 @@ define <4 x i32> @vzipQi32(<4 x i32>* %A
 ;CHECK-LABEL: vzipQi32:
 ;CHECK: vzip.32
 ;CHECK-NEXT: vadd.i32
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
 	%tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
         %tmp5 = add <4 x i32> %tmp3, %tmp4
@@ -66,8 +66,8 @@ define <4 x float> @vzipQf(<4 x float>*
 ;CHECK-LABEL: vzipQf:
 ;CHECK: vzip.32
 ;CHECK-NEXT: vadd.f32
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
 	%tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
         %tmp5 = fadd <4 x float> %tmp3, %tmp4
@@ -80,8 +80,8 @@ define <8 x i8> @vzipi8_undef(<8 x i8>*
 ;CHECK-LABEL: vzipi8_undef:
 ;CHECK: vzip.8
 ;CHECK-NEXT: vadd.i8
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 1, i32 9, i32 undef, i32 10, i32 3, i32 11>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 undef, i32 undef, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -92,8 +92,8 @@ define <16 x i8> @vzipQi8_undef(<16 x i8
 ;CHECK-LABEL: vzipQi8_undef:
 ;CHECK: vzip.8
 ;CHECK-NEXT: vadd.i8
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 undef, i32 undef, i32 undef, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
 	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 undef, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 undef, i32 14, i32 30, i32 undef, i32 31>
         %tmp5 = add <16 x i8> %tmp3, %tmp4

Modified: llvm/trunk/test/CodeGen/ARM/zextload_demandedbits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/zextload_demandedbits.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/zextload_demandedbits.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/zextload_demandedbits.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ target datalayout = "e-p:32:32:32-i1:8:3
 define void @quux(%struct.eggs* %arg) {
 bb:
   %tmp1 = getelementptr inbounds %struct.eggs, %struct.eggs* %arg, i32 0, i32 1
-  %0 = load i16* %tmp1, align 2
+  %0 = load i16, i16* %tmp1, align 2
   %tobool = icmp eq i16 %0, 0
   br i1 %tobool, label %bb16, label %bb3
 

Modified: llvm/trunk/test/CodeGen/BPF/basictest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/BPF/basictest.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/BPF/basictest.ll (original)
+++ llvm/trunk/test/CodeGen/BPF/basictest.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ entry:
 
 @G = external global i8
 define zeroext i8 @loadG() {
-  %tmp = load i8* @G
+  %tmp = load i8, i8* @G
   ret i8 %tmp
 ; CHECK-LABEL: loadG:
 ; CHECK: ld_64 r1

Modified: llvm/trunk/test/CodeGen/BPF/ex1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/BPF/ex1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/BPF/ex1.ll (original)
+++ llvm/trunk/test/CodeGen/BPF/ex1.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ define i32 @bpf_prog1(%struct.bpf_contex
   %1 = getelementptr inbounds [3 x i8], [3 x i8]* %devname, i64 0, i64 0
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([3 x i8]* @bpf_prog1.devname, i64 0, i64 0), i64 3, i32 1, i1 false)
   %2 = getelementptr inbounds %struct.bpf_context, %struct.bpf_context* %ctx, i64 0, i32 0
-  %3 = load i64* %2, align 8
+  %3 = load i64, i64* %2, align 8
   %4 = inttoptr i64 %3 to %struct.sk_buff*
   %5 = getelementptr inbounds %struct.sk_buff, %struct.sk_buff* %4, i64 0, i32 2
   %6 = bitcast i64* %5 to i8*

Modified: llvm/trunk/test/CodeGen/BPF/intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/BPF/intrinsics.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/BPF/intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/BPF/intrinsics.ll Fri Feb 27 15:17:42 2015
@@ -4,11 +4,11 @@
 define i32 @ld_b(i64 %foo, i64* nocapture %bar, i8* %ctx, i8* %ctx2) #0 {
   %1 = tail call i64 @llvm.bpf.load.byte(i8* %ctx, i64 123) #2
   %2 = add i64 %1, %foo
-  %3 = load volatile i64* %bar, align 8
+  %3 = load volatile i64, i64* %bar, align 8
   %4 = add i64 %2, %3
   %5 = tail call i64 @llvm.bpf.load.byte(i8* %ctx2, i64 %foo) #2
   %6 = add i64 %4, %5
-  %7 = load volatile i64* %bar, align 8
+  %7 = load volatile i64, i64* %bar, align 8
   %8 = add i64 %6, %7
   %9 = trunc i64 %8 to i32
   ret i32 %9

Modified: llvm/trunk/test/CodeGen/BPF/load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/BPF/load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/BPF/load.ll (original)
+++ llvm/trunk/test/CodeGen/BPF/load.ll Fri Feb 27 15:17:42 2015
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=bpf | FileCheck %s
 
 define i16 @am1(i16* %a) nounwind {
-  %1 = load i16* %a
+  %1 = load i16, i16* %a
   ret i16 %1
 }
 ; CHECK-LABEL: am1:
@@ -10,14 +10,14 @@ define i16 @am1(i16* %a) nounwind {
 @foo = external global i16
 
 define i16 @am2() nounwind {
-  %1 = load i16* @foo
+  %1 = load i16, i16* @foo
   ret i16 %1
 }
 ; CHECK-LABEL: am2:
 ; CHECK: ldh r0, 0(r1)
 
 define i16 @am4() nounwind {
-  %1 = load volatile i16* inttoptr(i16 32 to i16*)
+  %1 = load volatile i16, i16* inttoptr(i16 32 to i16*)
   ret i16 %1
 }
 ; CHECK-LABEL: am4:
@@ -26,7 +26,7 @@ define i16 @am4() nounwind {
 
 define i16 @am5(i16* %a) nounwind {
   %1 = getelementptr i16, i16* %a, i16 2
-  %2 = load i16* %1
+  %2 = load i16, i16* %1
   ret i16 %2
 }
 ; CHECK-LABEL: am5:
@@ -36,7 +36,7 @@ define i16 @am5(i16* %a) nounwind {
 @baz = common global %S zeroinitializer, align 1
 
 define i16 @am6() nounwind {
-  %1 = load i16* getelementptr (%S* @baz, i32 0, i32 1)
+  %1 = load i16, i16* getelementptr (%S* @baz, i32 0, i32 1)
   ret i16 %1
 }
 ; CHECK-LABEL: am6:

Modified: llvm/trunk/test/CodeGen/BPF/loops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/BPF/loops.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/BPF/loops.ll (original)
+++ llvm/trunk/test/CodeGen/BPF/loops.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ for.body:
   %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: add:
 ; CHECK: add r{{[0-9]+}}, r{{[0-9]+}}
-  %tmp4 = load i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
   %add = add i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]
@@ -33,7 +33,7 @@ for.body:
   %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: sub:
 ; CHECK: sub r{{[0-9]+}}, r{{[0-9]+}}
-  %tmp4 = load i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
   %add = sub i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]
@@ -55,7 +55,7 @@ for.body:
   %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: or:
 ; CHECK: or r{{[0-9]+}}, r{{[0-9]+}}
-  %tmp4 = load i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
   %add = or i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]
@@ -77,7 +77,7 @@ for.body:
   %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: xor:
 ; CHECK: xor r{{[0-9]+}}, r{{[0-9]+}}
-  %tmp4 = load i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
   %add = xor i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]
@@ -99,7 +99,7 @@ for.body:
   %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: and:
 ; CHECK: and r{{[0-9]+}}, r{{[0-9]+}}
-  %tmp4 = load i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
   %add = and i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]

Modified: llvm/trunk/test/CodeGen/BPF/struct_ret1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/BPF/struct_ret1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/BPF/struct_ret1.ll (original)
+++ llvm/trunk/test/CodeGen/BPF/struct_ret1.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@
 ; Function Attrs: nounwind readonly uwtable
 define { i64, i32 } @bar(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) #0 {
 entry:
-  %retval.sroa.0.0.copyload = load i64* bitcast (%struct.S* @s to i64*), align 4
-  %retval.sroa.2.0.copyload = load i32* getelementptr inbounds (%struct.S* @s, i64 0, i32 2), align 4
+  %retval.sroa.0.0.copyload = load i64, i64* bitcast (%struct.S* @s to i64*), align 4
+  %retval.sroa.2.0.copyload = load i32, i32* getelementptr inbounds (%struct.S* @s, i64 0, i32 2), align 4
   %.fca.0.insert = insertvalue { i64, i32 } undef, i64 %retval.sroa.0.0.copyload, 0
   %.fca.1.insert = insertvalue { i64, i32 } %.fca.0.insert, i32 %retval.sroa.2.0.copyload, 1
   ret { i64, i32 } %.fca.1.insert

Modified: llvm/trunk/test/CodeGen/CPP/2009-05-01-Long-Double.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/CPP/2009-05-01-Long-Double.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/CPP/2009-05-01-Long-Double.ll (original)
+++ llvm/trunk/test/CodeGen/CPP/2009-05-01-Long-Double.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ entry:
 	%call = call i32 (...)* @other_func()		; <i32> [#uses=1]
 	%conv = sitofp i32 %call to x86_fp80		; <x86_fp80> [#uses=1]
 	store x86_fp80 %conv, x86_fp80* %retval
-	%0 = load x86_fp80* %retval		; <x86_fp80> [#uses=1]
+	%0 = load x86_fp80, x86_fp80* %retval		; <x86_fp80> [#uses=1]
 	ret x86_fp80 %0
 }
 

Modified: llvm/trunk/test/CodeGen/CPP/2009-05-04-CondBr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/CPP/2009-05-04-CondBr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/CPP/2009-05-04-CondBr.ll (original)
+++ llvm/trunk/test/CodeGen/CPP/2009-05-04-CondBr.ll Fri Feb 27 15:17:42 2015
@@ -6,10 +6,10 @@ entry:
 	%retval = alloca i32		; <i32*> [#uses=2]
 	%a.addr = alloca i32		; <i32*> [#uses=8]
 	store i32 %a, i32* %a.addr
-	%tmp = load i32* %a.addr		; <i32> [#uses=1]
+	%tmp = load i32, i32* %a.addr		; <i32> [#uses=1]
 	%inc = add i32 %tmp, 1		; <i32> [#uses=1]
 	store i32 %inc, i32* %a.addr
-	%tmp1 = load i32* %a.addr		; <i32> [#uses=1]
+	%tmp1 = load i32, i32* %a.addr		; <i32> [#uses=1]
 	%cmp = icmp slt i32 %tmp1, 3		; <i1> [#uses=1]
 	br i1 %cmp, label %if.then, label %if.end
 
@@ -18,11 +18,11 @@ if.then:		; preds = %entry
 	br label %if.end
 
 if.end:		; preds = %if.then, %entry
-	%tmp2 = load i32* %a.addr		; <i32> [#uses=1]
+	%tmp2 = load i32, i32* %a.addr		; <i32> [#uses=1]
 	%inc3 = add i32 %tmp2, 1		; <i32> [#uses=1]
 	store i32 %inc3, i32* %a.addr
-	%tmp4 = load i32* %a.addr		; <i32> [#uses=1]
+	%tmp4 = load i32, i32* %a.addr		; <i32> [#uses=1]
 	store i32 %tmp4, i32* %retval
-	%0 = load i32* %retval		; <i32> [#uses=1]
+	%0 = load i32, i32* %retval		; <i32> [#uses=1]
 	ret i32 %0
 }

Modified: llvm/trunk/test/CodeGen/Generic/2003-05-28-ManyArgs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2003-05-28-ManyArgs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2003-05-28-ManyArgs.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2003-05-28-ManyArgs.ll Fri Feb 27 15:17:42 2015
@@ -51,93 +51,93 @@ entry:
 	%tmp.112 = getelementptr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 0		; <i32*> [#uses=1]
 	%tmp.114 = getelementptr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 6		; <i32*> [#uses=1]
 	%tmp.118 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 7		; <i32*> [#uses=1]
-	%tmp.135 = load i32* %operation		; <i32> [#uses=1]
-	%tmp.137 = load i32* %tmp.112		; <i32> [#uses=1]
+	%tmp.135 = load i32, i32* %operation		; <i32> [#uses=1]
+	%tmp.137 = load i32, i32* %tmp.112		; <i32> [#uses=1]
 	%tmp.138 = getelementptr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 1		; <float*> [#uses=1]
-	%tmp.139 = load float* %tmp.138		; <float> [#uses=1]
+	%tmp.139 = load float, float* %tmp.138		; <float> [#uses=1]
 	%tmp.140 = getelementptr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 2		; <i32*> [#uses=1]
-	%tmp.141 = load i32* %tmp.140		; <i32> [#uses=1]
+	%tmp.141 = load i32, i32* %tmp.140		; <i32> [#uses=1]
 	%tmp.142 = getelementptr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 3		; <i32*> [#uses=1]
-	%tmp.143 = load i32* %tmp.142		; <i32> [#uses=1]
-	%tmp.145 = load i8** %tmp.101		; <i8*> [#uses=1]
+	%tmp.143 = load i32, i32* %tmp.142		; <i32> [#uses=1]
+	%tmp.145 = load i8*, i8** %tmp.101		; <i8*> [#uses=1]
 	%tmp.146 = getelementptr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 5		; <i32*> [#uses=1]
-	%tmp.147 = load i32* %tmp.146		; <i32> [#uses=1]
-	%tmp.149 = load i32* %tmp.114		; <i32> [#uses=1]
-	%tmp.154 = load i32* %full_stats		; <i32> [#uses=1]
-	%tmp.155 = load i32* %verify_binary_search		; <i32> [#uses=1]
+	%tmp.147 = load i32, i32* %tmp.146		; <i32> [#uses=1]
+	%tmp.149 = load i32, i32* %tmp.114		; <i32> [#uses=1]
+	%tmp.154 = load i32, i32* %full_stats		; <i32> [#uses=1]
+	%tmp.155 = load i32, i32* %verify_binary_search		; <i32> [#uses=1]
 	%tmp.156 = getelementptr %struct..s_annealing_sched, %struct..s_annealing_sched* %annealing_sched, i64 0, i32 0		; <i32*> [#uses=1]
-	%tmp.157 = load i32* %tmp.156		; <i32> [#uses=1]
+	%tmp.157 = load i32, i32* %tmp.156		; <i32> [#uses=1]
 	%tmp.158 = getelementptr %struct..s_annealing_sched, %struct..s_annealing_sched* %annealing_sched, i64 0, i32 1		; <float*> [#uses=1]
-	%tmp.159 = load float* %tmp.158		; <float> [#uses=1]
+	%tmp.159 = load float, float* %tmp.158		; <float> [#uses=1]
 	%tmp.160 = getelementptr %struct..s_annealing_sched, %struct..s_annealing_sched* %annealing_sched, i64 0, i32 2		; <float*> [#uses=1]
-	%tmp.161 = load float* %tmp.160		; <float> [#uses=1]
+	%tmp.161 = load float, float* %tmp.160		; <float> [#uses=1]
 	%tmp.162 = getelementptr %struct..s_annealing_sched, %struct..s_annealing_sched* %annealing_sched, i64 0, i32 3		; <float*> [#uses=1]
-	%tmp.163 = load float* %tmp.162		; <float> [#uses=1]
+	%tmp.163 = load float, float* %tmp.162		; <float> [#uses=1]
 	%tmp.164 = getelementptr %struct..s_annealing_sched, %struct..s_annealing_sched* %annealing_sched, i64 0, i32 4		; <float*> [#uses=1]
-	%tmp.165 = load float* %tmp.164		; <float> [#uses=1]
+	%tmp.165 = load float, float* %tmp.164		; <float> [#uses=1]
 	%tmp.166 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 0		; <float*> [#uses=1]
-	%tmp.167 = load float* %tmp.166		; <float> [#uses=1]
+	%tmp.167 = load float, float* %tmp.166		; <float> [#uses=1]
 	%tmp.168 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 1		; <float*> [#uses=1]
-	%tmp.169 = load float* %tmp.168		; <float> [#uses=1]
+	%tmp.169 = load float, float* %tmp.168		; <float> [#uses=1]
 	%tmp.170 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 2		; <float*> [#uses=1]
-	%tmp.171 = load float* %tmp.170		; <float> [#uses=1]
+	%tmp.171 = load float, float* %tmp.170		; <float> [#uses=1]
 	%tmp.172 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 3		; <float*> [#uses=1]
-	%tmp.173 = load float* %tmp.172		; <float> [#uses=1]
+	%tmp.173 = load float, float* %tmp.172		; <float> [#uses=1]
 	%tmp.174 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 4		; <float*> [#uses=1]
-	%tmp.175 = load float* %tmp.174		; <float> [#uses=1]
+	%tmp.175 = load float, float* %tmp.174		; <float> [#uses=1]
 	%tmp.176 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 5		; <i32*> [#uses=1]
-	%tmp.177 = load i32* %tmp.176		; <i32> [#uses=1]
+	%tmp.177 = load i32, i32* %tmp.176		; <i32> [#uses=1]
 	%tmp.178 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 6		; <i32*> [#uses=1]
-	%tmp.179 = load i32* %tmp.178		; <i32> [#uses=1]
-	%tmp.181 = load i32* %tmp.118		; <i32> [#uses=1]
+	%tmp.179 = load i32, i32* %tmp.178		; <i32> [#uses=1]
+	%tmp.181 = load i32, i32* %tmp.118		; <i32> [#uses=1]
 	%tmp.182 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 8		; <i32*> [#uses=1]
-	%tmp.183 = load i32* %tmp.182		; <i32> [#uses=1]
+	%tmp.183 = load i32, i32* %tmp.182		; <i32> [#uses=1]
 	%tmp.184 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 0		; <i32*> [#uses=1]
-	%tmp.185 = load i32* %tmp.184		; <i32> [#uses=1]
+	%tmp.185 = load i32, i32* %tmp.184		; <i32> [#uses=1]
 	%tmp.186 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 1		; <float*> [#uses=1]
-	%tmp.187 = load float* %tmp.186		; <float> [#uses=1]
+	%tmp.187 = load float, float* %tmp.186		; <float> [#uses=1]
 	%tmp.188 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 2		; <float*> [#uses=1]
-	%tmp.189 = load float* %tmp.188		; <float> [#uses=1]
+	%tmp.189 = load float, float* %tmp.188		; <float> [#uses=1]
 	%tmp.190 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 3		; <float*> [#uses=1]
-	%tmp.191 = load float* %tmp.190		; <float> [#uses=1]
+	%tmp.191 = load float, float* %tmp.190		; <float> [#uses=1]
 	%tmp.192 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 4		; <i32*> [#uses=1]
-	%tmp.193 = load i32* %tmp.192		; <i32> [#uses=1]
+	%tmp.193 = load i32, i32* %tmp.192		; <i32> [#uses=1]
 	%tmp.194 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 5		; <i32*> [#uses=1]
-	%tmp.195 = load i32* %tmp.194		; <i32> [#uses=1]
+	%tmp.195 = load i32, i32* %tmp.194		; <i32> [#uses=1]
 	%tmp.196 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 6		; <i16*> [#uses=1]
-	%tmp.197 = load i16* %tmp.196		; <i16> [#uses=1]
+	%tmp.197 = load i16, i16* %tmp.196		; <i16> [#uses=1]
 	%tmp.198 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 7		; <i16*> [#uses=1]
-	%tmp.199 = load i16* %tmp.198		; <i16> [#uses=1]
+	%tmp.199 = load i16, i16* %tmp.198		; <i16> [#uses=1]
 	%tmp.200 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 8		; <i16*> [#uses=1]
-	%tmp.201 = load i16* %tmp.200		; <i16> [#uses=1]
+	%tmp.201 = load i16, i16* %tmp.200		; <i16> [#uses=1]
 	%tmp.202 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 9		; <float*> [#uses=1]
-	%tmp.203 = load float* %tmp.202		; <float> [#uses=1]
+	%tmp.203 = load float, float* %tmp.202		; <float> [#uses=1]
 	%tmp.204 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 10		; <float*> [#uses=1]
-	%tmp.205 = load float* %tmp.204		; <float> [#uses=1]
-	%tmp.206 = load %struct..s_segment_inf** %segment_inf		; <%struct..s_segment_inf*> [#uses=1]
-	%tmp.208 = load i32* %tmp.109		; <i32> [#uses=1]
+	%tmp.205 = load float, float* %tmp.204		; <float> [#uses=1]
+	%tmp.206 = load %struct..s_segment_inf*, %struct..s_segment_inf** %segment_inf		; <%struct..s_segment_inf*> [#uses=1]
+	%tmp.208 = load i32, i32* %tmp.109		; <i32> [#uses=1]
 	%tmp.209 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 1		; <float*> [#uses=1]
-	%tmp.210 = load float* %tmp.209		; <float> [#uses=1]
+	%tmp.210 = load float, float* %tmp.209		; <float> [#uses=1]
 	%tmp.211 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 2		; <float*> [#uses=1]
-	%tmp.212 = load float* %tmp.211		; <float> [#uses=1]
+	%tmp.212 = load float, float* %tmp.211		; <float> [#uses=1]
 	%tmp.213 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 3		; <float*> [#uses=1]
-	%tmp.214 = load float* %tmp.213		; <float> [#uses=1]
+	%tmp.214 = load float, float* %tmp.213		; <float> [#uses=1]
 	%tmp.215 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 4		; <float*> [#uses=1]
-	%tmp.216 = load float* %tmp.215		; <float> [#uses=1]
+	%tmp.216 = load float, float* %tmp.215		; <float> [#uses=1]
 	%tmp.217 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 5		; <float*> [#uses=1]
-	%tmp.218 = load float* %tmp.217		; <float> [#uses=1]
+	%tmp.218 = load float, float* %tmp.217		; <float> [#uses=1]
 	%tmp.219 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 6		; <float*> [#uses=1]
-	%tmp.220 = load float* %tmp.219		; <float> [#uses=1]
+	%tmp.220 = load float, float* %tmp.219		; <float> [#uses=1]
 	%tmp.221 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 7		; <float*> [#uses=1]
-	%tmp.222 = load float* %tmp.221		; <float> [#uses=1]
+	%tmp.222 = load float, float* %tmp.221		; <float> [#uses=1]
 	%tmp.223 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 8		; <float*> [#uses=1]
-	%tmp.224 = load float* %tmp.223		; <float> [#uses=1]
+	%tmp.224 = load float, float* %tmp.223		; <float> [#uses=1]
 	%tmp.225 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 9		; <float*> [#uses=1]
-	%tmp.226 = load float* %tmp.225		; <float> [#uses=1]
+	%tmp.226 = load float, float* %tmp.225		; <float> [#uses=1]
 	%tmp.227 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 10		; <float*> [#uses=1]
-	%tmp.228 = load float* %tmp.227		; <float> [#uses=1]
+	%tmp.228 = load float, float* %tmp.227		; <float> [#uses=1]
 	call void @place_and_route( i32 %tmp.135, i32 %tmp.137, float %tmp.139, i32 %tmp.141, i32 %tmp.143, i8* %tmp.145, i32 %tmp.147, i32 %tmp.149, i8* %tmp.107, i8* %tmp.105, i8* %tmp.106, i8* %tmp.108, i32 %tmp.154, i32 %tmp.155, i32 %tmp.157, float %tmp.159, float %tmp.161, float %tmp.163, float %tmp.165, float %tmp.167, float %tmp.169, float %tmp.171, float %tmp.173, float %tmp.175, i32 %tmp.177, i32 %tmp.179, i32 %tmp.181, i32 %tmp.183, i32 %tmp.185, float %tmp.187, float %tmp.189, float %tmp.191, i32 %tmp.193, i32 %tmp.195, i16 %tmp.197, i16 %tmp.199, i16 %tmp.201, float %tmp.203, float %tmp.205, %struct..s_segment_inf* %tmp.206, i32 %tmp.208, float %tmp.210, float %tmp.212, float %tmp.214, float %tmp.216, float %tmp.218, float %tmp.220, float %tmp.222, float %tmp.224, float %tmp.226, float %tmp.228 )
-	%tmp.231 = load i32* %show_graphics		; <i32> [#uses=1]
+	%tmp.231 = load i32, i32* %show_graphics		; <i32> [#uses=1]
 	%tmp.232 = icmp ne i32 %tmp.231, 0		; <i1> [#uses=1]
 	br i1 %tmp.232, label %then.2, label %endif.2
 

Modified: llvm/trunk/test/CodeGen/Generic/2003-05-30-BadFoldGEP.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2003-05-30-BadFoldGEP.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2003-05-30-BadFoldGEP.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2003-05-30-BadFoldGEP.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@
 
 define internal i32 @OpenOutput(i8* %filename.1) {
 entry:
-        %tmp.0 = load %FileType** @Output               ; <%FileType*> [#uses=1]
+        %tmp.0 = load %FileType*, %FileType** @Output               ; <%FileType*> [#uses=1]
         %tmp.4 = getelementptr %FileType, %FileType* %tmp.0, i64 1         ; <%FileType*> [#uses=1]
         %addrOfGlobal = getelementptr [16 x %FileType], [16 x %FileType]* @OutputFiles, i64 0             ; <[16 x %FileType]*> [#uses=1]
         %constantGEP = getelementptr [16 x %FileType], [16 x %FileType]* %addrOfGlobal, i64 1             ; <[16 x %FileType]*> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll Fri Feb 27 15:17:42 2015
@@ -27,7 +27,7 @@ entry:
 loopentry:              ; preds = %loopentry, %entry
         %i = phi i64 [ 0, %entry ], [ %inc.i, %loopentry ]              ; <i64> [#uses=3]
         %cptr = getelementptr [6 x i8], [6 x i8]* @yy_ec, i64 0, i64 %i           ; <i8*> [#uses=1]
-        %c = load i8* %cptr             ; <i8> [#uses=1]
+        %c = load i8, i8* %cptr             ; <i8> [#uses=1]
         %ignore = call i32 (i8*, ...)* @printf( i8* getelementptr ([8 x i8]* @.str_3, i64 0, i64 0), i64 %i )        ; <i32> [#uses=0]
         %ignore2 = call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @.str_4, i64 0, i64 0), i8 %c )        ; <i32> [#uses=0]
         %inc.i = add i64 %i, 1          ; <i64> [#uses=2]

Modified: llvm/trunk/test/CodeGen/Generic/2004-05-09-LiveVarPartialRegister.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2004-05-09-LiveVarPartialRegister.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2004-05-09-LiveVarPartialRegister.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2004-05-09-LiveVarPartialRegister.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 @global_long_2 = linkonce global i64 49         ; <i64*> [#uses=1]
 
 define i32 @main() {
-        %l1 = load i64* @global_long_1          ; <i64> [#uses=1]
-        %l2 = load i64* @global_long_2          ; <i64> [#uses=1]
+        %l1 = load i64, i64* @global_long_1          ; <i64> [#uses=1]
+        %l2 = load i64, i64* @global_long_2          ; <i64> [#uses=1]
         %cond = icmp sle i64 %l1, %l2           ; <i1> [#uses=1]
         %cast2 = zext i1 %cond to i32           ; <i32> [#uses=1]
         %RV = sub i32 1, %cast2         ; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Generic/2006-02-12-InsertLibcall.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2006-02-12-InsertLibcall.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2006-02-12-InsertLibcall.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2006-02-12-InsertLibcall.ll Fri Feb 27 15:17:42 2015
@@ -42,7 +42,7 @@ then.1.i52:		; preds = %then.0.i40
 	ret void
 
 else.1.i56:		; preds = %then.0.i40
-	%tmp.28.i = load i32* @G		; <i32> [#uses=1]
+	%tmp.28.i = load i32, i32* @G		; <i32> [#uses=1]
 	%tmp.29.i = icmp eq i32 %tmp.28.i, 1		; <i1> [#uses=1]
 	br i1 %tmp.29.i, label %shortcirc_next.i, label %shortcirc_done.i
 

Modified: llvm/trunk/test/CodeGen/Generic/2006-03-01-dagcombineinfloop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2006-03-01-dagcombineinfloop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2006-03-01-dagcombineinfloop.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2006-03-01-dagcombineinfloop.ll Fri Feb 27 15:17:42 2015
@@ -37,7 +37,7 @@ cond_next12:		; preds = %cond_true92
 cond_next18:		; preds = %cond_next12, %cond_true
 	%tmp20 = bitcast %struct.tree_node* %tmp2 to %struct.tree_type*		; <%struct.tree_type*> [#uses=1]
 	%tmp21 = getelementptr %struct.tree_type, %struct.tree_type* %tmp20, i32 0, i32 17		; <%struct.tree_node**> [#uses=1]
-	%tmp22 = load %struct.tree_node** %tmp21		; <%struct.tree_node*> [#uses=6]
+	%tmp22 = load %struct.tree_node*, %struct.tree_node** %tmp21		; <%struct.tree_node*> [#uses=6]
 	%tmp24 = icmp eq %struct.tree_node* %tmp22, %tmp23		; <i1> [#uses=1]
 	br i1 %tmp24, label %return, label %cond_next28
 
@@ -45,7 +45,7 @@ cond_next28:		; preds = %cond_next18
 	%tmp30 = bitcast %struct.tree_node* %tmp2 to %struct.tree_common*		; <%struct.tree_common*> [#uses=1]
 	%tmp = getelementptr %struct.tree_common, %struct.tree_common* %tmp30, i32 0, i32 2		; <i8*> [#uses=1]
 	%tmp.upgrd.1 = bitcast i8* %tmp to i32*		; <i32*> [#uses=1]
-	%tmp.upgrd.2 = load i32* %tmp.upgrd.1		; <i32> [#uses=1]
+	%tmp.upgrd.2 = load i32, i32* %tmp.upgrd.1		; <i32> [#uses=1]
 	%tmp32 = trunc i32 %tmp.upgrd.2 to i8		; <i8> [#uses=1]
 	%tmp33 = icmp eq i8 %tmp32, 7		; <i1> [#uses=1]
 	br i1 %tmp33, label %cond_true34, label %cond_next84
@@ -69,23 +69,23 @@ cond_next84:		; preds = %cond_next28
 	br i1 %tmp.upgrd.6, label %return, label %cond_true92
 
 cond_true92.preheader:		; preds = %entry
-	%tmp7 = load %struct.tree_node** @void_type_node		; <%struct.tree_node*> [#uses=1]
-	%tmp23 = load %struct.tree_node** @float_type_node		; <%struct.tree_node*> [#uses=1]
-	%tmp39 = load %struct.tree_node** @char_type_node		; <%struct.tree_node*> [#uses=1]
-	%tmp48 = load %struct.tree_node** @signed_char_type_node		; <%struct.tree_node*> [#uses=1]
-	%tmp57 = load %struct.tree_node** @unsigned_char_type_node		; <%struct.tree_node*> [#uses=1]
-	%tmp66 = load %struct.tree_node** @short_integer_type_node		; <%struct.tree_node*> [#uses=1]
-	%tmp75 = load %struct.tree_node** @short_unsigned_type_node		; <%struct.tree_node*> [#uses=1]
+	%tmp7 = load %struct.tree_node*, %struct.tree_node** @void_type_node		; <%struct.tree_node*> [#uses=1]
+	%tmp23 = load %struct.tree_node*, %struct.tree_node** @float_type_node		; <%struct.tree_node*> [#uses=1]
+	%tmp39 = load %struct.tree_node*, %struct.tree_node** @char_type_node		; <%struct.tree_node*> [#uses=1]
+	%tmp48 = load %struct.tree_node*, %struct.tree_node** @signed_char_type_node		; <%struct.tree_node*> [#uses=1]
+	%tmp57 = load %struct.tree_node*, %struct.tree_node** @unsigned_char_type_node		; <%struct.tree_node*> [#uses=1]
+	%tmp66 = load %struct.tree_node*, %struct.tree_node** @short_integer_type_node		; <%struct.tree_node*> [#uses=1]
+	%tmp75 = load %struct.tree_node*, %struct.tree_node** @short_unsigned_type_node		; <%struct.tree_node*> [#uses=1]
 	br label %cond_true92
 
 cond_true92:		; preds = %cond_true92.preheader, %cond_next84, %cond_true34
 	%t.0.0 = phi %struct.tree_node* [ %parms, %cond_true92.preheader ], [ %tmp6, %cond_true34 ], [ %tmp6, %cond_next84 ]		; <%struct.tree_node*> [#uses=2]
 	%tmp.upgrd.4 = bitcast %struct.tree_node* %t.0.0 to %struct.tree_list*		; <%struct.tree_list*> [#uses=1]
 	%tmp.upgrd.5 = getelementptr %struct.tree_list, %struct.tree_list* %tmp.upgrd.4, i32 0, i32 2		; <%struct.tree_node**> [#uses=1]
-	%tmp2 = load %struct.tree_node** %tmp.upgrd.5		; <%struct.tree_node*> [#uses=5]
+	%tmp2 = load %struct.tree_node*, %struct.tree_node** %tmp.upgrd.5		; <%struct.tree_node*> [#uses=5]
 	%tmp4 = bitcast %struct.tree_node* %t.0.0 to %struct.tree_common*		; <%struct.tree_common*> [#uses=1]
 	%tmp5 = getelementptr %struct.tree_common, %struct.tree_common* %tmp4, i32 0, i32 0		; <%struct.tree_node**> [#uses=1]
-	%tmp6 = load %struct.tree_node** %tmp5		; <%struct.tree_node*> [#uses=3]
+	%tmp6 = load %struct.tree_node*, %struct.tree_node** %tmp5		; <%struct.tree_node*> [#uses=3]
 	%tmp.upgrd.6 = icmp eq %struct.tree_node* %tmp6, null		; <i1> [#uses=3]
 	br i1 %tmp.upgrd.6, label %cond_true, label %cond_next12
 

Modified: llvm/trunk/test/CodeGen/Generic/2006-04-26-SetCCAnd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2006-04-26-SetCCAnd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2006-04-26-SetCCAnd.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2006-04-26-SetCCAnd.ll Fri Feb 27 15:17:42 2015
@@ -22,8 +22,8 @@ else.3:		; preds = %endif.4
 	]
 
 then.10:		; preds = %else.3, %else.3
-	%tmp.112 = load i16* null		; <i16> [#uses=2]
-	%tmp.113 = load i16* @G		; <i16> [#uses=2]
+	%tmp.112 = load i16, i16* null		; <i16> [#uses=2]
+	%tmp.113 = load i16, i16* @G		; <i16> [#uses=2]
 	%tmp.114 = icmp ugt i16 %tmp.112, %tmp.113		; <i1> [#uses=1]
 	%tmp.120 = icmp ult i16 %tmp.112, %tmp.113		; <i1> [#uses=1]
 	%bothcond = and i1 %tmp.114, %tmp.120		; <i1> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Generic/2006-06-13-ComputeMaskedBitsCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2006-06-13-ComputeMaskedBitsCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2006-06-13-ComputeMaskedBitsCrash.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2006-06-13-ComputeMaskedBitsCrash.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ entry:
 cond_true23:		; preds = %entry
 	%tmp138 = getelementptr %struct.cl_perfunc_opts, %struct.cl_perfunc_opts* @cl_pf_opts, i32 0, i32 8		; <i8*> [#uses=1]
 	%tmp138.upgrd.1 = bitcast i8* %tmp138 to i32*		; <i32*> [#uses=2]
-	%tmp139 = load i32* %tmp138.upgrd.1		; <i32> [#uses=1]
+	%tmp139 = load i32, i32* %tmp138.upgrd.1		; <i32> [#uses=1]
 	%tmp140 = shl i32 1, 27		; <i32> [#uses=1]
 	%tmp141 = and i32 %tmp140, 134217728		; <i32> [#uses=1]
 	%tmp142 = and i32 %tmp139, -134217729		; <i32> [#uses=1]
@@ -19,7 +19,7 @@ cond_true23:		; preds = %entry
 	store i32 %tmp143, i32* %tmp138.upgrd.1
 	%tmp144 = getelementptr %struct.cl_perfunc_opts, %struct.cl_perfunc_opts* @cl_pf_opts, i32 0, i32 8		; <i8*> [#uses=1]
 	%tmp144.upgrd.2 = bitcast i8* %tmp144 to i32*		; <i32*> [#uses=1]
-	%tmp145 = load i32* %tmp144.upgrd.2		; <i32> [#uses=1]
+	%tmp145 = load i32, i32* %tmp144.upgrd.2		; <i32> [#uses=1]
 	%tmp146 = shl i32 %tmp145, 22		; <i32> [#uses=1]
 	%tmp147 = lshr i32 %tmp146, 31		; <i32> [#uses=1]
 	%tmp147.upgrd.3 = trunc i32 %tmp147 to i8		; <i8> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Generic/2006-06-28-SimplifySetCCCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2006-06-28-SimplifySetCCCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2006-06-28-SimplifySetCCCrash.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2006-06-28-SimplifySetCCCrash.ll Fri Feb 27 15:17:42 2015
@@ -169,7 +169,7 @@ cond_next778:		; preds = %cond_next6.i11
 	br i1 %tmp781, label %cond_next784, label %bb790
 
 cond_next784:		; preds = %cond_next778
-	%tmp785 = load i32* @ix86_cpu		; <i32> [#uses=1]
+	%tmp785 = load i32, i32* @ix86_cpu		; <i32> [#uses=1]
 	%tmp786 = icmp eq i32 %tmp785, 5		; <i1> [#uses=1]
 	br i1 %tmp786, label %UnifiedReturnBlock, label %bb790
 
@@ -208,7 +208,7 @@ bb1419:		; preds = %cond_true.i
 	ret void
 
 bb1648:		; preds = %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i
-	%tmp1650 = load i32* @which_alternative		; <i32> [#uses=1]
+	%tmp1650 = load i32, i32* @which_alternative		; <i32> [#uses=1]
 	switch i32 %tmp1650, label %bb1701 [
 		 i32 0, label %cond_next1675
 		 i32 1, label %cond_next1675
@@ -219,7 +219,7 @@ cond_next1675:		; preds = %bb1648, %bb16
 	ret void
 
 bb1701:		; preds = %bb1648
-	%tmp1702 = load i32* @which_alternative		; <i32> [#uses=1]
+	%tmp1702 = load i32, i32* @which_alternative		; <i32> [#uses=1]
 	switch i32 %tmp1702, label %bb1808 [
 		 i32 0, label %cond_next1727
 		 i32 1, label %cond_next1727
@@ -237,7 +237,7 @@ cond_next1834:		; preds = %bb1808
 	ret void
 
 bb1876:		; preds = %bb1808
-	%tmp1877signed = load i32* @which_alternative		; <i32> [#uses=4]
+	%tmp1877signed = load i32, i32* @which_alternative		; <i32> [#uses=4]
 	%tmp1877 = bitcast i32 %tmp1877signed to i32		; <i32> [#uses=1]
 	%bothcond699 = icmp ult i32 %tmp1877, 2		; <i1> [#uses=1]
 	%tmp1888 = icmp eq i32 %tmp1877signed, 2		; <i1> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Generic/2006-09-02-LocalAllocCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2006-09-02-LocalAllocCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2006-09-02-LocalAllocCrash.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2006-09-02-LocalAllocCrash.ll Fri Feb 27 15:17:42 2015
@@ -78,11 +78,11 @@ cond_true1369.preheader:		; preds = %con
 	ret void
 
 bb1567:		; preds = %cond_true1254
-	%tmp1580 = load i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 3)		; <i64> [#uses=1]
-	%tmp1591 = load i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 4)		; <i64> [#uses=1]
+	%tmp1580 = load i64, i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 3)		; <i64> [#uses=1]
+	%tmp1591 = load i64, i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 4)		; <i64> [#uses=1]
 	%tmp1572 = tail call fastcc i32 @FirstOne( )		; <i32> [#uses=5]
 	%tmp1582 = getelementptr [64 x i32], [64 x i32]* @bishop_shift_rl45, i32 0, i32 %tmp1572		; <i32*> [#uses=1]
-	%tmp1583 = load i32* %tmp1582		; <i32> [#uses=1]
+	%tmp1583 = load i32, i32* %tmp1582		; <i32> [#uses=1]
 	%tmp1583.upgrd.1 = trunc i32 %tmp1583 to i8		; <i8> [#uses=1]
 	%shift.upgrd.2 = zext i8 %tmp1583.upgrd.1 to i64		; <i64> [#uses=1]
 	%tmp1584 = lshr i64 %tmp1580, %shift.upgrd.2		; <i64> [#uses=1]
@@ -90,9 +90,9 @@ bb1567:		; preds = %cond_true1254
 	%tmp1585 = and i32 %tmp1584.upgrd.3, 255		; <i32> [#uses=1]
 	%gep.upgrd.4 = zext i32 %tmp1585 to i64		; <i64> [#uses=1]
 	%tmp1587 = getelementptr [64 x [256 x i32]], [64 x [256 x i32]]* @bishop_mobility_rl45, i32 0, i32 %tmp1572, i64 %gep.upgrd.4		; <i32*> [#uses=1]
-	%tmp1588 = load i32* %tmp1587		; <i32> [#uses=1]
+	%tmp1588 = load i32, i32* %tmp1587		; <i32> [#uses=1]
 	%tmp1593 = getelementptr [64 x i32], [64 x i32]* @bishop_shift_rr45, i32 0, i32 %tmp1572		; <i32*> [#uses=1]
-	%tmp1594 = load i32* %tmp1593		; <i32> [#uses=1]
+	%tmp1594 = load i32, i32* %tmp1593		; <i32> [#uses=1]
 	%tmp1594.upgrd.5 = trunc i32 %tmp1594 to i8		; <i8> [#uses=1]
 	%shift.upgrd.6 = zext i8 %tmp1594.upgrd.5 to i64		; <i64> [#uses=1]
 	%tmp1595 = lshr i64 %tmp1591, %shift.upgrd.6		; <i64> [#uses=1]
@@ -100,11 +100,11 @@ bb1567:		; preds = %cond_true1254
 	%tmp1596 = and i32 %tmp1595.upgrd.7, 255		; <i32> [#uses=1]
 	%gep.upgrd.8 = zext i32 %tmp1596 to i64		; <i64> [#uses=1]
 	%tmp1598 = getelementptr [64 x [256 x i32]], [64 x [256 x i32]]* @bishop_mobility_rr45, i32 0, i32 %tmp1572, i64 %gep.upgrd.8		; <i32*> [#uses=1]
-	%tmp1599 = load i32* %tmp1598		; <i32> [#uses=1]
+	%tmp1599 = load i32, i32* %tmp1598		; <i32> [#uses=1]
 	%tmp1600.neg = sub i32 0, %tmp1588		; <i32> [#uses=1]
 	%tmp1602 = sub i32 %tmp1600.neg, %tmp1599		; <i32> [#uses=1]
 	%tmp1604 = getelementptr [64 x i8], [64 x i8]* @black_outpost, i32 0, i32 %tmp1572		; <i8*> [#uses=1]
-	%tmp1605 = load i8* %tmp1604		; <i8> [#uses=1]
+	%tmp1605 = load i8, i8* %tmp1604		; <i8> [#uses=1]
 	%tmp1606 = icmp eq i8 %tmp1605, 0		; <i1> [#uses=1]
 	br i1 %tmp1606, label %cond_next1637, label %cond_true1607
 

Modified: llvm/trunk/test/CodeGen/Generic/2006-11-20-DAGCombineCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2006-11-20-DAGCombineCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2006-11-20-DAGCombineCrash.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2006-11-20-DAGCombineCrash.ll Fri Feb 27 15:17:42 2015
@@ -10,9 +10,9 @@ bb.preheader:		; preds = %entry
 	br i1 false, label %cond_true48, label %cond_next80
 
 cond_true48:		; preds = %bb.preheader
-	%tmp = load i8* null		; <i8> [#uses=1]
+	%tmp = load i8, i8* null		; <i8> [#uses=1]
 	%tmp51 = zext i8 %tmp to i16		; <i16> [#uses=1]
-	%tmp99 = load i8* null		; <i8> [#uses=1]
+	%tmp99 = load i8, i8* null		; <i8> [#uses=1]
 	%tmp54 = bitcast i8 %tmp99 to i8		; <i8> [#uses=1]
 	%tmp54.upgrd.1 = zext i8 %tmp54 to i32		; <i32> [#uses=1]
 	%tmp55 = lshr i32 %tmp54.upgrd.1, 3		; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Generic/2007-01-15-LoadSelectCycle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2007-01-15-LoadSelectCycle.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2007-01-15-LoadSelectCycle.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2007-01-15-LoadSelectCycle.ll Fri Feb 27 15:17:42 2015
@@ -4,8 +4,8 @@
 declare i1 @foo()
 
 define i32 @test(i32* %A, i32* %B) {
-	%a = load i32* %A
-	%b = load i32* %B
+	%a = load i32, i32* %A
+	%b = load i32, i32* %B
 	%cond = call i1 @foo()
 	%c = select i1 %cond, i32 %a, i32 %b
 	ret i32 %c

Modified: llvm/trunk/test/CodeGen/Generic/2008-01-25-dag-combine-mul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2008-01-25-dag-combine-mul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2008-01-25-dag-combine-mul.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2008-01-25-dag-combine-mul.ll Fri Feb 27 15:17:42 2015
@@ -4,24 +4,24 @@
 define i32 @f(i16* %pc) {
 entry:
 	%acc = alloca i64, align 8		; <i64*> [#uses=4]
-	%tmp97 = load i64* %acc, align 8		; <i64> [#uses=1]
+	%tmp97 = load i64, i64* %acc, align 8		; <i64> [#uses=1]
 	%tmp98 = and i64 %tmp97, 4294967295		; <i64> [#uses=1]
-	%tmp99 = load i64* null, align 8		; <i64> [#uses=1]
+	%tmp99 = load i64, i64* null, align 8		; <i64> [#uses=1]
 	%tmp100 = and i64 %tmp99, 4294967295		; <i64> [#uses=1]
 	%tmp101 = mul i64 %tmp98, %tmp100		; <i64> [#uses=1]
 	%tmp103 = lshr i64 %tmp101, 0		; <i64> [#uses=1]
-	%tmp104 = load i64* %acc, align 8		; <i64> [#uses=1]
+	%tmp104 = load i64, i64* %acc, align 8		; <i64> [#uses=1]
 	%.cast105 = zext i32 32 to i64		; <i64> [#uses=1]
 	%tmp106 = lshr i64 %tmp104, %.cast105		; <i64> [#uses=1]
-	%tmp107 = load i64* null, align 8		; <i64> [#uses=1]
+	%tmp107 = load i64, i64* null, align 8		; <i64> [#uses=1]
 	%tmp108 = and i64 %tmp107, 4294967295		; <i64> [#uses=1]
 	%tmp109 = mul i64 %tmp106, %tmp108		; <i64> [#uses=1]
 	%tmp112 = add i64 %tmp109, 0		; <i64> [#uses=1]
 	%tmp116 = add i64 %tmp112, 0		; <i64> [#uses=1]
 	%tmp117 = add i64 %tmp103, %tmp116		; <i64> [#uses=1]
-	%tmp118 = load i64* %acc, align 8		; <i64> [#uses=1]
+	%tmp118 = load i64, i64* %acc, align 8		; <i64> [#uses=1]
 	%tmp120 = lshr i64 %tmp118, 0		; <i64> [#uses=1]
-	%tmp121 = load i64* null, align 8		; <i64> [#uses=1]
+	%tmp121 = load i64, i64* null, align 8		; <i64> [#uses=1]
 	%tmp123 = lshr i64 %tmp121, 0		; <i64> [#uses=1]
 	%tmp124 = mul i64 %tmp120, %tmp123		; <i64> [#uses=1]
 	%tmp126 = shl i64 %tmp124, 0		; <i64> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Generic/2008-01-30-LoadCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2008-01-30-LoadCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2008-01-30-LoadCrash.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2008-01-30-LoadCrash.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ bb20:
 bb41:		; preds = %bb20
 	%tmp8182 = trunc i64 %tmp42.rle to i32		; <i32> [#uses=1]
 	%tmp83 = getelementptr [63 x i8], [63 x i8]* @letters.3100, i32 0, i32 %tmp8182		; <i8*> [#uses=1]
-	%tmp84 = load i8* %tmp83, align 1		; <i8> [#uses=1]
+	%tmp84 = load i8, i8* %tmp83, align 1		; <i8> [#uses=1]
 	store i8 %tmp84, i8* null, align 1
 	%tmp90 = urem i64 %tmp42.rle, 62		; <i64> [#uses=1]
 	%tmp9091 = trunc i64 %tmp90 to i32		; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Generic/2008-02-25-NegateZero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2008-02-25-NegateZero.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2008-02-25-NegateZero.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2008-02-25-NegateZero.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 
 define void @test() {
 entry:
-	%tmp98 = load float* null, align 4		; <float> [#uses=1]
-	%tmp106 = load float* null, align 4		; <float> [#uses=1]
+	%tmp98 = load float, float* null, align 4		; <float> [#uses=1]
+	%tmp106 = load float, float* null, align 4		; <float> [#uses=1]
 	%tmp113 = fadd float %tmp98, %tmp106		; <float> [#uses=1]
 	%tmp119 = fsub float %tmp113, 0.000000e+00		; <float> [#uses=1]
 	call void (i32, ...)* @foo( i32 0, float 0.000000e+00, float %tmp119 ) nounwind 

Modified: llvm/trunk/test/CodeGen/Generic/2009-03-29-SoftFloatVectorExtract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2009-03-29-SoftFloatVectorExtract.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2009-03-29-SoftFloatVectorExtract.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2009-03-29-SoftFloatVectorExtract.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 @m = external global <2 x double>
 
 define double @vector_ex() nounwind {
-       %v = load <2 x double>* @m
+       %v = load <2 x double>, <2 x double>* @m
        %x = extractelement <2 x double> %v, i32 1
        ret double %x
 }

Modified: llvm/trunk/test/CodeGen/Generic/2009-04-28-i128-cmp-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2009-04-28-i128-cmp-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2009-04-28-i128-cmp-crash.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2009-04-28-i128-cmp-crash.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 
 define i32 @test(i128* %P) nounwind {
 entry:
-	%tmp48 = load i128* %P
+	%tmp48 = load i128, i128* %P
 	%and49 = and i128 %tmp48, 18446744073709551616		; <i128> [#uses=1]
 	%tobool = icmp ne i128 %and49, 0		; <i1> [#uses=1]
 	br i1 %tobool, label %if.then50, label %if.end61
@@ -19,7 +19,7 @@ if.end61:		; preds = %if.then50, %if.the
 
 define i32 @test2(i320* %P) nounwind {
 entry:
-	%tmp48 = load i320* %P
+	%tmp48 = load i320, i320* %P
 	%and49 = and i320 %tmp48, 25108406941546723055343157692830665664409421777856138051584
 	%tobool = icmp ne i320 %and49, 0		; <i1> [#uses=1]
 	br i1 %tobool, label %if.then50, label %if.end61

Modified: llvm/trunk/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll Fri Feb 27 15:17:42 2015
@@ -5,8 +5,8 @@
 
 define void @f(i256* nocapture %a, i256* nocapture %b, i256* nocapture %cc, i256* nocapture %dd) nounwind uwtable noinline ssp {
 entry:
-  %c = load i256* %cc
-  %d = load i256* %dd
+  %c = load i256, i256* %cc
+  %d = load i256, i256* %dd
   %add = add nsw i256 %c, %d
   store i256 %add, i256* %a, align 8
   %or = or i256 %c, 1606938044258990275541962092341162602522202993782792835301376

Modified: llvm/trunk/test/CodeGen/Generic/2012-06-08-APIntCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2012-06-08-APIntCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2012-06-08-APIntCrash.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2012-06-08-APIntCrash.ll Fri Feb 27 15:17:42 2015
@@ -2,7 +2,7 @@
 
 define void @test1(<8 x i32>* %ptr)
 {
-	%1 = load <8 x i32>* %ptr, align 32
+	%1 = load <8 x i32>, <8 x i32>* %ptr, align 32
 	%2 = and <8 x i32> %1, <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 -1>
 	store <8 x i32> %2, <8 x i32>* %ptr, align 16
 	ret void

Modified: llvm/trunk/test/CodeGen/Generic/2014-02-05-OpaqueConstants.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/2014-02-05-OpaqueConstants.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/2014-02-05-OpaqueConstants.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/2014-02-05-OpaqueConstants.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@
 ; Function Attrs: nounwind ssp uwtable
 define void @fn() {
   store i32* inttoptr (i64 68719476735 to i32*), i32** @a, align 8
-  %1 = load i32* @c, align 4
+  %1 = load i32, i32* @c, align 4
   %2 = sext i32 %1 to i64
   %3 = lshr i64 %2, 12
   %4 = and i64 %3, 68719476735

Modified: llvm/trunk/test/CodeGen/Generic/APIntLoadStore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/APIntLoadStore.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/APIntLoadStore.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/APIntLoadStore.ll Fri Feb 27 15:17:42 2015
@@ -513,1537 +513,1537 @@
 @i256_s = external global i256		; <i256*> [#uses=1]
 
 define void @i1_ls() nounwind  {
-	%tmp = load i1* @i1_l		; <i1> [#uses=1]
+	%tmp = load i1, i1* @i1_l		; <i1> [#uses=1]
 	store i1 %tmp, i1* @i1_s
 	ret void
 }
 
 define void @i2_ls() nounwind  {
-	%tmp = load i2* @i2_l		; <i2> [#uses=1]
+	%tmp = load i2, i2* @i2_l		; <i2> [#uses=1]
 	store i2 %tmp, i2* @i2_s
 	ret void
 }
 
 define void @i3_ls() nounwind  {
-	%tmp = load i3* @i3_l		; <i3> [#uses=1]
+	%tmp = load i3, i3* @i3_l		; <i3> [#uses=1]
 	store i3 %tmp, i3* @i3_s
 	ret void
 }
 
 define void @i4_ls() nounwind  {
-	%tmp = load i4* @i4_l		; <i4> [#uses=1]
+	%tmp = load i4, i4* @i4_l		; <i4> [#uses=1]
 	store i4 %tmp, i4* @i4_s
 	ret void
 }
 
 define void @i5_ls() nounwind  {
-	%tmp = load i5* @i5_l		; <i5> [#uses=1]
+	%tmp = load i5, i5* @i5_l		; <i5> [#uses=1]
 	store i5 %tmp, i5* @i5_s
 	ret void
 }
 
 define void @i6_ls() nounwind  {
-	%tmp = load i6* @i6_l		; <i6> [#uses=1]
+	%tmp = load i6, i6* @i6_l		; <i6> [#uses=1]
 	store i6 %tmp, i6* @i6_s
 	ret void
 }
 
 define void @i7_ls() nounwind  {
-	%tmp = load i7* @i7_l		; <i7> [#uses=1]
+	%tmp = load i7, i7* @i7_l		; <i7> [#uses=1]
 	store i7 %tmp, i7* @i7_s
 	ret void
 }
 
 define void @i8_ls() nounwind  {
-	%tmp = load i8* @i8_l		; <i8> [#uses=1]
+	%tmp = load i8, i8* @i8_l		; <i8> [#uses=1]
 	store i8 %tmp, i8* @i8_s
 	ret void
 }
 
 define void @i9_ls() nounwind  {
-	%tmp = load i9* @i9_l		; <i9> [#uses=1]
+	%tmp = load i9, i9* @i9_l		; <i9> [#uses=1]
 	store i9 %tmp, i9* @i9_s
 	ret void
 }
 
 define void @i10_ls() nounwind  {
-	%tmp = load i10* @i10_l		; <i10> [#uses=1]
+	%tmp = load i10, i10* @i10_l		; <i10> [#uses=1]
 	store i10 %tmp, i10* @i10_s
 	ret void
 }
 
 define void @i11_ls() nounwind  {
-	%tmp = load i11* @i11_l		; <i11> [#uses=1]
+	%tmp = load i11, i11* @i11_l		; <i11> [#uses=1]
 	store i11 %tmp, i11* @i11_s
 	ret void
 }
 
 define void @i12_ls() nounwind  {
-	%tmp = load i12* @i12_l		; <i12> [#uses=1]
+	%tmp = load i12, i12* @i12_l		; <i12> [#uses=1]
 	store i12 %tmp, i12* @i12_s
 	ret void
 }
 
 define void @i13_ls() nounwind  {
-	%tmp = load i13* @i13_l		; <i13> [#uses=1]
+	%tmp = load i13, i13* @i13_l		; <i13> [#uses=1]
 	store i13 %tmp, i13* @i13_s
 	ret void
 }
 
 define void @i14_ls() nounwind  {
-	%tmp = load i14* @i14_l		; <i14> [#uses=1]
+	%tmp = load i14, i14* @i14_l		; <i14> [#uses=1]
 	store i14 %tmp, i14* @i14_s
 	ret void
 }
 
 define void @i15_ls() nounwind  {
-	%tmp = load i15* @i15_l		; <i15> [#uses=1]
+	%tmp = load i15, i15* @i15_l		; <i15> [#uses=1]
 	store i15 %tmp, i15* @i15_s
 	ret void
 }
 
 define void @i16_ls() nounwind  {
-	%tmp = load i16* @i16_l		; <i16> [#uses=1]
+	%tmp = load i16, i16* @i16_l		; <i16> [#uses=1]
 	store i16 %tmp, i16* @i16_s
 	ret void
 }
 
 define void @i17_ls() nounwind  {
-	%tmp = load i17* @i17_l		; <i17> [#uses=1]
+	%tmp = load i17, i17* @i17_l		; <i17> [#uses=1]
 	store i17 %tmp, i17* @i17_s
 	ret void
 }
 
 define void @i18_ls() nounwind  {
-	%tmp = load i18* @i18_l		; <i18> [#uses=1]
+	%tmp = load i18, i18* @i18_l		; <i18> [#uses=1]
 	store i18 %tmp, i18* @i18_s
 	ret void
 }
 
 define void @i19_ls() nounwind  {
-	%tmp = load i19* @i19_l		; <i19> [#uses=1]
+	%tmp = load i19, i19* @i19_l		; <i19> [#uses=1]
 	store i19 %tmp, i19* @i19_s
 	ret void
 }
 
 define void @i20_ls() nounwind  {
-	%tmp = load i20* @i20_l		; <i20> [#uses=1]
+	%tmp = load i20, i20* @i20_l		; <i20> [#uses=1]
 	store i20 %tmp, i20* @i20_s
 	ret void
 }
 
 define void @i21_ls() nounwind  {
-	%tmp = load i21* @i21_l		; <i21> [#uses=1]
+	%tmp = load i21, i21* @i21_l		; <i21> [#uses=1]
 	store i21 %tmp, i21* @i21_s
 	ret void
 }
 
 define void @i22_ls() nounwind  {
-	%tmp = load i22* @i22_l		; <i22> [#uses=1]
+	%tmp = load i22, i22* @i22_l		; <i22> [#uses=1]
 	store i22 %tmp, i22* @i22_s
 	ret void
 }
 
 define void @i23_ls() nounwind  {
-	%tmp = load i23* @i23_l		; <i23> [#uses=1]
+	%tmp = load i23, i23* @i23_l		; <i23> [#uses=1]
 	store i23 %tmp, i23* @i23_s
 	ret void
 }
 
 define void @i24_ls() nounwind  {
-	%tmp = load i24* @i24_l		; <i24> [#uses=1]
+	%tmp = load i24, i24* @i24_l		; <i24> [#uses=1]
 	store i24 %tmp, i24* @i24_s
 	ret void
 }
 
 define void @i25_ls() nounwind  {
-	%tmp = load i25* @i25_l		; <i25> [#uses=1]
+	%tmp = load i25, i25* @i25_l		; <i25> [#uses=1]
 	store i25 %tmp, i25* @i25_s
 	ret void
 }
 
 define void @i26_ls() nounwind  {
-	%tmp = load i26* @i26_l		; <i26> [#uses=1]
+	%tmp = load i26, i26* @i26_l		; <i26> [#uses=1]
 	store i26 %tmp, i26* @i26_s
 	ret void
 }
 
 define void @i27_ls() nounwind  {
-	%tmp = load i27* @i27_l		; <i27> [#uses=1]
+	%tmp = load i27, i27* @i27_l		; <i27> [#uses=1]
 	store i27 %tmp, i27* @i27_s
 	ret void
 }
 
 define void @i28_ls() nounwind  {
-	%tmp = load i28* @i28_l		; <i28> [#uses=1]
+	%tmp = load i28, i28* @i28_l		; <i28> [#uses=1]
 	store i28 %tmp, i28* @i28_s
 	ret void
 }
 
 define void @i29_ls() nounwind  {
-	%tmp = load i29* @i29_l		; <i29> [#uses=1]
+	%tmp = load i29, i29* @i29_l		; <i29> [#uses=1]
 	store i29 %tmp, i29* @i29_s
 	ret void
 }
 
 define void @i30_ls() nounwind  {
-	%tmp = load i30* @i30_l		; <i30> [#uses=1]
+	%tmp = load i30, i30* @i30_l		; <i30> [#uses=1]
 	store i30 %tmp, i30* @i30_s
 	ret void
 }
 
 define void @i31_ls() nounwind  {
-	%tmp = load i31* @i31_l		; <i31> [#uses=1]
+	%tmp = load i31, i31* @i31_l		; <i31> [#uses=1]
 	store i31 %tmp, i31* @i31_s
 	ret void
 }
 
 define void @i32_ls() nounwind  {
-	%tmp = load i32* @i32_l		; <i32> [#uses=1]
+	%tmp = load i32, i32* @i32_l		; <i32> [#uses=1]
 	store i32 %tmp, i32* @i32_s
 	ret void
 }
 
 define void @i33_ls() nounwind  {
-	%tmp = load i33* @i33_l		; <i33> [#uses=1]
+	%tmp = load i33, i33* @i33_l		; <i33> [#uses=1]
 	store i33 %tmp, i33* @i33_s
 	ret void
 }
 
 define void @i34_ls() nounwind  {
-	%tmp = load i34* @i34_l		; <i34> [#uses=1]
+	%tmp = load i34, i34* @i34_l		; <i34> [#uses=1]
 	store i34 %tmp, i34* @i34_s
 	ret void
 }
 
 define void @i35_ls() nounwind  {
-	%tmp = load i35* @i35_l		; <i35> [#uses=1]
+	%tmp = load i35, i35* @i35_l		; <i35> [#uses=1]
 	store i35 %tmp, i35* @i35_s
 	ret void
 }
 
 define void @i36_ls() nounwind  {
-	%tmp = load i36* @i36_l		; <i36> [#uses=1]
+	%tmp = load i36, i36* @i36_l		; <i36> [#uses=1]
 	store i36 %tmp, i36* @i36_s
 	ret void
 }
 
 define void @i37_ls() nounwind  {
-	%tmp = load i37* @i37_l		; <i37> [#uses=1]
+	%tmp = load i37, i37* @i37_l		; <i37> [#uses=1]
 	store i37 %tmp, i37* @i37_s
 	ret void
 }
 
 define void @i38_ls() nounwind  {
-	%tmp = load i38* @i38_l		; <i38> [#uses=1]
+	%tmp = load i38, i38* @i38_l		; <i38> [#uses=1]
 	store i38 %tmp, i38* @i38_s
 	ret void
 }
 
 define void @i39_ls() nounwind  {
-	%tmp = load i39* @i39_l		; <i39> [#uses=1]
+	%tmp = load i39, i39* @i39_l		; <i39> [#uses=1]
 	store i39 %tmp, i39* @i39_s
 	ret void
 }
 
 define void @i40_ls() nounwind  {
-	%tmp = load i40* @i40_l		; <i40> [#uses=1]
+	%tmp = load i40, i40* @i40_l		; <i40> [#uses=1]
 	store i40 %tmp, i40* @i40_s
 	ret void
 }
 
 define void @i41_ls() nounwind  {
-	%tmp = load i41* @i41_l		; <i41> [#uses=1]
+	%tmp = load i41, i41* @i41_l		; <i41> [#uses=1]
 	store i41 %tmp, i41* @i41_s
 	ret void
 }
 
 define void @i42_ls() nounwind  {
-	%tmp = load i42* @i42_l		; <i42> [#uses=1]
+	%tmp = load i42, i42* @i42_l		; <i42> [#uses=1]
 	store i42 %tmp, i42* @i42_s
 	ret void
 }
 
 define void @i43_ls() nounwind  {
-	%tmp = load i43* @i43_l		; <i43> [#uses=1]
+	%tmp = load i43, i43* @i43_l		; <i43> [#uses=1]
 	store i43 %tmp, i43* @i43_s
 	ret void
 }
 
 define void @i44_ls() nounwind  {
-	%tmp = load i44* @i44_l		; <i44> [#uses=1]
+	%tmp = load i44, i44* @i44_l		; <i44> [#uses=1]
 	store i44 %tmp, i44* @i44_s
 	ret void
 }
 
 define void @i45_ls() nounwind  {
-	%tmp = load i45* @i45_l		; <i45> [#uses=1]
+	%tmp = load i45, i45* @i45_l		; <i45> [#uses=1]
 	store i45 %tmp, i45* @i45_s
 	ret void
 }
 
 define void @i46_ls() nounwind  {
-	%tmp = load i46* @i46_l		; <i46> [#uses=1]
+	%tmp = load i46, i46* @i46_l		; <i46> [#uses=1]
 	store i46 %tmp, i46* @i46_s
 	ret void
 }
 
 define void @i47_ls() nounwind  {
-	%tmp = load i47* @i47_l		; <i47> [#uses=1]
+	%tmp = load i47, i47* @i47_l		; <i47> [#uses=1]
 	store i47 %tmp, i47* @i47_s
 	ret void
 }
 
 define void @i48_ls() nounwind  {
-	%tmp = load i48* @i48_l		; <i48> [#uses=1]
+	%tmp = load i48, i48* @i48_l		; <i48> [#uses=1]
 	store i48 %tmp, i48* @i48_s
 	ret void
 }
 
 define void @i49_ls() nounwind  {
-	%tmp = load i49* @i49_l		; <i49> [#uses=1]
+	%tmp = load i49, i49* @i49_l		; <i49> [#uses=1]
 	store i49 %tmp, i49* @i49_s
 	ret void
 }
 
 define void @i50_ls() nounwind  {
-	%tmp = load i50* @i50_l		; <i50> [#uses=1]
+	%tmp = load i50, i50* @i50_l		; <i50> [#uses=1]
 	store i50 %tmp, i50* @i50_s
 	ret void
 }
 
 define void @i51_ls() nounwind  {
-	%tmp = load i51* @i51_l		; <i51> [#uses=1]
+	%tmp = load i51, i51* @i51_l		; <i51> [#uses=1]
 	store i51 %tmp, i51* @i51_s
 	ret void
 }
 
 define void @i52_ls() nounwind  {
-	%tmp = load i52* @i52_l		; <i52> [#uses=1]
+	%tmp = load i52, i52* @i52_l		; <i52> [#uses=1]
 	store i52 %tmp, i52* @i52_s
 	ret void
 }
 
 define void @i53_ls() nounwind  {
-	%tmp = load i53* @i53_l		; <i53> [#uses=1]
+	%tmp = load i53, i53* @i53_l		; <i53> [#uses=1]
 	store i53 %tmp, i53* @i53_s
 	ret void
 }
 
 define void @i54_ls() nounwind  {
-	%tmp = load i54* @i54_l		; <i54> [#uses=1]
+	%tmp = load i54, i54* @i54_l		; <i54> [#uses=1]
 	store i54 %tmp, i54* @i54_s
 	ret void
 }
 
 define void @i55_ls() nounwind  {
-	%tmp = load i55* @i55_l		; <i55> [#uses=1]
+	%tmp = load i55, i55* @i55_l		; <i55> [#uses=1]
 	store i55 %tmp, i55* @i55_s
 	ret void
 }
 
 define void @i56_ls() nounwind  {
-	%tmp = load i56* @i56_l		; <i56> [#uses=1]
+	%tmp = load i56, i56* @i56_l		; <i56> [#uses=1]
 	store i56 %tmp, i56* @i56_s
 	ret void
 }
 
 define void @i57_ls() nounwind  {
-	%tmp = load i57* @i57_l		; <i57> [#uses=1]
+	%tmp = load i57, i57* @i57_l		; <i57> [#uses=1]
 	store i57 %tmp, i57* @i57_s
 	ret void
 }
 
 define void @i58_ls() nounwind  {
-	%tmp = load i58* @i58_l		; <i58> [#uses=1]
+	%tmp = load i58, i58* @i58_l		; <i58> [#uses=1]
 	store i58 %tmp, i58* @i58_s
 	ret void
 }
 
 define void @i59_ls() nounwind  {
-	%tmp = load i59* @i59_l		; <i59> [#uses=1]
+	%tmp = load i59, i59* @i59_l		; <i59> [#uses=1]
 	store i59 %tmp, i59* @i59_s
 	ret void
 }
 
 define void @i60_ls() nounwind  {
-	%tmp = load i60* @i60_l		; <i60> [#uses=1]
+	%tmp = load i60, i60* @i60_l		; <i60> [#uses=1]
 	store i60 %tmp, i60* @i60_s
 	ret void
 }
 
 define void @i61_ls() nounwind  {
-	%tmp = load i61* @i61_l		; <i61> [#uses=1]
+	%tmp = load i61, i61* @i61_l		; <i61> [#uses=1]
 	store i61 %tmp, i61* @i61_s
 	ret void
 }
 
 define void @i62_ls() nounwind  {
-	%tmp = load i62* @i62_l		; <i62> [#uses=1]
+	%tmp = load i62, i62* @i62_l		; <i62> [#uses=1]
 	store i62 %tmp, i62* @i62_s
 	ret void
 }
 
 define void @i63_ls() nounwind  {
-	%tmp = load i63* @i63_l		; <i63> [#uses=1]
+	%tmp = load i63, i63* @i63_l		; <i63> [#uses=1]
 	store i63 %tmp, i63* @i63_s
 	ret void
 }
 
 define void @i64_ls() nounwind  {
-	%tmp = load i64* @i64_l		; <i64> [#uses=1]
+	%tmp = load i64, i64* @i64_l		; <i64> [#uses=1]
 	store i64 %tmp, i64* @i64_s
 	ret void
 }
 
 define void @i65_ls() nounwind  {
-	%tmp = load i65* @i65_l		; <i65> [#uses=1]
+	%tmp = load i65, i65* @i65_l		; <i65> [#uses=1]
 	store i65 %tmp, i65* @i65_s
 	ret void
 }
 
 define void @i66_ls() nounwind  {
-	%tmp = load i66* @i66_l		; <i66> [#uses=1]
+	%tmp = load i66, i66* @i66_l		; <i66> [#uses=1]
 	store i66 %tmp, i66* @i66_s
 	ret void
 }
 
 define void @i67_ls() nounwind  {
-	%tmp = load i67* @i67_l		; <i67> [#uses=1]
+	%tmp = load i67, i67* @i67_l		; <i67> [#uses=1]
 	store i67 %tmp, i67* @i67_s
 	ret void
 }
 
 define void @i68_ls() nounwind  {
-	%tmp = load i68* @i68_l		; <i68> [#uses=1]
+	%tmp = load i68, i68* @i68_l		; <i68> [#uses=1]
 	store i68 %tmp, i68* @i68_s
 	ret void
 }
 
 define void @i69_ls() nounwind  {
-	%tmp = load i69* @i69_l		; <i69> [#uses=1]
+	%tmp = load i69, i69* @i69_l		; <i69> [#uses=1]
 	store i69 %tmp, i69* @i69_s
 	ret void
 }
 
 define void @i70_ls() nounwind  {
-	%tmp = load i70* @i70_l		; <i70> [#uses=1]
+	%tmp = load i70, i70* @i70_l		; <i70> [#uses=1]
 	store i70 %tmp, i70* @i70_s
 	ret void
 }
 
 define void @i71_ls() nounwind  {
-	%tmp = load i71* @i71_l		; <i71> [#uses=1]
+	%tmp = load i71, i71* @i71_l		; <i71> [#uses=1]
 	store i71 %tmp, i71* @i71_s
 	ret void
 }
 
 define void @i72_ls() nounwind  {
-	%tmp = load i72* @i72_l		; <i72> [#uses=1]
+	%tmp = load i72, i72* @i72_l		; <i72> [#uses=1]
 	store i72 %tmp, i72* @i72_s
 	ret void
 }
 
 define void @i73_ls() nounwind  {
-	%tmp = load i73* @i73_l		; <i73> [#uses=1]
+	%tmp = load i73, i73* @i73_l		; <i73> [#uses=1]
 	store i73 %tmp, i73* @i73_s
 	ret void
 }
 
 define void @i74_ls() nounwind  {
-	%tmp = load i74* @i74_l		; <i74> [#uses=1]
+	%tmp = load i74, i74* @i74_l		; <i74> [#uses=1]
 	store i74 %tmp, i74* @i74_s
 	ret void
 }
 
 define void @i75_ls() nounwind  {
-	%tmp = load i75* @i75_l		; <i75> [#uses=1]
+	%tmp = load i75, i75* @i75_l		; <i75> [#uses=1]
 	store i75 %tmp, i75* @i75_s
 	ret void
 }
 
 define void @i76_ls() nounwind  {
-	%tmp = load i76* @i76_l		; <i76> [#uses=1]
+	%tmp = load i76, i76* @i76_l		; <i76> [#uses=1]
 	store i76 %tmp, i76* @i76_s
 	ret void
 }
 
 define void @i77_ls() nounwind  {
-	%tmp = load i77* @i77_l		; <i77> [#uses=1]
+	%tmp = load i77, i77* @i77_l		; <i77> [#uses=1]
 	store i77 %tmp, i77* @i77_s
 	ret void
 }
 
 define void @i78_ls() nounwind  {
-	%tmp = load i78* @i78_l		; <i78> [#uses=1]
+	%tmp = load i78, i78* @i78_l		; <i78> [#uses=1]
 	store i78 %tmp, i78* @i78_s
 	ret void
 }
 
 define void @i79_ls() nounwind  {
-	%tmp = load i79* @i79_l		; <i79> [#uses=1]
+	%tmp = load i79, i79* @i79_l		; <i79> [#uses=1]
 	store i79 %tmp, i79* @i79_s
 	ret void
 }
 
 define void @i80_ls() nounwind  {
-	%tmp = load i80* @i80_l		; <i80> [#uses=1]
+	%tmp = load i80, i80* @i80_l		; <i80> [#uses=1]
 	store i80 %tmp, i80* @i80_s
 	ret void
 }
 
 define void @i81_ls() nounwind  {
-	%tmp = load i81* @i81_l		; <i81> [#uses=1]
+	%tmp = load i81, i81* @i81_l		; <i81> [#uses=1]
 	store i81 %tmp, i81* @i81_s
 	ret void
 }
 
 define void @i82_ls() nounwind  {
-	%tmp = load i82* @i82_l		; <i82> [#uses=1]
+	%tmp = load i82, i82* @i82_l		; <i82> [#uses=1]
 	store i82 %tmp, i82* @i82_s
 	ret void
 }
 
 define void @i83_ls() nounwind  {
-	%tmp = load i83* @i83_l		; <i83> [#uses=1]
+	%tmp = load i83, i83* @i83_l		; <i83> [#uses=1]
 	store i83 %tmp, i83* @i83_s
 	ret void
 }
 
 define void @i84_ls() nounwind  {
-	%tmp = load i84* @i84_l		; <i84> [#uses=1]
+	%tmp = load i84, i84* @i84_l		; <i84> [#uses=1]
 	store i84 %tmp, i84* @i84_s
 	ret void
 }
 
 define void @i85_ls() nounwind  {
-	%tmp = load i85* @i85_l		; <i85> [#uses=1]
+	%tmp = load i85, i85* @i85_l		; <i85> [#uses=1]
 	store i85 %tmp, i85* @i85_s
 	ret void
 }
 
 define void @i86_ls() nounwind  {
-	%tmp = load i86* @i86_l		; <i86> [#uses=1]
+	%tmp = load i86, i86* @i86_l		; <i86> [#uses=1]
 	store i86 %tmp, i86* @i86_s
 	ret void
 }
 
 define void @i87_ls() nounwind  {
-	%tmp = load i87* @i87_l		; <i87> [#uses=1]
+	%tmp = load i87, i87* @i87_l		; <i87> [#uses=1]
 	store i87 %tmp, i87* @i87_s
 	ret void
 }
 
 define void @i88_ls() nounwind  {
-	%tmp = load i88* @i88_l		; <i88> [#uses=1]
+	%tmp = load i88, i88* @i88_l		; <i88> [#uses=1]
 	store i88 %tmp, i88* @i88_s
 	ret void
 }
 
 define void @i89_ls() nounwind  {
-	%tmp = load i89* @i89_l		; <i89> [#uses=1]
+	%tmp = load i89, i89* @i89_l		; <i89> [#uses=1]
 	store i89 %tmp, i89* @i89_s
 	ret void
 }
 
 define void @i90_ls() nounwind  {
-	%tmp = load i90* @i90_l		; <i90> [#uses=1]
+	%tmp = load i90, i90* @i90_l		; <i90> [#uses=1]
 	store i90 %tmp, i90* @i90_s
 	ret void
 }
 
 define void @i91_ls() nounwind  {
-	%tmp = load i91* @i91_l		; <i91> [#uses=1]
+	%tmp = load i91, i91* @i91_l		; <i91> [#uses=1]
 	store i91 %tmp, i91* @i91_s
 	ret void
 }
 
 define void @i92_ls() nounwind  {
-	%tmp = load i92* @i92_l		; <i92> [#uses=1]
+	%tmp = load i92, i92* @i92_l		; <i92> [#uses=1]
 	store i92 %tmp, i92* @i92_s
 	ret void
 }
 
 define void @i93_ls() nounwind  {
-	%tmp = load i93* @i93_l		; <i93> [#uses=1]
+	%tmp = load i93, i93* @i93_l		; <i93> [#uses=1]
 	store i93 %tmp, i93* @i93_s
 	ret void
 }
 
 define void @i94_ls() nounwind  {
-	%tmp = load i94* @i94_l		; <i94> [#uses=1]
+	%tmp = load i94, i94* @i94_l		; <i94> [#uses=1]
 	store i94 %tmp, i94* @i94_s
 	ret void
 }
 
 define void @i95_ls() nounwind  {
-	%tmp = load i95* @i95_l		; <i95> [#uses=1]
+	%tmp = load i95, i95* @i95_l		; <i95> [#uses=1]
 	store i95 %tmp, i95* @i95_s
 	ret void
 }
 
 define void @i96_ls() nounwind  {
-	%tmp = load i96* @i96_l		; <i96> [#uses=1]
+	%tmp = load i96, i96* @i96_l		; <i96> [#uses=1]
 	store i96 %tmp, i96* @i96_s
 	ret void
 }
 
 define void @i97_ls() nounwind  {
-	%tmp = load i97* @i97_l		; <i97> [#uses=1]
+	%tmp = load i97, i97* @i97_l		; <i97> [#uses=1]
 	store i97 %tmp, i97* @i97_s
 	ret void
 }
 
 define void @i98_ls() nounwind  {
-	%tmp = load i98* @i98_l		; <i98> [#uses=1]
+	%tmp = load i98, i98* @i98_l		; <i98> [#uses=1]
 	store i98 %tmp, i98* @i98_s
 	ret void
 }
 
 define void @i99_ls() nounwind  {
-	%tmp = load i99* @i99_l		; <i99> [#uses=1]
+	%tmp = load i99, i99* @i99_l		; <i99> [#uses=1]
 	store i99 %tmp, i99* @i99_s
 	ret void
 }
 
 define void @i100_ls() nounwind  {
-	%tmp = load i100* @i100_l		; <i100> [#uses=1]
+	%tmp = load i100, i100* @i100_l		; <i100> [#uses=1]
 	store i100 %tmp, i100* @i100_s
 	ret void
 }
 
 define void @i101_ls() nounwind  {
-	%tmp = load i101* @i101_l		; <i101> [#uses=1]
+	%tmp = load i101, i101* @i101_l		; <i101> [#uses=1]
 	store i101 %tmp, i101* @i101_s
 	ret void
 }
 
 define void @i102_ls() nounwind  {
-	%tmp = load i102* @i102_l		; <i102> [#uses=1]
+	%tmp = load i102, i102* @i102_l		; <i102> [#uses=1]
 	store i102 %tmp, i102* @i102_s
 	ret void
 }
 
 define void @i103_ls() nounwind  {
-	%tmp = load i103* @i103_l		; <i103> [#uses=1]
+	%tmp = load i103, i103* @i103_l		; <i103> [#uses=1]
 	store i103 %tmp, i103* @i103_s
 	ret void
 }
 
 define void @i104_ls() nounwind  {
-	%tmp = load i104* @i104_l		; <i104> [#uses=1]
+	%tmp = load i104, i104* @i104_l		; <i104> [#uses=1]
 	store i104 %tmp, i104* @i104_s
 	ret void
 }
 
 define void @i105_ls() nounwind  {
-	%tmp = load i105* @i105_l		; <i105> [#uses=1]
+	%tmp = load i105, i105* @i105_l		; <i105> [#uses=1]
 	store i105 %tmp, i105* @i105_s
 	ret void
 }
 
 define void @i106_ls() nounwind  {
-	%tmp = load i106* @i106_l		; <i106> [#uses=1]
+	%tmp = load i106, i106* @i106_l		; <i106> [#uses=1]
 	store i106 %tmp, i106* @i106_s
 	ret void
 }
 
 define void @i107_ls() nounwind  {
-	%tmp = load i107* @i107_l		; <i107> [#uses=1]
+	%tmp = load i107, i107* @i107_l		; <i107> [#uses=1]
 	store i107 %tmp, i107* @i107_s
 	ret void
 }
 
 define void @i108_ls() nounwind  {
-	%tmp = load i108* @i108_l		; <i108> [#uses=1]
+	%tmp = load i108, i108* @i108_l		; <i108> [#uses=1]
 	store i108 %tmp, i108* @i108_s
 	ret void
 }
 
 define void @i109_ls() nounwind  {
-	%tmp = load i109* @i109_l		; <i109> [#uses=1]
+	%tmp = load i109, i109* @i109_l		; <i109> [#uses=1]
 	store i109 %tmp, i109* @i109_s
 	ret void
 }
 
 define void @i110_ls() nounwind  {
-	%tmp = load i110* @i110_l		; <i110> [#uses=1]
+	%tmp = load i110, i110* @i110_l		; <i110> [#uses=1]
 	store i110 %tmp, i110* @i110_s
 	ret void
 }
 
 define void @i111_ls() nounwind  {
-	%tmp = load i111* @i111_l		; <i111> [#uses=1]
+	%tmp = load i111, i111* @i111_l		; <i111> [#uses=1]
 	store i111 %tmp, i111* @i111_s
 	ret void
 }
 
 define void @i112_ls() nounwind  {
-	%tmp = load i112* @i112_l		; <i112> [#uses=1]
+	%tmp = load i112, i112* @i112_l		; <i112> [#uses=1]
 	store i112 %tmp, i112* @i112_s
 	ret void
 }
 
 define void @i113_ls() nounwind  {
-	%tmp = load i113* @i113_l		; <i113> [#uses=1]
+	%tmp = load i113, i113* @i113_l		; <i113> [#uses=1]
 	store i113 %tmp, i113* @i113_s
 	ret void
 }
 
 define void @i114_ls() nounwind  {
-	%tmp = load i114* @i114_l		; <i114> [#uses=1]
+	%tmp = load i114, i114* @i114_l		; <i114> [#uses=1]
 	store i114 %tmp, i114* @i114_s
 	ret void
 }
 
 define void @i115_ls() nounwind  {
-	%tmp = load i115* @i115_l		; <i115> [#uses=1]
+	%tmp = load i115, i115* @i115_l		; <i115> [#uses=1]
 	store i115 %tmp, i115* @i115_s
 	ret void
 }
 
 define void @i116_ls() nounwind  {
-	%tmp = load i116* @i116_l		; <i116> [#uses=1]
+	%tmp = load i116, i116* @i116_l		; <i116> [#uses=1]
 	store i116 %tmp, i116* @i116_s
 	ret void
 }
 
 define void @i117_ls() nounwind  {
-	%tmp = load i117* @i117_l		; <i117> [#uses=1]
+	%tmp = load i117, i117* @i117_l		; <i117> [#uses=1]
 	store i117 %tmp, i117* @i117_s
 	ret void
 }
 
 define void @i118_ls() nounwind  {
-	%tmp = load i118* @i118_l		; <i118> [#uses=1]
+	%tmp = load i118, i118* @i118_l		; <i118> [#uses=1]
 	store i118 %tmp, i118* @i118_s
 	ret void
 }
 
 define void @i119_ls() nounwind  {
-	%tmp = load i119* @i119_l		; <i119> [#uses=1]
+	%tmp = load i119, i119* @i119_l		; <i119> [#uses=1]
 	store i119 %tmp, i119* @i119_s
 	ret void
 }
 
 define void @i120_ls() nounwind  {
-	%tmp = load i120* @i120_l		; <i120> [#uses=1]
+	%tmp = load i120, i120* @i120_l		; <i120> [#uses=1]
 	store i120 %tmp, i120* @i120_s
 	ret void
 }
 
 define void @i121_ls() nounwind  {
-	%tmp = load i121* @i121_l		; <i121> [#uses=1]
+	%tmp = load i121, i121* @i121_l		; <i121> [#uses=1]
 	store i121 %tmp, i121* @i121_s
 	ret void
 }
 
 define void @i122_ls() nounwind  {
-	%tmp = load i122* @i122_l		; <i122> [#uses=1]
+	%tmp = load i122, i122* @i122_l		; <i122> [#uses=1]
 	store i122 %tmp, i122* @i122_s
 	ret void
 }
 
 define void @i123_ls() nounwind  {
-	%tmp = load i123* @i123_l		; <i123> [#uses=1]
+	%tmp = load i123, i123* @i123_l		; <i123> [#uses=1]
 	store i123 %tmp, i123* @i123_s
 	ret void
 }
 
 define void @i124_ls() nounwind  {
-	%tmp = load i124* @i124_l		; <i124> [#uses=1]
+	%tmp = load i124, i124* @i124_l		; <i124> [#uses=1]
 	store i124 %tmp, i124* @i124_s
 	ret void
 }
 
 define void @i125_ls() nounwind  {
-	%tmp = load i125* @i125_l		; <i125> [#uses=1]
+	%tmp = load i125, i125* @i125_l		; <i125> [#uses=1]
 	store i125 %tmp, i125* @i125_s
 	ret void
 }
 
 define void @i126_ls() nounwind  {
-	%tmp = load i126* @i126_l		; <i126> [#uses=1]
+	%tmp = load i126, i126* @i126_l		; <i126> [#uses=1]
 	store i126 %tmp, i126* @i126_s
 	ret void
 }
 
 define void @i127_ls() nounwind  {
-	%tmp = load i127* @i127_l		; <i127> [#uses=1]
+	%tmp = load i127, i127* @i127_l		; <i127> [#uses=1]
 	store i127 %tmp, i127* @i127_s
 	ret void
 }
 
 define void @i128_ls() nounwind  {
-	%tmp = load i128* @i128_l		; <i128> [#uses=1]
+	%tmp = load i128, i128* @i128_l		; <i128> [#uses=1]
 	store i128 %tmp, i128* @i128_s
 	ret void
 }
 
 define void @i129_ls() nounwind  {
-	%tmp = load i129* @i129_l		; <i129> [#uses=1]
+	%tmp = load i129, i129* @i129_l		; <i129> [#uses=1]
 	store i129 %tmp, i129* @i129_s
 	ret void
 }
 
 define void @i130_ls() nounwind  {
-	%tmp = load i130* @i130_l		; <i130> [#uses=1]
+	%tmp = load i130, i130* @i130_l		; <i130> [#uses=1]
 	store i130 %tmp, i130* @i130_s
 	ret void
 }
 
 define void @i131_ls() nounwind  {
-	%tmp = load i131* @i131_l		; <i131> [#uses=1]
+	%tmp = load i131, i131* @i131_l		; <i131> [#uses=1]
 	store i131 %tmp, i131* @i131_s
 	ret void
 }
 
 define void @i132_ls() nounwind  {
-	%tmp = load i132* @i132_l		; <i132> [#uses=1]
+	%tmp = load i132, i132* @i132_l		; <i132> [#uses=1]
 	store i132 %tmp, i132* @i132_s
 	ret void
 }
 
 define void @i133_ls() nounwind  {
-	%tmp = load i133* @i133_l		; <i133> [#uses=1]
+	%tmp = load i133, i133* @i133_l		; <i133> [#uses=1]
 	store i133 %tmp, i133* @i133_s
 	ret void
 }
 
 define void @i134_ls() nounwind  {
-	%tmp = load i134* @i134_l		; <i134> [#uses=1]
+	%tmp = load i134, i134* @i134_l		; <i134> [#uses=1]
 	store i134 %tmp, i134* @i134_s
 	ret void
 }
 
 define void @i135_ls() nounwind  {
-	%tmp = load i135* @i135_l		; <i135> [#uses=1]
+	%tmp = load i135, i135* @i135_l		; <i135> [#uses=1]
 	store i135 %tmp, i135* @i135_s
 	ret void
 }
 
 define void @i136_ls() nounwind  {
-	%tmp = load i136* @i136_l		; <i136> [#uses=1]
+	%tmp = load i136, i136* @i136_l		; <i136> [#uses=1]
 	store i136 %tmp, i136* @i136_s
 	ret void
 }
 
 define void @i137_ls() nounwind  {
-	%tmp = load i137* @i137_l		; <i137> [#uses=1]
+	%tmp = load i137, i137* @i137_l		; <i137> [#uses=1]
 	store i137 %tmp, i137* @i137_s
 	ret void
 }
 
 define void @i138_ls() nounwind  {
-	%tmp = load i138* @i138_l		; <i138> [#uses=1]
+	%tmp = load i138, i138* @i138_l		; <i138> [#uses=1]
 	store i138 %tmp, i138* @i138_s
 	ret void
 }
 
 define void @i139_ls() nounwind  {
-	%tmp = load i139* @i139_l		; <i139> [#uses=1]
+	%tmp = load i139, i139* @i139_l		; <i139> [#uses=1]
 	store i139 %tmp, i139* @i139_s
 	ret void
 }
 
 define void @i140_ls() nounwind  {
-	%tmp = load i140* @i140_l		; <i140> [#uses=1]
+	%tmp = load i140, i140* @i140_l		; <i140> [#uses=1]
 	store i140 %tmp, i140* @i140_s
 	ret void
 }
 
 define void @i141_ls() nounwind  {
-	%tmp = load i141* @i141_l		; <i141> [#uses=1]
+	%tmp = load i141, i141* @i141_l		; <i141> [#uses=1]
 	store i141 %tmp, i141* @i141_s
 	ret void
 }
 
 define void @i142_ls() nounwind  {
-	%tmp = load i142* @i142_l		; <i142> [#uses=1]
+	%tmp = load i142, i142* @i142_l		; <i142> [#uses=1]
 	store i142 %tmp, i142* @i142_s
 	ret void
 }
 
 define void @i143_ls() nounwind  {
-	%tmp = load i143* @i143_l		; <i143> [#uses=1]
+	%tmp = load i143, i143* @i143_l		; <i143> [#uses=1]
 	store i143 %tmp, i143* @i143_s
 	ret void
 }
 
 define void @i144_ls() nounwind  {
-	%tmp = load i144* @i144_l		; <i144> [#uses=1]
+	%tmp = load i144, i144* @i144_l		; <i144> [#uses=1]
 	store i144 %tmp, i144* @i144_s
 	ret void
 }
 
 define void @i145_ls() nounwind  {
-	%tmp = load i145* @i145_l		; <i145> [#uses=1]
+	%tmp = load i145, i145* @i145_l		; <i145> [#uses=1]
 	store i145 %tmp, i145* @i145_s
 	ret void
 }
 
 define void @i146_ls() nounwind  {
-	%tmp = load i146* @i146_l		; <i146> [#uses=1]
+	%tmp = load i146, i146* @i146_l		; <i146> [#uses=1]
 	store i146 %tmp, i146* @i146_s
 	ret void
 }
 
 define void @i147_ls() nounwind  {
-	%tmp = load i147* @i147_l		; <i147> [#uses=1]
+	%tmp = load i147, i147* @i147_l		; <i147> [#uses=1]
 	store i147 %tmp, i147* @i147_s
 	ret void
 }
 
 define void @i148_ls() nounwind  {
-	%tmp = load i148* @i148_l		; <i148> [#uses=1]
+	%tmp = load i148, i148* @i148_l		; <i148> [#uses=1]
 	store i148 %tmp, i148* @i148_s
 	ret void
 }
 
 define void @i149_ls() nounwind  {
-	%tmp = load i149* @i149_l		; <i149> [#uses=1]
+	%tmp = load i149, i149* @i149_l		; <i149> [#uses=1]
 	store i149 %tmp, i149* @i149_s
 	ret void
 }
 
 define void @i150_ls() nounwind  {
-	%tmp = load i150* @i150_l		; <i150> [#uses=1]
+	%tmp = load i150, i150* @i150_l		; <i150> [#uses=1]
 	store i150 %tmp, i150* @i150_s
 	ret void
 }
 
 define void @i151_ls() nounwind  {
-	%tmp = load i151* @i151_l		; <i151> [#uses=1]
+	%tmp = load i151, i151* @i151_l		; <i151> [#uses=1]
 	store i151 %tmp, i151* @i151_s
 	ret void
 }
 
 define void @i152_ls() nounwind  {
-	%tmp = load i152* @i152_l		; <i152> [#uses=1]
+	%tmp = load i152, i152* @i152_l		; <i152> [#uses=1]
 	store i152 %tmp, i152* @i152_s
 	ret void
 }
 
 define void @i153_ls() nounwind  {
-	%tmp = load i153* @i153_l		; <i153> [#uses=1]
+	%tmp = load i153, i153* @i153_l		; <i153> [#uses=1]
 	store i153 %tmp, i153* @i153_s
 	ret void
 }
 
 define void @i154_ls() nounwind  {
-	%tmp = load i154* @i154_l		; <i154> [#uses=1]
+	%tmp = load i154, i154* @i154_l		; <i154> [#uses=1]
 	store i154 %tmp, i154* @i154_s
 	ret void
 }
 
 define void @i155_ls() nounwind  {
-	%tmp = load i155* @i155_l		; <i155> [#uses=1]
+	%tmp = load i155, i155* @i155_l		; <i155> [#uses=1]
 	store i155 %tmp, i155* @i155_s
 	ret void
 }
 
 define void @i156_ls() nounwind  {
-	%tmp = load i156* @i156_l		; <i156> [#uses=1]
+	%tmp = load i156, i156* @i156_l		; <i156> [#uses=1]
 	store i156 %tmp, i156* @i156_s
 	ret void
 }
 
 define void @i157_ls() nounwind  {
-	%tmp = load i157* @i157_l		; <i157> [#uses=1]
+	%tmp = load i157, i157* @i157_l		; <i157> [#uses=1]
 	store i157 %tmp, i157* @i157_s
 	ret void
 }
 
 define void @i158_ls() nounwind  {
-	%tmp = load i158* @i158_l		; <i158> [#uses=1]
+	%tmp = load i158, i158* @i158_l		; <i158> [#uses=1]
 	store i158 %tmp, i158* @i158_s
 	ret void
 }
 
 define void @i159_ls() nounwind  {
-	%tmp = load i159* @i159_l		; <i159> [#uses=1]
+	%tmp = load i159, i159* @i159_l		; <i159> [#uses=1]
 	store i159 %tmp, i159* @i159_s
 	ret void
 }
 
 define void @i160_ls() nounwind  {
-	%tmp = load i160* @i160_l		; <i160> [#uses=1]
+	%tmp = load i160, i160* @i160_l		; <i160> [#uses=1]
 	store i160 %tmp, i160* @i160_s
 	ret void
 }
 
 define void @i161_ls() nounwind  {
-	%tmp = load i161* @i161_l		; <i161> [#uses=1]
+	%tmp = load i161, i161* @i161_l		; <i161> [#uses=1]
 	store i161 %tmp, i161* @i161_s
 	ret void
 }
 
 define void @i162_ls() nounwind  {
-	%tmp = load i162* @i162_l		; <i162> [#uses=1]
+	%tmp = load i162, i162* @i162_l		; <i162> [#uses=1]
 	store i162 %tmp, i162* @i162_s
 	ret void
 }
 
 define void @i163_ls() nounwind  {
-	%tmp = load i163* @i163_l		; <i163> [#uses=1]
+	%tmp = load i163, i163* @i163_l		; <i163> [#uses=1]
 	store i163 %tmp, i163* @i163_s
 	ret void
 }
 
 define void @i164_ls() nounwind  {
-	%tmp = load i164* @i164_l		; <i164> [#uses=1]
+	%tmp = load i164, i164* @i164_l		; <i164> [#uses=1]
 	store i164 %tmp, i164* @i164_s
 	ret void
 }
 
 define void @i165_ls() nounwind  {
-	%tmp = load i165* @i165_l		; <i165> [#uses=1]
+	%tmp = load i165, i165* @i165_l		; <i165> [#uses=1]
 	store i165 %tmp, i165* @i165_s
 	ret void
 }
 
 define void @i166_ls() nounwind  {
-	%tmp = load i166* @i166_l		; <i166> [#uses=1]
+	%tmp = load i166, i166* @i166_l		; <i166> [#uses=1]
 	store i166 %tmp, i166* @i166_s
 	ret void
 }
 
 define void @i167_ls() nounwind  {
-	%tmp = load i167* @i167_l		; <i167> [#uses=1]
+	%tmp = load i167, i167* @i167_l		; <i167> [#uses=1]
 	store i167 %tmp, i167* @i167_s
 	ret void
 }
 
 define void @i168_ls() nounwind  {
-	%tmp = load i168* @i168_l		; <i168> [#uses=1]
+	%tmp = load i168, i168* @i168_l		; <i168> [#uses=1]
 	store i168 %tmp, i168* @i168_s
 	ret void
 }
 
 define void @i169_ls() nounwind  {
-	%tmp = load i169* @i169_l		; <i169> [#uses=1]
+	%tmp = load i169, i169* @i169_l		; <i169> [#uses=1]
 	store i169 %tmp, i169* @i169_s
 	ret void
 }
 
 define void @i170_ls() nounwind  {
-	%tmp = load i170* @i170_l		; <i170> [#uses=1]
+	%tmp = load i170, i170* @i170_l		; <i170> [#uses=1]
 	store i170 %tmp, i170* @i170_s
 	ret void
 }
 
 define void @i171_ls() nounwind  {
-	%tmp = load i171* @i171_l		; <i171> [#uses=1]
+	%tmp = load i171, i171* @i171_l		; <i171> [#uses=1]
 	store i171 %tmp, i171* @i171_s
 	ret void
 }
 
 define void @i172_ls() nounwind  {
-	%tmp = load i172* @i172_l		; <i172> [#uses=1]
+	%tmp = load i172, i172* @i172_l		; <i172> [#uses=1]
 	store i172 %tmp, i172* @i172_s
 	ret void
 }
 
 define void @i173_ls() nounwind  {
-	%tmp = load i173* @i173_l		; <i173> [#uses=1]
+	%tmp = load i173, i173* @i173_l		; <i173> [#uses=1]
 	store i173 %tmp, i173* @i173_s
 	ret void
 }
 
 define void @i174_ls() nounwind  {
-	%tmp = load i174* @i174_l		; <i174> [#uses=1]
+	%tmp = load i174, i174* @i174_l		; <i174> [#uses=1]
 	store i174 %tmp, i174* @i174_s
 	ret void
 }
 
 define void @i175_ls() nounwind  {
-	%tmp = load i175* @i175_l		; <i175> [#uses=1]
+	%tmp = load i175, i175* @i175_l		; <i175> [#uses=1]
 	store i175 %tmp, i175* @i175_s
 	ret void
 }
 
 define void @i176_ls() nounwind  {
-	%tmp = load i176* @i176_l		; <i176> [#uses=1]
+	%tmp = load i176, i176* @i176_l		; <i176> [#uses=1]
 	store i176 %tmp, i176* @i176_s
 	ret void
 }
 
 define void @i177_ls() nounwind  {
-	%tmp = load i177* @i177_l		; <i177> [#uses=1]
+	%tmp = load i177, i177* @i177_l		; <i177> [#uses=1]
 	store i177 %tmp, i177* @i177_s
 	ret void
 }
 
 define void @i178_ls() nounwind  {
-	%tmp = load i178* @i178_l		; <i178> [#uses=1]
+	%tmp = load i178, i178* @i178_l		; <i178> [#uses=1]
 	store i178 %tmp, i178* @i178_s
 	ret void
 }
 
 define void @i179_ls() nounwind  {
-	%tmp = load i179* @i179_l		; <i179> [#uses=1]
+	%tmp = load i179, i179* @i179_l		; <i179> [#uses=1]
 	store i179 %tmp, i179* @i179_s
 	ret void
 }
 
 define void @i180_ls() nounwind  {
-	%tmp = load i180* @i180_l		; <i180> [#uses=1]
+	%tmp = load i180, i180* @i180_l		; <i180> [#uses=1]
 	store i180 %tmp, i180* @i180_s
 	ret void
 }
 
 define void @i181_ls() nounwind  {
-	%tmp = load i181* @i181_l		; <i181> [#uses=1]
+	%tmp = load i181, i181* @i181_l		; <i181> [#uses=1]
 	store i181 %tmp, i181* @i181_s
 	ret void
 }
 
 define void @i182_ls() nounwind  {
-	%tmp = load i182* @i182_l		; <i182> [#uses=1]
+	%tmp = load i182, i182* @i182_l		; <i182> [#uses=1]
 	store i182 %tmp, i182* @i182_s
 	ret void
 }
 
 define void @i183_ls() nounwind  {
-	%tmp = load i183* @i183_l		; <i183> [#uses=1]
+	%tmp = load i183, i183* @i183_l		; <i183> [#uses=1]
 	store i183 %tmp, i183* @i183_s
 	ret void
 }
 
 define void @i184_ls() nounwind  {
-	%tmp = load i184* @i184_l		; <i184> [#uses=1]
+	%tmp = load i184, i184* @i184_l		; <i184> [#uses=1]
 	store i184 %tmp, i184* @i184_s
 	ret void
 }
 
 define void @i185_ls() nounwind  {
-	%tmp = load i185* @i185_l		; <i185> [#uses=1]
+	%tmp = load i185, i185* @i185_l		; <i185> [#uses=1]
 	store i185 %tmp, i185* @i185_s
 	ret void
 }
 
 define void @i186_ls() nounwind  {
-	%tmp = load i186* @i186_l		; <i186> [#uses=1]
+	%tmp = load i186, i186* @i186_l		; <i186> [#uses=1]
 	store i186 %tmp, i186* @i186_s
 	ret void
 }
 
 define void @i187_ls() nounwind  {
-	%tmp = load i187* @i187_l		; <i187> [#uses=1]
+	%tmp = load i187, i187* @i187_l		; <i187> [#uses=1]
 	store i187 %tmp, i187* @i187_s
 	ret void
 }
 
 define void @i188_ls() nounwind  {
-	%tmp = load i188* @i188_l		; <i188> [#uses=1]
+	%tmp = load i188, i188* @i188_l		; <i188> [#uses=1]
 	store i188 %tmp, i188* @i188_s
 	ret void
 }
 
 define void @i189_ls() nounwind  {
-	%tmp = load i189* @i189_l		; <i189> [#uses=1]
+	%tmp = load i189, i189* @i189_l		; <i189> [#uses=1]
 	store i189 %tmp, i189* @i189_s
 	ret void
 }
 
 define void @i190_ls() nounwind  {
-	%tmp = load i190* @i190_l		; <i190> [#uses=1]
+	%tmp = load i190, i190* @i190_l		; <i190> [#uses=1]
 	store i190 %tmp, i190* @i190_s
 	ret void
 }
 
 define void @i191_ls() nounwind  {
-	%tmp = load i191* @i191_l		; <i191> [#uses=1]
+	%tmp = load i191, i191* @i191_l		; <i191> [#uses=1]
 	store i191 %tmp, i191* @i191_s
 	ret void
 }
 
 define void @i192_ls() nounwind  {
-	%tmp = load i192* @i192_l		; <i192> [#uses=1]
+	%tmp = load i192, i192* @i192_l		; <i192> [#uses=1]
 	store i192 %tmp, i192* @i192_s
 	ret void
 }
 
 define void @i193_ls() nounwind  {
-	%tmp = load i193* @i193_l		; <i193> [#uses=1]
+	%tmp = load i193, i193* @i193_l		; <i193> [#uses=1]
 	store i193 %tmp, i193* @i193_s
 	ret void
 }
 
 define void @i194_ls() nounwind  {
-	%tmp = load i194* @i194_l		; <i194> [#uses=1]
+	%tmp = load i194, i194* @i194_l		; <i194> [#uses=1]
 	store i194 %tmp, i194* @i194_s
 	ret void
 }
 
 define void @i195_ls() nounwind  {
-	%tmp = load i195* @i195_l		; <i195> [#uses=1]
+	%tmp = load i195, i195* @i195_l		; <i195> [#uses=1]
 	store i195 %tmp, i195* @i195_s
 	ret void
 }
 
 define void @i196_ls() nounwind  {
-	%tmp = load i196* @i196_l		; <i196> [#uses=1]
+	%tmp = load i196, i196* @i196_l		; <i196> [#uses=1]
 	store i196 %tmp, i196* @i196_s
 	ret void
 }
 
 define void @i197_ls() nounwind  {
-	%tmp = load i197* @i197_l		; <i197> [#uses=1]
+	%tmp = load i197, i197* @i197_l		; <i197> [#uses=1]
 	store i197 %tmp, i197* @i197_s
 	ret void
 }
 
 define void @i198_ls() nounwind  {
-	%tmp = load i198* @i198_l		; <i198> [#uses=1]
+	%tmp = load i198, i198* @i198_l		; <i198> [#uses=1]
 	store i198 %tmp, i198* @i198_s
 	ret void
 }
 
 define void @i199_ls() nounwind  {
-	%tmp = load i199* @i199_l		; <i199> [#uses=1]
+	%tmp = load i199, i199* @i199_l		; <i199> [#uses=1]
 	store i199 %tmp, i199* @i199_s
 	ret void
 }
 
 define void @i200_ls() nounwind  {
-	%tmp = load i200* @i200_l		; <i200> [#uses=1]
+	%tmp = load i200, i200* @i200_l		; <i200> [#uses=1]
 	store i200 %tmp, i200* @i200_s
 	ret void
 }
 
 define void @i201_ls() nounwind  {
-	%tmp = load i201* @i201_l		; <i201> [#uses=1]
+	%tmp = load i201, i201* @i201_l		; <i201> [#uses=1]
 	store i201 %tmp, i201* @i201_s
 	ret void
 }
 
 define void @i202_ls() nounwind  {
-	%tmp = load i202* @i202_l		; <i202> [#uses=1]
+	%tmp = load i202, i202* @i202_l		; <i202> [#uses=1]
 	store i202 %tmp, i202* @i202_s
 	ret void
 }
 
 define void @i203_ls() nounwind  {
-	%tmp = load i203* @i203_l		; <i203> [#uses=1]
+	%tmp = load i203, i203* @i203_l		; <i203> [#uses=1]
 	store i203 %tmp, i203* @i203_s
 	ret void
 }
 
 define void @i204_ls() nounwind  {
-	%tmp = load i204* @i204_l		; <i204> [#uses=1]
+	%tmp = load i204, i204* @i204_l		; <i204> [#uses=1]
 	store i204 %tmp, i204* @i204_s
 	ret void
 }
 
 define void @i205_ls() nounwind  {
-	%tmp = load i205* @i205_l		; <i205> [#uses=1]
+	%tmp = load i205, i205* @i205_l		; <i205> [#uses=1]
 	store i205 %tmp, i205* @i205_s
 	ret void
 }
 
 define void @i206_ls() nounwind  {
-	%tmp = load i206* @i206_l		; <i206> [#uses=1]
+	%tmp = load i206, i206* @i206_l		; <i206> [#uses=1]
 	store i206 %tmp, i206* @i206_s
 	ret void
 }
 
 define void @i207_ls() nounwind  {
-	%tmp = load i207* @i207_l		; <i207> [#uses=1]
+	%tmp = load i207, i207* @i207_l		; <i207> [#uses=1]
 	store i207 %tmp, i207* @i207_s
 	ret void
 }
 
 define void @i208_ls() nounwind  {
-	%tmp = load i208* @i208_l		; <i208> [#uses=1]
+	%tmp = load i208, i208* @i208_l		; <i208> [#uses=1]
 	store i208 %tmp, i208* @i208_s
 	ret void
 }
 
 define void @i209_ls() nounwind  {
-	%tmp = load i209* @i209_l		; <i209> [#uses=1]
+	%tmp = load i209, i209* @i209_l		; <i209> [#uses=1]
 	store i209 %tmp, i209* @i209_s
 	ret void
 }
 
 define void @i210_ls() nounwind  {
-	%tmp = load i210* @i210_l		; <i210> [#uses=1]
+	%tmp = load i210, i210* @i210_l		; <i210> [#uses=1]
 	store i210 %tmp, i210* @i210_s
 	ret void
 }
 
 define void @i211_ls() nounwind  {
-	%tmp = load i211* @i211_l		; <i211> [#uses=1]
+	%tmp = load i211, i211* @i211_l		; <i211> [#uses=1]
 	store i211 %tmp, i211* @i211_s
 	ret void
 }
 
 define void @i212_ls() nounwind  {
-	%tmp = load i212* @i212_l		; <i212> [#uses=1]
+	%tmp = load i212, i212* @i212_l		; <i212> [#uses=1]
 	store i212 %tmp, i212* @i212_s
 	ret void
 }
 
 define void @i213_ls() nounwind  {
-	%tmp = load i213* @i213_l		; <i213> [#uses=1]
+	%tmp = load i213, i213* @i213_l		; <i213> [#uses=1]
 	store i213 %tmp, i213* @i213_s
 	ret void
 }
 
 define void @i214_ls() nounwind  {
-	%tmp = load i214* @i214_l		; <i214> [#uses=1]
+	%tmp = load i214, i214* @i214_l		; <i214> [#uses=1]
 	store i214 %tmp, i214* @i214_s
 	ret void
 }
 
 define void @i215_ls() nounwind  {
-	%tmp = load i215* @i215_l		; <i215> [#uses=1]
+	%tmp = load i215, i215* @i215_l		; <i215> [#uses=1]
 	store i215 %tmp, i215* @i215_s
 	ret void
 }
 
 define void @i216_ls() nounwind  {
-	%tmp = load i216* @i216_l		; <i216> [#uses=1]
+	%tmp = load i216, i216* @i216_l		; <i216> [#uses=1]
 	store i216 %tmp, i216* @i216_s
 	ret void
 }
 
 define void @i217_ls() nounwind  {
-	%tmp = load i217* @i217_l		; <i217> [#uses=1]
+	%tmp = load i217, i217* @i217_l		; <i217> [#uses=1]
 	store i217 %tmp, i217* @i217_s
 	ret void
 }
 
 define void @i218_ls() nounwind  {
-	%tmp = load i218* @i218_l		; <i218> [#uses=1]
+	%tmp = load i218, i218* @i218_l		; <i218> [#uses=1]
 	store i218 %tmp, i218* @i218_s
 	ret void
 }
 
 define void @i219_ls() nounwind  {
-	%tmp = load i219* @i219_l		; <i219> [#uses=1]
+	%tmp = load i219, i219* @i219_l		; <i219> [#uses=1]
 	store i219 %tmp, i219* @i219_s
 	ret void
 }
 
 define void @i220_ls() nounwind  {
-	%tmp = load i220* @i220_l		; <i220> [#uses=1]
+	%tmp = load i220, i220* @i220_l		; <i220> [#uses=1]
 	store i220 %tmp, i220* @i220_s
 	ret void
 }
 
 define void @i221_ls() nounwind  {
-	%tmp = load i221* @i221_l		; <i221> [#uses=1]
+	%tmp = load i221, i221* @i221_l		; <i221> [#uses=1]
 	store i221 %tmp, i221* @i221_s
 	ret void
 }
 
 define void @i222_ls() nounwind  {
-	%tmp = load i222* @i222_l		; <i222> [#uses=1]
+	%tmp = load i222, i222* @i222_l		; <i222> [#uses=1]
 	store i222 %tmp, i222* @i222_s
 	ret void
 }
 
 define void @i223_ls() nounwind  {
-	%tmp = load i223* @i223_l		; <i223> [#uses=1]
+	%tmp = load i223, i223* @i223_l		; <i223> [#uses=1]
 	store i223 %tmp, i223* @i223_s
 	ret void
 }
 
 define void @i224_ls() nounwind  {
-	%tmp = load i224* @i224_l		; <i224> [#uses=1]
+	%tmp = load i224, i224* @i224_l		; <i224> [#uses=1]
 	store i224 %tmp, i224* @i224_s
 	ret void
 }
 
 define void @i225_ls() nounwind  {
-	%tmp = load i225* @i225_l		; <i225> [#uses=1]
+	%tmp = load i225, i225* @i225_l		; <i225> [#uses=1]
 	store i225 %tmp, i225* @i225_s
 	ret void
 }
 
 define void @i226_ls() nounwind  {
-	%tmp = load i226* @i226_l		; <i226> [#uses=1]
+	%tmp = load i226, i226* @i226_l		; <i226> [#uses=1]
 	store i226 %tmp, i226* @i226_s
 	ret void
 }
 
 define void @i227_ls() nounwind  {
-	%tmp = load i227* @i227_l		; <i227> [#uses=1]
+	%tmp = load i227, i227* @i227_l		; <i227> [#uses=1]
 	store i227 %tmp, i227* @i227_s
 	ret void
 }
 
 define void @i228_ls() nounwind  {
-	%tmp = load i228* @i228_l		; <i228> [#uses=1]
+	%tmp = load i228, i228* @i228_l		; <i228> [#uses=1]
 	store i228 %tmp, i228* @i228_s
 	ret void
 }
 
 define void @i229_ls() nounwind  {
-	%tmp = load i229* @i229_l		; <i229> [#uses=1]
+	%tmp = load i229, i229* @i229_l		; <i229> [#uses=1]
 	store i229 %tmp, i229* @i229_s
 	ret void
 }
 
 define void @i230_ls() nounwind  {
-	%tmp = load i230* @i230_l		; <i230> [#uses=1]
+	%tmp = load i230, i230* @i230_l		; <i230> [#uses=1]
 	store i230 %tmp, i230* @i230_s
 	ret void
 }
 
 define void @i231_ls() nounwind  {
-	%tmp = load i231* @i231_l		; <i231> [#uses=1]
+	%tmp = load i231, i231* @i231_l		; <i231> [#uses=1]
 	store i231 %tmp, i231* @i231_s
 	ret void
 }
 
 define void @i232_ls() nounwind  {
-	%tmp = load i232* @i232_l		; <i232> [#uses=1]
+	%tmp = load i232, i232* @i232_l		; <i232> [#uses=1]
 	store i232 %tmp, i232* @i232_s
 	ret void
 }
 
 define void @i233_ls() nounwind  {
-	%tmp = load i233* @i233_l		; <i233> [#uses=1]
+	%tmp = load i233, i233* @i233_l		; <i233> [#uses=1]
 	store i233 %tmp, i233* @i233_s
 	ret void
 }
 
 define void @i234_ls() nounwind  {
-	%tmp = load i234* @i234_l		; <i234> [#uses=1]
+	%tmp = load i234, i234* @i234_l		; <i234> [#uses=1]
 	store i234 %tmp, i234* @i234_s
 	ret void
 }
 
 define void @i235_ls() nounwind  {
-	%tmp = load i235* @i235_l		; <i235> [#uses=1]
+	%tmp = load i235, i235* @i235_l		; <i235> [#uses=1]
 	store i235 %tmp, i235* @i235_s
 	ret void
 }
 
 define void @i236_ls() nounwind  {
-	%tmp = load i236* @i236_l		; <i236> [#uses=1]
+	%tmp = load i236, i236* @i236_l		; <i236> [#uses=1]
 	store i236 %tmp, i236* @i236_s
 	ret void
 }
 
 define void @i237_ls() nounwind  {
-	%tmp = load i237* @i237_l		; <i237> [#uses=1]
+	%tmp = load i237, i237* @i237_l		; <i237> [#uses=1]
 	store i237 %tmp, i237* @i237_s
 	ret void
 }
 
 define void @i238_ls() nounwind  {
-	%tmp = load i238* @i238_l		; <i238> [#uses=1]
+	%tmp = load i238, i238* @i238_l		; <i238> [#uses=1]
 	store i238 %tmp, i238* @i238_s
 	ret void
 }
 
 define void @i239_ls() nounwind  {
-	%tmp = load i239* @i239_l		; <i239> [#uses=1]
+	%tmp = load i239, i239* @i239_l		; <i239> [#uses=1]
 	store i239 %tmp, i239* @i239_s
 	ret void
 }
 
 define void @i240_ls() nounwind  {
-	%tmp = load i240* @i240_l		; <i240> [#uses=1]
+	%tmp = load i240, i240* @i240_l		; <i240> [#uses=1]
 	store i240 %tmp, i240* @i240_s
 	ret void
 }
 
 define void @i241_ls() nounwind  {
-	%tmp = load i241* @i241_l		; <i241> [#uses=1]
+	%tmp = load i241, i241* @i241_l		; <i241> [#uses=1]
 	store i241 %tmp, i241* @i241_s
 	ret void
 }
 
 define void @i242_ls() nounwind  {
-	%tmp = load i242* @i242_l		; <i242> [#uses=1]
+	%tmp = load i242, i242* @i242_l		; <i242> [#uses=1]
 	store i242 %tmp, i242* @i242_s
 	ret void
 }
 
 define void @i243_ls() nounwind  {
-	%tmp = load i243* @i243_l		; <i243> [#uses=1]
+	%tmp = load i243, i243* @i243_l		; <i243> [#uses=1]
 	store i243 %tmp, i243* @i243_s
 	ret void
 }
 
 define void @i244_ls() nounwind  {
-	%tmp = load i244* @i244_l		; <i244> [#uses=1]
+	%tmp = load i244, i244* @i244_l		; <i244> [#uses=1]
 	store i244 %tmp, i244* @i244_s
 	ret void
 }
 
 define void @i245_ls() nounwind  {
-	%tmp = load i245* @i245_l		; <i245> [#uses=1]
+	%tmp = load i245, i245* @i245_l		; <i245> [#uses=1]
 	store i245 %tmp, i245* @i245_s
 	ret void
 }
 
 define void @i246_ls() nounwind  {
-	%tmp = load i246* @i246_l		; <i246> [#uses=1]
+	%tmp = load i246, i246* @i246_l		; <i246> [#uses=1]
 	store i246 %tmp, i246* @i246_s
 	ret void
 }
 
 define void @i247_ls() nounwind  {
-	%tmp = load i247* @i247_l		; <i247> [#uses=1]
+	%tmp = load i247, i247* @i247_l		; <i247> [#uses=1]
 	store i247 %tmp, i247* @i247_s
 	ret void
 }
 
 define void @i248_ls() nounwind  {
-	%tmp = load i248* @i248_l		; <i248> [#uses=1]
+	%tmp = load i248, i248* @i248_l		; <i248> [#uses=1]
 	store i248 %tmp, i248* @i248_s
 	ret void
 }
 
 define void @i249_ls() nounwind  {
-	%tmp = load i249* @i249_l		; <i249> [#uses=1]
+	%tmp = load i249, i249* @i249_l		; <i249> [#uses=1]
 	store i249 %tmp, i249* @i249_s
 	ret void
 }
 
 define void @i250_ls() nounwind  {
-	%tmp = load i250* @i250_l		; <i250> [#uses=1]
+	%tmp = load i250, i250* @i250_l		; <i250> [#uses=1]
 	store i250 %tmp, i250* @i250_s
 	ret void
 }
 
 define void @i251_ls() nounwind  {
-	%tmp = load i251* @i251_l		; <i251> [#uses=1]
+	%tmp = load i251, i251* @i251_l		; <i251> [#uses=1]
 	store i251 %tmp, i251* @i251_s
 	ret void
 }
 
 define void @i252_ls() nounwind  {
-	%tmp = load i252* @i252_l		; <i252> [#uses=1]
+	%tmp = load i252, i252* @i252_l		; <i252> [#uses=1]
 	store i252 %tmp, i252* @i252_s
 	ret void
 }
 
 define void @i253_ls() nounwind  {
-	%tmp = load i253* @i253_l		; <i253> [#uses=1]
+	%tmp = load i253, i253* @i253_l		; <i253> [#uses=1]
 	store i253 %tmp, i253* @i253_s
 	ret void
 }
 
 define void @i254_ls() nounwind  {
-	%tmp = load i254* @i254_l		; <i254> [#uses=1]
+	%tmp = load i254, i254* @i254_l		; <i254> [#uses=1]
 	store i254 %tmp, i254* @i254_s
 	ret void
 }
 
 define void @i255_ls() nounwind  {
-	%tmp = load i255* @i255_l		; <i255> [#uses=1]
+	%tmp = load i255, i255* @i255_l		; <i255> [#uses=1]
 	store i255 %tmp, i255* @i255_s
 	ret void
 }
 
 define void @i256_ls() nounwind  {
-	%tmp = load i256* @i256_l		; <i256> [#uses=1]
+	%tmp = load i256, i256* @i256_l		; <i256> [#uses=1]
 	store i256 %tmp, i256* @i256_s
 	ret void
 }

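Every hunk in this patch applies the same mechanical rewrite: the result type of a load, formerly inferred from its pointer operand, is written out as an explicit first operand, while the pointer operand and any volatile/align decoration stay untouched. A minimal before/after sketch, using a hypothetical global @g and pointer %p rather than any one test above:

        ; old form: result type implied by the pointer operand
        %v = load i32* @g, align 4
        %w = load volatile double* %p

        ; new form: result type stated explicitly; everything else unchanged
        %v = load i32, i32* @g, align 4
        %w = load volatile double, double* %p

The same shape holds for vector, aggregate, and arbitrary-width integer loads, as the i150 through i256 functions above show.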
Modified: llvm/trunk/test/CodeGen/Generic/badFoldGEP.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/badFoldGEP.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/badFoldGEP.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/badFoldGEP.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ bb0:
         %cann-indvar-idxcast = sext i32 %argc to i64            ; <i64> [#uses=1]
         %reg841 = getelementptr [497 x %Domain], [497 x %Domain]* @domain_array, i64 0, i64 %cann-indvar-idxcast, i32 3          ; <i32*> [#uses=1]
         %reg846 = getelementptr i32, i32* %reg841, i64 1             ; <i32*> [#uses=1]
-        %reg820 = load i32* %reg846             ; <i32> [#uses=1]
+        %reg820 = load i32, i32* %reg846             ; <i32> [#uses=1]
         ret i32 %reg820
 }
 

Modified: llvm/trunk/test/CodeGen/Generic/builtin-expect.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/builtin-expect.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/builtin-expect.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/builtin-expect.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ entry:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
   store i32 %x, i32* %x.addr, align 4
-  %tmp = load i32* %x.addr, align 4
+  %tmp = load i32, i32* %x.addr, align 4
   %cmp = icmp sgt i32 %tmp, 1
   %conv = zext i1 %cmp to i32
   %conv1 = sext i32 %conv to i64
@@ -23,7 +23,7 @@ if.end:
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %0 = load i32* %retval
+  %0 = load i32, i32* %retval
   ret i32 %0
 }
 
@@ -36,7 +36,7 @@ entry:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
   store i32 %x, i32* %x.addr, align 4
-  %tmp = load i32* %x.addr, align 4
+  %tmp = load i32, i32* %x.addr, align 4
   %conv = sext i32 %tmp to i64
   %expval = call i64 @llvm.expect.i64(i64 %conv, i64 1)
   %tobool = icmp ne i64 %expval, 0
@@ -52,7 +52,7 @@ if.end:
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %0 = load i32* %retval
+  %0 = load i32, i32* %retval
   ret i32 %0
 }
 
@@ -61,7 +61,7 @@ entry:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
   store i32 %x, i32* %x.addr, align 4
-  %tmp = load i32* %x.addr, align 4
+  %tmp = load i32, i32* %x.addr, align 4
   %tobool = icmp ne i32 %tmp, 0
   %lnot = xor i1 %tobool, true
   %lnot.ext = zext i1 %lnot to i32
@@ -80,7 +80,7 @@ if.end:
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %0 = load i32* %retval
+  %0 = load i32, i32* %retval
   ret i32 %0
 }
 
@@ -89,7 +89,7 @@ entry:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
   store i32 %x, i32* %x.addr, align 4
-  %tmp = load i32* %x.addr, align 4
+  %tmp = load i32, i32* %x.addr, align 4
   %tobool = icmp ne i32 %tmp, 0
   %lnot = xor i1 %tobool, true
   %lnot1 = xor i1 %lnot, true
@@ -109,7 +109,7 @@ if.end:
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %0 = load i32* %retval
+  %0 = load i32, i32* %retval
   ret i32 %0
 }
 
@@ -118,7 +118,7 @@ entry:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
   store i32 %x, i32* %x.addr, align 4
-  %tmp = load i32* %x.addr, align 4
+  %tmp = load i32, i32* %x.addr, align 4
   %cmp = icmp slt i32 %tmp, 0
   %conv = zext i1 %cmp to i32
   %conv1 = sext i32 %conv to i64
@@ -136,7 +136,7 @@ if.end:
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %0 = load i32* %retval
+  %0 = load i32, i32* %retval
   ret i32 %0
 }
 
@@ -145,7 +145,7 @@ entry:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
   store i32 %x, i32* %x.addr, align 4
-  %tmp = load i32* %x.addr, align 4
+  %tmp = load i32, i32* %x.addr, align 4
   %conv = sext i32 %tmp to i64
   %expval = call i64 @llvm.expect.i64(i64 %conv, i64 1)
   switch i64 %expval, label %sw.epilog [
@@ -162,7 +162,7 @@ sw.epilog:
   br label %return
 
 return:                                           ; preds = %sw.epilog, %sw.bb
-  %0 = load i32* %retval
+  %0 = load i32, i32* %retval
   ret i32 %0
 }
 
@@ -171,7 +171,7 @@ entry:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
   store i32 %x, i32* %x.addr, align 4
-  %tmp = load i32* %x.addr, align 4
+  %tmp = load i32, i32* %x.addr, align 4
   %conv = sext i32 %tmp to i64
   %expval = call i64 @llvm.expect.i64(i64 %conv, i64 1)
   switch i64 %expval, label %sw.epilog [
@@ -180,7 +180,7 @@ entry:
   ]
 
 sw.bb:                                            ; preds = %entry, %entry
-  %tmp1 = load i32* %x.addr, align 4
+  %tmp1 = load i32, i32* %x.addr, align 4
   store i32 %tmp1, i32* %retval
   br label %return
 
@@ -189,7 +189,7 @@ sw.epilog:
   br label %return
 
 return:                                           ; preds = %sw.epilog, %sw.bb
-  %0 = load i32* %retval
+  %0 = load i32, i32* %retval
   ret i32 %0
 }
 
@@ -198,7 +198,7 @@ entry:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
   store i32 %x, i32* %x.addr, align 4
-  %tmp = load i32* %x.addr, align 4
+  %tmp = load i32, i32* %x.addr, align 4
   %cmp = icmp sgt i32 %tmp, 1
   %conv = zext i1 %cmp to i32
   %expval = call i32 @llvm.expect.i32(i32 %conv, i32 1)
@@ -215,7 +215,7 @@ if.end:
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %0 = load i32* %retval
+  %0 = load i32, i32* %retval
   ret i32 %0
 }
 

Modified: llvm/trunk/test/CodeGen/Generic/cast-fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/cast-fp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/cast-fp.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/cast-fp.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
 declare i32 @printf(i8*, ...)
 
 define i32 @main() {
-	%a = load double* @A		; <double> [#uses=4]
+	%a = load double, double* @A		; <double> [#uses=4]
 	%a_fs = getelementptr [8 x i8], [8 x i8]* @a_fstr, i64 0, i64 0		; <i8*> [#uses=1]
 	call i32 (i8*, ...)* @printf( i8* %a_fs, double %a )		; <i32>:1 [#uses=0]
 	%a_d2l = fptosi double %a to i64		; <i64> [#uses=1]
@@ -23,7 +23,7 @@ define i32 @main() {
 	call i32 (i8*, ...)* @printf( i8* %a_ds, i8 %a_d2sb )		; <i32>:4 [#uses=0]
 	%a_d2i2sb = trunc i32 %a_d2i to i8		; <i8> [#uses=1]
 	call i32 (i8*, ...)* @printf( i8* %a_ds, i8 %a_d2i2sb )		; <i32>:5 [#uses=0]
-	%b = load i32* @B		; <i32> [#uses=2]
+	%b = load i32, i32* @B		; <i32> [#uses=2]
 	%b_ds = getelementptr [8 x i8], [8 x i8]* @b_dstr, i64 0, i64 0		; <i8*> [#uses=1]
 	call i32 (i8*, ...)* @printf( i8* %b_ds, i32 %b )		; <i32>:6 [#uses=0]
 	%b_i2d = sitofp i32 %b to double		; <double> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Generic/constindices.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/constindices.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/constindices.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/constindices.ll Fri Feb 27 15:17:42 2015
@@ -28,12 +28,12 @@ define i32 @main() {
         %fptrA2 = getelementptr %MixedA, %MixedA* %fptrA1, i64 0, i32 1          ; <[15 x i32]*> [#uses=1]
         %fptrA3 = getelementptr [15 x i32], [15 x i32]* %fptrA2, i64 0, i64 8               ; <i32*> [#uses=1]
         store i32 5, i32* %fptrA3
-        %sqrtTwo = load float* %I1              ; <float> [#uses=1]
-        %exp = load float* %I2          ; <float> [#uses=1]
+        %sqrtTwo = load float, float* %I1              ; <float> [#uses=1]
+        %exp = load float, float* %I2          ; <float> [#uses=1]
         %I3 = getelementptr %MixedA, %MixedA* %ArrayA, i64 1, i32 0              ; <float*> [#uses=1]
-        %pi = load float* %I3           ; <float> [#uses=1]
+        %pi = load float, float* %I3           ; <float> [#uses=1]
         %I4 = getelementptr %MixedB, %MixedB* %ArrayB, i64 2, i32 1, i32 0               ; <float*> [#uses=1]
-        %five = load float* %I4         ; <float> [#uses=1]
+        %five = load float, float* %I4         ; <float> [#uses=1]
         %dsqrtTwo = fpext float %sqrtTwo to double              ; <double> [#uses=1]
         %dexp = fpext float %exp to double              ; <double> [#uses=1]
         %dpi = fpext float %pi to double                ; <double> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Generic/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/crash.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/crash.ll Fri Feb 27 15:17:42 2015
@@ -13,15 +13,15 @@ inbounds ([0 x %struct.AVCodecTag]* @ff_
 
 define void @Parse_Camera(%struct.CAMERA** nocapture %Camera_Ptr) nounwind {
 entry:
-%.pre = load %struct.CAMERA** %Camera_Ptr, align 4
+%.pre = load %struct.CAMERA*, %struct.CAMERA** %Camera_Ptr, align 4
 %0 = getelementptr inbounds %struct.CAMERA, %struct.CAMERA* %.pre, i32 0, i32 1, i32 0
 %1 = getelementptr inbounds %struct.CAMERA, %struct.CAMERA* %.pre, i32 0, i32 1, i32 2
 br label %bb32
 
 bb32:                                             ; preds = %bb6
-%2 = load double* %0, align 4
-%3 = load double* %1, align 4
-%4 = load double* %0, align 4
+%2 = load double, double* %0, align 4
+%3 = load double, double* %1, align 4
+%4 = load double, double* %0, align 4
 call void @Parse_Vector(double* %0) nounwind
 %5 = call i32 @llvm.objectsize.i32.p0i8(i8* undef, i1 false)
 %6 = icmp eq i32 %5, -1

Modified: llvm/trunk/test/CodeGen/Generic/dag-combine-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/dag-combine-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/dag-combine-crash.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/dag-combine-crash.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ if.end:
   br label %block.i.i
 
 block.i.i:
-  %tmpbb = load i8* undef
+  %tmpbb = load i8, i8* undef
   %tmp54 = zext i8 %tmpbb to i64
   %tmp59 = and i64 %tmp54, 8
   %tmp60 = add i64 %tmp59, 3691045929300498764

Modified: llvm/trunk/test/CodeGen/Generic/empty-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/empty-load-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/empty-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/empty-load-store.ll Fri Feb 27 15:17:42 2015
@@ -8,11 +8,11 @@ entry:
         %retval = alloca i32
         store i32 0, i32* %retval
         %local_foo = alloca {  }
-        load {  }* @current_foo
+        load {  }, {  }* @current_foo
         store {  } %0, {  }* %local_foo
         br label %return
 
 return:
-        load i32* %retval
+        load i32, i32* %retval
         ret i32 %1
 }

Modified: llvm/trunk/test/CodeGen/Generic/empty-phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/empty-phi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/empty-phi.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/empty-phi.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ bb1:
   br i1 %1, label %bb2, label %bb3
 
 bb2:
-  %load = load [0 x { i8*, i64, i64 }]* undef, align 8
+  %load = load [0 x { i8*, i64, i64 }], [0 x { i8*, i64, i64 }]* undef, align 8
   br label %bb1
 
 bb3:

Modified: llvm/trunk/test/CodeGen/Generic/fp-to-int-invalid.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/fp-to-int-invalid.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/fp-to-int-invalid.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/fp-to-int-invalid.ll Fri Feb 27 15:17:42 2015
@@ -7,9 +7,9 @@ entry:
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
 	store i8* %result, i8** %result_addr
 	store float 0x40B2AFA160000000, float* %test, align 4
-	%0 = load float* %test, align 4		; <float> [#uses=1]
+	%0 = load float, float* %test, align 4		; <float> [#uses=1]
 	%1 = fptosi float %0 to i8		; <i8> [#uses=1]
-	%2 = load i8** %result_addr, align 4		; <i8*> [#uses=1]
+	%2 = load i8*, i8** %result_addr, align 4		; <i8*> [#uses=1]
 	store i8 %1, i8* %2, align 1
 	br label %return
 

Modified: llvm/trunk/test/CodeGen/Generic/fwdtwice.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/fwdtwice.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/fwdtwice.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/fwdtwice.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ define i32 @SumArray(i32 %Num) {
 
 Top:            ; preds = %Top, %0
         store i32 %Num, i32* %Num.upgrd.1
-        %reg108 = load i32* %Num.upgrd.1                ; <i32> [#uses=1]
+        %reg108 = load i32, i32* %Num.upgrd.1                ; <i32> [#uses=1]
         %cast1006 = bitcast i32 %reg108 to i32          ; <i32> [#uses=1]
         %cond1001 = icmp ule i32 %cast1006, 0           ; <i1> [#uses=1]
         br i1 %cond1001, label %bb6, label %Top

Modified: llvm/trunk/test/CodeGen/Generic/global-ret0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/global-ret0.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/global-ret0.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/global-ret0.ll Fri Feb 27 15:17:42 2015
@@ -3,6 +3,6 @@
 @g = global i32 0               ; <i32*> [#uses=1]
 
 define i32 @main() {
-        %h = load i32* @g               ; <i32> [#uses=1]
+        %h = load i32, i32* @g               ; <i32> [#uses=1]
         ret i32 %h
 }

Modified: llvm/trunk/test/CodeGen/Generic/inline-asm-mem-clobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/inline-asm-mem-clobber.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/inline-asm-mem-clobber.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/inline-asm-mem-clobber.ll Fri Feb 27 15:17:42 2015
@@ -8,13 +8,13 @@ entry:
   %rv = alloca i32, align 4
   store i8* %p, i8** %p.addr, align 8
   store i32 0, i32* @G, align 4
-  %0 = load i8** %p.addr, align 8
+  %0 = load i8*, i8** %p.addr, align 8
 ; CHECK: blah
   %1 = call i32 asm "blah", "=r,r,~{memory}"(i8* %0) nounwind
 ; CHECK: @G
   store i32 %1, i32* %rv, align 4
-  %2 = load i32* %rv, align 4
-  %3 = load i32* @G, align 4
+  %2 = load i32, i32* %rv, align 4
+  %3 = load i32, i32* @G, align 4
   %add = add nsw i32 %2, %3
   ret i32 %add
 }

Modified: llvm/trunk/test/CodeGen/Generic/pr2625.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/pr2625.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/pr2625.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/pr2625.ll Fri Feb 27 15:17:42 2015
@@ -7,11 +7,11 @@ entry:
         store { i32, { i32 } }* %0, { i32, { i32 } }** %state
         %retval = alloca i32            ; <i32*> [#uses=2]
         store i32 0, i32* %retval
-        load { i32, { i32 } }** %state          ; <{ i32, { i32 } }*>:1 [#uses=1]
+        load { i32, { i32 } }*, { i32, { i32 } }** %state          ; <{ i32, { i32 } }*>:1 [#uses=1]
         store { i32, { i32 } } zeroinitializer, { i32, { i32 } }* %1
         br label %return
 
 return:         ; preds = %entry
-        load i32* %retval               ; <i32>:2 [#uses=1]
+        load i32, i32* %retval               ; <i32>:2 [#uses=1]
         ret i32 %2
 }

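One case worth calling out from the pr2625.ll hunk above: when the loaded value is itself a pointer, the explicit type operand is that pointer type, so the second operand carries one more star than the first. Restating the pattern from that hunk with the operands spelled out:

        ; loading a pointer-to-struct out of a pointer-to-pointer slot
        %s = load { i32, { i32 } }*, { i32, { i32 } }** %state

The first operand names what the load produces; the second is still the address being loaded from.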
Modified: llvm/trunk/test/CodeGen/Generic/print-arith-fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/print-arith-fp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/print-arith-fp.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/print-arith-fp.ll Fri Feb 27 15:17:42 2015
@@ -18,8 +18,8 @@
 declare i32 @printf(i8*, ...)
 
 define i32 @main() {
-	%a = load double* @A		; <double> [#uses=12]
-	%b = load double* @B		; <double> [#uses=12]
+	%a = load double, double* @A		; <double> [#uses=12]
+	%b = load double, double* @B		; <double> [#uses=12]
 	%a_s = getelementptr [8 x i8], [8 x i8]* @a_str, i64 0, i64 0		; <i8*> [#uses=1]
 	%b_s = getelementptr [8 x i8], [8 x i8]* @b_str, i64 0, i64 0		; <i8*> [#uses=1]
 	call i32 (i8*, ...)* @printf( i8* %a_s, double %a )		; <i32>:1 [#uses=0]

Modified: llvm/trunk/test/CodeGen/Generic/print-arith-int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/print-arith-int.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/print-arith-int.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/print-arith-int.ll Fri Feb 27 15:17:42 2015
@@ -23,8 +23,8 @@
 declare i32 @printf(i8*, ...)
 
 define i32 @main() {
-	%a = load i32* @A		; <i32> [#uses=16]
-	%b = load i32* @B		; <i32> [#uses=17]
+	%a = load i32, i32* @A		; <i32> [#uses=16]
+	%b = load i32, i32* @B		; <i32> [#uses=17]
 	%a_s = getelementptr [8 x i8], [8 x i8]* @a_str, i64 0, i64 0		; <i8*> [#uses=1]
 	%b_s = getelementptr [8 x i8], [8 x i8]* @b_str, i64 0, i64 0		; <i8*> [#uses=1]
 	call i32 (i8*, ...)* @printf( i8* %a_s, i32 %a )		; <i32>:1 [#uses=0]

Modified: llvm/trunk/test/CodeGen/Generic/print-mul-exp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/print-mul-exp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/print-mul-exp.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/print-mul-exp.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 declare i32 @printf(i8*, ...)
 
 define i32 @main() {
-	%a = load i32* @A		; <i32> [#uses=21]
+	%a = load i32, i32* @A		; <i32> [#uses=21]
 	%a_s = getelementptr [8 x i8], [8 x i8]* @a_str, i64 0, i64 0		; <i8*> [#uses=1]
 	%a_mul_s = getelementptr [13 x i8], [13 x i8]* @a_mul_str, i64 0, i64 0		; <i8*> [#uses=20]
 	call i32 (i8*, ...)* @printf( i8* %a_s, i32 %a )		; <i32>:1 [#uses=0]

Modified: llvm/trunk/test/CodeGen/Generic/print-mul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/print-mul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/print-mul.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/print-mul.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ declare i32 @printf(i8*, ...)
 
 define i32 @main() {
 entry:
-	%a = load i32* @A		; <i32> [#uses=2]
-	%b = load i32* @B		; <i32> [#uses=1]
+	%a = load i32, i32* @A		; <i32> [#uses=2]
+	%b = load i32, i32* @B		; <i32> [#uses=1]
 	%a_s = getelementptr [8 x i8], [8 x i8]* @a_str, i64 0, i64 0		; <i8*> [#uses=1]
 	%b_s = getelementptr [8 x i8], [8 x i8]* @b_str, i64 0, i64 0		; <i8*> [#uses=1]
 	%a_mul_s = getelementptr [13 x i8], [13 x i8]* @a_mul_str, i64 0, i64 0		; <i8*> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Generic/print-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/print-shift.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/print-shift.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/print-shift.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ declare i32 @printf(i8*, ...)
 
 define i32 @main() {
 entry:
-        %a = load i32* @A               ; <i32> [#uses=2]
-        %b = load i32* @B               ; <i32> [#uses=1]
+        %a = load i32, i32* @A               ; <i32> [#uses=2]
+        %b = load i32, i32* @B               ; <i32> [#uses=1]
         %a_s = getelementptr [8 x i8], [8 x i8]* @a_str, i64 0, i64 0             ; <i8*> [#uses=1]
         %b_s = getelementptr [8 x i8], [8 x i8]* @b_str, i64 0, i64 0             ; <i8*> [#uses=1]
         %a_shl_s = getelementptr [14 x i8], [14 x i8]* @a_shl_str, i64 0, i64 0            ; <i8*> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Generic/select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/select.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/select.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/select.ll Fri Feb 27 15:17:42 2015
@@ -70,7 +70,7 @@ define i32* @castconst(float) {
         %castsmall = trunc i64 1 to i32         ; <i32> [#uses=1]
         %usebig = add i32 %castbig, %castsmall          ; <i32> [#uses=0]
         %castglob = bitcast i32* @AConst to i64*                ; <i64*> [#uses=1]
-        %dummyl = load i64* %castglob           ; <i64> [#uses=0]
+        %dummyl = load i64, i64* %castglob           ; <i64> [#uses=0]
         %castnull = inttoptr i64 0 to i32*              ; <i32*> [#uses=1]
         ret i32* %castnull
 }
@@ -155,7 +155,7 @@ bb2:
         %cast116 = ptrtoint i32* %A to i64              ; <i64> [#uses=1]
         %reg116 = add i64 %cast116, %cast115            ; <i64> [#uses=1]
         %castPtr = inttoptr i64 %reg116 to i32*         ; <i32*> [#uses=1]
-        %reg118 = load i32* %castPtr            ; <i32> [#uses=1]
+        %reg118 = load i32, i32* %castPtr            ; <i32> [#uses=1]
         %cast117 = sext i32 %reg118 to i64              ; <i64> [#uses=2]
         %reg159 = add i64 1234567, %cast117             ; <i64> [#uses=0]
         %reg160 = add i64 7654321, %cast117             ; <i64> [#uses=0]
@@ -181,7 +181,7 @@ define void @checkNot(i1 %b, i32 %i) {
 ;
 define i32 @checkFoldGEP(%Domain* %D, i64 %idx) {
         %reg841 = getelementptr %Domain, %Domain* %D, i64 0, i32 1               ; <i32*> [#uses=1]
-        %reg820 = load i32* %reg841             ; <i32> [#uses=1]
+        %reg820 = load i32, i32* %reg841             ; <i32> [#uses=1]
         ret i32 %reg820
 }
 

Modified: llvm/trunk/test/CodeGen/Generic/undef-phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/undef-phi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/undef-phi.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/undef-phi.ll Fri Feb 27 15:17:42 2015
@@ -14,13 +14,13 @@ entry:
 for.body:
   %stack.addr.02 = phi %struct.xx_stack* [ %0, %for.body ], [ %stack, %entry ]
   %next = getelementptr inbounds %struct.xx_stack, %struct.xx_stack* %stack.addr.02, i64 0, i32 1
-  %0 = load %struct.xx_stack** %next, align 8
+  %0 = load %struct.xx_stack*, %struct.xx_stack** %next, align 8
   %tobool = icmp eq %struct.xx_stack* %0, null
   br i1 %tobool, label %for.end, label %for.body
 
 for.end:
   %top.0.lcssa = phi %struct.xx_stack* [ undef, %entry ], [ %stack.addr.02, %for.body ]
   %first = getelementptr inbounds %struct.xx_stack, %struct.xx_stack* %top.0.lcssa, i64 0, i32 0
-  %1 = load i32* %first, align 4
+  %1 = load i32, i32* %first, align 4
   ret i32 %1
 }

Modified: llvm/trunk/test/CodeGen/Generic/v-split.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/v-split.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/v-split.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/v-split.ll Fri Feb 27 15:17:42 2015
@@ -2,8 +2,8 @@
 %f8 = type <8 x float>
 
 define void @test_f8(%f8 *%P, %f8* %Q, %f8 *%S) {
-  %p = load %f8* %P
-  %q = load %f8* %Q
+  %p = load %f8, %f8* %P
+  %q = load %f8, %f8* %Q
   %R = fadd %f8 %p, %q
   store %f8 %R, %f8 *%S
   ret void

Modified: llvm/trunk/test/CodeGen/Generic/vector-casts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/vector-casts.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/vector-casts.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/vector-casts.ll Fri Feb 27 15:17:42 2015
@@ -2,43 +2,43 @@
 ; PR2671
 
 define void @a(<2 x double>* %p, <2 x i8>* %q) {
-        %t = load <2 x double>* %p
+        %t = load <2 x double>, <2 x double>* %p
 	%r = fptosi <2 x double> %t to <2 x i8>
         store <2 x i8> %r, <2 x i8>* %q
 	ret void
 }
 define void @b(<2 x double>* %p, <2 x i8>* %q) {
-        %t = load <2 x double>* %p
+        %t = load <2 x double>, <2 x double>* %p
 	%r = fptoui <2 x double> %t to <2 x i8>
         store <2 x i8> %r, <2 x i8>* %q
 	ret void
 }
 define void @c(<2 x i8>* %p, <2 x double>* %q) {
-        %t = load <2 x i8>* %p
+        %t = load <2 x i8>, <2 x i8>* %p
 	%r = sitofp <2 x i8> %t to <2 x double>
         store <2 x double> %r, <2 x double>* %q
 	ret void
 }
 define void @d(<2 x i8>* %p, <2 x double>* %q) {
-        %t = load <2 x i8>* %p
+        %t = load <2 x i8>, <2 x i8>* %p
 	%r = uitofp <2 x i8> %t to <2 x double>
         store <2 x double> %r, <2 x double>* %q
 	ret void
 }
 define void @e(<2 x i8>* %p, <2 x i16>* %q) {
-        %t = load <2 x i8>* %p
+        %t = load <2 x i8>, <2 x i8>* %p
 	%r = sext <2 x i8> %t to <2 x i16>
         store <2 x i16> %r, <2 x i16>* %q
 	ret void
 }
 define void @f(<2 x i8>* %p, <2 x i16>* %q) {
-        %t = load <2 x i8>* %p
+        %t = load <2 x i8>, <2 x i8>* %p
 	%r = zext <2 x i8> %t to <2 x i16>
         store <2 x i16> %r, <2 x i16>* %q
 	ret void
 }
 define void @g(<2 x i16>* %p, <2 x i8>* %q) {
-        %t = load <2 x i16>* %p
+        %t = load <2 x i16>, <2 x i16>* %p
 	%r = trunc <2 x i16> %t to <2 x i8>
         store <2 x i8> %r, <2 x i8>* %q
 	ret void

Modified: llvm/trunk/test/CodeGen/Generic/vector-identity-shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/vector-identity-shuffle.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/vector-identity-shuffle.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/vector-identity-shuffle.ll Fri Feb 27 15:17:42 2015
@@ -2,7 +2,7 @@
 
 
 define void @test(<4 x float>* %tmp2.i) {
-        %tmp2.i.upgrd.1 = load <4 x float>* %tmp2.i             ; <<4 x float>> [#uses=4]
+        %tmp2.i.upgrd.1 = load <4 x float>, <4 x float>* %tmp2.i             ; <<4 x float>> [#uses=4]
         %xFloat0.48 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 0         ; <float> [#uses=1]
         %inFloat0.49 = insertelement <4 x float> undef, float %xFloat0.48, i32 0                ; <<4 x float>> [#uses=1]
         %xFloat1.50 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 1         ; <float> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Generic/vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/vector.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/vector.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/vector.ll Fri Feb 27 15:17:42 2015
@@ -12,48 +12,48 @@
 ;;; TEST HANDLING OF VARIOUS VECTOR SIZES
 
 define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
-	%p = load %f1* %P		; <%f1> [#uses=1]
-	%q = load %f1* %Q		; <%f1> [#uses=1]
+	%p = load %f1, %f1* %P		; <%f1> [#uses=1]
+	%q = load %f1, %f1* %Q		; <%f1> [#uses=1]
 	%R = fadd %f1 %p, %q		; <%f1> [#uses=1]
 	store %f1 %R, %f1* %S
 	ret void
 }
 
 define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
-	%p = load %f2* %P		; <%f2> [#uses=1]
-	%q = load %f2* %Q		; <%f2> [#uses=1]
+	%p = load %f2, %f2* %P		; <%f2> [#uses=1]
+	%q = load %f2, %f2* %Q		; <%f2> [#uses=1]
 	%R = fadd %f2 %p, %q		; <%f2> [#uses=1]
 	store %f2 %R, %f2* %S
 	ret void
 }
 
 define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
-	%p = load %f4* %P		; <%f4> [#uses=1]
-	%q = load %f4* %Q		; <%f4> [#uses=1]
+	%p = load %f4, %f4* %P		; <%f4> [#uses=1]
+	%q = load %f4, %f4* %Q		; <%f4> [#uses=1]
 	%R = fadd %f4 %p, %q		; <%f4> [#uses=1]
 	store %f4 %R, %f4* %S
 	ret void
 }
 
 define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
-	%p = load %f8* %P		; <%f8> [#uses=1]
-	%q = load %f8* %Q		; <%f8> [#uses=1]
+	%p = load %f8, %f8* %P		; <%f8> [#uses=1]
+	%q = load %f8, %f8* %Q		; <%f8> [#uses=1]
 	%R = fadd %f8 %p, %q		; <%f8> [#uses=1]
 	store %f8 %R, %f8* %S
 	ret void
 }
 
 define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) {
-	%p = load %f8* %P		; <%f8> [#uses=1]
-	%q = load %f8* %Q		; <%f8> [#uses=1]
+	%p = load %f8, %f8* %P		; <%f8> [#uses=1]
+	%q = load %f8, %f8* %Q		; <%f8> [#uses=1]
 	%R = fmul %f8 %p, %q		; <%f8> [#uses=1]
 	store %f8 %R, %f8* %S
 	ret void
 }
 
 define void @test_div(%f8* %P, %f8* %Q, %f8* %S) {
-	%p = load %f8* %P		; <%f8> [#uses=1]
-	%q = load %f8* %Q		; <%f8> [#uses=1]
+	%p = load %f8, %f8* %P		; <%f8> [#uses=1]
+	%q = load %f8, %f8* %Q		; <%f8> [#uses=1]
 	%R = fdiv %f8 %p, %q		; <%f8> [#uses=1]
 	store %f8 %R, %f8* %S
 	ret void
@@ -63,21 +63,21 @@ define void @test_div(%f8* %P, %f8* %Q,
 
 
 define void @test_cst(%f4* %P, %f4* %S) {
-	%p = load %f4* %P		; <%f4> [#uses=1]
+	%p = load %f4, %f4* %P		; <%f4> [#uses=1]
 	%R = fadd %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float 2.000000e+00, float 4.500000e+00 >		; <%f4> [#uses=1]
 	store %f4 %R, %f4* %S
 	ret void
 }
 
 define void @test_zero(%f4* %P, %f4* %S) {
-	%p = load %f4* %P		; <%f4> [#uses=1]
+	%p = load %f4, %f4* %P		; <%f4> [#uses=1]
 	%R = fadd %f4 %p, zeroinitializer		; <%f4> [#uses=1]
 	store %f4 %R, %f4* %S
 	ret void
 }
 
 define void @test_undef(%f4* %P, %f4* %S) {
-	%p = load %f4* %P		; <%f4> [#uses=1]
+	%p = load %f4, %f4* %P		; <%f4> [#uses=1]
 	%R = fadd %f4 %p, undef		; <%f4> [#uses=1]
 	store %f4 %R, %f4* %S
 	ret void
@@ -102,19 +102,19 @@ define void @test_scalar_to_vector(float
 }
 
 define float @test_extract_elt(%f8* %P) {
-	%p = load %f8* %P		; <%f8> [#uses=1]
+	%p = load %f8, %f8* %P		; <%f8> [#uses=1]
 	%R = extractelement %f8 %p, i32 3		; <float> [#uses=1]
 	ret float %R
 }
 
 define double @test_extract_elt2(%d8* %P) {
-	%p = load %d8* %P		; <%d8> [#uses=1]
+	%p = load %d8, %d8* %P		; <%d8> [#uses=1]
 	%R = extractelement %d8 %p, i32 3		; <double> [#uses=1]
 	ret double %R
 }
 
 define void @test_cast_1(%f4* %b, %i4* %a) {
-	%tmp = load %f4* %b		; <%f4> [#uses=1]
+	%tmp = load %f4, %f4* %b		; <%f4> [#uses=1]
 	%tmp2 = fadd %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 >		; <%f4> [#uses=1]
 	%tmp3 = bitcast %f4 %tmp2 to %i4		; <%i4> [#uses=1]
 	%tmp4 = add %i4 %tmp3, < i32 1, i32 2, i32 3, i32 4 >		; <%i4> [#uses=1]
@@ -123,7 +123,7 @@ define void @test_cast_1(%f4* %b, %i4* %
 }
 
 define void @test_cast_2(%f8* %a, <8 x i32>* %b) {
-	%T = load %f8* %a		; <%f8> [#uses=1]
+	%T = load %f8, %f8* %a		; <%f8> [#uses=1]
 	%T2 = bitcast %f8 %T to <8 x i32>		; <<8 x i32>> [#uses=1]
 	store <8 x i32> %T2, <8 x i32>* %b
 	ret void
@@ -136,7 +136,7 @@ define void @splat(%f4* %P, %f4* %Q, flo
 	%tmp2 = insertelement %f4 %tmp, float %X, i32 1		; <%f4> [#uses=1]
 	%tmp4 = insertelement %f4 %tmp2, float %X, i32 2		; <%f4> [#uses=1]
 	%tmp6 = insertelement %f4 %tmp4, float %X, i32 3		; <%f4> [#uses=1]
-	%q = load %f4* %Q		; <%f4> [#uses=1]
+	%q = load %f4, %f4* %Q		; <%f4> [#uses=1]
 	%R = fadd %f4 %q, %tmp6		; <%f4> [#uses=1]
 	store %f4 %R, %f4* %P
 	ret void
@@ -147,7 +147,7 @@ define void @splat_i4(%i4* %P, %i4* %Q,
 	%tmp2 = insertelement %i4 %tmp, i32 %X, i32 1		; <%i4> [#uses=1]
 	%tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2		; <%i4> [#uses=1]
 	%tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3		; <%i4> [#uses=1]
-	%q = load %i4* %Q		; <%i4> [#uses=1]
+	%q = load %i4, %i4* %Q		; <%i4> [#uses=1]
 	%R = add %i4 %q, %tmp6		; <%i4> [#uses=1]
 	store %i4 %R, %i4* %P
 	ret void

Modified: llvm/trunk/test/CodeGen/Hexagon/BranchPredict.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/BranchPredict.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/BranchPredict.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/BranchPredict.ll Fri Feb 27 15:17:42 2015
@@ -53,7 +53,7 @@ return:
 define i32 @foo_bar(i32 %a, i16 signext %b) nounwind {
 ; CHECK: if{{ *}}(!cmp.eq(r{{[0-9]*}}.new, #0)) jump:nt
 entry:
-  %0 = load i32* @j, align 4
+  %0 = load i32, i32* @j, align 4
   %tobool = icmp eq i32 %0, 0
   br i1 %tobool, label %if.else, label %if.then, !prof !0
 

Modified: llvm/trunk/test/CodeGen/Hexagon/absaddr-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/absaddr-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/absaddr-store.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/absaddr-store.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@
 define zeroext i8 @absStoreByte() nounwind {
 ; CHECK: memb(##b){{ *}}={{ *}}r{{[0-9]+}}
 entry:
-  %0 = load i8* @b, align 1
+  %0 = load i8, i8* @b, align 1
   %conv = zext i8 %0 to i32
   %mul = mul nsw i32 100, %conv
   %conv1 = trunc i32 %mul to i8
@@ -20,7 +20,7 @@ entry:
 define signext i16 @absStoreHalf() nounwind {
 ; CHECK: memh(##c){{ *}}={{ *}}r{{[0-9]+}}
 entry:
-  %0 = load i16* @c, align 2
+  %0 = load i16, i16* @c, align 2
   %conv = sext i16 %0 to i32
   %mul = mul nsw i32 100, %conv
   %conv1 = trunc i32 %mul to i16
@@ -31,7 +31,7 @@ entry:
 define i32 @absStoreWord() nounwind {
 ; CHECK: memw(##a){{ *}}={{ *}}r{{[0-9]+}}
 entry:
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
   %mul = mul nsw i32 100, %0
   store i32 %mul, i32* @a, align 4
   ret i32 %mul

Modified: llvm/trunk/test/CodeGen/Hexagon/absimm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/absimm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/absimm.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/absimm.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ entry:
 define i32* @f2(i32* nocapture %i) nounwind {
 entry:
 ; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(##786432)
-  %0 = load volatile i32* inttoptr (i32 786432 to i32*), align 262144
+  %0 = load volatile i32, i32* inttoptr (i32 786432 to i32*), align 262144
   %1 = inttoptr i32 %0 to i32*
   ret i32* %1
   }

Modified: llvm/trunk/test/CodeGen/Hexagon/always-ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/always-ext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/always-ext.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/always-ext.ll Fri Feb 27 15:17:42 2015
@@ -24,8 +24,8 @@ entry:
   br i1 undef, label %for.body.us, label %for.end
 
 for.body.us:                                      ; preds = %entry
-  %0 = load %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111** null, align 4
-  %1 = load i32* undef, align 4
+  %0 = load %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111*, %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111** null, align 4
+  %1 = load i32, i32* undef, align 4
   %cmp.i.us = icmp slt i32 %1, 1024
   br i1 %cmp.i.us, label %CuSuiteAdd.exit.us, label %cond.false6.i.us
 

Modified: llvm/trunk/test/CodeGen/Hexagon/block-addr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/block-addr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/block-addr.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/block-addr.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ entry:
   br label %while.body
 
 while.body:
-  %ret.0.load17 = load volatile i32* %ret, align 4
+  %ret.0.load17 = load volatile i32, i32* %ret, align 4
   switch i32 %ret.0.load17, label %label6 [
     i32 0, label %label0
     i32 1, label %label1
@@ -21,37 +21,37 @@ while.body:
   ]
 
 label0:
-  %ret.0.load18 = load volatile i32* %ret, align 4
+  %ret.0.load18 = load volatile i32, i32* %ret, align 4
   %inc = add nsw i32 %ret.0.load18, 1
   store volatile i32 %inc, i32* %ret, align 4
   br label %while.body
 
 label1:
-  %ret.0.load19 = load volatile i32* %ret, align 4
+  %ret.0.load19 = load volatile i32, i32* %ret, align 4
   %inc2 = add nsw i32 %ret.0.load19, 1
   store volatile i32 %inc2, i32* %ret, align 4
   br label %while.body
 
 label2:
-  %ret.0.load20 = load volatile i32* %ret, align 4
+  %ret.0.load20 = load volatile i32, i32* %ret, align 4
   %inc4 = add nsw i32 %ret.0.load20, 1
   store volatile i32 %inc4, i32* %ret, align 4
   br label %while.body
 
 label3:
-  %ret.0.load21 = load volatile i32* %ret, align 4
+  %ret.0.load21 = load volatile i32, i32* %ret, align 4
   %inc6 = add nsw i32 %ret.0.load21, 1
   store volatile i32 %inc6, i32* %ret, align 4
   br label %while.body
 
 label4:
-  %ret.0.load22 = load volatile i32* %ret, align 4
+  %ret.0.load22 = load volatile i32, i32* %ret, align 4
   %inc8 = add nsw i32 %ret.0.load22, 1
   store volatile i32 %inc8, i32* %ret, align 4
   br label %while.body
 
 label5:
-  %ret.0.load23 = load volatile i32* %ret, align 4
+  %ret.0.load23 = load volatile i32, i32* %ret, align 4
   %inc10 = add nsw i32 %ret.0.load23, 1
   store volatile i32 %inc10, i32* %ret, align 4
   br label %while.body

Modified: llvm/trunk/test/CodeGen/Hexagon/cext-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/cext-check.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/cext-check.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/cext-check.ll Fri Feb 27 15:17:42 2015
@@ -7,19 +7,19 @@ define i32 @cext_test1(i32* %a) nounwind
 ; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}##4092)
 ; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}##300)
 entry:
-  %0 = load i32* %a, align 4
+  %0 = load i32, i32* %a, align 4
   %tobool = icmp ne i32 %0, 0
   br i1 %tobool, label %if.then, label %if.end
 
 if.then:
   %arrayidx1 = getelementptr inbounds i32, i32* %a, i32 2000
-  %1 = load i32* %arrayidx1, align 4
+  %1 = load i32, i32* %arrayidx1, align 4
   %add = add nsw i32 %1, 300000
   br label %return
 
 if.end:
   %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1023
-  %2 = load i32* %arrayidx2, align 4
+  %2 = load i32, i32* %arrayidx2, align 4
   %add3 = add nsw i32 %2, 300
   br label %return
 
@@ -39,14 +39,14 @@ entry:
 
 if.then:
   %arrayidx = getelementptr inbounds i8, i8* %a, i32 1023
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 300000
   br label %return
 
 if.end:
   %arrayidx1 = getelementptr inbounds i8, i8* %a, i32 1024
-  %1 = load i8* %arrayidx1, align 1
+  %1 = load i8, i8* %arrayidx1, align 1
   %conv2 = zext i8 %1 to i32
   %add3 = add nsw i32 %conv2, 6000
   br label %return

Modified: llvm/trunk/test/CodeGen/Hexagon/cext-valid-packet2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/cext-valid-packet2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/cext-valid-packet2.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/cext-valid-packet2.ll Fri Feb 27 15:17:42 2015
@@ -10,24 +10,24 @@
 define i32 @test(i32* nocapture %a, i32* nocapture %b, i32 %c) nounwind {
 entry:
   %add = add nsw i32 %c, 200002
-  %0 = load i32* %a, align 4
+  %0 = load i32, i32* %a, align 4
   %add1 = add nsw i32 %0, 200000
   %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 3000
   store i32 %add1, i32* %arrayidx2, align 4
-  %1 = load i32* %b, align 4
+  %1 = load i32, i32* %b, align 4
   %add4 = add nsw i32 %1, 200001
   %arrayidx5 = getelementptr inbounds i32, i32* %a, i32 1
   store i32 %add4, i32* %arrayidx5, align 4
   %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 1
-  %2 = load i32* %arrayidx7, align 4
+  %2 = load i32, i32* %arrayidx7, align 4
   %cmp = icmp sgt i32 %add4, %2
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
   %arrayidx8 = getelementptr inbounds i32, i32* %a, i32 2
-  %3 = load i32* %arrayidx8, align 4
+  %3 = load i32, i32* %arrayidx8, align 4
   %arrayidx9 = getelementptr inbounds i32, i32* %b, i32 2000
-  %4 = load i32* %arrayidx9, align 4
+  %4 = load i32, i32* %arrayidx9, align 4
   %sub = sub nsw i32 %3, %4
   %arrayidx10 = getelementptr inbounds i32, i32* %a, i32 4000
   store i32 %sub, i32* %arrayidx10, align 4

Modified: llvm/trunk/test/CodeGen/Hexagon/cmp_pred2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/cmp_pred2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/cmp_pred2.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/cmp_pred2.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ entry:
   br i1 %cmp, label %if.then, label %entry.if.end_crit_edge
 
 entry.if.end_crit_edge:
-  %.pre = load i32* @c, align 4
+  %.pre = load i32, i32* @c, align 4
   br label %if.end
 
 if.then:
@@ -32,7 +32,7 @@ entry:
   br i1 %cmp, label %entry.if.end_crit_edge, label %if.then
 
 entry.if.end_crit_edge:
-  %.pre = load i32* @c, align 4
+  %.pre = load i32, i32* @c, align 4
   br label %if.end
 
 if.then:
@@ -53,7 +53,7 @@ entry:
   br i1 %cmp, label %entry.if.end_crit_edge, label %if.then
 
 entry.if.end_crit_edge:
-  %.pre = load i32* @c, align 4
+  %.pre = load i32, i32* @c, align 4
   br label %if.end
 
 if.then:
@@ -73,7 +73,7 @@ entry:
   br i1 %cmp, label %if.then, label %entry.if.end_crit_edge
 
 entry.if.end_crit_edge:
-  %.pre = load i32* @c, align 4
+  %.pre = load i32, i32* @c, align 4
   br label %if.end
 
 if.then:

Modified: llvm/trunk/test/CodeGen/Hexagon/cmpb_pred.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/cmpb_pred.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/cmpb_pred.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/cmpb_pred.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ entry:
 define i32 @Func_3b(i32) nounwind readonly {
 entry:
 ; CHECK-NOT: mux
-  %1 = load i8* @Enum_global, align 1
+  %1 = load i8, i8* @Enum_global, align 1
   %2 = trunc i32 %0 to i8
   %cmp = icmp ne i8 %1, %2
   %selv = zext i1 %cmp to i32
@@ -35,7 +35,7 @@ entry:
 define i32 @Func_3d(i32) nounwind readonly {
 entry:
 ; CHECK-NOT: mux
-  %1 = load i8* @Enum_global, align 1
+  %1 = load i8, i8* @Enum_global, align 1
   %2 = trunc i32 %0 to i8
   %cmp = icmp eq i8 %1, %2
   %selv = zext i1 %cmp to i32
@@ -45,7 +45,7 @@ entry:
 define i32 @Func_3e(i32) nounwind readonly {
 entry:
 ; CHECK-NOT: mux
-  %1 = load i8* @Enum_global, align 1
+  %1 = load i8, i8* @Enum_global, align 1
   %2 = trunc i32 %0 to i8
   %cmp = icmp eq i8 %1, %2
   %selv = zext i1 %cmp to i32

Modified: llvm/trunk/test/CodeGen/Hexagon/combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/combine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/combine.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/combine.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load i32* @j, align 4
-  %1 = load i64* @k, align 8
+  %0 = load i32, i32* @j, align 4
+  %1 = load i64, i64* @k, align 8
   %conv = trunc i64 %1 to i32
   %2 = call i64 @llvm.hexagon.A2.combinew(i32 %0, i32 %conv)
   store i64 %2, i64* @k, align 8

Modified: llvm/trunk/test/CodeGen/Hexagon/combine_ir.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/combine_ir.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/combine_ir.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/combine_ir.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 
 define void @word(i32* nocapture %a) nounwind {
 entry:
-  %0 = load i32* %a, align 4
+  %0 = load i32, i32* %a, align 4
   %1 = zext i32 %0 to i64
   tail call void @bar(i64 %1) nounwind
   ret void
@@ -17,10 +17,10 @@ declare void @bar(i64)
 
 define void @halfword(i16* nocapture %a) nounwind {
 entry:
-  %0 = load i16* %a, align 2
+  %0 = load i16, i16* %a, align 2
   %1 = zext i16 %0 to i64
   %add.ptr = getelementptr inbounds i16, i16* %a, i32 1
-  %2 = load i16* %add.ptr, align 2
+  %2 = load i16, i16* %add.ptr, align 2
   %3 = zext i16 %2 to i64
   %4 = shl nuw nsw i64 %3, 16
   %ins = or i64 %4, %1
@@ -33,10 +33,10 @@ entry:
 
 define void @byte(i8* nocapture %a) nounwind {
 entry:
-  %0 = load i8* %a, align 1
+  %0 = load i8, i8* %a, align 1
   %1 = zext i8 %0 to i64
   %add.ptr = getelementptr inbounds i8, i8* %a, i32 1
-  %2 = load i8* %add.ptr, align 1
+  %2 = load i8, i8* %add.ptr, align 1
   %3 = zext i8 %2 to i64
   %4 = shl nuw nsw i64 %3, 8
   %ins = or i64 %4, %1

Modified: llvm/trunk/test/CodeGen/Hexagon/convertdptoint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/convertdptoint.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/convertdptoint.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/convertdptoint.ll Fri Feb 27 15:17:42 2015
@@ -14,13 +14,13 @@ entry:
   store i32 0, i32* %retval
   store double 1.540000e+01, double* %a, align 8
   store double 9.100000e+00, double* %b, align 8
-  %0 = load double* %a, align 8
-  %1 = load double* %b, align 8
+  %0 = load double, double* %a, align 8
+  %1 = load double, double* %b, align 8
   %add = fadd double %0, %1
   store double %add, double* %c, align 8
-  %2 = load double* %c, align 8
+  %2 = load double, double* %c, align 8
   %conv = fptosi double %2 to i32
   store i32 %conv, i32* %i, align 4
-  %3 = load i32* %i, align 4
+  %3 = load i32, i32* %i, align 4
   ret i32 %3
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/convertdptoll.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/convertdptoll.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/convertdptoll.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/convertdptoll.ll Fri Feb 27 15:17:42 2015
@@ -14,14 +14,14 @@ entry:
   store i32 0, i32* %retval
   store double 1.540000e+01, double* %a, align 8
   store double 9.100000e+00, double* %b, align 8
-  %0 = load double* %a, align 8
-  %1 = load double* %b, align 8
+  %0 = load double, double* %a, align 8
+  %1 = load double, double* %b, align 8
   %add = fadd double %0, %1
   store double %add, double* %c, align 8
-  %2 = load double* %c, align 8
+  %2 = load double, double* %c, align 8
   %conv = fptosi double %2 to i64
   store i64 %conv, i64* %i, align 8
-  %3 = load i64* %i, align 8
+  %3 = load i64, i64* %i, align 8
   %conv1 = trunc i64 %3 to i32
   ret i32 %conv1
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/convertsptoint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/convertsptoint.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/convertsptoint.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/convertsptoint.ll Fri Feb 27 15:17:42 2015
@@ -14,13 +14,13 @@ entry:
   store i32 0, i32* %retval
   store float 0x402ECCCCC0000000, float* %a, align 4
   store float 0x4022333340000000, float* %b, align 4
-  %0 = load float* %a, align 4
-  %1 = load float* %b, align 4
+  %0 = load float, float* %a, align 4
+  %1 = load float, float* %b, align 4
   %add = fadd float %0, %1
   store float %add, float* %c, align 4
-  %2 = load float* %c, align 4
+  %2 = load float, float* %c, align 4
   %conv = fptosi float %2 to i32
   store i32 %conv, i32* %i, align 4
-  %3 = load i32* %i, align 4
+  %3 = load i32, i32* %i, align 4
   ret i32 %3
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/convertsptoll.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/convertsptoll.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/convertsptoll.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/convertsptoll.ll Fri Feb 27 15:17:42 2015
@@ -14,14 +14,14 @@ entry:
   store i32 0, i32* %retval
   store float 0x402ECCCCC0000000, float* %a, align 4
   store float 0x4022333340000000, float* %b, align 4
-  %0 = load float* %a, align 4
-  %1 = load float* %b, align 4
+  %0 = load float, float* %a, align 4
+  %1 = load float, float* %b, align 4
   %add = fadd float %0, %1
   store float %add, float* %c, align 4
-  %2 = load float* %c, align 4
+  %2 = load float, float* %c, align 4
   %conv = fptosi float %2 to i64
   store i64 %conv, i64* %i, align 8
-  %3 = load i64* %i, align 8
+  %3 = load i64, i64* %i, align 8
   %conv1 = trunc i64 %3 to i32
   ret i32 %conv1
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/dadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/dadd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/dadd.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/dadd.ll Fri Feb 27 15:17:42 2015
@@ -11,8 +11,8 @@ entry:
   %c = alloca double, align 8
   store double 1.540000e+01, double* %a, align 8
   store double 9.100000e+00, double* %b, align 8
-  %0 = load double* %a, align 8
-  %1 = load double* %b, align 8
+  %0 = load double, double* %a, align 8
+  %1 = load double, double* %b, align 8
   %add = fadd double %0, %1
   store double %add, double* %c, align 8
   ret i32 0

Modified: llvm/trunk/test/CodeGen/Hexagon/dmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/dmul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/dmul.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/dmul.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ entry:
   %c = alloca double, align 8
   store double 1.540000e+01, double* %a, align 8
   store double 9.100000e+00, double* %b, align 8
-  %0 = load double* %b, align 8
-  %1 = load double* %a, align 8
+  %0 = load double, double* %b, align 8
+  %1 = load double, double* %a, align 8
   %mul = fmul double %0, %1
   store double %mul, double* %c, align 8
   ret i32 0

Modified: llvm/trunk/test/CodeGen/Hexagon/double.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/double.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/double.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/double.ll Fri Feb 27 15:17:42 2015
@@ -10,13 +10,13 @@ entry:
   store double* %acc, double** %acc.addr, align 4
   store double %num, double* %num.addr, align 8
   store double %num2, double* %num2.addr, align 8
-  %0 = load double** %acc.addr, align 4
-  %1 = load double* %0
-  %2 = load double* %num.addr, align 8
+  %0 = load double*, double** %acc.addr, align 4
+  %1 = load double, double* %0
+  %2 = load double, double* %num.addr, align 8
   %add = fadd double %1, %2
-  %3 = load double* %num2.addr, align 8
+  %3 = load double, double* %num2.addr, align 8
   %sub = fsub double %add, %3
-  %4 = load double** %acc.addr, align 4
+  %4 = load double*, double** %acc.addr, align 4
   store double %sub, double* %4
   ret void
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll Fri Feb 27 15:17:42 2015
@@ -14,13 +14,13 @@ entry:
   store i32 0, i32* %retval
   store double 1.540000e+01, double* %a, align 8
   store double 9.100000e+00, double* %b, align 8
-  %0 = load double* %a, align 8
-  %1 = load double* %b, align 8
+  %0 = load double, double* %a, align 8
+  %1 = load double, double* %b, align 8
   %add = fadd double %0, %1
   store double %add, double* %c, align 8
-  %2 = load double* %c, align 8
+  %2 = load double, double* %c, align 8
   %conv = fptosi double %2 to i32
   store i32 %conv, i32* %i, align 4
-  %3 = load i32* %i, align 4
+  %3 = load i32, i32* %i, align 4
   ret i32 %3
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/dsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/dsub.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/dsub.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/dsub.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ entry:
   %c = alloca double, align 8
   store double 1.540000e+01, double* %a, align 8
   store double 9.100000e+00, double* %b, align 8
-  %0 = load double* %b, align 8
-  %1 = load double* %a, align 8
+  %0 = load double, double* %b, align 8
+  %1 = load double, double* %a, align 8
   %sub = fsub double %0, %1
   store double %sub, double* %c, align 8
   ret i32 0

Modified: llvm/trunk/test/CodeGen/Hexagon/extload-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/extload-combine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/extload-combine.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/extload-combine.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ define i64 @short_test1() #0 {
 ; CHECK: combine(#0, [[VAR]])
 entry:
   store i16 0, i16* @a, align 2
-  %0 = load i16* @b, align 2
+  %0 = load i16, i16* @b, align 2
   %conv2 = zext i16 %0 to i64
   ret i64 %conv2
 }
@@ -30,7 +30,7 @@ define i64 @short_test2() #0 {
 ; CHECK: sxtw([[VAR1]])
 entry:
   store i16 0, i16* @a, align 2
-  %0 = load i16* @c, align 2
+  %0 = load i16, i16* @c, align 2
   %conv2 = sext i16 %0 to i64
   ret i64 %conv2
 }
@@ -41,7 +41,7 @@ define i64 @char_test1() #0 {
 ; CHECK: combine(#0, [[VAR2]])
 entry:
   store i8 0, i8* @char_a, align 1
-  %0 = load i8* @char_b, align 1
+  %0 = load i8, i8* @char_b, align 1
   %conv2 = zext i8 %0 to i64
   ret i64 %conv2
 }
@@ -52,7 +52,7 @@ define i64 @char_test2() #0 {
 ; CHECK: sxtw([[VAR3]])
 entry:
   store i8 0, i8* @char_a, align 1
-  %0 = load i8* @char_c, align 1
+  %0 = load i8, i8* @char_c, align 1
   %conv2 = sext i8 %0 to i64
   ret i64 %conv2
 }
@@ -63,7 +63,7 @@ define i64 @int_test1() #0 {
 ; CHECK: combine(#0, [[VAR4]])
 entry:
   store i32 0, i32* @int_a, align 4
-  %0 = load i32* @int_b, align 4
+  %0 = load i32, i32* @int_b, align 4
   %conv = zext i32 %0 to i64
   ret i64 %conv
 }
@@ -74,7 +74,7 @@ define i64 @int_test2() #0 {
 ; CHECK: sxtw([[VAR5]])
 entry:
   store i32 0, i32* @int_a, align 4
-  %0 = load i32* @int_c, align 4
+  %0 = load i32, i32* @int_c, align 4
   %conv = sext i32 %0 to i64
   ret i64 %conv
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/fadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/fadd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/fadd.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/fadd.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ entry:
   %c = alloca float, align 4
   store float 0x402ECCCCC0000000, float* %a, align 4
   store float 0x4022333340000000, float* %b, align 4
-  %0 = load float* %a, align 4
-  %1 = load float* %b, align 4
+  %0 = load float, float* %a, align 4
+  %1 = load float, float* %b, align 4
   %add = fadd float %0, %1
   store float %add, float* %c, align 4
   ret i32 0

Modified: llvm/trunk/test/CodeGen/Hexagon/fcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/fcmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/fcmp.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/fcmp.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ entry:
   %retval = alloca i32, align 4
   %y.addr = alloca float, align 4
   store float %y, float* %y.addr, align 4
-  %0 = load float* %y.addr, align 4
+  %0 = load float, float* %y.addr, align 4
   %cmp = fcmp ogt float %0, 0x406AD7EFA0000000
   br i1 %cmp, label %if.then, label %if.else
 
@@ -21,7 +21,7 @@ if.else:
   br label %return
 
 return:                                           ; preds = %if.else, %if.then
-  %1 = load i32* %retval
+  %1 = load i32, i32* %retval
   ret i32 %1
 }
 
@@ -31,7 +31,7 @@ entry:
   %a = alloca float, align 4
   store i32 0, i32* %retval
   store float 0x40012E0A00000000, float* %a, align 4
-  %0 = load float* %a, align 4
+  %0 = load float, float* %a, align 4
   %call = call i32 @foo(float %0)
   ret i32 %call
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/float.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/float.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/float.ll Fri Feb 27 15:17:42 2015
@@ -10,13 +10,13 @@ entry:
   store float* %acc, float** %acc.addr, align 4
   store float %num, float* %num.addr, align 4
   store float %num2, float* %num2.addr, align 4
-  %0 = load float** %acc.addr, align 4
-  %1 = load float* %0
-  %2 = load float* %num.addr, align 4
+  %0 = load float*, float** %acc.addr, align 4
+  %1 = load float, float* %0
+  %2 = load float, float* %num.addr, align 4
   %add = fadd float %1, %2
-  %3 = load float* %num2.addr, align 4
+  %3 = load float, float* %num2.addr, align 4
   %sub = fsub float %add, %3
-  %4 = load float** %acc.addr, align 4
+  %4 = load float*, float** %acc.addr, align 4
   store float %sub, float* %4
   ret void
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll Fri Feb 27 15:17:42 2015
@@ -10,13 +10,13 @@ entry:
   store float* %acc, float** %acc.addr, align 4
   store float %num, float* %num.addr, align 4
   store float %num2, float* %num2.addr, align 4
-  %0 = load float** %acc.addr, align 4
-  %1 = load float* %0
-  %2 = load float* %num.addr, align 4
+  %0 = load float*, float** %acc.addr, align 4
+  %1 = load float, float* %0
+  %2 = load float, float* %num.addr, align 4
   %add = fadd float %1, %2
-  %3 = load float* %num2.addr, align 4
+  %3 = load float, float* %num2.addr, align 4
   %sub = fsub float %add, %3
-  %4 = load float** %acc.addr, align 4
+  %4 = load float*, float** %acc.addr, align 4
   store float %sub, float* %4
   ret void
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/fmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/fmul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/fmul.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/fmul.ll Fri Feb 27 15:17:42 2015
@@ -11,8 +11,8 @@ entry:
   %c = alloca float, align 4
   store float 0x402ECCCCC0000000, float* %a, align 4
   store float 0x4022333340000000, float* %b, align 4
-  %0 = load float* %b, align 4
-  %1 = load float* %a, align 4
+  %0 = load float, float* %b, align 4
+  %1 = load float, float* %a, align 4
   %mul = fmul float %0, %1
   store float %mul, float* %c, align 4
   ret i32 0

Modified: llvm/trunk/test/CodeGen/Hexagon/frame.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/frame.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/frame.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/frame.ll Fri Feb 27 15:17:42 2015
@@ -10,14 +10,14 @@
 define i32 @foo() nounwind {
 entry:
   %i = alloca i32, align 4
-  %0 = load i32* @num, align 4
+  %0 = load i32, i32* @num, align 4
   store i32 %0, i32* %i, align 4
-  %1 = load i32* %i, align 4
-  %2 = load i32* @acc, align 4
+  %1 = load i32, i32* %i, align 4
+  %2 = load i32, i32* @acc, align 4
   %mul = mul nsw i32 %1, %2
-  %3 = load i32* @num2, align 4
+  %3 = load i32, i32* @num2, align 4
   %add = add nsw i32 %mul, %3
   store i32 %add, i32* %i, align 4
-  %4 = load i32* %i, align 4
+  %4 = load i32, i32* %i, align 4
   ret i32 %4
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/fsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/fsub.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/fsub.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/fsub.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ entry:
   %c = alloca float, align 4
   store float 0x402ECCCCC0000000, float* %a, align 4
   store float 0x4022333340000000, float* %b, align 4
-  %0 = load float* %b, align 4
-  %1 = load float* %a, align 4
+  %0 = load float, float* %b, align 4
+  %1 = load float, float* %a, align 4
   %sub = fsub float %0, %1
   store float %sub, float* %c, align 4
   ret i32 0

Modified: llvm/trunk/test/CodeGen/Hexagon/fusedandshift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/fusedandshift.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/fusedandshift.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/fusedandshift.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 
 define i32 @main(i16* %a, i16* %b) nounwind {
   entry:
-  %0 = load i16* %a, align 2
+  %0 = load i16, i16* %a, align 2
   %conv1 = sext i16 %0 to i32
   %shr1 = ashr i32 %conv1, 3
   %and1 = and i32 %shr1, 15

Modified: llvm/trunk/test/CodeGen/Hexagon/gp-plus-offset-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/gp-plus-offset-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/gp-plus-offset-load.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/gp-plus-offset-load.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %0 = load i32* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 3), align 4
+  %0 = load i32, i32* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 3), align 4
   store i32 %0, i32* %ival, align 4
   br label %if.end
 
@@ -27,7 +27,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %0 = load i8* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 1), align 1
+  %0 = load i8, i8* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 1), align 1
   store i8 %0, i8* %ival, align 1
   br label %if.end
 
@@ -42,7 +42,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %0 = load i16* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 2), align 2
+  %0 = load i16, i16* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 2), align 2
   store i16 %0, i16* %ival, align 2
   br label %if.end
 

Modified: llvm/trunk/test/CodeGen/Hexagon/gp-rel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/gp-rel.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/gp-rel.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/gp-rel.ll Fri Feb 27 15:17:42 2015
@@ -10,14 +10,14 @@ entry:
 ; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#a)
 ; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#b)
 ; CHECK: if{{ *}}(p{{[0-3]}}) memw(##c){{ *}}={{ *}}r{{[0-9]+}}
-  %0 = load i32* @a, align 4
-  %1 = load i32* @b, align 4
+  %0 = load i32, i32* @a, align 4
+  %1 = load i32, i32* @b, align 4
   %add = add nsw i32 %1, %0
   %cmp = icmp eq i32 %0, %1
   br i1 %cmp, label %if.then, label %entry.if.end_crit_edge
 
 entry.if.end_crit_edge:
-  %.pre = load i32* @c, align 4
+  %.pre = load i32, i32* @c, align 4
   br label %if.end
 
 if.then:

Modified: llvm/trunk/test/CodeGen/Hexagon/hwloop-cleanup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hwloop-cleanup.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hwloop-cleanup.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/hwloop-cleanup.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ for.body:
   %sum.03 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
   %arrayidx.phi = phi i32* [ %arrayidx.inc, %for.body ], [ %b, %for.body.preheader ]
   %i.02 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
-  %0 = load i32* %arrayidx.phi, align 4
+  %0 = load i32, i32* %arrayidx.phi, align 4
   %add = add nsw i32 %0, %sum.03
   %inc = add nsw i32 %i.02, 1
   %exitcond = icmp eq i32 %inc, %n
@@ -50,7 +50,7 @@ for.body:
   %sum.02 = phi i32 [ 0, %entry ], [ %add, %for.body ]
   %arrayidx.phi = phi i32* [ %b, %entry ], [ %arrayidx.inc, %for.body ]
   %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
-  %0 = load i32* %arrayidx.phi, align 4
+  %0 = load i32, i32* %arrayidx.phi, align 4
   %add = add nsw i32 %0, %sum.02
   %inc = add nsw i32 %i.01, 1
   %exitcond = icmp eq i32 %inc, 40

Modified: llvm/trunk/test/CodeGen/Hexagon/hwloop-dbg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hwloop-dbg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hwloop-dbg.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/hwloop-dbg.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ for.body:
   %b.addr.01 = phi i32* [ %b, %entry ], [ %incdec.ptr, %for.body ]
   %incdec.ptr = getelementptr inbounds i32, i32* %b.addr.01, i32 1, !dbg !21
   tail call void @llvm.dbg.value(metadata i32* %incdec.ptr, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !21
-  %0 = load i32* %b.addr.01, align 4, !dbg !21
+  %0 = load i32, i32* %b.addr.01, align 4, !dbg !21
   store i32 %0, i32* %arrayidx.phi, align 4, !dbg !21
   %inc = add nsw i32 %i.02, 1, !dbg !26
   tail call void @llvm.dbg.value(metadata i32 %inc, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !26

Modified: llvm/trunk/test/CodeGen/Hexagon/hwloop-le.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hwloop-le.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hwloop-le.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/hwloop-le.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 28395, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -44,7 +44,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 9073, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -73,7 +73,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 21956, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -102,7 +102,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 16782, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -131,7 +131,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 19097, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -160,7 +160,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -189,7 +189,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -218,7 +218,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -247,7 +247,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -276,7 +276,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -305,7 +305,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -334,7 +334,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -363,7 +363,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -392,7 +392,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -421,7 +421,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8

Modified: llvm/trunk/test/CodeGen/Hexagon/hwloop-lt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hwloop-lt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hwloop-lt.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/hwloop-lt.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 8531, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -44,7 +44,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 9152, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -73,7 +73,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 18851, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -102,7 +102,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 25466, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -131,7 +131,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 9295, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -160,7 +160,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -189,7 +189,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -218,7 +218,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -247,7 +247,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -276,7 +276,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -305,7 +305,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -334,7 +334,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -363,7 +363,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -392,7 +392,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -421,7 +421,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8

Modified: llvm/trunk/test/CodeGen/Hexagon/hwloop-ne.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hwloop-ne.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hwloop-ne.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/hwloop-ne.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 32623, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -44,7 +44,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 29554, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -73,7 +73,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 15692, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -102,7 +102,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 10449, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -131,7 +131,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 32087, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -160,7 +160,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -189,7 +189,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -218,7 +218,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -247,7 +247,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -276,7 +276,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -305,7 +305,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -334,7 +334,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -363,7 +363,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -392,7 +392,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -421,7 +421,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8

Modified: llvm/trunk/test/CodeGen/Hexagon/i16_VarArg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/i16_VarArg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/i16_VarArg.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/i16_VarArg.ll Fri Feb 27 15:17:42 2015
@@ -20,8 +20,8 @@
 declare i32 @printf(i8*, ...)
 
 define i32 @main() {
-        %a = load double* @A
-        %b = load double* @B
+        %a = load double, double* @A
+        %b = load double, double* @B
         %lt_r = fcmp olt double %a, %b
         %le_r = fcmp ole double %a, %b
         %gt_r = fcmp ogt double %a, %b

Modified: llvm/trunk/test/CodeGen/Hexagon/i1_VarArg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/i1_VarArg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/i1_VarArg.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/i1_VarArg.ll Fri Feb 27 15:17:42 2015
@@ -20,8 +20,8 @@
 declare i32 @printf(i8*, ...)
 
 define i32 @main() {
-        %a = load double* @A
-        %b = load double* @B
+        %a = load double, double* @A
+        %b = load double, double* @B
         %lt_r = fcmp olt double %a, %b
         %le_r = fcmp ole double %a, %b
         %gt_r = fcmp ogt double %a, %b

Modified: llvm/trunk/test/CodeGen/Hexagon/i8_VarArg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/i8_VarArg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/i8_VarArg.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/i8_VarArg.ll Fri Feb 27 15:17:42 2015
@@ -20,8 +20,8 @@
 declare i32 @printf(i8*, ...)
 
 define i32 @main() {
-        %a = load double* @A
-        %b = load double* @B
+        %a = load double, double* @A
+        %b = load double, double* @B
         %lt_r = fcmp olt double %a, %b
         %le_r = fcmp ole double %a, %b
         %gt_r = fcmp ogt double %a, %b

Modified: llvm/trunk/test/CodeGen/Hexagon/idxload-with-zero-offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/idxload-with-zero-offset.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/idxload-with-zero-offset.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/idxload-with-zero-offset.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ define i32 @load_w(i32* nocapture %a, i3
 entry:
   %tmp = add i32 %n, %m
   %scevgep9 = getelementptr i32, i32* %a, i32 %tmp
-  %val = load i32* %scevgep9, align 4
+  %val = load i32, i32* %scevgep9, align 4
   ret i32 %val
 }
 
@@ -19,7 +19,7 @@ define i16 @load_uh(i16* nocapture %a, i
 entry:
   %tmp = add i32 %n, %m
   %scevgep9 = getelementptr i16, i16* %a, i32 %tmp
-  %val = load i16* %scevgep9, align 2
+  %val = load i16, i16* %scevgep9, align 2
   ret i16 %val
 }
 
@@ -30,7 +30,7 @@ define i32 @load_h(i16* nocapture %a, i3
 entry:
   %tmp = add i32 %n, %m
   %scevgep9 = getelementptr i16, i16* %a, i32 %tmp
-  %val = load i16* %scevgep9, align 2
+  %val = load i16, i16* %scevgep9, align 2
   %conv = sext i16 %val to i32
   ret i32 %conv
 }
@@ -42,7 +42,7 @@ define i8 @load_ub(i8* nocapture %a, i32
 entry:
   %tmp = add i32 %n, %m
   %scevgep9 = getelementptr i8, i8* %a, i32 %tmp
-  %val = load i8* %scevgep9, align 1
+  %val = load i8, i8* %scevgep9, align 1
   ret i8 %val
 }
 
@@ -53,7 +53,7 @@ define i32 @foo_2(i8* nocapture %a, i32
 entry:
   %tmp = add i32 %n, %m
   %scevgep9 = getelementptr i8, i8* %a, i32 %tmp
-  %val = load i8* %scevgep9, align 1
+  %val = load i8, i8* %scevgep9, align 1
   %conv = sext i8 %val to i32
   ret i32 %conv
 }
@@ -65,6 +65,6 @@ define i64 @load_d(i64* nocapture %a, i3
 entry:
   %tmp = add i32 %n, %m
   %scevgep9 = getelementptr i64, i64* %a, i32 %tmp
-  %val = load i64* %scevgep9, align 8
+  %val = load i64, i64* %scevgep9, align 8
   ret i64 %val
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/macint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/macint.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/macint.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/macint.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 
 define i32 @main(i32* %a, i32* %b) nounwind {
   entry:
-  %0 = load i32* %a, align 4
+  %0 = load i32, i32* %a, align 4
   %div = udiv i32 %0, 10000
   %rem = urem i32 %div, 10
   store i32 %rem, i32* %b, align 4

Modified: llvm/trunk/test/CodeGen/Hexagon/memops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/memops.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/memops.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/memops.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 define void @memop_unsigned_char_add5(i8* nocapture %p) nounwind {
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 5
   %conv1 = trunc i32 %add to i8
@@ -16,7 +16,7 @@ define void @memop_unsigned_char_add(i8*
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
   %conv = zext i8 %x to i32
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %conv1 = zext i8 %0 to i32
   %add = add nsw i32 %conv1, %conv
   %conv2 = trunc i32 %add to i8
@@ -28,7 +28,7 @@ define void @memop_unsigned_char_sub(i8*
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
   %conv = zext i8 %x to i32
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %conv1 = zext i8 %0 to i32
   %sub = sub nsw i32 %conv1, %conv
   %conv2 = trunc i32 %sub to i8
@@ -39,7 +39,7 @@ entry:
 define void @memop_unsigned_char_or(i8* nocapture %p, i8 zeroext %x) nounwind {
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %or3 = or i8 %0, %x
   store i8 %or3, i8* %p, align 1
   ret void
@@ -48,7 +48,7 @@ entry:
 define void @memop_unsigned_char_and(i8* nocapture %p, i8 zeroext %x) nounwind {
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %and3 = and i8 %0, %x
   store i8 %and3, i8* %p, align 1
   ret void
@@ -57,7 +57,7 @@ entry:
 define void @memop_unsigned_char_clrbit(i8* nocapture %p) nounwind {
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %conv = zext i8 %0 to i32
   %and = and i32 %conv, 223
   %conv1 = trunc i32 %and to i8
@@ -68,7 +68,7 @@ entry:
 define void @memop_unsigned_char_setbit(i8* nocapture %p) nounwind {
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %conv = zext i8 %0 to i32
   %or = or i32 %conv, 128
   %conv1 = trunc i32 %or to i8
@@ -80,7 +80,7 @@ define void @memop_unsigned_char_add5_in
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 5
   %conv1 = trunc i32 %add to i8
@@ -93,7 +93,7 @@ entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
   %conv = zext i8 %x to i32
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv1 = zext i8 %0 to i32
   %add = add nsw i32 %conv1, %conv
   %conv2 = trunc i32 %add to i8
@@ -106,7 +106,7 @@ entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
   %conv = zext i8 %x to i32
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv1 = zext i8 %0 to i32
   %sub = sub nsw i32 %conv1, %conv
   %conv2 = trunc i32 %sub to i8
@@ -118,7 +118,7 @@ define void @memop_unsigned_char_or_inde
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %or3 = or i8 %0, %x
   store i8 %or3, i8* %add.ptr, align 1
   ret void
@@ -128,7 +128,7 @@ define void @memop_unsigned_char_and_ind
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %and3 = and i8 %0, %x
   store i8 %and3, i8* %add.ptr, align 1
   ret void
@@ -138,7 +138,7 @@ define void @memop_unsigned_char_clrbit_
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %and = and i32 %conv, 223
   %conv1 = trunc i32 %and to i8
@@ -150,7 +150,7 @@ define void @memop_unsigned_char_setbit_
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %or = or i32 %conv, 128
   %conv1 = trunc i32 %or to i8
@@ -162,7 +162,7 @@ define void @memop_unsigned_char_add5_in
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 5
   %conv1 = trunc i32 %add to i8
@@ -175,7 +175,7 @@ entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}}
   %conv = zext i8 %x to i32
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv1 = zext i8 %0 to i32
   %add = add nsw i32 %conv1, %conv
   %conv2 = trunc i32 %add to i8
@@ -188,7 +188,7 @@ entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}}
   %conv = zext i8 %x to i32
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv1 = zext i8 %0 to i32
   %sub = sub nsw i32 %conv1, %conv
   %conv2 = trunc i32 %sub to i8
@@ -200,7 +200,7 @@ define void @memop_unsigned_char_or_inde
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %or3 = or i8 %0, %x
   store i8 %or3, i8* %add.ptr, align 1
   ret void
@@ -210,7 +210,7 @@ define void @memop_unsigned_char_and_ind
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %and3 = and i8 %0, %x
   store i8 %and3, i8* %add.ptr, align 1
   ret void
@@ -220,7 +220,7 @@ define void @memop_unsigned_char_clrbit_
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %and = and i32 %conv, 223
   %conv1 = trunc i32 %and to i8
@@ -232,7 +232,7 @@ define void @memop_unsigned_char_setbit_
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %or = or i32 %conv, 128
   %conv1 = trunc i32 %or to i8
@@ -243,7 +243,7 @@ entry:
 define void @memop_signed_char_add5(i8* nocapture %p) nounwind {
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %conv2 = zext i8 %0 to i32
   %add = add nsw i32 %conv2, 5
   %conv1 = trunc i32 %add to i8
@@ -255,7 +255,7 @@ define void @memop_signed_char_add(i8* n
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
   %conv4 = zext i8 %x to i32
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %conv13 = zext i8 %0 to i32
   %add = add nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %add to i8
@@ -267,7 +267,7 @@ define void @memop_signed_char_sub(i8* n
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
   %conv4 = zext i8 %x to i32
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %conv13 = zext i8 %0 to i32
   %sub = sub nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %sub to i8
@@ -278,7 +278,7 @@ entry:
 define void @memop_signed_char_or(i8* nocapture %p, i8 signext %x) nounwind {
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %or3 = or i8 %0, %x
   store i8 %or3, i8* %p, align 1
   ret void
@@ -287,7 +287,7 @@ entry:
 define void @memop_signed_char_and(i8* nocapture %p, i8 signext %x) nounwind {
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %and3 = and i8 %0, %x
   store i8 %and3, i8* %p, align 1
   ret void
@@ -296,7 +296,7 @@ entry:
 define void @memop_signed_char_clrbit(i8* nocapture %p) nounwind {
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %conv2 = zext i8 %0 to i32
   %and = and i32 %conv2, 223
   %conv1 = trunc i32 %and to i8
@@ -307,7 +307,7 @@ entry:
 define void @memop_signed_char_setbit(i8* nocapture %p) nounwind {
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
-  %0 = load i8* %p, align 1
+  %0 = load i8, i8* %p, align 1
   %conv2 = zext i8 %0 to i32
   %or = or i32 %conv2, 128
   %conv1 = trunc i32 %or to i8
@@ -319,7 +319,7 @@ define void @memop_signed_char_add5_inde
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv2 = zext i8 %0 to i32
   %add = add nsw i32 %conv2, 5
   %conv1 = trunc i32 %add to i8
@@ -332,7 +332,7 @@ entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
   %conv4 = zext i8 %x to i32
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv13 = zext i8 %0 to i32
   %add = add nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %add to i8
@@ -345,7 +345,7 @@ entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
   %conv4 = zext i8 %x to i32
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv13 = zext i8 %0 to i32
   %sub = sub nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %sub to i8
@@ -357,7 +357,7 @@ define void @memop_signed_char_or_index(
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %or3 = or i8 %0, %x
   store i8 %or3, i8* %add.ptr, align 1
   ret void
@@ -367,7 +367,7 @@ define void @memop_signed_char_and_index
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %and3 = and i8 %0, %x
   store i8 %and3, i8* %add.ptr, align 1
   ret void
@@ -377,7 +377,7 @@ define void @memop_signed_char_clrbit_in
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv2 = zext i8 %0 to i32
   %and = and i32 %conv2, 223
   %conv1 = trunc i32 %and to i8
@@ -389,7 +389,7 @@ define void @memop_signed_char_setbit_in
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv2 = zext i8 %0 to i32
   %or = or i32 %conv2, 128
   %conv1 = trunc i32 %or to i8
@@ -401,7 +401,7 @@ define void @memop_signed_char_add5_inde
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv2 = zext i8 %0 to i32
   %add = add nsw i32 %conv2, 5
   %conv1 = trunc i32 %add to i8
@@ -414,7 +414,7 @@ entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}}
   %conv4 = zext i8 %x to i32
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv13 = zext i8 %0 to i32
   %add = add nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %add to i8
@@ -427,7 +427,7 @@ entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}}
   %conv4 = zext i8 %x to i32
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv13 = zext i8 %0 to i32
   %sub = sub nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %sub to i8
@@ -439,7 +439,7 @@ define void @memop_signed_char_or_index5
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %or3 = or i8 %0, %x
   store i8 %or3, i8* %add.ptr, align 1
   ret void
@@ -449,7 +449,7 @@ define void @memop_signed_char_and_index
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %and3 = and i8 %0, %x
   store i8 %and3, i8* %add.ptr, align 1
   ret void
@@ -459,7 +459,7 @@ define void @memop_signed_char_clrbit_in
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv2 = zext i8 %0 to i32
   %and = and i32 %conv2, 223
   %conv1 = trunc i32 %and to i8
@@ -471,7 +471,7 @@ define void @memop_signed_char_setbit_in
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv2 = zext i8 %0 to i32
   %or = or i32 %conv2, 128
   %conv1 = trunc i32 %or to i8
@@ -482,7 +482,7 @@ entry:
 define void @memop_unsigned_short_add5(i16* nocapture %p) nounwind {
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %conv = zext i16 %0 to i32
   %add = add nsw i32 %conv, 5
   %conv1 = trunc i32 %add to i16
@@ -494,7 +494,7 @@ define void @memop_unsigned_short_add(i1
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
   %conv = zext i16 %x to i32
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %conv1 = zext i16 %0 to i32
   %add = add nsw i32 %conv1, %conv
   %conv2 = trunc i32 %add to i16
@@ -506,7 +506,7 @@ define void @memop_unsigned_short_sub(i1
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
   %conv = zext i16 %x to i32
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %conv1 = zext i16 %0 to i32
   %sub = sub nsw i32 %conv1, %conv
   %conv2 = trunc i32 %sub to i16
@@ -517,7 +517,7 @@ entry:
 define void @memop_unsigned_short_or(i16* nocapture %p, i16 zeroext %x) nounwind {
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %or3 = or i16 %0, %x
   store i16 %or3, i16* %p, align 2
   ret void
@@ -526,7 +526,7 @@ entry:
 define void @memop_unsigned_short_and(i16* nocapture %p, i16 zeroext %x) nounwind {
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %and3 = and i16 %0, %x
   store i16 %and3, i16* %p, align 2
   ret void
@@ -535,7 +535,7 @@ entry:
 define void @memop_unsigned_short_clrbit(i16* nocapture %p) nounwind {
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %conv = zext i16 %0 to i32
   %and = and i32 %conv, 65503
   %conv1 = trunc i32 %and to i16
@@ -546,7 +546,7 @@ entry:
 define void @memop_unsigned_short_setbit(i16* nocapture %p) nounwind {
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %conv = zext i16 %0 to i32
   %or = or i32 %conv, 128
   %conv1 = trunc i32 %or to i16
@@ -558,7 +558,7 @@ define void @memop_unsigned_short_add5_i
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv = zext i16 %0 to i32
   %add = add nsw i32 %conv, 5
   %conv1 = trunc i32 %add to i16
@@ -571,7 +571,7 @@ entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
   %conv = zext i16 %x to i32
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv1 = zext i16 %0 to i32
   %add = add nsw i32 %conv1, %conv
   %conv2 = trunc i32 %add to i16
@@ -584,7 +584,7 @@ entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
   %conv = zext i16 %x to i32
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv1 = zext i16 %0 to i32
   %sub = sub nsw i32 %conv1, %conv
   %conv2 = trunc i32 %sub to i16
@@ -596,7 +596,7 @@ define void @memop_unsigned_short_or_ind
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %or3 = or i16 %0, %x
   store i16 %or3, i16* %add.ptr, align 2
   ret void
@@ -606,7 +606,7 @@ define void @memop_unsigned_short_and_in
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %and3 = and i16 %0, %x
   store i16 %and3, i16* %add.ptr, align 2
   ret void
@@ -616,7 +616,7 @@ define void @memop_unsigned_short_clrbit
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv = zext i16 %0 to i32
   %and = and i32 %conv, 65503
   %conv1 = trunc i32 %and to i16
@@ -628,7 +628,7 @@ define void @memop_unsigned_short_setbit
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv = zext i16 %0 to i32
   %or = or i32 %conv, 128
   %conv1 = trunc i32 %or to i16
@@ -640,7 +640,7 @@ define void @memop_unsigned_short_add5_i
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv = zext i16 %0 to i32
   %add = add nsw i32 %conv, 5
   %conv1 = trunc i32 %add to i16
@@ -653,7 +653,7 @@ entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}}
   %conv = zext i16 %x to i32
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv1 = zext i16 %0 to i32
   %add = add nsw i32 %conv1, %conv
   %conv2 = trunc i32 %add to i16
@@ -666,7 +666,7 @@ entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}}
   %conv = zext i16 %x to i32
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv1 = zext i16 %0 to i32
   %sub = sub nsw i32 %conv1, %conv
   %conv2 = trunc i32 %sub to i16
@@ -678,7 +678,7 @@ define void @memop_unsigned_short_or_ind
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %or3 = or i16 %0, %x
   store i16 %or3, i16* %add.ptr, align 2
   ret void
@@ -688,7 +688,7 @@ define void @memop_unsigned_short_and_in
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %and3 = and i16 %0, %x
   store i16 %and3, i16* %add.ptr, align 2
   ret void
@@ -698,7 +698,7 @@ define void @memop_unsigned_short_clrbit
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv = zext i16 %0 to i32
   %and = and i32 %conv, 65503
   %conv1 = trunc i32 %and to i16
@@ -710,7 +710,7 @@ define void @memop_unsigned_short_setbit
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv = zext i16 %0 to i32
   %or = or i32 %conv, 128
   %conv1 = trunc i32 %or to i16
@@ -721,7 +721,7 @@ entry:
 define void @memop_signed_short_add5(i16* nocapture %p) nounwind {
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %conv2 = zext i16 %0 to i32
   %add = add nsw i32 %conv2, 5
   %conv1 = trunc i32 %add to i16
@@ -733,7 +733,7 @@ define void @memop_signed_short_add(i16*
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
   %conv4 = zext i16 %x to i32
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %conv13 = zext i16 %0 to i32
   %add = add nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %add to i16
@@ -745,7 +745,7 @@ define void @memop_signed_short_sub(i16*
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
   %conv4 = zext i16 %x to i32
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %conv13 = zext i16 %0 to i32
   %sub = sub nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %sub to i16
@@ -756,7 +756,7 @@ entry:
 define void @memop_signed_short_or(i16* nocapture %p, i16 signext %x) nounwind {
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %or3 = or i16 %0, %x
   store i16 %or3, i16* %p, align 2
   ret void
@@ -765,7 +765,7 @@ entry:
 define void @memop_signed_short_and(i16* nocapture %p, i16 signext %x) nounwind {
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %and3 = and i16 %0, %x
   store i16 %and3, i16* %p, align 2
   ret void
@@ -774,7 +774,7 @@ entry:
 define void @memop_signed_short_clrbit(i16* nocapture %p) nounwind {
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %conv2 = zext i16 %0 to i32
   %and = and i32 %conv2, 65503
   %conv1 = trunc i32 %and to i16
@@ -785,7 +785,7 @@ entry:
 define void @memop_signed_short_setbit(i16* nocapture %p) nounwind {
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
-  %0 = load i16* %p, align 2
+  %0 = load i16, i16* %p, align 2
   %conv2 = zext i16 %0 to i32
   %or = or i32 %conv2, 128
   %conv1 = trunc i32 %or to i16
@@ -797,7 +797,7 @@ define void @memop_signed_short_add5_ind
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %add = add nsw i32 %conv2, 5
   %conv1 = trunc i32 %add to i16
@@ -810,7 +810,7 @@ entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
   %conv4 = zext i16 %x to i32
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv13 = zext i16 %0 to i32
   %add = add nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %add to i16
@@ -823,7 +823,7 @@ entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
   %conv4 = zext i16 %x to i32
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv13 = zext i16 %0 to i32
   %sub = sub nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %sub to i16
@@ -835,7 +835,7 @@ define void @memop_signed_short_or_index
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %or3 = or i16 %0, %x
   store i16 %or3, i16* %add.ptr, align 2
   ret void
@@ -845,7 +845,7 @@ define void @memop_signed_short_and_inde
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %and3 = and i16 %0, %x
   store i16 %and3, i16* %add.ptr, align 2
   ret void
@@ -855,7 +855,7 @@ define void @memop_signed_short_clrbit_i
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %and = and i32 %conv2, 65503
   %conv1 = trunc i32 %and to i16
@@ -867,7 +867,7 @@ define void @memop_signed_short_setbit_i
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %or = or i32 %conv2, 128
   %conv1 = trunc i32 %or to i16
@@ -879,7 +879,7 @@ define void @memop_signed_short_add5_ind
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %add = add nsw i32 %conv2, 5
   %conv1 = trunc i32 %add to i16
@@ -892,7 +892,7 @@ entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}}
   %conv4 = zext i16 %x to i32
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv13 = zext i16 %0 to i32
   %add = add nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %add to i16
@@ -905,7 +905,7 @@ entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}}
   %conv4 = zext i16 %x to i32
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv13 = zext i16 %0 to i32
   %sub = sub nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %sub to i16
@@ -917,7 +917,7 @@ define void @memop_signed_short_or_index
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %or3 = or i16 %0, %x
   store i16 %or3, i16* %add.ptr, align 2
   ret void
@@ -927,7 +927,7 @@ define void @memop_signed_short_and_inde
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %and3 = and i16 %0, %x
   store i16 %and3, i16* %add.ptr, align 2
   ret void
@@ -937,7 +937,7 @@ define void @memop_signed_short_clrbit_i
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %and = and i32 %conv2, 65503
   %conv1 = trunc i32 %and to i16
@@ -949,7 +949,7 @@ define void @memop_signed_short_setbit_i
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %or = or i32 %conv2, 128
   %conv1 = trunc i32 %or to i16
@@ -960,7 +960,7 @@ entry:
 define void @memop_signed_int_add5(i32* nocapture %p) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %add = add i32 %0, 5
   store i32 %add, i32* %p, align 4
   ret void
@@ -969,7 +969,7 @@ entry:
 define void @memop_signed_int_add(i32* nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %add = add i32 %0, %x
   store i32 %add, i32* %p, align 4
   ret void
@@ -978,7 +978,7 @@ entry:
 define void @memop_signed_int_sub(i32* nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %sub = sub i32 %0, %x
   store i32 %sub, i32* %p, align 4
   ret void
@@ -987,7 +987,7 @@ entry:
 define void @memop_signed_int_or(i32* nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %or = or i32 %0, %x
   store i32 %or, i32* %p, align 4
   ret void
@@ -996,7 +996,7 @@ entry:
 define void @memop_signed_int_and(i32* nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %and = and i32 %0, %x
   store i32 %and, i32* %p, align 4
   ret void
@@ -1005,7 +1005,7 @@ entry:
 define void @memop_signed_int_clrbit(i32* nocapture %p) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %and = and i32 %0, -33
   store i32 %and, i32* %p, align 4
   ret void
@@ -1014,7 +1014,7 @@ entry:
 define void @memop_signed_int_setbit(i32* nocapture %p) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %or = or i32 %0, 128
   store i32 %or, i32* %p, align 4
   ret void
@@ -1024,7 +1024,7 @@ define void @memop_signed_int_add5_index
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %add = add i32 %0, 5
   store i32 %add, i32* %add.ptr, align 4
   ret void
@@ -1034,7 +1034,7 @@ define void @memop_signed_int_add_index(
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %add = add i32 %0, %x
   store i32 %add, i32* %add.ptr, align 4
   ret void
@@ -1044,7 +1044,7 @@ define void @memop_signed_int_sub_index(
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %sub = sub i32 %0, %x
   store i32 %sub, i32* %add.ptr, align 4
   ret void
@@ -1054,7 +1054,7 @@ define void @memop_signed_int_or_index(i
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %or = or i32 %0, %x
   store i32 %or, i32* %add.ptr, align 4
   ret void
@@ -1064,7 +1064,7 @@ define void @memop_signed_int_and_index(
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %and = and i32 %0, %x
   store i32 %and, i32* %add.ptr, align 4
   ret void
@@ -1074,7 +1074,7 @@ define void @memop_signed_int_clrbit_ind
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %and = and i32 %0, -33
   store i32 %and, i32* %add.ptr, align 4
   ret void
@@ -1084,7 +1084,7 @@ define void @memop_signed_int_setbit_ind
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %or = or i32 %0, 128
   store i32 %or, i32* %add.ptr, align 4
   ret void
@@ -1094,7 +1094,7 @@ define void @memop_signed_int_add5_index
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %add = add i32 %0, 5
   store i32 %add, i32* %add.ptr, align 4
   ret void
@@ -1104,7 +1104,7 @@ define void @memop_signed_int_add_index5
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %add = add i32 %0, %x
   store i32 %add, i32* %add.ptr, align 4
   ret void
@@ -1114,7 +1114,7 @@ define void @memop_signed_int_sub_index5
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %sub = sub i32 %0, %x
   store i32 %sub, i32* %add.ptr, align 4
   ret void
@@ -1124,7 +1124,7 @@ define void @memop_signed_int_or_index5(
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %or = or i32 %0, %x
   store i32 %or, i32* %add.ptr, align 4
   ret void
@@ -1134,7 +1134,7 @@ define void @memop_signed_int_and_index5
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %and = and i32 %0, %x
   store i32 %and, i32* %add.ptr, align 4
   ret void
@@ -1144,7 +1144,7 @@ define void @memop_signed_int_clrbit_ind
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %and = and i32 %0, -33
   store i32 %and, i32* %add.ptr, align 4
   ret void
@@ -1154,7 +1154,7 @@ define void @memop_signed_int_setbit_ind
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %or = or i32 %0, 128
   store i32 %or, i32* %add.ptr, align 4
   ret void
@@ -1163,7 +1163,7 @@ entry:
 define void @memop_unsigned_int_add5(i32* nocapture %p) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %add = add nsw i32 %0, 5
   store i32 %add, i32* %p, align 4
   ret void
@@ -1172,7 +1172,7 @@ entry:
 define void @memop_unsigned_int_add(i32* nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %add = add nsw i32 %0, %x
   store i32 %add, i32* %p, align 4
   ret void
@@ -1181,7 +1181,7 @@ entry:
 define void @memop_unsigned_int_sub(i32* nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %sub = sub nsw i32 %0, %x
   store i32 %sub, i32* %p, align 4
   ret void
@@ -1190,7 +1190,7 @@ entry:
 define void @memop_unsigned_int_or(i32* nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %or = or i32 %0, %x
   store i32 %or, i32* %p, align 4
   ret void
@@ -1199,7 +1199,7 @@ entry:
 define void @memop_unsigned_int_and(i32* nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %and = and i32 %0, %x
   store i32 %and, i32* %p, align 4
   ret void
@@ -1208,7 +1208,7 @@ entry:
 define void @memop_unsigned_int_clrbit(i32* nocapture %p) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %and = and i32 %0, -33
   store i32 %and, i32* %p, align 4
   ret void
@@ -1217,7 +1217,7 @@ entry:
 define void @memop_unsigned_int_setbit(i32* nocapture %p) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
-  %0 = load i32* %p, align 4
+  %0 = load i32, i32* %p, align 4
   %or = or i32 %0, 128
   store i32 %or, i32* %p, align 4
   ret void
@@ -1227,7 +1227,7 @@ define void @memop_unsigned_int_add5_ind
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %add = add nsw i32 %0, 5
   store i32 %add, i32* %add.ptr, align 4
   ret void
@@ -1237,7 +1237,7 @@ define void @memop_unsigned_int_add_inde
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %add = add nsw i32 %0, %x
   store i32 %add, i32* %add.ptr, align 4
   ret void
@@ -1247,7 +1247,7 @@ define void @memop_unsigned_int_sub_inde
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %sub = sub nsw i32 %0, %x
   store i32 %sub, i32* %add.ptr, align 4
   ret void
@@ -1257,7 +1257,7 @@ define void @memop_unsigned_int_or_index
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %or = or i32 %0, %x
   store i32 %or, i32* %add.ptr, align 4
   ret void
@@ -1267,7 +1267,7 @@ define void @memop_unsigned_int_and_inde
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %and = and i32 %0, %x
   store i32 %and, i32* %add.ptr, align 4
   ret void
@@ -1277,7 +1277,7 @@ define void @memop_unsigned_int_clrbit_i
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %and = and i32 %0, -33
   store i32 %and, i32* %add.ptr, align 4
   ret void
@@ -1287,7 +1287,7 @@ define void @memop_unsigned_int_setbit_i
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %or = or i32 %0, 128
   store i32 %or, i32* %add.ptr, align 4
   ret void
@@ -1297,7 +1297,7 @@ define void @memop_unsigned_int_add5_ind
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %add = add nsw i32 %0, 5
   store i32 %add, i32* %add.ptr, align 4
   ret void
@@ -1307,7 +1307,7 @@ define void @memop_unsigned_int_add_inde
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %add = add nsw i32 %0, %x
   store i32 %add, i32* %add.ptr, align 4
   ret void
@@ -1317,7 +1317,7 @@ define void @memop_unsigned_int_sub_inde
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %sub = sub nsw i32 %0, %x
   store i32 %sub, i32* %add.ptr, align 4
   ret void
@@ -1327,7 +1327,7 @@ define void @memop_unsigned_int_or_index
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %or = or i32 %0, %x
   store i32 %or, i32* %add.ptr, align 4
   ret void
@@ -1337,7 +1337,7 @@ define void @memop_unsigned_int_and_inde
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}}
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %and = and i32 %0, %x
   store i32 %and, i32* %add.ptr, align 4
   ret void
@@ -1347,7 +1347,7 @@ define void @memop_unsigned_int_clrbit_i
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %and = and i32 %0, -33
   store i32 %and, i32* %add.ptr, align 4
   ret void
@@ -1357,7 +1357,7 @@ define void @memop_unsigned_int_setbit_i
 entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
   %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   %or = or i32 %0, 128
   store i32 %or, i32* %add.ptr, align 4
   ret void
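
[Editor's note: every hunk in the test diffs above and below applies the same mechanical rewrite, so here is a minimal standalone sketch of the before/after textual IR for reference. The globals @g and @p and the function @example are illustrative only, not taken from the commit; the pattern holds for pointer-typed results exactly as for scalars.]

  @g = global i32 0
  @p = global i32* @g

  define i32 @example() {
  entry:
    ; old form:  %q = load i32** @p, align 4
    ; new form: the result type is spelled explicitly before the pointer operand
    %q = load i32*, i32** @p, align 4
    ; old form:  %v = load i32* %q, align 4
    %v = load i32, i32* %q, align 4
    ret i32 %v
  }
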

Modified: llvm/trunk/test/CodeGen/Hexagon/memops1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/memops1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/memops1.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/memops1.ll Fri Feb 27 15:17:42 2015
@@ -7,9 +7,9 @@ entry:
 ; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#40){{ *}}-={{ *}}#1
   %p.addr = alloca i32*, align 4
   store i32* %p, i32** %p.addr, align 4
-  %0 = load i32** %p.addr, align 4
+  %0 = load i32*, i32** %p.addr, align 4
   %add.ptr = getelementptr inbounds i32, i32* %0, i32 10
-  %1 = load i32* %add.ptr, align 4
+  %1 = load i32, i32* %add.ptr, align 4
   %sub = sub nsw i32 %1, 1
   store i32 %sub, i32* %add.ptr, align 4
   ret void
@@ -22,11 +22,11 @@ entry:
   %i.addr = alloca i32, align 4
   store i32* %p, i32** %p.addr, align 4
   store i32 %i, i32* %i.addr, align 4
-  %0 = load i32** %p.addr, align 4
-  %1 = load i32* %i.addr, align 4
+  %0 = load i32*, i32** %p.addr, align 4
+  %1 = load i32, i32* %i.addr, align 4
   %add.ptr = getelementptr inbounds i32, i32* %0, i32 %1
   %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 10
-  %2 = load i32* %add.ptr1, align 4
+  %2 = load i32, i32* %add.ptr1, align 4
   %sub = sub nsw i32 %2, 1
   store i32 %sub, i32* %add.ptr1, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/Hexagon/memops2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/memops2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/memops2.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/memops2.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define void @f(i16* nocapture %p) nounwi
 entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1
   %add.ptr = getelementptr inbounds i16, i16* %p, i32 10
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %sub = add nsw i32 %conv2, 65535
   %conv1 = trunc i32 %sub to i16
@@ -19,7 +19,7 @@ entry:
 ; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1
   %add.ptr.sum = add i32 %i, 10
   %add.ptr1 = getelementptr inbounds i16, i16* %p, i32 %add.ptr.sum
-  %0 = load i16* %add.ptr1, align 2
+  %0 = load i16, i16* %add.ptr1, align 2
   %conv3 = zext i16 %0 to i32
   %sub = add nsw i32 %conv3, 65535
   %conv2 = trunc i32 %sub to i16

Modified: llvm/trunk/test/CodeGen/Hexagon/memops3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/memops3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/memops3.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/memops3.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define void @f(i8* nocapture %p) nounwin
 entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1
   %add.ptr = getelementptr inbounds i8, i8* %p, i32 10
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %sub = add nsw i32 %conv, 255
   %conv1 = trunc i32 %sub to i8
@@ -19,7 +19,7 @@ entry:
 ; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1
   %add.ptr.sum = add i32 %i, 10
   %add.ptr1 = getelementptr inbounds i8, i8* %p, i32 %add.ptr.sum
-  %0 = load i8* %add.ptr1, align 1
+  %0 = load i8, i8* %add.ptr1, align 1
   %conv = zext i8 %0 to i32
   %sub = add nsw i32 %conv, 255
   %conv2 = trunc i32 %sub to i8

Modified: llvm/trunk/test/CodeGen/Hexagon/misaligned-access.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/misaligned-access.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/misaligned-access.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/misaligned-access.ll Fri Feb 27 15:17:42 2015
@@ -7,10 +7,10 @@ declare i32 @_hi(i64) #1
 define i32 @CSDRSEARCH_executeSearchManager() #0 {
 entry:
   %temp = alloca i32, align 4
-  %0 = load i32* @temp1, align 4
+  %0 = load i32, i32* @temp1, align 4
   store i32 %0, i32* %temp, align 4
   %1 = bitcast i32* %temp to i64*
-  %2 = load i64* %1, align 8
+  %2 = load i64, i64* %1, align 8
   %call = call i32 @_hi(i64 %2)
   ret i32 %call
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/mpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/mpy.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/mpy.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/mpy.ll Fri Feb 27 15:17:42 2015
@@ -9,10 +9,10 @@ entry:
   store i32 %acc, i32* %acc.addr, align 4
   store i32 %num, i32* %num.addr, align 4
   store i32 %num2, i32* %num2.addr, align 4
-  %0 = load i32* %num.addr, align 4
-  %1 = load i32* %acc.addr, align 4
+  %0 = load i32, i32* %num.addr, align 4
+  %1 = load i32, i32* %acc.addr, align 4
   %mul = mul nsw i32 %0, %1
-  %2 = load i32* %num2.addr, align 4
+  %2 = load i32, i32* %num2.addr, align 4
   %add = add nsw i32 %mul, %2
   store i32 %add, i32* %num.addr, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/Hexagon/newvaluejump.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/newvaluejump.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/newvaluejump.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/newvaluejump.ll Fri Feb 27 15:17:42 2015
@@ -9,10 +9,10 @@ entry:
 ; CHECK: if (cmp.eq(r{{[0-9]+}}.new, #0)) jump{{.}}
   %addr1 = alloca i32, align 4
   %addr2 = alloca i32, align 4
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   store i32 %0, i32* %addr1, align 4
   call void @bar(i32 1, i32 2)
-  %1 = load i32* @j, align 4
+  %1 = load i32, i32* @j, align 4
   %tobool = icmp ne i32 %1, 0
   br i1 %tobool, label %if.then, label %if.else
 

Modified: llvm/trunk/test/CodeGen/Hexagon/newvaluejump2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/newvaluejump2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/newvaluejump2.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/newvaluejump2.ll Fri Feb 27 15:17:42 2015
@@ -7,9 +7,9 @@ define i32 @main() nounwind {
 entry:
 ; CHECK: if (cmp.gt(r{{[0-9]+}}.new, r{{[0-9]+}})) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}}
   %Reg2 = alloca i8, align 1
-  %0 = load i8* %Reg2, align 1
+  %0 = load i8, i8* %Reg2, align 1
   %conv0 = zext i8 %0 to i32
-  %1 = load i8* @Reg, align 1
+  %1 = load i8, i8* @Reg, align 1
   %conv1 = zext i8 %1 to i32
   %tobool = icmp sle i32 %conv0, %conv1
   br i1 %tobool, label %if.then, label %if.else

Modified: llvm/trunk/test/CodeGen/Hexagon/newvaluestore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/newvaluestore.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/newvaluestore.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/newvaluestore.ll Fri Feb 27 15:17:42 2015
@@ -11,11 +11,11 @@ entry:
   %number1 = alloca i32, align 4
   %number2 = alloca i32, align 4
   %number3 = alloca i32, align 4
-  %0 = load i32 * @i, align 4
+  %0 = load i32 , i32 * @i, align 4
   store i32 %0, i32* %number1, align 4
-  %1 = load i32 * @j, align 4
+  %1 = load i32 , i32 * @j, align 4
   store i32 %1, i32* %number2, align 4
-  %2 = load i32 * @k, align 4
+  %2 = load i32 , i32 * @k, align 4
   store i32 %2, i32* %number3, align 4
   ret i32 %0
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/opt-fabs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/opt-fabs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/opt-fabs.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/opt-fabs.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ define float @my_fabsf(float %x) nounwin
 entry:
   %x.addr = alloca float, align 4
   store float %x, float* %x.addr, align 4
-  %0 = load float* %x.addr, align 4
+  %0 = load float, float* %x.addr, align 4
   %call = call float @fabsf(float %0) readnone
   ret float %call
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/opt-fneg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/opt-fneg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/opt-fneg.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/opt-fneg.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ entry:
 ; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31)
   %x.addr = alloca float, align 4
   store float %x, float* %x.addr, align 4
-  %0 = load float* %x.addr, align 4
+  %0 = load float, float* %x.addr, align 4
   %sub = fsub float -0.000000e+00, %0
   ret float %sub
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/postinc-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/postinc-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/postinc-load.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/postinc-load.ll Fri Feb 27 15:17:42 2015
@@ -12,8 +12,8 @@ for.body:
   %arrayidx.phi = phi i32* [ %a, %entry ], [ %arrayidx.inc, %for.body ]
   %arrayidx1.phi = phi i16* [ %b, %entry ], [ %arrayidx1.inc, %for.body ]
   %sum.03 = phi i32 [ 0, %entry ], [ %add2, %for.body ]
-  %0 = load i32* %arrayidx.phi, align 4
-  %1 = load i16* %arrayidx1.phi, align 2
+  %0 = load i32, i32* %arrayidx.phi, align 4
+  %1 = load i16, i16* %arrayidx1.phi, align 2
   %conv = sext i16 %1 to i32
   %add = add i32 %0, %sum.03
   %add2 = add i32 %add, %conv

Modified: llvm/trunk/test/CodeGen/Hexagon/postinc-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/postinc-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/postinc-store.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/postinc-store.ll Fri Feb 27 15:17:42 2015
@@ -11,8 +11,8 @@ for.body:
   %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 10, %entry ]
   %arrayidx.phi = phi i32* [ %a, %entry ], [ %arrayidx.inc, %for.body ]
   %arrayidx1.phi = phi i16* [ %b, %entry ], [ %arrayidx1.inc, %for.body ]
-  %0 = load i32* %arrayidx.phi, align 4
-  %1 = load i16* %arrayidx1.phi, align 2
+  %0 = load i32, i32* %arrayidx.phi, align 4
+  %1 = load i16, i16* %arrayidx1.phi, align 2
   %conv = sext i16 %1 to i32
   %factor = mul i32 %0, 2
   %add3 = add i32 %factor, %conv

Modified: llvm/trunk/test/CodeGen/Hexagon/pred-gp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/pred-gp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/pred-gp.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/pred-gp.ll Fri Feb 27 15:17:42 2015
@@ -14,11 +14,11 @@ entry:
   br i1 %cmp, label %if.then, label %entry.if.end_crit_edge
 
 entry.if.end_crit_edge:
-  %.pre = load i32* @c, align 4
+  %.pre = load i32, i32* @c, align 4
   br label %if.end
 
 if.then:
-  %0 = load i32* @d, align 4
+  %0 = load i32, i32* @d, align 4
   store i32 %0, i32* @c, align 4
   br label %if.end
 

Modified: llvm/trunk/test/CodeGen/Hexagon/pred-instrs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/pred-instrs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/pred-instrs.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/pred-instrs.ll Fri Feb 27 15:17:42 2015
@@ -25,6 +25,6 @@ if.else:
 if.end:                                           ; preds = %if.else, %if.then
   %storemerge = phi i32 [ %and, %if.else ], [ %shl, %if.then ]
   store i32 %storemerge, i32* @a, align 4
-  %0 = load i32* @d, align 4
+  %0 = load i32, i32* @d, align 4
   ret i32 %0
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/remove_lsr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/remove_lsr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/remove_lsr.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/remove_lsr.ll Fri Feb 27 15:17:42 2015
@@ -54,9 +54,9 @@ for.body:
   %7 = trunc i64 %6 to i32
   %8 = tail call i32 @llvm.hexagon.C2.mux(i32 %conv8, i32 %5, i32 %7)
   store i32 %8, i32* %lsr.iv2931, align 4
-  %srcval = load i64* %lsr.iv27, align 8
-  %9 = load i8* %lsr.iv40, align 1
-  %10 = load i8* %lsr.iv37, align 1
+  %srcval = load i64, i64* %lsr.iv27, align 8
+  %9 = load i8, i8* %lsr.iv40, align 1
+  %10 = load i8, i8* %lsr.iv37, align 1
   %lftr.wideiv = trunc i32 %lsr.iv42 to i8
   %exitcond = icmp eq i8 %lftr.wideiv, 32
   %scevgep26 = getelementptr %union.vect64, %union.vect64* %lsr.iv, i32 1

Modified: llvm/trunk/test/CodeGen/Hexagon/static.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/static.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/static.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/static.ll Fri Feb 27 15:17:42 2015
@@ -10,10 +10,10 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load i32* @num, align 4
-  %1 = load i32* @acc, align 4
+  %0 = load i32, i32* @num, align 4
+  %1 = load i32, i32* @acc, align 4
   %mul = mul nsw i32 %0, %1
-  %2 = load i32* @val, align 4
+  %2 = load i32, i32* @val, align 4
   %add = add nsw i32 %mul, %2
   store i32 %add, i32* @num, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/Hexagon/struct_args.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/struct_args.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/struct_args.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/struct_args.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load i64* bitcast (%struct.small* @s1 to i64*), align 1
+  %0 = load i64, i64* bitcast (%struct.small* @s1 to i64*), align 1
   call void @bar(i64 %0)
   ret void
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/tfr-to-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/tfr-to-combine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/tfr-to-combine.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/tfr-to-combine.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ define i64 @test2() #0 {
 ; CHECK: combine(#0, r{{[0-9]+}})
 entry:
   store i16 0, i16* @a, align 2
-  %0 = load i16* @c, align 2
+  %0 = load i16, i16* @c, align 2
   %conv2 = zext i16 %0 to i64
   ret i64 %conv2
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/union-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/union-1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/union-1.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/union-1.ll Fri Feb 27 15:17:42 2015
@@ -5,10 +5,10 @@
 
 define void @word(i32* nocapture %a) nounwind {
 entry:
-  %0 = load i32* %a, align 4
+  %0 = load i32, i32* %a, align 4
   %1 = zext i32 %0 to i64
   %add.ptr = getelementptr inbounds i32, i32* %a, i32 1
-  %2 = load i32* %add.ptr, align 4
+  %2 = load i32, i32* %add.ptr, align 4
   %3 = zext i32 %2 to i64
   %4 = shl nuw i64 %3, 32
   %ins = or i64 %4, %1

Modified: llvm/trunk/test/CodeGen/Hexagon/vaddh.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vaddh.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vaddh.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/vaddh.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load i32* @j, align 4
-  %1 = load i32* @k, align 4
+  %0 = load i32, i32* @j, align 4
+  %1 = load i32, i32* @k, align 4
   %2 = call i32 @llvm.hexagon.A2.svaddh(i32 %0, i32 %1)
   store i32 %2, i32* @k, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/Hexagon/validate-offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/validate-offset.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/validate-offset.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/validate-offset.ll Fri Feb 27 15:17:42 2015
@@ -11,26 +11,26 @@ entry:
   %b.addr = alloca i32, align 4
   store i32 %a, i32* %a.addr, align 4
   store i32 %b, i32* %b.addr, align 4
-  %0 = load i32* %a.addr, align 4
-  %1 = load i32* %b.addr, align 4
+  %0 = load i32, i32* %a.addr, align 4
+  %1 = load i32, i32* %b.addr, align 4
   %cmp = icmp sgt i32 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:
-  %2 = load i32* %a.addr, align 4
-  %3 = load i32* %b.addr, align 4
+  %2 = load i32, i32* %a.addr, align 4
+  %3 = load i32, i32* %b.addr, align 4
   %add = add nsw i32 %2, %3
   store i32 %add, i32* %retval
   br label %return
 
 if.else:
-  %4 = load i32* %a.addr, align 4
-  %5 = load i32* %b.addr, align 4
+  %4 = load i32, i32* %a.addr, align 4
+  %5 = load i32, i32* %b.addr, align 4
   %sub = sub nsw i32 %4, %5
   store i32 %sub, i32* %retval
   br label %return
 
 return:
-  %6 = load i32* %retval
+  %6 = load i32, i32* %retval
   ret i32 %6
 }

Modified: llvm/trunk/test/CodeGen/Hexagon/zextloadi1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/zextloadi1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/zextloadi1.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/zextloadi1.ll Fri Feb 27 15:17:42 2015
@@ -13,13 +13,13 @@
 @i129_s = external global i129
 
 define void @i129_ls() nounwind  {
-        %tmp = load i129* @i129_l
+        %tmp = load i129, i129* @i129_l
         store i129 %tmp, i129* @i129_s
         ret void
 }
 
 define void @i65_ls() nounwind  {
-        %tmp = load i65* @i65_l
+        %tmp = load i65, i65* @i65_l
         store i65 %tmp, i65* @i65_s
         ret void
 }

Modified: llvm/trunk/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ target triple = "msp430-unknown-linux-gn
 
 define void @uip_arp_arpin() nounwind {
 entry:
-	%tmp = load volatile i16* @uip_len		; <i16> [#uses=1]
+	%tmp = load volatile i16, i16* @uip_len		; <i16> [#uses=1]
 	%cmp = icmp ult i16 %tmp, 42		; <i1> [#uses=1]
 	store volatile i16 0, i16* @uip_len
 	br i1 %cmp, label %if.then, label %if.end

Modified: llvm/trunk/test/CodeGen/MSP430/2009-05-17-Rot.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/2009-05-17-Rot.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/2009-05-17-Rot.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/2009-05-17-Rot.ll Fri Feb 27 15:17:42 2015
@@ -4,14 +4,14 @@ define i16 @rol1u16(i16 %x.arg) nounwind
         %retval = alloca i16
         %x = alloca i16
         store i16 %x.arg, i16* %x
-        %1 = load i16* %x
+        %1 = load i16, i16* %x
         %2 = shl i16 %1, 1
-        %3 = load i16* %x
+        %3 = load i16, i16* %x
         %4 = lshr i16 %3, 15
         %5 = or i16 %2, %4
         store i16 %5, i16* %retval
         br label %return
 return:
-        %6 = load i16* %retval
+        %6 = load i16, i16* %retval
         ret i16 %6
 }

Modified: llvm/trunk/test/CodeGen/MSP430/2009-05-17-Shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/2009-05-17-Shift.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/2009-05-17-Shift.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/2009-05-17-Shift.ll Fri Feb 27 15:17:42 2015
@@ -4,12 +4,12 @@ define i16 @lsr2u16(i16 %x.arg) nounwind
         %retval = alloca i16
         %x = alloca i16
         store i16 %x.arg, i16* %x
-        %1 = load i16* %x
+        %1 = load i16, i16* %x
         %2 = lshr i16 %1, 2
         store i16 %2, i16* %retval
         br label %return
 return:
-        %3 = load i16* %retval
+        %3 = load i16, i16* %retval
         ret i16 %3
 
 }

Modified: llvm/trunk/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ define i16 @foo() nounwind readnone {
 entry:
   %result = alloca i16, align 1                   ; <i16*> [#uses=2]
   store volatile i16 0, i16* %result
-  %tmp = load volatile i16* %result               ; <i16> [#uses=1]
+  %tmp = load volatile i16, i16* %result               ; <i16> [#uses=1]
   ret i16 %tmp
 }
 
@@ -23,7 +23,7 @@ while.cond:
 while.end:                                        ; preds = %while.cond
   %result.i = alloca i16, align 1                 ; <i16*> [#uses=2]
   store volatile i16 0, i16* %result.i
-  %tmp.i = load volatile i16* %result.i           ; <i16> [#uses=0]
+  %tmp.i = load volatile i16, i16* %result.i           ; <i16> [#uses=0]
   ret i16 0
 }
 

Modified: llvm/trunk/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll Fri Feb 27 15:17:42 2015
@@ -11,12 +11,12 @@ entry:
   %x.addr = alloca i8                             ; <i8*> [#uses=2]
   %tmp = alloca i8, align 1                       ; <i8*> [#uses=2]
   store i8 %x, i8* %x.addr
-  %tmp1 = load volatile i8* @"\010x0021"          ; <i8> [#uses=1]
+  %tmp1 = load volatile i8, i8* @"\010x0021"          ; <i8> [#uses=1]
   store i8 %tmp1, i8* %tmp
-  %tmp2 = load i8* %x.addr                        ; <i8> [#uses=1]
+  %tmp2 = load i8, i8* %x.addr                        ; <i8> [#uses=1]
   store volatile i8 %tmp2, i8* @"\010x0021"
-  %tmp3 = load i8* %tmp                           ; <i8> [#uses=1]
+  %tmp3 = load i8, i8* %tmp                           ; <i8> [#uses=1]
   store i8 %tmp3, i8* %retval
-  %0 = load i8* %retval                           ; <i8> [#uses=1]
+  %0 = load i8, i8* %retval                           ; <i8> [#uses=1]
   ret i8 %0
 }

Modified: llvm/trunk/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define void @foo() nounwind {
 entry:
 	%r = alloca i8		; <i8*> [#uses=2]
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	load volatile i8* %r, align 1		; <i8>:0 [#uses=1]
+	load volatile i8, i8* %r, align 1		; <i8>:0 [#uses=1]
 	or i8 %0, 1		; <i8>:1 [#uses=1]
 	store volatile i8 %1, i8* %r, align 1
 	br label %return

Modified: llvm/trunk/test/CodeGen/MSP430/2009-11-08-InvalidResNo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/2009-11-08-InvalidResNo.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/2009-11-08-InvalidResNo.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/2009-11-08-InvalidResNo.ll Fri Feb 27 15:17:42 2015
@@ -46,8 +46,8 @@ while.cond36.i:
   br i1 undef, label %do.body, label %while.body41.i
 
 while.body41.i:                                   ; preds = %while.cond36.i
-  %tmp43.i = load i8** @foo                      ; <i8*> [#uses=2]
-  %tmp44.i = load i8* %tmp43.i                    ; <i8> [#uses=1]
+  %tmp43.i = load i8*, i8** @foo                      ; <i8*> [#uses=2]
+  %tmp44.i = load i8, i8* %tmp43.i                    ; <i8> [#uses=1]
   %ptrincdec50.i = getelementptr inbounds i8, i8* %tmp43.i, i16 1 ; <i8*> [#uses=1]
   store i8* %ptrincdec50.i, i8** @foo
   %cmp55.i = icmp eq i8 %tmp44.i, %c              ; <i1> [#uses=1]

Modified: llvm/trunk/test/CodeGen/MSP430/2010-05-01-CombinerAnd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/2010-05-01-CombinerAnd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/2010-05-01-CombinerAnd.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/2010-05-01-CombinerAnd.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ land.end:
   br i1 %0, label %while.body, label %while.end
 
 while.body:                                       ; preds = %land.end
-  %tmp4 = load i16* undef                         ; <i16> [#uses=0]
+  %tmp4 = load i16, i16* undef                         ; <i16> [#uses=0]
   br label %while.cond
 
 while.end:                                        ; preds = %land.end

Modified: llvm/trunk/test/CodeGen/MSP430/AddrMode-bis-rx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/AddrMode-bis-rx.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/AddrMode-bis-rx.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/AddrMode-bis-rx.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@ target datalayout = "e-p:16:16:16-i1:8:8
 target triple = "msp430-generic-generic"
 
 define i16 @am1(i16 %x, i16* %a) nounwind {
-	%1 = load i16* %a
+	%1 = load i16, i16* %a
 	%2 = or i16 %1,%x
 	ret i16 %2
 }
@@ -13,7 +13,7 @@ define i16 @am1(i16 %x, i16* %a) nounwin
 @foo = external global i16
 
 define i16 @am2(i16 %x) nounwind {
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = or i16 %1,%x
 	ret i16 %2
 }
@@ -24,7 +24,7 @@ define i16 @am2(i16 %x) nounwind {
 
 define i8 @am3(i8 %x, i16 %n) nounwind {
 	%1 = getelementptr [2 x i8], [2 x i8]* @bar, i16 0, i16 %n
-	%2 = load i8* %1
+	%2 = load i8, i8* %1
 	%3 = or i8 %2,%x
 	ret i8 %3
 }
@@ -32,7 +32,7 @@ define i8 @am3(i8 %x, i16 %n) nounwind {
 ; CHECK:		bis.b	bar(r14), r15
 
 define i16 @am4(i16 %x) nounwind {
-	%1 = load volatile i16* inttoptr(i16 32 to i16*)
+	%1 = load volatile i16, i16* inttoptr(i16 32 to i16*)
 	%2 = or i16 %1,%x
 	ret i16 %2
 }
@@ -41,7 +41,7 @@ define i16 @am4(i16 %x) nounwind {
 
 define i16 @am5(i16 %x, i16* %a) nounwind {
 	%1 = getelementptr i16, i16* %a, i16 2
-	%2 = load i16* %1
+	%2 = load i16, i16* %1
 	%3 = or i16 %2,%x
 	ret i16 %3
 }
@@ -52,7 +52,7 @@ define i16 @am5(i16 %x, i16* %a) nounwin
 @baz = common global %S zeroinitializer, align 1
 
 define i16 @am6(i16 %x) nounwind {
-	%1 = load i16* getelementptr (%S* @baz, i32 0, i32 1)
+	%1 = load i16, i16* getelementptr (%S* @baz, i32 0, i32 1)
 	%2 = or i16 %1,%x
 	ret i16 %2
 }
@@ -65,7 +65,7 @@ define i16 @am6(i16 %x) nounwind {
 define i8 @am7(i8 %x, i16 %n) nounwind {
 	%1 = getelementptr %T, %T* @duh, i32 0, i32 1
 	%2 = getelementptr [2 x i8], [2 x i8]* %1, i16 0, i16 %n
-	%3= load i8* %2
+	%3= load i8, i8* %2
 	%4 = or i8 %3,%x
 	ret i8 %4
 }

Modified: llvm/trunk/test/CodeGen/MSP430/AddrMode-bis-xr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/AddrMode-bis-xr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/AddrMode-bis-xr.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/AddrMode-bis-xr.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@ target datalayout = "e-p:16:16:16-i8:8:8
 target triple = "msp430-generic-generic"
 
 define void @am1(i16* %a, i16 %x) nounwind {
-	%1 = load i16* %a
+	%1 = load i16, i16* %a
 	%2 = or i16 %x, %1
 	store i16 %2, i16* %a
 	ret void
@@ -14,7 +14,7 @@ define void @am1(i16* %a, i16 %x) nounwi
 @foo = external global i16
 
 define void @am2(i16 %x) nounwind {
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = or i16 %x, %1
 	store i16 %2, i16* @foo
 	ret void
@@ -26,7 +26,7 @@ define void @am2(i16 %x) nounwind {
 
 define void @am3(i16 %i, i8 %x) nounwind {
 	%1 = getelementptr [2 x i8], [2 x i8]* @bar, i16 0, i16 %i
-	%2 = load i8* %1
+	%2 = load i8, i8* %1
 	%3 = or i8 %x, %2
 	store i8 %3, i8* %1
 	ret void
@@ -35,7 +35,7 @@ define void @am3(i16 %i, i8 %x) nounwind
 ; CHECK:		bis.b	r14, bar(r15)
 
 define void @am4(i16 %x) nounwind {
-	%1 = load volatile i16* inttoptr(i16 32 to i16*)
+	%1 = load volatile i16, i16* inttoptr(i16 32 to i16*)
 	%2 = or i16 %x, %1
 	store volatile i16 %2, i16* inttoptr(i16 32 to i16*)
 	ret void
@@ -45,7 +45,7 @@ define void @am4(i16 %x) nounwind {
 
 define void @am5(i16* %a, i16 %x) readonly {
 	%1 = getelementptr inbounds i16, i16* %a, i16 2
-	%2 = load i16* %1
+	%2 = load i16, i16* %1
 	%3 = or i16 %x, %2
 	store i16 %3, i16* %1
 	ret void
@@ -57,7 +57,7 @@ define void @am5(i16* %a, i16 %x) readon
 @baz = common global %S zeroinitializer
 
 define void @am6(i16 %x) nounwind {
-	%1 = load i16* getelementptr (%S* @baz, i32 0, i32 1)
+	%1 = load i16, i16* getelementptr (%S* @baz, i32 0, i32 1)
 	%2 = or i16 %x, %1
 	store i16 %2, i16* getelementptr (%S* @baz, i32 0, i32 1)
 	ret void
@@ -71,7 +71,7 @@ define void @am6(i16 %x) nounwind {
 define void @am7(i16 %n, i8 %x) nounwind {
 	%1 = getelementptr %T, %T* @duh, i32 0, i32 1
 	%2 = getelementptr [2 x i8], [2 x i8]* %1, i16 0, i16 %n
-	%3 = load i8* %2
+	%3 = load i8, i8* %2
 	%4 = or i8 %x, %3
 	store i8 %4, i8* %2
 	ret void

Modified: llvm/trunk/test/CodeGen/MSP430/AddrMode-mov-rx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/AddrMode-mov-rx.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/AddrMode-mov-rx.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/AddrMode-mov-rx.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@ target datalayout = "e-p:16:16:16-i1:8:8
 target triple = "msp430-generic-generic"
 
 define i16 @am1(i16* %a) nounwind {
-	%1 = load i16* %a
+	%1 = load i16, i16* %a
 	ret i16 %1
 }
 ; CHECK-LABEL: am1:
@@ -12,7 +12,7 @@ define i16 @am1(i16* %a) nounwind {
 @foo = external global i16
 
 define i16 @am2() nounwind {
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	ret i16 %1
 }
 ; CHECK-LABEL: am2:
@@ -22,14 +22,14 @@ define i16 @am2() nounwind {
 
 define i8 @am3(i16 %n) nounwind {
 	%1 = getelementptr [2 x i8], [2 x i8]* @bar, i16 0, i16 %n
-	%2 = load i8* %1
+	%2 = load i8, i8* %1
 	ret i8 %2
 }
 ; CHECK-LABEL: am3:
 ; CHECK:		mov.b	bar(r15), r15
 
 define i16 @am4() nounwind {
-	%1 = load volatile i16* inttoptr(i16 32 to i16*)
+	%1 = load volatile i16, i16* inttoptr(i16 32 to i16*)
 	ret i16 %1
 }
 ; CHECK-LABEL: am4:
@@ -37,7 +37,7 @@ define i16 @am4() nounwind {
 
 define i16 @am5(i16* %a) nounwind {
 	%1 = getelementptr i16, i16* %a, i16 2
-	%2 = load i16* %1
+	%2 = load i16, i16* %1
 	ret i16 %2
 }
 ; CHECK-LABEL: am5:
@@ -47,7 +47,7 @@ define i16 @am5(i16* %a) nounwind {
 @baz = common global %S zeroinitializer, align 1
 
 define i16 @am6() nounwind {
-	%1 = load i16* getelementptr (%S* @baz, i32 0, i32 1)
+	%1 = load i16, i16* getelementptr (%S* @baz, i32 0, i32 1)
 	ret i16 %1
 }
 ; CHECK-LABEL: am6:
@@ -59,7 +59,7 @@ define i16 @am6() nounwind {
 define i8 @am7(i16 %n) nounwind {
 	%1 = getelementptr %T, %T* @duh, i32 0, i32 1
 	%2 = getelementptr [2 x i8], [2 x i8]* %1, i16 0, i16 %n
-	%3= load i8* %2
+	%3= load i8, i8* %2
 	ret i8 %3
 }
 ; CHECK-LABEL: am7:

Modified: llvm/trunk/test/CodeGen/MSP430/Inst16mi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/Inst16mi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/Inst16mi.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/Inst16mi.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ define void @mov() nounwind {
 define void @add() nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add.w	#2, &foo
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = add i16 %1, 2
 	store i16 %2, i16 * @foo
 	ret void
@@ -23,7 +23,7 @@ define void @add() nounwind {
 define void @and() nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and.w	#2, &foo
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = and i16 %1, 2
 	store i16 %2, i16 * @foo
 	ret void
@@ -32,7 +32,7 @@ define void @and() nounwind {
 define void @bis() nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis.w	#2, &foo
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = or i16 %1, 2
 	store i16 %2, i16 * @foo
 	ret void
@@ -41,7 +41,7 @@ define void @bis() nounwind {
 define void @xor() nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor.w	#2, &foo
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = xor i16 %1, 2
 	store i16 %2, i16 * @foo
 	ret void

Modified: llvm/trunk/test/CodeGen/MSP430/Inst16mm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/Inst16mm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/Inst16mm.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/Inst16mm.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ target triple = "msp430-generic-generic"
 define void @mov() nounwind {
 ; CHECK-LABEL: mov:
 ; CHECK: mov.w	&bar, &foo
-        %1 = load i16* @bar
+        %1 = load i16, i16* @bar
         store i16 %1, i16* @foo
         ret void
 }
@@ -15,8 +15,8 @@ define void @mov() nounwind {
 define void @add() nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add.w	&bar, &foo
-	%1 = load i16* @bar
-	%2 = load i16* @foo
+	%1 = load i16, i16* @bar
+	%2 = load i16, i16* @foo
 	%3 = add i16 %2, %1
 	store i16 %3, i16* @foo
 	ret void
@@ -25,8 +25,8 @@ define void @add() nounwind {
 define void @and() nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and.w	&bar, &foo
-	%1 = load i16* @bar
-	%2 = load i16* @foo
+	%1 = load i16, i16* @bar
+	%2 = load i16, i16* @foo
 	%3 = and i16 %2, %1
 	store i16 %3, i16* @foo
 	ret void
@@ -35,8 +35,8 @@ define void @and() nounwind {
 define void @bis() nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis.w	&bar, &foo
-	%1 = load i16* @bar
-	%2 = load i16* @foo
+	%1 = load i16, i16* @bar
+	%2 = load i16, i16* @foo
 	%3 = or i16 %2, %1
 	store i16 %3, i16* @foo
 	ret void
@@ -45,8 +45,8 @@ define void @bis() nounwind {
 define void @xor() nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor.w	&bar, &foo
-	%1 = load i16* @bar
-	%2 = load i16* @foo
+	%1 = load i16, i16* @bar
+	%2 = load i16, i16* @foo
 	%3 = xor i16 %2, %1
 	store i16 %3, i16* @foo
 	ret void
@@ -58,10 +58,10 @@ entry:
  %x = alloca i32, align 2                        ; <i32*> [#uses=1]
  %y = alloca i32, align 2                        ; <i32*> [#uses=1]
  store i16 0, i16* %retval
- %tmp = load i32* %y                             ; <i32> [#uses=1]
+ %tmp = load i32, i32* %y                             ; <i32> [#uses=1]
  store i32 %tmp, i32* %x
  store i16 0, i16* %retval
- %0 = load i16* %retval                          ; <i16> [#uses=1]
+ %0 = load i16, i16* %retval                          ; <i16> [#uses=1]
  ret i16 %0
 ; CHECK-LABEL: mov2:
 ; CHECK:	mov.w	2(r1), 6(r1)

Modified: llvm/trunk/test/CodeGen/MSP430/Inst16mr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/Inst16mr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/Inst16mr.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/Inst16mr.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ define void @mov(i16 %a) nounwind {
 define void @add(i16 %a) nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add.w	r15, &foo
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = add i16 %a, %1
 	store i16 %2, i16* @foo
 	ret void
@@ -22,7 +22,7 @@ define void @add(i16 %a) nounwind {
 define void @and(i16 %a) nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and.w	r15, &foo
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = and i16 %a, %1
 	store i16 %2, i16* @foo
 	ret void
@@ -31,7 +31,7 @@ define void @and(i16 %a) nounwind {
 define void @bis(i16 %a) nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis.w	r15, &foo
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = or i16 %a, %1
 	store i16 %2, i16* @foo
 	ret void
@@ -41,7 +41,7 @@ define void @bic(i16 zeroext %m) nounwin
 ; CHECK-LABEL: bic:
 ; CHECK: bic.w   r15, &foo
         %1 = xor i16 %m, -1
-        %2 = load i16* @foo
+        %2 = load i16, i16* @foo
         %3 = and i16 %2, %1
         store i16 %3, i16* @foo
         ret void
@@ -50,7 +50,7 @@ define void @bic(i16 zeroext %m) nounwin
 define void @xor(i16 %a) nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor.w	r15, &foo
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = xor i16 %a, %1
 	store i16 %2, i16* @foo
 	ret void

Modified: llvm/trunk/test/CodeGen/MSP430/Inst16rm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/Inst16rm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/Inst16rm.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/Inst16rm.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ target triple = "msp430-generic-generic"
 define i16 @add(i16 %a) nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add.w	&foo, r15
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = add i16 %a, %1
 	ret i16 %2
 }
@@ -14,7 +14,7 @@ define i16 @add(i16 %a) nounwind {
 define i16 @and(i16 %a) nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and.w	&foo, r15
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = and i16 %a, %1
 	ret i16 %2
 }
@@ -22,7 +22,7 @@ define i16 @and(i16 %a) nounwind {
 define i16 @bis(i16 %a) nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis.w	&foo, r15
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = or i16 %a, %1
 	ret i16 %2
 }
@@ -30,7 +30,7 @@ define i16 @bis(i16 %a) nounwind {
 define i16  @bic(i16 %a) nounwind {
 ; CHECK-LABEL: bic:
 ; CHECK: bic.w	&foo, r15
-        %1 = load i16* @foo
+        %1 = load i16, i16* @foo
         %2 = xor i16 %1, -1
         %3 = and i16 %a, %2
         ret i16 %3
@@ -39,7 +39,7 @@ define i16  @bic(i16 %a) nounwind {
 define i16 @xor(i16 %a) nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor.w	&foo, r15
-	%1 = load i16* @foo
+	%1 = load i16, i16* @foo
 	%2 = xor i16 %a, %1
 	ret i16 %2
 }

Modified: llvm/trunk/test/CodeGen/MSP430/Inst8mi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/Inst8mi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/Inst8mi.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/Inst8mi.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ define void @mov() nounwind {
 define void @add() nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add.b	#2, &foo
-	%1 = load i8* @foo
+	%1 = load i8, i8* @foo
 	%2 = add i8 %1, 2
 	store i8 %2, i8 * @foo
 	ret void
@@ -22,7 +22,7 @@ define void @add() nounwind {
 define void @and() nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and.b	#2, &foo
-	%1 = load i8* @foo
+	%1 = load i8, i8* @foo
 	%2 = and i8 %1, 2
 	store i8 %2, i8 * @foo
 	ret void
@@ -31,7 +31,7 @@ define void @and() nounwind {
 define void @bis() nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis.b	#2, &foo
-	%1 = load i8* @foo
+	%1 = load i8, i8* @foo
 	%2 = or i8 %1, 2
 	store i8 %2, i8 * @foo
 	ret void
@@ -40,7 +40,7 @@ define void @bis() nounwind {
 define void @xor() nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor.b	#2, &foo
-	%1 = load i8* @foo
+	%1 = load i8, i8* @foo
 	%2 = xor i8 %1, 2
 	store i8 %2, i8 * @foo
 	ret void

Modified: llvm/trunk/test/CodeGen/MSP430/Inst8mm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/Inst8mm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/Inst8mm.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/Inst8mm.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ target triple = "msp430-generic-generic"
 define void @mov() nounwind {
 ; CHECK-LABEL: mov:
 ; CHECK: mov.b	&bar, &foo
-        %1 = load i8* @bar
+        %1 = load i8, i8* @bar
         store i8 %1, i8* @foo
         ret void
 }
@@ -16,8 +16,8 @@ define void @mov() nounwind {
 define void @add() nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add.b	&bar, &foo
-	%1 = load i8* @bar
-	%2 = load i8* @foo
+	%1 = load i8, i8* @bar
+	%2 = load i8, i8* @foo
 	%3 = add i8 %2, %1
 	store i8 %3, i8* @foo
 	ret void
@@ -26,8 +26,8 @@ define void @add() nounwind {
 define void @and() nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and.b	&bar, &foo
-	%1 = load i8* @bar
-	%2 = load i8* @foo
+	%1 = load i8, i8* @bar
+	%2 = load i8, i8* @foo
 	%3 = and i8 %2, %1
 	store i8 %3, i8* @foo
 	ret void
@@ -36,8 +36,8 @@ define void @and() nounwind {
 define void @bis() nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis.b	&bar, &foo
-	%1 = load i8* @bar
-	%2 = load i8* @foo
+	%1 = load i8, i8* @bar
+	%2 = load i8, i8* @foo
 	%3 = or i8 %2, %1
 	store i8 %3, i8* @foo
 	ret void
@@ -46,8 +46,8 @@ define void @bis() nounwind {
 define void @xor() nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor.b	&bar, &foo
-	%1 = load i8* @bar
-	%2 = load i8* @foo
+	%1 = load i8, i8* @bar
+	%2 = load i8, i8* @foo
 	%3 = xor i8 %2, %1
 	store i8 %3, i8* @foo
 	ret void

Modified: llvm/trunk/test/CodeGen/MSP430/Inst8mr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/Inst8mr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/Inst8mr.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/Inst8mr.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ define void @mov(i8 %a) nounwind {
 define void @and(i8 %a) nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and.b	r15, &foo
-	%1 = load i8* @foo
+	%1 = load i8, i8* @foo
 	%2 = and i8 %a, %1
 	store i8 %2, i8* @foo
 	ret void
@@ -22,7 +22,7 @@ define void @and(i8 %a) nounwind {
 define void @add(i8 %a) nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add.b	r15, &foo
-	%1 = load i8* @foo
+	%1 = load i8, i8* @foo
 	%2 = add i8 %a, %1
 	store i8 %2, i8* @foo
 	ret void
@@ -31,7 +31,7 @@ define void @add(i8 %a) nounwind {
 define void @bis(i8 %a) nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis.b	r15, &foo
-	%1 = load i8* @foo
+	%1 = load i8, i8* @foo
 	%2 = or i8 %a, %1
 	store i8 %2, i8* @foo
 	ret void
@@ -41,7 +41,7 @@ define void @bic(i8 zeroext %m) nounwind
 ; CHECK-LABEL: bic:
 ; CHECK: bic.b   r15, &foo
         %1 = xor i8 %m, -1
-        %2 = load i8* @foo
+        %2 = load i8, i8* @foo
         %3 = and i8 %2, %1
         store i8 %3, i8* @foo
         ret void
@@ -50,7 +50,7 @@ define void @bic(i8 zeroext %m) nounwind
 define void @xor(i8 %a) nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor.b	r15, &foo
-	%1 = load i8* @foo
+	%1 = load i8, i8* @foo
 	%2 = xor i8 %a, %1
 	store i8 %2, i8* @foo
 	ret void

Modified: llvm/trunk/test/CodeGen/MSP430/Inst8rm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/Inst8rm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/Inst8rm.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/Inst8rm.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ target triple = "msp430-generic-generic"
 define i8 @add(i8 %a) nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add.b	&foo, r15
-	%1 = load i8* @foo
+	%1 = load i8, i8* @foo
 	%2 = add i8 %a, %1
 	ret i8 %2
 }
@@ -14,7 +14,7 @@ define i8 @add(i8 %a) nounwind {
 define i8 @and(i8 %a) nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and.b	&foo, r15
-	%1 = load i8* @foo
+	%1 = load i8, i8* @foo
 	%2 = and i8 %a, %1
 	ret i8 %2
 }
@@ -22,7 +22,7 @@ define i8 @and(i8 %a) nounwind {
 define i8 @bis(i8 %a) nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis.b	&foo, r15
-	%1 = load i8* @foo
+	%1 = load i8, i8* @foo
 	%2 = or i8 %a, %1
 	ret i8 %2
 }
@@ -30,7 +30,7 @@ define i8 @bis(i8 %a) nounwind {
 define i8  @bic(i8 %a) nounwind {
 ; CHECK-LABEL: bic:
 ; CHECK: bic.b  &foo, r15
-        %1 = load i8* @foo
+        %1 = load i8, i8* @foo
         %2 = xor i8 %1, -1
         %3 = and i8 %a, %2
         ret i8 %3
@@ -39,7 +39,7 @@ define i8  @bic(i8 %a) nounwind {
 define i8 @xor(i8 %a) nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor.b	&foo, r15
-	%1 = load i8* @foo
+	%1 = load i8, i8* @foo
 	%2 = xor i8 %a, %1
 	ret i8 %2
 }

Modified: llvm/trunk/test/CodeGen/MSP430/bit.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/bit.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/bit.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/bit.ll Fri Feb 27 15:17:42 2015
@@ -33,7 +33,7 @@ define i8 @bitbir(i8 %a) nounwind {
 ; CHECK: bit.b	#15, r15
 
 define i8 @bitbmi() nounwind {
-	%t1 = load i8* @foo8
+	%t1 = load i8, i8* @foo8
 	%t2 = and i8 %t1, 15
 	%t3 = icmp ne i8 %t2, 0
 	%t4 = zext i1 %t3 to i8
@@ -43,7 +43,7 @@ define i8 @bitbmi() nounwind {
 ; CHECK: bit.b	#15, &foo8
 
 define i8 @bitbim() nounwind {
-	%t1 = load i8* @foo8
+	%t1 = load i8, i8* @foo8
 	%t2 = and i8 15, %t1
 	%t3 = icmp ne i8 %t2, 0
 	%t4 = zext i1 %t3 to i8
@@ -53,7 +53,7 @@ define i8 @bitbim() nounwind {
 ; CHECK: bit.b	#15, &foo8
 
 define i8 @bitbrm(i8 %a) nounwind {
-	%t1 = load i8* @foo8
+	%t1 = load i8, i8* @foo8
 	%t2 = and i8 %a, %t1
 	%t3 = icmp ne i8 %t2, 0
 	%t4 = zext i1 %t3 to i8
@@ -63,7 +63,7 @@ define i8 @bitbrm(i8 %a) nounwind {
 ; CHECK: bit.b	&foo8, r15
 
 define i8 @bitbmr(i8 %a) nounwind {
-	%t1 = load i8* @foo8
+	%t1 = load i8, i8* @foo8
 	%t2 = and i8 %t1, %a
 	%t3 = icmp ne i8 %t2, 0
 	%t4 = zext i1 %t3 to i8
@@ -73,8 +73,8 @@ define i8 @bitbmr(i8 %a) nounwind {
 ; CHECK: bit.b	r15, &foo8
 
 define i8 @bitbmm() nounwind {
-	%t1 = load i8* @foo8
-	%t2 = load i8* @bar8
+	%t1 = load i8, i8* @foo8
+	%t2 = load i8, i8* @bar8
 	%t3 = and i8 %t1, %t2
 	%t4 = icmp ne i8 %t3, 0
 	%t5 = zext i1 %t4 to i8
@@ -114,7 +114,7 @@ define i16 @bitwir(i16 %a) nounwind {
 ; CHECK: bit.w	#4080, r15
 
 define i16 @bitwmi() nounwind {
-	%t1 = load i16* @foo16
+	%t1 = load i16, i16* @foo16
 	%t2 = and i16 %t1, 4080
 	%t3 = icmp ne i16 %t2, 0
 	%t4 = zext i1 %t3 to i16
@@ -124,7 +124,7 @@ define i16 @bitwmi() nounwind {
 ; CHECK: bit.w	#4080, &foo16
 
 define i16 @bitwim() nounwind {
-	%t1 = load i16* @foo16
+	%t1 = load i16, i16* @foo16
 	%t2 = and i16 4080, %t1
 	%t3 = icmp ne i16 %t2, 0
 	%t4 = zext i1 %t3 to i16
@@ -134,7 +134,7 @@ define i16 @bitwim() nounwind {
 ; CHECK: bit.w	#4080, &foo16
 
 define i16 @bitwrm(i16 %a) nounwind {
-	%t1 = load i16* @foo16
+	%t1 = load i16, i16* @foo16
 	%t2 = and i16 %a, %t1
 	%t3 = icmp ne i16 %t2, 0
 	%t4 = zext i1 %t3 to i16
@@ -144,7 +144,7 @@ define i16 @bitwrm(i16 %a) nounwind {
 ; CHECK: bit.w	&foo16, r15
 
 define i16 @bitwmr(i16 %a) nounwind {
-	%t1 = load i16* @foo16
+	%t1 = load i16, i16* @foo16
 	%t2 = and i16 %t1, %a
 	%t3 = icmp ne i16 %t2, 0
 	%t4 = zext i1 %t3 to i16
@@ -154,8 +154,8 @@ define i16 @bitwmr(i16 %a) nounwind {
 ; CHECK: bit.w	r15, &foo16
 
 define i16 @bitwmm() nounwind {
-	%t1 = load i16* @foo16
-	%t2 = load i16* @bar16
+	%t1 = load i16, i16* @foo16
+	%t2 = load i16, i16* @bar16
 	%t3 = and i16 %t1, %t2
 	%t4 = icmp ne i16 %t3, 0
 	%t5 = zext i1 %t4 to i16

Modified: llvm/trunk/test/CodeGen/MSP430/byval.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/byval.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/byval.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/byval.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ entry:
 ; CHECK-LABEL: callee:
 ; CHECK: mov.w 2(r1), r15
   %0 = getelementptr inbounds %struct.Foo, %struct.Foo* %f, i32 0, i32 0
-  %1 = load i16* %0, align 2
+  %1 = load i16, i16* %0, align 2
   ret i16 %1
 }
 

Modified: llvm/trunk/test/CodeGen/MSP430/indirectbr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/indirectbr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/indirectbr.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/indirectbr.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 
 define internal i16 @foo(i16 %i) nounwind {
 entry:
-  %0 = load i8** @nextaddr, align 4               ; <i8*> [#uses=2]
+  %0 = load i8*, i8** @nextaddr, align 4               ; <i8*> [#uses=2]
   %1 = icmp eq i8* %0, null                       ; <i1> [#uses=1]
   br i1 %1, label %bb3, label %bb2
 
@@ -15,7 +15,7 @@ bb2:
 
 bb3:                                              ; preds = %entry
   %2 = getelementptr inbounds [5 x i8*], [5 x i8*]* @C.0.2070, i16 0, i16 %i ; <i8**> [#uses=1]
-  %gotovar.4.0.pre = load i8** %2, align 4        ; <i8*> [#uses=1]
+  %gotovar.4.0.pre = load i8*, i8** %2, align 4        ; <i8*> [#uses=1]
   br label %bb2
 
 L5:                                               ; preds = %bb2

Modified: llvm/trunk/test/CodeGen/MSP430/indirectbr2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/indirectbr2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/indirectbr2.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/indirectbr2.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 define internal i16 @foo(i16 %i) nounwind {
 entry:
   %tmp1 = getelementptr inbounds [5 x i8*], [5 x i8*]* @C.0.2070, i16 0, i16 %i ; <i8**> [#uses=1]
-  %gotovar.4.0 = load i8** %tmp1, align 4        ; <i8*> [#uses=1]
+  %gotovar.4.0 = load i8*, i8** %tmp1, align 4        ; <i8*> [#uses=1]
 ; CHECK: br .LC.0.2070(r12)
   indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
 

Modified: llvm/trunk/test/CodeGen/MSP430/inline-asm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/inline-asm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/inline-asm.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/inline-asm.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ define void @immmem() nounwind {
 }
 
 define void @mem() nounwind {
-        %fooval = load i16* @foo
+        %fooval = load i16, i16* @foo
         call void asm sideeffect "bic\09$0,r2", "m"(i16 %fooval) nounwind
         ret void
 }

Modified: llvm/trunk/test/CodeGen/MSP430/jumptable.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/jumptable.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/jumptable.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/jumptable.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ entry:
   %retval = alloca i16, align 2
   %i.addr = alloca i16, align 2
   store i16 %i, i16* %i.addr, align 2
-  %0 = load i16* %i.addr, align 2
+  %0 = load i16, i16* %i.addr, align 2
 ; CHECK: mov.w #2, r14
 ; CHECK: call #__mulhi3hw_noint
 ; CHECK: br .LJTI0_0(r15)
@@ -42,7 +42,7 @@ sw.default:
   br label %return
 
 return:                                           ; preds = %sw.default, %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb
-  %1 = load i16* %retval
+  %1 = load i16, i16* %retval
   ret i16 %1
 ; CHECK: ret
 }

Modified: llvm/trunk/test/CodeGen/MSP430/memset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/memset.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/memset.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/memset.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ target triple = "msp430---elf"
 define void @test() nounwind {
 entry:
 ; CHECK-LABEL: test:
-  %0 = load i8** @buf, align 2
+  %0 = load i8*, i8** @buf, align 2
 ; CHECK: mov.w &buf, r15
 ; CHECK-NEXT: mov.w #5, r14
 ; CHECK-NEXT: mov.w #128, r13

Modified: llvm/trunk/test/CodeGen/MSP430/misched-msp430.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/misched-msp430.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/misched-msp430.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/misched-msp430.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ target datalayout = "e-p:16:16:16-i8:8:8
 ; CHECK: ret
 define void @f() {
 entry:
-  %0 = load i16* @y, align 2
+  %0 = load i16, i16* @y, align 2
   store i16 %0, i16* @x, align 2
   ret void
 }

Modified: llvm/trunk/test/CodeGen/MSP430/mult-alt-generic-msp430.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/mult-alt-generic-msp430.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/mult-alt-generic-msp430.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/mult-alt-generic-msp430.ll Fri Feb 27 15:17:42 2015
@@ -33,10 +33,10 @@ entry:
   %in1 = alloca i16, align 2
   store i16 0, i16* %out0, align 2
   store i16 1, i16* %in1, align 2
-  %tmp = load i16* %in1, align 2
+  %tmp = load i16, i16* %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r,<r"(i16 %tmp) nounwind
   store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16* %in1, align 2
+  %tmp1 = load i16, i16* %in1, align 2
   %1 = call i16 asm "foo $1,$0", "=r,r<"(i16 %tmp1) nounwind
   store i16 %1, i16* %out0, align 2
   ret void
@@ -48,10 +48,10 @@ entry:
   %in1 = alloca i16, align 2
   store i16 0, i16* %out0, align 2
   store i16 1, i16* %in1, align 2
-  %tmp = load i16* %in1, align 2
+  %tmp = load i16, i16* %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r,>r"(i16 %tmp) nounwind
   store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16* %in1, align 2
+  %tmp1 = load i16, i16* %in1, align 2
   %1 = call i16 asm "foo $1,$0", "=r,r>"(i16 %tmp1) nounwind
   store i16 %1, i16* %out0, align 2
   ret void
@@ -63,7 +63,7 @@ entry:
   %in1 = alloca i16, align 2
   store i16 0, i16* %out0, align 2
   store i16 1, i16* %in1, align 2
-  %tmp = load i16* %in1, align 2
+  %tmp = load i16, i16* %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r,r"(i16 %tmp) nounwind
   store i16 %0, i16* %out0, align 2
   ret void
@@ -120,10 +120,10 @@ entry:
   %in1 = alloca i16, align 2
   store i16 0, i16* %out0, align 2
   store i16 1, i16* %in1, align 2
-  %tmp = load i16* %in1, align 2
+  %tmp = load i16, i16* %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r,imr"(i16 %tmp) nounwind
   store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16* @min1, align 2
+  %tmp1 = load i16, i16* @min1, align 2
   %1 = call i16 asm "foo $1,$0", "=r,imr"(i16 %tmp1) nounwind
   store i16 %1, i16* %out0, align 2
   %2 = call i16 asm "foo $1,$0", "=r,imr"(i16 1) nounwind
@@ -137,10 +137,10 @@ entry:
   %in1 = alloca i16, align 2
   store i16 0, i16* %out0, align 2
   store i16 1, i16* %in1, align 2
-  %tmp = load i16* %in1, align 2
+  %tmp = load i16, i16* %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r,X"(i16 %tmp) nounwind
   store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16* @min1, align 2
+  %tmp1 = load i16, i16* @min1, align 2
   %1 = call i16 asm "foo $1,$0", "=r,X"(i16 %tmp1) nounwind
   store i16 %1, i16* %out0, align 2
   %2 = call i16 asm "foo $1,$0", "=r,X"(i16 1) nounwind
@@ -166,7 +166,7 @@ entry:
 
 define void @multi_m() nounwind {
 entry:
-  %tmp = load i16* @min1, align 2
+  %tmp = load i16, i16* @min1, align 2
   call void asm "foo $1,$0", "=*m|r,m|r"(i16* @mout0, i16 %tmp) nounwind
   ret void
 }
@@ -191,10 +191,10 @@ entry:
   %in1 = alloca i16, align 2
   store i16 0, i16* %out0, align 2
   store i16 1, i16* %in1, align 2
-  %tmp = load i16* %in1, align 2
+  %tmp = load i16, i16* %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r|r,r|<r"(i16 %tmp) nounwind
   store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16* %in1, align 2
+  %tmp1 = load i16, i16* %in1, align 2
   %1 = call i16 asm "foo $1,$0", "=r|r,r|r<"(i16 %tmp1) nounwind
   store i16 %1, i16* %out0, align 2
   ret void
@@ -206,10 +206,10 @@ entry:
   %in1 = alloca i16, align 2
   store i16 0, i16* %out0, align 2
   store i16 1, i16* %in1, align 2
-  %tmp = load i16* %in1, align 2
+  %tmp = load i16, i16* %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r|r,r|>r"(i16 %tmp) nounwind
   store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16* %in1, align 2
+  %tmp1 = load i16, i16* %in1, align 2
   %1 = call i16 asm "foo $1,$0", "=r|r,r|r>"(i16 %tmp1) nounwind
   store i16 %1, i16* %out0, align 2
   ret void
@@ -221,7 +221,7 @@ entry:
   %in1 = alloca i16, align 2
   store i16 0, i16* %out0, align 2
   store i16 1, i16* %in1, align 2
-  %tmp = load i16* %in1, align 2
+  %tmp = load i16, i16* %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r|r,r|m"(i16 %tmp) nounwind
   store i16 %0, i16* %out0, align 2
   ret void
@@ -278,10 +278,10 @@ entry:
   %in1 = alloca i16, align 2
   store i16 0, i16* %out0, align 2
   store i16 1, i16* %in1, align 2
-  %tmp = load i16* %in1, align 2
+  %tmp = load i16, i16* %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r|r,r|imr"(i16 %tmp) nounwind
   store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16* @min1, align 2
+  %tmp1 = load i16, i16* @min1, align 2
   %1 = call i16 asm "foo $1,$0", "=r|r,r|imr"(i16 %tmp1) nounwind
   store i16 %1, i16* %out0, align 2
   %2 = call i16 asm "foo $1,$0", "=r|r,r|imr"(i16 1) nounwind
@@ -295,10 +295,10 @@ entry:
   %in1 = alloca i16, align 2
   store i16 0, i16* %out0, align 2
   store i16 1, i16* %in1, align 2
-  %tmp = load i16* %in1, align 2
+  %tmp = load i16, i16* %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r|r,r|X"(i16 %tmp) nounwind
   store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16* @min1, align 2
+  %tmp1 = load i16, i16* @min1, align 2
   %1 = call i16 asm "foo $1,$0", "=r|r,r|X"(i16 %tmp1) nounwind
   store i16 %1, i16* %out0, align 2
   %2 = call i16 asm "foo $1,$0", "=r|r,r|X"(i16 1) nounwind

Modified: llvm/trunk/test/CodeGen/MSP430/postinc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/postinc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/postinc.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/postinc.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ for.body:
   %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: add:
 ; CHECK: add.w @r{{[0-9]+}}+, r{{[0-9]+}}
-  %tmp4 = load i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
   %add = add i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]
@@ -35,7 +35,7 @@ for.body:
   %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: sub:
 ; CHECK: sub.w @r{{[0-9]+}}+, r{{[0-9]+}}
-  %tmp4 = load i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
   %add = sub i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]
@@ -57,7 +57,7 @@ for.body:
   %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: or:
 ; CHECK: bis.w @r{{[0-9]+}}+, r{{[0-9]+}}
-  %tmp4 = load i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
   %add = or i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]
@@ -79,7 +79,7 @@ for.body:
   %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: xor:
 ; CHECK: xor.w @r{{[0-9]+}}+, r{{[0-9]+}}
-  %tmp4 = load i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
   %add = xor i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]
@@ -101,7 +101,7 @@ for.body:
   %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: and:
 ; CHECK: and.w @r{{[0-9]+}}+, r{{[0-9]+}}
-  %tmp4 = load i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
   %add = and i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]

Modified: llvm/trunk/test/CodeGen/Mips/2008-07-15-SmallSection.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/2008-07-15-SmallSection.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/2008-07-15-SmallSection.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/2008-07-15-SmallSection.ll Fri Feb 27 15:17:42 2015
@@ -27,8 +27,8 @@ entry:
 
 define i32 @A1() nounwind {
 entry:
-  load i32* getelementptr (%struct.anon* @foo, i32 0, i32 0), align 8 
-  load i32* getelementptr (%struct.anon* @foo, i32 0, i32 1), align 4 
+  load i32, i32* getelementptr (%struct.anon* @foo, i32 0, i32 0), align 8 
+  load i32, i32* getelementptr (%struct.anon* @foo, i32 0, i32 1), align 4 
   add i32 %1, %0
   ret i32 %2
 }

Modified: llvm/trunk/test/CodeGen/Mips/2008-08-01-AsmInline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/2008-08-01-AsmInline.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/2008-08-01-AsmInline.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/2008-08-01-AsmInline.ll Fri Feb 27 15:17:42 2015
@@ -26,8 +26,8 @@ entry:
 define void @foo0() nounwind {
 entry:
 ; CHECK: addu
-  %0 = load i32* @gi1, align 4
-  %1 = load i32* @gi0, align 4
+  %0 = load i32, i32* @gi1, align 4
+  %1 = load i32, i32* @gi0, align 4
   %2 = tail call i32 asm "addu $0, $1, $2", "=r,r,r"(i32 %0, i32 %1) nounwind
   store i32 %2, i32* @gi2, align 4
   ret void
@@ -36,7 +36,7 @@ entry:
 define void @foo2() nounwind {
 entry:
 ; CHECK: neg.s
-  %0 = load float* @gf1, align 4
+  %0 = load float, float* @gf1, align 4
   %1 = tail call float asm "neg.s $0, $1", "=f,f"(float %0) nounwind
   store float %1, float* @gf0, align 4
   ret void
@@ -45,7 +45,7 @@ entry:
 define void @foo3() nounwind {
 entry:
 ; CHECK: neg.d
-  %0 = load double* @gd1, align 8
+  %0 = load double, double* @gd1, align 8
   %1 = tail call double asm "neg.d $0, $1", "=f,f"(double %0) nounwind
   store double %1, double* @gd0, align 8
   ret void
@@ -64,7 +64,7 @@ define void @foo4() {
 entry:
   %0 = tail call i32 asm sideeffect "ulh $0,16($$sp)\0A\09", "=r,~{$2}"()
   store i32 %0, i32* @gi2, align 4
-  %1 = load float* @gf0, align 4
+  %1 = load float, float* @gf0, align 4
   %2 = tail call double asm sideeffect "cvt.d.s $0, $1\0A\09", "=f,f,~{$f0}"(float %1)
   store double %2, double* @gd0, align 8
   ret void

Modified: llvm/trunk/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll Fri Feb 27 15:17:42 2015
@@ -7,12 +7,12 @@ entry:
         %retval = alloca double         ; <double*> [#uses=3]
         store double 0.000000e+00, double* %retval
         %r = alloca double              ; <double*> [#uses=1]
-        load double* %r         ; <double>:0 [#uses=1]
+        load double, double* %r         ; <double>:0 [#uses=1]
         store double %0, double* %retval
         br label %return
 
 return:         ; preds = %entry
-        load double* %retval            ; <double>:1 [#uses=1]
+        load double, double* %retval            ; <double>:1 [#uses=1]
         ret double %1
 }
 

Modified: llvm/trunk/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ entry:
 continue.outer:         ; preds = %case4, %entry
         %p.0.ph.rec = phi i32 [ 0, %entry ], [ %indvar.next, %case4 ]          ; <i32> [#uses=2]
         %p.0.ph = getelementptr i8, i8* %0, i32 %p.0.ph.rec         ; <i8*> [#uses=1]
-        %1 = load i8* %p.0.ph           ; <i8> [#uses=1]
+        %1 = load i8, i8* %p.0.ph           ; <i8> [#uses=1]
         switch i8 %1, label %infloop [
                 i8 0, label %return.split
                 i8 76, label %case4

Modified: llvm/trunk/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll Fri Feb 27 15:17:42 2015
@@ -13,16 +13,16 @@ target datalayout = "e-p:32:32:32-i1:8:8
 define double @_erand48_r(%struct._reent* %r, i16* %xseed) nounwind {
 entry:
 	tail call void @__dorand48( %struct._reent* %r, i16* %xseed ) nounwind
-	load i16* %xseed, align 2		; <i16>:0 [#uses=1]
+	load i16, i16* %xseed, align 2		; <i16>:0 [#uses=1]
 	uitofp i16 %0 to double		; <double>:1 [#uses=1]
 	tail call double @ldexp( double %1, i32 -48 ) nounwind		; <double>:2 [#uses=1]
 	getelementptr i16, i16* %xseed, i32 1		; <i16*>:3 [#uses=1]
-	load i16* %3, align 2		; <i16>:4 [#uses=1]
+	load i16, i16* %3, align 2		; <i16>:4 [#uses=1]
 	uitofp i16 %4 to double		; <double>:5 [#uses=1]
 	tail call double @ldexp( double %5, i32 -32 ) nounwind		; <double>:6 [#uses=1]
 	fadd double %2, %6		; <double>:7 [#uses=1]
 	getelementptr i16, i16* %xseed, i32 2		; <i16*>:8 [#uses=1]
-	load i16* %8, align 2		; <i16>:9 [#uses=1]
+	load i16, i16* %8, align 2		; <i16>:9 [#uses=1]
 	uitofp i16 %9 to double		; <double>:10 [#uses=1]
 	tail call double @ldexp( double %10, i32 -16 ) nounwind		; <double>:11 [#uses=1]
 	fadd double %7, %11		; <double>:12 [#uses=1]
@@ -35,18 +35,18 @@ declare double @ldexp(double, i32)
 
 define double @erand48(i16* %xseed) nounwind {
 entry:
-	load %struct._reent** @_impure_ptr, align 4		; <%struct._reent*>:0 [#uses=1]
+	load %struct._reent*, %struct._reent** @_impure_ptr, align 4		; <%struct._reent*>:0 [#uses=1]
 	tail call void @__dorand48( %struct._reent* %0, i16* %xseed ) nounwind
-	load i16* %xseed, align 2		; <i16>:1 [#uses=1]
+	load i16, i16* %xseed, align 2		; <i16>:1 [#uses=1]
 	uitofp i16 %1 to double		; <double>:2 [#uses=1]
 	tail call double @ldexp( double %2, i32 -48 ) nounwind		; <double>:3 [#uses=1]
 	getelementptr i16, i16* %xseed, i32 1		; <i16*>:4 [#uses=1]
-	load i16* %4, align 2		; <i16>:5 [#uses=1]
+	load i16, i16* %4, align 2		; <i16>:5 [#uses=1]
 	uitofp i16 %5 to double		; <double>:6 [#uses=1]
 	tail call double @ldexp( double %6, i32 -32 ) nounwind		; <double>:7 [#uses=1]
 	fadd double %3, %7		; <double>:8 [#uses=1]
 	getelementptr i16, i16* %xseed, i32 2		; <i16*>:9 [#uses=1]
-	load i16* %9, align 2		; <i16>:10 [#uses=1]
+	load i16, i16* %9, align 2		; <i16>:10 [#uses=1]
 	uitofp i16 %10 to double		; <double>:11 [#uses=1]
 	tail call double @ldexp( double %11, i32 -16 ) nounwind		; <double>:12 [#uses=1]
 	fadd double %8, %12		; <double>:13 [#uses=1]

Modified: llvm/trunk/test/CodeGen/Mips/2010-07-20-Switch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/2010-07-20-Switch.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/2010-07-20-Switch.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/2010-07-20-Switch.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ define i32 @main() nounwind readnone {
 entry:
   %x = alloca i32, align 4                        ; <i32*> [#uses=2]
   store volatile i32 2, i32* %x, align 4
-  %0 = load volatile i32* %x, align 4             ; <i32> [#uses=1]
+  %0 = load volatile i32, i32* %x, align 4             ; <i32> [#uses=1]
 ; STATIC-O32: sll $[[R0:[0-9]+]], ${{[0-9]+}}, 2
 ; STATIC-O32: lui $[[R1:[0-9]+]], %hi($JTI0_0)
 ; STATIC-O32: addu $[[R2:[0-9]+]], $[[R0]], $[[R1]]

Modified: llvm/trunk/test/CodeGen/Mips/Fast-ISel/br1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/Fast-ISel/br1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/Fast-ISel/br1.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/Fast-ISel/br1.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
 ; Function Attrs: nounwind
 define void @br() #0 {
 entry:
-  %0 = load i32* @b, align 4
+  %0 = load i32, i32* @b, align 4
   %tobool = icmp eq i32 %0, 0
   br i1 %tobool, label %if.end, label %if.then
 

Modified: llvm/trunk/test/CodeGen/Mips/Fast-ISel/callabi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/Fast-ISel/callabi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/Fast-ISel/callabi.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/Fast-ISel/callabi.ll Fri Feb 27 15:17:42 2015
@@ -84,13 +84,13 @@ entry:
 ; CHECK-LABEL: cxiiiiconv
 ; mips32r2-LABEL:  cxiiiiconv
 ; mips32-LABEL:  cxiiiiconv
-  %0 = load i8* @c1, align 1
+  %0 = load i8, i8* @c1, align 1
   %conv = sext i8 %0 to i32
-  %1 = load i8* @uc1, align 1
+  %1 = load i8, i8* @uc1, align 1
   %conv1 = zext i8 %1 to i32
-  %2 = load i16* @s1, align 2
+  %2 = load i16, i16* @s1, align 2
   %conv2 = sext i16 %2 to i32
-  %3 = load i16* @us1, align 2
+  %3 = load i16, i16* @us1, align 2
   %conv3 = zext i16 %3 to i32
   call void @xiiii(i32 %conv, i32 %conv1, i32 %conv2, i32 %conv3)
 ; CHECK:        addu    $[[REG_GP:[0-9]+]], ${{[0-9]+}}, ${{[0-9+]}}

Modified: llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll Fri Feb 27 15:17:42 2015
@@ -12,8 +12,8 @@
 ; Function Attrs: nounwind
 define void @feq1()  {
 entry:
-  %0 = load float* @f1, align 4
-  %1 = load float* @f2, align 4
+  %0 = load float, float* @f1, align 4
+  %1 = load float, float* @f2, align 4
   %cmp = fcmp oeq float %0, %1
 ; CHECK-LABEL:  feq1:
 ; CHECK-DAG:    lw      $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}})
@@ -33,8 +33,8 @@ entry:
 ; Function Attrs: nounwind
 define void @fne1()  {
 entry:
-  %0 = load float* @f1, align 4
-  %1 = load float* @f2, align 4
+  %0 = load float, float* @f1, align 4
+  %1 = load float, float* @f2, align 4
   %cmp = fcmp une float %0, %1
 ; CHECK-LABEL:  fne1:
 ; CHECK-DAG:    lw      $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}})
@@ -53,8 +53,8 @@ entry:
 ; Function Attrs: nounwind
 define void @flt1()  {
 entry:
-  %0 = load float* @f1, align 4
-  %1 = load float* @f2, align 4
+  %0 = load float, float* @f1, align 4
+  %1 = load float, float* @f2, align 4
   %cmp = fcmp olt float %0, %1
 ; CHECK-LABEL:  flt1:
 ; CHECK-DAG:    lw      $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}})
@@ -74,8 +74,8 @@ entry:
 ; Function Attrs: nounwind
 define void @fgt1()  {
 entry:
-  %0 = load float* @f1, align 4
-  %1 = load float* @f2, align 4
+  %0 = load float, float* @f1, align 4
+  %1 = load float, float* @f2, align 4
   %cmp = fcmp ogt float %0, %1
 ; CHECK-LABEL: fgt1:
 ; CHECK-DAG:    lw      $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}})
@@ -94,8 +94,8 @@ entry:
 ; Function Attrs: nounwind
 define void @fle1()  {
 entry:
-  %0 = load float* @f1, align 4
-  %1 = load float* @f2, align 4
+  %0 = load float, float* @f1, align 4
+  %1 = load float, float* @f2, align 4
   %cmp = fcmp ole float %0, %1
 ; CHECK-LABEL:  fle1:
 ; CHECK-DAG:    lw      $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}})
@@ -114,8 +114,8 @@ entry:
 ; Function Attrs: nounwind
 define void @fge1()  {
 entry:
-  %0 = load float* @f1, align 4
-  %1 = load float* @f2, align 4
+  %0 = load float, float* @f1, align 4
+  %1 = load float, float* @f2, align 4
   %cmp = fcmp oge float %0, %1
 ; CHECK-LABEL:  fge1:
 ; CHECK-DAG:    lw      $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}})
@@ -134,8 +134,8 @@ entry:
 ; Function Attrs: nounwind
 define void @deq1()  {
 entry:
-  %0 = load double* @d1, align 8
-  %1 = load double* @d2, align 8
+  %0 = load double, double* @d1, align 8
+  %1 = load double, double* @d2, align 8
   %cmp = fcmp oeq double %0, %1
 ; CHECK-LABEL:  deq1:
 ; CHECK-DAG:    lw      $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}})
@@ -154,8 +154,8 @@ entry:
 ; Function Attrs: nounwind
 define void @dne1()  {
 entry:
-  %0 = load double* @d1, align 8
-  %1 = load double* @d2, align 8
+  %0 = load double, double* @d1, align 8
+  %1 = load double, double* @d2, align 8
   %cmp = fcmp une double %0, %1
 ; CHECK-LABEL:  dne1:
 ; CHECK-DAG:    lw      $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}})
@@ -174,8 +174,8 @@ entry:
 ; Function Attrs: nounwind
 define void @dlt1()  {
 entry:
-  %0 = load double* @d1, align 8
-  %1 = load double* @d2, align 8
+  %0 = load double, double* @d1, align 8
+  %1 = load double, double* @d2, align 8
   %cmp = fcmp olt double %0, %1
 ; CHECK-LABEL:  dlt1:
 ; CHECK-DAG:    lw      $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}})
@@ -194,8 +194,8 @@ entry:
 ; Function Attrs: nounwind
 define void @dgt1()  {
 entry:
-  %0 = load double* @d1, align 8
-  %1 = load double* @d2, align 8
+  %0 = load double, double* @d1, align 8
+  %1 = load double, double* @d2, align 8
   %cmp = fcmp ogt double %0, %1
 ; CHECK-LABEL:  dgt1:
 ; CHECK-DAG:    lw      $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}})
@@ -214,8 +214,8 @@ entry:
 ; Function Attrs: nounwind
 define void @dle1()  {
 entry:
-  %0 = load double* @d1, align 8
-  %1 = load double* @d2, align 8
+  %0 = load double, double* @d1, align 8
+  %1 = load double, double* @d2, align 8
   %cmp = fcmp ole double %0, %1
 ; CHECK-LABEL:  dle1:
 ; CHECK-DAG:    lw      $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}})
@@ -234,8 +234,8 @@ entry:
 ; Function Attrs: nounwind
 define void @dge1()  {
 entry:
-  %0 = load double* @d1, align 8
-  %1 = load double* @d2, align 8
+  %0 = load double, double* @d1, align 8
+  %1 = load double, double* @d2, align 8
   %cmp = fcmp oge double %0, %1
 ; CHECK-LABEL:  dge1:
 ; CHECK-DAG:    lw      $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}})

Modified: llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpext.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpext.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
 ; Function Attrs: nounwind
 define void @dv() #0 {
 entry:
-  %0 = load float* @f, align 4
+  %0 = load float, float* @f, align 4
   %conv = fpext float %0 to double
 ; CHECK: cvt.d.s  $f{{[0-9]+}}, $f{{[0-9]+}}
   store double %conv, double* @d_f, align 8

Modified: llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpintconv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpintconv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpintconv.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpintconv.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@
 define void @ifv() {
 entry:
 ; CHECK-LABEL:   .ent  ifv
-  %0 = load float* @f, align 4
+  %0 = load float, float* @f, align 4
   %conv = fptosi float %0 to i32
 ; CHECK:   trunc.w.s  $f[[REG:[0-9]+]], $f{{[0-9]+}}
 ; CHECK:   mfc1	${{[0-9]+}}, $f[[REG]]
@@ -26,7 +26,7 @@ entry:
 define void @idv() {
 entry:
 ; CHECK-LABEL:   .ent  idv
-  %0 = load double* @d, align 8
+  %0 = load double, double* @d, align 8
   %conv = fptosi double %0 to i32
 ; CHECK:   trunc.w.d  $f[[REG:[0-9]+]], $f{{[0-9]+}}
 ; CHECK:   mfc1	${{[0-9]+}}, $f[[REG]]

Modified: llvm/trunk/test/CodeGen/Mips/Fast-ISel/fptrunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/Fast-ISel/fptrunc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/Fast-ISel/fptrunc.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/Fast-ISel/fptrunc.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
 ; Function Attrs: nounwind
 define void @fv() #0 {
 entry:
-  %0 = load double* @d, align 8
+  %0 = load double, double* @d, align 8
   %conv = fptrunc double %0 to float
 ; CHECK: cvt.s.d  $f{{[0-9]+}}, $f{{[0-9]+}}
   store float %conv, float* @f, align 4

Modified: llvm/trunk/test/CodeGen/Mips/Fast-ISel/icmpa.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/Fast-ISel/icmpa.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/Fast-ISel/icmpa.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/Fast-ISel/icmpa.ll Fri Feb 27 15:17:42 2015
@@ -14,8 +14,8 @@ define void @eq()  {
 entry:
 ; CHECK-LABEL:  .ent  eq
 
-  %0 = load i32* @c, align 4
-  %1 = load i32* @d, align 4
+  %0 = load i32, i32* @c, align 4
+  %1 = load i32, i32* @d, align 4
   %cmp = icmp eq i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_D_GOT:[0-9]+]], %got(d)(${{[0-9]+}})
@@ -35,8 +35,8 @@ entry:
 define void @ne()  {
 entry:
 ; CHECK-LABEL:  .ent  ne
-  %0 = load i32* @c, align 4
-  %1 = load i32* @d, align 4
+  %0 = load i32, i32* @c, align 4
+  %1 = load i32, i32* @d, align 4
   %cmp = icmp ne i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_D_GOT:[0-9]+]], %got(d)(${{[0-9]+}})
@@ -56,8 +56,8 @@ entry:
 define void @ugt()  {
 entry:
 ; CHECK-LABEL:  .ent  ugt
-  %0 = load i32* @uc, align 4
-  %1 = load i32* @ud, align 4
+  %0 = load i32, i32* @uc, align 4
+  %1 = load i32, i32* @ud, align 4
   %cmp = icmp ugt i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_UD_GOT:[0-9]+]], %got(ud)(${{[0-9]+}})
@@ -76,8 +76,8 @@ entry:
 define void @ult()  {
 entry:
 ; CHECK-LABEL:  .ent  ult
-  %0 = load i32* @uc, align 4
-  %1 = load i32* @ud, align 4
+  %0 = load i32, i32* @uc, align 4
+  %1 = load i32, i32* @ud, align 4
   %cmp = icmp ult i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_UD_GOT:[0-9]+]], %got(ud)(${{[0-9]+}})
@@ -95,8 +95,8 @@ entry:
 define void @uge()  {
 entry:
 ; CHECK-LABEL:  .ent  uge
-  %0 = load i32* @uc, align 4
-  %1 = load i32* @ud, align 4
+  %0 = load i32, i32* @uc, align 4
+  %1 = load i32, i32* @ud, align 4
   %cmp = icmp uge i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_UD_GOT:[0-9]+]], %got(ud)(${{[0-9]+}})
@@ -115,8 +115,8 @@ entry:
 define void @ule()  {
 entry:
 ; CHECK-LABEL:  .ent  ule
-  %0 = load i32* @uc, align 4
-  %1 = load i32* @ud, align 4
+  %0 = load i32, i32* @uc, align 4
+  %1 = load i32, i32* @ud, align 4
   %cmp = icmp ule i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_UD_GOT:[0-9]+]], %got(ud)(${{[0-9]+}})
@@ -135,8 +135,8 @@ entry:
 define void @sgt()  {
 entry:
 ; CHECK-LABEL:  .ent sgt
-  %0 = load i32* @c, align 4
-  %1 = load i32* @d, align 4
+  %0 = load i32, i32* @c, align 4
+  %1 = load i32, i32* @d, align 4
   %cmp = icmp sgt i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_D_GOT:[0-9]+]], %got(d)(${{[0-9]+}})
@@ -154,8 +154,8 @@ entry:
 define void @slt()  {
 entry:
 ; CHECK-LABEL:  .ent slt
-  %0 = load i32* @c, align 4
-  %1 = load i32* @d, align 4
+  %0 = load i32, i32* @c, align 4
+  %1 = load i32, i32* @d, align 4
   %cmp = icmp slt i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_D_GOT:[0-9]+]], %got(d)(${{[0-9]+}})
@@ -173,8 +173,8 @@ entry:
 define void @sge()  {
 entry:
 ; CHECK-LABEL:  .ent sge
-  %0 = load i32* @c, align 4
-  %1 = load i32* @d, align 4
+  %0 = load i32, i32* @c, align 4
+  %1 = load i32, i32* @d, align 4
   %cmp = icmp sge i32 %0, %1
   %conv = zext i1 %cmp to i32
   store i32 %conv, i32* @b1, align 4
@@ -193,8 +193,8 @@ entry:
 define void @sle()  {
 entry:
 ; CHECK-LABEL:  .ent sle
-  %0 = load i32* @c, align 4
-  %1 = load i32* @d, align 4
+  %0 = load i32, i32* @c, align 4
+  %1 = load i32, i32* @d, align 4
   %cmp = icmp sle i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_D_GOT:[0-9]+]], %got(d)(${{[0-9]+}})

Modified: llvm/trunk/test/CodeGen/Mips/Fast-ISel/loadstore2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/Fast-ISel/loadstore2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/Fast-ISel/loadstore2.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/Fast-ISel/loadstore2.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ target triple = "mips--linux-gnu"
 ; Function Attrs: nounwind
 define void @cfoo() #0 {
 entry:
-  %0 = load i8* @c2, align 1
+  %0 = load i8, i8* @c2, align 1
   store i8 %0, i8* @c1, align 1
 ; CHECK-LABEL:	cfoo:
 ; CHECK:	lbu	$[[REGc:[0-9]+]], 0(${{[0-9]+}})
@@ -34,7 +34,7 @@ entry:
 ; Function Attrs: nounwind
 define void @sfoo() #0 {
 entry:
-  %0 = load i16* @s2, align 2
+  %0 = load i16, i16* @s2, align 2
   store i16 %0, i16* @s1, align 2
 ; CHECK-LABEL:	sfoo:
 ; CHECK:	lhu	$[[REGs:[0-9]+]], 0(${{[0-9]+}})
@@ -46,7 +46,7 @@ entry:
 ; Function Attrs: nounwind
 define void @ifoo() #0 {
 entry:
-  %0 = load i32* @i2, align 4
+  %0 = load i32, i32* @i2, align 4
   store i32 %0, i32* @i1, align 4
 ; CHECK-LABEL:	ifoo:
 ; CHECK:	lw	$[[REGi:[0-9]+]], 0(${{[0-9]+}})
@@ -58,7 +58,7 @@ entry:
 ; Function Attrs: nounwind
 define void @ffoo() #0 {
 entry:
-  %0 = load float* @f2, align 4
+  %0 = load float, float* @f2, align 4
   store float %0, float* @f1, align 4
 ; CHECK-LABEL:	ffoo:
 ; CHECK:	lwc1	$f[[REGf:[0-9]+]], 0(${{[0-9]+}})
@@ -71,7 +71,7 @@ entry:
 ; Function Attrs: nounwind
 define void @dfoo() #0 {
 entry:
-  %0 = load double* @d2, align 8
+  %0 = load double, double* @d2, align 8
   store double %0, double* @d1, align 8
 ; CHECK-LABEL:        dfoo:
 ; CHECK:        ldc1    $f[[REGd:[0-9]+]], 0(${{[0-9]+}})

Modified: llvm/trunk/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll Fri Feb 27 15:17:42 2015
@@ -28,11 +28,11 @@
 define void @_Z3b_iv()  {
 entry:
 ; CHECK-LABEL:   .ent  _Z3b_iv
-  %0 = load i8* @b1, align 1
+  %0 = load i8, i8* @b1, align 1
   %tobool = trunc i8 %0 to i1
   %frombool = zext i1 %tobool to i8
   store i8 %frombool, i8* @b2, align 1
-  %1 = load i8* @b2, align 1
+  %1 = load i8, i8* @b2, align 1
   %tobool1 = trunc i8 %1 to i1
   %conv = zext i1 %tobool1 to i32
   store i32 %conv, i32* @i, align 4
@@ -51,10 +51,10 @@ define void @_Z4uc_iv()  {
 entry:
 ; CHECK-LABEL:  .ent  _Z4uc_iv
 
-  %0 = load i8* @uc1, align 1
+  %0 = load i8, i8* @uc1, align 1
   %conv = zext i8 %0 to i32
   store i32 %conv, i32* @i, align 4
-  %1 = load i8* @uc2, align 1
+  %1 = load i8, i8* @uc2, align 1
   %conv1 = zext i8 %1 to i32
 ; CHECK:   lbu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG1]], 255
@@ -71,10 +71,10 @@ entry:
 ; mips32r2-LABEL:  .ent  _Z4sc_iv
 ; mips32-LABEL:  .ent  _Z4sc_iv
 
-  %0 = load i8* @sc1, align 1
+  %0 = load i8, i8* @sc1, align 1
   %conv = sext i8 %0 to i32
   store i32 %conv, i32* @i, align 4
-  %1 = load i8* @sc2, align 1
+  %1 = load i8, i8* @sc2, align 1
   %conv1 = sext i8 %1 to i32
   store i32 %conv1, i32* @j, align 4
 ; mips32r2:  lbu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
@@ -91,10 +91,10 @@ entry:
 define void @_Z4us_iv()  {
 entry:
 ; CHECK-LABEL:  .ent  _Z4us_iv
-  %0 = load i16* @us1, align 2
+  %0 = load i16, i16* @us1, align 2
   %conv = zext i16 %0 to i32
   store i32 %conv, i32* @i, align 4
-  %1 = load i16* @us2, align 2
+  %1 = load i16, i16* @us2, align 2
   %conv1 = zext i16 %1 to i32
   store i32 %conv1, i32* @j, align 4
   ret void
@@ -109,10 +109,10 @@ entry:
 ; mips32r2-LABEL:  .ent  _Z4ss_iv
 ; mips32-LABEL:  .ent  _Z4ss_iv
 
-  %0 = load i16* @ss1, align 2
+  %0 = load i16, i16* @ss1, align 2
   %conv = sext i16 %0 to i32
   store i32 %conv, i32* @i, align 4
-  %1 = load i16* @ss2, align 2
+  %1 = load i16, i16* @ss2, align 2
   %conv1 = sext i16 %1 to i32
   store i32 %conv1, i32* @j, align 4
 ; mips32r2:  lhu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
@@ -129,7 +129,7 @@ entry:
 define void @_Z4b_ssv()  {
 entry:
 ; CHECK-LABEL:  .ent  _Z4b_ssv
-  %0 = load i8* @b2, align 1
+  %0 = load i8, i8* @b2, align 1
   %tobool = trunc i8 %0 to i1
   %conv = zext i1 %tobool to i16
   store i16 %conv, i16* @ssi, align 2
@@ -143,10 +143,10 @@ entry:
 define void @_Z5uc_ssv()  {
 entry:
 ; CHECK-LABEL:  .ent  _Z5uc_ssv
-  %0 = load i8* @uc1, align 1
+  %0 = load i8, i8* @uc1, align 1
   %conv = zext i8 %0 to i16
   store i16 %conv, i16* @ssi, align 2
-  %1 = load i8* @uc2, align 1
+  %1 = load i8, i8* @uc2, align 1
   %conv1 = zext i8 %1 to i16
 ; CHECK:   lbu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG1]], 255
@@ -161,10 +161,10 @@ define void @_Z5sc_ssv()  {
 entry:
 ; mips32r2-LABEL:  .ent  _Z5sc_ssv
 ; mips32-LABEL:  .ent  _Z5sc_ssv
-  %0 = load i8* @sc1, align 1
+  %0 = load i8, i8* @sc1, align 1
   %conv = sext i8 %0 to i16
   store i16 %conv, i16* @ssi, align 2
-  %1 = load i8* @sc2, align 1
+  %1 = load i8, i8* @sc2, align 1
   %conv1 = sext i8 %1 to i16
   store i16 %conv1, i16* @ssj, align 2
 ; mips32r2:  lbu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})

Modified: llvm/trunk/test/CodeGen/Mips/Fast-ISel/overflt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/Fast-ISel/overflt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/Fast-ISel/overflt.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/Fast-ISel/overflt.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@
 define void @foo() {
 entry:
 ; CHECK-LABEL:   .ent  foo
-  %0 = load float** @y, align 4
+  %0 = load float*, float** @y, align 4
   %arrayidx = getelementptr inbounds float, float* %0, i32 64000
   store float 5.500000e+00, float* %arrayidx, align 4
 ; CHECK:        lui     $[[REG_FPCONST_INT:[0-9]+]], 16560
@@ -31,9 +31,9 @@ entry:
 define void @goo() {
 entry:
 ; CHECK-LABEL:   .ent  goo
-  %0 = load float** @y, align 4
+  %0 = load float*, float** @y, align 4
   %arrayidx = getelementptr inbounds float, float* %0, i32 64000
-  %1 = load float* %arrayidx, align 4
+  %1 = load float, float* %arrayidx, align 4
   store float %1, float* @result, align 4
 ; CHECK-DAG:    lw      $[[REG_RESULT:[0-9]+]], %got(result)(${{[0-9]+}})
 ; CHECK-DAG:    lw      $[[REG_Y_GOT:[0-9]+]], %got(y)(${{[0-9]+}})

Modified: llvm/trunk/test/CodeGen/Mips/Fast-ISel/retabi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/Fast-ISel/retabi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/Fast-ISel/retabi.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/Fast-ISel/retabi.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@
 define i32 @reti() {
 entry:
 ; CHECK-LABEL: reti:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   ret i32 %0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -25,7 +25,7 @@ entry:
 define signext i16 @rets() {
 entry:
 ; CHECK-LABEL: rets:
-  %0 = load i16* @s, align 2
+  %0 = load i16, i16* @s, align 2
   ret i16 %0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -40,7 +40,7 @@ entry:
 define signext i8 @retc() {
 entry:
 ; CHECK-LABEL: retc:
-  %0 = load i8* @c, align 1
+  %0 = load i8, i8* @c, align 1
   ret i8 %0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -55,7 +55,7 @@ entry:
 define float @retf() {
 entry:
 ; CHECK-LABEL: retf:
-  %0 = load float* @f, align 4
+  %0 = load float, float* @f, align 4
   ret float %0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -69,7 +69,7 @@ entry:
 define double @retd() {
 entry:
 ; CHECK-LABEL: retd:
-  %0 = load double* @d, align 8
+  %0 = load double, double* @d, align 8
   ret double %0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)

Modified: llvm/trunk/test/CodeGen/Mips/Fast-ISel/shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/Fast-ISel/shift.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/Fast-ISel/shift.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/Fast-ISel/shift.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define i32 @main() nounwind uwtable {
 entry:
   %foo = alloca %struct.s, align 4
   %0 = bitcast %struct.s* %foo to i32*
-  %bf.load = load i32* %0, align 4
+  %bf.load = load i32, i32* %0, align 4
   %bf.lshr = lshr i32 %bf.load, 2
   %cmp = icmp ne i32 %bf.lshr, 2
   br i1 %cmp, label %if.then, label %if.end

Modified: llvm/trunk/test/CodeGen/Mips/addi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/addi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/addi.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/addi.ll Fri Feb 27 15:17:42 2015
@@ -8,16 +8,16 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %add = add nsw i32 %0, 5
   store i32 %add, i32* @i, align 4
-  %1 = load i32* @j, align 4
+  %1 = load i32, i32* @j, align 4
   %sub = sub nsw i32 %1, 5
   store i32 %sub, i32* @j, align 4
-  %2 = load i32* @k, align 4
+  %2 = load i32, i32* @k, align 4
   %add1 = add nsw i32 %2, 10000
   store i32 %add1, i32* @k, align 4
-  %3 = load i32* @l, align 4
+  %3 = load i32, i32* @l, align 4
   %sub2 = sub nsw i32 %3, 10000
   store i32 %sub2, i32* @l, align 4
 ; 16: 	addiu	${{[0-9]+}}, 5	# 16 bit inst

Modified: llvm/trunk/test/CodeGen/Mips/addressing-mode.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/addressing-mode.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/addressing-mode.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/addressing-mode.ll Fri Feb 27 15:17:42 2015
@@ -21,9 +21,9 @@ for.body3:
   %s.120 = phi i32 [ %s.022, %for.cond1.preheader ], [ %add7, %for.body3 ]
   %j.019 = phi i32 [ 0, %for.cond1.preheader ], [ %add8, %for.body3 ]
   %arrayidx4 = getelementptr inbounds [256 x i32], [256 x i32]* %a, i32 %i.021, i32 %j.019
-  %0 = load i32* %arrayidx4, align 4
+  %0 = load i32, i32* %arrayidx4, align 4
   %arrayidx6 = getelementptr inbounds [256 x i32], [256 x i32]* %b, i32 %i.021, i32 %j.019
-  %1 = load i32* %arrayidx6, align 4
+  %1 = load i32, i32* %arrayidx6, align 4
   %add = add i32 %0, %s.120
   %add7 = add i32 %add, %1
   %add8 = add nsw i32 %j.019, %m

Modified: llvm/trunk/test/CodeGen/Mips/align16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/align16.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/align16.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/align16.ll Fri Feb 27 15:17:42 2015
@@ -15,10 +15,10 @@ entry:
   %x = alloca i32, align 8
   %zz = alloca i32, align 4
   %z = alloca i32, align 4
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %arrayidx = getelementptr inbounds [512 x i32], [512 x i32]* %y, i32 0, i32 10
   store i32 %0, i32* %arrayidx, align 4
-  %1 = load i32* @i, align 4
+  %1 = load i32, i32* @i, align 4
   store i32 %1, i32* %x, align 8
   call void @p(i32* %x)
   %arrayidx1 = getelementptr inbounds [512 x i32], [512 x i32]* %y, i32 0, i32 10

Modified: llvm/trunk/test/CodeGen/Mips/alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/alloca.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/alloca.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/alloca.ll Fri Feb 27 15:17:42 2015
@@ -59,23 +59,23 @@ if.end:
 ; CHECK: lw  $25, %call16(printf)
 
   %.pre-phi = phi i32* [ %2, %if.else ], [ %.pre, %if.then ]
-  %tmp7 = load i32* %0, align 4
+  %tmp7 = load i32, i32* %0, align 4
   %arrayidx9 = getelementptr inbounds i8, i8* %tmp1, i32 4
   %3 = bitcast i8* %arrayidx9 to i32*
-  %tmp10 = load i32* %3, align 4
+  %tmp10 = load i32, i32* %3, align 4
   %arrayidx12 = getelementptr inbounds i8, i8* %tmp1, i32 8
   %4 = bitcast i8* %arrayidx12 to i32*
-  %tmp13 = load i32* %4, align 4
-  %tmp16 = load i32* %.pre-phi, align 4
+  %tmp13 = load i32, i32* %4, align 4
+  %tmp16 = load i32, i32* %.pre-phi, align 4
   %arrayidx18 = getelementptr inbounds i8, i8* %tmp1, i32 16
   %5 = bitcast i8* %arrayidx18 to i32*
-  %tmp19 = load i32* %5, align 4
+  %tmp19 = load i32, i32* %5, align 4
   %arrayidx21 = getelementptr inbounds i8, i8* %tmp1, i32 20
   %6 = bitcast i8* %arrayidx21 to i32*
-  %tmp22 = load i32* %6, align 4
+  %tmp22 = load i32, i32* %6, align 4
   %arrayidx24 = getelementptr inbounds i8, i8* %tmp1, i32 24
   %7 = bitcast i8* %arrayidx24 to i32*
-  %tmp25 = load i32* %7, align 4
+  %tmp25 = load i32, i32* %7, align 4
   %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str, i32 0, i32 0), i32 %tmp7, i32 %tmp10, i32 %tmp13, i32 %tmp16, i32 %tmp19, i32 %tmp22, i32 %tmp25) nounwind
   ret i32 0
 }

Modified: llvm/trunk/test/CodeGen/Mips/alloca16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/alloca16.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/alloca16.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/alloca16.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ define void @temp(i32 %foo) nounwind {
 entry:
   %foo.addr = alloca i32, align 4
   store i32 %foo, i32* %foo.addr, align 4
-  %0 = load i32* %foo.addr, align 4
+  %0 = load i32, i32* %foo.addr, align 4
   store i32 %0, i32* @t, align 4
   ret void
 }
@@ -28,46 +28,46 @@ entry:
   %sssi = alloca i32, align 4
   %ip = alloca i32*, align 4
   %sssj = alloca i32, align 4
-  %0 = load i32* @iiii, align 4
+  %0 = load i32, i32* @iiii, align 4
   store i32 %0, i32* %sssi, align 4
-  %1 = load i32* @kkkk, align 4
+  %1 = load i32, i32* @kkkk, align 4
   %mul = mul nsw i32 %1, 100
   %2 = alloca i8, i32 %mul
   %3 = bitcast i8* %2 to i32*
   store i32* %3, i32** %ip, align 4
-  %4 = load i32* @jjjj, align 4
+  %4 = load i32, i32* @jjjj, align 4
   store i32 %4, i32* %sssj, align 4
-  %5 = load i32* @jjjj, align 4
-  %6 = load i32* @iiii, align 4
-  %7 = load i32** %ip, align 4
+  %5 = load i32, i32* @jjjj, align 4
+  %6 = load i32, i32* @iiii, align 4
+  %7 = load i32*, i32** %ip, align 4
   %arrayidx = getelementptr inbounds i32, i32* %7, i32 %6
   store i32 %5, i32* %arrayidx, align 4
-  %8 = load i32* @kkkk, align 4
-  %9 = load i32* @jjjj, align 4
-  %10 = load i32** %ip, align 4
+  %8 = load i32, i32* @kkkk, align 4
+  %9 = load i32, i32* @jjjj, align 4
+  %10 = load i32*, i32** %ip, align 4
   %arrayidx1 = getelementptr inbounds i32, i32* %10, i32 %9
   store i32 %8, i32* %arrayidx1, align 4
-  %11 = load i32* @iiii, align 4
-  %12 = load i32* @kkkk, align 4
-  %13 = load i32** %ip, align 4
+  %11 = load i32, i32* @iiii, align 4
+  %12 = load i32, i32* @kkkk, align 4
+  %13 = load i32*, i32** %ip, align 4
   %arrayidx2 = getelementptr inbounds i32, i32* %13, i32 %12
   store i32 %11, i32* %arrayidx2, align 4
-  %14 = load i32** %ip, align 4
+  %14 = load i32*, i32** %ip, align 4
   %arrayidx3 = getelementptr inbounds i32, i32* %14, i32 25
-  %15 = load i32* %arrayidx3, align 4
+  %15 = load i32, i32* %arrayidx3, align 4
   store i32 %15, i32* @riii, align 4
-  %16 = load i32** %ip, align 4
+  %16 = load i32*, i32** %ip, align 4
   %arrayidx4 = getelementptr inbounds i32, i32* %16, i32 35
-  %17 = load i32* %arrayidx4, align 4
+  %17 = load i32, i32* %arrayidx4, align 4
   store i32 %17, i32* @rjjj, align 4
-  %18 = load i32** %ip, align 4
+  %18 = load i32*, i32** %ip, align 4
   %arrayidx5 = getelementptr inbounds i32, i32* %18, i32 100
-  %19 = load i32* %arrayidx5, align 4
+  %19 = load i32, i32* %arrayidx5, align 4
   store i32 %19, i32* @rkkk, align 4
-  %20 = load i32* @t, align 4
-  %21 = load i32** %ip, align 4
+  %20 = load i32, i32* @t, align 4
+  %21 = load i32*, i32** %ip, align 4
   %arrayidx6 = getelementptr inbounds i32, i32* %21, i32 %20
-  %22 = load i32* %arrayidx6, align 4
+  %22 = load i32, i32* %arrayidx6, align 4
 ; 16: 	addiu $sp, -16
   call void @temp(i32 %22)
 ; 16: 	addiu $sp, 16
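
Loads of pointer values, as in the %ip slot above, take the pointee's pointer type as the new first operand; the pointer operand itself stays one level more indirect.

  ; sketch mirroring the %ip hunks above (hypothetical value names)
  %p = load i32*, i32** %ip, align 4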

Modified: llvm/trunk/test/CodeGen/Mips/and1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/and1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/and1.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/and1.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32* @x, align 4
-  %1 = load i32* @y, align 4
+  %0 = load i32, i32* @x, align 4
+  %1 = load i32, i32* @y, align 4
   %and = and i32 %0, %1
 ; 16:	and	${{[0-9]+}}, ${{[0-9]+}}
   %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0), i32 %and)

Modified: llvm/trunk/test/CodeGen/Mips/atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/atomic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/atomic.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/atomic.ll Fri Feb 27 15:17:42 2015
@@ -54,7 +54,7 @@ define i32 @AtomicSwap32(i32 signext %ne
 entry:
   %newval.addr = alloca i32, align 4
   store i32 %newval, i32* %newval.addr, align 4
-  %tmp = load i32* %newval.addr, align 4
+  %tmp = load i32, i32* %newval.addr, align 4
   %0 = atomicrmw xchg i32* @x, i32 %tmp monotonic
   ret i32 %0
 
@@ -74,7 +74,7 @@ define i32 @AtomicCmpSwap32(i32 signext
 entry:
   %newval.addr = alloca i32, align 4
   store i32 %newval, i32* %newval.addr, align 4
-  %tmp = load i32* %newval.addr, align 4
+  %tmp = load i32, i32* %newval.addr, align 4
   %0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic monotonic
   %1 = extractvalue { i32, i1 } %0, 0
   ret i32 %1

Modified: llvm/trunk/test/CodeGen/Mips/atomicops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/atomicops.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/atomicops.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/atomicops.ll Fri Feb 27 15:17:42 2015
@@ -18,14 +18,14 @@ entry:
   store volatile i32 0, i32* %x, align 4
   %0 = atomicrmw add i32* %x, i32 1 seq_cst
   %add.i = add nsw i32 %0, 2
-  %1 = load volatile i32* %x, align 4
+  %1 = load volatile i32, i32* %x, align 4
   %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %add.i, i32 %1) nounwind
   %pair = cmpxchg i32* %x, i32 1, i32 2 seq_cst seq_cst
   %2 = extractvalue { i32, i1 } %pair, 0
-  %3 = load volatile i32* %x, align 4
+  %3 = load volatile i32, i32* %x, align 4
   %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %2, i32 %3) nounwind
   %4 = atomicrmw xchg i32* %x, i32 1 seq_cst
-  %5 = load volatile i32* %x, align 4
+  %5 = load volatile i32, i32* %x, align 4
   %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %4, i32 %5) nounwind
 ; 16-LABEL: main:
 ; 16:	lw	${{[0-9]+}}, %call16(__sync_synchronize)(${{[0-9]+}})

Modified: llvm/trunk/test/CodeGen/Mips/beqzc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/beqzc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/beqzc.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/beqzc.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 ; Function Attrs: nounwind optsize
 define i32 @main() #0 {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %cmp = icmp eq i32 %0, 0
   %. = select i1 %cmp, i32 10, i32 55
   store i32 %., i32* @j, align 4

Modified: llvm/trunk/test/CodeGen/Mips/beqzc1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/beqzc1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/beqzc1.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/beqzc1.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 ; Function Attrs: nounwind optsize
 define i32 @main() #0 {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.end
 

Modified: llvm/trunk/test/CodeGen/Mips/biggot.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/biggot.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/biggot.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/biggot.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ entry:
 ; N64: daddu  $[[R3:[0-9]+]], $[[R2]], ${{[a-z0-9]+}}
 ; N64: ld  ${{[0-9]+}}, %call_lo(foo0)($[[R3]])
 
-  %0 = load i32* @v0, align 4
+  %0 = load i32, i32* @v0, align 4
   tail call void @foo0(i32 %0) nounwind
   ret void
 }

Modified: llvm/trunk/test/CodeGen/Mips/brconeq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/brconeq.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/brconeq.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/brconeq.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @i, align 4
-  %1 = load i32* @j, align 4
+  %0 = load i32, i32* @i, align 4
+  %1 = load i32, i32* @j, align 4
   %cmp = icmp eq i32 %0, %1
 ; 16:	cmp	${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	bteqz	$[[LABEL:[0-9A-Ba-b_]+]]

Modified: llvm/trunk/test/CodeGen/Mips/brconeqk.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/brconeqk.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/brconeqk.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/brconeqk.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %cmp = icmp eq i32 %0, 10
   br i1 %cmp, label %if.end, label %if.then
 ; 16:	cmpi	${{[0-9]+}}, {{[0-9]+}}

Modified: llvm/trunk/test/CodeGen/Mips/brconeqz.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/brconeqz.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/brconeqz.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/brconeqz.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.end, label %if.then
 ; 16:	beqz	${{[0-9]+}}, $[[LABEL:[0-9A-Ba-b_]+]]

Modified: llvm/trunk/test/CodeGen/Mips/brconge.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/brconge.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/brconge.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/brconge.ll Fri Feb 27 15:17:42 2015
@@ -8,8 +8,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @i, align 4
-  %1 = load i32* @j, align 4
+  %0 = load i32, i32* @i, align 4
+  %1 = load i32, i32* @j, align 4
   %cmp = icmp slt i32 %0, %1
   br i1 %cmp, label %if.then, label %if.end
 
@@ -22,7 +22,7 @@ if.then:
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  %2 = load i32* @k, align 4
+  %2 = load i32, i32* @k, align 4
   %cmp1 = icmp slt i32 %0, %2
   br i1 %cmp1, label %if.then2, label %if.end3
 

Modified: llvm/trunk/test/CodeGen/Mips/brcongt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/brcongt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/brcongt.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/brcongt.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @i, align 4
-  %1 = load i32* @j, align 4
+  %0 = load i32, i32* @i, align 4
+  %1 = load i32, i32* @j, align 4
   %cmp = icmp sgt i32 %0, %1
   br i1 %cmp, label %if.end, label %if.then
 ; 16:	slt	${{[0-9]+}}, ${{[0-9]+}}

Modified: llvm/trunk/test/CodeGen/Mips/brconle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/brconle.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/brconle.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/brconle.ll Fri Feb 27 15:17:42 2015
@@ -8,8 +8,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @j, align 4
-  %1 = load i32* @i, align 4
+  %0 = load i32, i32* @j, align 4
+  %1 = load i32, i32* @i, align 4
   %cmp = icmp sgt i32 %0, %1
   br i1 %cmp, label %if.then, label %if.end
 
@@ -22,7 +22,7 @@ if.then:
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  %2 = load i32* @k, align 4
+  %2 = load i32, i32* @k, align 4
   %cmp1 = icmp sgt i32 %1, %2
   br i1 %cmp1, label %if.then2, label %if.end3
 

Modified: llvm/trunk/test/CodeGen/Mips/brconlt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/brconlt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/brconlt.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/brconlt.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @j, align 4
-  %1 = load i32* @i, align 4
+  %0 = load i32, i32* @j, align 4
+  %1 = load i32, i32* @i, align 4
   %cmp = icmp slt i32 %0, %1
   br i1 %cmp, label %if.end, label %if.then
 

Modified: llvm/trunk/test/CodeGen/Mips/brconne.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/brconne.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/brconne.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/brconne.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @j, align 4
-  %1 = load i32* @i, align 4
+  %0 = load i32, i32* @j, align 4
+  %1 = load i32, i32* @i, align 4
   %cmp = icmp eq i32 %0, %1
   br i1 %cmp, label %if.then, label %if.end
 ; 16:	cmp	${{[0-9]+}}, ${{[0-9]+}}

Modified: llvm/trunk/test/CodeGen/Mips/brconnek.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/brconnek.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/brconnek.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/brconnek.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @j, align 4
+  %0 = load i32, i32* @j, align 4
   %cmp = icmp eq i32 %0, 5
   br i1 %cmp, label %if.then, label %if.end
 

Modified: llvm/trunk/test/CodeGen/Mips/brconnez.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/brconnez.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/brconnez.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/brconnez.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @j, align 4
+  %0 = load i32, i32* @j, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.end
 

Modified: llvm/trunk/test/CodeGen/Mips/brdelayslot.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/brdelayslot.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/brdelayslot.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/brdelayslot.ll Fri Feb 27 15:17:42 2015
@@ -54,18 +54,18 @@ declare void @foo4(double)
 
 define void @foo5(i32 %a) nounwind {
 entry:
-  %0 = load i32* @g2, align 4
+  %0 = load i32, i32* @g2, align 4
   %tobool = icmp eq i32 %a, 0
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:
-  %1 = load i32* @g1, align 4
+  %1 = load i32, i32* @g1, align 4
   %add = add nsw i32 %1, %0
   store i32 %add, i32* @g1, align 4
   br label %if.end
 
 if.else:
-  %2 = load i32* @g3, align 4
+  %2 = load i32, i32* @g3, align 4
   %sub = sub nsw i32 %2, %0
   store i32 %sub, i32* @g3, align 4
   br label %if.end
@@ -99,9 +99,9 @@ declare void @foo7(double, float)
 define i32 @foo8(i32 %a) nounwind {
 entry:
   store i32 %a, i32* @g1, align 4
-  %0 = load void ()** @foo9, align 4
+  %0 = load void ()*, void ()** @foo9, align 4
   tail call void %0() nounwind
-  %1 = load i32* @g1, align 4
+  %1 = load i32, i32* @g1, align 4
   %add = add nsw i32 %1, %a
   ret i32 %add
 }
@@ -145,7 +145,7 @@ for.body:
   %s.06 = phi i32 [ %add, %for.body ], [ 0, %entry ]
   %i.05 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
   %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.05
-  %0 = load i32* %arrayidx, align 4
+  %0 = load i32, i32* %arrayidx, align 4
   %add = add nsw i32 %0, %s.06
   %inc = add nsw i32 %i.05, 1
   %exitcond = icmp eq i32 %inc, %n
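
Indirect calls through a loaded function pointer, like the @foo9 call in foo8 above, follow the same shape; the full function type is written in both operands.

  ; sketch mirroring the @foo9 hunk above
  %fp = load void ()*, void ()** @foo9, align 4
  tail call void %fp() nounwind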

Modified: llvm/trunk/test/CodeGen/Mips/brind.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/brind.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/brind.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/brind.ll Fri Feb 27 15:17:42 2015
@@ -27,7 +27,7 @@ L3:
   %puts7 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str7, i32 0, i32 0))
   %inc = add i32 %i.2, 1
   %arrayidx = getelementptr inbounds [5 x i8*], [5 x i8*]* @main.L, i32 0, i32 %i.2
-  %0 = load i8** %arrayidx, align 4
+  %0 = load i8*, i8** %arrayidx, align 4
   indirectbr i8* %0, [label %L1, label %L2, label %L3, label %L4]
 ; 16: 	jrc	 ${{[0-9]+}}
 L4:                                               ; preds = %L3

Modified: llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll Fri Feb 27 15:17:42 2015
@@ -140,10 +140,10 @@ define void @smallStruct_1b(%struct.Smal
 entry:
   %ss.addr = alloca %struct.SmallStruct_1b*, align 8
   store %struct.SmallStruct_1b* %ss, %struct.SmallStruct_1b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_1b* %0 to { i8 }*
   %2 = getelementptr { i8 }, { i8 }* %1, i32 0, i32 0
-  %3 = load i8* %2, align 1
+  %3 = load i8, i8* %2, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i8 inreg %3)
   ret void
  ; CHECK-LABEL: smallStruct_1b: 
@@ -154,10 +154,10 @@ define void @smallStruct_2b(%struct.Smal
 entry:
   %ss.addr = alloca %struct.SmallStruct_2b*, align 8
   store %struct.SmallStruct_2b* %ss, %struct.SmallStruct_2b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_2b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_2b*, %struct.SmallStruct_2b** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_2b* %0 to { i16 }*
   %2 = getelementptr { i16 }, { i16 }* %1, i32 0, i32 0
-  %3 = load i16* %2, align 1
+  %3 = load i16, i16* %2, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i16 inreg %3)
   ret void
  ; CHECK-LABEL: smallStruct_2b:
@@ -169,12 +169,12 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_3b*, align 8
   %.coerce = alloca { i24 }
   store %struct.SmallStruct_3b* %ss, %struct.SmallStruct_3b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_3b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_3b*, %struct.SmallStruct_3b** %ss.addr, align 8
   %1 = bitcast { i24 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_3b* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 3, i32 0, i1 false)
   %3 = getelementptr { i24 }, { i24 }* %.coerce, i32 0, i32 0
-  %4 = load i24* %3, align 1
+  %4 = load i24, i24* %3, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i24 inreg %4)
   ret void
  ; CHECK-LABEL: smallStruct_3b:
@@ -187,10 +187,10 @@ define void @smallStruct_4b(%struct.Smal
 entry:
   %ss.addr = alloca %struct.SmallStruct_4b*, align 8
   store %struct.SmallStruct_4b* %ss, %struct.SmallStruct_4b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_4b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_4b*, %struct.SmallStruct_4b** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_4b* %0 to { i32 }*
   %2 = getelementptr { i32 }, { i32 }* %1, i32 0, i32 0
-  %3 = load i32* %2, align 1
+  %3 = load i32, i32* %2, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
   ret void
  ; CHECK-LABEL: smallStruct_4b:
@@ -202,12 +202,12 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_5b*, align 8
   %.coerce = alloca { i40 }
   store %struct.SmallStruct_5b* %ss, %struct.SmallStruct_5b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_5b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_5b*, %struct.SmallStruct_5b** %ss.addr, align 8
   %1 = bitcast { i40 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_5b* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 5, i32 0, i1 false)
   %3 = getelementptr { i40 }, { i40 }* %.coerce, i32 0, i32 0
-  %4 = load i40* %3, align 1
+  %4 = load i40, i40* %3, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i40 inreg %4)
   ret void
  ; CHECK-LABEL: smallStruct_5b:
@@ -219,12 +219,12 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_6b*, align 8
   %.coerce = alloca { i48 }
   store %struct.SmallStruct_6b* %ss, %struct.SmallStruct_6b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_6b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_6b*, %struct.SmallStruct_6b** %ss.addr, align 8
   %1 = bitcast { i48 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_6b* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false)
   %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
-  %4 = load i48* %3, align 1
+  %4 = load i48, i48* %3, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
   ret void
  ; CHECK-LABEL: smallStruct_6b:
@@ -236,12 +236,12 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_7b*, align 8
   %.coerce = alloca { i56 }
   store %struct.SmallStruct_7b* %ss, %struct.SmallStruct_7b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_7b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_7b*, %struct.SmallStruct_7b** %ss.addr, align 8
   %1 = bitcast { i56 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_7b* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 7, i32 0, i1 false)
   %3 = getelementptr { i56 }, { i56 }* %.coerce, i32 0, i32 0
-  %4 = load i56* %3, align 1
+  %4 = load i56, i56* %3, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i56 inreg %4)
   ret void
  ; CHECK-LABEL: smallStruct_7b:
@@ -252,10 +252,10 @@ define void @smallStruct_8b(%struct.Smal
 entry:
   %ss.addr = alloca %struct.SmallStruct_8b*, align 8
   store %struct.SmallStruct_8b* %ss, %struct.SmallStruct_8b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_8b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_8b*, %struct.SmallStruct_8b** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_8b* %0 to { i64 }*
   %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
-  %3 = load i64* %2, align 1
+  %3 = load i64, i64* %2, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
   ret void
  ; CHECK-LABEL: smallStruct_8b:
@@ -267,14 +267,14 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_9b*, align 8
   %.coerce = alloca { i64, i8 }
   store %struct.SmallStruct_9b* %ss, %struct.SmallStruct_9b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_9b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_9b*, %struct.SmallStruct_9b** %ss.addr, align 8
   %1 = bitcast { i64, i8 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_9b* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 9, i32 0, i1 false)
   %3 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 0
-  %4 = load i64* %3, align 1
+  %4 = load i64, i64* %3, align 1
   %5 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 1
-  %6 = load i8* %5, align 1
+  %6 = load i8, i8* %5, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %4, i8 inreg %6)
   ret void
  ; CHECK-LABEL: smallStruct_9b:
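
The rewrite is type-agnostic: the coerced non-power-of-two integers these struct tests use (i24, i40, i48, i56) name their width explicitly just as i32 does.

  ; sketch, assuming a hypothetical { i24 } field pointer %q
  %v = load i24, i24* %q, align 1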

Modified: llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll Fri Feb 27 15:17:42 2015
@@ -74,10 +74,10 @@ define void @smallStruct_1b1s(%struct.Sm
 entry:
   %ss.addr = alloca %struct.SmallStruct_1b1s*, align 8
   store %struct.SmallStruct_1b1s* %ss, %struct.SmallStruct_1b1s** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1b1s** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_1b1s*, %struct.SmallStruct_1b1s** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_1b1s* %0 to { i32 }*
   %2 = getelementptr { i32 }, { i32 }* %1, i32 0, i32 0
-  %3 = load i32* %2, align 1
+  %3 = load i32, i32* %2, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
   ret void
  ; CHECK-LABEL: smallStruct_1b1s:
@@ -88,10 +88,10 @@ define void @smallStruct_1b1i(%struct.Sm
 entry:
   %ss.addr = alloca %struct.SmallStruct_1b1i*, align 8
   store %struct.SmallStruct_1b1i* %ss, %struct.SmallStruct_1b1i** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1b1i** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_1b1i*, %struct.SmallStruct_1b1i** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_1b1i* %0 to { i64 }*
   %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
-  %3 = load i64* %2, align 1
+  %3 = load i64, i64* %2, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
   ret void
  ; CHECK-LABEL: smallStruct_1b1i:
@@ -103,12 +103,12 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_1b1s1b*, align 8
   %.coerce = alloca { i48 }
   store %struct.SmallStruct_1b1s1b* %ss, %struct.SmallStruct_1b1s1b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1b1s1b** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_1b1s1b*, %struct.SmallStruct_1b1s1b** %ss.addr, align 8
   %1 = bitcast { i48 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_1b1s1b* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false)
   %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
-  %4 = load i48* %3, align 1
+  %4 = load i48, i48* %3, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
   ret void
  ; CHECK-LABEL: smallStruct_1b1s1b:
@@ -121,10 +121,10 @@ define void @smallStruct_1s1i(%struct.Sm
 entry:
   %ss.addr = alloca %struct.SmallStruct_1s1i*, align 8
   store %struct.SmallStruct_1s1i* %ss, %struct.SmallStruct_1s1i** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1s1i** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_1s1i*, %struct.SmallStruct_1s1i** %ss.addr, align 8
   %1 = bitcast %struct.SmallStruct_1s1i* %0 to { i64 }*
   %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
-  %3 = load i64* %2, align 1
+  %3 = load i64, i64* %2, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
   ret void
  ; CHECK-LABEL: smallStruct_1s1i:
@@ -136,12 +136,12 @@ entry:
   %ss.addr = alloca %struct.SmallStruct_3b1s*, align 8
   %.coerce = alloca { i48 }
   store %struct.SmallStruct_3b1s* %ss, %struct.SmallStruct_3b1s** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_3b1s** %ss.addr, align 8
+  %0 = load %struct.SmallStruct_3b1s*, %struct.SmallStruct_3b1s** %ss.addr, align 8
   %1 = bitcast { i48 }* %.coerce to i8*
   %2 = bitcast %struct.SmallStruct_3b1s* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false)
   %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
-  %4 = load i48* %3, align 1
+  %4 = load i48, i48* %3, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
   ret void
  ; CHECK-LABEL: smallStruct_3b1s:

Modified: llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll Fri Feb 27 15:17:42 2015
@@ -110,42 +110,42 @@ entry:
   store %struct.SmallStruct_1b* %ss7, %struct.SmallStruct_1b** %ss7.addr, align 8
   store %struct.SmallStruct_1b* %ss8, %struct.SmallStruct_1b** %ss8.addr, align 8
   store %struct.SmallStruct_1b* %ss9, %struct.SmallStruct_1b** %ss9.addr, align 8
-  %0 = load %struct.SmallStruct_1b** %ss1.addr, align 8
-  %1 = load %struct.SmallStruct_1b** %ss2.addr, align 8
-  %2 = load %struct.SmallStruct_1b** %ss3.addr, align 8
-  %3 = load %struct.SmallStruct_1b** %ss4.addr, align 8
-  %4 = load %struct.SmallStruct_1b** %ss5.addr, align 8
-  %5 = load %struct.SmallStruct_1b** %ss6.addr, align 8
-  %6 = load %struct.SmallStruct_1b** %ss7.addr, align 8
-  %7 = load %struct.SmallStruct_1b** %ss8.addr, align 8
-  %8 = load %struct.SmallStruct_1b** %ss9.addr, align 8
+  %0 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss1.addr, align 8
+  %1 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss2.addr, align 8
+  %2 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss3.addr, align 8
+  %3 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss4.addr, align 8
+  %4 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss5.addr, align 8
+  %5 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss6.addr, align 8
+  %6 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss7.addr, align 8
+  %7 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss8.addr, align 8
+  %8 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss9.addr, align 8
   %9 = bitcast %struct.SmallStruct_1b* %0 to { i8 }*
   %10 = getelementptr { i8 }, { i8 }* %9, i32 0, i32 0
-  %11 = load i8* %10, align 1
+  %11 = load i8, i8* %10, align 1
   %12 = bitcast %struct.SmallStruct_1b* %1 to { i8 }*
   %13 = getelementptr { i8 }, { i8 }* %12, i32 0, i32 0
-  %14 = load i8* %13, align 1
+  %14 = load i8, i8* %13, align 1
   %15 = bitcast %struct.SmallStruct_1b* %2 to { i8 }*
   %16 = getelementptr { i8 }, { i8 }* %15, i32 0, i32 0
-  %17 = load i8* %16, align 1
+  %17 = load i8, i8* %16, align 1
   %18 = bitcast %struct.SmallStruct_1b* %3 to { i8 }*
   %19 = getelementptr { i8 }, { i8 }* %18, i32 0, i32 0
-  %20 = load i8* %19, align 1
+  %20 = load i8, i8* %19, align 1
   %21 = bitcast %struct.SmallStruct_1b* %4 to { i8 }*
   %22 = getelementptr { i8 }, { i8 }* %21, i32 0, i32 0
-  %23 = load i8* %22, align 1
+  %23 = load i8, i8* %22, align 1
   %24 = bitcast %struct.SmallStruct_1b* %5 to { i8 }*
   %25 = getelementptr { i8 }, { i8 }* %24, i32 0, i32 0
-  %26 = load i8* %25, align 1
+  %26 = load i8, i8* %25, align 1
   %27 = bitcast %struct.SmallStruct_1b* %6 to { i8 }*
   %28 = getelementptr { i8 }, { i8 }* %27, i32 0, i32 0
-  %29 = load i8* %28, align 1
+  %29 = load i8, i8* %28, align 1
   %30 = bitcast %struct.SmallStruct_1b* %7 to { i8 }*
   %31 = getelementptr { i8 }, { i8 }* %30, i32 0, i32 0
-  %32 = load i8* %31, align 1
+  %32 = load i8, i8* %31, align 1
   %33 = bitcast %struct.SmallStruct_1b* %8 to { i8 }*
   %34 = getelementptr { i8 }, { i8 }* %33, i32 0, i32 0
-  %35 = load i8* %34, align 1
+  %35 = load i8, i8* %34, align 1
   call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i8 inreg %11, i8 inreg %14, i8 inreg %17, i8 inreg %20, i8 inreg %23, i8 inreg %26, i8 inreg %29, i8 inreg %32, i8 inreg %35)
   ret void
  ; CHECK-LABEL: smallStruct_1b_x9:

Modified: llvm/trunk/test/CodeGen/Mips/cconv/return-float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/cconv/return-float.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/cconv/return-float.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/cconv/return-float.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@
 
 define float @retfloat() nounwind {
 entry:
-        %0 = load volatile float* @float
+        %0 = load volatile float, float* @float
         ret float %0
 }
 
@@ -35,7 +35,7 @@ entry:
 
 define double @retdouble() nounwind {
 entry:
-        %0 = load volatile double* @double
+        %0 = load volatile double, double* @double
         ret double %0
 }
 

Modified: llvm/trunk/test/CodeGen/Mips/cconv/return-hard-float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/cconv/return-hard-float.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/cconv/return-hard-float.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/cconv/return-hard-float.ll Fri Feb 27 15:17:42 2015
@@ -24,7 +24,7 @@
 
 define float @retfloat() nounwind {
 entry:
-        %0 = load volatile float* @float
+        %0 = load volatile float, float* @float
         ret float %0
 }
 
@@ -38,7 +38,7 @@ entry:
 
 define double @retdouble() nounwind {
 entry:
-        %0 = load volatile double* @double
+        %0 = load volatile double, double* @double
         ret double %0
 }
 
@@ -50,7 +50,7 @@ entry:
 
 define { double, double } @retComplexDouble() #0 {
   %retval = alloca { double, double }, align 8
-  %1 = load { double, double }* %retval
+  %1 = load { double, double }, { double, double }* %retval
   ret { double, double } %1
 }
 

Modified: llvm/trunk/test/CodeGen/Mips/cconv/return-hard-fp128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/cconv/return-hard-fp128.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/cconv/return-hard-fp128.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/cconv/return-hard-fp128.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@
 
 define fp128 @retldouble() nounwind {
 entry:
-        %0 = load volatile fp128* @fp128
+        %0 = load volatile fp128, fp128* @fp128
         ret fp128 %0
 }
 

Modified: llvm/trunk/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
 
 define inreg {fp128} @ret_struct_fp128() nounwind {
 entry:
-        %0 = load volatile {fp128}* @struct_fp128
+        %0 = load volatile {fp128}, {fp128}* @struct_fp128
         ret {fp128} %0
 }
 

Modified: llvm/trunk/test/CodeGen/Mips/cconv/return-struct.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/cconv/return-struct.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/cconv/return-struct.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/cconv/return-struct.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(
 
 define inreg {i8} @ret_struct_i8() nounwind {
 entry:
-        %0 = load volatile {i8}* @struct_byte
+        %0 = load volatile {i8}, {i8}* @struct_byte
         ret {i8} %0
 }
 
@@ -54,7 +54,7 @@ entry:
         %0 = bitcast {i8,i8}* %retval to i8*
         call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds ({i8,i8}* @struct_2byte, i32 0, i32 0), i64 2, i32 1, i1 false)
         %1 = bitcast {i8,i8}* %retval to {i16}*
-        %2 = load volatile {i16}* %1
+        %2 = load volatile {i16}, {i16}* %1
         ret {i16} %2
 }
 
@@ -91,7 +91,7 @@ entry:
 ; missed by the CCPromoteToType and the shift didn't happen.
 define inreg {i48} @ret_struct_3xi16() nounwind {
 entry:
-        %0 = load volatile i48* bitcast ({[3 x i16]}* @struct_3xi16 to i48*), align 2
+        %0 = load volatile i48, i48* bitcast ({[3 x i16]}* @struct_3xi16 to i48*), align 2
         %1 = insertvalue {i48} undef, i48 %0, 0
         ret {i48} %1
 }
@@ -174,7 +174,7 @@ entry:
 ; This time we let the backend lower the sret argument.
 define {[6 x i32]} @ret_struct_6xi32() {
 entry:
-        %0 = load volatile {[6 x i32]}* @struct_6xi32, align 2
+        %0 = load volatile {[6 x i32]}, {[6 x i32]}* @struct_6xi32, align 2
         ret {[6 x i32]} %0
 }
 

Modified: llvm/trunk/test/CodeGen/Mips/cconv/return.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/cconv/return.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/cconv/return.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/cconv/return.ll Fri Feb 27 15:17:42 2015
@@ -24,7 +24,7 @@
 
 define i8 @reti8() nounwind {
 entry:
-        %0 = load volatile i8* @byte
+        %0 = load volatile i8, i8* @byte
         ret i8 %0
 }
 
@@ -38,7 +38,7 @@ entry:
 
 define i32 @reti32() nounwind {
 entry:
-        %0 = load volatile i32* @word
+        %0 = load volatile i32, i32* @word
         ret i32 %0
 }
 
@@ -52,7 +52,7 @@ entry:
 
 define i64 @reti64() nounwind {
 entry:
-        %0 = load volatile i64* @dword
+        %0 = load volatile i64, i64* @dword
         ret i64 %0
 }
 

Modified: llvm/trunk/test/CodeGen/Mips/cfi_offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/cfi_offset.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/cfi_offset.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/cfi_offset.ll Fri Feb 27 15:17:42 2015
@@ -32,8 +32,8 @@ define void @bar() {
 ; CHECK:  .cfi_offset 31, -20
 ; CHECK:  .cfi_offset 16, -24
 
-    %val1 = load volatile double* @var
-    %val2 = load volatile double* @var
+    %val1 = load volatile double, double* @var
+    %val2 = load volatile double, double* @var
     call void (...)* @foo() nounwind
     store volatile double %val1, double* @var
     store volatile double %val2, double* @var

Modified: llvm/trunk/test/CodeGen/Mips/ci2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/ci2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/ci2.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/ci2.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
 define void @foo() #0 {
 entry:
   store i32 305419896, i32* @i, align 4
-  %0 = load i32* @b, align 4
+  %0 = load i32, i32* @b, align 4
   %tobool = icmp ne i32 %0, 0
   br i1 %tobool, label %if.then, label %if.else
 

Modified: llvm/trunk/test/CodeGen/Mips/cmov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/cmov.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/cmov.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/cmov.ll Fri Feb 27 15:17:42 2015
@@ -41,7 +41,7 @@
 define i32* @cmov1(i32 signext %s) nounwind readonly {
 entry:
   %tobool = icmp ne i32 %s, 0
-  %tmp1 = load i32** @i3, align 4
+  %tmp1 = load i32*, i32** @i3, align 4
   %cond = select i1 %tobool, i32* getelementptr inbounds ([3 x i32]* @i1, i32 0, i32 0), i32* %tmp1
   ret i32* %cond
 }
@@ -81,8 +81,8 @@ entry:
 define i32 @cmov2(i32 signext %s) nounwind readonly {
 entry:
   %tobool = icmp ne i32 %s, 0
-  %tmp1 = load i32* @c, align 4
-  %tmp2 = load i32* @d, align 4
+  %tmp1 = load i32, i32* @c, align 4
+  %tmp2 = load i32, i32* @d, align 4
   %cond = select i1 %tobool, i32 %tmp1, i32 %tmp2
   ret i32 %cond
 }

Modified: llvm/trunk/test/CodeGen/Mips/cmplarge.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/cmplarge.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/cmplarge.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/cmplarge.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ target triple = "mipsel--linux-gnu"
 define void @getSubImagesLuma(%struct.StorablePicture* nocapture %s) #0 {
 entry:
   %size_y = getelementptr inbounds %struct.StorablePicture, %struct.StorablePicture* %s, i32 0, i32 1
-  %0 = load i32* %size_y, align 4
+  %0 = load i32, i32* %size_y, align 4
   %sub = add nsw i32 %0, -1
   %add5 = add nsw i32 %0, 20
   %cmp6 = icmp sgt i32 %add5, -20
@@ -20,7 +20,7 @@ for.body:
   %j.07 = phi i32 [ %inc, %for.body ], [ -20, %entry ]
   %call = tail call i32 bitcast (i32 (...)* @iClip3 to i32 (i32, i32, i32)*)(i32 0, i32 %sub, i32 %j.07) #2
   %inc = add nsw i32 %j.07, 1
-  %1 = load i32* %size_y, align 4
+  %1 = load i32, i32* %size_y, align 4
   %add = add nsw i32 %1, 20
   %cmp = icmp slt i32 %inc, %add
   br i1 %cmp, label %for.body, label %for.end

Modified: llvm/trunk/test/CodeGen/Mips/const4a.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/const4a.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/const4a.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/const4a.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ target triple = "mips--linux-gnu"
 define void @t() #0 {
 entry:
   store i32 -559023410, i32* @i, align 4
-  %0 = load i32* @b, align 4
+  %0 = load i32, i32* @b, align 4
 ; no-load-relax:	lw	${{[0-9]+}}, $CPI0_1	# 16 bit inst
   %tobool = icmp ne i32 %0, 0
   br i1 %tobool, label %if.then, label %if.else

Modified: llvm/trunk/test/CodeGen/Mips/ctlz.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/ctlz.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/ctlz.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/ctlz.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define i32 @main() #0 {
 entry:
   %retval = alloca i32, align 4
   store i32 0, i32* %retval
-  %0 = load i32* @x, align 4
+  %0 = load i32, i32* @x, align 4
   %1 = call i32 @llvm.ctlz.i32(i32 %0, i1 true)
   store i32 %1, i32* @y, align 4
   ret i32 0

Modified: llvm/trunk/test/CodeGen/Mips/disable-tail-merge.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/disable-tail-merge.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/disable-tail-merge.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/disable-tail-merge.ll Fri Feb 27 15:17:42 2015
@@ -9,20 +9,20 @@
 define i32 @test1(i32 %a) {
 entry:
   %tobool = icmp eq i32 %a, 0
-  %0 = load i32* @g0, align 4
+  %0 = load i32, i32* @g0, align 4
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:
   %add = add nsw i32 %0, 1
   store i32 %add, i32* @g0, align 4
-  %1 = load i32* @g1, align 4
+  %1 = load i32, i32* @g1, align 4
   %add1 = add nsw i32 %1, 23
   br label %if.end
 
 if.else:
   %add2 = add nsw i32 %0, 11
   store i32 %add2, i32* @g0, align 4
-  %2 = load i32* @g1, align 4
+  %2 = load i32, i32* @g1, align 4
   %add3 = add nsw i32 %2, 23
   br label %if.end
 

Modified: llvm/trunk/test/CodeGen/Mips/div.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/div.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/div.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/div.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @iiii, align 4
-  %1 = load i32* @jjjj, align 4
+  %0 = load i32, i32* @iiii, align 4
+  %1 = load i32, i32* @jjjj, align 4
   %div = sdiv i32 %0, %1
 ; 16:	div	$zero, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: 	mflo	${{[0-9]+}}

Modified: llvm/trunk/test/CodeGen/Mips/div_rem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/div_rem.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/div_rem.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/div_rem.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @iiii, align 4
-  %1 = load i32* @jjjj, align 4
+  %0 = load i32, i32* @iiii, align 4
+  %1 = load i32, i32* @jjjj, align 4
   %div = sdiv i32 %0, %1
   store i32 %div, i32* @kkkk, align 4
   %rem = srem i32 %0, %1

Modified: llvm/trunk/test/CodeGen/Mips/divrem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/divrem.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/divrem.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/divrem.ll Fri Feb 27 15:17:42 2015
@@ -220,8 +220,8 @@ entry:
 ; FIXME: It's not clear what this is supposed to test.
 define i32 @killFlags() {
 entry:
-  %0 = load i32* @g0, align 4
-  %1 = load i32* @g1, align 4
+  %0 = load i32, i32* @g0, align 4
+  %1 = load i32, i32* @g1, align 4
   %div = sdiv i32 %0, %1
   ret i32 %div
 }

Modified: llvm/trunk/test/CodeGen/Mips/divu.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/divu.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/divu.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/divu.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @iiii, align 4
-  %1 = load i32* @jjjj, align 4
+  %0 = load i32, i32* @iiii, align 4
+  %1 = load i32, i32* @jjjj, align 4
   %div = udiv i32 %0, %1
 ; 16:	divu	$zero, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: 	mflo	${{[0-9]+}}

Modified: llvm/trunk/test/CodeGen/Mips/divu_remu.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/divu_remu.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/divu_remu.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/divu_remu.ll Fri Feb 27 15:17:42 2015
@@ -8,8 +8,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32* @iiii, align 4
-  %1 = load i32* @jjjj, align 4
+  %0 = load i32, i32* @iiii, align 4
+  %1 = load i32, i32* @jjjj, align 4
   %div = udiv i32 %0, %1
   store i32 %div, i32* @kkkk, align 4
   %rem = urem i32 %0, %1

Modified: llvm/trunk/test/CodeGen/Mips/dsp-patterns.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/dsp-patterns.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/dsp-patterns.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/dsp-patterns.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 define zeroext i8 @test_lbux(i8* nocapture %b, i32 %i) {
 entry:
   %add.ptr = getelementptr inbounds i8, i8* %b, i32 %i
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
   ret i8 %0
 }
 
@@ -17,7 +17,7 @@ entry:
 define signext i16 @test_lhx(i16* nocapture %b, i32 %i) {
 entry:
   %add.ptr = getelementptr inbounds i16, i16* %b, i32 %i
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
   ret i16 %0
 }
 
@@ -27,7 +27,7 @@ entry:
 define i32 @test_lwx(i32* nocapture %b, i32 %i) {
 entry:
   %add.ptr = getelementptr inbounds i32, i32* %b, i32 %i
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
   ret i32 %0
 }
 

Modified: llvm/trunk/test/CodeGen/Mips/dsp-vec-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/dsp-vec-load-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/dsp-vec-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/dsp-vec-load-store.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 
 define void @extend_load_trunc_store_v2i8() {
 entry:
-  %0 = load <2 x i8>* @g1, align 2
+  %0 = load <2 x i8>, <2 x i8>* @g1, align 2
   store <2 x i8> %0, <2 x i8>* @g0, align 2
   ret void
 }

Modified: llvm/trunk/test/CodeGen/Mips/eh.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/eh.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/eh.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/eh.ll Fri Feb 27 15:17:42 2015
@@ -37,7 +37,7 @@ lpad:
 catch:                                            ; preds = %lpad
   %3 = tail call i8* @__cxa_begin_catch(i8* %exn) nounwind
   %4 = bitcast i8* %3 to double*
-  %exn.scalar = load double* %4, align 8
+  %exn.scalar = load double, double* %4, align 8
   %add = fadd double %exn.scalar, %i2
   store double %add, double* @g1, align 8
   tail call void @__cxa_end_catch() nounwind

Modified: llvm/trunk/test/CodeGen/Mips/emit-big-cst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/emit-big-cst.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/emit-big-cst.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/emit-big-cst.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
 
 define void @accessBig(i64* %storage) {
   %addr = bitcast i64* %storage to i82*
-  %bigLoadedCst = load volatile i82* @bigCst
+  %bigLoadedCst = load volatile i82, i82* @bigCst
   %tmp = add i82 %bigLoadedCst, 1
   store i82 %tmp, i82* %addr
   ret void

Modified: llvm/trunk/test/CodeGen/Mips/ex2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/ex2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/ex2.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/ex2.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ entry:
   unreachable
 
 return:                                           ; No predecessors!
-  %1 = load i32* %retval
+  %1 = load i32, i32* %retval
   ret i32 %1
 }
 

Modified: llvm/trunk/test/CodeGen/Mips/extins.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/extins.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/extins.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/extins.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ entry:
 ; 16-NOT: ins ${{[0-9]+}}
   %and = shl i32 %s, 5
   %shl = and i32 %and, 16352
-  %tmp3 = load i32* %d, align 4
+  %tmp3 = load i32, i32* %d, align 4
   %and5 = and i32 %tmp3, -16353
   %or = or i32 %and5, %shl
   store i32 %or, i32* %d, align 4

Modified: llvm/trunk/test/CodeGen/Mips/f16abs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/f16abs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/f16abs.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/f16abs.ll Fri Feb 27 15:17:42 2015
@@ -11,12 +11,12 @@
 ; Function Attrs: nounwind optsize
 define i32 @main() #0 {
 entry:
-  %0 = load double* @y, align 8
+  %0 = load double, double* @y, align 8
   %call = tail call double @fabs(double %0) #2
   store double %call, double* @x, align 8
 ; static-NOT: 	.ent	__call_stub_fp_fabs
 ; static-NOT: 	jal fabs
-  %1 = load float* @y1, align 4
+  %1 = load float, float* @y1, align 4
   %call2 = tail call float @fabsf(float %1) #2
   store float %call2, float* @x1, align 4
 ; static-NOT: 	.ent	__call_stub_fp_fabsf

Modified: llvm/trunk/test/CodeGen/Mips/fastcc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/fastcc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/fastcc.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/fastcc.ll Fri Feb 27 15:17:42 2015
@@ -108,23 +108,23 @@ entry:
 ; CHECK-NACL-NOT: lw  $15
 ; CHECK-NACL-NOT: lw  $24
 
-  %0 = load i32* @gi0, align 4
-  %1 = load i32* @gi1, align 4
-  %2 = load i32* @gi2, align 4
-  %3 = load i32* @gi3, align 4
-  %4 = load i32* @gi4, align 4
-  %5 = load i32* @gi5, align 4
-  %6 = load i32* @gi6, align 4
-  %7 = load i32* @gi7, align 4
-  %8 = load i32* @gi8, align 4
-  %9 = load i32* @gi9, align 4
-  %10 = load i32* @gi10, align 4
-  %11 = load i32* @gi11, align 4
-  %12 = load i32* @gi12, align 4
-  %13 = load i32* @gi13, align 4
-  %14 = load i32* @gi14, align 4
-  %15 = load i32* @gi15, align 4
-  %16 = load i32* @gi16, align 4
+  %0 = load i32, i32* @gi0, align 4
+  %1 = load i32, i32* @gi1, align 4
+  %2 = load i32, i32* @gi2, align 4
+  %3 = load i32, i32* @gi3, align 4
+  %4 = load i32, i32* @gi4, align 4
+  %5 = load i32, i32* @gi5, align 4
+  %6 = load i32, i32* @gi6, align 4
+  %7 = load i32, i32* @gi7, align 4
+  %8 = load i32, i32* @gi8, align 4
+  %9 = load i32, i32* @gi9, align 4
+  %10 = load i32, i32* @gi10, align 4
+  %11 = load i32, i32* @gi11, align 4
+  %12 = load i32, i32* @gi12, align 4
+  %13 = load i32, i32* @gi13, align 4
+  %14 = load i32, i32* @gi14, align 4
+  %15 = load i32, i32* @gi15, align 4
+  %16 = load i32, i32* @gi16, align 4
   tail call fastcc void @callee0(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, i32 %14, i32 %15, i32 %16)
   ret void
 }
@@ -196,27 +196,27 @@ entry:
 ; CHECK: lwc1  $f1
 ; CHECK: lwc1  $f0
 
-  %0 = load float* @gfa0, align 4
-  %1 = load float* @gfa1, align 4
-  %2 = load float* @gfa2, align 4
-  %3 = load float* @gfa3, align 4
-  %4 = load float* @gfa4, align 4
-  %5 = load float* @gfa5, align 4
-  %6 = load float* @gfa6, align 4
-  %7 = load float* @gfa7, align 4
-  %8 = load float* @gfa8, align 4
-  %9 = load float* @gfa9, align 4
-  %10 = load float* @gfa10, align 4
-  %11 = load float* @gfa11, align 4
-  %12 = load float* @gfa12, align 4
-  %13 = load float* @gfa13, align 4
-  %14 = load float* @gfa14, align 4
-  %15 = load float* @gfa15, align 4
-  %16 = load float* @gfa16, align 4
-  %17 = load float* @gfa17, align 4
-  %18 = load float* @gfa18, align 4
-  %19 = load float* @gfa19, align 4
-  %20 = load float* @gfa20, align 4
+  %0 = load float, float* @gfa0, align 4
+  %1 = load float, float* @gfa1, align 4
+  %2 = load float, float* @gfa2, align 4
+  %3 = load float, float* @gfa3, align 4
+  %4 = load float, float* @gfa4, align 4
+  %5 = load float, float* @gfa5, align 4
+  %6 = load float, float* @gfa6, align 4
+  %7 = load float, float* @gfa7, align 4
+  %8 = load float, float* @gfa8, align 4
+  %9 = load float, float* @gfa9, align 4
+  %10 = load float, float* @gfa10, align 4
+  %11 = load float, float* @gfa11, align 4
+  %12 = load float, float* @gfa12, align 4
+  %13 = load float, float* @gfa13, align 4
+  %14 = load float, float* @gfa14, align 4
+  %15 = load float, float* @gfa15, align 4
+  %16 = load float, float* @gfa16, align 4
+  %17 = load float, float* @gfa17, align 4
+  %18 = load float, float* @gfa18, align 4
+  %19 = load float, float* @gfa19, align 4
+  %20 = load float, float* @gfa20, align 4
   tail call fastcc void @callee1(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8, float %9, float %10, float %11, float %12, float %13, float %14, float %15, float %16, float %17, float %18, float %19, float %20)
   ret void
 }
@@ -292,17 +292,17 @@ entry:
 ; NOODDSPREG-DAG:    lwc1    $[[F0:f[0-9]*[02468]]], 40($[[R0]])
 ; NOODDSPREG-DAG:    swc1    $[[F0]], 0($sp)
 
-  %0 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 0), align 4
-  %1 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 1), align 4
-  %2 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 2), align 4
-  %3 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 3), align 4
-  %4 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 4), align 4
-  %5 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 5), align 4
-  %6 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 6), align 4
-  %7 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 7), align 4
-  %8 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 8), align 4
-  %9 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 9), align 4
-  %10 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 10), align 4
+  %0 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 0), align 4
+  %1 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 1), align 4
+  %2 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 2), align 4
+  %3 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 3), align 4
+  %4 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 4), align 4
+  %5 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 5), align 4
+  %6 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 6), align 4
+  %7 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 7), align 4
+  %8 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 8), align 4
+  %9 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 9), align 4
+  %10 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 10), align 4
   tail call fastcc void @callee2(float %0, float %1, float %2, float %3,
                                  float %4, float %5, float %6, float %7,
                                  float %8, float %9, float %10)
@@ -373,17 +373,17 @@ entry:
 ; FP64-NOODDSPREG-DAG:    ldc1    $[[F0:f[0-9]*[02468]]], 80($[[R0]])
 ; FP64-NOODDSPREG-DAG:    sdc1    $[[F0]], 0($sp)
 
-  %0 = load double* getelementptr ([11 x double]* @da, i32 0, i32 0), align 8
-  %1 = load double* getelementptr ([11 x double]* @da, i32 0, i32 1), align 8
-  %2 = load double* getelementptr ([11 x double]* @da, i32 0, i32 2), align 8
-  %3 = load double* getelementptr ([11 x double]* @da, i32 0, i32 3), align 8
-  %4 = load double* getelementptr ([11 x double]* @da, i32 0, i32 4), align 8
-  %5 = load double* getelementptr ([11 x double]* @da, i32 0, i32 5), align 8
-  %6 = load double* getelementptr ([11 x double]* @da, i32 0, i32 6), align 8
-  %7 = load double* getelementptr ([11 x double]* @da, i32 0, i32 7), align 8
-  %8 = load double* getelementptr ([11 x double]* @da, i32 0, i32 8), align 8
-  %9 = load double* getelementptr ([11 x double]* @da, i32 0, i32 9), align 8
-  %10 = load double* getelementptr ([11 x double]* @da, i32 0, i32 10), align 8
+  %0 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 0), align 8
+  %1 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 1), align 8
+  %2 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 2), align 8
+  %3 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 3), align 8
+  %4 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 4), align 8
+  %5 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 5), align 8
+  %6 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 6), align 8
+  %7 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 7), align 8
+  %8 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 8), align 8
+  %9 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 9), align 8
+  %10 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 10), align 8
   tail call fastcc void @callee3(double %0, double %1, double %2, double %3,
                                  double %4, double %5, double %6, double %7,
                                  double %8, double %9, double %10)

Modified: llvm/trunk/test/CodeGen/Mips/fixdfsf.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/fixdfsf.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/fixdfsf.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/fixdfsf.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 ; Function Attrs: nounwind optsize
 define void @foo()  {
 entry:
-  %0 = load double* @x, align 8
+  %0 = load double, double* @x, align 8
   %conv = fptoui double %0 to i32
   store i32 %conv, i32* @y, align 4
 ; pic1:	lw	${{[0-9]+}}, %call16(__fixunsdfsi)(${{[0-9]+}})

Modified: llvm/trunk/test/CodeGen/Mips/fp-indexed-ls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/fp-indexed-ls.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/fp-indexed-ls.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/fp-indexed-ls.ll Fri Feb 27 15:17:42 2015
@@ -46,7 +46,7 @@ entry:
 ; CHECK-NACL-NOT: lwxc1
 
   %arrayidx = getelementptr inbounds float, float* %b, i32 %o
-  %0 = load float* %arrayidx, align 4
+  %0 = load float, float* %arrayidx, align 4
   ret float %0
 }
 
@@ -77,7 +77,7 @@ entry:
 ; CHECK-NACL-NOT: ldxc1
 
   %arrayidx = getelementptr inbounds double, double* %b, i32 %o
-  %0 = load double* %arrayidx, align 8
+  %0 = load double, double* %arrayidx, align 8
   ret double %0
 }
 
@@ -101,7 +101,7 @@ entry:
 ; MIPS64R6-NOT:  luxc1
 
   %arrayidx1 = getelementptr inbounds [4 x %struct.S], [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
-  %0 = load float* %arrayidx1, align 1
+  %0 = load float, float* %arrayidx1, align 1
   ret float %0
 }
 
@@ -129,7 +129,7 @@ entry:
 
 ; CHECK-NACL-NOT: swxc1
 
-  %0 = load float* @gf, align 4
+  %0 = load float, float* @gf, align 4
   %arrayidx = getelementptr inbounds float, float* %b, i32 %o
   store float %0, float* %arrayidx, align 4
   ret void
@@ -159,7 +159,7 @@ entry:
 
 ; CHECK-NACL-NOT: sdxc1
 
-  %0 = load double* @gd, align 8
+  %0 = load double, double* @gd, align 8
   %arrayidx = getelementptr inbounds double, double* %b, i32 %o
   store double %0, double* %arrayidx, align 8
   ret void
@@ -179,7 +179,7 @@ entry:
 
 ; MIPS64R6-NOT:  suxc1
 
-  %0 = load float* @gf, align 4
+  %0 = load float, float* @gf, align 4
   %arrayidx1 = getelementptr inbounds [4 x %struct.S], [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
   store float %0, float* %arrayidx1, align 1
   ret void
@@ -200,7 +200,7 @@ entry:
 ; MIPS64R6-NOT:  luxc1
 
   %arrayidx1 = getelementptr inbounds [4 x %struct.S2], [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
-  %0 = load double* %arrayidx1, align 1
+  %0 = load double, double* %arrayidx1, align 1
   ret double %0
 }
 
@@ -218,7 +218,7 @@ entry:
 
 ; MIPS64R6-NOT:  suxc1
 
-  %0 = load double* @gd, align 8
+  %0 = load double, double* @gd, align 8
   %arrayidx1 = getelementptr inbounds [4 x %struct.S2], [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
   store double %0, double* %arrayidx1, align 1
   ret void
@@ -238,7 +238,7 @@ entry:
 
 ; MIPS64R6-NOT:  luxc1
 
-  %0 = load float* getelementptr inbounds (%struct.S3* @s3, i32 0, i32 1), align 1
+  %0 = load float, float* getelementptr inbounds (%struct.S3* @s3, i32 0, i32 1), align 1
   ret float %0
 }
 

Modified: llvm/trunk/test/CodeGen/Mips/fp-spill-reload.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/fp-spill-reload.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/fp-spill-reload.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/fp-spill-reload.ll Fri Feb 27 15:17:42 2015
@@ -5,27 +5,27 @@ define void @foo0(i32* nocapture %b) nou
 entry:
 ; CHECK: sw  $fp
 ; CHECK: lw  $fp
-  %0 = load i32* %b, align 4
+  %0 = load i32, i32* %b, align 4
   %arrayidx.1 = getelementptr inbounds i32, i32* %b, i32 1
-  %1 = load i32* %arrayidx.1, align 4
+  %1 = load i32, i32* %arrayidx.1, align 4
   %add.1 = add nsw i32 %1, 1
   %arrayidx.2 = getelementptr inbounds i32, i32* %b, i32 2
-  %2 = load i32* %arrayidx.2, align 4
+  %2 = load i32, i32* %arrayidx.2, align 4
   %add.2 = add nsw i32 %2, 2
   %arrayidx.3 = getelementptr inbounds i32, i32* %b, i32 3
-  %3 = load i32* %arrayidx.3, align 4
+  %3 = load i32, i32* %arrayidx.3, align 4
   %add.3 = add nsw i32 %3, 3
   %arrayidx.4 = getelementptr inbounds i32, i32* %b, i32 4
-  %4 = load i32* %arrayidx.4, align 4
+  %4 = load i32, i32* %arrayidx.4, align 4
   %add.4 = add nsw i32 %4, 4
   %arrayidx.5 = getelementptr inbounds i32, i32* %b, i32 5
-  %5 = load i32* %arrayidx.5, align 4
+  %5 = load i32, i32* %arrayidx.5, align 4
   %add.5 = add nsw i32 %5, 5
   %arrayidx.6 = getelementptr inbounds i32, i32* %b, i32 6
-  %6 = load i32* %arrayidx.6, align 4
+  %6 = load i32, i32* %arrayidx.6, align 4
   %add.6 = add nsw i32 %6, 6
   %arrayidx.7 = getelementptr inbounds i32, i32* %b, i32 7
-  %7 = load i32* %arrayidx.7, align 4
+  %7 = load i32, i32* %arrayidx.7, align 4
   %add.7 = add nsw i32 %7, 7
   call void @foo2(i32 %0, i32 %add.1, i32 %add.2, i32 %add.3, i32 %add.4, i32 %add.5, i32 %add.6, i32 %add.7) nounwind
   call void bitcast (void (...)* @foo1 to void ()*)() nounwind

Modified: llvm/trunk/test/CodeGen/Mips/fp16instrinsmc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/fp16instrinsmc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/fp16instrinsmc.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/fp16instrinsmc.ll Fri Feb 27 15:17:42 2015
@@ -23,8 +23,8 @@ define void @foo1() #0 {
 ; fmask: .set	reorder
 ; fmask: .end	foo1
 entry:
-  %0 = load float* @x, align 4
-  %1 = load float* @one, align 4
+  %0 = load float, float* @x, align 4
+  %1 = load float, float* @one, align 4
   %call = call float @copysignf(float %0, float %1) #2
   store float %call, float* @y, align 4
   ret void
@@ -39,8 +39,8 @@ define void @foo2() #0 {
 ; fmask:	save	{{.*}}
 ; fmask:	.end	foo2
 entry:
-  %0 = load float* @x, align 4
-  %1 = load float* @negone, align 4
+  %0 = load float, float* @x, align 4
+  %1 = load float, float* @negone, align 4
   %call = call float @copysignf(float %0, float %1) #2
   store float %call, float* @y, align 4
   ret void
@@ -57,8 +57,8 @@ entry:
 ; fmask: .set	macro
 ; fmask: .set	reorder
 ; fmask: .end	foo3
-  %0 = load double* @xd, align 8
-  %1 = load float* @oned, align 4
+  %0 = load double, double* @xd, align 8
+  %1 = load float, float* @oned, align 4
   %conv = fpext float %1 to double
   %call = call double @copysign(double %0, double %conv) #2
   store double %call, double* @yd, align 8
@@ -74,8 +74,8 @@ entry:
 ; fmask:	.ent	foo4
 ; fmask:	save	{{.*}}
 ; fmask:	.end	foo4
-  %0 = load double* @xd, align 8
-  %1 = load double* @negoned, align 8
+  %0 = load double, double* @xd, align 8
+  %1 = load double, double* @negoned, align 8
   %call = call double @copysign(double %0, double %1) #2
   store double %call, double* @yd, align 8
   ret void
@@ -84,7 +84,7 @@ entry:
 ; Function Attrs: nounwind
 define void @foo5() #0 {
 entry:
-  %0 = load float* @xn, align 4
+  %0 = load float, float* @xn, align 4
   %call = call float @fabsf(float %0) #2
   store float %call, float* @y, align 4
   ret void
@@ -96,7 +96,7 @@ declare float @fabsf(float) #1
 ; Function Attrs: nounwind
 define void @foo6() #0 {
 entry:
-  %0 = load double* @xdn, align 8
+  %0 = load double, double* @xdn, align 8
   %call = call double @fabs(double %0) #2
   store double %call, double* @yd, align 8
   ret void
@@ -108,7 +108,7 @@ declare double @fabs(double) #1
 ; Function Attrs: nounwind
 define void @foo7() #0 {
 entry:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   %call = call float @sinf(float %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(sinf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
@@ -122,7 +122,7 @@ declare float @sinf(float) #0
 ; Function Attrs: nounwind
 define void @foo8() #0 {
 entry:
-  %0 = load double* @xd, align 8
+  %0 = load double, double* @xd, align 8
   %call = call double @sin(double %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(sin)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
@@ -136,7 +136,7 @@ declare double @sin(double) #0
 ; Function Attrs: nounwind
 define void @foo9() #0 {
 entry:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   %call = call float @cosf(float %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(cosf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
@@ -150,7 +150,7 @@ declare float @cosf(float) #0
 ; Function Attrs: nounwind
 define void @foo10() #0 {
 entry:
-  %0 = load double* @xd, align 8
+  %0 = load double, double* @xd, align 8
   %call = call double @cos(double %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(cos)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
@@ -164,7 +164,7 @@ declare double @cos(double) #0
 ; Function Attrs: nounwind
 define void @foo11() #0 {
 entry:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   %call = call float @sqrtf(float %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(sqrtf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
@@ -178,7 +178,7 @@ declare float @sqrtf(float) #0
 ; Function Attrs: nounwind
 define void @foo12() #0 {
 entry:
-  %0 = load double* @xd, align 8
+  %0 = load double, double* @xd, align 8
   %call = call double @sqrt(double %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(sqrt)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
@@ -192,7 +192,7 @@ declare double @sqrt(double) #0
 ; Function Attrs: nounwind
 define void @foo13() #0 {
 entry:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   %call = call float @floorf(float %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(floorf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
@@ -206,7 +206,7 @@ declare float @floorf(float) #1
 ; Function Attrs: nounwind
 define void @foo14() #0 {
 entry:
-  %0 = load double* @xd, align 8
+  %0 = load double, double* @xd, align 8
   %call = call double @floor(double %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(floor)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
@@ -220,7 +220,7 @@ declare double @floor(double) #1
 ; Function Attrs: nounwind
 define void @foo15() #0 {
 entry:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   %call = call float @nearbyintf(float %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(nearbyintf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
@@ -234,7 +234,7 @@ declare float @nearbyintf(float) #1
 ; Function Attrs: nounwind
 define void @foo16() #0 {
 entry:
-  %0 = load double* @xd, align 8
+  %0 = load double, double* @xd, align 8
   %call = call double @nearbyint(double %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(nearbyint)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
@@ -248,7 +248,7 @@ declare double @nearbyint(double) #1
 ; Function Attrs: nounwind
 define void @foo17() #0 {
 entry:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   %call = call float @ceilf(float %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(ceilf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
@@ -262,7 +262,7 @@ declare float @ceilf(float) #1
 ; Function Attrs: nounwind
 define void @foo18() #0 {
 entry:
-  %0 = load double* @xd, align 8
+  %0 = load double, double* @xd, align 8
   %call = call double @ceil(double %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(ceil)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
@@ -276,7 +276,7 @@ declare double @ceil(double) #1
 ; Function Attrs: nounwind
 define void @foo19() #0 {
 entry:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   %call = call float @rintf(float %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(rintf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
@@ -290,7 +290,7 @@ declare float @rintf(float) #1
 ; Function Attrs: nounwind
 define void @foo20() #0 {
 entry:
-  %0 = load double* @xd, align 8
+  %0 = load double, double* @xd, align 8
   %call = call double @rint(double %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(rint)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
@@ -304,7 +304,7 @@ declare double @rint(double) #1
 ; Function Attrs: nounwind
 define void @foo21() #0 {
 entry:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   %call = call float @truncf(float %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(truncf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
@@ -318,7 +318,7 @@ declare float @truncf(float) #1
 ; Function Attrs: nounwind
 define void @foo22() #0 {
 entry:
-  %0 = load double* @xd, align 8
+  %0 = load double, double* @xd, align 8
   %call = call double @trunc(double %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(trunc)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
@@ -332,7 +332,7 @@ declare double @trunc(double) #1
 ; Function Attrs: nounwind
 define void @foo23() #0 {
 entry:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   %call = call float @log2f(float %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(log2f)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
@@ -346,7 +346,7 @@ declare float @log2f(float) #0
 ; Function Attrs: nounwind
 define void @foo24() #0 {
 entry:
-  %0 = load double* @xd, align 8
+  %0 = load double, double* @xd, align 8
   %call = call double @log2(double %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(log2)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
@@ -360,7 +360,7 @@ declare double @log2(double) #0
 ; Function Attrs: nounwind
 define void @foo25() #0 {
 entry:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   %call = call float @exp2f(float %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(exp2f)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
@@ -374,7 +374,7 @@ declare float @exp2f(float) #0
 ; Function Attrs: nounwind
 define void @foo26() #0 {
 entry:
-  %0 = load double* @xd, align 8
+  %0 = load double, double* @xd, align 8
   %call = call double @exp2(double %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(exp2)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})

Modified: llvm/trunk/test/CodeGen/Mips/fp16static.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/fp16static.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/fp16static.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/fp16static.ll Fri Feb 27 15:17:42 2015
@@ -4,8 +4,8 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load float* @x, align 4
-  %1 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
+  %1 = load float, float* @x, align 4
   %mul = fmul float %0, %1
   store float %mul, float* @x, align 4
 ; CHECK-STATIC16: jal	__mips16_mulsf3

Modified: llvm/trunk/test/CodeGen/Mips/fpneeded.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/fpneeded.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/fpneeded.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/fpneeded.ll Fri Feb 27 15:17:42 2015
@@ -76,8 +76,8 @@ entry:
 define void @foo1() #0 {
 entry:
   store float 1.000000e+00, float* @zz, align 4
-  %0 = load float* @y, align 4
-  %1 = load float* @x, align 4
+  %0 = load float, float* @y, align 4
+  %1 = load float, float* @x, align 4
   %add = fadd float %0, %1
   store float %add, float* @z, align 4
   ret void
@@ -96,7 +96,7 @@ entry:
 
 define void @foo2() #0 {
 entry:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   call void @vf(float %0)
   ret void
 }

Modified: llvm/trunk/test/CodeGen/Mips/fpnotneeded.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/fpnotneeded.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/fpnotneeded.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/fpnotneeded.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ entry:
 
 define i32 @iv() #0 {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   ret i32 %0
 }
 

Modified: llvm/trunk/test/CodeGen/Mips/global-address.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/global-address.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/global-address.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/global-address.ll Fri Feb 27 15:17:42 2015
@@ -33,9 +33,9 @@ entry:
 ; STATIC-N64: lw  ${{[0-9]+}}, %got_ofst(s1)($[[R1]])
 ; STATIC-N64: ld  ${{[0-9]+}}, %got_disp(g1)
 
-  %0 = load i32* @s1, align 4
+  %0 = load i32, i32* @s1, align 4
   tail call void @foo1(i32 %0) nounwind
-  %1 = load i32* @g1, align 4
+  %1 = load i32, i32* @g1, align 4
   store i32 %1, i32* @s1, align 4
   %add = add nsw i32 %1, 2
   store i32 %add, i32* @g1, align 4

Modified: llvm/trunk/test/CodeGen/Mips/gpreg-lazy-binding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/gpreg-lazy-binding.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/gpreg-lazy-binding.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/gpreg-lazy-binding.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ declare void @externalFunc()
 
 define internal fastcc void @internalFunc() nounwind noinline {
 entry:
-  %0 = load i32* @g, align 4
+  %0 = load i32, i32* @g, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @g, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/Mips/gprestore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/gprestore.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/gprestore.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/gprestore.ll Fri Feb 27 15:17:42 2015
@@ -18,10 +18,10 @@ entry:
 ; CHECK-NOT: got({{.*}})($gp)
 ; CHECK: lw $gp
   tail call void (...)* @f1() nounwind
-  %tmp = load i32* @p, align 4
+  %tmp = load i32, i32* @p, align 4
   tail call void @f2(i32 %tmp) nounwind
-  %tmp1 = load i32* @q, align 4
-  %tmp2 = load i32* @r, align 4
+  %tmp1 = load i32, i32* @q, align 4
+  %tmp2 = load i32, i32* @r, align 4
   tail call void @f3(i32 %tmp1, i32 %tmp2) nounwind
   ret void
 }

Modified: llvm/trunk/test/CodeGen/Mips/hf16_1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/hf16_1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/hf16_1.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/hf16_1.ll Fri Feb 27 15:17:42 2015
@@ -11,96 +11,96 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   call void @v_sf(float %0)
-  %1 = load double* @xd, align 8
+  %1 = load double, double* @xd, align 8
   call void @v_df(double %1)
-  %2 = load float* @x, align 4
-  %3 = load float* @y, align 4
+  %2 = load float, float* @x, align 4
+  %3 = load float, float* @y, align 4
   call void @v_sf_sf(float %2, float %3)
-  %4 = load double* @xd, align 8
-  %5 = load float* @x, align 4
+  %4 = load double, double* @xd, align 8
+  %5 = load float, float* @x, align 4
   call void @v_df_sf(double %4, float %5)
-  %6 = load double* @xd, align 8
-  %7 = load double* @yd, align 8
+  %6 = load double, double* @xd, align 8
+  %7 = load double, double* @yd, align 8
   call void @v_df_df(double %6, double %7)
   %call = call float @sf_v()
-  %8 = load float* @x, align 4
+  %8 = load float, float* @x, align 4
   %call1 = call float @sf_sf(float %8)
-  %9 = load double* @xd, align 8
+  %9 = load double, double* @xd, align 8
   %call2 = call float @sf_df(double %9)
-  %10 = load float* @x, align 4
-  %11 = load float* @y, align 4
+  %10 = load float, float* @x, align 4
+  %11 = load float, float* @y, align 4
   %call3 = call float @sf_sf_sf(float %10, float %11)
-  %12 = load double* @xd, align 8
-  %13 = load float* @x, align 4
+  %12 = load double, double* @xd, align 8
+  %13 = load float, float* @x, align 4
   %call4 = call float @sf_df_sf(double %12, float %13)
-  %14 = load double* @xd, align 8
-  %15 = load double* @yd, align 8
+  %14 = load double, double* @xd, align 8
+  %15 = load double, double* @yd, align 8
   %call5 = call float @sf_df_df(double %14, double %15)
   %call6 = call double @df_v()
-  %16 = load float* @x, align 4
+  %16 = load float, float* @x, align 4
   %call7 = call double @df_sf(float %16)
-  %17 = load double* @xd, align 8
+  %17 = load double, double* @xd, align 8
   %call8 = call double @df_df(double %17)
-  %18 = load float* @x, align 4
-  %19 = load float* @y, align 4
+  %18 = load float, float* @x, align 4
+  %19 = load float, float* @y, align 4
   %call9 = call double @df_sf_sf(float %18, float %19)
-  %20 = load double* @xd, align 8
-  %21 = load float* @x, align 4
+  %20 = load double, double* @xd, align 8
+  %21 = load float, float* @x, align 4
   %call10 = call double @df_df_sf(double %20, float %21)
-  %22 = load double* @xd, align 8
-  %23 = load double* @yd, align 8
+  %22 = load double, double* @xd, align 8
+  %23 = load double, double* @yd, align 8
   %call11 = call double @df_df_df(double %22, double %23)
   %call12 = call { float, float } @sc_v()
   %24 = extractvalue { float, float } %call12, 0
   %25 = extractvalue { float, float } %call12, 1
-  %26 = load float* @x, align 4
+  %26 = load float, float* @x, align 4
   %call13 = call { float, float } @sc_sf(float %26)
   %27 = extractvalue { float, float } %call13, 0
   %28 = extractvalue { float, float } %call13, 1
-  %29 = load double* @xd, align 8
+  %29 = load double, double* @xd, align 8
   %call14 = call { float, float } @sc_df(double %29)
   %30 = extractvalue { float, float } %call14, 0
   %31 = extractvalue { float, float } %call14, 1
-  %32 = load float* @x, align 4
-  %33 = load float* @y, align 4
+  %32 = load float, float* @x, align 4
+  %33 = load float, float* @y, align 4
   %call15 = call { float, float } @sc_sf_sf(float %32, float %33)
   %34 = extractvalue { float, float } %call15, 0
   %35 = extractvalue { float, float } %call15, 1
-  %36 = load double* @xd, align 8
-  %37 = load float* @x, align 4
+  %36 = load double, double* @xd, align 8
+  %37 = load float, float* @x, align 4
   %call16 = call { float, float } @sc_df_sf(double %36, float %37)
   %38 = extractvalue { float, float } %call16, 0
   %39 = extractvalue { float, float } %call16, 1
-  %40 = load double* @xd, align 8
-  %41 = load double* @yd, align 8
+  %40 = load double, double* @xd, align 8
+  %41 = load double, double* @yd, align 8
   %call17 = call { float, float } @sc_df_df(double %40, double %41)
   %42 = extractvalue { float, float } %call17, 0
   %43 = extractvalue { float, float } %call17, 1
   %call18 = call { double, double } @dc_v()
   %44 = extractvalue { double, double } %call18, 0
   %45 = extractvalue { double, double } %call18, 1
-  %46 = load float* @x, align 4
+  %46 = load float, float* @x, align 4
   %call19 = call { double, double } @dc_sf(float %46)
   %47 = extractvalue { double, double } %call19, 0
   %48 = extractvalue { double, double } %call19, 1
-  %49 = load double* @xd, align 8
+  %49 = load double, double* @xd, align 8
   %call20 = call { double, double } @dc_df(double %49)
   %50 = extractvalue { double, double } %call20, 0
   %51 = extractvalue { double, double } %call20, 1
-  %52 = load float* @x, align 4
-  %53 = load float* @y, align 4
+  %52 = load float, float* @x, align 4
+  %53 = load float, float* @y, align 4
   %call21 = call { double, double } @dc_sf_sf(float %52, float %53)
   %54 = extractvalue { double, double } %call21, 0
   %55 = extractvalue { double, double } %call21, 1
-  %56 = load double* @xd, align 8
-  %57 = load float* @x, align 4
+  %56 = load double, double* @xd, align 8
+  %57 = load float, float* @x, align 4
   %call22 = call { double, double } @dc_df_sf(double %56, float %57)
   %58 = extractvalue { double, double } %call22, 0
   %59 = extractvalue { double, double } %call22, 1
-  %60 = load double* @xd, align 8
-  %61 = load double* @yd, align 8
+  %60 = load double, double* @xd, align 8
+  %61 = load double, double* @yd, align 8
   %call23 = call { double, double } @dc_df_df(double %60, double %61)
   %62 = extractvalue { double, double } %call23, 0
   %63 = extractvalue { double, double } %call23, 1

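Every hunk in this patch applies the same mechanical rewrite: the load instruction now spells out its result type as an explicit first operand, followed by the unchanged pointer operand. A minimal before/after sketch of the pattern (the names %v and @g here are placeholders, not taken from any test above):

-  %v = load i32* @g, align 4
+  %v = load i32, i32* @g, align 4

Volatile loads and loads of pointer, vector, and aggregate types follow the same pattern, as the Mips cconv, cmov, and fastcc tests above show; only the textual form of the instruction changes, never its operands or semantics.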