[llvm] r230794 - [opaque pointer type] Add textual IR support for explicit type parameter to load instruction

David Blaikie <dblaikie at gmail.com>
Fri Feb 27 13:18:04 PST 2015
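
This commit mechanically updates the AArch64 CodeGen tests for the new textual
IR syntax: load now takes the loaded type as an explicit first parameter
instead of deriving it from the pointer operand's pointee type, a prerequisite
for making pointer types opaque. The getelementptr operands in these tests
already carry the analogous explicit source-element type from an earlier
commit in the same series. A minimal before/after sketch of the load change
(the names %p and %v are illustrative, not taken from the tests below):

  ; Before: the result type was implied by the pointer operand.
  %v = load i32* %p, align 4

  ; After: the loaded type is spelled explicitly, so it remains
  ; available once the pointee type can no longer be inspected.
  %v = load i32, i32* %p, align 4

The rewrite is purely syntactic, as every hunk below shows: the loaded type is
duplicated in front of the existing pointer operand, and nothing else in the
instruction changes.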


Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vector-ldst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vector-ldst.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vector-ldst.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vector-ldst.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ entry:
 ; CHECK-LABEL: t1:
 ; CHECK: ldr x[[REG:[0-9]+]], [x0]
 ; CHECK: str q0, [x[[REG]]]
-  %tmp1 = load %type1** %argtable, align 8
+  %tmp1 = load %type1*, %type1** %argtable, align 8
   %tmp2 = getelementptr inbounds %type1, %type1* %tmp1, i64 0, i32 0
   store <16 x i8> zeroinitializer, <16 x i8>* %tmp2, align 16
   ret void
@@ -23,7 +23,7 @@ entry:
 ; CHECK-LABEL: t2:
 ; CHECK: ldr x[[REG:[0-9]+]], [x0]
 ; CHECK: str d0, [x[[REG]]]
-  %tmp1 = load %type2** %argtable, align 8
+  %tmp1 = load %type2*, %type2** %argtable, align 8
   %tmp2 = getelementptr inbounds %type2, %type2* %tmp1, i64 0, i32 0
   store <8 x i8> zeroinitializer, <8 x i8>* %tmp2, align 8
   ret void
@@ -52,8 +52,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
   %arrayidx = getelementptr inbounds <2 x i64>, <2 x i64>* %array, i64 %offset
-  %tmp = load <2 x i64>* %arrayidx, align 16
-  %tmp1 = load <2 x i64>** @globalArray64x2, align 8
+  %tmp = load <2 x i64>, <2 x i64>* %arrayidx, align 16
+  %tmp1 = load <2 x i64>*, <2 x i64>** @globalArray64x2, align 8
   %arrayidx1 = getelementptr inbounds <2 x i64>, <2 x i64>* %tmp1, i64 %offset
   store <2 x i64> %tmp, <2 x i64>* %arrayidx1, align 16
   ret void
@@ -66,8 +66,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], #80]
   %arrayidx = getelementptr inbounds <2 x i64>, <2 x i64>* %array, i64 3
-  %tmp = load <2 x i64>* %arrayidx, align 16
-  %tmp1 = load <2 x i64>** @globalArray64x2, align 8
+  %tmp = load <2 x i64>, <2 x i64>* %arrayidx, align 16
+  %tmp1 = load <2 x i64>*, <2 x i64>** @globalArray64x2, align 8
   %arrayidx1 = getelementptr inbounds <2 x i64>, <2 x i64>* %tmp1, i64 5
   store <2 x i64> %tmp, <2 x i64>* %arrayidx1, align 16
   ret void
@@ -81,8 +81,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
   %arrayidx = getelementptr inbounds <4 x i32>, <4 x i32>* %array, i64 %offset
-  %tmp = load <4 x i32>* %arrayidx, align 16
-  %tmp1 = load <4 x i32>** @globalArray32x4, align 8
+  %tmp = load <4 x i32>, <4 x i32>* %arrayidx, align 16
+  %tmp1 = load <4 x i32>*, <4 x i32>** @globalArray32x4, align 8
   %arrayidx1 = getelementptr inbounds <4 x i32>, <4 x i32>* %tmp1, i64 %offset
   store <4 x i32> %tmp, <4 x i32>* %arrayidx1, align 16
   ret void
@@ -95,8 +95,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], #80]
   %arrayidx = getelementptr inbounds <4 x i32>, <4 x i32>* %array, i64 3
-  %tmp = load <4 x i32>* %arrayidx, align 16
-  %tmp1 = load <4 x i32>** @globalArray32x4, align 8
+  %tmp = load <4 x i32>, <4 x i32>* %arrayidx, align 16
+  %tmp1 = load <4 x i32>*, <4 x i32>** @globalArray32x4, align 8
   %arrayidx1 = getelementptr inbounds <4 x i32>, <4 x i32>* %tmp1, i64 5
   store <4 x i32> %tmp, <4 x i32>* %arrayidx1, align 16
   ret void
@@ -110,8 +110,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
   %arrayidx = getelementptr inbounds <8 x i16>, <8 x i16>* %array, i64 %offset
-  %tmp = load <8 x i16>* %arrayidx, align 16
-  %tmp1 = load <8 x i16>** @globalArray16x8, align 8
+  %tmp = load <8 x i16>, <8 x i16>* %arrayidx, align 16
+  %tmp1 = load <8 x i16>*, <8 x i16>** @globalArray16x8, align 8
   %arrayidx1 = getelementptr inbounds <8 x i16>, <8 x i16>* %tmp1, i64 %offset
   store <8 x i16> %tmp, <8 x i16>* %arrayidx1, align 16
   ret void
@@ -124,8 +124,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], #80]
   %arrayidx = getelementptr inbounds <8 x i16>, <8 x i16>* %array, i64 3
-  %tmp = load <8 x i16>* %arrayidx, align 16
-  %tmp1 = load <8 x i16>** @globalArray16x8, align 8
+  %tmp = load <8 x i16>, <8 x i16>* %arrayidx, align 16
+  %tmp1 = load <8 x i16>*, <8 x i16>** @globalArray16x8, align 8
   %arrayidx1 = getelementptr inbounds <8 x i16>, <8 x i16>* %tmp1, i64 5
   store <8 x i16> %tmp, <8 x i16>* %arrayidx1, align 16
   ret void
@@ -139,8 +139,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
   %arrayidx = getelementptr inbounds <16 x i8>, <16 x i8>* %array, i64 %offset
-  %tmp = load <16 x i8>* %arrayidx, align 16
-  %tmp1 = load <16 x i8>** @globalArray8x16, align 8
+  %tmp = load <16 x i8>, <16 x i8>* %arrayidx, align 16
+  %tmp1 = load <16 x i8>*, <16 x i8>** @globalArray8x16, align 8
   %arrayidx1 = getelementptr inbounds <16 x i8>, <16 x i8>* %tmp1, i64 %offset
   store <16 x i8> %tmp, <16 x i8>* %arrayidx1, align 16
   ret void
@@ -153,8 +153,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], #80]
   %arrayidx = getelementptr inbounds <16 x i8>, <16 x i8>* %array, i64 3
-  %tmp = load <16 x i8>* %arrayidx, align 16
-  %tmp1 = load <16 x i8>** @globalArray8x16, align 8
+  %tmp = load <16 x i8>, <16 x i8>* %arrayidx, align 16
+  %tmp1 = load <16 x i8>*, <16 x i8>** @globalArray8x16, align 8
   %arrayidx1 = getelementptr inbounds <16 x i8>, <16 x i8>* %tmp1, i64 5
   store <16 x i8> %tmp, <16 x i8>* %arrayidx1, align 16
   ret void
@@ -168,8 +168,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
   %arrayidx = getelementptr inbounds <1 x i64>, <1 x i64>* %array, i64 %offset
-  %tmp = load <1 x i64>* %arrayidx, align 8
-  %tmp1 = load <1 x i64>** @globalArray64x1, align 8
+  %tmp = load <1 x i64>, <1 x i64>* %arrayidx, align 8
+  %tmp1 = load <1 x i64>*, <1 x i64>** @globalArray64x1, align 8
   %arrayidx1 = getelementptr inbounds <1 x i64>, <1 x i64>* %tmp1, i64 %offset
   store <1 x i64> %tmp, <1 x i64>* %arrayidx1, align 8
   ret void
@@ -182,8 +182,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], #40]
   %arrayidx = getelementptr inbounds <1 x i64>, <1 x i64>* %array, i64 3
-  %tmp = load <1 x i64>* %arrayidx, align 8
-  %tmp1 = load <1 x i64>** @globalArray64x1, align 8
+  %tmp = load <1 x i64>, <1 x i64>* %arrayidx, align 8
+  %tmp1 = load <1 x i64>*, <1 x i64>** @globalArray64x1, align 8
   %arrayidx1 = getelementptr inbounds <1 x i64>, <1 x i64>* %tmp1, i64 5
   store <1 x i64> %tmp, <1 x i64>* %arrayidx1, align 8
   ret void
@@ -197,8 +197,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
   %arrayidx = getelementptr inbounds <2 x i32>, <2 x i32>* %array, i64 %offset
-  %tmp = load <2 x i32>* %arrayidx, align 8
-  %tmp1 = load <2 x i32>** @globalArray32x2, align 8
+  %tmp = load <2 x i32>, <2 x i32>* %arrayidx, align 8
+  %tmp1 = load <2 x i32>*, <2 x i32>** @globalArray32x2, align 8
   %arrayidx1 = getelementptr inbounds <2 x i32>, <2 x i32>* %tmp1, i64 %offset
   store <2 x i32> %tmp, <2 x i32>* %arrayidx1, align 8
   ret void
@@ -211,8 +211,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], #40]
   %arrayidx = getelementptr inbounds <2 x i32>, <2 x i32>* %array, i64 3
-  %tmp = load <2 x i32>* %arrayidx, align 8
-  %tmp1 = load <2 x i32>** @globalArray32x2, align 8
+  %tmp = load <2 x i32>, <2 x i32>* %arrayidx, align 8
+  %tmp1 = load <2 x i32>*, <2 x i32>** @globalArray32x2, align 8
   %arrayidx1 = getelementptr inbounds <2 x i32>, <2 x i32>* %tmp1, i64 5
   store <2 x i32> %tmp, <2 x i32>* %arrayidx1, align 8
   ret void
@@ -226,8 +226,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
   %arrayidx = getelementptr inbounds <4 x i16>, <4 x i16>* %array, i64 %offset
-  %tmp = load <4 x i16>* %arrayidx, align 8
-  %tmp1 = load <4 x i16>** @globalArray16x4, align 8
+  %tmp = load <4 x i16>, <4 x i16>* %arrayidx, align 8
+  %tmp1 = load <4 x i16>*, <4 x i16>** @globalArray16x4, align 8
   %arrayidx1 = getelementptr inbounds <4 x i16>, <4 x i16>* %tmp1, i64 %offset
   store <4 x i16> %tmp, <4 x i16>* %arrayidx1, align 8
   ret void
@@ -240,8 +240,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], #40]
   %arrayidx = getelementptr inbounds <4 x i16>, <4 x i16>* %array, i64 3
-  %tmp = load <4 x i16>* %arrayidx, align 8
-  %tmp1 = load <4 x i16>** @globalArray16x4, align 8
+  %tmp = load <4 x i16>, <4 x i16>* %arrayidx, align 8
+  %tmp1 = load <4 x i16>*, <4 x i16>** @globalArray16x4, align 8
   %arrayidx1 = getelementptr inbounds <4 x i16>, <4 x i16>* %tmp1, i64 5
   store <4 x i16> %tmp, <4 x i16>* %arrayidx1, align 8
   ret void
@@ -255,8 +255,8 @@ entry:
 ; CHECK: ldr [[BASE:x[0-9]+]],
 ; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
   %arrayidx = getelementptr inbounds <8 x i8>, <8 x i8>* %array, i64 %offset
-  %tmp = load <8 x i8>* %arrayidx, align 8
-  %tmp1 = load <8 x i8>** @globalArray8x8, align 8
+  %tmp = load <8 x i8>, <8 x i8>* %arrayidx, align 8
+  %tmp1 = load <8 x i8>*, <8 x i8>** @globalArray8x8, align 8
   %arrayidx1 = getelementptr inbounds <8 x i8>, <8 x i8>* %tmp1, i64 %offset
   store <8 x i8> %tmp, <8 x i8>* %arrayidx1, align 8
   ret void
@@ -270,7 +270,7 @@ define <1 x i64> @fct0() nounwind readon
 entry:
 ; CHECK-LABEL: fct0:
 ; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #3]
-  %0 = load <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <1 x i64>*), align 8
+  %0 = load <1 x i64>, <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <1 x i64>*), align 8
   ret <1 x i64> %0
 }
 
@@ -278,7 +278,7 @@ define <2 x i32> @fct1() nounwind readon
 entry:
 ; CHECK-LABEL: fct1:
 ; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #3]
-  %0 = load <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i32>*), align 8
+  %0 = load <2 x i32>, <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i32>*), align 8
   ret <2 x i32> %0
 }
 
@@ -286,7 +286,7 @@ define <4 x i16> @fct2() nounwind readon
 entry:
 ; CHECK-LABEL: fct2:
 ; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #3]
-  %0 = load <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i16>*), align 8
+  %0 = load <4 x i16>, <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i16>*), align 8
   ret <4 x i16> %0
 }
 
@@ -294,7 +294,7 @@ define <8 x i8> @fct3() nounwind readonl
 entry:
 ; CHECK-LABEL: fct3:
 ; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #3]
-  %0 = load <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i8>*), align 8
+  %0 = load <8 x i8>, <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i8>*), align 8
   ret <8 x i8> %0
 }
 
@@ -302,7 +302,7 @@ define <2 x i64> @fct4() nounwind readon
 entry:
 ; CHECK-LABEL: fct4:
 ; CHECK: ldur {{q[0-9]+}}, [{{x[0-9]+}}, #3]
-  %0 = load <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i64>*), align 16
+  %0 = load <2 x i64>, <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i64>*), align 16
   ret <2 x i64> %0
 }
 
@@ -310,7 +310,7 @@ define <4 x i32> @fct5() nounwind readon
 entry:
 ; CHECK-LABEL: fct5:
 ; CHECK: ldur {{q[0-9]+}}, [{{x[0-9]+}}, #3]
-  %0 = load <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i32>*), align 16
+  %0 = load <4 x i32>, <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i32>*), align 16
   ret <4 x i32> %0
 }
 
@@ -318,7 +318,7 @@ define <8 x i16> @fct6() nounwind readon
 entry:
 ; CHECK-LABEL: fct6:
 ; CHECK: ldur {{q[0-9]+}}, [{{x[0-9]+}}, #3]
-  %0 = load <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i16>*), align 16
+  %0 = load <8 x i16>, <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i16>*), align 16
   ret <8 x i16> %0
 }
 
@@ -326,7 +326,7 @@ define <16 x i8> @fct7() nounwind readon
 entry:
 ; CHECK-LABEL: fct7:
 ; CHECK: ldur {{q[0-9]+}}, [{{x[0-9]+}}, #3]
-  %0 = load <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <16 x i8>*), align 16
+  %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <16 x i8>*), align 16
   ret <16 x i8> %0
 }
 
@@ -335,7 +335,7 @@ entry:
 ; CHECK-LABEL: fct8:
 ; CHECK: ldur [[DESTREG:d[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
 ; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
-  %0 = load <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <1 x i64>*), align 8
+  %0 = load <1 x i64>, <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <1 x i64>*), align 8
   store <1 x i64> %0, <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <1 x i64>*), align 8
   ret void
 }
@@ -345,7 +345,7 @@ entry:
 ; CHECK-LABEL: fct9:
 ; CHECK: ldur [[DESTREG:d[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
 ; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
-  %0 = load <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i32>*), align 8
+  %0 = load <2 x i32>, <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i32>*), align 8
   store <2 x i32> %0, <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <2 x i32>*), align 8
   ret void
 }
@@ -355,7 +355,7 @@ entry:
 ; CHECK-LABEL: fct10:
 ; CHECK: ldur [[DESTREG:d[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
 ; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
-  %0 = load <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i16>*), align 8
+  %0 = load <4 x i16>, <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i16>*), align 8
   store <4 x i16> %0, <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <4 x i16>*), align 8
   ret void
 }
@@ -365,7 +365,7 @@ entry:
 ; CHECK-LABEL: fct11:
 ; CHECK: ldur [[DESTREG:d[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
 ; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
-  %0 = load <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i8>*), align 8
+  %0 = load <8 x i8>, <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i8>*), align 8
   store <8 x i8> %0, <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <8 x i8>*), align 8
   ret void
 }
@@ -375,7 +375,7 @@ entry:
 ; CHECK-LABEL: fct12:
 ; CHECK: ldur [[DESTREG:q[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
 ; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
-  %0 = load <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i64>*), align 16
+  %0 = load <2 x i64>, <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i64>*), align 16
   store <2 x i64> %0, <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <2 x i64>*), align 16
   ret void
 }
@@ -385,7 +385,7 @@ entry:
 ; CHECK-LABEL: fct13:
 ; CHECK: ldur [[DESTREG:q[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
 ; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
-  %0 = load <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i32>*), align 16
+  %0 = load <4 x i32>, <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i32>*), align 16
   store <4 x i32> %0, <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <4 x i32>*), align 16
   ret void
 }
@@ -395,7 +395,7 @@ entry:
 ; CHECK-LABEL: fct14:
 ; CHECK: ldur [[DESTREG:q[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
 ; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
-  %0 = load <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i16>*), align 16
+  %0 = load <8 x i16>, <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i16>*), align 16
   store <8 x i16> %0, <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <8 x i16>*), align 16
   ret void
 }
@@ -405,7 +405,7 @@ entry:
 ; CHECK-LABEL: fct15:
 ; CHECK: ldur [[DESTREG:q[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
 ; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
-  %0 = load <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <16 x i8>*), align 16
+  %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <16 x i8>*), align 16
   store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <16 x i8>*), align 16
   ret void
 }
@@ -420,7 +420,7 @@ define <8 x i8> @fct16(i8* nocapture %sp
 ; CHECK-NEXT: mul.8b v0, v[[REGNUM]], v[[REGNUM]]
 entry:
   %addr = getelementptr i8, i8* %sp0, i64 1
-  %pix_sp0.0.copyload = load i8* %addr, align 1
+  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
   %vec = insertelement <8 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <8 x i8> %vec, %vec
   ret <8 x i8> %vmull.i
@@ -432,7 +432,7 @@ define <16 x i8> @fct17(i8* nocapture %s
 ; CHECK-NEXT: mul.16b v0, v[[REGNUM]], v[[REGNUM]]
 entry:
   %addr = getelementptr i8, i8* %sp0, i64 1
-  %pix_sp0.0.copyload = load i8* %addr, align 1
+  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
   %vec = insertelement <16 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <16 x i8> %vec, %vec
   ret <16 x i8> %vmull.i
@@ -444,7 +444,7 @@ define <4 x i16> @fct18(i16* nocapture %
 ; CHECK-NEXT: mul.4h v0, v[[REGNUM]], v[[REGNUM]]
 entry:
   %addr = getelementptr i16, i16* %sp0, i64 1
-  %pix_sp0.0.copyload = load i16* %addr, align 1
+  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
   %vec = insertelement <4 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <4 x i16> %vec, %vec
   ret <4 x i16> %vmull.i
@@ -456,7 +456,7 @@ define <8 x i16> @fct19(i16* nocapture %
 ; CHECK-NEXT: mul.8h v0, v[[REGNUM]], v[[REGNUM]]
 entry:
   %addr = getelementptr i16, i16* %sp0, i64 1
-  %pix_sp0.0.copyload = load i16* %addr, align 1
+  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
   %vec = insertelement <8 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <8 x i16> %vec, %vec
   ret <8 x i16> %vmull.i
@@ -468,7 +468,7 @@ define <2 x i32> @fct20(i32* nocapture %
 ; CHECK-NEXT: mul.2s v0, v[[REGNUM]], v[[REGNUM]]
 entry:
   %addr = getelementptr i32, i32* %sp0, i64 1
-  %pix_sp0.0.copyload = load i32* %addr, align 1
+  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
   %vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <2 x i32> %vec, %vec
   ret <2 x i32> %vmull.i
@@ -480,7 +480,7 @@ define <4 x i32> @fct21(i32* nocapture %
 ; CHECK-NEXT: mul.4s v0, v[[REGNUM]], v[[REGNUM]]
 entry:
   %addr = getelementptr i32, i32* %sp0, i64 1
-  %pix_sp0.0.copyload = load i32* %addr, align 1
+  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
   %vec = insertelement <4 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <4 x i32> %vec, %vec
   ret <4 x i32> %vmull.i
@@ -491,7 +491,7 @@ define <1 x i64> @fct22(i64* nocapture %
 ; CHECK: ldr d0, [x0, #8]
 entry:
   %addr = getelementptr i64, i64* %sp0, i64 1
-  %pix_sp0.0.copyload = load i64* %addr, align 1
+  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
   %vec = insertelement <1 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
    ret <1 x i64> %vec
 }
@@ -501,7 +501,7 @@ define <2 x i64> @fct23(i64* nocapture %
 ; CHECK: ldr d[[REGNUM:[0-9]+]], [x0, #8]
 entry:
   %addr = getelementptr i64, i64* %sp0, i64 1
-  %pix_sp0.0.copyload = load i64* %addr, align 1
+  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
   %vec = insertelement <2 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
   ret <2 x i64> %vec
 }
@@ -514,7 +514,7 @@ define <8 x i8> @fct24(i8* nocapture %sp
 ; CHECK-NEXT: mul.8b v0, v[[REGNUM]], v[[REGNUM]]
 entry:
   %addr = getelementptr i8, i8* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i8* %addr, align 1
+  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
   %vec = insertelement <8 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <8 x i8> %vec, %vec
   ret <8 x i8> %vmull.i
@@ -526,7 +526,7 @@ define <16 x i8> @fct25(i8* nocapture %s
 ; CHECK-NEXT: mul.16b v0, v[[REGNUM]], v[[REGNUM]]
 entry:
   %addr = getelementptr i8, i8* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i8* %addr, align 1
+  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
   %vec = insertelement <16 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <16 x i8> %vec, %vec
   ret <16 x i8> %vmull.i
@@ -538,7 +538,7 @@ define <4 x i16> @fct26(i16* nocapture %
 ; CHECK-NEXT: mul.4h v0, v[[REGNUM]], v[[REGNUM]]
 entry:
   %addr = getelementptr i16, i16* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i16* %addr, align 1
+  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
   %vec = insertelement <4 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <4 x i16> %vec, %vec
   ret <4 x i16> %vmull.i
@@ -550,7 +550,7 @@ define <8 x i16> @fct27(i16* nocapture %
 ; CHECK-NEXT: mul.8h v0, v[[REGNUM]], v[[REGNUM]]
 entry:
   %addr = getelementptr i16, i16* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i16* %addr, align 1
+  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
   %vec = insertelement <8 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <8 x i16> %vec, %vec
   ret <8 x i16> %vmull.i
@@ -562,7 +562,7 @@ define <2 x i32> @fct28(i32* nocapture %
 ; CHECK-NEXT: mul.2s v0, v[[REGNUM]], v[[REGNUM]]
 entry:
   %addr = getelementptr i32, i32* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i32* %addr, align 1
+  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
   %vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <2 x i32> %vec, %vec
   ret <2 x i32> %vmull.i
@@ -574,7 +574,7 @@ define <4 x i32> @fct29(i32* nocapture %
 ; CHECK-NEXT: mul.4s v0, v[[REGNUM]], v[[REGNUM]]
 entry:
   %addr = getelementptr i32, i32* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i32* %addr, align 1
+  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
   %vec = insertelement <4 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <4 x i32> %vec, %vec
   ret <4 x i32> %vmull.i
@@ -585,7 +585,7 @@ define <1 x i64> @fct30(i64* nocapture %
 ; CHECK: ldr d0, [x0, x1, lsl #3]
 entry:
   %addr = getelementptr i64, i64* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i64* %addr, align 1
+  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
   %vec = insertelement <1 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
    ret <1 x i64> %vec
 }
@@ -595,7 +595,7 @@ define <2 x i64> @fct31(i64* nocapture %
 ; CHECK: ldr d0, [x0, x1, lsl #3]
 entry:
   %addr = getelementptr i64, i64* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i64* %addr, align 1
+  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
   %vec = insertelement <2 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
   ret <2 x i64> %vec
 }

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vext.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vext.ll Fri Feb 27 15:17:42 2015
@@ -6,12 +6,12 @@ define void @test_vext_s8() nounwind ssp
   %xS8x8 = alloca <8 x i8>, align 8
   %__a = alloca <8 x i8>, align 8
   %__b = alloca <8 x i8>, align 8
-  %tmp = load <8 x i8>* %xS8x8, align 8
+  %tmp = load <8 x i8>, <8 x i8>* %xS8x8, align 8
   store <8 x i8> %tmp, <8 x i8>* %__a, align 8
-  %tmp1 = load <8 x i8>* %xS8x8, align 8
+  %tmp1 = load <8 x i8>, <8 x i8>* %xS8x8, align 8
   store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
-  %tmp2 = load <8 x i8>* %__a, align 8
-  %tmp3 = load <8 x i8>* %__b, align 8
+  %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
+  %tmp3 = load <8 x i8>, <8 x i8>* %__b, align 8
   %vext = shufflevector <8 x i8> %tmp2, <8 x i8> %tmp3, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
   store <8 x i8> %vext, <8 x i8>* %xS8x8, align 8
   ret void
@@ -23,12 +23,12 @@ define void @test_vext_u8() nounwind ssp
   %xU8x8 = alloca <8 x i8>, align 8
   %__a = alloca <8 x i8>, align 8
   %__b = alloca <8 x i8>, align 8
-  %tmp = load <8 x i8>* %xU8x8, align 8
+  %tmp = load <8 x i8>, <8 x i8>* %xU8x8, align 8
   store <8 x i8> %tmp, <8 x i8>* %__a, align 8
-  %tmp1 = load <8 x i8>* %xU8x8, align 8
+  %tmp1 = load <8 x i8>, <8 x i8>* %xU8x8, align 8
   store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
-  %tmp2 = load <8 x i8>* %__a, align 8
-  %tmp3 = load <8 x i8>* %__b, align 8
+  %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
+  %tmp3 = load <8 x i8>, <8 x i8>* %__b, align 8
   %vext = shufflevector <8 x i8> %tmp2, <8 x i8> %tmp3, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
   store <8 x i8> %vext, <8 x i8>* %xU8x8, align 8
   ret void
@@ -40,12 +40,12 @@ define void @test_vext_p8() nounwind ssp
   %xP8x8 = alloca <8 x i8>, align 8
   %__a = alloca <8 x i8>, align 8
   %__b = alloca <8 x i8>, align 8
-  %tmp = load <8 x i8>* %xP8x8, align 8
+  %tmp = load <8 x i8>, <8 x i8>* %xP8x8, align 8
   store <8 x i8> %tmp, <8 x i8>* %__a, align 8
-  %tmp1 = load <8 x i8>* %xP8x8, align 8
+  %tmp1 = load <8 x i8>, <8 x i8>* %xP8x8, align 8
   store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
-  %tmp2 = load <8 x i8>* %__a, align 8
-  %tmp3 = load <8 x i8>* %__b, align 8
+  %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
+  %tmp3 = load <8 x i8>, <8 x i8>* %__b, align 8
   %vext = shufflevector <8 x i8> %tmp2, <8 x i8> %tmp3, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
   store <8 x i8> %vext, <8 x i8>* %xP8x8, align 8
   ret void
@@ -57,13 +57,13 @@ define void @test_vext_s16() nounwind ss
   %xS16x4 = alloca <4 x i16>, align 8
   %__a = alloca <4 x i16>, align 8
   %__b = alloca <4 x i16>, align 8
-  %tmp = load <4 x i16>* %xS16x4, align 8
+  %tmp = load <4 x i16>, <4 x i16>* %xS16x4, align 8
   store <4 x i16> %tmp, <4 x i16>* %__a, align 8
-  %tmp1 = load <4 x i16>* %xS16x4, align 8
+  %tmp1 = load <4 x i16>, <4 x i16>* %xS16x4, align 8
   store <4 x i16> %tmp1, <4 x i16>* %__b, align 8
-  %tmp2 = load <4 x i16>* %__a, align 8
+  %tmp2 = load <4 x i16>, <4 x i16>* %__a, align 8
   %tmp3 = bitcast <4 x i16> %tmp2 to <8 x i8>
-  %tmp4 = load <4 x i16>* %__b, align 8
+  %tmp4 = load <4 x i16>, <4 x i16>* %__b, align 8
   %tmp5 = bitcast <4 x i16> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
   %tmp7 = bitcast <8 x i8> %tmp5 to <4 x i16>
@@ -78,13 +78,13 @@ define void @test_vext_u16() nounwind ss
   %xU16x4 = alloca <4 x i16>, align 8
   %__a = alloca <4 x i16>, align 8
   %__b = alloca <4 x i16>, align 8
-  %tmp = load <4 x i16>* %xU16x4, align 8
+  %tmp = load <4 x i16>, <4 x i16>* %xU16x4, align 8
   store <4 x i16> %tmp, <4 x i16>* %__a, align 8
-  %tmp1 = load <4 x i16>* %xU16x4, align 8
+  %tmp1 = load <4 x i16>, <4 x i16>* %xU16x4, align 8
   store <4 x i16> %tmp1, <4 x i16>* %__b, align 8
-  %tmp2 = load <4 x i16>* %__a, align 8
+  %tmp2 = load <4 x i16>, <4 x i16>* %__a, align 8
   %tmp3 = bitcast <4 x i16> %tmp2 to <8 x i8>
-  %tmp4 = load <4 x i16>* %__b, align 8
+  %tmp4 = load <4 x i16>, <4 x i16>* %__b, align 8
   %tmp5 = bitcast <4 x i16> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
   %tmp7 = bitcast <8 x i8> %tmp5 to <4 x i16>
@@ -99,13 +99,13 @@ define void @test_vext_p16() nounwind ss
   %xP16x4 = alloca <4 x i16>, align 8
   %__a = alloca <4 x i16>, align 8
   %__b = alloca <4 x i16>, align 8
-  %tmp = load <4 x i16>* %xP16x4, align 8
+  %tmp = load <4 x i16>, <4 x i16>* %xP16x4, align 8
   store <4 x i16> %tmp, <4 x i16>* %__a, align 8
-  %tmp1 = load <4 x i16>* %xP16x4, align 8
+  %tmp1 = load <4 x i16>, <4 x i16>* %xP16x4, align 8
   store <4 x i16> %tmp1, <4 x i16>* %__b, align 8
-  %tmp2 = load <4 x i16>* %__a, align 8
+  %tmp2 = load <4 x i16>, <4 x i16>* %__a, align 8
   %tmp3 = bitcast <4 x i16> %tmp2 to <8 x i8>
-  %tmp4 = load <4 x i16>* %__b, align 8
+  %tmp4 = load <4 x i16>, <4 x i16>* %__b, align 8
   %tmp5 = bitcast <4 x i16> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
   %tmp7 = bitcast <8 x i8> %tmp5 to <4 x i16>
@@ -120,13 +120,13 @@ define void @test_vext_s32() nounwind ss
   %xS32x2 = alloca <2 x i32>, align 8
   %__a = alloca <2 x i32>, align 8
   %__b = alloca <2 x i32>, align 8
-  %tmp = load <2 x i32>* %xS32x2, align 8
+  %tmp = load <2 x i32>, <2 x i32>* %xS32x2, align 8
   store <2 x i32> %tmp, <2 x i32>* %__a, align 8
-  %tmp1 = load <2 x i32>* %xS32x2, align 8
+  %tmp1 = load <2 x i32>, <2 x i32>* %xS32x2, align 8
   store <2 x i32> %tmp1, <2 x i32>* %__b, align 8
-  %tmp2 = load <2 x i32>* %__a, align 8
+  %tmp2 = load <2 x i32>, <2 x i32>* %__a, align 8
   %tmp3 = bitcast <2 x i32> %tmp2 to <8 x i8>
-  %tmp4 = load <2 x i32>* %__b, align 8
+  %tmp4 = load <2 x i32>, <2 x i32>* %__b, align 8
   %tmp5 = bitcast <2 x i32> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <2 x i32>
   %tmp7 = bitcast <8 x i8> %tmp5 to <2 x i32>
@@ -141,13 +141,13 @@ define void @test_vext_u32() nounwind ss
   %xU32x2 = alloca <2 x i32>, align 8
   %__a = alloca <2 x i32>, align 8
   %__b = alloca <2 x i32>, align 8
-  %tmp = load <2 x i32>* %xU32x2, align 8
+  %tmp = load <2 x i32>, <2 x i32>* %xU32x2, align 8
   store <2 x i32> %tmp, <2 x i32>* %__a, align 8
-  %tmp1 = load <2 x i32>* %xU32x2, align 8
+  %tmp1 = load <2 x i32>, <2 x i32>* %xU32x2, align 8
   store <2 x i32> %tmp1, <2 x i32>* %__b, align 8
-  %tmp2 = load <2 x i32>* %__a, align 8
+  %tmp2 = load <2 x i32>, <2 x i32>* %__a, align 8
   %tmp3 = bitcast <2 x i32> %tmp2 to <8 x i8>
-  %tmp4 = load <2 x i32>* %__b, align 8
+  %tmp4 = load <2 x i32>, <2 x i32>* %__b, align 8
   %tmp5 = bitcast <2 x i32> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <2 x i32>
   %tmp7 = bitcast <8 x i8> %tmp5 to <2 x i32>
@@ -162,13 +162,13 @@ define void @test_vext_f32() nounwind ss
   %xF32x2 = alloca <2 x float>, align 8
   %__a = alloca <2 x float>, align 8
   %__b = alloca <2 x float>, align 8
-  %tmp = load <2 x float>* %xF32x2, align 8
+  %tmp = load <2 x float>, <2 x float>* %xF32x2, align 8
   store <2 x float> %tmp, <2 x float>* %__a, align 8
-  %tmp1 = load <2 x float>* %xF32x2, align 8
+  %tmp1 = load <2 x float>, <2 x float>* %xF32x2, align 8
   store <2 x float> %tmp1, <2 x float>* %__b, align 8
-  %tmp2 = load <2 x float>* %__a, align 8
+  %tmp2 = load <2 x float>, <2 x float>* %__a, align 8
   %tmp3 = bitcast <2 x float> %tmp2 to <8 x i8>
-  %tmp4 = load <2 x float>* %__b, align 8
+  %tmp4 = load <2 x float>, <2 x float>* %__b, align 8
   %tmp5 = bitcast <2 x float> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <2 x float>
   %tmp7 = bitcast <8 x i8> %tmp5 to <2 x float>
@@ -184,13 +184,13 @@ define void @test_vext_s64() nounwind ss
   %xS64x1 = alloca <1 x i64>, align 8
   %__a = alloca <1 x i64>, align 8
   %__b = alloca <1 x i64>, align 8
-  %tmp = load <1 x i64>* %xS64x1, align 8
+  %tmp = load <1 x i64>, <1 x i64>* %xS64x1, align 8
   store <1 x i64> %tmp, <1 x i64>* %__a, align 8
-  %tmp1 = load <1 x i64>* %xS64x1, align 8
+  %tmp1 = load <1 x i64>, <1 x i64>* %xS64x1, align 8
   store <1 x i64> %tmp1, <1 x i64>* %__b, align 8
-  %tmp2 = load <1 x i64>* %__a, align 8
+  %tmp2 = load <1 x i64>, <1 x i64>* %__a, align 8
   %tmp3 = bitcast <1 x i64> %tmp2 to <8 x i8>
-  %tmp4 = load <1 x i64>* %__b, align 8
+  %tmp4 = load <1 x i64>, <1 x i64>* %__b, align 8
   %tmp5 = bitcast <1 x i64> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <1 x i64>
   %tmp7 = bitcast <8 x i8> %tmp5 to <1 x i64>
@@ -206,13 +206,13 @@ define void @test_vext_u64() nounwind ss
   %xU64x1 = alloca <1 x i64>, align 8
   %__a = alloca <1 x i64>, align 8
   %__b = alloca <1 x i64>, align 8
-  %tmp = load <1 x i64>* %xU64x1, align 8
+  %tmp = load <1 x i64>, <1 x i64>* %xU64x1, align 8
   store <1 x i64> %tmp, <1 x i64>* %__a, align 8
-  %tmp1 = load <1 x i64>* %xU64x1, align 8
+  %tmp1 = load <1 x i64>, <1 x i64>* %xU64x1, align 8
   store <1 x i64> %tmp1, <1 x i64>* %__b, align 8
-  %tmp2 = load <1 x i64>* %__a, align 8
+  %tmp2 = load <1 x i64>, <1 x i64>* %__a, align 8
   %tmp3 = bitcast <1 x i64> %tmp2 to <8 x i8>
-  %tmp4 = load <1 x i64>* %__b, align 8
+  %tmp4 = load <1 x i64>, <1 x i64>* %__b, align 8
   %tmp5 = bitcast <1 x i64> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <1 x i64>
   %tmp7 = bitcast <8 x i8> %tmp5 to <1 x i64>
@@ -227,12 +227,12 @@ define void @test_vextq_s8() nounwind ss
   %xS8x16 = alloca <16 x i8>, align 16
   %__a = alloca <16 x i8>, align 16
   %__b = alloca <16 x i8>, align 16
-  %tmp = load <16 x i8>* %xS8x16, align 16
+  %tmp = load <16 x i8>, <16 x i8>* %xS8x16, align 16
   store <16 x i8> %tmp, <16 x i8>* %__a, align 16
-  %tmp1 = load <16 x i8>* %xS8x16, align 16
+  %tmp1 = load <16 x i8>, <16 x i8>* %xS8x16, align 16
   store <16 x i8> %tmp1, <16 x i8>* %__b, align 16
-  %tmp2 = load <16 x i8>* %__a, align 16
-  %tmp3 = load <16 x i8>* %__b, align 16
+  %tmp2 = load <16 x i8>, <16 x i8>* %__a, align 16
+  %tmp3 = load <16 x i8>, <16 x i8>* %__b, align 16
   %vext = shufflevector <16 x i8> %tmp2, <16 x i8> %tmp3, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
   store <16 x i8> %vext, <16 x i8>* %xS8x16, align 16
   ret void
@@ -244,12 +244,12 @@ define void @test_vextq_u8() nounwind ss
   %xU8x16 = alloca <16 x i8>, align 16
   %__a = alloca <16 x i8>, align 16
   %__b = alloca <16 x i8>, align 16
-  %tmp = load <16 x i8>* %xU8x16, align 16
+  %tmp = load <16 x i8>, <16 x i8>* %xU8x16, align 16
   store <16 x i8> %tmp, <16 x i8>* %__a, align 16
-  %tmp1 = load <16 x i8>* %xU8x16, align 16
+  %tmp1 = load <16 x i8>, <16 x i8>* %xU8x16, align 16
   store <16 x i8> %tmp1, <16 x i8>* %__b, align 16
-  %tmp2 = load <16 x i8>* %__a, align 16
-  %tmp3 = load <16 x i8>* %__b, align 16
+  %tmp2 = load <16 x i8>, <16 x i8>* %__a, align 16
+  %tmp3 = load <16 x i8>, <16 x i8>* %__b, align 16
   %vext = shufflevector <16 x i8> %tmp2, <16 x i8> %tmp3, <16 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20>
   store <16 x i8> %vext, <16 x i8>* %xU8x16, align 16
   ret void
@@ -261,12 +261,12 @@ define void @test_vextq_p8() nounwind ss
   %xP8x16 = alloca <16 x i8>, align 16
   %__a = alloca <16 x i8>, align 16
   %__b = alloca <16 x i8>, align 16
-  %tmp = load <16 x i8>* %xP8x16, align 16
+  %tmp = load <16 x i8>, <16 x i8>* %xP8x16, align 16
   store <16 x i8> %tmp, <16 x i8>* %__a, align 16
-  %tmp1 = load <16 x i8>* %xP8x16, align 16
+  %tmp1 = load <16 x i8>, <16 x i8>* %xP8x16, align 16
   store <16 x i8> %tmp1, <16 x i8>* %__b, align 16
-  %tmp2 = load <16 x i8>* %__a, align 16
-  %tmp3 = load <16 x i8>* %__b, align 16
+  %tmp2 = load <16 x i8>, <16 x i8>* %__a, align 16
+  %tmp3 = load <16 x i8>, <16 x i8>* %__b, align 16
   %vext = shufflevector <16 x i8> %tmp2, <16 x i8> %tmp3, <16 x i32> <i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21>
   store <16 x i8> %vext, <16 x i8>* %xP8x16, align 16
   ret void
@@ -278,13 +278,13 @@ define void @test_vextq_s16() nounwind s
   %xS16x8 = alloca <8 x i16>, align 16
   %__a = alloca <8 x i16>, align 16
   %__b = alloca <8 x i16>, align 16
-  %tmp = load <8 x i16>* %xS16x8, align 16
+  %tmp = load <8 x i16>, <8 x i16>* %xS16x8, align 16
   store <8 x i16> %tmp, <8 x i16>* %__a, align 16
-  %tmp1 = load <8 x i16>* %xS16x8, align 16
+  %tmp1 = load <8 x i16>, <8 x i16>* %xS16x8, align 16
   store <8 x i16> %tmp1, <8 x i16>* %__b, align 16
-  %tmp2 = load <8 x i16>* %__a, align 16
+  %tmp2 = load <8 x i16>, <8 x i16>* %__a, align 16
   %tmp3 = bitcast <8 x i16> %tmp2 to <16 x i8>
-  %tmp4 = load <8 x i16>* %__b, align 16
+  %tmp4 = load <8 x i16>, <8 x i16>* %__b, align 16
   %tmp5 = bitcast <8 x i16> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <8 x i16>
   %tmp7 = bitcast <16 x i8> %tmp5 to <8 x i16>
@@ -299,13 +299,13 @@ define void @test_vextq_u16() nounwind s
   %xU16x8 = alloca <8 x i16>, align 16
   %__a = alloca <8 x i16>, align 16
   %__b = alloca <8 x i16>, align 16
-  %tmp = load <8 x i16>* %xU16x8, align 16
+  %tmp = load <8 x i16>, <8 x i16>* %xU16x8, align 16
   store <8 x i16> %tmp, <8 x i16>* %__a, align 16
-  %tmp1 = load <8 x i16>* %xU16x8, align 16
+  %tmp1 = load <8 x i16>, <8 x i16>* %xU16x8, align 16
   store <8 x i16> %tmp1, <8 x i16>* %__b, align 16
-  %tmp2 = load <8 x i16>* %__a, align 16
+  %tmp2 = load <8 x i16>, <8 x i16>* %__a, align 16
   %tmp3 = bitcast <8 x i16> %tmp2 to <16 x i8>
-  %tmp4 = load <8 x i16>* %__b, align 16
+  %tmp4 = load <8 x i16>, <8 x i16>* %__b, align 16
   %tmp5 = bitcast <8 x i16> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <8 x i16>
   %tmp7 = bitcast <16 x i8> %tmp5 to <8 x i16>
@@ -320,13 +320,13 @@ define void @test_vextq_p16() nounwind s
   %xP16x8 = alloca <8 x i16>, align 16
   %__a = alloca <8 x i16>, align 16
   %__b = alloca <8 x i16>, align 16
-  %tmp = load <8 x i16>* %xP16x8, align 16
+  %tmp = load <8 x i16>, <8 x i16>* %xP16x8, align 16
   store <8 x i16> %tmp, <8 x i16>* %__a, align 16
-  %tmp1 = load <8 x i16>* %xP16x8, align 16
+  %tmp1 = load <8 x i16>, <8 x i16>* %xP16x8, align 16
   store <8 x i16> %tmp1, <8 x i16>* %__b, align 16
-  %tmp2 = load <8 x i16>* %__a, align 16
+  %tmp2 = load <8 x i16>, <8 x i16>* %__a, align 16
   %tmp3 = bitcast <8 x i16> %tmp2 to <16 x i8>
-  %tmp4 = load <8 x i16>* %__b, align 16
+  %tmp4 = load <8 x i16>, <8 x i16>* %__b, align 16
   %tmp5 = bitcast <8 x i16> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <8 x i16>
   %tmp7 = bitcast <16 x i8> %tmp5 to <8 x i16>
@@ -341,13 +341,13 @@ define void @test_vextq_s32() nounwind s
   %xS32x4 = alloca <4 x i32>, align 16
   %__a = alloca <4 x i32>, align 16
   %__b = alloca <4 x i32>, align 16
-  %tmp = load <4 x i32>* %xS32x4, align 16
+  %tmp = load <4 x i32>, <4 x i32>* %xS32x4, align 16
   store <4 x i32> %tmp, <4 x i32>* %__a, align 16
-  %tmp1 = load <4 x i32>* %xS32x4, align 16
+  %tmp1 = load <4 x i32>, <4 x i32>* %xS32x4, align 16
   store <4 x i32> %tmp1, <4 x i32>* %__b, align 16
-  %tmp2 = load <4 x i32>* %__a, align 16
+  %tmp2 = load <4 x i32>, <4 x i32>* %__a, align 16
   %tmp3 = bitcast <4 x i32> %tmp2 to <16 x i8>
-  %tmp4 = load <4 x i32>* %__b, align 16
+  %tmp4 = load <4 x i32>, <4 x i32>* %__b, align 16
   %tmp5 = bitcast <4 x i32> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <4 x i32>
   %tmp7 = bitcast <16 x i8> %tmp5 to <4 x i32>
@@ -362,13 +362,13 @@ define void @test_vextq_u32() nounwind s
   %xU32x4 = alloca <4 x i32>, align 16
   %__a = alloca <4 x i32>, align 16
   %__b = alloca <4 x i32>, align 16
-  %tmp = load <4 x i32>* %xU32x4, align 16
+  %tmp = load <4 x i32>, <4 x i32>* %xU32x4, align 16
   store <4 x i32> %tmp, <4 x i32>* %__a, align 16
-  %tmp1 = load <4 x i32>* %xU32x4, align 16
+  %tmp1 = load <4 x i32>, <4 x i32>* %xU32x4, align 16
   store <4 x i32> %tmp1, <4 x i32>* %__b, align 16
-  %tmp2 = load <4 x i32>* %__a, align 16
+  %tmp2 = load <4 x i32>, <4 x i32>* %__a, align 16
   %tmp3 = bitcast <4 x i32> %tmp2 to <16 x i8>
-  %tmp4 = load <4 x i32>* %__b, align 16
+  %tmp4 = load <4 x i32>, <4 x i32>* %__b, align 16
   %tmp5 = bitcast <4 x i32> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <4 x i32>
   %tmp7 = bitcast <16 x i8> %tmp5 to <4 x i32>
@@ -383,13 +383,13 @@ define void @test_vextq_f32() nounwind s
   %xF32x4 = alloca <4 x float>, align 16
   %__a = alloca <4 x float>, align 16
   %__b = alloca <4 x float>, align 16
-  %tmp = load <4 x float>* %xF32x4, align 16
+  %tmp = load <4 x float>, <4 x float>* %xF32x4, align 16
   store <4 x float> %tmp, <4 x float>* %__a, align 16
-  %tmp1 = load <4 x float>* %xF32x4, align 16
+  %tmp1 = load <4 x float>, <4 x float>* %xF32x4, align 16
   store <4 x float> %tmp1, <4 x float>* %__b, align 16
-  %tmp2 = load <4 x float>* %__a, align 16
+  %tmp2 = load <4 x float>, <4 x float>* %__a, align 16
   %tmp3 = bitcast <4 x float> %tmp2 to <16 x i8>
-  %tmp4 = load <4 x float>* %__b, align 16
+  %tmp4 = load <4 x float>, <4 x float>* %__b, align 16
   %tmp5 = bitcast <4 x float> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <4 x float>
   %tmp7 = bitcast <16 x i8> %tmp5 to <4 x float>
@@ -404,13 +404,13 @@ define void @test_vextq_s64() nounwind s
   %xS64x2 = alloca <2 x i64>, align 16
   %__a = alloca <2 x i64>, align 16
   %__b = alloca <2 x i64>, align 16
-  %tmp = load <2 x i64>* %xS64x2, align 16
+  %tmp = load <2 x i64>, <2 x i64>* %xS64x2, align 16
   store <2 x i64> %tmp, <2 x i64>* %__a, align 16
-  %tmp1 = load <2 x i64>* %xS64x2, align 16
+  %tmp1 = load <2 x i64>, <2 x i64>* %xS64x2, align 16
   store <2 x i64> %tmp1, <2 x i64>* %__b, align 16
-  %tmp2 = load <2 x i64>* %__a, align 16
+  %tmp2 = load <2 x i64>, <2 x i64>* %__a, align 16
   %tmp3 = bitcast <2 x i64> %tmp2 to <16 x i8>
-  %tmp4 = load <2 x i64>* %__b, align 16
+  %tmp4 = load <2 x i64>, <2 x i64>* %__b, align 16
   %tmp5 = bitcast <2 x i64> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <2 x i64>
   %tmp7 = bitcast <16 x i8> %tmp5 to <2 x i64>
@@ -425,13 +425,13 @@ define void @test_vextq_u64() nounwind s
   %xU64x2 = alloca <2 x i64>, align 16
   %__a = alloca <2 x i64>, align 16
   %__b = alloca <2 x i64>, align 16
-  %tmp = load <2 x i64>* %xU64x2, align 16
+  %tmp = load <2 x i64>, <2 x i64>* %xU64x2, align 16
   store <2 x i64> %tmp, <2 x i64>* %__a, align 16
-  %tmp1 = load <2 x i64>* %xU64x2, align 16
+  %tmp1 = load <2 x i64>, <2 x i64>* %xU64x2, align 16
   store <2 x i64> %tmp1, <2 x i64>* %__b, align 16
-  %tmp2 = load <2 x i64>* %__a, align 16
+  %tmp2 = load <2 x i64>, <2 x i64>* %__a, align 16
   %tmp3 = bitcast <2 x i64> %tmp2 to <16 x i8>
-  %tmp4 = load <2 x i64>* %__b, align 16
+  %tmp4 = load <2 x i64>, <2 x i64>* %__b, align 16
   %tmp5 = bitcast <2 x i64> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <2 x i64>
   %tmp7 = bitcast <16 x i8> %tmp5 to <2 x i64>

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vhadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vhadd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vhadd.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vhadd.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @shadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: shadd8b:
 ;CHECK: shadd.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @shadd8b(<8 x i8>* %A, <
 define <16 x i8> @shadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: shadd16b:
 ;CHECK: shadd.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -21,8 +21,8 @@ define <16 x i8> @shadd16b(<16 x i8>* %A
 define <4 x i16> @shadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: shadd4h:
 ;CHECK: shadd.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -30,8 +30,8 @@ define <4 x i16> @shadd4h(<4 x i16>* %A,
 define <8 x i16> @shadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: shadd8h:
 ;CHECK: shadd.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -39,8 +39,8 @@ define <8 x i16> @shadd8h(<8 x i16>* %A,
 define <2 x i32> @shadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: shadd2s:
 ;CHECK: shadd.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -48,8 +48,8 @@ define <2 x i32> @shadd2s(<2 x i32>* %A,
 define <4 x i32> @shadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: shadd4s:
 ;CHECK: shadd.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -57,8 +57,8 @@ define <4 x i32> @shadd4s(<4 x i32>* %A,
 define <8 x i8> @uhadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: uhadd8b:
 ;CHECK: uhadd.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -66,8 +66,8 @@ define <8 x i8> @uhadd8b(<8 x i8>* %A, <
 define <16 x i8> @uhadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: uhadd16b:
 ;CHECK: uhadd.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -75,8 +75,8 @@ define <16 x i8> @uhadd16b(<16 x i8>* %A
 define <4 x i16> @uhadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: uhadd4h:
 ;CHECK: uhadd.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -84,8 +84,8 @@ define <4 x i16> @uhadd4h(<4 x i16>* %A,
 define <8 x i16> @uhadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: uhadd8h:
 ;CHECK: uhadd.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -93,8 +93,8 @@ define <8 x i16> @uhadd8h(<8 x i16>* %A,
 define <2 x i32> @uhadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: uhadd2s:
 ;CHECK: uhadd.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -102,8 +102,8 @@ define <2 x i32> @uhadd2s(<2 x i32>* %A,
 define <4 x i32> @uhadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: uhadd4s:
 ;CHECK: uhadd.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -127,8 +127,8 @@ declare <4 x i32> @llvm.aarch64.neon.uha
 define <8 x i8> @srhadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: srhadd8b:
 ;CHECK: srhadd.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -136,8 +136,8 @@ define <8 x i8> @srhadd8b(<8 x i8>* %A,
 define <16 x i8> @srhadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: srhadd16b:
 ;CHECK: srhadd.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -145,8 +145,8 @@ define <16 x i8> @srhadd16b(<16 x i8>* %
 define <4 x i16> @srhadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: srhadd4h:
 ;CHECK: srhadd.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -154,8 +154,8 @@ define <4 x i16> @srhadd4h(<4 x i16>* %A
 define <8 x i16> @srhadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: srhadd8h:
 ;CHECK: srhadd.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -163,8 +163,8 @@ define <8 x i16> @srhadd8h(<8 x i16>* %A
 define <2 x i32> @srhadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: srhadd2s:
 ;CHECK: srhadd.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -172,8 +172,8 @@ define <2 x i32> @srhadd2s(<2 x i32>* %A
 define <4 x i32> @srhadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: srhadd4s:
 ;CHECK: srhadd.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -181,8 +181,8 @@ define <4 x i32> @srhadd4s(<4 x i32>* %A
 define <8 x i8> @urhadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: urhadd8b:
 ;CHECK: urhadd.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -190,8 +190,8 @@ define <8 x i8> @urhadd8b(<8 x i8>* %A,
 define <16 x i8> @urhadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: urhadd16b:
 ;CHECK: urhadd.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -199,8 +199,8 @@ define <16 x i8> @urhadd16b(<16 x i8>* %
 define <4 x i16> @urhadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: urhadd4h:
 ;CHECK: urhadd.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -208,8 +208,8 @@ define <4 x i16> @urhadd4h(<4 x i16>* %A
 define <8 x i16> @urhadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: urhadd8h:
 ;CHECK: urhadd.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -217,8 +217,8 @@ define <8 x i16> @urhadd8h(<8 x i16>* %A
 define <2 x i32> @urhadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: urhadd2s:
 ;CHECK: urhadd.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -226,8 +226,8 @@ define <2 x i32> @urhadd2s(<2 x i32>* %A
 define <4 x i32> @urhadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: urhadd4s:
 ;CHECK: urhadd.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vhsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vhsub.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vhsub.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vhsub.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @shsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: shsub8b:
 ;CHECK: shsub.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.shsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @shsub8b(<8 x i8>* %A, <
 define <16 x i8> @shsub16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: shsub16b:
 ;CHECK: shsub.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.shsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -21,8 +21,8 @@ define <16 x i8> @shsub16b(<16 x i8>* %A
 define <4 x i16> @shsub4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: shsub4h:
 ;CHECK: shsub.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.shsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -30,8 +30,8 @@ define <4 x i16> @shsub4h(<4 x i16>* %A,
 define <8 x i16> @shsub8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: shsub8h:
 ;CHECK: shsub.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.shsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -39,8 +39,8 @@ define <8 x i16> @shsub8h(<8 x i16>* %A,
 define <2 x i32> @shsub2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: shsub2s:
 ;CHECK: shsub.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.shsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -48,8 +48,8 @@ define <2 x i32> @shsub2s(<2 x i32>* %A,
 define <4 x i32> @shsub4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: shsub4s:
 ;CHECK: shsub.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.shsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -57,8 +57,8 @@ define <4 x i32> @shsub4s(<4 x i32>* %A,
 define <8 x i8> @uhsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: uhsub8b:
 ;CHECK: uhsub.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uhsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -66,8 +66,8 @@ define <8 x i8> @uhsub8b(<8 x i8>* %A, <
 define <16 x i8> @uhsub16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: uhsub16b:
 ;CHECK: uhsub.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uhsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -75,8 +75,8 @@ define <16 x i8> @uhsub16b(<16 x i8>* %A
 define <4 x i16> @uhsub4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: uhsub4h:
 ;CHECK: uhsub.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uhsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -84,8 +84,8 @@ define <4 x i16> @uhsub4h(<4 x i16>* %A,
 define <8 x i16> @uhsub8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: uhsub8h:
 ;CHECK: uhsub.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uhsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -93,8 +93,8 @@ define <8 x i16> @uhsub8h(<8 x i16>* %A,
 define <2 x i32> @uhsub2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: uhsub2s:
 ;CHECK: uhsub.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uhsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -102,8 +102,8 @@ define <2 x i32> @uhsub2s(<2 x i32>* %A,
 define <4 x i32> @uhsub4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: uhsub4s:
 ;CHECK: uhsub.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uhsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vmax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vmax.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vmax.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vmax.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @smax_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: smax_8b:
 ;CHECK: smax.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @smax_8b(<8 x i8>* %A, <
 define <16 x i8> @smax_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: smax_16b:
 ;CHECK: smax.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -21,8 +21,8 @@ define <16 x i8> @smax_16b(<16 x i8>* %A
 define <4 x i16> @smax_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: smax_4h:
 ;CHECK: smax.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -30,8 +30,8 @@ define <4 x i16> @smax_4h(<4 x i16>* %A,
 define <8 x i16> @smax_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: smax_8h:
 ;CHECK: smax.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.smax.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -39,8 +39,8 @@ define <8 x i16> @smax_8h(<8 x i16>* %A,
 define <2 x i32> @smax_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: smax_2s:
 ;CHECK: smax.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.smax.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -48,8 +48,8 @@ define <2 x i32> @smax_2s(<2 x i32>* %A,
 define <4 x i32> @smax_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: smax_4s:
 ;CHECK: smax.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.smax.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -64,8 +64,8 @@ declare <4 x i32> @llvm.aarch64.neon.sma
 define <8 x i8> @umax_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: umax_8b:
 ;CHECK: umax.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.umax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -73,8 +73,8 @@ define <8 x i8> @umax_8b(<8 x i8>* %A, <
 define <16 x i8> @umax_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: umax_16b:
 ;CHECK: umax.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.umax.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -82,8 +82,8 @@ define <16 x i8> @umax_16b(<16 x i8>* %A
 define <4 x i16> @umax_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: umax_4h:
 ;CHECK: umax.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.umax.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -91,8 +91,8 @@ define <4 x i16> @umax_4h(<4 x i16>* %A,
 define <8 x i16> @umax_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: umax_8h:
 ;CHECK: umax.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.umax.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -100,8 +100,8 @@ define <8 x i16> @umax_8h(<8 x i16>* %A,
 define <2 x i32> @umax_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: umax_2s:
 ;CHECK: umax.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.umax.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -109,8 +109,8 @@ define <2 x i32> @umax_2s(<2 x i32>* %A,
 define <4 x i32> @umax_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: umax_4s:
 ;CHECK: umax.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.umax.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -125,8 +125,8 @@ declare <4 x i32> @llvm.aarch64.neon.uma
 define <8 x i8> @smin_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: smin_8b:
 ;CHECK: smin.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -134,8 +134,8 @@ define <8 x i8> @smin_8b(<8 x i8>* %A, <
 define <16 x i8> @smin_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: smin_16b:
 ;CHECK: smin.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -143,8 +143,8 @@ define <16 x i8> @smin_16b(<16 x i8>* %A
 define <4 x i16> @smin_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: smin_4h:
 ;CHECK: smin.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -152,8 +152,8 @@ define <4 x i16> @smin_4h(<4 x i16>* %A,
 define <8 x i16> @smin_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: smin_8h:
 ;CHECK: smin.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -161,8 +161,8 @@ define <8 x i16> @smin_8h(<8 x i16>* %A,
 define <2 x i32> @smin_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: smin_2s:
 ;CHECK: smin.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -170,8 +170,8 @@ define <2 x i32> @smin_2s(<2 x i32>* %A,
 define <4 x i32> @smin_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: smin_4s:
 ;CHECK: smin.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -186,8 +186,8 @@ declare <4 x i32> @llvm.aarch64.neon.smi
 define <8 x i8> @umin_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: umin_8b:
 ;CHECK: umin.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -195,8 +195,8 @@ define <8 x i8> @umin_8b(<8 x i8>* %A, <
 define <16 x i8> @umin_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: umin_16b:
 ;CHECK: umin.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -204,8 +204,8 @@ define <16 x i8> @umin_16b(<16 x i8>* %A
 define <4 x i16> @umin_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: umin_4h:
 ;CHECK: umin.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -213,8 +213,8 @@ define <4 x i16> @umin_4h(<4 x i16>* %A,
 define <8 x i16> @umin_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: umin_8h:
 ;CHECK: umin.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -222,8 +222,8 @@ define <8 x i16> @umin_8h(<8 x i16>* %A,
 define <2 x i32> @umin_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: umin_2s:
 ;CHECK: umin.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -231,8 +231,8 @@ define <2 x i32> @umin_2s(<2 x i32>* %A,
 define <4 x i32> @umin_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: umin_4s:
 ;CHECK: umin.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -249,8 +249,8 @@ declare <4 x i32> @llvm.aarch64.neon.umi
 define <8 x i8> @smaxp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: smaxp_8b:
 ;CHECK: smaxp.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.smaxp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -258,8 +258,8 @@ define <8 x i8> @smaxp_8b(<8 x i8>* %A,
 define <16 x i8> @smaxp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: smaxp_16b:
 ;CHECK: smaxp.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.smaxp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -267,8 +267,8 @@ define <16 x i8> @smaxp_16b(<16 x i8>* %
 define <4 x i16> @smaxp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: smaxp_4h:
 ;CHECK: smaxp.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.smaxp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -276,8 +276,8 @@ define <4 x i16> @smaxp_4h(<4 x i16>* %A
 define <8 x i16> @smaxp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: smaxp_8h:
 ;CHECK: smaxp.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.smaxp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -285,8 +285,8 @@ define <8 x i16> @smaxp_8h(<8 x i16>* %A
 define <2 x i32> @smaxp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: smaxp_2s:
 ;CHECK: smaxp.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.smaxp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -294,8 +294,8 @@ define <2 x i32> @smaxp_2s(<2 x i32>* %A
 define <4 x i32> @smaxp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: smaxp_4s:
 ;CHECK: smaxp.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.smaxp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -310,8 +310,8 @@ declare <4 x i32> @llvm.aarch64.neon.sma
 define <8 x i8> @umaxp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: umaxp_8b:
 ;CHECK: umaxp.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.umaxp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -319,8 +319,8 @@ define <8 x i8> @umaxp_8b(<8 x i8>* %A,
 define <16 x i8> @umaxp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: umaxp_16b:
 ;CHECK: umaxp.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.umaxp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -328,8 +328,8 @@ define <16 x i8> @umaxp_16b(<16 x i8>* %
 define <4 x i16> @umaxp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: umaxp_4h:
 ;CHECK: umaxp.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.umaxp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -337,8 +337,8 @@ define <4 x i16> @umaxp_4h(<4 x i16>* %A
 define <8 x i16> @umaxp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: umaxp_8h:
 ;CHECK: umaxp.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.umaxp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -346,8 +346,8 @@ define <8 x i16> @umaxp_8h(<8 x i16>* %A
 define <2 x i32> @umaxp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: umaxp_2s:
 ;CHECK: umaxp.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.umaxp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -355,8 +355,8 @@ define <2 x i32> @umaxp_2s(<2 x i32>* %A
 define <4 x i32> @umaxp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: umaxp_4s:
 ;CHECK: umaxp.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.umaxp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -373,8 +373,8 @@ declare <4 x i32> @llvm.aarch64.neon.uma
 define <8 x i8> @sminp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: sminp_8b:
 ;CHECK: sminp.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.sminp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -382,8 +382,8 @@ define <8 x i8> @sminp_8b(<8 x i8>* %A,
 define <16 x i8> @sminp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: sminp_16b:
 ;CHECK: sminp.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.sminp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -391,8 +391,8 @@ define <16 x i8> @sminp_16b(<16 x i8>* %
 define <4 x i16> @sminp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: sminp_4h:
 ;CHECK: sminp.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.sminp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -400,8 +400,8 @@ define <4 x i16> @sminp_4h(<4 x i16>* %A
 define <8 x i16> @sminp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: sminp_8h:
 ;CHECK: sminp.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.sminp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -409,8 +409,8 @@ define <8 x i16> @sminp_8h(<8 x i16>* %A
 define <2 x i32> @sminp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: sminp_2s:
 ;CHECK: sminp.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.sminp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -418,8 +418,8 @@ define <2 x i32> @sminp_2s(<2 x i32>* %A
 define <4 x i32> @sminp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: sminp_4s:
 ;CHECK: sminp.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.sminp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -434,8 +434,8 @@ declare <4 x i32> @llvm.aarch64.neon.smi
 define <8 x i8> @uminp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: uminp_8b:
 ;CHECK: uminp.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uminp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -443,8 +443,8 @@ define <8 x i8> @uminp_8b(<8 x i8>* %A,
 define <16 x i8> @uminp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: uminp_16b:
 ;CHECK: uminp.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uminp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -452,8 +452,8 @@ define <16 x i8> @uminp_16b(<16 x i8>* %
 define <4 x i16> @uminp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: uminp_4h:
 ;CHECK: uminp.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uminp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -461,8 +461,8 @@ define <4 x i16> @uminp_4h(<4 x i16>* %A
 define <8 x i16> @uminp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: uminp_8h:
 ;CHECK: uminp.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uminp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -470,8 +470,8 @@ define <8 x i16> @uminp_8h(<8 x i16>* %A
 define <2 x i32> @uminp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: uminp_2s:
 ;CHECK: uminp.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uminp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -479,8 +479,8 @@ define <2 x i32> @uminp_2s(<2 x i32>* %A
 define <4 x i32> @uminp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: uminp_4s:
 ;CHECK: uminp.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uminp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -495,8 +495,8 @@ declare <4 x i32> @llvm.aarch64.neon.umi
 define <2 x float> @fmax_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: fmax_2s:
 ;CHECK: fmax.2s
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.fmax.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -504,8 +504,8 @@ define <2 x float> @fmax_2s(<2 x float>*
 define <4 x float> @fmax_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: fmax_4s:
 ;CHECK: fmax.4s
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.fmax.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
@@ -513,8 +513,8 @@ define <4 x float> @fmax_4s(<4 x float>*
 define <2 x double> @fmax_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
 ;CHECK-LABEL: fmax_2d:
 ;CHECK: fmax.2d
-	%tmp1 = load <2 x double>* %A
-	%tmp2 = load <2 x double>* %B
+	%tmp1 = load <2 x double>, <2 x double>* %A
+	%tmp2 = load <2 x double>, <2 x double>* %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.fmax.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -526,8 +526,8 @@ declare <2 x double> @llvm.aarch64.neon.
 define <2 x float> @fmaxp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: fmaxp_2s:
 ;CHECK: fmaxp.2s
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.fmaxp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -535,8 +535,8 @@ define <2 x float> @fmaxp_2s(<2 x float>
 define <4 x float> @fmaxp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: fmaxp_4s:
 ;CHECK: fmaxp.4s
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.fmaxp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
@@ -544,8 +544,8 @@ define <4 x float> @fmaxp_4s(<4 x float>
 define <2 x double> @fmaxp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
 ;CHECK-LABEL: fmaxp_2d:
 ;CHECK: fmaxp.2d
-	%tmp1 = load <2 x double>* %A
-	%tmp2 = load <2 x double>* %B
+	%tmp1 = load <2 x double>, <2 x double>* %A
+	%tmp2 = load <2 x double>, <2 x double>* %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.fmaxp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -557,8 +557,8 @@ declare <2 x double> @llvm.aarch64.neon.
 define <2 x float> @fmin_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: fmin_2s:
 ;CHECK: fmin.2s
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -566,8 +566,8 @@ define <2 x float> @fmin_2s(<2 x float>*
 define <4 x float> @fmin_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: fmin_4s:
 ;CHECK: fmin.4s
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
@@ -575,8 +575,8 @@ define <4 x float> @fmin_4s(<4 x float>*
 define <2 x double> @fmin_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
 ;CHECK-LABEL: fmin_2d:
 ;CHECK: fmin.2d
-	%tmp1 = load <2 x double>* %A
-	%tmp2 = load <2 x double>* %B
+	%tmp1 = load <2 x double>, <2 x double>* %A
+	%tmp2 = load <2 x double>, <2 x double>* %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -588,8 +588,8 @@ declare <2 x double> @llvm.aarch64.neon.
 define <2 x float> @fminp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: fminp_2s:
 ;CHECK: fminp.2s
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.fminp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -597,8 +597,8 @@ define <2 x float> @fminp_2s(<2 x float>
 define <4 x float> @fminp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: fminp_4s:
 ;CHECK: fminp.4s
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.fminp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
@@ -606,8 +606,8 @@ define <4 x float> @fminp_4s(<4 x float>
 define <2 x double> @fminp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
 ;CHECK-LABEL: fminp_2d:
 ;CHECK: fminp.2d
-	%tmp1 = load <2 x double>* %A
-	%tmp2 = load <2 x double>* %B
+	%tmp1 = load <2 x double>, <2 x double>* %A
+	%tmp2 = load <2 x double>, <2 x double>* %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.fminp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -619,8 +619,8 @@ declare <2 x double> @llvm.aarch64.neon.
 define <2 x float> @fminnmp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: fminnmp_2s:
 ;CHECK: fminnmp.2s
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.fminnmp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -628,8 +628,8 @@ define <2 x float> @fminnmp_2s(<2 x floa
 define <4 x float> @fminnmp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: fminnmp_4s:
 ;CHECK: fminnmp.4s
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.fminnmp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
@@ -637,8 +637,8 @@ define <4 x float> @fminnmp_4s(<4 x floa
 define <2 x double> @fminnmp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
 ;CHECK-LABEL: fminnmp_2d:
 ;CHECK: fminnmp.2d
-	%tmp1 = load <2 x double>* %A
-	%tmp2 = load <2 x double>* %B
+	%tmp1 = load <2 x double>, <2 x double>* %A
+	%tmp2 = load <2 x double>, <2 x double>* %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.fminnmp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -650,8 +650,8 @@ declare <2 x double> @llvm.aarch64.neon.
 define <2 x float> @fmaxnmp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: fmaxnmp_2s:
 ;CHECK: fmaxnmp.2s
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.fmaxnmp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -659,8 +659,8 @@ define <2 x float> @fmaxnmp_2s(<2 x floa
 define <4 x float> @fmaxnmp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: fmaxnmp_4s:
 ;CHECK: fmaxnmp.4s
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.fmaxnmp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
@@ -668,8 +668,8 @@ define <4 x float> @fmaxnmp_4s(<4 x floa
 define <2 x double> @fmaxnmp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
 ;CHECK-LABEL: fmaxnmp_2d:
 ;CHECK: fmaxnmp.2d
-	%tmp1 = load <2 x double>* %A
-	%tmp2 = load <2 x double>* %B
+	%tmp1 = load <2 x double>, <2 x double>* %A
+	%tmp2 = load <2 x double>, <2 x double>* %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.fmaxnmp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
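
The arm64-vmul.ll hunks below also touch scalar loads (in sqdmulh_1s
and sqrdmulh_1s), showing that the explicit type parameter applies
uniformly to scalar and vector types, e.g. (hypothetical names):
%x = load i32, i32* %p.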

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vmul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vmul.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vmul.ll Fri Feb 27 15:17:42 2015
@@ -4,8 +4,8 @@
 define <8 x i16> @smull8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: smull8h:
 ;CHECK: smull.8h
-  %tmp1 = load <8 x i8>* %A
-  %tmp2 = load <8 x i8>* %B
+  %tmp1 = load <8 x i8>, <8 x i8>* %A
+  %tmp2 = load <8 x i8>, <8 x i8>* %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
   ret <8 x i16> %tmp3
 }
@@ -13,8 +13,8 @@ define <8 x i16> @smull8h(<8 x i8>* %A,
 define <4 x i32> @smull4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: smull4s:
 ;CHECK: smull.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i32> %tmp3
 }
@@ -22,8 +22,8 @@ define <4 x i32> @smull4s(<4 x i16>* %A,
 define <2 x i64> @smull2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: smull2d:
 ;CHECK: smull.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   ret <2 x i64> %tmp3
 }
@@ -35,8 +35,8 @@ declare <2 x i64> @llvm.aarch64.neon.smu
 define <8 x i16> @umull8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: umull8h:
 ;CHECK: umull.8h
-  %tmp1 = load <8 x i8>* %A
-  %tmp2 = load <8 x i8>* %B
+  %tmp1 = load <8 x i8>, <8 x i8>* %A
+  %tmp2 = load <8 x i8>, <8 x i8>* %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
   ret <8 x i16> %tmp3
 }
@@ -44,8 +44,8 @@ define <8 x i16> @umull8h(<8 x i8>* %A,
 define <4 x i32> @umull4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: umull4s:
 ;CHECK: umull.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i32> %tmp3
 }
@@ -53,8 +53,8 @@ define <4 x i32> @umull4s(<4 x i16>* %A,
 define <2 x i64> @umull2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: umull2d:
 ;CHECK: umull.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   ret <2 x i64> %tmp3
 }
@@ -66,8 +66,8 @@ declare <2 x i64> @llvm.aarch64.neon.umu
 define <4 x i32> @sqdmull4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqdmull4s:
 ;CHECK: sqdmull.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i32> %tmp3
 }
@@ -75,8 +75,8 @@ define <4 x i32> @sqdmull4s(<4 x i16>* %
 define <2 x i64> @sqdmull2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqdmull2d:
 ;CHECK: sqdmull.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   ret <2 x i64> %tmp3
 }
@@ -84,8 +84,8 @@ define <2 x i64> @sqdmull2d(<2 x i32>* %
 define <4 x i32> @sqdmull2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqdmull2_4s:
 ;CHECK: sqdmull2.4s
-  %load1 = load <8 x i16>* %A
-  %load2 = load <8 x i16>* %B
+  %load1 = load <8 x i16>, <8 x i16>* %A
+  %load2 = load <8 x i16>, <8 x i16>* %B
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -95,8 +95,8 @@ define <4 x i32> @sqdmull2_4s(<8 x i16>*
 define <2 x i64> @sqdmull2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqdmull2_2d:
 ;CHECK: sqdmull2.2d
-  %load1 = load <4 x i32>* %A
-  %load2 = load <4 x i32>* %B
+  %load1 = load <4 x i32>, <4 x i32>* %A
+  %load2 = load <4 x i32>, <4 x i32>* %B
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -110,8 +110,8 @@ declare <2 x i64> @llvm.aarch64.neon.sqd
 define <8 x i16> @pmull8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: pmull8h:
 ;CHECK: pmull.8h
-  %tmp1 = load <8 x i8>* %A
-  %tmp2 = load <8 x i8>* %B
+  %tmp1 = load <8 x i8>, <8 x i8>* %A
+  %tmp2 = load <8 x i8>, <8 x i8>* %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
   ret <8 x i16> %tmp3
 }
@@ -121,8 +121,8 @@ declare <8 x i16> @llvm.aarch64.neon.pmu
 define <4 x i16> @sqdmulh_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqdmulh_4h:
 ;CHECK: sqdmulh.4h
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i16> %tmp3
 }
@@ -130,8 +130,8 @@ define <4 x i16> @sqdmulh_4h(<4 x i16>*
 define <8 x i16> @sqdmulh_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqdmulh_8h:
 ;CHECK: sqdmulh.8h
-  %tmp1 = load <8 x i16>* %A
-  %tmp2 = load <8 x i16>* %B
+  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp2 = load <8 x i16>, <8 x i16>* %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
   ret <8 x i16> %tmp3
 }
@@ -139,8 +139,8 @@ define <8 x i16> @sqdmulh_8h(<8 x i16>*
 define <2 x i32> @sqdmulh_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqdmulh_2s:
 ;CHECK: sqdmulh.2s
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
   ret <2 x i32> %tmp3
 }
@@ -148,8 +148,8 @@ define <2 x i32> @sqdmulh_2s(<2 x i32>*
 define <4 x i32> @sqdmulh_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqdmulh_4s:
 ;CHECK: sqdmulh.4s
-  %tmp1 = load <4 x i32>* %A
-  %tmp2 = load <4 x i32>* %B
+  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %tmp2 = load <4 x i32>, <4 x i32>* %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
   ret <4 x i32> %tmp3
 }
@@ -157,8 +157,8 @@ define <4 x i32> @sqdmulh_4s(<4 x i32>*
 define i32 @sqdmulh_1s(i32* %A, i32* %B) nounwind {
 ;CHECK-LABEL: sqdmulh_1s:
 ;CHECK: sqdmulh s0, {{s[0-9]+}}, {{s[0-9]+}}
-  %tmp1 = load i32* %A
-  %tmp2 = load i32* %B
+  %tmp1 = load i32, i32* %A
+  %tmp2 = load i32, i32* %B
   %tmp3 = call i32 @llvm.aarch64.neon.sqdmulh.i32(i32 %tmp1, i32 %tmp2)
   ret i32 %tmp3
 }
@@ -172,8 +172,8 @@ declare i32 @llvm.aarch64.neon.sqdmulh.i
 define <4 x i16> @sqrdmulh_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqrdmulh_4h:
 ;CHECK: sqrdmulh.4h
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i16> %tmp3
 }
@@ -181,8 +181,8 @@ define <4 x i16> @sqrdmulh_4h(<4 x i16>*
 define <8 x i16> @sqrdmulh_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqrdmulh_8h:
 ;CHECK: sqrdmulh.8h
-  %tmp1 = load <8 x i16>* %A
-  %tmp2 = load <8 x i16>* %B
+  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp2 = load <8 x i16>, <8 x i16>* %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
   ret <8 x i16> %tmp3
 }
@@ -190,8 +190,8 @@ define <8 x i16> @sqrdmulh_8h(<8 x i16>*
 define <2 x i32> @sqrdmulh_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqrdmulh_2s:
 ;CHECK: sqrdmulh.2s
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
   ret <2 x i32> %tmp3
 }
@@ -199,8 +199,8 @@ define <2 x i32> @sqrdmulh_2s(<2 x i32>*
 define <4 x i32> @sqrdmulh_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqrdmulh_4s:
 ;CHECK: sqrdmulh.4s
-  %tmp1 = load <4 x i32>* %A
-  %tmp2 = load <4 x i32>* %B
+  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %tmp2 = load <4 x i32>, <4 x i32>* %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
   ret <4 x i32> %tmp3
 }
@@ -208,8 +208,8 @@ define <4 x i32> @sqrdmulh_4s(<4 x i32>*
 define i32 @sqrdmulh_1s(i32* %A, i32* %B) nounwind {
 ;CHECK-LABEL: sqrdmulh_1s:
 ;CHECK: sqrdmulh s0, {{s[0-9]+}}, {{s[0-9]+}}
-  %tmp1 = load i32* %A
-  %tmp2 = load i32* %B
+  %tmp1 = load i32, i32* %A
+  %tmp2 = load i32, i32* %B
   %tmp3 = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %tmp1, i32 %tmp2)
   ret i32 %tmp3
 }
@@ -223,8 +223,8 @@ declare i32 @llvm.aarch64.neon.sqrdmulh.
 define <2 x float> @fmulx_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: fmulx_2s:
 ;CHECK: fmulx.2s
-  %tmp1 = load <2 x float>* %A
-  %tmp2 = load <2 x float>* %B
+  %tmp1 = load <2 x float>, <2 x float>* %A
+  %tmp2 = load <2 x float>, <2 x float>* %B
   %tmp3 = call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
   ret <2 x float> %tmp3
 }
@@ -232,8 +232,8 @@ define <2 x float> @fmulx_2s(<2 x float>
 define <4 x float> @fmulx_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: fmulx_4s:
 ;CHECK: fmulx.4s
-  %tmp1 = load <4 x float>* %A
-  %tmp2 = load <4 x float>* %B
+  %tmp1 = load <4 x float>, <4 x float>* %A
+  %tmp2 = load <4 x float>, <4 x float>* %B
   %tmp3 = call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
   ret <4 x float> %tmp3
 }
@@ -241,8 +241,8 @@ define <4 x float> @fmulx_4s(<4 x float>
 define <2 x double> @fmulx_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
 ;CHECK-LABEL: fmulx_2d:
 ;CHECK: fmulx.2d
-  %tmp1 = load <2 x double>* %A
-  %tmp2 = load <2 x double>* %B
+  %tmp1 = load <2 x double>, <2 x double>* %A
+  %tmp2 = load <2 x double>, <2 x double>* %B
   %tmp3 = call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
   ret <2 x double> %tmp3
 }
@@ -254,9 +254,9 @@ declare <2 x double> @llvm.aarch64.neon.
 define <4 x i32> @smlal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
 ;CHECK-LABEL: smlal4s:
 ;CHECK: smlal.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp5 = add <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
@@ -265,9 +265,9 @@ define <4 x i32> @smlal4s(<4 x i16>* %A,
 define <2 x i64> @smlal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
 ;CHECK-LABEL: smlal2d:
 ;CHECK: smlal.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp5 = add <2 x i64> %tmp3, %tmp4
   ret <2 x i64> %tmp5
@@ -276,9 +276,9 @@ define <2 x i64> @smlal2d(<2 x i32>* %A,
 define <4 x i32> @smlsl4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
 ;CHECK-LABEL: smlsl4s:
 ;CHECK: smlsl.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp5 = sub <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
@@ -287,9 +287,9 @@ define <4 x i32> @smlsl4s(<4 x i16>* %A,
 define <2 x i64> @smlsl2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
 ;CHECK-LABEL: smlsl2d:
 ;CHECK: smlsl.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp5 = sub <2 x i64> %tmp3, %tmp4
   ret <2 x i64> %tmp5
@@ -303,9 +303,9 @@ declare <2 x i64> @llvm.aarch64.neon.sqs
 define <4 x i32> @sqdmlal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
 ;CHECK-LABEL: sqdmlal4s:
 ;CHECK: sqdmlal.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp4)
   ret <4 x i32> %tmp5
@@ -314,9 +314,9 @@ define <4 x i32> @sqdmlal4s(<4 x i16>* %
 define <2 x i64> @sqdmlal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
 ;CHECK-LABEL: sqdmlal2d:
 ;CHECK: sqdmlal.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp4)
   ret <2 x i64> %tmp5
@@ -325,9 +325,9 @@ define <2 x i64> @sqdmlal2d(<2 x i32>* %
 define <4 x i32> @sqdmlal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
 ;CHECK-LABEL: sqdmlal2_4s:
 ;CHECK: sqdmlal2.4s
-  %load1 = load <8 x i16>* %A
-  %load2 = load <8 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %load1 = load <8 x i16>, <8 x i16>* %A
+  %load2 = load <8 x i16>, <8 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -338,9 +338,9 @@ define <4 x i32> @sqdmlal2_4s(<8 x i16>*
 define <2 x i64> @sqdmlal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
 ;CHECK-LABEL: sqdmlal2_2d:
 ;CHECK: sqdmlal2.2d
-  %load1 = load <4 x i32>* %A
-  %load2 = load <4 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %load1 = load <4 x i32>, <4 x i32>* %A
+  %load2 = load <4 x i32>, <4 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -351,9 +351,9 @@ define <2 x i64> @sqdmlal2_2d(<4 x i32>*
 define <4 x i32> @sqdmlsl4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
 ;CHECK-LABEL: sqdmlsl4s:
 ;CHECK: sqdmlsl.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp4)
   ret <4 x i32> %tmp5
@@ -362,9 +362,9 @@ define <4 x i32> @sqdmlsl4s(<4 x i16>* %
 define <2 x i64> @sqdmlsl2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
 ;CHECK-LABEL: sqdmlsl2d:
 ;CHECK: sqdmlsl.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp4)
   ret <2 x i64> %tmp5
@@ -373,9 +373,9 @@ define <2 x i64> @sqdmlsl2d(<2 x i32>* %
 define <4 x i32> @sqdmlsl2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
 ;CHECK-LABEL: sqdmlsl2_4s:
 ;CHECK: sqdmlsl2.4s
-  %load1 = load <8 x i16>* %A
-  %load2 = load <8 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %load1 = load <8 x i16>, <8 x i16>* %A
+  %load2 = load <8 x i16>, <8 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -386,9 +386,9 @@ define <4 x i32> @sqdmlsl2_4s(<8 x i16>*
 define <2 x i64> @sqdmlsl2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
 ;CHECK-LABEL: sqdmlsl2_2d:
 ;CHECK: sqdmlsl2.2d
-  %load1 = load <4 x i32>* %A
-  %load2 = load <4 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %load1 = load <4 x i32>, <4 x i32>* %A
+  %load2 = load <4 x i32>, <4 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -399,9 +399,9 @@ define <2 x i64> @sqdmlsl2_2d(<4 x i32>*
 define <4 x i32> @umlal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
 ;CHECK-LABEL: umlal4s:
 ;CHECK: umlal.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp5 = add <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
@@ -410,9 +410,9 @@ define <4 x i32> @umlal4s(<4 x i16>* %A,
 define <2 x i64> @umlal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
 ;CHECK-LABEL: umlal2d:
 ;CHECK: umlal.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp5 = add <2 x i64> %tmp3, %tmp4
   ret <2 x i64> %tmp5
@@ -421,9 +421,9 @@ define <2 x i64> @umlal2d(<2 x i32>* %A,
 define <4 x i32> @umlsl4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
 ;CHECK-LABEL: umlsl4s:
 ;CHECK: umlsl.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp5 = sub <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
@@ -432,9 +432,9 @@ define <4 x i32> @umlsl4s(<4 x i16>* %A,
 define <2 x i64> @umlsl2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
 ;CHECK-LABEL: umlsl2d:
 ;CHECK: umlsl.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp5 = sub <2 x i64> %tmp3, %tmp4
   ret <2 x i64> %tmp5
@@ -443,9 +443,9 @@ define <2 x i64> @umlsl2d(<2 x i32>* %A,
 define <2 x float> @fmla_2s(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
 ;CHECK-LABEL: fmla_2s:
 ;CHECK: fmla.2s
-  %tmp1 = load <2 x float>* %A
-  %tmp2 = load <2 x float>* %B
-  %tmp3 = load <2 x float>* %C
+  %tmp1 = load <2 x float>, <2 x float>* %A
+  %tmp2 = load <2 x float>, <2 x float>* %B
+  %tmp3 = load <2 x float>, <2 x float>* %C
   %tmp4 = call <2 x float> @llvm.fma.v2f32(<2 x float> %tmp1, <2 x float> %tmp2, <2 x float> %tmp3)
   ret <2 x float> %tmp4
 }
@@ -453,9 +453,9 @@ define <2 x float> @fmla_2s(<2 x float>*
 define <4 x float> @fmla_4s(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
 ;CHECK-LABEL: fmla_4s:
 ;CHECK: fmla.4s
-  %tmp1 = load <4 x float>* %A
-  %tmp2 = load <4 x float>* %B
-  %tmp3 = load <4 x float>* %C
+  %tmp1 = load <4 x float>, <4 x float>* %A
+  %tmp2 = load <4 x float>, <4 x float>* %B
+  %tmp3 = load <4 x float>, <4 x float>* %C
   %tmp4 = call <4 x float> @llvm.fma.v4f32(<4 x float> %tmp1, <4 x float> %tmp2, <4 x float> %tmp3)
   ret <4 x float> %tmp4
 }
@@ -463,9 +463,9 @@ define <4 x float> @fmla_4s(<4 x float>*
 define <2 x double> @fmla_2d(<2 x double>* %A, <2 x double>* %B, <2 x double>* %C) nounwind {
 ;CHECK-LABEL: fmla_2d:
 ;CHECK: fmla.2d
-  %tmp1 = load <2 x double>* %A
-  %tmp2 = load <2 x double>* %B
-  %tmp3 = load <2 x double>* %C
+  %tmp1 = load <2 x double>, <2 x double>* %A
+  %tmp2 = load <2 x double>, <2 x double>* %B
+  %tmp3 = load <2 x double>, <2 x double>* %C
   %tmp4 = call <2 x double> @llvm.fma.v2f64(<2 x double> %tmp1, <2 x double> %tmp2, <2 x double> %tmp3)
   ret <2 x double> %tmp4
 }
@@ -477,9 +477,9 @@ declare <2 x double> @llvm.fma.v2f64(<2
 define <2 x float> @fmls_2s(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
 ;CHECK-LABEL: fmls_2s:
 ;CHECK: fmls.2s
-  %tmp1 = load <2 x float>* %A
-  %tmp2 = load <2 x float>* %B
-  %tmp3 = load <2 x float>* %C
+  %tmp1 = load <2 x float>, <2 x float>* %A
+  %tmp2 = load <2 x float>, <2 x float>* %B
+  %tmp3 = load <2 x float>, <2 x float>* %C
   %tmp4 = fsub <2 x float> <float -0.0, float -0.0>, %tmp2
   %tmp5 = call <2 x float> @llvm.fma.v2f32(<2 x float> %tmp1, <2 x float> %tmp4, <2 x float> %tmp3)
   ret <2 x float> %tmp5
@@ -488,9 +488,9 @@ define <2 x float> @fmls_2s(<2 x float>*
 define <4 x float> @fmls_4s(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
 ;CHECK-LABEL: fmls_4s:
 ;CHECK: fmls.4s
-  %tmp1 = load <4 x float>* %A
-  %tmp2 = load <4 x float>* %B
-  %tmp3 = load <4 x float>* %C
+  %tmp1 = load <4 x float>, <4 x float>* %A
+  %tmp2 = load <4 x float>, <4 x float>* %B
+  %tmp3 = load <4 x float>, <4 x float>* %C
   %tmp4 = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %tmp2
   %tmp5 = call <4 x float> @llvm.fma.v4f32(<4 x float> %tmp1, <4 x float> %tmp4, <4 x float> %tmp3)
   ret <4 x float> %tmp5
@@ -499,9 +499,9 @@ define <4 x float> @fmls_4s(<4 x float>*
 define <2 x double> @fmls_2d(<2 x double>* %A, <2 x double>* %B, <2 x double>* %C) nounwind {
 ;CHECK-LABEL: fmls_2d:
 ;CHECK: fmls.2d
-  %tmp1 = load <2 x double>* %A
-  %tmp2 = load <2 x double>* %B
-  %tmp3 = load <2 x double>* %C
+  %tmp1 = load <2 x double>, <2 x double>* %A
+  %tmp2 = load <2 x double>, <2 x double>* %B
+  %tmp3 = load <2 x double>, <2 x double>* %C
   %tmp4 = fsub <2 x double> <double -0.0, double -0.0>, %tmp2
   %tmp5 = call <2 x double> @llvm.fma.v2f64(<2 x double> %tmp1, <2 x double> %tmp4, <2 x double> %tmp3)
   ret <2 x double> %tmp5
@@ -510,9 +510,9 @@ define <2 x double> @fmls_2d(<2 x double
 define <2 x float> @fmls_commuted_neg_2s(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
 ;CHECK-LABEL: fmls_commuted_neg_2s:
 ;CHECK: fmls.2s
-  %tmp1 = load <2 x float>* %A
-  %tmp2 = load <2 x float>* %B
-  %tmp3 = load <2 x float>* %C
+  %tmp1 = load <2 x float>, <2 x float>* %A
+  %tmp2 = load <2 x float>, <2 x float>* %B
+  %tmp3 = load <2 x float>, <2 x float>* %C
   %tmp4 = fsub <2 x float> <float -0.0, float -0.0>, %tmp2
   %tmp5 = call <2 x float> @llvm.fma.v2f32(<2 x float> %tmp4, <2 x float> %tmp1, <2 x float> %tmp3)
   ret <2 x float> %tmp5
@@ -521,9 +521,9 @@ define <2 x float> @fmls_commuted_neg_2s
 define <4 x float> @fmls_commuted_neg_4s(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
 ;CHECK-LABEL: fmls_commuted_neg_4s:
 ;CHECK: fmls.4s
-  %tmp1 = load <4 x float>* %A
-  %tmp2 = load <4 x float>* %B
-  %tmp3 = load <4 x float>* %C
+  %tmp1 = load <4 x float>, <4 x float>* %A
+  %tmp2 = load <4 x float>, <4 x float>* %B
+  %tmp3 = load <4 x float>, <4 x float>* %C
   %tmp4 = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %tmp2
   %tmp5 = call <4 x float> @llvm.fma.v4f32(<4 x float> %tmp4, <4 x float> %tmp1, <4 x float> %tmp3)
   ret <4 x float> %tmp5
@@ -532,9 +532,9 @@ define <4 x float> @fmls_commuted_neg_4s
 define <2 x double> @fmls_commuted_neg_2d(<2 x double>* %A, <2 x double>* %B, <2 x double>* %C) nounwind {
 ;CHECK-LABEL: fmls_commuted_neg_2d:
 ;CHECK: fmls.2d
-  %tmp1 = load <2 x double>* %A
-  %tmp2 = load <2 x double>* %B
-  %tmp3 = load <2 x double>* %C
+  %tmp1 = load <2 x double>, <2 x double>* %A
+  %tmp2 = load <2 x double>, <2 x double>* %B
+  %tmp3 = load <2 x double>, <2 x double>* %C
   %tmp4 = fsub <2 x double> <double -0.0, double -0.0>, %tmp2
   %tmp5 = call <2 x double> @llvm.fma.v2f64(<2 x double> %tmp4, <2 x double> %tmp1, <2 x double> %tmp3)
   ret <2 x double> %tmp5
@@ -609,8 +609,8 @@ define <4 x i16> @mul_4h(<4 x i16>* %A,
 ;CHECK-LABEL: mul_4h:
 ;CHECK-NOT: dup
 ;CHECK: mul.4h
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = mul <4 x i16> %tmp1, %tmp3
   ret <4 x i16> %tmp4
@@ -620,8 +620,8 @@ define <8 x i16> @mul_8h(<8 x i16>* %A,
 ;CHECK-LABEL: mul_8h:
 ;CHECK-NOT: dup
 ;CHECK: mul.8h
-  %tmp1 = load <8 x i16>* %A
-  %tmp2 = load <8 x i16>* %B
+  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp2 = load <8 x i16>, <8 x i16>* %B
   %tmp3 = shufflevector <8 x i16> %tmp2, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %tmp4 = mul <8 x i16> %tmp1, %tmp3
   ret <8 x i16> %tmp4
@@ -631,8 +631,8 @@ define <2 x i32> @mul_2s(<2 x i32>* %A,
 ;CHECK-LABEL: mul_2s:
 ;CHECK-NOT: dup
 ;CHECK: mul.2s
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = mul <2 x i32> %tmp1, %tmp3
   ret <2 x i32> %tmp4
@@ -642,8 +642,8 @@ define <4 x i32> @mul_4s(<4 x i32>* %A,
 ;CHECK-LABEL: mul_4s:
 ;CHECK-NOT: dup
 ;CHECK: mul.4s
-  %tmp1 = load <4 x i32>* %A
-  %tmp2 = load <4 x i32>* %B
+  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %tmp2 = load <4 x i32>, <4 x i32>* %B
   %tmp3 = shufflevector <4 x i32> %tmp2, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = mul <4 x i32> %tmp1, %tmp3
   ret <4 x i32> %tmp4
@@ -661,8 +661,8 @@ define <2 x float> @fmul_lane_2s(<2 x fl
 ;CHECK-LABEL: fmul_lane_2s:
 ;CHECK-NOT: dup
 ;CHECK: fmul.2s
-  %tmp1 = load <2 x float>* %A
-  %tmp2 = load <2 x float>* %B
+  %tmp1 = load <2 x float>, <2 x float>* %A
+  %tmp2 = load <2 x float>, <2 x float>* %B
   %tmp3 = shufflevector <2 x float> %tmp2, <2 x float> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = fmul <2 x float> %tmp1, %tmp3
   ret <2 x float> %tmp4
@@ -672,8 +672,8 @@ define <4 x float> @fmul_lane_4s(<4 x fl
 ;CHECK-LABEL: fmul_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: fmul.4s
-  %tmp1 = load <4 x float>* %A
-  %tmp2 = load <4 x float>* %B
+  %tmp1 = load <4 x float>, <4 x float>* %A
+  %tmp2 = load <4 x float>, <4 x float>* %B
   %tmp3 = shufflevector <4 x float> %tmp2, <4 x float> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = fmul <4 x float> %tmp1, %tmp3
   ret <4 x float> %tmp4
@@ -683,8 +683,8 @@ define <2 x double> @fmul_lane_2d(<2 x d
 ;CHECK-LABEL: fmul_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: fmul.2d
-  %tmp1 = load <2 x double>* %A
-  %tmp2 = load <2 x double>* %B
+  %tmp1 = load <2 x double>, <2 x double>* %A
+  %tmp2 = load <2 x double>, <2 x double>* %B
   %tmp3 = shufflevector <2 x double> %tmp2, <2 x double> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = fmul <2 x double> %tmp1, %tmp3
   ret <2 x double> %tmp4
@@ -714,8 +714,8 @@ define <2 x float> @fmulx_lane_2s(<2 x f
 ;CHECK-LABEL: fmulx_lane_2s:
 ;CHECK-NOT: dup
 ;CHECK: fmulx.2s
-  %tmp1 = load <2 x float>* %A
-  %tmp2 = load <2 x float>* %B
+  %tmp1 = load <2 x float>, <2 x float>* %A
+  %tmp2 = load <2 x float>, <2 x float>* %B
   %tmp3 = shufflevector <2 x float> %tmp2, <2 x float> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %tmp1, <2 x float> %tmp3)
   ret <2 x float> %tmp4
@@ -725,8 +725,8 @@ define <4 x float> @fmulx_lane_4s(<4 x f
 ;CHECK-LABEL: fmulx_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: fmulx.4s
-  %tmp1 = load <4 x float>* %A
-  %tmp2 = load <4 x float>* %B
+  %tmp1 = load <4 x float>, <4 x float>* %A
+  %tmp2 = load <4 x float>, <4 x float>* %B
   %tmp3 = shufflevector <4 x float> %tmp2, <4 x float> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %tmp1, <4 x float> %tmp3)
   ret <4 x float> %tmp4
@@ -736,8 +736,8 @@ define <2 x double> @fmulx_lane_2d(<2 x
 ;CHECK-LABEL: fmulx_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: fmulx.2d
-  %tmp1 = load <2 x double>* %A
-  %tmp2 = load <2 x double>* %B
+  %tmp1 = load <2 x double>, <2 x double>* %A
+  %tmp2 = load <2 x double>, <2 x double>* %B
   %tmp3 = shufflevector <2 x double> %tmp2, <2 x double> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %tmp1, <2 x double> %tmp3)
   ret <2 x double> %tmp4
@@ -747,8 +747,8 @@ define <4 x i16> @sqdmulh_lane_4h(<4 x i
 ;CHECK-LABEL: sqdmulh_lane_4h:
 ;CHECK-NOT: dup
 ;CHECK: sqdmulh.4h
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp3)
   ret <4 x i16> %tmp4
@@ -758,8 +758,8 @@ define <8 x i16> @sqdmulh_lane_8h(<8 x i
 ;CHECK-LABEL: sqdmulh_lane_8h:
 ;CHECK-NOT: dup
 ;CHECK: sqdmulh.8h
-  %tmp1 = load <8 x i16>* %A
-  %tmp2 = load <8 x i16>* %B
+  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp2 = load <8 x i16>, <8 x i16>* %B
   %tmp3 = shufflevector <8 x i16> %tmp2, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp3)
   ret <8 x i16> %tmp4
@@ -769,8 +769,8 @@ define <2 x i32> @sqdmulh_lane_2s(<2 x i
 ;CHECK-LABEL: sqdmulh_lane_2s:
 ;CHECK-NOT: dup
 ;CHECK: sqdmulh.2s
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp3)
   ret <2 x i32> %tmp4
@@ -780,8 +780,8 @@ define <4 x i32> @sqdmulh_lane_4s(<4 x i
 ;CHECK-LABEL: sqdmulh_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: sqdmulh.4s
-  %tmp1 = load <4 x i32>* %A
-  %tmp2 = load <4 x i32>* %B
+  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %tmp2 = load <4 x i32>, <4 x i32>* %B
   %tmp3 = shufflevector <4 x i32> %tmp2, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp3)
   ret <4 x i32> %tmp4
@@ -800,8 +800,8 @@ define <4 x i16> @sqrdmulh_lane_4h(<4 x
 ;CHECK-LABEL: sqrdmulh_lane_4h:
 ;CHECK-NOT: dup
 ;CHECK: sqrdmulh.4h
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp3)
   ret <4 x i16> %tmp4
@@ -811,8 +811,8 @@ define <8 x i16> @sqrdmulh_lane_8h(<8 x
 ;CHECK-LABEL: sqrdmulh_lane_8h:
 ;CHECK-NOT: dup
 ;CHECK: sqrdmulh.8h
-  %tmp1 = load <8 x i16>* %A
-  %tmp2 = load <8 x i16>* %B
+  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp2 = load <8 x i16>, <8 x i16>* %B
   %tmp3 = shufflevector <8 x i16> %tmp2, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp3)
   ret <8 x i16> %tmp4
@@ -822,8 +822,8 @@ define <2 x i32> @sqrdmulh_lane_2s(<2 x
 ;CHECK-LABEL: sqrdmulh_lane_2s:
 ;CHECK-NOT: dup
 ;CHECK: sqrdmulh.2s
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp3)
   ret <2 x i32> %tmp4
@@ -833,8 +833,8 @@ define <4 x i32> @sqrdmulh_lane_4s(<4 x
 ;CHECK-LABEL: sqrdmulh_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: sqrdmulh.4s
-  %tmp1 = load <4 x i32>* %A
-  %tmp2 = load <4 x i32>* %B
+  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %tmp2 = load <4 x i32>, <4 x i32>* %B
   %tmp3 = shufflevector <4 x i32> %tmp2, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp3)
   ret <4 x i32> %tmp4
@@ -853,8 +853,8 @@ define <4 x i32> @sqdmull_lane_4s(<4 x i
 ;CHECK-LABEL: sqdmull_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: sqdmull.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3)
   ret <4 x i32> %tmp4
@@ -864,8 +864,8 @@ define <2 x i64> @sqdmull_lane_2d(<2 x i
 ;CHECK-LABEL: sqdmull_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: sqdmull.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3)
   ret <2 x i64> %tmp4
@@ -875,8 +875,8 @@ define <4 x i32> @sqdmull2_lane_4s(<8 x
 ;CHECK-LABEL: sqdmull2_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: sqdmull2.4s
-  %load1 = load <8 x i16>* %A
-  %load2 = load <8 x i16>* %B
+  %load1 = load <8 x i16>, <8 x i16>* %A
+  %load2 = load <8 x i16>, <8 x i16>* %B
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -887,8 +887,8 @@ define <2 x i64> @sqdmull2_lane_2d(<4 x
 ;CHECK-LABEL: sqdmull2_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: sqdmull2.2d
-  %load1 = load <4 x i32>* %A
-  %load2 = load <4 x i32>* %B
+  %load1 = load <4 x i32>, <4 x i32>* %A
+  %load2 = load <4 x i32>, <4 x i32>* %B
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -899,8 +899,8 @@ define <4 x i32> @umull_lane_4s(<4 x i16
 ;CHECK-LABEL: umull_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: umull.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3)
   ret <4 x i32> %tmp4
@@ -910,8 +910,8 @@ define <2 x i64> @umull_lane_2d(<2 x i32
 ;CHECK-LABEL: umull_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: umull.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3)
   ret <2 x i64> %tmp4
@@ -921,8 +921,8 @@ define <4 x i32> @smull_lane_4s(<4 x i16
 ;CHECK-LABEL: smull_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: smull.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3)
   ret <4 x i32> %tmp4
@@ -932,8 +932,8 @@ define <2 x i64> @smull_lane_2d(<2 x i32
 ;CHECK-LABEL: smull_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: smull.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3)
   ret <2 x i64> %tmp4
@@ -943,9 +943,9 @@ define <4 x i32> @smlal_lane_4s(<4 x i16
 ;CHECK-LABEL: smlal_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: smlal.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
   %tmp6 = add <4 x i32> %tmp3, %tmp5
@@ -956,9 +956,9 @@ define <2 x i64> @smlal_lane_2d(<2 x i32
 ;CHECK-LABEL: smlal_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: smlal.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
   %tmp6 = add <2 x i64> %tmp3, %tmp5
@@ -969,9 +969,9 @@ define <4 x i32> @sqdmlal_lane_4s(<4 x i
 ;CHECK-LABEL: sqdmlal_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: sqdmlal.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
   %tmp6 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp5)
@@ -982,9 +982,9 @@ define <2 x i64> @sqdmlal_lane_2d(<2 x i
 ;CHECK-LABEL: sqdmlal_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: sqdmlal.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
   %tmp6 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp5)
@@ -995,9 +995,9 @@ define <4 x i32> @sqdmlal2_lane_4s(<8 x
 ;CHECK-LABEL: sqdmlal2_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: sqdmlal2.4s
-  %load1 = load <8 x i16>* %A
-  %load2 = load <8 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %load1 = load <8 x i16>, <8 x i16>* %A
+  %load2 = load <8 x i16>, <8 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -1009,9 +1009,9 @@ define <2 x i64> @sqdmlal2_lane_2d(<4 x
 ;CHECK-LABEL: sqdmlal2_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: sqdmlal2.2d
-  %load1 = load <4 x i32>* %A
-  %load2 = load <4 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %load1 = load <4 x i32>, <4 x i32>* %A
+  %load2 = load <4 x i32>, <4 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -1069,9 +1069,9 @@ define <4 x i32> @umlal_lane_4s(<4 x i16
 ;CHECK-LABEL: umlal_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: umlal.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
   %tmp6 = add <4 x i32> %tmp3, %tmp5
@@ -1082,9 +1082,9 @@ define <2 x i64> @umlal_lane_2d(<2 x i32
 ;CHECK-LABEL: umlal_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: umlal.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
   %tmp6 = add <2 x i64> %tmp3, %tmp5
@@ -1096,9 +1096,9 @@ define <4 x i32> @smlsl_lane_4s(<4 x i16
 ;CHECK-LABEL: smlsl_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: smlsl.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
   %tmp6 = sub <4 x i32> %tmp3, %tmp5
@@ -1109,9 +1109,9 @@ define <2 x i64> @smlsl_lane_2d(<2 x i32
 ;CHECK-LABEL: smlsl_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: smlsl.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
   %tmp6 = sub <2 x i64> %tmp3, %tmp5
@@ -1122,9 +1122,9 @@ define <4 x i32> @sqdmlsl_lane_4s(<4 x i
 ;CHECK-LABEL: sqdmlsl_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: sqdmlsl.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
   %tmp6 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp5)
@@ -1135,9 +1135,9 @@ define <2 x i64> @sqdmlsl_lane_2d(<2 x i
 ;CHECK-LABEL: sqdmlsl_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: sqdmlsl.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
   %tmp6 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp5)
@@ -1148,9 +1148,9 @@ define <4 x i32> @sqdmlsl2_lane_4s(<8 x
 ;CHECK-LABEL: sqdmlsl2_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: sqdmlsl2.4s
-  %load1 = load <8 x i16>* %A
-  %load2 = load <8 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %load1 = load <8 x i16>, <8 x i16>* %A
+  %load2 = load <8 x i16>, <8 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -1162,9 +1162,9 @@ define <2 x i64> @sqdmlsl2_lane_2d(<4 x
 ;CHECK-LABEL: sqdmlsl2_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: sqdmlsl2.2d
-  %load1 = load <4 x i32>* %A
-  %load2 = load <4 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %load1 = load <4 x i32>, <4 x i32>* %A
+  %load2 = load <4 x i32>, <4 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -1176,9 +1176,9 @@ define <4 x i32> @umlsl_lane_4s(<4 x i16
 ;CHECK-LABEL: umlsl_lane_4s:
 ;CHECK-NOT: dup
 ;CHECK: umlsl.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
-  %tmp3 = load <4 x i32>* %C
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp3 = load <4 x i32>, <4 x i32>* %C
   %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
   %tmp6 = sub <4 x i32> %tmp3, %tmp5
@@ -1189,9 +1189,9 @@ define <2 x i64> @umlsl_lane_2d(<2 x i32
 ;CHECK-LABEL: umlsl_lane_2d:
 ;CHECK-NOT: dup
 ;CHECK: umlsl.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
-  %tmp3 = load <2 x i64>* %C
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp3 = load <2 x i64>, <2 x i64>* %C
   %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
   %tmp6 = sub <2 x i64> %tmp3, %tmp5

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-volatile.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-volatile.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-volatile.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-volatile.ll Fri Feb 27 15:17:42 2015
@@ -5,9 +5,9 @@ define i64 @normal_load(i64* nocapture %
 ; CHECK-NEXT: add
 ; CHECK-NEXT: ret
   %add.ptr = getelementptr inbounds i64, i64* %bar, i64 1
-  %tmp = load i64* %add.ptr, align 8
+  %tmp = load i64, i64* %add.ptr, align 8
   %add.ptr1 = getelementptr inbounds i64, i64* %bar, i64 2
-  %tmp1 = load i64* %add.ptr1, align 8
+  %tmp1 = load i64, i64* %add.ptr1, align 8
   %add = add nsw i64 %tmp1, %tmp
   ret i64 %add
 }
@@ -19,9 +19,9 @@ define i64 @volatile_load(i64* nocapture
 ; CHECK-NEXT: add
 ; CHECK-NEXT: ret
   %add.ptr = getelementptr inbounds i64, i64* %bar, i64 1
-  %tmp = load volatile i64* %add.ptr, align 8
+  %tmp = load volatile i64, i64* %add.ptr, align 8
   %add.ptr1 = getelementptr inbounds i64, i64* %bar, i64 2
-  %tmp1 = load volatile i64* %add.ptr1, align 8
+  %tmp1 = load volatile i64, i64* %add.ptr1, align 8
   %add = add nsw i64 %tmp1, %tmp
   ret i64 %add
 }

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vqadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vqadd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vqadd.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vqadd.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @sqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: sqadd8b:
 ;CHECK: sqadd.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @sqadd8b(<8 x i8>* %A, <
 define <4 x i16> @sqadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqadd4h:
 ;CHECK: sqadd.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @sqadd4h(<4 x i16>* %A,
 define <2 x i32> @sqadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqadd2s:
 ;CHECK: sqadd.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @sqadd2s(<2 x i32>* %A,
 define <8 x i8> @uqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: uqadd8b:
 ;CHECK: uqadd.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -39,8 +39,8 @@ define <8 x i8> @uqadd8b(<8 x i8>* %A, <
 define <4 x i16> @uqadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: uqadd4h:
 ;CHECK: uqadd.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -48,8 +48,8 @@ define <4 x i16> @uqadd4h(<4 x i16>* %A,
 define <2 x i32> @uqadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: uqadd2s:
 ;CHECK: uqadd.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -57,8 +57,8 @@ define <2 x i32> @uqadd2s(<2 x i32>* %A,
 define <16 x i8> @sqadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: sqadd16b:
 ;CHECK: sqadd.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.sqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -66,8 +66,8 @@ define <16 x i8> @sqadd16b(<16 x i8>* %A
 define <8 x i16> @sqadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqadd8h:
 ;CHECK: sqadd.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -75,8 +75,8 @@ define <8 x i16> @sqadd8h(<8 x i16>* %A,
 define <4 x i32> @sqadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqadd4s:
 ;CHECK: sqadd.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -84,8 +84,8 @@ define <4 x i32> @sqadd4s(<4 x i32>* %A,
 define <2 x i64> @sqadd2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: sqadd2d:
 ;CHECK: sqadd.2d
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -93,8 +93,8 @@ define <2 x i64> @sqadd2d(<2 x i64>* %A,
 define <16 x i8> @uqadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: uqadd16b:
 ;CHECK: uqadd.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -102,8 +102,8 @@ define <16 x i8> @uqadd16b(<16 x i8>* %A
 define <8 x i16> @uqadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: uqadd8h:
 ;CHECK: uqadd.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -111,8 +111,8 @@ define <8 x i16> @uqadd8h(<8 x i16>* %A,
 define <4 x i32> @uqadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: uqadd4s:
 ;CHECK: uqadd.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -120,8 +120,8 @@ define <4 x i32> @uqadd4s(<4 x i32>* %A,
 define <2 x i64> @uqadd2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: uqadd2d:
 ;CHECK: uqadd.2d
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.uqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -149,8 +149,8 @@ declare <2 x i64> @llvm.aarch64.neon.uqa
 define <8 x i8> @usqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: usqadd8b:
 ;CHECK: usqadd.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.usqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -158,8 +158,8 @@ define <8 x i8> @usqadd8b(<8 x i8>* %A,
 define <4 x i16> @usqadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: usqadd4h:
 ;CHECK: usqadd.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.usqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -167,8 +167,8 @@ define <4 x i16> @usqadd4h(<4 x i16>* %A
 define <2 x i32> @usqadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: usqadd2s:
 ;CHECK: usqadd.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.usqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -176,8 +176,8 @@ define <2 x i32> @usqadd2s(<2 x i32>* %A
 define <16 x i8> @usqadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: usqadd16b:
 ;CHECK: usqadd.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.usqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -185,8 +185,8 @@ define <16 x i8> @usqadd16b(<16 x i8>* %
 define <8 x i16> @usqadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: usqadd8h:
 ;CHECK: usqadd.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.usqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -194,8 +194,8 @@ define <8 x i16> @usqadd8h(<8 x i16>* %A
 define <4 x i32> @usqadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: usqadd4s:
 ;CHECK: usqadd.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.usqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -203,8 +203,8 @@ define <4 x i32> @usqadd4s(<4 x i32>* %A
 define <2 x i64> @usqadd2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: usqadd2d:
 ;CHECK: usqadd.2d
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.usqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -238,8 +238,8 @@ declare <2 x i64> @llvm.aarch64.neon.usq
 define <8 x i8> @suqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: suqadd8b:
 ;CHECK: suqadd.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -247,8 +247,8 @@ define <8 x i8> @suqadd8b(<8 x i8>* %A,
 define <4 x i16> @suqadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: suqadd4h:
 ;CHECK: suqadd.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -256,8 +256,8 @@ define <4 x i16> @suqadd4h(<4 x i16>* %A
 define <2 x i32> @suqadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: suqadd2s:
 ;CHECK: suqadd.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.suqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -265,8 +265,8 @@ define <2 x i32> @suqadd2s(<2 x i32>* %A
 define <16 x i8> @suqadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: suqadd16b:
 ;CHECK: suqadd.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.suqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -274,8 +274,8 @@ define <16 x i8> @suqadd16b(<16 x i8>* %
 define <8 x i16> @suqadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: suqadd8h:
 ;CHECK: suqadd.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.suqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -283,8 +283,8 @@ define <8 x i16> @suqadd8h(<8 x i16>* %A
 define <4 x i32> @suqadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: suqadd4s:
 ;CHECK: suqadd.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.suqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -292,8 +292,8 @@ define <4 x i32> @suqadd4s(<4 x i32>* %A
 define <2 x i64> @suqadd2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: suqadd2d:
 ;CHECK: suqadd.2d
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vqsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vqsub.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vqsub.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vqsub.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @sqsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: sqsub8b:
 ;CHECK: sqsub.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.sqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @sqsub8b(<8 x i8>* %A, <
 define <4 x i16> @sqsub4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqsub4h:
 ;CHECK: sqsub.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @sqsub4h(<4 x i16>* %A,
 define <2 x i32> @sqsub2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqsub2s:
 ;CHECK: sqsub.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @sqsub2s(<2 x i32>* %A,
 define <8 x i8> @uqsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: uqsub8b:
 ;CHECK: uqsub.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
@@ -39,8 +39,8 @@ define <8 x i8> @uqsub8b(<8 x i8>* %A, <
 define <4 x i16> @uqsub4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: uqsub4h:
 ;CHECK: uqsub.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
@@ -48,8 +48,8 @@ define <4 x i16> @uqsub4h(<4 x i16>* %A,
 define <2 x i32> @uqsub2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: uqsub2s:
 ;CHECK: uqsub.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
@@ -57,8 +57,8 @@ define <2 x i32> @uqsub2s(<2 x i32>* %A,
 define <16 x i8> @sqsub16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: sqsub16b:
 ;CHECK: sqsub.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.sqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -66,8 +66,8 @@ define <16 x i8> @sqsub16b(<16 x i8>* %A
 define <8 x i16> @sqsub8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqsub8h:
 ;CHECK: sqsub.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -75,8 +75,8 @@ define <8 x i16> @sqsub8h(<8 x i16>* %A,
 define <4 x i32> @sqsub4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqsub4s:
 ;CHECK: sqsub.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -84,8 +84,8 @@ define <4 x i32> @sqsub4s(<4 x i32>* %A,
 define <2 x i64> @sqsub2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: sqsub2d:
 ;CHECK: sqsub.2d
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -93,8 +93,8 @@ define <2 x i64> @sqsub2d(<2 x i64>* %A,
 define <16 x i8> @uqsub16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: uqsub16b:
 ;CHECK: uqsub.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
@@ -102,8 +102,8 @@ define <16 x i8> @uqsub16b(<16 x i8>* %A
 define <8 x i16> @uqsub8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: uqsub8h:
 ;CHECK: uqsub.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
@@ -111,8 +111,8 @@ define <8 x i16> @uqsub8h(<8 x i16>* %A,
 define <4 x i32> @uqsub4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: uqsub4s:
 ;CHECK: uqsub.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uqsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -120,8 +120,8 @@ define <4 x i32> @uqsub4s(<4 x i32>* %A,
 define <2 x i64> @uqsub2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: uqsub2d:
 ;CHECK: uqsub.2d
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.uqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vshift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vshift.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vshift.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vshift.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @sqshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: sqshl8b:
 ;CHECK: sqshl.8b
-        %tmp1 = load <8 x i8>* %A
-        %tmp2 = load <8 x i8>* %B
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp2 = load <8 x i8>, <8 x i8>* %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
         ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @sqshl8b(<8 x i8>* %A, <
 define <4 x i16> @sqshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqshl4h:
 ;CHECK: sqshl.4h
-        %tmp1 = load <4 x i16>* %A
-        %tmp2 = load <4 x i16>* %B
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp2 = load <4 x i16>, <4 x i16>* %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
         ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @sqshl4h(<4 x i16>* %A,
 define <2 x i32> @sqshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqshl2s:
 ;CHECK: sqshl.2s
-        %tmp1 = load <2 x i32>* %A
-        %tmp2 = load <2 x i32>* %B
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp2 = load <2 x i32>, <2 x i32>* %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
         ret <2 x i32> %tmp3
 }
@@ -30,8 +30,8 @@ define <2 x i32> @sqshl2s(<2 x i32>* %A,
 define <8 x i8> @uqshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: uqshl8b:
 ;CHECK: uqshl.8b
-        %tmp1 = load <8 x i8>* %A
-        %tmp2 = load <8 x i8>* %B
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp2 = load <8 x i8>, <8 x i8>* %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
         ret <8 x i8> %tmp3
 }
@@ -39,8 +39,8 @@ define <8 x i8> @uqshl8b(<8 x i8>* %A, <
 define <4 x i16> @uqshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: uqshl4h:
 ;CHECK: uqshl.4h
-        %tmp1 = load <4 x i16>* %A
-        %tmp2 = load <4 x i16>* %B
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp2 = load <4 x i16>, <4 x i16>* %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
         ret <4 x i16> %tmp3
 }
@@ -48,8 +48,8 @@ define <4 x i16> @uqshl4h(<4 x i16>* %A,
 define <2 x i32> @uqshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: uqshl2s:
 ;CHECK: uqshl.2s
-        %tmp1 = load <2 x i32>* %A
-        %tmp2 = load <2 x i32>* %B
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp2 = load <2 x i32>, <2 x i32>* %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
         ret <2 x i32> %tmp3
 }
@@ -57,8 +57,8 @@ define <2 x i32> @uqshl2s(<2 x i32>* %A,
 define <16 x i8> @sqshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: sqshl16b:
 ;CHECK: sqshl.16b
-        %tmp1 = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
         ret <16 x i8> %tmp3
 }
@@ -66,8 +66,8 @@ define <16 x i8> @sqshl16b(<16 x i8>* %A
 define <8 x i16> @sqshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqshl8h:
 ;CHECK: sqshl.8h
-        %tmp1 = load <8 x i16>* %A
-        %tmp2 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i16> %tmp3
 }
@@ -75,8 +75,8 @@ define <8 x i16> @sqshl8h(<8 x i16>* %A,
 define <4 x i32> @sqshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqshl4s:
 ;CHECK: sqshl.4s
-        %tmp1 = load <4 x i32>* %A
-        %tmp2 = load <4 x i32>* %B
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp2 = load <4 x i32>, <4 x i32>* %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i32> %tmp3
 }
@@ -84,8 +84,8 @@ define <4 x i32> @sqshl4s(<4 x i32>* %A,
 define <2 x i64> @sqshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: sqshl2d:
 ;CHECK: sqshl.2d
-        %tmp1 = load <2 x i64>* %A
-        %tmp2 = load <2 x i64>* %B
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i64>, <2 x i64>* %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i64> %tmp3
 }
@@ -93,8 +93,8 @@ define <2 x i64> @sqshl2d(<2 x i64>* %A,
 define <16 x i8> @uqshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: uqshl16b:
 ;CHECK: uqshl.16b
-        %tmp1 = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
         ret <16 x i8> %tmp3
 }
@@ -102,8 +102,8 @@ define <16 x i8> @uqshl16b(<16 x i8>* %A
 define <8 x i16> @uqshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: uqshl8h:
 ;CHECK: uqshl.8h
-        %tmp1 = load <8 x i16>* %A
-        %tmp2 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i16> %tmp3
 }
@@ -111,8 +111,8 @@ define <8 x i16> @uqshl8h(<8 x i16>* %A,
 define <4 x i32> @uqshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: uqshl4s:
 ;CHECK: uqshl.4s
-        %tmp1 = load <4 x i32>* %A
-        %tmp2 = load <4 x i32>* %B
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp2 = load <4 x i32>, <4 x i32>* %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i32> %tmp3
 }
@@ -120,8 +120,8 @@ define <4 x i32> @uqshl4s(<4 x i32>* %A,
 define <2 x i64> @uqshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: uqshl2d:
 ;CHECK: uqshl.2d
-        %tmp1 = load <2 x i64>* %A
-        %tmp2 = load <2 x i64>* %B
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i64>, <2 x i64>* %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i64> %tmp3
 }
@@ -149,8 +149,8 @@ declare <2 x i64> @llvm.aarch64.neon.uqs
 define <8 x i8> @srshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: srshl8b:
 ;CHECK: srshl.8b
-        %tmp1 = load <8 x i8>* %A
-        %tmp2 = load <8 x i8>* %B
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp2 = load <8 x i8>, <8 x i8>* %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
         ret <8 x i8> %tmp3
 }
@@ -158,8 +158,8 @@ define <8 x i8> @srshl8b(<8 x i8>* %A, <
 define <4 x i16> @srshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: srshl4h:
 ;CHECK: srshl.4h
-        %tmp1 = load <4 x i16>* %A
-        %tmp2 = load <4 x i16>* %B
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp2 = load <4 x i16>, <4 x i16>* %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
         ret <4 x i16> %tmp3
 }
@@ -167,8 +167,8 @@ define <4 x i16> @srshl4h(<4 x i16>* %A,
 define <2 x i32> @srshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: srshl2s:
 ;CHECK: srshl.2s
-        %tmp1 = load <2 x i32>* %A
-        %tmp2 = load <2 x i32>* %B
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp2 = load <2 x i32>, <2 x i32>* %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
         ret <2 x i32> %tmp3
 }
@@ -176,8 +176,8 @@ define <2 x i32> @srshl2s(<2 x i32>* %A,
 define <8 x i8> @urshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: urshl8b:
 ;CHECK: urshl.8b
-        %tmp1 = load <8 x i8>* %A
-        %tmp2 = load <8 x i8>* %B
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp2 = load <8 x i8>, <8 x i8>* %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
         ret <8 x i8> %tmp3
 }
@@ -185,8 +185,8 @@ define <8 x i8> @urshl8b(<8 x i8>* %A, <
 define <4 x i16> @urshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: urshl4h:
 ;CHECK: urshl.4h
-        %tmp1 = load <4 x i16>* %A
-        %tmp2 = load <4 x i16>* %B
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp2 = load <4 x i16>, <4 x i16>* %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
         ret <4 x i16> %tmp3
 }
@@ -194,8 +194,8 @@ define <4 x i16> @urshl4h(<4 x i16>* %A,
 define <2 x i32> @urshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: urshl2s:
 ;CHECK: urshl.2s
-        %tmp1 = load <2 x i32>* %A
-        %tmp2 = load <2 x i32>* %B
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp2 = load <2 x i32>, <2 x i32>* %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
         ret <2 x i32> %tmp3
 }
@@ -203,8 +203,8 @@ define <2 x i32> @urshl2s(<2 x i32>* %A,
 define <16 x i8> @srshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: srshl16b:
 ;CHECK: srshl.16b
-        %tmp1 = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
         ret <16 x i8> %tmp3
 }
@@ -212,8 +212,8 @@ define <16 x i8> @srshl16b(<16 x i8>* %A
 define <8 x i16> @srshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: srshl8h:
 ;CHECK: srshl.8h
-        %tmp1 = load <8 x i16>* %A
-        %tmp2 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i16> %tmp3
 }
@@ -221,8 +221,8 @@ define <8 x i16> @srshl8h(<8 x i16>* %A,
 define <4 x i32> @srshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: srshl4s:
 ;CHECK: srshl.4s
-        %tmp1 = load <4 x i32>* %A
-        %tmp2 = load <4 x i32>* %B
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp2 = load <4 x i32>, <4 x i32>* %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i32> %tmp3
 }
@@ -230,8 +230,8 @@ define <4 x i32> @srshl4s(<4 x i32>* %A,
 define <2 x i64> @srshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: srshl2d:
 ;CHECK: srshl.2d
-        %tmp1 = load <2 x i64>* %A
-        %tmp2 = load <2 x i64>* %B
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i64>, <2 x i64>* %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i64> %tmp3
 }
@@ -239,8 +239,8 @@ define <2 x i64> @srshl2d(<2 x i64>* %A,
 define <16 x i8> @urshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: urshl16b:
 ;CHECK: urshl.16b
-        %tmp1 = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
         ret <16 x i8> %tmp3
 }
@@ -248,8 +248,8 @@ define <16 x i8> @urshl16b(<16 x i8>* %A
 define <8 x i16> @urshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: urshl8h:
 ;CHECK: urshl.8h
-        %tmp1 = load <8 x i16>* %A
-        %tmp2 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i16> %tmp3
 }
@@ -257,8 +257,8 @@ define <8 x i16> @urshl8h(<8 x i16>* %A,
 define <4 x i32> @urshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: urshl4s:
 ;CHECK: urshl.4s
-        %tmp1 = load <4 x i32>* %A
-        %tmp2 = load <4 x i32>* %B
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp2 = load <4 x i32>, <4 x i32>* %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i32> %tmp3
 }
@@ -266,8 +266,8 @@ define <4 x i32> @urshl4s(<4 x i32>* %A,
 define <2 x i64> @urshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: urshl2d:
 ;CHECK: urshl.2d
-        %tmp1 = load <2 x i64>* %A
-        %tmp2 = load <2 x i64>* %B
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i64>, <2 x i64>* %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i64> %tmp3
 }
@@ -295,8 +295,8 @@ declare <2 x i64> @llvm.aarch64.neon.urs
 define <8 x i8> @sqrshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: sqrshl8b:
 ;CHECK: sqrshl.8b
-        %tmp1 = load <8 x i8>* %A
-        %tmp2 = load <8 x i8>* %B
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp2 = load <8 x i8>, <8 x i8>* %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
         ret <8 x i8> %tmp3
 }
@@ -304,8 +304,8 @@ define <8 x i8> @sqrshl8b(<8 x i8>* %A,
 define <4 x i16> @sqrshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqrshl4h:
 ;CHECK: sqrshl.4h
-        %tmp1 = load <4 x i16>* %A
-        %tmp2 = load <4 x i16>* %B
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp2 = load <4 x i16>, <4 x i16>* %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
         ret <4 x i16> %tmp3
 }
@@ -313,8 +313,8 @@ define <4 x i16> @sqrshl4h(<4 x i16>* %A
 define <2 x i32> @sqrshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqrshl2s:
 ;CHECK: sqrshl.2s
-        %tmp1 = load <2 x i32>* %A
-        %tmp2 = load <2 x i32>* %B
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp2 = load <2 x i32>, <2 x i32>* %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
         ret <2 x i32> %tmp3
 }
@@ -322,8 +322,8 @@ define <2 x i32> @sqrshl2s(<2 x i32>* %A
 define <8 x i8> @uqrshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: uqrshl8b:
 ;CHECK: uqrshl.8b
-        %tmp1 = load <8 x i8>* %A
-        %tmp2 = load <8 x i8>* %B
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp2 = load <8 x i8>, <8 x i8>* %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
         ret <8 x i8> %tmp3
 }
@@ -331,8 +331,8 @@ define <8 x i8> @uqrshl8b(<8 x i8>* %A,
 define <4 x i16> @uqrshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: uqrshl4h:
 ;CHECK: uqrshl.4h
-        %tmp1 = load <4 x i16>* %A
-        %tmp2 = load <4 x i16>* %B
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp2 = load <4 x i16>, <4 x i16>* %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
         ret <4 x i16> %tmp3
 }
@@ -340,8 +340,8 @@ define <4 x i16> @uqrshl4h(<4 x i16>* %A
 define <2 x i32> @uqrshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: uqrshl2s:
 ;CHECK: uqrshl.2s
-        %tmp1 = load <2 x i32>* %A
-        %tmp2 = load <2 x i32>* %B
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp2 = load <2 x i32>, <2 x i32>* %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
         ret <2 x i32> %tmp3
 }
@@ -349,8 +349,8 @@ define <2 x i32> @uqrshl2s(<2 x i32>* %A
 define <16 x i8> @sqrshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: sqrshl16b:
 ;CHECK: sqrshl.16b
-        %tmp1 = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqrshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
         ret <16 x i8> %tmp3
 }
@@ -358,8 +358,8 @@ define <16 x i8> @sqrshl16b(<16 x i8>* %
 define <8 x i16> @sqrshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: sqrshl8h:
 ;CHECK: sqrshl.8h
-        %tmp1 = load <8 x i16>* %A
-        %tmp2 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqrshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i16> %tmp3
 }
@@ -367,8 +367,8 @@ define <8 x i16> @sqrshl8h(<8 x i16>* %A
 define <4 x i32> @sqrshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: sqrshl4s:
 ;CHECK: sqrshl.4s
-        %tmp1 = load <4 x i32>* %A
-        %tmp2 = load <4 x i32>* %B
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp2 = load <4 x i32>, <4 x i32>* %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqrshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i32> %tmp3
 }
@@ -376,8 +376,8 @@ define <4 x i32> @sqrshl4s(<4 x i32>* %A
 define <2 x i64> @sqrshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: sqrshl2d:
 ;CHECK: sqrshl.2d
-        %tmp1 = load <2 x i64>* %A
-        %tmp2 = load <2 x i64>* %B
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i64>, <2 x i64>* %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqrshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i64> %tmp3
 }
@@ -385,8 +385,8 @@ define <2 x i64> @sqrshl2d(<2 x i64>* %A
 define <16 x i8> @uqrshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: uqrshl16b:
 ;CHECK: uqrshl.16b
-        %tmp1 = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqrshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
         ret <16 x i8> %tmp3
 }
@@ -394,8 +394,8 @@ define <16 x i8> @uqrshl16b(<16 x i8>* %
 define <8 x i16> @uqrshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: uqrshl8h:
 ;CHECK: uqrshl.8h
-        %tmp1 = load <8 x i16>* %A
-        %tmp2 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqrshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i16> %tmp3
 }
@@ -403,8 +403,8 @@ define <8 x i16> @uqrshl8h(<8 x i16>* %A
 define <4 x i32> @uqrshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: uqrshl4s:
 ;CHECK: uqrshl.4s
-        %tmp1 = load <4 x i32>* %A
-        %tmp2 = load <4 x i32>* %B
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp2 = load <4 x i32>, <4 x i32>* %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqrshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i32> %tmp3
 }
@@ -412,8 +412,8 @@ define <4 x i32> @uqrshl4s(<4 x i32>* %A
 define <2 x i64> @uqrshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: uqrshl2d:
 ;CHECK: uqrshl.2d
-        %tmp1 = load <2 x i64>* %A
-        %tmp2 = load <2 x i64>* %B
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i64>, <2 x i64>* %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqrshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i64> %tmp3
 }
@@ -441,7 +441,7 @@ declare <2 x i64> @llvm.aarch64.neon.uqr
 define <8 x i8> @urshr8b(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: urshr8b:
 ;CHECK: urshr.8b
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
         ret <8 x i8> %tmp3
 }
@@ -449,7 +449,7 @@ define <8 x i8> @urshr8b(<8 x i8>* %A) n
 define <4 x i16> @urshr4h(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: urshr4h:
 ;CHECK: urshr.4h
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
         ret <4 x i16> %tmp3
 }
@@ -457,7 +457,7 @@ define <4 x i16> @urshr4h(<4 x i16>* %A)
 define <2 x i32> @urshr2s(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: urshr2s:
 ;CHECK: urshr.2s
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
         ret <2 x i32> %tmp3
 }
@@ -465,7 +465,7 @@ define <2 x i32> @urshr2s(<2 x i32>* %A)
 define <16 x i8> @urshr16b(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: urshr16b:
 ;CHECK: urshr.16b
-        %tmp1 = load <16 x i8>* %A
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
         ret <16 x i8> %tmp3
 }
@@ -473,7 +473,7 @@ define <16 x i8> @urshr16b(<16 x i8>* %A
 define <8 x i16> @urshr8h(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: urshr8h:
 ;CHECK: urshr.8h
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
         ret <8 x i16> %tmp3
 }
@@ -481,7 +481,7 @@ define <8 x i16> @urshr8h(<8 x i16>* %A)
 define <4 x i32> @urshr4s(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: urshr4s:
 ;CHECK: urshr.4s
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
         ret <4 x i32> %tmp3
 }
@@ -489,7 +489,7 @@ define <4 x i32> @urshr4s(<4 x i32>* %A)
 define <2 x i64> @urshr2d(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: urshr2d:
 ;CHECK: urshr.2d
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
         ret <2 x i64> %tmp3
 }
@@ -497,7 +497,7 @@ define <2 x i64> @urshr2d(<2 x i64>* %A)
 define <8 x i8> @srshr8b(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: srshr8b:
 ;CHECK: srshr.8b
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
         ret <8 x i8> %tmp3
 }
@@ -505,7 +505,7 @@ define <8 x i8> @srshr8b(<8 x i8>* %A) n
 define <4 x i16> @srshr4h(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: srshr4h:
 ;CHECK: srshr.4h
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
         ret <4 x i16> %tmp3
 }
@@ -513,7 +513,7 @@ define <4 x i16> @srshr4h(<4 x i16>* %A)
 define <2 x i32> @srshr2s(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: srshr2s:
 ;CHECK: srshr.2s
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
         ret <2 x i32> %tmp3
 }
@@ -521,7 +521,7 @@ define <2 x i32> @srshr2s(<2 x i32>* %A)
 define <16 x i8> @srshr16b(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: srshr16b:
 ;CHECK: srshr.16b
-        %tmp1 = load <16 x i8>* %A
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
         ret <16 x i8> %tmp3
 }
@@ -529,7 +529,7 @@ define <16 x i8> @srshr16b(<16 x i8>* %A
 define <8 x i16> @srshr8h(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: srshr8h:
 ;CHECK: srshr.8h
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
         ret <8 x i16> %tmp3
 }
@@ -537,7 +537,7 @@ define <8 x i16> @srshr8h(<8 x i16>* %A)
 define <4 x i32> @srshr4s(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: srshr4s:
 ;CHECK: srshr.4s
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
         ret <4 x i32> %tmp3
 }
@@ -545,7 +545,7 @@ define <4 x i32> @srshr4s(<4 x i32>* %A)
 define <2 x i64> @srshr2d(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: srshr2d:
 ;CHECK: srshr.2d
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
         ret <2 x i64> %tmp3
 }
@@ -553,7 +553,7 @@ define <2 x i64> @srshr2d(<2 x i64>* %A)
 define <8 x i8> @sqshlu8b(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: sqshlu8b:
 ;CHECK: sqshlu.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshlu.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <8 x i8> %tmp3
 }
@@ -561,7 +561,7 @@ define <8 x i8> @sqshlu8b(<8 x i8>* %A)
 define <4 x i16> @sqshlu4h(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: sqshlu4h:
 ;CHECK: sqshlu.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshlu.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
         ret <4 x i16> %tmp3
 }
@@ -569,7 +569,7 @@ define <4 x i16> @sqshlu4h(<4 x i16>* %A
 define <2 x i32> @sqshlu2s(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: sqshlu2s:
 ;CHECK: sqshlu.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshlu.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
         ret <2 x i32> %tmp3
 }
@@ -577,7 +577,7 @@ define <2 x i32> @sqshlu2s(<2 x i32>* %A
 define <16 x i8> @sqshlu16b(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: sqshlu16b:
 ;CHECK: sqshlu.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>* %A
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshlu.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <16 x i8> %tmp3
 }
@@ -585,7 +585,7 @@ define <16 x i8> @sqshlu16b(<16 x i8>* %
 define <8 x i16> @sqshlu8h(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: sqshlu8h:
 ;CHECK: sqshlu.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshlu.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
         ret <8 x i16> %tmp3
 }
@@ -593,7 +593,7 @@ define <8 x i16> @sqshlu8h(<8 x i16>* %A
 define <4 x i32> @sqshlu4s(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: sqshlu4s:
 ;CHECK: sqshlu.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshlu.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
         ret <4 x i32> %tmp3
 }
@@ -601,7 +601,7 @@ define <4 x i32> @sqshlu4s(<4 x i32>* %A
 define <2 x i64> @sqshlu2d(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: sqshlu2d:
 ;CHECK: sqshlu.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshlu.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
         ret <2 x i64> %tmp3
 }
@@ -619,7 +619,7 @@ declare <2 x i64> @llvm.aarch64.neon.sqs
 define <8 x i8> @rshrn8b(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: rshrn8b:
 ;CHECK: rshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
@@ -627,7 +627,7 @@ define <8 x i8> @rshrn8b(<8 x i16>* %A)
 define <4 x i16> @rshrn4h(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: rshrn4h:
 ;CHECK: rshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
@@ -635,7 +635,7 @@ define <4 x i16> @rshrn4h(<4 x i32>* %A)
 define <2 x i32> @rshrn2s(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: rshrn2s:
 ;CHECK: rshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
@@ -643,8 +643,8 @@ define <2 x i32> @rshrn2s(<2 x i64>* %A)
 define <16 x i8> @rshrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
 ;CHECK-LABEL: rshrn16b:
 ;CHECK: rshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>* %ret
-        %tmp1 = load <8 x i16>* %A
+        %out = load <8 x i8>, <8 x i8>* %ret
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
@@ -653,8 +653,8 @@ define <16 x i8> @rshrn16b(<8 x i8> *%re
 define <8 x i16> @rshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
 ;CHECK-LABEL: rshrn8h:
 ;CHECK: rshrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>* %ret
-        %tmp1 = load <4 x i32>* %A
+        %out = load <4 x i16>, <4 x i16>* %ret
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> %tmp1, i32 1)
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %tmp4
@@ -663,8 +663,8 @@ define <8 x i16> @rshrn8h(<4 x i16>* %re
 define <4 x i32> @rshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
 ;CHECK-LABEL: rshrn4s:
 ;CHECK: rshrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>* %ret
-        %tmp1 = load <2 x i64>* %A
+        %out = load <2 x i32>, <2 x i32>* %ret
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> %tmp1, i32 1)
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %tmp4
@@ -677,7 +677,7 @@ declare <2 x i32> @llvm.aarch64.neon.rsh
 define <8 x i8> @shrn8b(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: shrn8b:
 ;CHECK: shrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp2 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
         ret <8 x i8> %tmp3
@@ -686,7 +686,7 @@ define <8 x i8> @shrn8b(<8 x i16>* %A) n
 define <4 x i16> @shrn4h(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: shrn4h:
 ;CHECK: shrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp2 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
         %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
         ret <4 x i16> %tmp3
@@ -695,7 +695,7 @@ define <4 x i16> @shrn4h(<4 x i32>* %A)
 define <2 x i32> @shrn2s(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: shrn2s:
 ;CHECK: shrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp2 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
         %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
         ret <2 x i32> %tmp3
@@ -704,8 +704,8 @@ define <2 x i32> @shrn2s(<2 x i64>* %A)
 define <16 x i8> @shrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
 ;CHECK-LABEL: shrn16b:
 ;CHECK: shrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>* %ret
-        %tmp1 = load <8 x i16>* %A
+        %out = load <8 x i8>, <8 x i8>* %ret
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp2 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -715,8 +715,8 @@ define <16 x i8> @shrn16b(<8 x i8>* %ret
 define <8 x i16> @shrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
 ;CHECK-LABEL: shrn8h:
 ;CHECK: shrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>* %ret
-        %tmp1 = load <4 x i32>* %A
+        %out = load <4 x i16>, <4 x i16>* %ret
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp2 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
         %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -726,8 +726,8 @@ define <8 x i16> @shrn8h(<4 x i16>* %ret
 define <4 x i32> @shrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
 ;CHECK-LABEL: shrn4s:
 ;CHECK: shrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>* %ret
-        %tmp1 = load <2 x i64>* %A
+        %out = load <2 x i32>, <2 x i32>* %ret
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp2 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
         %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -748,7 +748,7 @@ define i32 @sqshrn1s(i64 %A) nounwind {
 define <8 x i8> @sqshrn8b(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: sqshrn8b:
 ;CHECK: sqshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
@@ -756,7 +756,7 @@ define <8 x i8> @sqshrn8b(<8 x i16>* %A)
 define <4 x i16> @sqshrn4h(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: sqshrn4h:
 ;CHECK: sqshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
@@ -764,7 +764,7 @@ define <4 x i16> @sqshrn4h(<4 x i32>* %A
 define <2 x i32> @sqshrn2s(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: sqshrn2s:
 ;CHECK: sqshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
@@ -773,8 +773,8 @@ define <2 x i32> @sqshrn2s(<2 x i64>* %A
 define <16 x i8> @sqshrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
 ;CHECK-LABEL: sqshrn16b:
 ;CHECK: sqshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>* %ret
-        %tmp1 = load <8 x i16>* %A
+        %out = load <8 x i8>, <8 x i8>* %ret
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
@@ -783,8 +783,8 @@ define <16 x i8> @sqshrn16b(<8 x i8>* %r
 define <8 x i16> @sqshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
 ;CHECK-LABEL: sqshrn8h:
 ;CHECK: sqshrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>* %ret
-        %tmp1 = load <4 x i32>* %A
+        %out = load <4 x i16>, <4 x i16>* %ret
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> %tmp1, i32 1)
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %tmp4
@@ -793,8 +793,8 @@ define <8 x i16> @sqshrn8h(<4 x i16>* %r
 define <4 x i32> @sqshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
 ;CHECK-LABEL: sqshrn4s:
 ;CHECK: sqshrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>* %ret
-        %tmp1 = load <2 x i64>* %A
+        %out = load <2 x i32>, <2 x i32>* %ret
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> %tmp1, i32 1)
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %tmp4
@@ -815,7 +815,7 @@ define i32 @sqshrun1s(i64 %A) nounwind {
 define <8 x i8> @sqshrun8b(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: sqshrun8b:
 ;CHECK: sqshrun.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
@@ -823,7 +823,7 @@ define <8 x i8> @sqshrun8b(<8 x i16>* %A
 define <4 x i16> @sqshrun4h(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: sqshrun4h:
 ;CHECK: sqshrun.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
@@ -831,7 +831,7 @@ define <4 x i16> @sqshrun4h(<4 x i32>* %
 define <2 x i32> @sqshrun2s(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: sqshrun2s:
 ;CHECK: sqshrun.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
@@ -839,8 +839,8 @@ define <2 x i32> @sqshrun2s(<2 x i64>* %
 define <16 x i8> @sqshrun16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
 ;CHECK-LABEL: sqshrun16b:
 ;CHECK: sqshrun2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>* %ret
-        %tmp1 = load <8 x i16>* %A
+        %out = load <8 x i8>, <8 x i8>* %ret
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
@@ -849,8 +849,8 @@ define <16 x i8> @sqshrun16b(<8 x i8>* %
 define <8 x i16> @sqshrun8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
 ;CHECK-LABEL: sqshrun8h:
 ;CHECK: sqshrun2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>* %ret
-        %tmp1 = load <4 x i32>* %A
+        %out = load <4 x i16>, <4 x i16>* %ret
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> %tmp1, i32 1)
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %tmp4
@@ -859,8 +859,8 @@ define <8 x i16> @sqshrun8h(<4 x i16>* %
 define <4 x i32> @sqshrun4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
 ;CHECK-LABEL: sqshrun4s:
 ;CHECK: sqshrun2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>* %ret
-        %tmp1 = load <2 x i64>* %A
+        %out = load <2 x i32>, <2 x i32>* %ret
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> %tmp1, i32 1)
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %tmp4
@@ -881,7 +881,7 @@ define i32 @sqrshrn1s(i64 %A) nounwind {
 define <8 x i8> @sqrshrn8b(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: sqrshrn8b:
 ;CHECK: sqrshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
@@ -889,7 +889,7 @@ define <8 x i8> @sqrshrn8b(<8 x i16>* %A
 define <4 x i16> @sqrshrn4h(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: sqrshrn4h:
 ;CHECK: sqrshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
@@ -897,7 +897,7 @@ define <4 x i16> @sqrshrn4h(<4 x i32>* %
 define <2 x i32> @sqrshrn2s(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: sqrshrn2s:
 ;CHECK: sqrshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
@@ -905,8 +905,8 @@ define <2 x i32> @sqrshrn2s(<2 x i64>* %
 define <16 x i8> @sqrshrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
 ;CHECK-LABEL: sqrshrn16b:
 ;CHECK: sqrshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>* %ret
-        %tmp1 = load <8 x i16>* %A
+        %out = load <8 x i8>, <8 x i8>* %ret
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
@@ -915,8 +915,8 @@ define <16 x i8> @sqrshrn16b(<8 x i8>* %
 define <8 x i16> @sqrshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
 ;CHECK-LABEL: sqrshrn8h:
 ;CHECK: sqrshrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>* %ret
-        %tmp1 = load <4 x i32>* %A
+        %out = load <4 x i16>, <4 x i16>* %ret
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %tmp4
@@ -925,8 +925,8 @@ define <8 x i16> @sqrshrn8h(<4 x i16>* %
 define <4 x i32> @sqrshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
 ;CHECK-LABEL: sqrshrn4s:
 ;CHECK: sqrshrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>* %ret
-        %tmp1 = load <2 x i64>* %A
+        %out = load <2 x i32>, <2 x i32>* %ret
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %tmp4
@@ -947,7 +947,7 @@ define i32 @sqrshrun1s(i64 %A) nounwind
 define <8 x i8> @sqrshrun8b(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: sqrshrun8b:
 ;CHECK: sqrshrun.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
@@ -955,7 +955,7 @@ define <8 x i8> @sqrshrun8b(<8 x i16>* %
 define <4 x i16> @sqrshrun4h(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: sqrshrun4h:
 ;CHECK: sqrshrun.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
@@ -963,7 +963,7 @@ define <4 x i16> @sqrshrun4h(<4 x i32>*
 define <2 x i32> @sqrshrun2s(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: sqrshrun2s:
 ;CHECK: sqrshrun.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
@@ -971,8 +971,8 @@ define <2 x i32> @sqrshrun2s(<2 x i64>*
 define <16 x i8> @sqrshrun16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
 ;CHECK-LABEL: sqrshrun16b:
 ;CHECK: sqrshrun2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>* %ret
-        %tmp1 = load <8 x i16>* %A
+        %out = load <8 x i8>, <8 x i8>* %ret
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
@@ -981,8 +981,8 @@ define <16 x i8> @sqrshrun16b(<8 x i8>*
 define <8 x i16> @sqrshrun8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
 ;CHECK-LABEL: sqrshrun8h:
 ;CHECK: sqrshrun2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>* %ret
-        %tmp1 = load <4 x i32>* %A
+        %out = load <4 x i16>, <4 x i16>* %ret
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %tmp1, i32 1)
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %tmp4
@@ -991,8 +991,8 @@ define <8 x i16> @sqrshrun8h(<4 x i16>*
 define <4 x i32> @sqrshrun4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
 ;CHECK-LABEL: sqrshrun4s:
 ;CHECK: sqrshrun2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>* %ret
-        %tmp1 = load <2 x i64>* %A
+        %out = load <2 x i32>, <2 x i32>* %ret
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %tmp1, i32 1)
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %tmp4
@@ -1013,7 +1013,7 @@ define i32 @uqrshrn1s(i64 %A) nounwind {
 define <8 x i8> @uqrshrn8b(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: uqrshrn8b:
 ;CHECK: uqrshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
@@ -1021,7 +1021,7 @@ define <8 x i8> @uqrshrn8b(<8 x i16>* %A
 define <4 x i16> @uqrshrn4h(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: uqrshrn4h:
 ;CHECK: uqrshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
@@ -1029,7 +1029,7 @@ define <4 x i16> @uqrshrn4h(<4 x i32>* %
 define <2 x i32> @uqrshrn2s(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: uqrshrn2s:
 ;CHECK: uqrshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
@@ -1037,8 +1037,8 @@ define <2 x i32> @uqrshrn2s(<2 x i64>* %
 define <16 x i8> @uqrshrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
 ;CHECK-LABEL: uqrshrn16b:
 ;CHECK: uqrshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>* %ret
-        %tmp1 = load <8 x i16>* %A
+        %out = load <8 x i8>, <8 x i8>* %ret
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
@@ -1047,8 +1047,8 @@ define <16 x i8> @uqrshrn16b(<8 x i8>* %
 define <8 x i16> @uqrshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
 ;CHECK-LABEL: uqrshrn8h:
 ;CHECK: uqrshrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>* %ret
-        %tmp1 = load <4 x i32>* %A
+        %out = load <4 x i16>, <4 x i16>* %ret
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %tmp4
@@ -1057,8 +1057,8 @@ define <8 x i16> @uqrshrn8h(<4 x i16>* %
 define <4 x i32> @uqrshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
 ;CHECK-LABEL: uqrshrn4s:
 ;CHECK: uqrshrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>* %ret
-        %tmp1 = load <2 x i64>* %A
+        %out = load <2 x i32>, <2 x i32>* %ret
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %tmp4
@@ -1079,7 +1079,7 @@ define i32 @uqshrn1s(i64 %A) nounwind {
 define <8 x i8> @uqshrn8b(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: uqshrn8b:
 ;CHECK: uqshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
@@ -1087,7 +1087,7 @@ define <8 x i8> @uqshrn8b(<8 x i16>* %A)
 define <4 x i16> @uqshrn4h(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: uqshrn4h:
 ;CHECK: uqshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
@@ -1095,7 +1095,7 @@ define <4 x i16> @uqshrn4h(<4 x i32>* %A
 define <2 x i32> @uqshrn2s(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: uqshrn2s:
 ;CHECK: uqshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
@@ -1103,8 +1103,8 @@ define <2 x i32> @uqshrn2s(<2 x i64>* %A
 define <16 x i8> @uqshrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
 ;CHECK-LABEL: uqshrn16b:
 ;CHECK: uqshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>* %ret
-        %tmp1 = load <8 x i16>* %A
+        %out = load <8 x i8>, <8 x i8>* %ret
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
@@ -1113,8 +1113,8 @@ define <16 x i8> @uqshrn16b(<8 x i8>* %r
 define <8 x i16> @uqshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
 ;CHECK-LABEL: uqshrn8h:
 ;CHECK: uqshrn2.8h v0, {{v[0-9]+}}, #1
-  %out = load <4 x i16>* %ret
-  %tmp1 = load <4 x i32>* %A
+  %out = load <4 x i16>, <4 x i16>* %ret
+  %tmp1 = load <4 x i32>, <4 x i32>* %A
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> %tmp1, i32 1)
   %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x i16> %tmp4
@@ -1123,8 +1123,8 @@ define <8 x i16> @uqshrn8h(<4 x i16>* %r
 define <4 x i32> @uqshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
 ;CHECK-LABEL: uqshrn4s:
 ;CHECK: uqshrn2.4s v0, {{v[0-9]+}}, #1
-  %out = load <2 x i32>* %ret
-  %tmp1 = load <2 x i64>* %A
+  %out = load <2 x i32>, <2 x i32>* %ret
+  %tmp1 = load <2 x i64>, <2 x i64>* %A
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> %tmp1, i32 1)
   %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x i32> %tmp4
@@ -1138,7 +1138,7 @@ declare <2 x i32> @llvm.aarch64.neon.uqs
 define <8 x i16> @ushll8h(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: ushll8h:
 ;CHECK: ushll.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
         %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         ret <8 x i16> %tmp3
@@ -1147,7 +1147,7 @@ define <8 x i16> @ushll8h(<8 x i8>* %A)
 define <4 x i32> @ushll4s(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: ushll4s:
 ;CHECK: ushll.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
         %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
         ret <4 x i32> %tmp3
@@ -1156,7 +1156,7 @@ define <4 x i32> @ushll4s(<4 x i16>* %A)
 define <2 x i64> @ushll2d(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: ushll2d:
 ;CHECK: ushll.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
         %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
         ret <2 x i64> %tmp3
@@ -1165,7 +1165,7 @@ define <2 x i64> @ushll2d(<2 x i32>* %A)
 define <8 x i16> @ushll2_8h(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: ushll2_8h:
 ;CHECK: ushll2.8h v0, {{v[0-9]+}}, #1
-        %load1 = load <16 x i8>* %A
+        %load1 = load <16 x i8>, <16 x i8>* %A
         %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
         %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -1175,7 +1175,7 @@ define <8 x i16> @ushll2_8h(<16 x i8>* %
 define <4 x i32> @ushll2_4s(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: ushll2_4s:
 ;CHECK: ushll2.4s v0, {{v[0-9]+}}, #1
-        %load1 = load <8 x i16>* %A
+        %load1 = load <8 x i16>, <8 x i16>* %A
         %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
         %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
@@ -1185,7 +1185,7 @@ define <4 x i32> @ushll2_4s(<8 x i16>* %
 define <2 x i64> @ushll2_2d(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: ushll2_2d:
 ;CHECK: ushll2.2d v0, {{v[0-9]+}}, #1
-        %load1 = load <4 x i32>* %A
+        %load1 = load <4 x i32>, <4 x i32>* %A
         %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
         %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
@@ -1195,7 +1195,7 @@ define <2 x i64> @ushll2_2d(<4 x i32>* %
 define <8 x i16> @sshll8h(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: sshll8h:
 ;CHECK: sshll.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
         %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         ret <8 x i16> %tmp3
@@ -1204,7 +1204,7 @@ define <8 x i16> @sshll8h(<8 x i8>* %A)
 define <4 x i32> @sshll4s(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: sshll4s:
 ;CHECK: sshll.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
         %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
         ret <4 x i32> %tmp3
@@ -1213,7 +1213,7 @@ define <4 x i32> @sshll4s(<4 x i16>* %A)
 define <2 x i64> @sshll2d(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: sshll2d:
 ;CHECK: sshll.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
         %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
         ret <2 x i64> %tmp3
@@ -1222,7 +1222,7 @@ define <2 x i64> @sshll2d(<2 x i32>* %A)
 define <8 x i16> @sshll2_8h(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: sshll2_8h:
 ;CHECK: sshll2.8h v0, {{v[0-9]+}}, #1
-        %load1 = load <16 x i8>* %A
+        %load1 = load <16 x i8>, <16 x i8>* %A
         %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
         %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -1232,7 +1232,7 @@ define <8 x i16> @sshll2_8h(<16 x i8>* %
 define <4 x i32> @sshll2_4s(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: sshll2_4s:
 ;CHECK: sshll2.4s v0, {{v[0-9]+}}, #1
-        %load1 = load <8 x i16>* %A
+        %load1 = load <8 x i16>, <8 x i16>* %A
         %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
         %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
@@ -1242,7 +1242,7 @@ define <4 x i32> @sshll2_4s(<8 x i16>* %
 define <2 x i64> @sshll2_2d(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: sshll2_2d:
 ;CHECK: sshll2.2d v0, {{v[0-9]+}}, #1
-        %load1 = load <4 x i32>* %A
+        %load1 = load <4 x i32>, <4 x i32>* %A
         %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
         %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
@@ -1252,7 +1252,7 @@ define <2 x i64> @sshll2_2d(<4 x i32>* %
 define <8 x i8> @sqshli8b(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: sqshli8b:
 ;CHECK: sqshl.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <8 x i8> %tmp3
 }
@@ -1260,7 +1260,7 @@ define <8 x i8> @sqshli8b(<8 x i8>* %A)
 define <4 x i16> @sqshli4h(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: sqshli4h:
 ;CHECK: sqshl.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
         ret <4 x i16> %tmp3
 }
@@ -1268,7 +1268,7 @@ define <4 x i16> @sqshli4h(<4 x i16>* %A
 define <2 x i32> @sqshli2s(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: sqshli2s:
 ;CHECK: sqshl.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
         ret <2 x i32> %tmp3
 }
@@ -1276,7 +1276,7 @@ define <2 x i32> @sqshli2s(<2 x i32>* %A
 define <16 x i8> @sqshli16b(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: sqshli16b:
 ;CHECK: sqshl.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>* %A
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <16 x i8> %tmp3
 }
@@ -1284,7 +1284,7 @@ define <16 x i8> @sqshli16b(<16 x i8>* %
 define <8 x i16> @sqshli8h(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: sqshli8h:
 ;CHECK: sqshl.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
         ret <8 x i16> %tmp3
 }
@@ -1292,7 +1292,7 @@ define <8 x i16> @sqshli8h(<8 x i16>* %A
 define <4 x i32> @sqshli4s(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: sqshli4s:
 ;CHECK: sqshl.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
         ret <4 x i32> %tmp3
 }
@@ -1300,7 +1300,7 @@ define <4 x i32> @sqshli4s(<4 x i32>* %A
 define <2 x i64> @sqshli2d(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: sqshli2d:
 ;CHECK: sqshl.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
         ret <2 x i64> %tmp3
 }
@@ -1308,7 +1308,7 @@ define <2 x i64> @sqshli2d(<2 x i64>* %A
 define <8 x i8> @uqshli8b(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: uqshli8b:
 ;CHECK: uqshl.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <8 x i8> %tmp3
 }
@@ -1317,7 +1317,7 @@ define <8 x i8> @uqshli8b_1(<8 x i8>* %A
 ;CHECK-LABEL: uqshli8b_1:
 ;CHECK: movi.8b [[REG:v[0-9]+]], #0x8
 ;CHECK: uqshl.8b v0, v0, [[REG]]
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>)
         ret <8 x i8> %tmp3
 }
@@ -1325,7 +1325,7 @@ define <8 x i8> @uqshli8b_1(<8 x i8>* %A
 define <4 x i16> @uqshli4h(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: uqshli4h:
 ;CHECK: uqshl.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
         ret <4 x i16> %tmp3
 }
@@ -1333,7 +1333,7 @@ define <4 x i16> @uqshli4h(<4 x i16>* %A
 define <2 x i32> @uqshli2s(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: uqshli2s:
 ;CHECK: uqshl.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
         ret <2 x i32> %tmp3
 }
@@ -1341,7 +1341,7 @@ define <2 x i32> @uqshli2s(<2 x i32>* %A
 define <16 x i8> @uqshli16b(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: uqshli16b:
 ;CHECK: uqshl.16b
-        %tmp1 = load <16 x i8>* %A
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <16 x i8> %tmp3
 }
@@ -1349,7 +1349,7 @@ define <16 x i8> @uqshli16b(<16 x i8>* %
 define <8 x i16> @uqshli8h(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: uqshli8h:
 ;CHECK: uqshl.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
         ret <8 x i16> %tmp3
 }
@@ -1357,7 +1357,7 @@ define <8 x i16> @uqshli8h(<8 x i16>* %A
 define <4 x i32> @uqshli4s(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: uqshli4s:
 ;CHECK: uqshl.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
         ret <4 x i32> %tmp3
 }
@@ -1365,7 +1365,7 @@ define <4 x i32> @uqshli4s(<4 x i32>* %A
 define <2 x i64> @uqshli2d(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: uqshli2d:
 ;CHECK: uqshl.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
         ret <2 x i64> %tmp3
 }
@@ -1373,9 +1373,9 @@ define <2 x i64> @uqshli2d(<2 x i64>* %A
 define <8 x i8> @ursra8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: ursra8b:
 ;CHECK: ursra.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        %tmp4 = load <8 x i8>* %B
+        %tmp4 = load <8 x i8>, <8 x i8>* %B
         %tmp5 = add <8 x i8> %tmp3, %tmp4
         ret <8 x i8> %tmp5
 }
@@ -1383,9 +1383,9 @@ define <8 x i8> @ursra8b(<8 x i8>* %A, <
 define <4 x i16> @ursra4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: ursra4h:
 ;CHECK: ursra.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
-        %tmp4 = load <4 x i16>* %B
+        %tmp4 = load <4 x i16>, <4 x i16>* %B
         %tmp5 = add <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
 }
@@ -1393,9 +1393,9 @@ define <4 x i16> @ursra4h(<4 x i16>* %A,
 define <2 x i32> @ursra2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: ursra2s:
 ;CHECK: ursra.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
-        %tmp4 = load <2 x i32>* %B
+        %tmp4 = load <2 x i32>, <2 x i32>* %B
         %tmp5 = add <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
 }
@@ -1403,9 +1403,9 @@ define <2 x i32> @ursra2s(<2 x i32>* %A,
 define <16 x i8> @ursra16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: ursra16b:
 ;CHECK: ursra.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>* %A
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        %tmp4 = load <16 x i8>* %B
+        %tmp4 = load <16 x i8>, <16 x i8>* %B
         %tmp5 = add <16 x i8> %tmp3, %tmp4
        ret <16 x i8> %tmp5
 }
@@ -1413,9 +1413,9 @@ define <16 x i8> @ursra16b(<16 x i8>* %A
 define <8 x i16> @ursra8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: ursra8h:
 ;CHECK: ursra.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
-        %tmp4 = load <8 x i16>* %B
+        %tmp4 = load <8 x i16>, <8 x i16>* %B
         %tmp5 = add <8 x i16> %tmp3, %tmp4
        ret <8 x i16> %tmp5
 }
@@ -1423,9 +1423,9 @@ define <8 x i16> @ursra8h(<8 x i16>* %A,
 define <4 x i32> @ursra4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: ursra4s:
 ;CHECK: ursra.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
-        %tmp4 = load <4 x i32>* %B
+        %tmp4 = load <4 x i32>, <4 x i32>* %B
         %tmp5 = add <4 x i32> %tmp3, %tmp4
        ret <4 x i32> %tmp5
 }
@@ -1433,9 +1433,9 @@ define <4 x i32> @ursra4s(<4 x i32>* %A,
 define <2 x i64> @ursra2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: ursra2d:
 ;CHECK: ursra.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
-        %tmp4 = load <2 x i64>* %B
+        %tmp4 = load <2 x i64>, <2 x i64>* %B
         %tmp5 = add <2 x i64> %tmp3, %tmp4
        ret <2 x i64> %tmp5
 }
@@ -1443,9 +1443,9 @@ define <2 x i64> @ursra2d(<2 x i64>* %A,
 define <8 x i8> @srsra8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: srsra8b:
 ;CHECK: srsra.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        %tmp4 = load <8 x i8>* %B
+        %tmp4 = load <8 x i8>, <8 x i8>* %B
         %tmp5 = add <8 x i8> %tmp3, %tmp4
         ret <8 x i8> %tmp5
 }
@@ -1453,9 +1453,9 @@ define <8 x i8> @srsra8b(<8 x i8>* %A, <
 define <4 x i16> @srsra4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: srsra4h:
 ;CHECK: srsra.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
-        %tmp4 = load <4 x i16>* %B
+        %tmp4 = load <4 x i16>, <4 x i16>* %B
         %tmp5 = add <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
 }
@@ -1463,9 +1463,9 @@ define <4 x i16> @srsra4h(<4 x i16>* %A,
 define <2 x i32> @srsra2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: srsra2s:
 ;CHECK: srsra.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
-        %tmp4 = load <2 x i32>* %B
+        %tmp4 = load <2 x i32>, <2 x i32>* %B
         %tmp5 = add <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
 }
@@ -1473,9 +1473,9 @@ define <2 x i32> @srsra2s(<2 x i32>* %A,
 define <16 x i8> @srsra16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: srsra16b:
 ;CHECK: srsra.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>* %A
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        %tmp4 = load <16 x i8>* %B
+        %tmp4 = load <16 x i8>, <16 x i8>* %B
         %tmp5 = add <16 x i8> %tmp3, %tmp4
        ret <16 x i8> %tmp5
 }
@@ -1483,9 +1483,9 @@ define <16 x i8> @srsra16b(<16 x i8>* %A
 define <8 x i16> @srsra8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: srsra8h:
 ;CHECK: srsra.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
-        %tmp4 = load <8 x i16>* %B
+        %tmp4 = load <8 x i16>, <8 x i16>* %B
         %tmp5 = add <8 x i16> %tmp3, %tmp4
        ret <8 x i16> %tmp5
 }
@@ -1493,9 +1493,9 @@ define <8 x i16> @srsra8h(<8 x i16>* %A,
 define <4 x i32> @srsra4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: srsra4s:
 ;CHECK: srsra.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
-        %tmp4 = load <4 x i32>* %B
+        %tmp4 = load <4 x i32>, <4 x i32>* %B
         %tmp5 = add <4 x i32> %tmp3, %tmp4
          ret <4 x i32> %tmp5
 }
@@ -1503,9 +1503,9 @@ define <4 x i32> @srsra4s(<4 x i32>* %A,
 define <2 x i64> @srsra2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: srsra2d:
 ;CHECK: srsra.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
-        %tmp4 = load <2 x i64>* %B
+        %tmp4 = load <2 x i64>, <2 x i64>* %B
         %tmp5 = add <2 x i64> %tmp3, %tmp4
          ret <2 x i64> %tmp5
 }
@@ -1513,9 +1513,9 @@ define <2 x i64> @srsra2d(<2 x i64>* %A,
 define <8 x i8> @usra8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: usra8b:
 ;CHECK: usra.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %tmp3 = lshr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp4 = load <8 x i8>* %B
+        %tmp4 = load <8 x i8>, <8 x i8>* %B
         %tmp5 = add <8 x i8> %tmp3, %tmp4
         ret <8 x i8> %tmp5
 }
@@ -1523,9 +1523,9 @@ define <8 x i8> @usra8b(<8 x i8>* %A, <8
 define <4 x i16> @usra4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: usra4h:
 ;CHECK: usra.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %tmp3 = lshr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
-        %tmp4 = load <4 x i16>* %B
+        %tmp4 = load <4 x i16>, <4 x i16>* %B
         %tmp5 = add <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
 }
@@ -1533,9 +1533,9 @@ define <4 x i16> @usra4h(<4 x i16>* %A,
 define <2 x i32> @usra2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: usra2s:
 ;CHECK: usra.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %tmp3 = lshr <2 x i32> %tmp1, <i32 1, i32 1>
-        %tmp4 = load <2 x i32>* %B
+        %tmp4 = load <2 x i32>, <2 x i32>* %B
         %tmp5 = add <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
 }
@@ -1543,9 +1543,9 @@ define <2 x i32> @usra2s(<2 x i32>* %A,
 define <16 x i8> @usra16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: usra16b:
 ;CHECK: usra.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>* %A
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
         %tmp3 = lshr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp4 = load <16 x i8>* %B
+        %tmp4 = load <16 x i8>, <16 x i8>* %B
         %tmp5 = add <16 x i8> %tmp3, %tmp4
          ret <16 x i8> %tmp5
 }
@@ -1553,9 +1553,9 @@ define <16 x i8> @usra16b(<16 x i8>* %A,
 define <8 x i16> @usra8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: usra8h:
 ;CHECK: usra.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        %tmp4 = load <8 x i16>* %B
+        %tmp4 = load <8 x i16>, <8 x i16>* %B
         %tmp5 = add <8 x i16> %tmp3, %tmp4
          ret <8 x i16> %tmp5
 }
@@ -1563,9 +1563,9 @@ define <8 x i16> @usra8h(<8 x i16>* %A,
 define <4 x i32> @usra4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: usra4s:
 ;CHECK: usra.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
-        %tmp4 = load <4 x i32>* %B
+        %tmp4 = load <4 x i32>, <4 x i32>* %B
         %tmp5 = add <4 x i32> %tmp3, %tmp4
          ret <4 x i32> %tmp5
 }
@@ -1573,9 +1573,9 @@ define <4 x i32> @usra4s(<4 x i32>* %A,
 define <2 x i64> @usra2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: usra2d:
 ;CHECK: usra.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
-        %tmp4 = load <2 x i64>* %B
+        %tmp4 = load <2 x i64>, <2 x i64>* %B
         %tmp5 = add <2 x i64> %tmp3, %tmp4
          ret <2 x i64> %tmp5
 }
@@ -1583,9 +1583,9 @@ define <2 x i64> @usra2d(<2 x i64>* %A,
 define <8 x i8> @ssra8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: ssra8b:
 ;CHECK: ssra.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %tmp3 = ashr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp4 = load <8 x i8>* %B
+        %tmp4 = load <8 x i8>, <8 x i8>* %B
         %tmp5 = add <8 x i8> %tmp3, %tmp4
         ret <8 x i8> %tmp5
 }
@@ -1593,9 +1593,9 @@ define <8 x i8> @ssra8b(<8 x i8>* %A, <8
 define <4 x i16> @ssra4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: ssra4h:
 ;CHECK: ssra.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %tmp3 = ashr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
-        %tmp4 = load <4 x i16>* %B
+        %tmp4 = load <4 x i16>, <4 x i16>* %B
         %tmp5 = add <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
 }
@@ -1603,9 +1603,9 @@ define <4 x i16> @ssra4h(<4 x i16>* %A,
 define <2 x i32> @ssra2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: ssra2s:
 ;CHECK: ssra.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>* %A
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
         %tmp3 = ashr <2 x i32> %tmp1, <i32 1, i32 1>
-        %tmp4 = load <2 x i32>* %B
+        %tmp4 = load <2 x i32>, <2 x i32>* %B
         %tmp5 = add <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
 }
@@ -1613,9 +1613,9 @@ define <2 x i32> @ssra2s(<2 x i32>* %A,
 define <16 x i8> @ssra16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: ssra16b:
 ;CHECK: ssra.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>* %A
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
         %tmp3 = ashr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp4 = load <16 x i8>* %B
+        %tmp4 = load <16 x i8>, <16 x i8>* %B
         %tmp5 = add <16 x i8> %tmp3, %tmp4
          ret <16 x i8> %tmp5
 }
@@ -1623,9 +1623,9 @@ define <16 x i8> @ssra16b(<16 x i8>* %A,
 define <8 x i16> @ssra8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: ssra8h:
 ;CHECK: ssra.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp3 = ashr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        %tmp4 = load <8 x i16>* %B
+        %tmp4 = load <8 x i16>, <8 x i16>* %B
         %tmp5 = add <8 x i16> %tmp3, %tmp4
          ret <8 x i16> %tmp5
 }
@@ -1633,9 +1633,9 @@ define <8 x i16> @ssra8h(<8 x i16>* %A,
 define <4 x i32> @ssra4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: ssra4s:
 ;CHECK: ssra.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %tmp3 = ashr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
-        %tmp4 = load <4 x i32>* %B
+        %tmp4 = load <4 x i32>, <4 x i32>* %B
         %tmp5 = add <4 x i32> %tmp3, %tmp4
          ret <4 x i32> %tmp5
 }
@@ -1643,9 +1643,9 @@ define <4 x i32> @ssra4s(<4 x i32>* %A,
 define <2 x i64> @ssra2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: ssra2d:
 ;CHECK: ssra.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
         %tmp3 = ashr <2 x i64> %tmp1, <i64 1, i64 1>
-        %tmp4 = load <2 x i64>* %B
+        %tmp4 = load <2 x i64>, <2 x i64>* %B
         %tmp5 = add <2 x i64> %tmp3, %tmp4
          ret <2 x i64> %tmp5
 }
@@ -1655,8 +1655,8 @@ define <8 x i8> @shr_orr8b(<8 x i8>* %A,
 ;CHECK: shr.8b v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.8b
 ;CHECK-NEXT: ret
-        %tmp1 = load <8 x i8>* %A
-        %tmp4 = load <8 x i8>* %B
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp4 = load <8 x i8>, <8 x i8>* %B
         %tmp3 = lshr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
         %tmp5 = or <8 x i8> %tmp3, %tmp4
         ret <8 x i8> %tmp5
@@ -1667,8 +1667,8 @@ define <4 x i16> @shr_orr4h(<4 x i16>* %
 ;CHECK: shr.4h v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.8b
 ;CHECK-NEXT: ret
-        %tmp1 = load <4 x i16>* %A
-        %tmp4 = load <4 x i16>* %B
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp4 = load <4 x i16>, <4 x i16>* %B
         %tmp3 = lshr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
         %tmp5 = or <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
@@ -1679,8 +1679,8 @@ define <2 x i32> @shr_orr2s(<2 x i32>* %
 ;CHECK: shr.2s v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.8b
 ;CHECK-NEXT: ret
-        %tmp1 = load <2 x i32>* %A
-        %tmp4 = load <2 x i32>* %B
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp4 = load <2 x i32>, <2 x i32>* %B
         %tmp3 = lshr <2 x i32> %tmp1, <i32 1, i32 1>
         %tmp5 = or <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
@@ -1691,8 +1691,8 @@ define <16 x i8> @shr_orr16b(<16 x i8>*
 ;CHECK: shr.16b v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <16 x i8>* %A
-        %tmp4 = load <16 x i8>* %B
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp4 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = lshr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
         %tmp5 = or <16 x i8> %tmp3, %tmp4
          ret <16 x i8> %tmp5
@@ -1703,8 +1703,8 @@ define <8 x i16> @shr_orr8h(<8 x i16>* %
 ;CHECK: shr.8h v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <8 x i16>* %A
-        %tmp4 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp4 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         %tmp5 = or <8 x i16> %tmp3, %tmp4
          ret <8 x i16> %tmp5
@@ -1715,8 +1715,8 @@ define <4 x i32> @shr_orr4s(<4 x i32>* %
 ;CHECK: shr.4s v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <4 x i32>* %A
-        %tmp4 = load <4 x i32>* %B
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp4 = load <4 x i32>, <4 x i32>* %B
         %tmp3 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
         %tmp5 = or <4 x i32> %tmp3, %tmp4
          ret <4 x i32> %tmp5
@@ -1727,8 +1727,8 @@ define <2 x i64> @shr_orr2d(<2 x i64>* %
 ;CHECK: shr.2d v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <2 x i64>* %A
-        %tmp4 = load <2 x i64>* %B
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp4 = load <2 x i64>, <2 x i64>* %B
         %tmp3 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
         %tmp5 = or <2 x i64> %tmp3, %tmp4
          ret <2 x i64> %tmp5
@@ -1739,8 +1739,8 @@ define <8 x i8> @shl_orr8b(<8 x i8>* %A,
 ;CHECK: shl.8b v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.8b
 ;CHECK-NEXT: ret
-        %tmp1 = load <8 x i8>* %A
-        %tmp4 = load <8 x i8>* %B
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp4 = load <8 x i8>, <8 x i8>* %B
         %tmp3 = shl <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
         %tmp5 = or <8 x i8> %tmp3, %tmp4
         ret <8 x i8> %tmp5
@@ -1751,8 +1751,8 @@ define <4 x i16> @shl_orr4h(<4 x i16>* %
 ;CHECK: shl.4h v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.8b
 ;CHECK-NEXT: ret
-        %tmp1 = load <4 x i16>* %A
-        %tmp4 = load <4 x i16>* %B
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp4 = load <4 x i16>, <4 x i16>* %B
         %tmp3 = shl <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
         %tmp5 = or <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
@@ -1763,8 +1763,8 @@ define <2 x i32> @shl_orr2s(<2 x i32>* %
 ;CHECK: shl.2s v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.8b
 ;CHECK-NEXT: ret
-        %tmp1 = load <2 x i32>* %A
-        %tmp4 = load <2 x i32>* %B
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp4 = load <2 x i32>, <2 x i32>* %B
         %tmp3 = shl <2 x i32> %tmp1, <i32 1, i32 1>
         %tmp5 = or <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
@@ -1775,8 +1775,8 @@ define <16 x i8> @shl_orr16b(<16 x i8>*
 ;CHECK: shl.16b v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <16 x i8>* %A
-        %tmp4 = load <16 x i8>* %B
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp4 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = shl <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
         %tmp5 = or <16 x i8> %tmp3, %tmp4
          ret <16 x i8> %tmp5
@@ -1787,8 +1787,8 @@ define <8 x i16> @shl_orr8h(<8 x i16>* %
 ;CHECK: shl.8h v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <8 x i16>* %A
-        %tmp4 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp4 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = shl <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         %tmp5 = or <8 x i16> %tmp3, %tmp4
          ret <8 x i16> %tmp5
@@ -1799,8 +1799,8 @@ define <4 x i32> @shl_orr4s(<4 x i32>* %
 ;CHECK: shl.4s v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <4 x i32>* %A
-        %tmp4 = load <4 x i32>* %B
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp4 = load <4 x i32>, <4 x i32>* %B
         %tmp3 = shl <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
         %tmp5 = or <4 x i32> %tmp3, %tmp4
          ret <4 x i32> %tmp5
@@ -1811,8 +1811,8 @@ define <2 x i64> @shl_orr2d(<2 x i64>* %
 ;CHECK: shl.2d v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <2 x i64>* %A
-        %tmp4 = load <2 x i64>* %B
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp4 = load <2 x i64>, <2 x i64>* %B
         %tmp3 = shl <2 x i64> %tmp1, <i64 1, i64 1>
         %tmp5 = or <2 x i64> %tmp3, %tmp4
          ret <2 x i64> %tmp5
@@ -1838,8 +1838,8 @@ define <4 x i32> @shll_high(<8 x i16> %i
 define <8 x i8> @sli8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: sli8b:
 ;CHECK: sli.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>* %A
-        %tmp2 = load <8 x i8>* %B
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp2 = load <8 x i8>, <8 x i8>* %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, i32 1)
         ret <8 x i8> %tmp3
 }
@@ -1847,8 +1847,8 @@ define <8 x i8> @sli8b(<8 x i8>* %A, <8
 define <4 x i16> @sli4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: sli4h:
 ;CHECK: sli.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>* %A
-        %tmp2 = load <4 x i16>* %B
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp2 = load <4 x i16>, <4 x i16>* %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, i32 1)
         ret <4 x i16> %tmp3
 }
@@ -1856,8 +1856,8 @@ define <4 x i16> @sli4h(<4 x i16>* %A, <
 define <2 x i32> @sli2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: sli2s:
 ;CHECK: sli.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>* %A
-        %tmp2 = load <2 x i32>* %B
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp2 = load <2 x i32>, <2 x i32>* %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.vsli.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, i32 1)
         ret <2 x i32> %tmp3
 }
@@ -1865,8 +1865,8 @@ define <2 x i32> @sli2s(<2 x i32>* %A, <
 define <1 x i64> @sli1d(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK-LABEL: sli1d:
 ;CHECK: sli d0, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>* %A
-        %tmp2 = load <1 x i64>* %B
+        %tmp1 = load <1 x i64>, <1 x i64>* %A
+        %tmp2 = load <1 x i64>, <1 x i64>* %B
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, i32 1)
         ret <1 x i64> %tmp3
 }
@@ -1874,8 +1874,8 @@ define <1 x i64> @sli1d(<1 x i64>* %A, <
 define <16 x i8> @sli16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: sli16b:
 ;CHECK: sli.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, i32 1)
         ret <16 x i8> %tmp3
 }
@@ -1883,8 +1883,8 @@ define <16 x i8> @sli16b(<16 x i8>* %A,
 define <8 x i16> @sli8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: sli8h:
 ;CHECK: sli.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>* %A
-        %tmp2 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, i32 1)
         ret <8 x i16> %tmp3
 }
@@ -1892,8 +1892,8 @@ define <8 x i16> @sli8h(<8 x i16>* %A, <
 define <4 x i32> @sli4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: sli4s:
 ;CHECK: sli.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>* %A
-        %tmp2 = load <4 x i32>* %B
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp2 = load <4 x i32>, <4 x i32>* %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.vsli.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, i32 1)
         ret <4 x i32> %tmp3
 }
@@ -1901,8 +1901,8 @@ define <4 x i32> @sli4s(<4 x i32>* %A, <
 define <2 x i64> @sli2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: sli2d:
 ;CHECK: sli.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>* %A
-        %tmp2 = load <2 x i64>* %B
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i64>, <2 x i64>* %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, i32 1)
         ret <2 x i64> %tmp3
 }
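
For reference, every hunk above follows the same mechanical pattern: the load instruction now spells out the loaded type explicitly before the pointer operand, rather than inferring it from the pointer's pointee type. A minimal standalone sketch of the change (the function name @example is illustrative only, not taken from these tests):

define <8 x i8> @example(<8 x i8>* %p) {
  ; old form: %v = load <8 x i8>* %p   (result type implied by %p)
  ; new form: the loaded type is written explicitly, followed by
  ; the unchanged pointer-typed operand:
  %v = load <8 x i8>, <8 x i8>* %p
  ret <8 x i8> %v
}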

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vshr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vshr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vshr.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vshr.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ entry:
   %b.addr = alloca <8 x i16>, align 16
   store <8 x i16> %a, <8 x i16>* %a.addr, align 16
   store <8 x i16> %b, <8 x i16>* %b.addr, align 16
-  %0 = load <8 x i16>* %a.addr, align 16
-  %1 = load <8 x i16>* %b.addr, align 16
+  %0 = load <8 x i16>, <8 x i16>* %a.addr, align 16
+  %1 = load <8 x i16>, <8 x i16>* %b.addr, align 16
   %shr = ashr <8 x i16> %0, %1
   ret <8 x i16> %shr
 }
@@ -25,8 +25,8 @@ entry:
   %b.addr = alloca <4 x i32>, align 32
   store <4 x i32> %a, <4 x i32>* %a.addr, align 32
   store <4 x i32> %b, <4 x i32>* %b.addr, align 32
-  %0 = load <4 x i32>* %a.addr, align 32
-  %1 = load <4 x i32>* %b.addr, align 32
+  %0 = load <4 x i32>, <4 x i32>* %a.addr, align 32
+  %1 = load <4 x i32>, <4 x i32>* %b.addr, align 32
   %shr = ashr <4 x i32> %0, %1
   ret <4 x i32> %shr
 }
@@ -40,8 +40,8 @@ entry:
   %b.addr = alloca <8 x i16>, align 16
   store <8 x i16> %a, <8 x i16>* %a.addr, align 16
   store <8 x i16> %b, <8 x i16>* %b.addr, align 16
-  %0 = load <8 x i16>* %a.addr, align 16
-  %1 = load <8 x i16>* %b.addr, align 16
+  %0 = load <8 x i16>, <8 x i16>* %a.addr, align 16
+  %1 = load <8 x i16>, <8 x i16>* %b.addr, align 16
   %shr = lshr <8 x i16> %0, %1
   ret <8 x i16> %shr
 }

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vsqrt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vsqrt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vsqrt.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vsqrt.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <2 x float> @frecps_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: frecps_2s:
 ;CHECK: frecps.2s
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.frecps.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -12,8 +12,8 @@ define <2 x float> @frecps_2s(<2 x float
 define <4 x float> @frecps_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: frecps_4s:
 ;CHECK: frecps.4s
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.frecps.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x float> @frecps_4s(<4 x float
 define <2 x double> @frecps_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
 ;CHECK-LABEL: frecps_2d:
 ;CHECK: frecps.2d
-	%tmp1 = load <2 x double>* %A
-	%tmp2 = load <2 x double>* %B
+	%tmp1 = load <2 x double>, <2 x double>* %A
+	%tmp2 = load <2 x double>, <2 x double>* %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.frecps.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -35,8 +35,8 @@ declare <2 x double> @llvm.aarch64.neon.
 define <2 x float> @frsqrts_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK-LABEL: frsqrts_2s:
 ;CHECK: frsqrts.2s
-	%tmp1 = load <2 x float>* %A
-	%tmp2 = load <2 x float>* %B
+	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.frsqrts.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
@@ -44,8 +44,8 @@ define <2 x float> @frsqrts_2s(<2 x floa
 define <4 x float> @frsqrts_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK-LABEL: frsqrts_4s:
 ;CHECK: frsqrts.4s
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.frsqrts.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
@@ -53,8 +53,8 @@ define <4 x float> @frsqrts_4s(<4 x floa
 define <2 x double> @frsqrts_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
 ;CHECK-LABEL: frsqrts_2d:
 ;CHECK: frsqrts.2d
-	%tmp1 = load <2 x double>* %A
-	%tmp2 = load <2 x double>* %B
+	%tmp1 = load <2 x double>, <2 x double>* %A
+	%tmp2 = load <2 x double>, <2 x double>* %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.frsqrts.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -66,7 +66,7 @@ declare <2 x double> @llvm.aarch64.neon.
 define <2 x float> @frecpe_2s(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: frecpe_2s:
 ;CHECK: frecpe.2s
-	%tmp1 = load <2 x float>* %A
+	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.frecpe.v2f32(<2 x float> %tmp1)
 	ret <2 x float> %tmp3
 }
@@ -74,7 +74,7 @@ define <2 x float> @frecpe_2s(<2 x float
 define <4 x float> @frecpe_4s(<4 x float>* %A) nounwind {
 ;CHECK-LABEL: frecpe_4s:
 ;CHECK: frecpe.4s
-	%tmp1 = load <4 x float>* %A
+	%tmp1 = load <4 x float>, <4 x float>* %A
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.frecpe.v4f32(<4 x float> %tmp1)
 	ret <4 x float> %tmp3
 }
@@ -82,7 +82,7 @@ define <4 x float> @frecpe_4s(<4 x float
 define <2 x double> @frecpe_2d(<2 x double>* %A) nounwind {
 ;CHECK-LABEL: frecpe_2d:
 ;CHECK: frecpe.2d
-	%tmp1 = load <2 x double>* %A
+	%tmp1 = load <2 x double>, <2 x double>* %A
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.frecpe.v2f64(<2 x double> %tmp1)
 	ret <2 x double> %tmp3
 }
@@ -90,7 +90,7 @@ define <2 x double> @frecpe_2d(<2 x doub
 define float @frecpe_s(float* %A) nounwind {
 ;CHECK-LABEL: frecpe_s:
 ;CHECK: frecpe s0, {{s[0-9]+}}
-  %tmp1 = load float* %A
+  %tmp1 = load float, float* %A
   %tmp3 = call float @llvm.aarch64.neon.frecpe.f32(float %tmp1)
   ret float %tmp3
 }
@@ -98,7 +98,7 @@ define float @frecpe_s(float* %A) nounwi
 define double @frecpe_d(double* %A) nounwind {
 ;CHECK-LABEL: frecpe_d:
 ;CHECK: frecpe d0, {{d[0-9]+}}
-  %tmp1 = load double* %A
+  %tmp1 = load double, double* %A
   %tmp3 = call double @llvm.aarch64.neon.frecpe.f64(double %tmp1)
   ret double %tmp3
 }
@@ -112,7 +112,7 @@ declare double @llvm.aarch64.neon.frecpe
 define float @frecpx_s(float* %A) nounwind {
 ;CHECK-LABEL: frecpx_s:
 ;CHECK: frecpx s0, {{s[0-9]+}}
-  %tmp1 = load float* %A
+  %tmp1 = load float, float* %A
   %tmp3 = call float @llvm.aarch64.neon.frecpx.f32(float %tmp1)
   ret float %tmp3
 }
@@ -120,7 +120,7 @@ define float @frecpx_s(float* %A) nounwi
 define double @frecpx_d(double* %A) nounwind {
 ;CHECK-LABEL: frecpx_d:
 ;CHECK: frecpx d0, {{d[0-9]+}}
-  %tmp1 = load double* %A
+  %tmp1 = load double, double* %A
   %tmp3 = call double @llvm.aarch64.neon.frecpx.f64(double %tmp1)
   ret double %tmp3
 }
@@ -131,7 +131,7 @@ declare double @llvm.aarch64.neon.frecpx
 define <2 x float> @frsqrte_2s(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: frsqrte_2s:
 ;CHECK: frsqrte.2s
-	%tmp1 = load <2 x float>* %A
+	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.frsqrte.v2f32(<2 x float> %tmp1)
 	ret <2 x float> %tmp3
 }
@@ -139,7 +139,7 @@ define <2 x float> @frsqrte_2s(<2 x floa
 define <4 x float> @frsqrte_4s(<4 x float>* %A) nounwind {
 ;CHECK-LABEL: frsqrte_4s:
 ;CHECK: frsqrte.4s
-	%tmp1 = load <4 x float>* %A
+	%tmp1 = load <4 x float>, <4 x float>* %A
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.frsqrte.v4f32(<4 x float> %tmp1)
 	ret <4 x float> %tmp3
 }
@@ -147,7 +147,7 @@ define <4 x float> @frsqrte_4s(<4 x floa
 define <2 x double> @frsqrte_2d(<2 x double>* %A) nounwind {
 ;CHECK-LABEL: frsqrte_2d:
 ;CHECK: frsqrte.2d
-	%tmp1 = load <2 x double>* %A
+	%tmp1 = load <2 x double>, <2 x double>* %A
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.frsqrte.v2f64(<2 x double> %tmp1)
 	ret <2 x double> %tmp3
 }
@@ -155,7 +155,7 @@ define <2 x double> @frsqrte_2d(<2 x dou
 define float @frsqrte_s(float* %A) nounwind {
 ;CHECK-LABEL: frsqrte_s:
 ;CHECK: frsqrte s0, {{s[0-9]+}}
-  %tmp1 = load float* %A
+  %tmp1 = load float, float* %A
   %tmp3 = call float @llvm.aarch64.neon.frsqrte.f32(float %tmp1)
   ret float %tmp3
 }
@@ -163,7 +163,7 @@ define float @frsqrte_s(float* %A) nounw
 define double @frsqrte_d(double* %A) nounwind {
 ;CHECK-LABEL: frsqrte_d:
 ;CHECK: frsqrte d0, {{d[0-9]+}}
-  %tmp1 = load double* %A
+  %tmp1 = load double, double* %A
   %tmp3 = call double @llvm.aarch64.neon.frsqrte.f64(double %tmp1)
   ret double %tmp3
 }
@@ -177,7 +177,7 @@ declare double @llvm.aarch64.neon.frsqrt
 define <2 x i32> @urecpe_2s(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: urecpe_2s:
 ;CHECK: urecpe.2s
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.urecpe.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp3
 }
@@ -185,7 +185,7 @@ define <2 x i32> @urecpe_2s(<2 x i32>* %
 define <4 x i32> @urecpe_4s(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: urecpe_4s:
 ;CHECK: urecpe.4s
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.urecpe.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp3
 }
@@ -196,7 +196,7 @@ declare <4 x i32> @llvm.aarch64.neon.ure
 define <2 x i32> @ursqrte_2s(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: ursqrte_2s:
 ;CHECK: ursqrte.2s
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.ursqrte.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp3
 }
@@ -204,7 +204,7 @@ define <2 x i32> @ursqrte_2s(<2 x i32>*
 define <4 x i32> @ursqrte_4s(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: ursqrte_4s:
 ;CHECK: ursqrte.4s
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.ursqrte.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vsra.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vsra.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vsra.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vsra.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @vsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsras8:
 ;CHECK: ssra.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = ashr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
         %tmp4 = add <8 x i8> %tmp1, %tmp3
 	ret <8 x i8> %tmp4
@@ -13,8 +13,8 @@ define <8 x i8> @vsras8(<8 x i8>* %A, <8
 define <4 x i16> @vsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsras16:
 ;CHECK: ssra.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = ashr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
         %tmp4 = add <4 x i16> %tmp1, %tmp3
 	ret <4 x i16> %tmp4
@@ -23,8 +23,8 @@ define <4 x i16> @vsras16(<4 x i16>* %A,
 define <2 x i32> @vsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsras32:
 ;CHECK: ssra.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = ashr <2 x i32> %tmp2, < i32 31, i32 31 >
         %tmp4 = add <2 x i32> %tmp1, %tmp3
 	ret <2 x i32> %tmp4
@@ -33,8 +33,8 @@ define <2 x i32> @vsras32(<2 x i32>* %A,
 define <16 x i8> @vsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsraQs8:
 ;CHECK: ssra.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = ashr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
         %tmp4 = add <16 x i8> %tmp1, %tmp3
 	ret <16 x i8> %tmp4
@@ -43,8 +43,8 @@ define <16 x i8> @vsraQs8(<16 x i8>* %A,
 define <8 x i16> @vsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsraQs16:
 ;CHECK: ssra.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = ashr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
         %tmp4 = add <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
@@ -53,8 +53,8 @@ define <8 x i16> @vsraQs16(<8 x i16>* %A
 define <4 x i32> @vsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsraQs32:
 ;CHECK: ssra.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = ashr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
         %tmp4 = add <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
@@ -63,8 +63,8 @@ define <4 x i32> @vsraQs32(<4 x i32>* %A
 define <2 x i64> @vsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vsraQs64:
 ;CHECK: ssra.2d
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = ashr <2 x i64> %tmp2, < i64 63, i64 63 >
         %tmp4 = add <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4
@@ -73,8 +73,8 @@ define <2 x i64> @vsraQs64(<2 x i64>* %A
 define <8 x i8> @vsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsrau8:
 ;CHECK: usra.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = lshr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
         %tmp4 = add <8 x i8> %tmp1, %tmp3
 	ret <8 x i8> %tmp4
@@ -83,8 +83,8 @@ define <8 x i8> @vsrau8(<8 x i8>* %A, <8
 define <4 x i16> @vsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsrau16:
 ;CHECK: usra.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = lshr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
         %tmp4 = add <4 x i16> %tmp1, %tmp3
 	ret <4 x i16> %tmp4
@@ -93,8 +93,8 @@ define <4 x i16> @vsrau16(<4 x i16>* %A,
 define <2 x i32> @vsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsrau32:
 ;CHECK: usra.2s
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = lshr <2 x i32> %tmp2, < i32 31, i32 31 >
         %tmp4 = add <2 x i32> %tmp1, %tmp3
 	ret <2 x i32> %tmp4
@@ -104,8 +104,8 @@ define <2 x i32> @vsrau32(<2 x i32>* %A,
 define <16 x i8> @vsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: vsraQu8:
 ;CHECK: usra.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = lshr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
         %tmp4 = add <16 x i8> %tmp1, %tmp3
 	ret <16 x i8> %tmp4
@@ -114,8 +114,8 @@ define <16 x i8> @vsraQu8(<16 x i8>* %A,
 define <8 x i16> @vsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: vsraQu16:
 ;CHECK: usra.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = lshr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
         %tmp4 = add <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
@@ -124,8 +124,8 @@ define <8 x i16> @vsraQu16(<8 x i16>* %A
 define <4 x i32> @vsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: vsraQu32:
 ;CHECK: usra.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = lshr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
         %tmp4 = add <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
@@ -134,8 +134,8 @@ define <4 x i32> @vsraQu32(<4 x i32>* %A
 define <2 x i64> @vsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: vsraQu64:
 ;CHECK: usra.2d
-	%tmp1 = load <2 x i64>* %A
-	%tmp2 = load <2 x i64>* %B
+	%tmp1 = load <2 x i64>, <2 x i64>* %A
+	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = lshr <2 x i64> %tmp2, < i64 63, i64 63 >
         %tmp4 = add <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vsub.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vsub.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vsub.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define <8 x i8> @subhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: subhn8b:
 ;CHECK: subhn.8b
-        %tmp1 = load <8 x i16>* %A
-        %tmp2 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.subhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i8> %tmp3
 }
@@ -12,8 +12,8 @@ define <8 x i8> @subhn8b(<8 x i16>* %A,
 define <4 x i16> @subhn4h(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: subhn4h:
 ;CHECK: subhn.4h
-        %tmp1 = load <4 x i32>* %A
-        %tmp2 = load <4 x i32>* %B
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp2 = load <4 x i32>, <4 x i32>* %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.subhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i16> %tmp3
 }
@@ -21,8 +21,8 @@ define <4 x i16> @subhn4h(<4 x i32>* %A,
 define <2 x i32> @subhn2s(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: subhn2s:
 ;CHECK: subhn.2s
-        %tmp1 = load <2 x i64>* %A
-        %tmp2 = load <2 x i64>* %B
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i64>, <2 x i64>* %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.subhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i32> %tmp3
 }
@@ -64,8 +64,8 @@ declare <8 x i8> @llvm.aarch64.neon.subh
 define <8 x i8> @rsubhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: rsubhn8b:
 ;CHECK: rsubhn.8b
-        %tmp1 = load <8 x i16>* %A
-        %tmp2 = load <8 x i16>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i8> %tmp3
 }
@@ -73,8 +73,8 @@ define <8 x i8> @rsubhn8b(<8 x i16>* %A,
 define <4 x i16> @rsubhn4h(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: rsubhn4h:
 ;CHECK: rsubhn.4h
-        %tmp1 = load <4 x i32>* %A
-        %tmp2 = load <4 x i32>* %B
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp2 = load <4 x i32>, <4 x i32>* %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i16> %tmp3
 }
@@ -82,8 +82,8 @@ define <4 x i16> @rsubhn4h(<4 x i32>* %A
 define <2 x i32> @rsubhn2s(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK-LABEL: rsubhn2s:
 ;CHECK: rsubhn.2s
-        %tmp1 = load <2 x i64>* %A
-        %tmp2 = load <2 x i64>* %B
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i64>, <2 x i64>* %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i32> %tmp3
 }
@@ -125,8 +125,8 @@ declare <8 x i8> @llvm.aarch64.neon.rsub
 define <8 x i16> @ssubl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: ssubl8h:
 ;CHECK: ssubl.8h
-        %tmp1 = load <8 x i8>* %A
-        %tmp2 = load <8 x i8>* %B
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp2 = load <8 x i8>, <8 x i8>* %B
   %tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
   %tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = sub <8 x i16> %tmp3, %tmp4
@@ -136,8 +136,8 @@ define <8 x i16> @ssubl8h(<8 x i8>* %A,
 define <4 x i32> @ssubl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: ssubl4s:
 ;CHECK: ssubl.4s
-        %tmp1 = load <4 x i16>* %A
-        %tmp2 = load <4 x i16>* %B
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
   %tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = sub <4 x i32> %tmp3, %tmp4
@@ -147,8 +147,8 @@ define <4 x i32> @ssubl4s(<4 x i16>* %A,
 define <2 x i64> @ssubl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: ssubl2d:
 ;CHECK: ssubl.2d
-        %tmp1 = load <2 x i32>* %A
-        %tmp2 = load <2 x i32>* %B
+        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
   %tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = sub <2 x i64> %tmp3, %tmp4
@@ -158,11 +158,11 @@ define <2 x i64> @ssubl2d(<2 x i32>* %A,
 define <8 x i16> @ssubl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: ssubl2_8h:
 ;CHECK: ssubl2.8h
-        %tmp1 = load <16 x i8>* %A
+        %tmp1 = load <16 x i8>, <16 x i8>* %A
         %high1 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %ext1 = sext <8 x i8> %high1 to <8 x i16>
 
-        %tmp2 = load <16 x i8>* %B
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %ext2 = sext <8 x i8> %high2 to <8 x i16>
 
@@ -173,11 +173,11 @@ define <8 x i16> @ssubl2_8h(<16 x i8>* %
 define <4 x i32> @ssubl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: ssubl2_4s:
 ;CHECK: ssubl2.4s
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
         %high1 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %ext1 = sext <4 x i16> %high1 to <4 x i32>
 
-        %tmp2 = load <8 x i16>* %B
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %ext2 = sext <4 x i16> %high2 to <4 x i32>
 
@@ -188,11 +188,11 @@ define <4 x i32> @ssubl2_4s(<8 x i16>* %
 define <2 x i64> @ssubl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: ssubl2_2d:
 ;CHECK: ssubl2.2d
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
         %high1 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %ext1 = sext <2 x i32> %high1 to <2 x i64>
 
-        %tmp2 = load <4 x i32>* %B
+        %tmp2 = load <4 x i32>, <4 x i32>* %B
         %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %ext2 = sext <2 x i32> %high2 to <2 x i64>
 
@@ -203,8 +203,8 @@ define <2 x i64> @ssubl2_2d(<4 x i32>* %
 define <8 x i16> @usubl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: usubl8h:
 ;CHECK: usubl.8h
-  %tmp1 = load <8 x i8>* %A
-  %tmp2 = load <8 x i8>* %B
+  %tmp1 = load <8 x i8>, <8 x i8>* %A
+  %tmp2 = load <8 x i8>, <8 x i8>* %B
   %tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
   %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = sub <8 x i16> %tmp3, %tmp4
@@ -214,8 +214,8 @@ define <8 x i16> @usubl8h(<8 x i8>* %A,
 define <4 x i32> @usubl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: usubl4s:
 ;CHECK: usubl.4s
-  %tmp1 = load <4 x i16>* %A
-  %tmp2 = load <4 x i16>* %B
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
   %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = sub <4 x i32> %tmp3, %tmp4
@@ -225,8 +225,8 @@ define <4 x i32> @usubl4s(<4 x i16>* %A,
 define <2 x i64> @usubl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: usubl2d:
 ;CHECK: usubl.2d
-  %tmp1 = load <2 x i32>* %A
-  %tmp2 = load <2 x i32>* %B
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
   %tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = sub <2 x i64> %tmp3, %tmp4
@@ -236,11 +236,11 @@ define <2 x i64> @usubl2d(<2 x i32>* %A,
 define <8 x i16> @usubl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: usubl2_8h:
 ;CHECK: usubl2.8h
-  %tmp1 = load <16 x i8>* %A
+  %tmp1 = load <16 x i8>, <16 x i8>* %A
   %high1 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %ext1 = zext <8 x i8> %high1 to <8 x i16>
 
-  %tmp2 = load <16 x i8>* %B
+  %tmp2 = load <16 x i8>, <16 x i8>* %B
   %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %ext2 = zext <8 x i8> %high2 to <8 x i16>
 
@@ -251,11 +251,11 @@ define <8 x i16> @usubl2_8h(<16 x i8>* %
 define <4 x i32> @usubl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: usubl2_4s:
 ;CHECK: usubl2.4s
-  %tmp1 = load <8 x i16>* %A
+  %tmp1 = load <8 x i16>, <8 x i16>* %A
   %high1 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %ext1 = zext <4 x i16> %high1 to <4 x i32>
 
-  %tmp2 = load <8 x i16>* %B
+  %tmp2 = load <8 x i16>, <8 x i16>* %B
   %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %ext2 = zext <4 x i16> %high2 to <4 x i32>
 
@@ -266,11 +266,11 @@ define <4 x i32> @usubl2_4s(<8 x i16>* %
 define <2 x i64> @usubl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: usubl2_2d:
 ;CHECK: usubl2.2d
-  %tmp1 = load <4 x i32>* %A
+  %tmp1 = load <4 x i32>, <4 x i32>* %A
   %high1 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %ext1 = zext <2 x i32> %high1 to <2 x i64>
 
-  %tmp2 = load <4 x i32>* %B
+  %tmp2 = load <4 x i32>, <4 x i32>* %B
   %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %ext2 = zext <2 x i32> %high2 to <2 x i64>
 
@@ -281,8 +281,8 @@ define <2 x i64> @usubl2_2d(<4 x i32>* %
 define <8 x i16> @ssubw8h(<8 x i16>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: ssubw8h:
 ;CHECK: ssubw.8h
-        %tmp1 = load <8 x i16>* %A
-        %tmp2 = load <8 x i8>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp2 = load <8 x i8>, <8 x i8>* %B
   %tmp3 = sext <8 x i8> %tmp2 to <8 x i16>
   %tmp4 = sub <8 x i16> %tmp1, %tmp3
         ret <8 x i16> %tmp4
@@ -291,8 +291,8 @@ define <8 x i16> @ssubw8h(<8 x i16>* %A,
 define <4 x i32> @ssubw4s(<4 x i32>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: ssubw4s:
 ;CHECK: ssubw.4s
-        %tmp1 = load <4 x i32>* %A
-        %tmp2 = load <4 x i16>* %B
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = sext <4 x i16> %tmp2 to <4 x i32>
   %tmp4 = sub <4 x i32> %tmp1, %tmp3
         ret <4 x i32> %tmp4
@@ -301,8 +301,8 @@ define <4 x i32> @ssubw4s(<4 x i32>* %A,
 define <2 x i64> @ssubw2d(<2 x i64>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: ssubw2d:
 ;CHECK: ssubw.2d
-        %tmp1 = load <2 x i64>* %A
-        %tmp2 = load <2 x i32>* %B
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = sext <2 x i32> %tmp2 to <2 x i64>
   %tmp4 = sub <2 x i64> %tmp1, %tmp3
         ret <2 x i64> %tmp4
@@ -311,9 +311,9 @@ define <2 x i64> @ssubw2d(<2 x i64>* %A,
 define <8 x i16> @ssubw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: ssubw2_8h:
 ;CHECK: ssubw2.8h
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
 
-        %tmp2 = load <16 x i8>* %B
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %ext2 = sext <8 x i8> %high2 to <8 x i16>
 
@@ -324,9 +324,9 @@ define <8 x i16> @ssubw2_8h(<8 x i16>* %
 define <4 x i32> @ssubw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: ssubw2_4s:
 ;CHECK: ssubw2.4s
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
 
-        %tmp2 = load <8 x i16>* %B
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %ext2 = sext <4 x i16> %high2 to <4 x i32>
 
@@ -337,9 +337,9 @@ define <4 x i32> @ssubw2_4s(<4 x i32>* %
 define <2 x i64> @ssubw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: ssubw2_2d:
 ;CHECK: ssubw2.2d
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
 
-        %tmp2 = load <4 x i32>* %B
+        %tmp2 = load <4 x i32>, <4 x i32>* %B
         %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %ext2 = sext <2 x i32> %high2 to <2 x i64>
 
@@ -350,8 +350,8 @@ define <2 x i64> @ssubw2_2d(<2 x i64>* %
 define <8 x i16> @usubw8h(<8 x i16>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: usubw8h:
 ;CHECK: usubw.8h
-        %tmp1 = load <8 x i16>* %A
-        %tmp2 = load <8 x i8>* %B
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp2 = load <8 x i8>, <8 x i8>* %B
   %tmp3 = zext <8 x i8> %tmp2 to <8 x i16>
   %tmp4 = sub <8 x i16> %tmp1, %tmp3
         ret <8 x i16> %tmp4
@@ -360,8 +360,8 @@ define <8 x i16> @usubw8h(<8 x i16>* %A,
 define <4 x i32> @usubw4s(<4 x i32>* %A, <4 x i16>* %B) nounwind {
 ;CHECK-LABEL: usubw4s:
 ;CHECK: usubw.4s
-        %tmp1 = load <4 x i32>* %A
-        %tmp2 = load <4 x i16>* %B
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp2 = load <4 x i16>, <4 x i16>* %B
   %tmp3 = zext <4 x i16> %tmp2 to <4 x i32>
   %tmp4 = sub <4 x i32> %tmp1, %tmp3
         ret <4 x i32> %tmp4
@@ -370,8 +370,8 @@ define <4 x i32> @usubw4s(<4 x i32>* %A,
 define <2 x i64> @usubw2d(<2 x i64>* %A, <2 x i32>* %B) nounwind {
 ;CHECK-LABEL: usubw2d:
 ;CHECK: usubw.2d
-        %tmp1 = load <2 x i64>* %A
-        %tmp2 = load <2 x i32>* %B
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i32>, <2 x i32>* %B
   %tmp3 = zext <2 x i32> %tmp2 to <2 x i64>
   %tmp4 = sub <2 x i64> %tmp1, %tmp3
         ret <2 x i64> %tmp4
@@ -380,9 +380,9 @@ define <2 x i64> @usubw2d(<2 x i64>* %A,
 define <8 x i16> @usubw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
 ;CHECK-LABEL: usubw2_8h:
 ;CHECK: usubw2.8h
-        %tmp1 = load <8 x i16>* %A
+        %tmp1 = load <8 x i16>, <8 x i16>* %A
 
-        %tmp2 = load <16 x i8>* %B
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %ext2 = zext <8 x i8> %high2 to <8 x i16>
 
@@ -393,9 +393,9 @@ define <8 x i16> @usubw2_8h(<8 x i16>* %
 define <4 x i32> @usubw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: usubw2_4s:
 ;CHECK: usubw2.4s
-        %tmp1 = load <4 x i32>* %A
+        %tmp1 = load <4 x i32>, <4 x i32>* %A
 
-        %tmp2 = load <8 x i16>* %B
+        %tmp2 = load <8 x i16>, <8 x i16>* %B
         %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %ext2 = zext <4 x i16> %high2 to <4 x i32>
 
@@ -406,9 +406,9 @@ define <4 x i32> @usubw2_4s(<4 x i32>* %
 define <2 x i64> @usubw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
 ;CHECK-LABEL: usubw2_2d:
 ;CHECK: usubw2.2d
-        %tmp1 = load <2 x i64>* %A
+        %tmp1 = load <2 x i64>, <2 x i64>* %A
 
-        %tmp2 = load <4 x i32>* %B
+        %tmp2 = load <4 x i32>, <4 x i32>* %B
         %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %ext2 = zext <2 x i32> %high2 to <2 x i64>
 

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-weak-reference.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-weak-reference.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-weak-reference.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-weak-reference.ll Fri Feb 27 15:17:42 2015
@@ -5,6 +5,6 @@
 define i32 @fn() nounwind ssp {
 ; CHECK-LABEL: fn:
 ; CHECK: .weak_reference
-  %val = load i32* @x, align 4
+  %val = load i32, i32* @x, align 4
   ret i32 %val
 }

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-zextload-unscaled.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-zextload-unscaled.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-zextload-unscaled.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-zextload-unscaled.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ define void @test_zextloadi1_unscaled(i1
 ; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-7]
 
   %addr = getelementptr i1, i1* %base, i32 -7
-  %val = load i1* %addr, align 1
+  %val = load i1, i1* %addr, align 1
 
   %extended = zext i1 %val to i32
   store i32 %extended, i32* @var32, align 4
@@ -19,7 +19,7 @@ define void @test_zextloadi8_unscaled(i8
 ; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-7]
 
   %addr = getelementptr i8, i8* %base, i32 -7
-  %val = load i8* %addr, align 1
+  %val = load i8, i8* %addr, align 1
 
   %extended = zext i8 %val to i32
   store i32 %extended, i32* @var32, align 4
@@ -31,7 +31,7 @@ define void @test_zextloadi16_unscaled(i
 ; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #-14]
 
   %addr = getelementptr i16, i16* %base, i32 -7
-  %val = load i16* %addr, align 2
+  %val = load i16, i16* %addr, align 2
 
   %extended = zext i16 %val to i32
   store i32 %extended, i32* @var32, align 4

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-zip.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-zip.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-zip.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-zip.ll Fri Feb 27 15:17:42 2015
@@ -5,8 +5,8 @@ define <8 x i8> @vzipi8(<8 x i8>* %A, <8
 ;CHECK: zip1.8b
 ;CHECK: zip2.8b
 ;CHECK-NEXT: add.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -18,8 +18,8 @@ define <4 x i16> @vzipi16(<4 x i16>* %A,
 ;CHECK: zip1.4h
 ;CHECK: zip2.4h
 ;CHECK-NEXT: add.4h
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
 	%tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
         %tmp5 = add <4 x i16> %tmp3, %tmp4
@@ -31,8 +31,8 @@ define <16 x i8> @vzipQi8(<16 x i8>* %A,
 ;CHECK: zip1.16b
 ;CHECK: zip2.16b
 ;CHECK-NEXT: add.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
 	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
         %tmp5 = add <16 x i8> %tmp3, %tmp4
@@ -44,8 +44,8 @@ define <8 x i16> @vzipQi16(<8 x i16>* %A
 ;CHECK: zip1.8h
 ;CHECK: zip2.8h
 ;CHECK-NEXT: add.8h
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
 	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
         %tmp5 = add <8 x i16> %tmp3, %tmp4
@@ -57,8 +57,8 @@ define <4 x i32> @vzipQi32(<4 x i32>* %A
 ;CHECK: zip1.4s
 ;CHECK: zip2.4s
 ;CHECK-NEXT: add.4s
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
 	%tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
         %tmp5 = add <4 x i32> %tmp3, %tmp4
@@ -70,8 +70,8 @@ define <4 x float> @vzipQf(<4 x float>*
 ;CHECK: zip1.4s
 ;CHECK: zip2.4s
 ;CHECK-NEXT: fadd.4s
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = load <4 x float>* %B
+	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %B
 	%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
 	%tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
         %tmp5 = fadd <4 x float> %tmp3, %tmp4
@@ -85,8 +85,8 @@ define <8 x i8> @vzipi8_undef(<8 x i8>*
 ;CHECK: zip1.8b
 ;CHECK: zip2.8b
 ;CHECK-NEXT: add.8b
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 1, i32 9, i32 undef, i32 10, i32 3, i32 11>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 undef, i32 undef, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -98,8 +98,8 @@ define <16 x i8> @vzipQi8_undef(<16 x i8
 ;CHECK: zip1.16b
 ;CHECK: zip2.16b
 ;CHECK-NEXT: add.16b
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 undef, i32 undef, i32 undef, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
 	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 undef, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 undef, i32 14, i32 30, i32 undef, i32 31>
         %tmp5 = add <16 x i8> %tmp3, %tmp4

Modified: llvm/trunk/test/CodeGen/AArch64/assertion-rc-mismatch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/assertion-rc-mismatch.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/assertion-rc-mismatch.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/assertion-rc-mismatch.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ if:
 else:
   %tmp3 = call i8* @llvm.returnaddress(i32 0)
   %ptr = getelementptr inbounds i8, i8* %tmp3, i64 -16
-  %ld = load i8* %ptr, align 4
+  %ld = load i8, i8* %ptr, align 4
   %tmp2 = inttoptr i8 %ld to i8*
   br label %end
 end:

Modified: llvm/trunk/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/atomic-ops-not-barriers.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/atomic-ops-not-barriers.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/atomic-ops-not-barriers.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define i32 @foo(i32* %var, i1 %cond) {
 ; CHECK-LABEL: foo:
   br i1 %cond, label %atomic_ver, label %simple_ver
 simple_ver:
-  %oldval = load i32* %var
+  %oldval = load i32, i32* %var
   %newval = add nsw i32 %oldval, -1
   store i32 %newval, i32* %var
   br label %somewhere

Modified: llvm/trunk/test/CodeGen/AArch64/atomic-ops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/atomic-ops.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/atomic-ops.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/atomic-ops.ll Fri Feb 27 15:17:42 2015
@@ -972,7 +972,7 @@ define void @test_atomic_cmpxchg_i64(i64
 
 define i8 @test_atomic_load_monotonic_i8() nounwind {
 ; CHECK-LABEL: test_atomic_load_monotonic_i8:
-  %val = load atomic i8* @var8 monotonic, align 1
+  %val = load atomic i8, i8* @var8 monotonic, align 1
 ; CHECK-NOT: dmb
 ; CHECK: adrp x[[HIADDR:[0-9]+]], var8
 ; CHECK: ldrb w0, [x[[HIADDR]], {{#?}}:lo12:var8]
@@ -986,7 +986,7 @@ define i8 @test_atomic_load_monotonic_re
   %addr_int = add i64 %base, %off
   %addr = inttoptr i64 %addr_int to i8*
 
-  %val = load atomic i8* %addr monotonic, align 1
+  %val = load atomic i8, i8* %addr monotonic, align 1
 ; CHECK-NOT: dmb
 ; CHECK: ldrb w0, [x0, x1]
 ; CHECK-NOT: dmb
@@ -996,7 +996,7 @@ define i8 @test_atomic_load_monotonic_re
 
 define i8 @test_atomic_load_acquire_i8() nounwind {
 ; CHECK-LABEL: test_atomic_load_acquire_i8:
-  %val = load atomic i8* @var8 acquire, align 1
+  %val = load atomic i8, i8* @var8 acquire, align 1
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK-NOT: dmb
@@ -1009,7 +1009,7 @@ define i8 @test_atomic_load_acquire_i8()
 
 define i8 @test_atomic_load_seq_cst_i8() nounwind {
 ; CHECK-LABEL: test_atomic_load_seq_cst_i8:
-  %val = load atomic i8* @var8 seq_cst, align 1
+  %val = load atomic i8, i8* @var8 seq_cst, align 1
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[HIADDR:x[0-9]+]], var8
 ; CHECK-NOT: dmb
@@ -1022,7 +1022,7 @@ define i8 @test_atomic_load_seq_cst_i8()
 
 define i16 @test_atomic_load_monotonic_i16() nounwind {
 ; CHECK-LABEL: test_atomic_load_monotonic_i16:
-  %val = load atomic i16* @var16 monotonic, align 2
+  %val = load atomic i16, i16* @var16 monotonic, align 2
 ; CHECK-NOT: dmb
 ; CHECK: adrp x[[HIADDR:[0-9]+]], var16
 ; CHECK-NOT: dmb
@@ -1037,7 +1037,7 @@ define i32 @test_atomic_load_monotonic_r
   %addr_int = add i64 %base, %off
   %addr = inttoptr i64 %addr_int to i32*
 
-  %val = load atomic i32* %addr monotonic, align 4
+  %val = load atomic i32, i32* %addr monotonic, align 4
 ; CHECK-NOT: dmb
 ; CHECK: ldr w0, [x0, x1]
 ; CHECK-NOT: dmb
@@ -1047,7 +1047,7 @@ define i32 @test_atomic_load_monotonic_r
 
 define i64 @test_atomic_load_seq_cst_i64() nounwind {
 ; CHECK-LABEL: test_atomic_load_seq_cst_i64:
-  %val = load atomic i64* @var64 seq_cst, align 8
+  %val = load atomic i64, i64* @var64 seq_cst, align 8
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[HIADDR:x[0-9]+]], var64
 ; CHECK-NOT: dmb

Modified: llvm/trunk/test/CodeGen/AArch64/basic-pic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/basic-pic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/basic-pic.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/basic-pic.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 define i32 @get_globalvar() {
 ; CHECK-LABEL: get_globalvar:
 
-  %val = load i32* @var
+  %val = load i32, i32* @var
 ; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
 ; CHECK: ldr x[[GOTLOC:[0-9]+]], [x[[GOTHI]], {{#?}}:got_lo12:var]
 ; CHECK: ldr w0, [x[[GOTLOC]]]
@@ -16,7 +16,7 @@ define i32 @get_globalvar() {
 define i32* @get_globalvaraddr() {
 ; CHECK-LABEL: get_globalvaraddr:
 
-  %val = load i32* @var
+  %val = load i32, i32* @var
 ; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
 ; CHECK: ldr x0, [x[[GOTHI]], {{#?}}:got_lo12:var]
 
@@ -28,7 +28,7 @@ define i32* @get_globalvaraddr() {
 define i32 @get_hiddenvar() {
 ; CHECK-LABEL: get_hiddenvar:
 
-  %val = load i32* @hiddenvar
+  %val = load i32, i32* @hiddenvar
 ; CHECK: adrp x[[HI:[0-9]+]], hiddenvar
 ; CHECK: ldr w0, [x[[HI]], {{#?}}:lo12:hiddenvar]
 
@@ -38,7 +38,7 @@ define i32 @get_hiddenvar() {
 define i32* @get_hiddenvaraddr() {
 ; CHECK-LABEL: get_hiddenvaraddr:
 
-  %val = load i32* @hiddenvar
+  %val = load i32, i32* @hiddenvar
 ; CHECK: adrp [[HI:x[0-9]+]], hiddenvar
 ; CHECK: add x0, [[HI]], {{#?}}:lo12:hiddenvar
 

Modified: llvm/trunk/test/CodeGen/AArch64/bitfield-insert-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/bitfield-insert-0.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/bitfield-insert-0.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/bitfield-insert-0.ll Fri Feb 27 15:17:42 2015
@@ -6,10 +6,10 @@
 define void @test_bfi0(i32* %existing, i32* %new) {
 ; CHECK: bfxil {{w[0-9]+}}, {{w[0-9]+}}, #0, #18
 
-  %oldval = load volatile i32* %existing
+  %oldval = load volatile i32, i32* %existing
   %oldval_keep = and i32 %oldval, 4294705152 ; 0xfffc_0000
 
-  %newval = load volatile i32* %new
+  %newval = load volatile i32, i32* %new
   %newval_masked = and i32 %newval, 262143 ; = 0x0003_ffff
 
   %combined = or i32 %newval_masked, %oldval_keep

Modified: llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll Fri Feb 27 15:17:42 2015
@@ -28,10 +28,10 @@ define void @test_whole32(i32* %existing
 
 ; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #26, #5
 
-  %oldval = load volatile i32* %existing
+  %oldval = load volatile i32, i32* %existing
   %oldval_keep = and i32 %oldval, 2214592511 ; =0x83ffffff
 
-  %newval = load volatile i32* %new
+  %newval = load volatile i32, i32* %new
   %newval_shifted = shl i32 %newval, 26
   %newval_masked = and i32 %newval_shifted, 2080374784 ; = 0x7c000000
 
@@ -47,10 +47,10 @@ define void @test_whole64(i64* %existing
 ; CHECK-NOT: and
 ; CHECK: ret
 
-  %oldval = load volatile i64* %existing
+  %oldval = load volatile i64, i64* %existing
   %oldval_keep = and i64 %oldval, 18446742974265032703 ; = 0xffffff0003ffffffL
 
-  %newval = load volatile i64* %new
+  %newval = load volatile i64, i64* %new
   %newval_shifted = shl i64 %newval, 26
   %newval_masked = and i64 %newval_shifted, 1099444518912 ; = 0xfffc000000
 
@@ -68,10 +68,10 @@ define void @test_whole32_from64(i64* %e
 
 ; CHECK: ret
 
-  %oldval = load volatile i64* %existing
+  %oldval = load volatile i64, i64* %existing
   %oldval_keep = and i64 %oldval, 4294901760 ; = 0xffff0000
 
-  %newval = load volatile i64* %new
+  %newval = load volatile i64, i64* %new
   %newval_masked = and i64 %newval, 65535 ; = 0xffff
 
   %combined = or i64 %oldval_keep, %newval_masked
@@ -86,10 +86,10 @@ define void @test_32bit_masked(i32 *%exi
 ; CHECK: and
 ; CHECK: bfi [[INSERT:w[0-9]+]], {{w[0-9]+}}, #3, #4
 
-  %oldval = load volatile i32* %existing
+  %oldval = load volatile i32, i32* %existing
   %oldval_keep = and i32 %oldval, 135 ; = 0x87
 
-  %newval = load volatile i32* %new
+  %newval = load volatile i32, i32* %new
   %newval_shifted = shl i32 %newval, 3
   %newval_masked = and i32 %newval_shifted, 120 ; = 0x78
 
@@ -104,10 +104,10 @@ define void @test_64bit_masked(i64 *%exi
 ; CHECK: and
 ; CHECK: bfi [[INSERT:x[0-9]+]], {{x[0-9]+}}, #40, #8
 
-  %oldval = load volatile i64* %existing
+  %oldval = load volatile i64, i64* %existing
   %oldval_keep = and i64 %oldval, 1095216660480 ; = 0xff_0000_0000
 
-  %newval = load volatile i64* %new
+  %newval = load volatile i64, i64* %new
   %newval_shifted = shl i64 %newval, 40
   %newval_masked = and i64 %newval_shifted, 280375465082880 ; = 0xff00_0000_0000
 
@@ -124,10 +124,10 @@ define void @test_32bit_complexmask(i32
 ; CHECK: and
 ; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #3, #4
 
-  %oldval = load volatile i32* %existing
+  %oldval = load volatile i32, i32* %existing
   %oldval_keep = and i32 %oldval, 647 ; = 0x287
 
-  %newval = load volatile i32* %new
+  %newval = load volatile i32, i32* %new
   %newval_shifted = shl i32 %newval, 3
   %newval_masked = and i32 %newval_shifted, 120 ; = 0x278
 
@@ -144,10 +144,10 @@ define void @test_32bit_badmask(i32 *%ex
 ; CHECK-NOT: bfm
 ; CHECK: ret
 
-  %oldval = load volatile i32* %existing
+  %oldval = load volatile i32, i32* %existing
   %oldval_keep = and i32 %oldval, 135 ; = 0x87
 
-  %newval = load volatile i32* %new
+  %newval = load volatile i32, i32* %new
   %newval_shifted = shl i32 %newval, 3
   %newval_masked = and i32 %newval_shifted, 632 ; = 0x278
 
@@ -164,10 +164,10 @@ define void @test_64bit_badmask(i64 *%ex
 ; CHECK-NOT: bfm
 ; CHECK: ret
 
-  %oldval = load volatile i64* %existing
+  %oldval = load volatile i64, i64* %existing
   %oldval_keep = and i64 %oldval, 135 ; = 0x87
 
-  %newval = load volatile i64* %new
+  %newval = load volatile i64, i64* %new
   %newval_shifted = shl i64 %newval, 3
   %newval_masked = and i64 %newval_shifted, 664 ; = 0x278
 
@@ -182,10 +182,10 @@ define void @test_64bit_badmask(i64 *%ex
 define void @test_32bit_with_shr(i32* %existing, i32* %new) {
 ; CHECK-LABEL: test_32bit_with_shr:
 
-  %oldval = load volatile i32* %existing
+  %oldval = load volatile i32, i32* %existing
   %oldval_keep = and i32 %oldval, 2214592511 ; =0x83ffffff
 
-  %newval = load i32* %new
+  %newval = load i32, i32* %new
   %newval_shifted = shl i32 %newval, 12
   %newval_masked = and i32 %newval_shifted, 2080374784 ; = 0x7c000000
 

Modified: llvm/trunk/test/CodeGen/AArch64/bitfield.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/bitfield.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/bitfield.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/bitfield.ll Fri Feb 27 15:17:42 2015
@@ -180,7 +180,7 @@ define i32 @test_ubfx32(i32* %addr) {
 ; CHECK-LABEL: test_ubfx32:
 ; CHECK: ubfx {{w[0-9]+}}, {{w[0-9]+}}, #23, #3
 
-   %fields = load i32* %addr
+   %fields = load i32, i32* %addr
    %shifted = lshr i32 %fields, 23
    %masked = and i32 %shifted, 7
    ret i32 %masked
@@ -189,7 +189,7 @@ define i32 @test_ubfx32(i32* %addr) {
 define i64 @test_ubfx64(i64* %addr) {
 ; CHECK-LABEL: test_ubfx64:
 ; CHECK: ubfx {{x[0-9]+}}, {{x[0-9]+}}, #25, #10
-   %fields = load i64* %addr
+   %fields = load i64, i64* %addr
    %shifted = lshr i64 %fields, 25
    %masked = and i64 %shifted, 1023
    ret i64 %masked
@@ -199,7 +199,7 @@ define i32 @test_sbfx32(i32* %addr) {
 ; CHECK-LABEL: test_sbfx32:
 ; CHECK: sbfx {{w[0-9]+}}, {{w[0-9]+}}, #6, #3
 
-   %fields = load i32* %addr
+   %fields = load i32, i32* %addr
    %shifted = shl i32 %fields, 23
    %extended = ashr i32 %shifted, 29
    ret i32 %extended
@@ -209,7 +209,7 @@ define i64 @test_sbfx64(i64* %addr) {
 ; CHECK-LABEL: test_sbfx64:
 ; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #63
 
-   %fields = load i64* %addr
+   %fields = load i64, i64* %addr
    %shifted = shl i64 %fields, 1
    %extended = ashr i64 %shifted, 1
    ret i64 %extended

Modified: llvm/trunk/test/CodeGen/AArch64/blockaddress.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/blockaddress.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/blockaddress.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/blockaddress.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 define void @test_blockaddress() {
 ; CHECK-LABEL: test_blockaddress:
   store volatile i8* blockaddress(@test_blockaddress, %block), i8** @addr
-  %val = load volatile i8** @addr
+  %val = load volatile i8*, i8** @addr
   indirectbr i8* %val, [label %block]
 ; CHECK: adrp [[DEST_HI:x[0-9]+]], [[DEST_LBL:.Ltmp[0-9]+]]
 ; CHECK: add [[DEST:x[0-9]+]], [[DEST_HI]], {{#?}}:lo12:[[DEST_LBL]]

Modified: llvm/trunk/test/CodeGen/AArch64/bool-loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/bool-loads.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/bool-loads.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/bool-loads.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 define i32 @test_sextloadi32() {
 ; CHECK-LABEL: test_sextloadi32
 
-  %val = load i1* @var
+  %val = load i1, i1* @var
   %ret = sext i1 %val to i32
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
 ; CHECK: {{sbfx x[0-9]+, x[0-9]+, #0, #1|sbfx w[0-9]+, w[0-9]+, #0, #1}}
@@ -17,7 +17,7 @@ define i32 @test_sextloadi32() {
 define i64 @test_sextloadi64() {
 ; CHECK-LABEL: test_sextloadi64
 
-  %val = load i1* @var
+  %val = load i1, i1* @var
   %ret = sext i1 %val to i64
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
 ; CHECK: {{sbfx x[0-9]+, x[0-9]+, #0, #1}}
@@ -32,7 +32,7 @@ define i32 @test_zextloadi32() {
 ; It's not actually necessary that "ret" is next, but as far as LLVM
 ; is concerned only 0 or 1 should be loadable so no extension is
 ; necessary.
-  %val = load i1* @var
+  %val = load i1, i1* @var
   %ret = zext i1 %val to i32
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
 
@@ -46,7 +46,7 @@ define i64 @test_zextloadi64() {
 ; It's not actually necessary that "ret" is next, but as far as LLVM
 ; is concerned only 0 or 1 should be loadable so no extension is
 ; necessary.
-  %val = load i1* @var
+  %val = load i1, i1* @var
   %ret = zext i1 %val to i64
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
 

Modified: llvm/trunk/test/CodeGen/AArch64/breg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/breg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/breg.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/breg.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 
 define void @foo() {
 ; CHECK-LABEL: foo:
-  %lab = load i8** @stored_label
+  %lab = load i8*, i8** @stored_label
   indirectbr i8* %lab, [label  %otherlab, label %retlab]
 ; CHECK: adrp {{x[0-9]+}}, stored_label
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:stored_label]

Modified: llvm/trunk/test/CodeGen/AArch64/callee-save.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/callee-save.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/callee-save.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/callee-save.ll Fri Feb 27 15:17:42 2015
@@ -12,38 +12,38 @@ define void @foo() {
 
   ; Create lots of live variables to exhaust the supply of
   ; caller-saved registers
-  %val1 = load volatile float* @var
-  %val2 = load volatile float* @var
-  %val3 = load volatile float* @var
-  %val4 = load volatile float* @var
-  %val5 = load volatile float* @var
-  %val6 = load volatile float* @var
-  %val7 = load volatile float* @var
-  %val8 = load volatile float* @var
-  %val9 = load volatile float* @var
-  %val10 = load volatile float* @var
-  %val11 = load volatile float* @var
-  %val12 = load volatile float* @var
-  %val13 = load volatile float* @var
-  %val14 = load volatile float* @var
-  %val15 = load volatile float* @var
-  %val16 = load volatile float* @var
-  %val17 = load volatile float* @var
-  %val18 = load volatile float* @var
-  %val19 = load volatile float* @var
-  %val20 = load volatile float* @var
-  %val21 = load volatile float* @var
-  %val22 = load volatile float* @var
-  %val23 = load volatile float* @var
-  %val24 = load volatile float* @var
-  %val25 = load volatile float* @var
-  %val26 = load volatile float* @var
-  %val27 = load volatile float* @var
-  %val28 = load volatile float* @var
-  %val29 = load volatile float* @var
-  %val30 = load volatile float* @var
-  %val31 = load volatile float* @var
-  %val32 = load volatile float* @var
+  %val1 = load volatile float, float* @var
+  %val2 = load volatile float, float* @var
+  %val3 = load volatile float, float* @var
+  %val4 = load volatile float, float* @var
+  %val5 = load volatile float, float* @var
+  %val6 = load volatile float, float* @var
+  %val7 = load volatile float, float* @var
+  %val8 = load volatile float, float* @var
+  %val9 = load volatile float, float* @var
+  %val10 = load volatile float, float* @var
+  %val11 = load volatile float, float* @var
+  %val12 = load volatile float, float* @var
+  %val13 = load volatile float, float* @var
+  %val14 = load volatile float, float* @var
+  %val15 = load volatile float, float* @var
+  %val16 = load volatile float, float* @var
+  %val17 = load volatile float, float* @var
+  %val18 = load volatile float, float* @var
+  %val19 = load volatile float, float* @var
+  %val20 = load volatile float, float* @var
+  %val21 = load volatile float, float* @var
+  %val22 = load volatile float, float* @var
+  %val23 = load volatile float, float* @var
+  %val24 = load volatile float, float* @var
+  %val25 = load volatile float, float* @var
+  %val26 = load volatile float, float* @var
+  %val27 = load volatile float, float* @var
+  %val28 = load volatile float, float* @var
+  %val29 = load volatile float, float* @var
+  %val30 = load volatile float, float* @var
+  %val31 = load volatile float, float* @var
+  %val32 = load volatile float, float* @var
 
   store volatile float %val1, float* @var
   store volatile float %val2, float* @var

Modified: llvm/trunk/test/CodeGen/AArch64/cmpwithshort.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/cmpwithshort.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/cmpwithshort.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/cmpwithshort.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define i16 @test_1cmp_signed_1(i16* %ptr
 ; CHECK-NEXT: cmn
 entry:
   %addr = getelementptr inbounds i16, i16* %ptr1, i16 0
-  %val = load i16* %addr, align 2
+  %val = load i16, i16* %addr, align 2
   %cmp = icmp eq i16 %val, -1
   br i1 %cmp, label %if, label %if.then
 if:
@@ -21,7 +21,7 @@ define i16 @test_1cmp_signed_2(i16* %ptr
 ; CHECK-NEXT: cmn
 entry:
   %addr = getelementptr inbounds i16, i16* %ptr1, i16 0
-  %val = load i16* %addr, align 2
+  %val = load i16, i16* %addr, align 2
   %cmp = icmp sge i16 %val, -1
   br i1 %cmp, label %if, label %if.then
 if:
@@ -36,7 +36,7 @@ define i16 @test_1cmp_unsigned_1(i16* %p
 ; CHECK-NEXT: cmn
 entry:
   %addr = getelementptr inbounds i16, i16* %ptr1, i16 0
-  %val = load i16* %addr, align 2
+  %val = load i16, i16* %addr, align 2
   %cmp = icmp uge i16 %val, -1
   br i1 %cmp, label %if, label %if.then
 if:

Modified: llvm/trunk/test/CodeGen/AArch64/code-model-large-abs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/code-model-large-abs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/code-model-large-abs.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/code-model-large-abs.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ define i8* @global_addr() {
 
 define i8 @global_i8() {
 ; CHECK-LABEL: global_i8:
-  %val = load i8* @var8
+  %val = load i8, i8* @var8
   ret i8 %val
 ; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var8
 ; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var8
@@ -29,7 +29,7 @@ define i8 @global_i8() {
 
 define i16 @global_i16() {
 ; CHECK-LABEL: global_i16:
-  %val = load i16* @var16
+  %val = load i16, i16* @var16
   ret i16 %val
 ; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var16
 ; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var16
@@ -40,7 +40,7 @@ define i16 @global_i16() {
 
 define i32 @global_i32() {
 ; CHECK-LABEL: global_i32:
-  %val = load i32* @var32
+  %val = load i32, i32* @var32
   ret i32 %val
 ; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var32
 ; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var32
@@ -51,7 +51,7 @@ define i32 @global_i32() {
 
 define i64 @global_i64() {
 ; CHECK-LABEL: global_i64:
-  %val = load i64* @var64
+  %val = load i64, i64* @var64
   ret i64 %val
 ; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var64
 ; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var64

Modified: llvm/trunk/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/combine-comparisons-by-cse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/combine-comparisons-by-cse.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/combine-comparisons-by-cse.ll Fri Feb 27 15:17:42 2015
@@ -15,13 +15,13 @@ define i32 @combine_gt_ge_10() #0 {
 ; CHECK-NOT: cmp
 ; CHECK: b.lt
 entry:
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
   %cmp = icmp sgt i32 %0, 10
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
 
 land.lhs.true:                                    ; preds = %entry
-  %1 = load i32* @b, align 4
-  %2 = load i32* @c, align 4
+  %1 = load i32, i32* @b, align 4
+  %2 = load i32, i32* @c, align 4
   %cmp1 = icmp eq i32 %1, %2
   br i1 %cmp1, label %return, label %land.lhs.true3
 
@@ -30,8 +30,8 @@ lor.lhs.false:
   br i1 %cmp2, label %land.lhs.true3, label %if.end
 
 land.lhs.true3:                                   ; preds = %lor.lhs.false, %land.lhs.true
-  %3 = load i32* @b, align 4
-  %4 = load i32* @d, align 4
+  %3 = load i32, i32* @b, align 4
+  %4 = load i32, i32* @d, align 4
   %cmp4 = icmp eq i32 %3, %4
   br i1 %cmp4, label %return, label %if.end
 
@@ -52,13 +52,13 @@ define i32 @combine_gt_lt_5() #0 {
 ; CHECK-NOT: cmp
 ; CHECK: b.ge
 entry:
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
   %cmp = icmp sgt i32 %0, 5
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
 
 land.lhs.true:                                    ; preds = %entry
-  %1 = load i32* @b, align 4
-  %2 = load i32* @c, align 4
+  %1 = load i32, i32* @b, align 4
+  %2 = load i32, i32* @c, align 4
   %cmp1 = icmp eq i32 %1, %2
   br i1 %cmp1, label %return, label %if.end
 
@@ -67,8 +67,8 @@ lor.lhs.false:
   br i1 %cmp2, label %land.lhs.true3, label %if.end
 
 land.lhs.true3:                                   ; preds = %lor.lhs.false
-  %3 = load i32* @b, align 4
-  %4 = load i32* @d, align 4
+  %3 = load i32, i32* @b, align 4
+  %4 = load i32, i32* @d, align 4
   %cmp4 = icmp eq i32 %3, %4
   br i1 %cmp4, label %return, label %if.end
 
@@ -89,13 +89,13 @@ define i32 @combine_lt_ge_5() #0 {
 ; CHECK-NOT: cmp
 ; CHECK: b.gt
 entry:
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
   %cmp = icmp slt i32 %0, 5
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
 
 land.lhs.true:                                    ; preds = %entry
-  %1 = load i32* @b, align 4
-  %2 = load i32* @c, align 4
+  %1 = load i32, i32* @b, align 4
+  %2 = load i32, i32* @c, align 4
   %cmp1 = icmp eq i32 %1, %2
   br i1 %cmp1, label %return, label %land.lhs.true3
 
@@ -104,8 +104,8 @@ lor.lhs.false:
   br i1 %cmp2, label %land.lhs.true3, label %if.end
 
 land.lhs.true3:                                   ; preds = %lor.lhs.false, %land.lhs.true
-  %3 = load i32* @b, align 4
-  %4 = load i32* @d, align 4
+  %3 = load i32, i32* @b, align 4
+  %4 = load i32, i32* @d, align 4
   %cmp4 = icmp eq i32 %3, %4
   br i1 %cmp4, label %return, label %if.end
 
@@ -126,13 +126,13 @@ define i32 @combine_lt_gt_5() #0 {
 ; CHECK-NOT: cmp
 ; CHECK: b.le
 entry:
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
   %cmp = icmp slt i32 %0, 5
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
 
 land.lhs.true:                                    ; preds = %entry
-  %1 = load i32* @b, align 4
-  %2 = load i32* @c, align 4
+  %1 = load i32, i32* @b, align 4
+  %2 = load i32, i32* @c, align 4
   %cmp1 = icmp eq i32 %1, %2
   br i1 %cmp1, label %return, label %if.end
 
@@ -141,8 +141,8 @@ lor.lhs.false:
   br i1 %cmp2, label %land.lhs.true3, label %if.end
 
 land.lhs.true3:                                   ; preds = %lor.lhs.false
-  %3 = load i32* @b, align 4
-  %4 = load i32* @d, align 4
+  %3 = load i32, i32* @b, align 4
+  %4 = load i32, i32* @d, align 4
   %cmp4 = icmp eq i32 %3, %4
   br i1 %cmp4, label %return, label %if.end
 
@@ -163,13 +163,13 @@ define i32 @combine_gt_lt_n5() #0 {
 ; CHECK-NOT: cmn
 ; CHECK: b.ge
 entry:
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
   %cmp = icmp sgt i32 %0, -5
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
 
 land.lhs.true:                                    ; preds = %entry
-  %1 = load i32* @b, align 4
-  %2 = load i32* @c, align 4
+  %1 = load i32, i32* @b, align 4
+  %2 = load i32, i32* @c, align 4
   %cmp1 = icmp eq i32 %1, %2
   br i1 %cmp1, label %return, label %if.end
 
@@ -178,8 +178,8 @@ lor.lhs.false:
   br i1 %cmp2, label %land.lhs.true3, label %if.end
 
 land.lhs.true3:                                   ; preds = %lor.lhs.false
-  %3 = load i32* @b, align 4
-  %4 = load i32* @d, align 4
+  %3 = load i32, i32* @b, align 4
+  %4 = load i32, i32* @d, align 4
   %cmp4 = icmp eq i32 %3, %4
   br i1 %cmp4, label %return, label %if.end
 
@@ -200,13 +200,13 @@ define i32 @combine_lt_gt_n5() #0 {
 ; CHECK-NOT: cmn
 ; CHECK: b.le
 entry:
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
   %cmp = icmp slt i32 %0, -5
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
 
 land.lhs.true:                                    ; preds = %entry
-  %1 = load i32* @b, align 4
-  %2 = load i32* @c, align 4
+  %1 = load i32, i32* @b, align 4
+  %2 = load i32, i32* @c, align 4
   %cmp1 = icmp eq i32 %1, %2
   br i1 %cmp1, label %return, label %if.end
 
@@ -215,8 +215,8 @@ lor.lhs.false:
   br i1 %cmp2, label %land.lhs.true3, label %if.end
 
 land.lhs.true3:                                   ; preds = %lor.lhs.false
-  %3 = load i32* @b, align 4
-  %4 = load i32* @d, align 4
+  %3 = load i32, i32* @b, align 4
+  %4 = load i32, i32* @d, align 4
   %cmp4 = icmp eq i32 %3, %4
   br i1 %cmp4, label %return, label %if.end
 
@@ -238,17 +238,17 @@ declare %struct.Struct* @Update(%struct.
 define void @combine_non_adjacent_cmp_br(%struct.Struct* nocapture readonly %hdCall) #0 {
 entry:
   %size = getelementptr inbounds %struct.Struct, %struct.Struct* %hdCall, i64 0, i32 0
-  %0 = load i64* %size, align 8
+  %0 = load i64, i64* %size, align 8
   br label %land.rhs
 
 land.rhs:
   %rp.06 = phi i64 [ %0, %entry ], [ %sub, %while.body ]
-  %1 = load i64* inttoptr (i64 24 to i64*), align 8
+  %1 = load i64, i64* inttoptr (i64 24 to i64*), align 8
   %cmp2 = icmp sgt i64 %1, 0
   br i1 %cmp2, label %while.body, label %while.end
 
 while.body:
-  %2 = load %struct.Struct** @glob, align 8
+  %2 = load %struct.Struct*, %struct.Struct** @glob, align 8
   %call = tail call %struct.Struct* @Update(%struct.Struct* %2) #2
   %sub = add nsw i64 %rp.06, -2
   %cmp = icmp slt i64 %0, %rp.06
@@ -268,7 +268,7 @@ define i32 @do_nothing_if_resultant_opco
 ; CHECK: cmp
 ; CHECK: b.gt
 entry:
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
   %cmp4 = icmp slt i32 %0, -1
   br i1 %cmp4, label %while.body.preheader, label %while.end
 
@@ -283,7 +283,7 @@ while.body:
   br i1 %cmp, label %while.body, label %while.cond.while.end_crit_edge
 
 while.cond.while.end_crit_edge:                   ; preds = %while.body
-  %.pre = load i32* @a, align 4
+  %.pre = load i32, i32* @a, align 4
   br label %while.end
 
 while.end:                                        ; preds = %while.cond.while.end_crit_edge, %entry
@@ -292,8 +292,8 @@ while.end:
   br i1 %cmp1, label %land.lhs.true, label %if.end
 
 land.lhs.true:                                    ; preds = %while.end
-  %2 = load i32* @b, align 4
-  %3 = load i32* @d, align 4
+  %2 = load i32, i32* @b, align 4
+  %3 = load i32, i32* @d, align 4
   %cmp2 = icmp eq i32 %2, %3
   br i1 %cmp2, label %return, label %if.end
 
@@ -312,7 +312,7 @@ define i32 @do_nothing_if_compares_can_n
 ; CHECK: cmn
 ; CHECK: b.lt
 entry:
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
   %cmp4 = icmp slt i32 %0, 1
   br i1 %cmp4, label %while.body.preheader, label %while.end
 
@@ -330,13 +330,13 @@ while.end.loopexit:
   br label %while.end
 
 while.end:                                        ; preds = %while.end.loopexit, %entry
-  %1 = load i32* @c, align 4
+  %1 = load i32, i32* @c, align 4
   %cmp1 = icmp sgt i32 %1, -3
   br i1 %cmp1, label %land.lhs.true, label %if.end
 
 land.lhs.true:                                    ; preds = %while.end
-  %2 = load i32* @b, align 4
-  %3 = load i32* @d, align 4
+  %2 = load i32, i32* @b, align 4
+  %3 = load i32, i32* @d, align 4
   %cmp2 = icmp eq i32 %2, %3
   br i1 %cmp2, label %return, label %if.end
 
@@ -375,7 +375,7 @@ entry:
 
 land.lhs.true:                                    ; preds = %entry
   %arrayidx = getelementptr inbounds i8*, i8** %argv, i64 1
-  %0 = load i8** %arrayidx, align 8
+  %0 = load i8*, i8** %arrayidx, align 8
   %cmp1 = icmp eq i8* %0, null
   br i1 %cmp1, label %if.end, label %return
 

Modified: llvm/trunk/test/CodeGen/AArch64/compare-branch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/compare-branch.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/compare-branch.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/compare-branch.ll Fri Feb 27 15:17:42 2015
@@ -6,25 +6,25 @@
 define void @foo() {
 ; CHECK-LABEL: foo:
 
-  %val1 = load volatile i32* @var32
+  %val1 = load volatile i32, i32* @var32
   %tst1 = icmp eq i32 %val1, 0
   br i1 %tst1, label %end, label %test2
 ; CHECK: cbz {{w[0-9]+}}, .LBB
 
 test2:
-  %val2 = load volatile i32* @var32
+  %val2 = load volatile i32, i32* @var32
   %tst2 = icmp ne i32 %val2, 0
   br i1 %tst2, label %end, label %test3
 ; CHECK: cbnz {{w[0-9]+}}, .LBB
 
 test3:
-  %val3 = load volatile i64* @var64
+  %val3 = load volatile i64, i64* @var64
   %tst3 = icmp eq i64 %val3, 0
   br i1 %tst3, label %end, label %test4
 ; CHECK: cbz {{x[0-9]+}}, .LBB
 
 test4:
-  %val4 = load volatile i64* @var64
+  %val4 = load volatile i64, i64* @var64
   %tst4 = icmp ne i64 %val4, 0
   br i1 %tst4, label %end, label %test5
 ; CHECK: cbnz {{x[0-9]+}}, .LBB

Modified: llvm/trunk/test/CodeGen/AArch64/complex-copy-noneon.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/complex-copy-noneon.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/complex-copy-noneon.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/complex-copy-noneon.ll Fri Feb 27 15:17:42 2015
@@ -9,9 +9,9 @@ define void @store_combine() nounwind {
   %dst = alloca { double, double }, align 8
 
   %src.realp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 0
-  %src.real = load double* %src.realp
+  %src.real = load double, double* %src.realp
   %src.imagp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 1
-  %src.imag = load double* %src.imagp
+  %src.imag = load double, double* %src.imagp
 
   %dst.realp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 0
   %dst.imagp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 1

Modified: llvm/trunk/test/CodeGen/AArch64/complex-int-to-fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/complex-int-to-fp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/complex-int-to-fp.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/complex-int-to-fp.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 ; CHECK: scvtf
 ; CHECK: ret
 define void @autogen_SD19655(<2 x i64>* %addr, <2 x float>* %addrfloat) {
-  %T = load <2 x i64>* %addr
+  %T = load <2 x i64>, <2 x i64>* %addr
   %F = sitofp <2 x i64> %T to <2 x float>
   store <2 x float> %F, <2 x float>* %addrfloat
   ret void

Modified: llvm/trunk/test/CodeGen/AArch64/dag-combine-invaraints.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/dag-combine-invaraints.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/dag-combine-invaraints.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/dag-combine-invaraints.ll Fri Feb 27 15:17:42 2015
@@ -12,14 +12,14 @@ main_:
   store i32 0, i32* %tmp
   store i32 15, i32* %i32T, align 4
   store i32 5, i32* %i32F, align 4
-  %tmp6 = load i32* %tmp, align 4
+  %tmp6 = load i32, i32* %tmp, align 4
   %tmp7 = icmp ne i32 %tmp6, 0
   %tmp8 = xor i1 %tmp7, true
-  %tmp9 = load i32* %i32T, align 4
-  %tmp10 = load i32* %i32F, align 4
+  %tmp9 = load i32, i32* %i32T, align 4
+  %tmp10 = load i32, i32* %i32F, align 4
   %DHSelect = select i1 %tmp8, i32 %tmp9, i32 %tmp10
   store i32 %DHSelect, i32* %i32X, align 4
-  %tmp15 = load i32* %i32X, align 4
+  %tmp15 = load i32, i32* %i32X, align 4
   %tmp17 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str2, i32 0, i32 0), i32 %tmp15)
   ret i32 0
 

Modified: llvm/trunk/test/CodeGen/AArch64/dp-3source.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/dp-3source.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/dp-3source.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/dp-3source.ll Fri Feb 27 15:17:42 2015
@@ -168,8 +168,8 @@ define i64 @test_umnegl(i32 %lhs, i32 %r
 
 define void @test_mneg(){
 ; CHECK-LABEL: test_mneg:
-  %1 = load i32* @a, align 4
-  %2 = load i32* @b, align 4
+  %1 = load i32, i32* @a, align 4
+  %2 = load i32, i32* @b, align 4
   %3 = sub i32 0, %1
   %4 = mul i32 %2, %3
   store i32 %4, i32* @c, align 4

Modified: llvm/trunk/test/CodeGen/AArch64/dp1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/dp1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/dp1.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/dp1.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 
 define void @rev_i32() {
 ; CHECK-LABEL: rev_i32:
-    %val0_tmp = load i32* @var32
+    %val0_tmp = load i32, i32* @var32
     %val1_tmp = call i32 @llvm.bswap.i32(i32 %val0_tmp)
 ; CHECK: rev	{{w[0-9]+}}, {{w[0-9]+}}
     store volatile i32 %val1_tmp, i32* @var32
@@ -14,7 +14,7 @@ define void @rev_i32() {
 
 define void @rev_i64() {
 ; CHECK-LABEL: rev_i64:
-    %val0_tmp = load i64* @var64
+    %val0_tmp = load i64, i64* @var64
     %val1_tmp = call i64 @llvm.bswap.i64(i64 %val0_tmp)
 ; CHECK: rev	{{x[0-9]+}}, {{x[0-9]+}}
     store volatile i64 %val1_tmp, i64* @var64
@@ -23,7 +23,7 @@ define void @rev_i64() {
 
 define void @rev32_i64() {
 ; CHECK-LABEL: rev32_i64:
-    %val0_tmp = load i64* @var64
+    %val0_tmp = load i64, i64* @var64
     %val1_tmp = shl i64 %val0_tmp, 32
     %val5_tmp = sub i64 64, 32
     %val2_tmp = lshr i64 %val0_tmp, %val5_tmp
@@ -36,7 +36,7 @@ define void @rev32_i64() {
 
 define void @rev16_i32() {
 ; CHECK-LABEL: rev16_i32:
-    %val0_tmp = load i32* @var32
+    %val0_tmp = load i32, i32* @var32
     %val1_tmp = shl i32 %val0_tmp, 16
     %val2_tmp = lshr i32 %val0_tmp, 16
     %val3_tmp = or i32 %val1_tmp, %val2_tmp
@@ -48,7 +48,7 @@ define void @rev16_i32() {
 
 define void @clz_zerodef_i32() {
 ; CHECK-LABEL: clz_zerodef_i32:
-    %val0_tmp = load i32* @var32
+    %val0_tmp = load i32, i32* @var32
     %val4_tmp = call i32 @llvm.ctlz.i32(i32 %val0_tmp, i1 0)
 ; CHECK: clz	{{w[0-9]+}}, {{w[0-9]+}}
     store volatile i32 %val4_tmp, i32* @var32
@@ -57,7 +57,7 @@ define void @clz_zerodef_i32() {
 
 define void @clz_zerodef_i64() {
 ; CHECK-LABEL: clz_zerodef_i64:
-    %val0_tmp = load i64* @var64
+    %val0_tmp = load i64, i64* @var64
     %val4_tmp = call i64 @llvm.ctlz.i64(i64 %val0_tmp, i1 0)
 ; CHECK: clz	{{x[0-9]+}}, {{x[0-9]+}}
     store volatile i64 %val4_tmp, i64* @var64
@@ -66,7 +66,7 @@ define void @clz_zerodef_i64() {
 
 define void @clz_zeroundef_i32() {
 ; CHECK-LABEL: clz_zeroundef_i32:
-    %val0_tmp = load i32* @var32
+    %val0_tmp = load i32, i32* @var32
     %val4_tmp = call i32 @llvm.ctlz.i32(i32 %val0_tmp, i1 1)
 ; CHECK: clz	{{w[0-9]+}}, {{w[0-9]+}}
     store volatile i32 %val4_tmp, i32* @var32
@@ -75,7 +75,7 @@ define void @clz_zeroundef_i32() {
 
 define void @clz_zeroundef_i64() {
 ; CHECK-LABEL: clz_zeroundef_i64:
-    %val0_tmp = load i64* @var64
+    %val0_tmp = load i64, i64* @var64
     %val4_tmp = call i64 @llvm.ctlz.i64(i64 %val0_tmp, i1 1)
 ; CHECK: clz	{{x[0-9]+}}, {{x[0-9]+}}
     store volatile i64 %val4_tmp, i64* @var64
@@ -84,7 +84,7 @@ define void @clz_zeroundef_i64() {
 
 define void @cttz_zerodef_i32() {
 ; CHECK-LABEL: cttz_zerodef_i32:
-    %val0_tmp = load i32* @var32
+    %val0_tmp = load i32, i32* @var32
     %val4_tmp = call i32 @llvm.cttz.i32(i32 %val0_tmp, i1 0)
 ; CHECK: rbit   [[REVERSED:w[0-9]+]], {{w[0-9]+}}
 ; CHECK: clz	{{w[0-9]+}}, [[REVERSED]]
@@ -94,7 +94,7 @@ define void @cttz_zerodef_i32() {
 
 define void @cttz_zerodef_i64() {
 ; CHECK-LABEL: cttz_zerodef_i64:
-    %val0_tmp = load i64* @var64
+    %val0_tmp = load i64, i64* @var64
     %val4_tmp = call i64 @llvm.cttz.i64(i64 %val0_tmp, i1 0)
 ; CHECK: rbit   [[REVERSED:x[0-9]+]], {{x[0-9]+}}
 ; CHECK: clz	{{x[0-9]+}}, [[REVERSED]]
@@ -104,7 +104,7 @@ define void @cttz_zerodef_i64() {
 
 define void @cttz_zeroundef_i32() {
 ; CHECK-LABEL: cttz_zeroundef_i32:
-    %val0_tmp = load i32* @var32
+    %val0_tmp = load i32, i32* @var32
     %val4_tmp = call i32 @llvm.cttz.i32(i32 %val0_tmp, i1 1)
 ; CHECK: rbit   [[REVERSED:w[0-9]+]], {{w[0-9]+}}
 ; CHECK: clz	{{w[0-9]+}}, [[REVERSED]]
@@ -114,7 +114,7 @@ define void @cttz_zeroundef_i32() {
 
 define void @cttz_zeroundef_i64() {
 ; CHECK-LABEL: cttz_zeroundef_i64:
-    %val0_tmp = load i64* @var64
+    %val0_tmp = load i64, i64* @var64
     %val4_tmp = call i64 @llvm.cttz.i64(i64 %val0_tmp, i1 1)
 ; CHECK: rbit   [[REVERSED:x[0-9]+]], {{x[0-9]+}}
 ; CHECK: clz	{{x[0-9]+}}, [[REVERSED]]
@@ -126,7 +126,7 @@ define void @cttz_zeroundef_i64() {
 ; ISelLowering.
 define void @ctpop_i32() {
 ; CHECK-LABEL: ctpop_i32:
-    %val0_tmp = load i32* @var32
+    %val0_tmp = load i32, i32* @var32
     %val4_tmp = call i32 @llvm.ctpop.i32(i32 %val0_tmp)
     store volatile i32 %val4_tmp, i32* @var32
     ret void
@@ -134,7 +134,7 @@ define void @ctpop_i32() {
 
 define void @ctpop_i64() {
 ; CHECK-LABEL: ctpop_i64:
-    %val0_tmp = load i64* @var64
+    %val0_tmp = load i64, i64* @var64
     %val4_tmp = call i64 @llvm.ctpop.i64(i64 %val0_tmp)
     store volatile i64 %val4_tmp, i64* @var64
     ret void

Modified: llvm/trunk/test/CodeGen/AArch64/dp2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/dp2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/dp2.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/dp2.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@
 
 define void @rorv_i64() {
 ; CHECK-LABEL: rorv_i64:
-    %val0_tmp = load i64* @var64_0
-    %val1_tmp = load i64* @var64_1
+    %val0_tmp = load i64, i64* @var64_0
+    %val1_tmp = load i64, i64* @var64_1
     %val2_tmp = sub i64 64, %val1_tmp
     %val3_tmp = shl i64 %val0_tmp, %val2_tmp
     %val4_tmp = lshr i64 %val0_tmp, %val1_tmp
@@ -20,8 +20,8 @@ define void @rorv_i64() {
 
 define void @asrv_i64() {
 ; CHECK-LABEL: asrv_i64:
-    %val0_tmp = load i64* @var64_0
-    %val1_tmp = load i64* @var64_1
+    %val0_tmp = load i64, i64* @var64_0
+    %val1_tmp = load i64, i64* @var64_1
     %val4_tmp = ashr i64 %val0_tmp, %val1_tmp
 ; CHECK: {{asr|asrv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
     store volatile i64 %val4_tmp, i64* @var64_1
@@ -30,8 +30,8 @@ define void @asrv_i64() {
 
 define void @lsrv_i64() {
 ; CHECK-LABEL: lsrv_i64:
-    %val0_tmp = load i64* @var64_0
-    %val1_tmp = load i64* @var64_1
+    %val0_tmp = load i64, i64* @var64_0
+    %val1_tmp = load i64, i64* @var64_1
     %val4_tmp = lshr i64 %val0_tmp, %val1_tmp
 ; CHECK: {{lsr|lsrv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
     store volatile i64 %val4_tmp, i64* @var64_0
@@ -40,8 +40,8 @@ define void @lsrv_i64() {
 
 define void @lslv_i64() {
 ; CHECK-LABEL: lslv_i64:
-    %val0_tmp = load i64* @var64_0
-    %val1_tmp = load i64* @var64_1
+    %val0_tmp = load i64, i64* @var64_0
+    %val1_tmp = load i64, i64* @var64_1
     %val4_tmp = shl i64 %val0_tmp, %val1_tmp
 ; CHECK: {{lsl|lslv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
     store volatile i64 %val4_tmp, i64* @var64_1
@@ -50,8 +50,8 @@ define void @lslv_i64() {
 
 define void @udiv_i64() {
 ; CHECK-LABEL: udiv_i64:
-    %val0_tmp = load i64* @var64_0
-    %val1_tmp = load i64* @var64_1
+    %val0_tmp = load i64, i64* @var64_0
+    %val1_tmp = load i64, i64* @var64_1
     %val4_tmp = udiv i64 %val0_tmp, %val1_tmp
 ; CHECK: udiv	{{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
     store volatile i64 %val4_tmp, i64* @var64_0
@@ -60,8 +60,8 @@ define void @udiv_i64() {
 
 define void @sdiv_i64() {
 ; CHECK-LABEL: sdiv_i64:
-    %val0_tmp = load i64* @var64_0
-    %val1_tmp = load i64* @var64_1
+    %val0_tmp = load i64, i64* @var64_0
+    %val1_tmp = load i64, i64* @var64_1
     %val4_tmp = sdiv i64 %val0_tmp, %val1_tmp
 ; CHECK: sdiv	{{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
     store volatile i64 %val4_tmp, i64* @var64_1
@@ -71,8 +71,8 @@ define void @sdiv_i64() {
 
 define void @lsrv_i32() {
 ; CHECK-LABEL: lsrv_i32:
-    %val0_tmp = load i32* @var32_0
-    %val1_tmp = load i32* @var32_1
+    %val0_tmp = load i32, i32* @var32_0
+    %val1_tmp = load i32, i32* @var32_1
     %val2_tmp = add i32 1, %val1_tmp
     %val4_tmp = lshr i32 %val0_tmp, %val2_tmp
 ; CHECK: {{lsr|lsrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
@@ -82,8 +82,8 @@ define void @lsrv_i32() {
 
 define void @lslv_i32() {
 ; CHECK-LABEL: lslv_i32:
-    %val0_tmp = load i32* @var32_0
-    %val1_tmp = load i32* @var32_1
+    %val0_tmp = load i32, i32* @var32_0
+    %val1_tmp = load i32, i32* @var32_1
     %val2_tmp = add i32 1, %val1_tmp
     %val4_tmp = shl i32 %val0_tmp, %val2_tmp
 ; CHECK: {{lsl|lslv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
@@ -93,8 +93,8 @@ define void @lslv_i32() {
 
 define void @rorv_i32() {
 ; CHECK-LABEL: rorv_i32:
-    %val0_tmp = load i32* @var32_0
-    %val6_tmp = load i32* @var32_1
+    %val0_tmp = load i32, i32* @var32_0
+    %val6_tmp = load i32, i32* @var32_1
     %val1_tmp = add i32 1, %val6_tmp
     %val2_tmp = sub i32 32, %val1_tmp
     %val3_tmp = shl i32 %val0_tmp, %val2_tmp
@@ -107,8 +107,8 @@ define void @rorv_i32() {
 
 define void @asrv_i32() {
 ; CHECK-LABEL: asrv_i32:
-    %val0_tmp = load i32* @var32_0
-    %val1_tmp = load i32* @var32_1
+    %val0_tmp = load i32, i32* @var32_0
+    %val1_tmp = load i32, i32* @var32_1
     %val2_tmp = add i32 1, %val1_tmp
     %val4_tmp = ashr i32 %val0_tmp, %val2_tmp
 ; CHECK: {{asr|asrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
@@ -118,8 +118,8 @@ define void @asrv_i32() {
 
 define void @sdiv_i32() {
 ; CHECK-LABEL: sdiv_i32:
-    %val0_tmp = load i32* @var32_0
-    %val1_tmp = load i32* @var32_1
+    %val0_tmp = load i32, i32* @var32_0
+    %val1_tmp = load i32, i32* @var32_1
     %val4_tmp = sdiv i32 %val0_tmp, %val1_tmp
 ; CHECK: sdiv	{{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
     store volatile i32 %val4_tmp, i32* @var32_1
@@ -128,8 +128,8 @@ define void @sdiv_i32() {
 
 define void @udiv_i32() {
 ; CHECK-LABEL: udiv_i32:
-    %val0_tmp = load i32* @var32_0
-    %val1_tmp = load i32* @var32_1
+    %val0_tmp = load i32, i32* @var32_0
+    %val1_tmp = load i32, i32* @var32_1
     %val4_tmp = udiv i32 %val0_tmp, %val1_tmp
 ; CHECK: udiv	{{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
     store volatile i32 %val4_tmp, i32* @var32_0
@@ -141,7 +141,7 @@ define void @udiv_i32() {
 define i32 @test_lsl32() {
 ; CHECK-LABEL: test_lsl32:
 
-  %val = load i32* @var32_0
+  %val = load i32, i32* @var32_0
   %ret = shl i32 1, %val
 ; CHECK: {{lsl|lslv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
 
@@ -151,7 +151,7 @@ define i32 @test_lsl32() {
 define i32 @test_lsr32() {
 ; CHECK-LABEL: test_lsr32:
 
-  %val = load i32* @var32_0
+  %val = load i32, i32* @var32_0
   %ret = lshr i32 1, %val
 ; CHECK: {{lsr|lsrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
 
@@ -161,7 +161,7 @@ define i32 @test_lsr32() {
 define i32 @test_asr32(i32 %in) {
 ; CHECK-LABEL: test_asr32:
 
-  %val = load i32* @var32_0
+  %val = load i32, i32* @var32_0
   %ret = ashr i32 %in, %val
 ; CHECK: {{asr|asrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
 

Modified: llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll Fri Feb 27 15:17:42 2015
@@ -15,10 +15,10 @@ entry:
 for.body4.us:
   %indvars.iv = phi i64 [ 0, %for.body4.lr.ph.us ], [ %indvars.iv.next, %for.body4.us ]
   %arrayidx6.us = getelementptr inbounds [8 x i8], [8 x i8]* %a, i64 %indvars.iv26, i64 %indvars.iv
-  %0 = load i8* %arrayidx6.us, align 1
+  %0 = load i8, i8* %arrayidx6.us, align 1
   %idxprom7.us = zext i8 %0 to i64
   %arrayidx8.us = getelementptr inbounds i8, i8* %box, i64 %idxprom7.us
-  %1 = load i8* %arrayidx8.us, align 1
+  %1 = load i8, i8* %arrayidx8.us, align 1
   store i8 %1, i8* %arrayidx6.us, align 1
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %2 = trunc i64 %indvars.iv.next to i32

Modified: llvm/trunk/test/CodeGen/AArch64/f16-convert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/f16-convert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/f16-convert.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/f16-convert.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define float @load0(i16* nocapture reado
 ; CHECK-NEXT: fcvt s0, [[HREG]]
 ; CHECK-NEXT: ret
 
-  %tmp = load i16* %a, align 2
+  %tmp = load i16, i16* %a, align 2
   %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
   ret float %tmp1
 }
@@ -17,7 +17,7 @@ define double @load1(i16* nocapture read
 ; CHECK-NEXT: fcvt d0, [[HREG]]
 ; CHECK-NEXT: ret
 
-  %tmp = load i16* %a, align 2
+  %tmp = load i16, i16* %a, align 2
   %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
   ret double %conv
 }
@@ -30,7 +30,7 @@ define float @load2(i16* nocapture reado
 
   %idxprom = sext i32 %i to i64
   %arrayidx = getelementptr inbounds i16, i16* %a, i64 %idxprom
-  %tmp = load i16* %arrayidx, align 2
+  %tmp = load i16, i16* %arrayidx, align 2
   %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
   ret float %tmp1
 }
@@ -43,7 +43,7 @@ define double @load3(i16* nocapture read
 
   %idxprom = sext i32 %i to i64
   %arrayidx = getelementptr inbounds i16, i16* %a, i64 %idxprom
-  %tmp = load i16* %arrayidx, align 2
+  %tmp = load i16, i16* %arrayidx, align 2
   %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
   ret double %conv
 }
@@ -55,7 +55,7 @@ define float @load4(i16* nocapture reado
 ; CHECK-NEXT: ret
 
   %arrayidx = getelementptr inbounds i16, i16* %a, i64 %i
-  %tmp = load i16* %arrayidx, align 2
+  %tmp = load i16, i16* %arrayidx, align 2
   %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
   ret float %tmp1
 }
@@ -67,7 +67,7 @@ define double @load5(i16* nocapture read
 ; CHECK-NEXT: ret
 
   %arrayidx = getelementptr inbounds i16, i16* %a, i64 %i
-  %tmp = load i16* %arrayidx, align 2
+  %tmp = load i16, i16* %arrayidx, align 2
   %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
   ret double %conv
 }
@@ -79,7 +79,7 @@ define float @load6(i16* nocapture reado
 ; CHECK-NEXT: ret
 
   %arrayidx = getelementptr inbounds i16, i16* %a, i64 10
-  %tmp = load i16* %arrayidx, align 2
+  %tmp = load i16, i16* %arrayidx, align 2
   %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
   ret float %tmp1
 }
@@ -91,7 +91,7 @@ define double @load7(i16* nocapture read
 ; CHECK-NEXT: ret
 
   %arrayidx = getelementptr inbounds i16, i16* %a, i64 10
-  %tmp = load i16* %arrayidx, align 2
+  %tmp = load i16, i16* %arrayidx, align 2
   %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
   ret double %conv
 }
@@ -103,7 +103,7 @@ define float @load8(i16* nocapture reado
 ; CHECK-NEXT: ret
 
   %arrayidx = getelementptr inbounds i16, i16* %a, i64 -10
-  %tmp = load i16* %arrayidx, align 2
+  %tmp = load i16, i16* %arrayidx, align 2
   %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
   ret float %tmp1
 }
@@ -115,7 +115,7 @@ define double @load9(i16* nocapture read
 ; CHECK-NEXT: ret
 
   %arrayidx = getelementptr inbounds i16, i16* %a, i64 -10
-  %tmp = load i16* %arrayidx, align 2
+  %tmp = load i16, i16* %arrayidx, align 2
   %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
   ret double %conv
 }

Modified: llvm/trunk/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fast-isel-addressing-modes.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fast-isel-addressing-modes.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fast-isel-addressing-modes.ll Fri Feb 27 15:17:42 2015
@@ -5,49 +5,49 @@
 define zeroext i1 @load_breg_i1(i1* %a) {
 ; CHECK-LABEL: load_breg_i1
 ; CHECK:       ldrb {{w[0-9]+}}, [x0]
-  %1 = load i1* %a
+  %1 = load i1, i1* %a
   ret i1 %1
 }
 
 define zeroext i8 @load_breg_i8(i8* %a) {
 ; CHECK-LABEL: load_breg_i8
 ; CHECK:       ldrb {{w[0-9]+}}, [x0]
-  %1 = load i8* %a
+  %1 = load i8, i8* %a
   ret i8 %1
 }
 
 define zeroext i16 @load_breg_i16(i16* %a) {
 ; CHECK-LABEL: load_breg_i16
 ; CHECK:       ldrh {{w[0-9]+}}, [x0]
-  %1 = load i16* %a
+  %1 = load i16, i16* %a
   ret i16 %1
 }
 
 define i32 @load_breg_i32(i32* %a) {
 ; CHECK-LABEL: load_breg_i32
 ; CHECK:       ldr {{w[0-9]+}}, [x0]
-  %1 = load i32* %a
+  %1 = load i32, i32* %a
   ret i32 %1
 }
 
 define i64 @load_breg_i64(i64* %a) {
 ; CHECK-LABEL: load_breg_i64
 ; CHECK:       ldr {{x[0-9]+}}, [x0]
-  %1 = load i64* %a
+  %1 = load i64, i64* %a
   ret i64 %1
 }
 
 define float @load_breg_f32(float* %a) {
 ; CHECK-LABEL: load_breg_f32
 ; CHECK:       ldr {{s[0-9]+}}, [x0]
-  %1 = load float* %a
+  %1 = load float, float* %a
   ret float %1
 }
 
 define double @load_breg_f64(double* %a) {
 ; CHECK-LABEL: load_breg_f64
 ; CHECK:       ldr {{d[0-9]+}}, [x0]
-  %1 = load double* %a
+  %1 = load double, double* %a
   ret double %1
 }
 
@@ -113,7 +113,7 @@ define i32 @load_immoff_1() {
 ; CHECK:       orr {{w|x}}[[REG:[0-9]+]], {{wzr|xzr}}, #0x80
 ; CHECK:       ldr {{w[0-9]+}}, {{\[}}x[[REG]]{{\]}}
   %1 = inttoptr i64 128 to i32*
-  %2 = load i32* %1
+  %2 = load i32, i32* %1
   ret i32 %2
 }
 
@@ -124,7 +124,7 @@ define i32 @load_breg_immoff_1(i64 %a) {
 ; CHECK:       ldur {{w[0-9]+}}, [x0, #-256]
   %1 = add i64 %a, -256
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   ret i32 %3
 }
 
@@ -135,7 +135,7 @@ define i32 @load_breg_immoff_2(i64 %a) {
 ; CHECK-NEXT:  ldr {{w[0-9]+}}, {{\[}}[[REG]]{{\]}}
   %1 = add i64 %a, -257
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   ret i32 %3
 }
 
@@ -145,7 +145,7 @@ define i32 @load_breg_immoff_3(i64 %a) {
 ; CHECK:       ldur {{w[0-9]+}}, [x0, #255]
   %1 = add i64 %a, 255
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   ret i32 %3
 }
 
@@ -156,7 +156,7 @@ define i32 @load_breg_immoff_4(i64 %a) {
 ; CHECK-NEXT:  ldr {{w[0-9]+}}, {{\[}}[[REG]]{{\]}}
   %1 = add i64 %a, 257
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   ret i32 %3
 }
 
@@ -166,7 +166,7 @@ define i32 @load_breg_immoff_5(i64 %a) {
 ; CHECK:       ldr {{w[0-9]+}}, [x0, #16380]
   %1 = add i64 %a, 16380
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   ret i32 %3
 }
 
@@ -180,7 +180,7 @@ define i32 @load_breg_immoff_6(i64 %a) {
 ; FAST-NEXT:  ldr {{w[0-9]+}}, {{\[}}[[REG]]{{\]}}
   %1 = add i64 %a, 16384
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   ret i32 %3
 }
 
@@ -255,7 +255,7 @@ define i64 @load_breg_immoff_7(i64 %a) {
 ; CHECK:       ldr {{x[0-9]+}}, [x0, #48]
   %1 = add i64 %a, 48
   %2 = inttoptr i64 %1 to i64*
-  %3 = load i64* %2
+  %3 = load i64, i64* %2
   ret i64 %3
 }
 
@@ -265,7 +265,7 @@ define i64 @load_breg_immoff_8(i64 %a) {
 ; CHECK:       ldr {{x[0-9]+}}, [x0, #48]
   %1 = add i64 48, %a
   %2 = inttoptr i64 %1 to i64*
-  %3 = load i64* %2
+  %3 = load i64, i64* %2
   ret i64 %3
 }
 
@@ -275,7 +275,7 @@ define i64 @load_breg_offreg_1(i64 %a, i
 ; CHECK:       ldr {{x[0-9]+}}, [x0, x1]
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i64*
-  %3 = load i64* %2
+  %3 = load i64, i64* %2
   ret i64 %3
 }
 
@@ -285,7 +285,7 @@ define i64 @load_breg_offreg_2(i64 %a, i
 ; CHECK:       ldr {{x[0-9]+}}, [x1, x0]
   %1 = add i64 %b, %a
   %2 = inttoptr i64 %1 to i64*
-  %3 = load i64* %2
+  %3 = load i64, i64* %2
   ret i64 %3
 }
 
@@ -297,7 +297,7 @@ define i64 @load_breg_offreg_immoff_1(i6
   %1 = add i64 %a, %b
   %2 = add i64 %1, 48
   %3 = inttoptr i64 %2 to i64*
-  %4 = load i64* %3
+  %4 = load i64, i64* %3
   ret i64 %4
 }
 
@@ -312,7 +312,7 @@ define i64 @load_breg_offreg_immoff_2(i6
   %1 = add i64 %a, %b
   %2 = add i64 %1, 61440
   %3 = inttoptr i64 %2 to i64*
-  %4 = load i64* %3
+  %4 = load i64, i64* %3
   ret i64 %4
 }
 
@@ -323,7 +323,7 @@ define i32 @load_shift_offreg_1(i64 %a)
 ; CHECK:       ldr {{w[0-9]+}}, {{\[}}[[REG]]{{\]}}
   %1 = shl i64 %a, 2
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   ret i32 %3
 }
 
@@ -333,7 +333,7 @@ define i32 @load_mul_offreg_1(i64 %a) {
 ; CHECK:       ldr {{w[0-9]+}}, {{\[}}[[REG]]{{\]}}
   %1 = mul i64 %a, 4
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   ret i32 %3
 }
 
@@ -344,7 +344,7 @@ define i32 @load_breg_shift_offreg_1(i64
   %1 = shl i64 %a, 2
   %2 = add i64 %1, %b
   %3 = inttoptr i64 %2 to i32*
-  %4 = load i32* %3
+  %4 = load i32, i32* %3
   ret i32 %4
 }
 
@@ -354,7 +354,7 @@ define i32 @load_breg_shift_offreg_2(i64
   %1 = shl i64 %a, 2
   %2 = add i64 %b, %1
   %3 = inttoptr i64 %2 to i32*
-  %4 = load i32* %3
+  %4 = load i32, i32* %3
   ret i32 %4
 }
 
@@ -369,7 +369,7 @@ define i32 @load_breg_shift_offreg_3(i64
   %2 = shl i64 %b, 2
   %3 = add i64 %1, %2
   %4 = inttoptr i64 %3 to i32*
-  %5 = load i32* %4
+  %5 = load i32, i32* %4
   ret i32 %5
 }
 
@@ -384,7 +384,7 @@ define i32 @load_breg_shift_offreg_4(i64
   %2 = shl i64 %b, 2
   %3 = add i64 %2, %1
   %4 = inttoptr i64 %3 to i32*
-  %5 = load i32* %4
+  %5 = load i32, i32* %4
   ret i32 %5
 }
 
@@ -399,7 +399,7 @@ define i32 @load_breg_shift_offreg_5(i64
   %2 = shl i64 %b, 3
   %3 = add i64 %1, %2
   %4 = inttoptr i64 %3 to i32*
-  %5 = load i32* %4
+  %5 = load i32, i32* %4
   ret i32 %5
 }
 
@@ -409,7 +409,7 @@ define i32 @load_breg_mul_offreg_1(i64 %
   %1 = mul i64 %a, 4
   %2 = add i64 %1, %b
   %3 = inttoptr i64 %2 to i32*
-  %4 = load i32* %3
+  %4 = load i32, i32* %3
   ret i32 %4
 }
 
@@ -419,7 +419,7 @@ define zeroext i8 @load_breg_and_offreg_
   %1 = and i64 %a, 4294967295
   %2 = add i64 %1, %b
   %3 = inttoptr i64 %2 to i8*
-  %4 = load i8* %3
+  %4 = load i8, i8* %3
   ret i8 %4
 }
 
@@ -430,7 +430,7 @@ define zeroext i16 @load_breg_and_offreg
   %2 = shl i64 %1, 1
   %3 = add i64 %2, %b
   %4 = inttoptr i64 %3 to i16*
-  %5 = load i16* %4
+  %5 = load i16, i16* %4
   ret i16 %5
 }
 
@@ -441,7 +441,7 @@ define i32 @load_breg_and_offreg_3(i64 %
   %2 = shl i64 %1, 2
   %3 = add i64 %2, %b
   %4 = inttoptr i64 %3 to i32*
-  %5 = load i32* %4
+  %5 = load i32, i32* %4
   ret i32 %5
 }
 
@@ -452,7 +452,7 @@ define i64 @load_breg_and_offreg_4(i64 %
   %2 = shl i64 %1, 3
   %3 = add i64 %2, %b
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -464,7 +464,7 @@ define i64 @load_breg_and_offreg_5(i64 %
   %1 = and i64 %a, %c
   %2 = add i64 %1, %b
   %3 = inttoptr i64 %2 to i64*
-  %4 = load i64* %3
+  %4 = load i64, i64* %3
   ret i64 %4
 }
 
@@ -476,7 +476,7 @@ define i64 @load_breg_and_offreg_6(i64 %
   %2 = shl i64 %1, 3
   %3 = add i64 %2, %b
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -488,7 +488,7 @@ define i32 @load_breg_zext_shift_offreg_
   %2 = shl i64 %1, 2
   %3 = add i64 %2, %b
   %4 = inttoptr i64 %3 to i32*
-  %5 = load i32* %4
+  %5 = load i32, i32* %4
   ret i32 %5
 }
 
@@ -499,7 +499,7 @@ define i32 @load_breg_zext_shift_offreg_
   %2 = shl i64 %1, 2
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i32*
-  %5 = load i32* %4
+  %5 = load i32, i32* %4
   ret i32 %5
 }
 
@@ -510,7 +510,7 @@ define i32 @load_breg_zext_mul_offreg_1(
   %2 = mul i64 %1, 4
   %3 = add i64 %2, %b
   %4 = inttoptr i64 %3 to i32*
-  %5 = load i32* %4
+  %5 = load i32, i32* %4
   ret i32 %5
 }
 
@@ -521,7 +521,7 @@ define i32 @load_breg_sext_shift_offreg_
   %2 = shl i64 %1, 2
   %3 = add i64 %2, %b
   %4 = inttoptr i64 %3 to i32*
-  %5 = load i32* %4
+  %5 = load i32, i32* %4
   ret i32 %5
 }
 
@@ -532,7 +532,7 @@ define i32 @load_breg_sext_shift_offreg_
   %2 = shl i64 %1, 2
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i32*
-  %5 = load i32* %4
+  %5 = load i32, i32* %4
   ret i32 %5
 }
 
@@ -546,7 +546,7 @@ define i32 @load_breg_sext_shift_offreg_
   %3 = shl i64 %2, 2
   %4 = add i64 %b, %3
   %5 = inttoptr i64 %4 to i32*
-  %6 = load i32* %5
+  %6 = load i32, i32* %5
   ret i32 %6
 }
 
@@ -558,7 +558,7 @@ define i32 @load_breg_sext_mul_offreg_1(
   %2 = mul i64 %1, 4
   %3 = add i64 %2, %b
   %4 = inttoptr i64 %3 to i32*
-  %5 = load i32* %4
+  %5 = load i32, i32* %4
   ret i32 %5
 }
 
@@ -571,7 +571,7 @@ define i64 @load_sext_shift_offreg_imm1(
   %2 = shl i64 %1, 3
   %3 = add i64 %2, 8
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -585,7 +585,7 @@ define i64 @load_breg_sext_shift_offreg_
   %3 = add i64 %b, %2
   %4 = add i64 %3, 8
   %5 = inttoptr i64 %4 to i64*
-  %6 = load i64* %5
+  %6 = load i64, i64* %5
   ret i64 %6
 }
 
@@ -594,7 +594,7 @@ define i64 @kill_reg(i64 %a) {
   %1 = sub i64 %a, 8
   %2 = add i64 %1, 96
   %3 = inttoptr i64 %2 to i64*
-  %4 = load i64* %3
+  %4 = load i64, i64* %3
   %5 = add i64 %2, %4
   ret i64 %5
 }
@@ -621,7 +621,7 @@ define i32 @load_fi(i64 %i) {
   %3 = mul i64 %i, 4
   %4 = add i64 %2, %3
   %5 = inttoptr i64 %4 to i32*
-  %6 = load i32* %5, align 4
+  %6 = load i32, i32* %5, align 4
   ret i32 %6
 }
 

Modified: llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ define i64 @load_addr_shift_zext1(i32 %a
   %2 = shl i64 %1, 3
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -24,7 +24,7 @@ define i64 @load_addr_shift_zext2(i32 ze
   %2 = shl i64 %1, 3
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -35,7 +35,7 @@ define i64 @load_addr_shift_zext3(i32 si
   %2 = shl i64 %1, 3
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -46,7 +46,7 @@ define i64 @load_addr_shift_sext1(i32 %a
   %2 = shl i64 %1, 3
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -57,7 +57,7 @@ define i64 @load_addr_shift_sext2(i32 ze
   %2 = shl i64 %1, 3
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -68,7 +68,7 @@ define i64 @load_addr_shift_sext3(i32 si
   %2 = shl i64 %1, 3
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -82,7 +82,7 @@ define i64 @load_addr_mul_zext1(i32 %a,
   %2 = mul i64 %1, 8
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -93,7 +93,7 @@ define i64 @load_addr_mul_zext2(i32 zero
   %2 = mul i64 %1, 8
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -104,7 +104,7 @@ define i64 @load_addr_mul_zext3(i32 sign
   %2 = mul i64 %1, 8
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -115,7 +115,7 @@ define i64 @load_addr_mul_sext1(i32 %a,
   %2 = mul i64 %1, 8
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -126,7 +126,7 @@ define i64 @load_addr_mul_sext2(i32 zero
   %2 = mul i64 %1, 8
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -137,7 +137,7 @@ define i64 @load_addr_mul_sext3(i32 sign
   %2 = mul i64 %1, 8
   %3 = add i64 %b, %2
   %4 = inttoptr i64 %3 to i64*
-  %5 = load i64* %4
+  %5 = load i64, i64* %4
   ret i64 %5
 }
 
@@ -153,7 +153,7 @@ define i32 @load_unscaled_zext_i8_to_i32
 ; CHECK-NOT:   uxtb
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   %4 = zext i8 %3 to i32
   ret i32 %4
 }
@@ -164,7 +164,7 @@ define i32 @load_unscaled_zext_i16_to_i3
 ; CHECK-NOT:   uxth
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   %4 = zext i16 %3 to i32
   ret i32 %4
 }
@@ -175,7 +175,7 @@ define i64 @load_unscaled_zext_i8_to_i64
 ; CHECK-NOT:   uxtb
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   %4 = zext i8 %3 to i64
   ret i64 %4
 }
@@ -186,7 +186,7 @@ define i64 @load_unscaled_zext_i16_to_i6
 ; CHECK-NOT:   uxth
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   %4 = zext i16 %3 to i64
   ret i64 %4
 }
@@ -197,7 +197,7 @@ define i64 @load_unscaled_zext_i32_to_i6
 ; CHECK-NOT:   uxtw
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   %4 = zext i32 %3 to i64
   ret i64 %4
 }
@@ -208,7 +208,7 @@ define i32 @load_unscaled_sext_i8_to_i32
 ; CHECK-NOT:   sxtb
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   %4 = sext i8 %3 to i32
   ret i32 %4
 }
@@ -219,7 +219,7 @@ define i32 @load_unscaled_sext_i16_to_i3
 ; CHECK-NOT:   sxth
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   %4 = sext i16 %3 to i32
   ret i32 %4
 }
@@ -230,7 +230,7 @@ define i64 @load_unscaled_sext_i8_to_i64
 ; CHECK-NOT:   sxtb
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   %4 = sext i8 %3 to i64
   ret i64 %4
 }
@@ -241,7 +241,7 @@ define i64 @load_unscaled_sext_i16_to_i6
 ; CHECK-NOT:   sxth
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   %4 = sext i16 %3 to i64
   ret i64 %4
 }
@@ -252,7 +252,7 @@ define i64 @load_unscaled_sext_i32_to_i6
 ; CHECK-NOT:   sxtw
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   %4 = sext i32 %3 to i64
   ret i64 %4
 }
@@ -264,7 +264,7 @@ define i32 @load_register_zext_i8_to_i32
 ; CHECK-NOT:   uxtb
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   %4 = zext i8 %3 to i32
   ret i32 %4
 }
@@ -275,7 +275,7 @@ define i32 @load_register_zext_i16_to_i3
 ; CHECK-NOT:   uxth
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   %4 = zext i16 %3 to i32
   ret i32 %4
 }
@@ -286,7 +286,7 @@ define i64 @load_register_zext_i8_to_i64
 ; CHECK-NOT:   uxtb
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   %4 = zext i8 %3 to i64
   ret i64 %4
 }
@@ -297,7 +297,7 @@ define i64 @load_register_zext_i16_to_i6
 ; CHECK-NOT:   uxth
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   %4 = zext i16 %3 to i64
   ret i64 %4
 }
@@ -308,7 +308,7 @@ define i64 @load_register_zext_i32_to_i6
 ; CHECK-NOT:   uxtw
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   %4 = zext i32 %3 to i64
   ret i64 %4
 }
@@ -319,7 +319,7 @@ define i32 @load_register_sext_i8_to_i32
 ; CHECK-NOT:   sxtb
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   %4 = sext i8 %3 to i32
   ret i32 %4
 }
@@ -330,7 +330,7 @@ define i32 @load_register_sext_i16_to_i3
 ; CHECK-NOT:   sxth
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   %4 = sext i16 %3 to i32
   ret i32 %4
 }
@@ -341,7 +341,7 @@ define i64 @load_register_sext_i8_to_i64
 ; CHECK-NOT:   sxtb
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   %4 = sext i8 %3 to i64
   ret i64 %4
 }
@@ -352,7 +352,7 @@ define i64 @load_register_sext_i16_to_i6
 ; CHECK-NOT:   sxth
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   %4 = sext i16 %3 to i64
   ret i64 %4
 }
@@ -363,7 +363,7 @@ define i64 @load_register_sext_i32_to_i6
 ; CHECK-NOT:   sxtw
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   %4 = sext i32 %3 to i64
   ret i64 %4
 }
@@ -376,7 +376,7 @@ define i32 @load_extend_zext_i8_to_i32(i
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i8*
-  %4 = load i8* %3
+  %4 = load i8, i8* %3
   %5 = zext i8 %4 to i32
   ret i32 %5
 }
@@ -388,7 +388,7 @@ define i32 @load_extend_zext_i16_to_i32(
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i16*
-  %4 = load i16* %3
+  %4 = load i16, i16* %3
   %5 = zext i16 %4 to i32
   ret i32 %5
 }
@@ -400,7 +400,7 @@ define i64 @load_extend_zext_i8_to_i64(i
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i8*
-  %4 = load i8* %3
+  %4 = load i8, i8* %3
   %5 = zext i8 %4 to i64
   ret i64 %5
 }
@@ -412,7 +412,7 @@ define i64 @load_extend_zext_i16_to_i64(
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i16*
-  %4 = load i16* %3
+  %4 = load i16, i16* %3
   %5 = zext i16 %4 to i64
   ret i64 %5
 }
@@ -424,7 +424,7 @@ define i64 @load_extend_zext_i32_to_i64(
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i32*
-  %4 = load i32* %3
+  %4 = load i32, i32* %3
   %5 = zext i32 %4 to i64
   ret i64 %5
 }
@@ -436,7 +436,7 @@ define i32 @load_extend_sext_i8_to_i32(i
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i8*
-  %4 = load i8* %3
+  %4 = load i8, i8* %3
   %5 = sext i8 %4 to i32
   ret i32 %5
 }
@@ -448,7 +448,7 @@ define i32 @load_extend_sext_i16_to_i32(
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i16*
-  %4 = load i16* %3
+  %4 = load i16, i16* %3
   %5 = sext i16 %4 to i32
   ret i32 %5
 }
@@ -460,7 +460,7 @@ define i64 @load_extend_sext_i8_to_i64(i
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i8*
-  %4 = load i8* %3
+  %4 = load i8, i8* %3
   %5 = sext i8 %4 to i64
   ret i64 %5
 }
@@ -472,7 +472,7 @@ define i64 @load_extend_sext_i16_to_i64(
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i16*
-  %4 = load i16* %3
+  %4 = load i16, i16* %3
   %5 = sext i16 %4 to i64
   ret i64 %5
 }
@@ -484,7 +484,7 @@ define i64 @load_extend_sext_i32_to_i64(
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i32*
-  %4 = load i32* %3
+  %4 = load i32, i32* %3
   %5 = sext i32 %4 to i64
   ret i64 %5
 }

Modified: llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext2.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext2.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ define i32 @load_unscaled_zext_i8_to_i32
 ; CHECK-NOT:   uxtb
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   br label %bb2
 
 bb2:
@@ -25,7 +25,7 @@ define i32 @load_unscaled_zext_i16_to_i3
 ; CHECK-NOT:   uxth
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   br label %bb2
 
 bb2:
@@ -39,7 +39,7 @@ define i64 @load_unscaled_zext_i8_to_i64
 ; CHECK-NOT:   uxtb
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   br label %bb2
 
 bb2:
@@ -53,7 +53,7 @@ define i64 @load_unscaled_zext_i16_to_i6
 ; CHECK-NOT:   uxth
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   br label %bb2
 
 bb2:
@@ -67,7 +67,7 @@ define i64 @load_unscaled_zext_i32_to_i6
 ; CHECK-NOT:   uxtw
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   br label %bb2
 
 bb2:
@@ -81,7 +81,7 @@ define i32 @load_unscaled_sext_i8_to_i32
 ; CHECK-NOT:   sxtb
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   br label %bb2
 
 bb2:
@@ -95,7 +95,7 @@ define i32 @load_unscaled_sext_i16_to_i3
 ; CHECK-NOT:   sxth
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   br label %bb2
 
 bb2:
@@ -109,7 +109,7 @@ define i64 @load_unscaled_sext_i8_to_i64
 ; CHECK-NOT:   sxtb
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   br label %bb2
 
 bb2:
@@ -123,7 +123,7 @@ define i64 @load_unscaled_sext_i16_to_i6
 ; CHECK-NOT:   sxth
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   br label %bb2
 
 bb2:
@@ -137,7 +137,7 @@ define i64 @load_unscaled_sext_i32_to_i6
 ; CHECK-NOT:   sxtw
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   br label %bb2
 
 bb2:
@@ -152,7 +152,7 @@ define i32 @load_register_zext_i8_to_i32
 ; CHECK-NOT:   uxtb
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   br label %bb2
 
 bb2:
@@ -166,7 +166,7 @@ define i32 @load_register_zext_i16_to_i3
 ; CHECK-NOT:   uxth
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   br label %bb2
 
 bb2:
@@ -180,7 +180,7 @@ define i64 @load_register_zext_i8_to_i64
 ; CHECK-NOT:   uxtb
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   br label %bb2
 
 bb2:
@@ -194,7 +194,7 @@ define i64 @load_register_zext_i16_to_i6
 ; CHECK-NOT:   uxth
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   br label %bb2
 
 bb2:
@@ -208,7 +208,7 @@ define i64 @load_register_zext_i32_to_i6
 ; CHECK-NOT:   uxtw
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   br label %bb2
 
 bb2:
@@ -222,7 +222,7 @@ define i32 @load_register_sext_i8_to_i32
 ; CHECK-NOT:   sxtb
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   br label %bb2
 
 bb2:
@@ -236,7 +236,7 @@ define i32 @load_register_sext_i16_to_i3
 ; CHECK-NOT:   sxth
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   br label %bb2
 
 bb2:
@@ -250,7 +250,7 @@ define i64 @load_register_sext_i8_to_i64
 ; CHECK-NOT:   sxtb
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i8*
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   br label %bb2
 
 bb2:
@@ -264,7 +264,7 @@ define i64 @load_register_sext_i16_to_i6
 ; CHECK-NOT:   sxth
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i16*
-  %3 = load i16* %2
+  %3 = load i16, i16* %2
   br label %bb2
 
 bb2:
@@ -278,7 +278,7 @@ define i64 @load_register_sext_i32_to_i6
 ; CHECK-NOT:   sxtw
   %1 = add i64 %a, %b
   %2 = inttoptr i64 %1 to i32*
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   br label %bb2
 
 bb2:
@@ -294,7 +294,7 @@ define i32 @load_extend_zext_i8_to_i32(i
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i8*
-  %4 = load i8* %3
+  %4 = load i8, i8* %3
   br label %bb2
 
 bb2:
@@ -309,7 +309,7 @@ define i32 @load_extend_zext_i16_to_i32(
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i16*
-  %4 = load i16* %3
+  %4 = load i16, i16* %3
   br label %bb2
 
 bb2:
@@ -324,7 +324,7 @@ define i64 @load_extend_zext_i8_to_i64(i
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i8*
-  %4 = load i8* %3
+  %4 = load i8, i8* %3
   br label %bb2
 
 bb2:
@@ -339,7 +339,7 @@ define i64 @load_extend_zext_i16_to_i64(
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i16*
-  %4 = load i16* %3
+  %4 = load i16, i16* %3
   br label %bb2
 
 bb2:
@@ -354,7 +354,7 @@ define i64 @load_extend_zext_i32_to_i64(
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i32*
-  %4 = load i32* %3
+  %4 = load i32, i32* %3
   br label %bb2
 
 bb2:
@@ -369,7 +369,7 @@ define i32 @load_extend_sext_i8_to_i32(i
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i8*
-  %4 = load i8* %3
+  %4 = load i8, i8* %3
   br label %bb2
 
 bb2:
@@ -384,7 +384,7 @@ define i32 @load_extend_sext_i16_to_i32(
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i16*
-  %4 = load i16* %3
+  %4 = load i16, i16* %3
   br label %bb2
 
 bb2:
@@ -399,7 +399,7 @@ define i64 @load_extend_sext_i8_to_i64(i
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i8*
-  %4 = load i8* %3
+  %4 = load i8, i8* %3
   br label %bb2
 
 bb2:
@@ -414,7 +414,7 @@ define i64 @load_extend_sext_i16_to_i64(
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i16*
-  %4 = load i16* %3
+  %4 = load i16, i16* %3
   br label %bb2
 
 bb2:
@@ -429,7 +429,7 @@ define i64 @load_extend_sext_i32_to_i64(
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
   %3 = inttoptr i64 %2 to i32*
-  %4 = load i32* %3
+  %4 = load i32, i32* %3
   br label %bb2
 
 bb2:

Modified: llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext3.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext3.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ define i32 @load_unscaled_zext_i8_to_i32
 ; CHECK:       uxtb w0, [[REG]]
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i8 addrspace(256)*
-  %3 = load i8 addrspace(256)* %2
+  %3 = load i8, i8 addrspace(256)* %2
   %4 = zext i8 %3 to i32
   ret i32 %4
 }
@@ -22,7 +22,7 @@ define i32 @load_unscaled_zext_i16_to_i3
 ; CHECK:       uxth w0, [[REG]]
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i16 addrspace(256)*
-  %3 = load i16 addrspace(256)* %2
+  %3 = load i16, i16 addrspace(256)* %2
   %4 = zext i16 %3 to i32
   ret i32 %4
 }
@@ -33,7 +33,7 @@ define i64 @load_unscaled_zext_i8_to_i64
 ; CHECK:       ubfx x0, x[[REG]], #0, #8
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i8 addrspace(256)*
-  %3 = load i8 addrspace(256)* %2
+  %3 = load i8, i8 addrspace(256)* %2
   %4 = zext i8 %3 to i64
   ret i64 %4
 }
@@ -44,7 +44,7 @@ define i64 @load_unscaled_zext_i16_to_i6
 ; CHECK:       ubfx x0, x[[REG]], #0, #16
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i16 addrspace(256)*
-  %3 = load i16 addrspace(256)* %2
+  %3 = load i16, i16 addrspace(256)* %2
   %4 = zext i16 %3 to i64
   ret i64 %4
 }
@@ -55,7 +55,7 @@ define i64 @load_unscaled_zext_i32_to_i6
 ; CHECK:       ubfx x0, x[[REG]], #0, #32
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i32 addrspace(256)*
-  %3 = load i32 addrspace(256)* %2
+  %3 = load i32, i32 addrspace(256)* %2
   %4 = zext i32 %3 to i64
   ret i64 %4
 }
@@ -66,7 +66,7 @@ define i32 @load_unscaled_sext_i8_to_i32
 ; CHECK:       sxtb w0, [[REG]]
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i8 addrspace(256)*
-  %3 = load i8 addrspace(256)* %2
+  %3 = load i8, i8 addrspace(256)* %2
   %4 = sext i8 %3 to i32
   ret i32 %4
 }
@@ -77,7 +77,7 @@ define i32 @load_unscaled_sext_i16_to_i3
 ; CHECK:       sxth w0, [[REG]]
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i16 addrspace(256)*
-  %3 = load i16 addrspace(256)* %2
+  %3 = load i16, i16 addrspace(256)* %2
   %4 = sext i16 %3 to i32
   ret i32 %4
 }
@@ -88,7 +88,7 @@ define i64 @load_unscaled_sext_i8_to_i64
 ; CHECK:       sxtb x0, [[REG]]
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i8 addrspace(256)*
-  %3 = load i8 addrspace(256)* %2
+  %3 = load i8, i8 addrspace(256)* %2
   %4 = sext i8 %3 to i64
   ret i64 %4
 }
@@ -99,7 +99,7 @@ define i64 @load_unscaled_sext_i16_to_i6
 ; CHECK:       sxth x0, [[REG]]
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i16 addrspace(256)*
-  %3 = load i16 addrspace(256)* %2
+  %3 = load i16, i16 addrspace(256)* %2
   %4 = sext i16 %3 to i64
   ret i64 %4
 }
@@ -110,7 +110,7 @@ define i64 @load_unscaled_sext_i32_to_i6
 ; CHECK:       sxtw x0, [[REG]]
   %1 = sub i64 %a, 8
   %2 = inttoptr i64 %1 to i32 addrspace(256)*
-  %3 = load i32 addrspace(256)* %2
+  %3 = load i32, i32 addrspace(256)* %2
   %4 = sext i32 %3 to i64
   ret i64 %4
 }

Modified: llvm/trunk/test/CodeGen/AArch64/floatdp_1source.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/floatdp_1source.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/floatdp_1source.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/floatdp_1source.ll Fri Feb 27 15:17:42 2015
@@ -27,7 +27,7 @@ declare double @nearbyint(double) readon
 
 define void @simple_float() {
 ; CHECK-LABEL: simple_float:
-  %val1 = load volatile float* @varfloat
+  %val1 = load volatile float, float* @varfloat
 
   %valabs = call float @fabsf(float %val1)
   store volatile float %valabs, float* @varfloat
@@ -66,7 +66,7 @@ define void @simple_float() {
 
 define void @simple_double() {
 ; CHECK-LABEL: simple_double:
-  %val1 = load volatile double* @vardouble
+  %val1 = load volatile double, double* @vardouble
 
   %valabs = call double @fabs(double %val1)
   store volatile double %valabs, double* @vardouble
@@ -106,9 +106,9 @@ define void @simple_double() {
 define void @converts() {
 ; CHECK-LABEL: converts:
 
-  %val16 = load volatile half* @varhalf
-  %val32 = load volatile float* @varfloat
-  %val64 = load volatile double* @vardouble
+  %val16 = load volatile half, half* @varhalf
+  %val32 = load volatile float, float* @varfloat
+  %val64 = load volatile double, double* @vardouble
 
   %val16to32 = fpext half %val16 to float
   store volatile float %val16to32, float* @varfloat

Modified: llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 
 define void @testfloat() {
 ; CHECK-LABEL: testfloat:
-  %val1 = load float* @varfloat
+  %val1 = load float, float* @varfloat
 
   %val2 = fadd float %val1, %val1
 ; CHECK: fadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
@@ -33,7 +33,7 @@ define void @testfloat() {
 
 define void @testdouble() {
 ; CHECK-LABEL: testdouble:
-  %val1 = load double* @vardouble
+  %val1 = load double, double* @vardouble
 
   %val2 = fadd double %val1, %val1
 ; CHECK: fadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}

Modified: llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ define fp128 @test_folding() {
 ; CHECK-LABEL: test_folding:
   %l = alloca i32
   store i32 42, i32* %l
-  %val = load i32* %l
+  %val = load i32, i32* %l
   %fpval = sitofp i32 %val to fp128
   ; If the value is loaded from a constant pool into an fp128, it's been folded
   ; successfully.

Modified: llvm/trunk/test/CodeGen/AArch64/fp16-instructions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp16-instructions.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp16-instructions.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp16-instructions.ll Fri Feb 27 15:17:42 2015
@@ -52,7 +52,7 @@ define half @load_h(half* %a) {
 entry:
 ; CHECK-LABEL: load_h:
 ; CHECK: ldr h0, [x0]
-  %0 = load half* %a, align 4
+  %0 = load half, half* %a, align 4
   ret half %0
 }
 

Modified: llvm/trunk/test/CodeGen/AArch64/fp16-v4-instructions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp16-v4-instructions.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp16-v4-instructions.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp16-v4-instructions.ll Fri Feb 27 15:17:42 2015
@@ -52,7 +52,7 @@ define <4 x half> @load_h(<4 x half>* %a
 entry:
 ; CHECK-LABEL: load_h:
 ; CHECK: ldr d0, [x0]
-  %0 = load <4 x half>* %a, align 4
+  %0 = load <4 x half>, <4 x half>* %a, align 4
   ret <4 x half> %0
 }
 

Modified: llvm/trunk/test/CodeGen/AArch64/fp16-v8-instructions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp16-v8-instructions.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp16-v8-instructions.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp16-v8-instructions.ll Fri Feb 27 15:17:42 2015
@@ -164,7 +164,7 @@ define <8 x half> @load_h(<8 x half>* %a
 entry:
 ; CHECK-LABEL: load_h:
 ; CHECK: ldr q0, [x0]
-  %0 = load <8 x half>* %a, align 4
+  %0 = load <8 x half>, <8 x half>* %a, align 4
   ret <8 x half> %0
 }
 

Modified: llvm/trunk/test/CodeGen/AArch64/fp16-vector-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp16-vector-load-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp16-vector-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp16-vector-load-store.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ define <4 x half> @load_64(<4 x half>* n
 ; CHECK-LABEL: load_64:
 ; CHECK: ldr d0, [x0]
 entry:
-  %0 = load <4 x half>* %a, align 8
+  %0 = load <4 x half>, <4 x half>* %a, align 8
   ret <4 x half> %0
 }
 
@@ -14,7 +14,7 @@ define <8 x half> @load_128(<8 x half>*
 ; CHECK-LABEL: load_128:
 ; CHECK: ldr q0, [x0]
 entry:
-  %0 = load <8 x half>* %a, align 16
+  %0 = load <8 x half>, <8 x half>* %a, align 16
   ret <8 x half> %0
 }
 
@@ -23,7 +23,7 @@ define <4 x half> @load_dup_64(half* noc
 ; CHECK-LABEL: load_dup_64:
 ; CHECK: ld1r { v0.4h }, [x0]
 entry:
-  %0 = load half* %a, align 2
+  %0 = load half, half* %a, align 2
   %1 = insertelement <4 x half> undef, half %0, i32 0
   %2 = shufflevector <4 x half> %1, <4 x half> undef, <4 x i32> zeroinitializer
   ret <4 x half> %2
@@ -34,7 +34,7 @@ define <8 x half> @load_dup_128(half* no
 ; CHECK-LABEL: load_dup_128:
 ; CHECK: ld1r { v0.8h }, [x0]
 entry:
-  %0 = load half* %a, align 2
+  %0 = load half, half* %a, align 2
   %1 = insertelement <8 x half> undef, half %0, i32 0
   %2 = shufflevector <8 x half> %1, <8 x half> undef, <8 x i32> zeroinitializer
   ret <8 x half> %2
@@ -45,7 +45,7 @@ define <4 x half> @load_lane_64(half* no
 ; CHECK-LABEL: load_lane_64:
 ; CHECK: ld1 { v0.h }[2], [x0]
 entry:
-  %0 = load half* %a, align 2
+  %0 = load half, half* %a, align 2
   %1 = insertelement <4 x half> %b, half %0, i32 2
   ret <4 x half> %1
 }
@@ -55,7 +55,7 @@ define <8 x half> @load_lane_128(half* n
 ; CHECK-LABEL: load_lane_128:
 ; CHECK: ld1 { v0.h }[5], [x0]
 entry:
-  %0 = load half* %a, align 2
+  %0 = load half, half* %a, align 2
   %1 = insertelement <8 x half> %b, half %0, i32 5
   ret <8 x half> %1
 }

Modified: llvm/trunk/test/CodeGen/AArch64/fpimm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fpimm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fpimm.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fpimm.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
 define void @check_float() {
 ; CHECK-LABEL: check_float:
 
-  %val = load float* @varf32
+  %val = load float, float* @varf32
   %newval1 = fadd float %val, 8.5
   store volatile float %newval1, float* @varf32
 ; CHECK-DAG: fmov [[EIGHT5:s[0-9]+]], #8.5
@@ -24,7 +24,7 @@ define void @check_float() {
 define void @check_double() {
 ; CHECK-LABEL: check_double:
 
-  %val = load double* @varf64
+  %val = load double, double* @varf64
   %newval1 = fadd double %val, 8.5
   store volatile double %newval1, double* @varf64
 ; CHECK-DAG: fmov {{d[0-9]+}}, #8.5

Modified: llvm/trunk/test/CodeGen/AArch64/free-zext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/free-zext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/free-zext.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/free-zext.ll Fri Feb 27 15:17:42 2015
@@ -5,9 +5,9 @@ define i64 @test_free_zext(i8* %a, i16*
 ; CHECK-DAG: ldrb w[[A:[0-9]+]], [x0]
 ; CHECK: ldrh w[[B:[0-9]+]], [x1]
 ; CHECK: add x0, x[[B]], x[[A]]
-  %1 = load i8* %a, align 1
+  %1 = load i8, i8* %a, align 1
   %conv = zext i8 %1 to i64
-  %2 = load i16* %b, align 2
+  %2 = load i16, i16* %b, align 2
   %conv1 = zext i16 %2 to i64
   %add = add nsw i64 %conv1, %conv
   ret i64 %add

Modified: llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll Fri Feb 27 15:17:42 2015
@@ -37,13 +37,13 @@ define void @take_struct(%myStruct* byva
     %addr0 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 2
     %addr1 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 0
 
-    %val0 = load volatile i32* %addr0
+    %val0 = load volatile i32, i32* %addr0
     ; Some weird move means x0 is used for one access
 ; CHECK: ldr [[REG32:w[0-9]+]], [{{x[0-9]+|sp}}, #12]
     store volatile i32 %val0, i32* @var32
 ; CHECK: str [[REG32]], [{{x[0-9]+}}, {{#?}}:lo12:var32]
 
-    %val1 = load volatile i64* %addr1
+    %val1 = load volatile i64, i64* %addr1
 ; CHECK: ldr [[REG64:x[0-9]+]], [{{x[0-9]+|sp}}]
     store volatile i64 %val1, i64* @var64
 ; CHECK: str [[REG64]], [{{x[0-9]+}}, {{#?}}:lo12:var64]
@@ -58,13 +58,13 @@ define void @check_byval_align(i32* byva
     %addr0 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 2
     %addr1 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 0
 
-    %val0 = load volatile i32* %addr0
+    %val0 = load volatile i32, i32* %addr0
     ; Some weird move means x0 is used for one access
 ; CHECK: ldr [[REG32:w[0-9]+]], [sp, #28]
     store i32 %val0, i32* @var32
 ; CHECK: str [[REG32]], [{{x[0-9]+}}, {{#?}}:lo12:var32]
 
-    %val1 = load volatile i64* %addr1
+    %val1 = load volatile i64, i64* %addr1
 ; CHECK: ldr [[REG64:x[0-9]+]], [sp, #16]
     store i64 %val1, i64* @var64
 ; CHECK: str [[REG64]], [{{x[0-9]+}}, {{#?}}:lo12:var64]
@@ -74,7 +74,7 @@ define void @check_byval_align(i32* byva
 
 define i32 @return_int() {
 ; CHECK-LABEL: return_int:
-    %val = load i32* @var32
+    %val = load i32, i32* @var32
     ret i32 %val
 ; CHECK: ldr w0, [{{x[0-9]+}}, {{#?}}:lo12:var32]
     ; Make sure epilogue follows
@@ -94,7 +94,7 @@ define double @return_double() {
 define [2 x i64] @return_struct() {
 ; CHECK-LABEL: return_struct:
     %addr = bitcast %myStruct* @varstruct to [2 x i64]*
-    %val = load [2 x i64]* %addr
+    %val = load [2 x i64], [2 x i64]* %addr
     ret [2 x i64] %val
 ; CHECK: add x[[VARSTRUCT:[0-9]+]], {{x[0-9]+}}, :lo12:varstruct
 ; CHECK: ldp x0, x1, [x[[VARSTRUCT]]]
@@ -130,7 +130,7 @@ define i32 @struct_on_stack(i8 %var0, i1
                           double %notstacked) {
 ; CHECK-LABEL: struct_on_stack:
     %addr = getelementptr %myStruct, %myStruct* %struct, i64 0, i32 0
-    %val64 = load volatile i64* %addr
+    %val64 = load volatile i64, i64* %addr
     store volatile i64 %val64, i64* @var64
     ; Currently nothing on local stack, so struct should be at sp
 ; CHECK: ldr [[VAL64:x[0-9]+]], [sp]
@@ -141,7 +141,7 @@ define i32 @struct_on_stack(i8 %var0, i1
 ; CHECK: str d0, [{{x[0-9]+}}, {{#?}}:lo12:vardouble
 ; CHECK-NOFP-NOT: str d0,
 
-    %retval = load volatile i32* %stacked
+    %retval = load volatile i32, i32* %stacked
     ret i32 %retval
 ; CHECK-LE: ldr w0, [sp, #16]
 }

Modified: llvm/trunk/test/CodeGen/AArch64/func-calls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/func-calls.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/func-calls.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/func-calls.ll Fri Feb 27 15:17:42 2015
@@ -21,15 +21,15 @@ declare void @take_floats(float %val1, f
 
 define void @simple_args() {
 ; CHECK-LABEL: simple_args:
-  %char1 = load i8* @var8
-  %char2 = load i8* @var8_2
+  %char1 = load i8, i8* @var8
+  %char2 = load i8, i8* @var8_2
   call void @take_i8s(i8 %char1, i8 %char2)
 ; CHECK-DAG: ldrb w0, [{{x[0-9]+}}, {{#?}}:lo12:var8]
 ; CHECK-DAG: ldrb w1, [{{x[0-9]+}}, {{#?}}:lo12:var8_2]
 ; CHECK: bl take_i8s
 
-  %float1 = load float* @varfloat
-  %float2 = load float* @varfloat_2
+  %float1 = load float, float* @varfloat
+  %float2 = load float, float* @varfloat_2
   call void @take_floats(float %float1, float %float2)
 ; CHECK-DAG: ldr s1, [{{x[0-9]+}}, {{#?}}:lo12:varfloat_2]
 ; CHECK-DAG: ldr s0, [{{x[0-9]+}}, {{#?}}:lo12:varfloat]
@@ -124,7 +124,7 @@ declare void @check_i128_regalign(i32 %v
 
 define void @check_i128_align() {
 ; CHECK-LABEL: check_i128_align:
-  %val = load i128* @var128
+  %val = load i128, i128* @var128
   call void @check_i128_stackalign(i32 0, i32 1, i32 2, i32 3,
                                    i32 4, i32 5, i32 6, i32 7,
                                    i32 42, i128 %val)
@@ -152,7 +152,7 @@ define void @check_i128_align() {
 
 define void @check_indirect_call() {
 ; CHECK-LABEL: check_indirect_call:
-  %func = load void()** @fptr
+  %func = load void()*, void()** @fptr
   call void %func()
 ; CHECK: ldr [[FPTR:x[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:fptr]
 ; CHECK: blr [[FPTR]]

Modified: llvm/trunk/test/CodeGen/AArch64/funcptr_cast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/funcptr_cast.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/funcptr_cast.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/funcptr_cast.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define i8 @test() {
 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, :lo12:foo
 ; CHECK: ldrb w0, [{{x[0-9]+}}]
 entry:
-  %0 = load i8* bitcast (void (...)* @foo to i8*), align 1
+  %0 = load i8, i8* bitcast (void (...)* @foo to i8*), align 1
   ret i8 %0
 }
 

Modified: llvm/trunk/test/CodeGen/AArch64/ghc-cc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ghc-cc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ghc-cc.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/ghc-cc.ll Fri Feb 27 15:17:42 2015
@@ -51,7 +51,7 @@ entry:
   ; CHECK-NEXT:  bl      bar_i64
   ; CHECK-NEXT:  ret
 
-  %0 = load i64* @base
+  %0 = load i64, i64* @base
   tail call ghccc void @bar_i64( i64 %0 ) nounwind
   ret void
 }
@@ -64,7 +64,7 @@ entry:
   ; CHECK-NEXT:  bl      bar_float
   ; CHECK-NEXT:  ret
 
-  %0 = load float* @f1
+  %0 = load float, float* @f1
   tail call ghccc void @bar_float( float %0 ) nounwind
   ret void
 }
@@ -77,7 +77,7 @@ entry:
   ; CHECK-NEXT:  bl      bar_double
   ; CHECK-NEXT:  ret
 
-  %0 = load double* @d1
+  %0 = load double, double* @d1
   tail call ghccc void @bar_double( double %0 ) nounwind
   ret void
 }

Modified: llvm/trunk/test/CodeGen/AArch64/global-alignment.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/global-alignment.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/global-alignment.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/global-alignment.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ define i64 @test_align32() {
 
   ; Since @var32 is only guaranteed to be aligned to 32-bits, it's invalid to
   ; emit an "LDR x0, [x0, #:lo12:var32] instruction to implement this load.
-  %val = load i64* %addr
+  %val = load i64, i64* %addr
 ; CHECK: adrp [[HIBITS:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], {{#?}}:lo12:var32
 ; CHECK: ldr x0, [x[[ADDR]]]
@@ -25,7 +25,7 @@ define i64 @test_align64() {
 
   ; However, var64 *is* properly aligned and emitting an adrp/add/ldr would be
   ; inefficient.
-  %val = load i64* %addr
+  %val = load i64, i64* %addr
 ; CHECK: adrp x[[HIBITS:[0-9]+]], var64
 ; CHECK-NOT: add x[[HIBITS]]
 ; CHECK: ldr x0, [x[[HIBITS]], {{#?}}:lo12:var64]
@@ -39,7 +39,7 @@ define i64 @test_var32_align64() {
 
   ; Since @var32 is only guaranteed to be aligned to 32-bits, it's invalid to
   ; emit an "LDR x0, [x0, #:lo12:var32] instruction to implement this load.
-  %val = load i64* %addr
+  %val = load i64, i64* %addr
 ; CHECK: adrp x[[HIBITS:[0-9]+]], var32_align64
 ; CHECK-NOT: add x[[HIBITS]]
 ; CHECK: ldr x0, [x[[HIBITS]], {{#?}}:lo12:var32_align64]
@@ -52,7 +52,7 @@ define i64 @test_var32_alias() {
   %addr = bitcast [3 x i32]* @alias to i64*
 
   ; Test that we can find the alignment for aliases.
-  %val = load i64* %addr
+  %val = load i64, i64* %addr
 ; CHECK: adrp x[[HIBITS:[0-9]+]], alias
 ; CHECK-NOT: add x[[HIBITS]]
 ; CHECK: ldr x0, [x[[HIBITS]], {{#?}}:lo12:alias]
@@ -68,7 +68,7 @@ define i64 @test_yet_another_var() {
   ; @yet_another_var has a preferred alignment of 8, but that's not enough if
   ; we're going to be linking against other things. Its ABI alignment is only 4
   ; so we can't fold the load.
-  %val = load i64* bitcast({i32, i32}* @yet_another_var to i64*)
+  %val = load i64, i64* bitcast({i32, i32}* @yet_another_var to i64*)
 ; CHECK: adrp [[HIBITS:x[0-9]+]], yet_another_var
 ; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], {{#?}}:lo12:yet_another_var
 ; CHECK: ldr x0, [x[[ADDR]]]

Modified: llvm/trunk/test/CodeGen/AArch64/global-merge-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/global-merge-4.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/global-merge-4.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/global-merge-4.ll Fri Feb 27 15:17:42 2015
@@ -36,24 +36,24 @@ declare i32 @calc(...)
 
 ; Function Attrs: nounwind ssp
 define internal void @calculate() #0 {
-  %1 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 0), align 4
-  %2 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 0), align 4
+  %1 = load i32, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 0), align 4
+  %2 = load i32, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 0), align 4
   %3 = mul nsw i32 %2, %1
   store i32 %3, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 0), align 4
-  %4 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 1), align 4
-  %5 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 1), align 4
+  %4 = load i32, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 1), align 4
+  %5 = load i32, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 1), align 4
   %6 = mul nsw i32 %5, %4
   store i32 %6, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 1), align 4
-  %7 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 2), align 4
-  %8 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 2), align 4
+  %7 = load i32, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 2), align 4
+  %8 = load i32, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 2), align 4
   %9 = mul nsw i32 %8, %7
   store i32 %9, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 2), align 4
-  %10 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 3), align 4
-  %11 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 3), align 4
+  %10 = load i32, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 3), align 4
+  %11 = load i32, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 3), align 4
   %12 = mul nsw i32 %11, %10
   store i32 %12, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 3), align 4
-  %13 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 4), align 4
-  %14 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 4), align 4
+  %13 = load i32, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 4), align 4
+  %14 = load i32, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 4), align 4
   %15 = mul nsw i32 %14, %13
   store i32 %15, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 4), align 4
   ret void

Modified: llvm/trunk/test/CodeGen/AArch64/half.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/half.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/half.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/half.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define void @test_load_store(half* %in,
 ; CHECK-LABEL: test_load_store:
 ; CHECK: ldr [[TMP:h[0-9]+]], [x0]
 ; CHECK: str [[TMP]], [x1]
-  %val = load half* %in
+  %val = load half, half* %in
   store half %val, half* %out
   ret void
 }
@@ -12,7 +12,7 @@ define void @test_load_store(half* %in,
 define i16 @test_bitcast_from_half(half* %addr) {
 ; CHECK-LABEL: test_bitcast_from_half:
 ; CHECK: ldrh w0, [x0]
-  %val = load half* %addr
+  %val = load half, half* %addr
   %val_int = bitcast half %val to i16
   ret i16 %val_int
 }
@@ -50,7 +50,7 @@ define float @test_extend32(half* %addr)
 ; CHECK-LABEL: test_extend32:
 ; CHECK: fcvt {{s[0-9]+}}, {{h[0-9]+}}
 
-  %val16 = load half* %addr
+  %val16 = load half, half* %addr
   %val32 = fpext half %val16 to float
   ret float %val32
 }
@@ -59,7 +59,7 @@ define double @test_extend64(half* %addr
 ; CHECK-LABEL: test_extend64:
 ; CHECK: fcvt {{d[0-9]+}}, {{h[0-9]+}}
 
-  %val16 = load half* %addr
+  %val16 = load half, half* %addr
   %val32 = fpext half %val16 to double
   ret double %val32
 }

Modified: llvm/trunk/test/CodeGen/AArch64/i1-contents.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/i1-contents.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/i1-contents.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/i1-contents.ll Fri Feb 27 15:17:42 2015
@@ -32,7 +32,7 @@ define i1 @produce_i1_ret() {
 ; CHECK-LABEL: produce_i1_ret:
 ; CHECK: ldr [[VAR32:w[0-9]+]], [{{x[0-9]+}}, :lo12:var]
 ; CHECK: and w0, [[VAR32]], #{{0x1|0xff}}
-  %val = load %big* @var
+  %val = load %big, %big* @var
   %val1 = trunc %big %val to i1
   ret i1 %val1
 }
@@ -42,7 +42,7 @@ define void @produce_i1_arg() {
 ; CHECK: ldr [[VAR32:w[0-9]+]], [{{x[0-9]+}}, :lo12:var]
 ; CHECK: and w0, [[VAR32]], #{{0x1|0xff}}
 ; CHECK: bl consume_i1_arg
-  %val = load %big* @var
+  %val = load %big, %big* @var
   %val1 = trunc %big %val to i1
   call void @consume_i1_arg(i1 %val1)
   ret void

Modified: llvm/trunk/test/CodeGen/AArch64/ldst-opt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ldst-opt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ldst-opt.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/ldst-opt.ll Fri Feb 27 15:17:42 2015
@@ -31,7 +31,7 @@ define void @load-pre-indexed-word(%stru
 ; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
   %a = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1, i32 0
-  %add = load i32* %a, align 4
+  %add = load i32, i32* %a, align 4
   br label %bar
 bar:
   %c = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1
@@ -59,7 +59,7 @@ define void @load-pre-indexed-doubleword
 ; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
   %a = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1, i32 0
-  %add = load i64* %a, align 4
+  %add = load i64, i64* %a, align 4
   br label %bar
 bar:
   %c = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1
@@ -87,7 +87,7 @@ define void @load-pre-indexed-quadword(%
 ; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
   %a = getelementptr inbounds %struct.quadword, %struct.quadword* %ptr, i64 0, i32 1, i32 0
-  %add = load fp128* %a, align 4
+  %add = load fp128, fp128* %a, align 4
   br label %bar
 bar:
   %c = getelementptr inbounds %struct.quadword, %struct.quadword* %ptr, i64 0, i32 1
@@ -115,7 +115,7 @@ define void @load-pre-indexed-float(%str
 ; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
   %a = getelementptr inbounds %struct.float, %struct.float* %ptr, i64 0, i32 1, i32 0
-  %add = load float* %a, align 4
+  %add = load float, float* %a, align 4
   br label %bar
 bar:
   %c = getelementptr inbounds %struct.float, %struct.float* %ptr, i64 0, i32 1
@@ -143,7 +143,7 @@ define void @load-pre-indexed-double(%st
 ; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
   %a = getelementptr inbounds %struct.double, %struct.double* %ptr, i64 0, i32 1, i32 0
-  %add = load double* %a, align 4
+  %add = load double, double* %a, align 4
   br label %bar
 bar:
   %c = getelementptr inbounds %struct.double, %struct.double* %ptr, i64 0, i32 1
@@ -186,7 +186,7 @@ define i32 @load-pre-indexed-word2(%pre.
 ; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}, #4]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i32** %this
+  %load1 = load %pre.struct.i32*, %pre.struct.i32** %this
   %gep1 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load1, i64 0, i32 1
   br label %return
 if.end:
@@ -194,7 +194,7 @@ if.end:
   br label %return
 return:
   %retptr = phi i32* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load i32* %retptr
+  %ret = load i32, i32* %retptr
   ret i32 %ret
 }
 
@@ -204,7 +204,7 @@ define i64 @load-pre-indexed-doubleword2
 ; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}, #8]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i64** %this
+  %load1 = load %pre.struct.i64*, %pre.struct.i64** %this
   %gep1 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load1, i64 0, i32 1
   br label %return
 if.end:
@@ -212,7 +212,7 @@ if.end:
   br label %return
 return:
   %retptr = phi i64* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load i64* %retptr
+  %ret = load i64, i64* %retptr
   ret i64 %ret
 }
 
@@ -222,7 +222,7 @@ define <2 x i64> @load-pre-indexed-quadw
 ; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}, #16]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i128** %this
+  %load1 = load %pre.struct.i128*, %pre.struct.i128** %this
   %gep1 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load1, i64 0, i32 1
   br label %return
 if.end:
@@ -230,7 +230,7 @@ if.end:
   br label %return
 return:
   %retptr = phi <2 x i64>* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load <2 x i64>* %retptr
+  %ret = load <2 x i64>, <2 x i64>* %retptr
   ret <2 x i64> %ret
 }
 
@@ -240,7 +240,7 @@ define float @load-pre-indexed-float2(%p
 ; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}, #4]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.float** %this
+  %load1 = load %pre.struct.float*, %pre.struct.float** %this
   %gep1 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load1, i64 0, i32 1
   br label %return
 if.end:
@@ -248,7 +248,7 @@ if.end:
   br label %return
 return:
   %retptr = phi float* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load float* %retptr
+  %ret = load float, float* %retptr
   ret float %ret
 }
 
@@ -258,7 +258,7 @@ define double @load-pre-indexed-double2(
 ; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}, #8]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.double** %this
+  %load1 = load %pre.struct.double*, %pre.struct.double** %this
   %gep1 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load1, i64 0, i32 1
   br label %return
 if.end:
@@ -266,7 +266,7 @@ if.end:
   br label %return
 return:
   %retptr = phi double* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load double* %retptr
+  %ret = load double, double* %retptr
   ret double %ret
 }
 
@@ -287,7 +287,7 @@ define void @store-pre-indexed-word2(%pr
 ; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}, #4]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i32** %this
+  %load1 = load %pre.struct.i32*, %pre.struct.i32** %this
   %gep1 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load1, i64 0, i32 1
   br label %return
 if.end:
@@ -306,7 +306,7 @@ define void @store-pre-indexed-doublewor
 ; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}, #8]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i64** %this
+  %load1 = load %pre.struct.i64*, %pre.struct.i64** %this
   %gep1 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load1, i64 0, i32 1
   br label %return
 if.end:
@@ -325,7 +325,7 @@ define void @store-pre-indexed-quadword2
 ; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}, #16]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i128** %this
+  %load1 = load %pre.struct.i128*, %pre.struct.i128** %this
   %gep1 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load1, i64 0, i32 1
   br label %return
 if.end:
@@ -344,7 +344,7 @@ define void @store-pre-indexed-float2(%p
 ; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}, #4]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.float** %this
+  %load1 = load %pre.struct.float*, %pre.struct.float** %this
   %gep1 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load1, i64 0, i32 1
   br label %return
 if.end:
@@ -363,7 +363,7 @@ define void @store-pre-indexed-double2(%
 ; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}, #8]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.double** %this
+  %load1 = load %pre.struct.double*, %pre.struct.double** %this
   %gep1 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load1, i64 0, i32 1
   br label %return
 if.end:
@@ -396,9 +396,9 @@ body:
   %iv2 = phi i32* [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
   %gep2 = getelementptr i32, i32* %iv2, i64 -1
-  %load = load i32* %gep2
+  %load = load i32, i32* %gep2
   call void @use-word(i32 %load)
-  %load2 = load i32* %iv2
+  %load2 = load i32, i32* %iv2
   call void @use-word(i32 %load2)
   %iv.next = add i64 %iv, -4
   %gep3 = getelementptr i32, i32* %iv2, i64 4
@@ -420,9 +420,9 @@ body:
   %iv2 = phi i64* [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
   %gep2 = getelementptr i64, i64* %iv2, i64 -1
-  %load = load i64* %gep2
+  %load = load i64, i64* %gep2
   call void @use-doubleword(i64 %load)
-  %load2 = load i64* %iv2
+  %load2 = load i64, i64* %iv2
   call void @use-doubleword(i64 %load2)
   %iv.next = add i64 %iv, -4
   %gep3 = getelementptr i64, i64* %iv2, i64 4
@@ -444,9 +444,9 @@ body:
   %iv2 = phi <2 x i64>* [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
   %gep2 = getelementptr <2 x i64>, <2 x i64>* %iv2, i64 -1
-  %load = load <2 x i64>* %gep2
+  %load = load <2 x i64>, <2 x i64>* %gep2
   call void @use-quadword(<2 x i64> %load)
-  %load2 = load <2 x i64>* %iv2
+  %load2 = load <2 x i64>, <2 x i64>* %iv2
   call void @use-quadword(<2 x i64> %load2)
   %iv.next = add i64 %iv, -4
   %gep3 = getelementptr <2 x i64>, <2 x i64>* %iv2, i64 4
@@ -468,9 +468,9 @@ body:
   %iv2 = phi float* [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
   %gep2 = getelementptr float, float* %iv2, i64 -1
-  %load = load float* %gep2
+  %load = load float, float* %gep2
   call void @use-float(float %load)
-  %load2 = load float* %iv2
+  %load2 = load float, float* %iv2
   call void @use-float(float %load2)
   %iv.next = add i64 %iv, -4
   %gep3 = getelementptr float, float* %iv2, i64 4
@@ -492,9 +492,9 @@ body:
   %iv2 = phi double* [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
   %gep2 = getelementptr double, double* %iv2, i64 -1
-  %load = load double* %gep2
+  %load = load double, double* %gep2
   call void @use-double(double %load)
-  %load2 = load double* %iv2
+  %load2 = load double, double* %iv2
   call void @use-double(double %load2)
   %iv.next = add i64 %iv, -4
   %gep3 = getelementptr double, double* %iv2, i64 4
@@ -526,7 +526,7 @@ body:
   %iv2 = phi i32* [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
   %gep2 = getelementptr i32, i32* %iv2, i64 -1
-  %load = load i32* %gep2
+  %load = load i32, i32* %gep2
   call void @use-word(i32 %load)
   store i32 %val, i32* %iv2
   %iv.next = add i64 %iv, -4
@@ -549,7 +549,7 @@ body:
   %iv2 = phi i64* [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
   %gep2 = getelementptr i64, i64* %iv2, i64 -1
-  %load = load i64* %gep2
+  %load = load i64, i64* %gep2
   call void @use-doubleword(i64 %load)
   store i64 %val, i64* %iv2
   %iv.next = add i64 %iv, -4
@@ -572,7 +572,7 @@ body:
   %iv2 = phi <2 x i64>* [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
   %gep2 = getelementptr <2 x i64>, <2 x i64>* %iv2, i64 -1
-  %load = load <2 x i64>* %gep2
+  %load = load <2 x i64>, <2 x i64>* %gep2
   call void @use-quadword(<2 x i64> %load)
   store <2 x i64> %val, <2 x i64>* %iv2
   %iv.next = add i64 %iv, -4
@@ -595,7 +595,7 @@ body:
   %iv2 = phi float* [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
   %gep2 = getelementptr float, float* %iv2, i64 -1
-  %load = load float* %gep2
+  %load = load float, float* %gep2
   call void @use-float(float %load)
   store float %val, float* %iv2
   %iv.next = add i64 %iv, -4
@@ -618,7 +618,7 @@ body:
   %iv2 = phi double* [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
   %gep2 = getelementptr double, double* %iv2, i64 -1
-  %load = load double* %gep2
+  %load = load double, double* %gep2
   call void @use-double(double %load)
   store double %val, double* %iv2
   %iv.next = add i64 %iv, -4
@@ -656,10 +656,10 @@ for.body:
   %phi2 = phi i32* [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
   %gep1 = getelementptr i32, i32* %phi1, i64 -1
-  %load1 = load i32* %gep1
+  %load1 = load i32, i32* %gep1
   %gep2 = getelementptr i32, i32* %phi2, i64 -1
   store i32 %load1, i32* %gep2
-  %load2 = load i32* %phi1
+  %load2 = load i32, i32* %phi1
   store i32 %load2, i32* %phi2
   %dec.i = add nsw i64 %i, -1
   %gep3 = getelementptr i32, i32* %phi2, i64 -2
@@ -680,10 +680,10 @@ for.body:
   %phi2 = phi i64* [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
   %gep1 = getelementptr i64, i64* %phi1, i64 -1
-  %load1 = load i64* %gep1
+  %load1 = load i64, i64* %gep1
   %gep2 = getelementptr i64, i64* %phi2, i64 -1
   store i64 %load1, i64* %gep2
-  %load2 = load i64* %phi1
+  %load2 = load i64, i64* %phi1
   store i64 %load2, i64* %phi2
   %dec.i = add nsw i64 %i, -1
   %gep3 = getelementptr i64, i64* %phi2, i64 -2
@@ -704,10 +704,10 @@ for.body:
   %phi2 = phi <2 x i64>* [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
   %gep1 = getelementptr <2 x i64>, <2 x i64>* %phi1, i64 -1
-  %load1 = load <2 x i64>* %gep1
+  %load1 = load <2 x i64>, <2 x i64>* %gep1
   %gep2 = getelementptr <2 x i64>, <2 x i64>* %phi2, i64 -1
   store <2 x i64> %load1, <2 x i64>* %gep2
-  %load2 = load <2 x i64>* %phi1
+  %load2 = load <2 x i64>, <2 x i64>* %phi1
   store <2 x i64> %load2, <2 x i64>* %phi2
   %dec.i = add nsw i64 %i, -1
   %gep3 = getelementptr <2 x i64>, <2 x i64>* %phi2, i64 -2
@@ -728,10 +728,10 @@ for.body:
   %phi2 = phi float* [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
   %gep1 = getelementptr float, float* %phi1, i64 -1
-  %load1 = load float* %gep1
+  %load1 = load float, float* %gep1
   %gep2 = getelementptr float, float* %phi2, i64 -1
   store float %load1, float* %gep2
-  %load2 = load float* %phi1
+  %load2 = load float, float* %phi1
   store float %load2, float* %phi2
   %dec.i = add nsw i64 %i, -1
   %gep3 = getelementptr float, float* %phi2, i64 -2
@@ -752,10 +752,10 @@ for.body:
   %phi2 = phi double* [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
   %gep1 = getelementptr double, double* %phi1, i64 -1
-  %load1 = load double* %gep1
+  %load1 = load double, double* %gep1
   %gep2 = getelementptr double, double* %phi2, i64 -1
   store double %load1, double* %gep2
-  %load2 = load double* %phi1
+  %load2 = load double, double* %phi1
   store double %load2, double* %phi2
   %dec.i = add nsw i64 %i, -1
   %gep3 = getelementptr double, double* %phi2, i64 -2

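For readers skimming these hunks: every change in this file is the same purely syntactic rewrite. The result type of a load, previously implied by the pointee type of the pointer operand, is now spelled out as an explicit first operand. A minimal sketch of the before/after shape (the type and value names below are illustrative, not taken from the test):

  %pair = type { i32, i64 }

  define i64 @sketch(%pair** %this) {
    ; old syntax:  %p = load %pair** %this
    %p = load %pair*, %pair** %this
    %gep = getelementptr inbounds %pair, %pair* %p, i64 0, i32 1
    %v = load i64, i64* %gep
    ret i64 %v
  }
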
Modified: llvm/trunk/test/CodeGen/AArch64/ldst-regoffset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ldst-regoffset.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ldst-regoffset.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/ldst-regoffset.ll Fri Feb 27 15:17:42 2015
@@ -13,13 +13,13 @@ define void @ldst_8bit(i8* %base, i32 %o
 ; CHECK-LABEL: ldst_8bit:
 
    %addr8_sxtw = getelementptr i8, i8* %base, i32 %off32
-   %val8_sxtw = load volatile i8* %addr8_sxtw
+   %val8_sxtw = load volatile i8, i8* %addr8_sxtw
    %val32_signed = sext i8 %val8_sxtw to i32
    store volatile i32 %val32_signed, i32* @var_32bit
 ; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{[wx][0-9]+}}, sxtw]
 
   %addr_lsl = getelementptr i8, i8* %base, i64 %off64
-  %val8_lsl = load volatile i8* %addr_lsl
+  %val8_lsl = load volatile i8, i8* %addr_lsl
   %val32_unsigned = zext i8 %val8_lsl to i32
   store volatile i32 %val32_unsigned, i32* @var_32bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
@@ -28,7 +28,7 @@ define void @ldst_8bit(i8* %base, i32 %o
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
   %addr_uxtw = inttoptr i64 %addrint1_uxtw to i8*
-  %val8_uxtw = load volatile i8* %addr_uxtw
+  %val8_uxtw = load volatile i8, i8* %addr_uxtw
   %newval8 = add i8 %val8_uxtw, 1
   store volatile i8 %newval8, i8* @var_8bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
@@ -41,13 +41,13 @@ define void @ldst_16bit(i16* %base, i32
 ; CHECK-LABEL: ldst_16bit:
 
    %addr8_sxtwN = getelementptr i16, i16* %base, i32 %off32
-   %val8_sxtwN = load volatile i16* %addr8_sxtwN
+   %val8_sxtwN = load volatile i16, i16* %addr8_sxtwN
    %val32_signed = sext i16 %val8_sxtwN to i32
    store volatile i32 %val32_signed, i32* @var_32bit
 ; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #1]
 
   %addr_lslN = getelementptr i16, i16* %base, i64 %off64
-  %val8_lslN = load volatile i16* %addr_lslN
+  %val8_lslN = load volatile i16, i16* %addr_lslN
   %val32_unsigned = zext i16 %val8_lslN to i32
   store volatile i32 %val32_unsigned, i32* @var_32bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #1]
@@ -56,7 +56,7 @@ define void @ldst_16bit(i16* %base, i32
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
   %addr_uxtw = inttoptr i64 %addrint1_uxtw to i16*
-  %val8_uxtw = load volatile i16* %addr_uxtw
+  %val8_uxtw = load volatile i16, i16* %addr_uxtw
   %newval8 = add i16 %val8_uxtw, 1
   store volatile i16 %newval8, i16* @var_16bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
@@ -65,7 +65,7 @@ define void @ldst_16bit(i16* %base, i32
   %offset_sxtw = sext i32 %off32 to i64
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
   %addr_sxtw = inttoptr i64 %addrint_sxtw to i16*
-  %val16_sxtw = load volatile i16* %addr_sxtw
+  %val16_sxtw = load volatile i16, i16* %addr_sxtw
   %val64_signed = sext i16 %val16_sxtw to i64
   store volatile i64 %val64_signed, i64* @var_64bit
 ; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{[wx][0-9]+}}, sxtw]
@@ -74,7 +74,7 @@ define void @ldst_16bit(i16* %base, i32
   %base_lsl = ptrtoint i16* %base to i64
   %addrint_lsl = add i64 %base_lsl, %off64
   %addr_lsl = inttoptr i64 %addrint_lsl to i16*
-  %val16_lsl = load volatile i16* %addr_lsl
+  %val16_lsl = load volatile i16, i16* %addr_lsl
   %val64_unsigned = zext i16 %val16_lsl to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
@@ -84,7 +84,7 @@ define void @ldst_16bit(i16* %base, i32
   %offset2_uxtwN = shl i64 %offset_uxtwN, 1
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
   %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i16*
-  %val32 = load volatile i32* @var_32bit
+  %val32 = load volatile i32, i32* @var_32bit
   %val16_trunc32 = trunc i32 %val32 to i16
   store volatile i16 %val16_trunc32, i16* %addr_uxtwN
 ; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #1]
@@ -95,12 +95,12 @@ define void @ldst_32bit(i32* %base, i32
 ; CHECK-LABEL: ldst_32bit:
 
    %addr_sxtwN = getelementptr i32, i32* %base, i32 %off32
-   %val_sxtwN = load volatile i32* %addr_sxtwN
+   %val_sxtwN = load volatile i32, i32* %addr_sxtwN
    store volatile i32 %val_sxtwN, i32* @var_32bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #2]
 
   %addr_lslN = getelementptr i32, i32* %base, i64 %off64
-  %val_lslN = load volatile i32* %addr_lslN
+  %val_lslN = load volatile i32, i32* %addr_lslN
   store volatile i32 %val_lslN, i32* @var_32bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #2]
 
@@ -108,7 +108,7 @@ define void @ldst_32bit(i32* %base, i32
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
   %addr_uxtw = inttoptr i64 %addrint1_uxtw to i32*
-  %val_uxtw = load volatile i32* %addr_uxtw
+  %val_uxtw = load volatile i32, i32* %addr_uxtw
   %newval8 = add i32 %val_uxtw, 1
   store volatile i32 %newval8, i32* @var_32bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
@@ -118,7 +118,7 @@ define void @ldst_32bit(i32* %base, i32
   %offset_sxtw = sext i32 %off32 to i64
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
   %addr_sxtw = inttoptr i64 %addrint_sxtw to i32*
-  %val16_sxtw = load volatile i32* %addr_sxtw
+  %val16_sxtw = load volatile i32, i32* %addr_sxtw
   %val64_signed = sext i32 %val16_sxtw to i64
   store volatile i64 %val64_signed, i64* @var_64bit
 ; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
@@ -127,7 +127,7 @@ define void @ldst_32bit(i32* %base, i32
   %base_lsl = ptrtoint i32* %base to i64
   %addrint_lsl = add i64 %base_lsl, %off64
   %addr_lsl = inttoptr i64 %addrint_lsl to i32*
-  %val16_lsl = load volatile i32* %addr_lsl
+  %val16_lsl = load volatile i32, i32* %addr_lsl
   %val64_unsigned = zext i32 %val16_lsl to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
@@ -137,7 +137,7 @@ define void @ldst_32bit(i32* %base, i32
   %offset2_uxtwN = shl i64 %offset_uxtwN, 2
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
   %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i32*
-  %val32 = load volatile i32* @var_32bit
+  %val32 = load volatile i32, i32* @var_32bit
   store volatile i32 %val32, i32* %addr_uxtwN
 ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #2]
    ret void
@@ -147,12 +147,12 @@ define void @ldst_64bit(i64* %base, i32
 ; CHECK-LABEL: ldst_64bit:
 
    %addr_sxtwN = getelementptr i64, i64* %base, i32 %off32
-   %val_sxtwN = load volatile i64* %addr_sxtwN
+   %val_sxtwN = load volatile i64, i64* %addr_sxtwN
    store volatile i64 %val_sxtwN, i64* @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #3]
 
   %addr_lslN = getelementptr i64, i64* %base, i64 %off64
-  %val_lslN = load volatile i64* %addr_lslN
+  %val_lslN = load volatile i64, i64* %addr_lslN
   store volatile i64 %val_lslN, i64* @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #3]
 
@@ -160,7 +160,7 @@ define void @ldst_64bit(i64* %base, i32
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
   %addr_uxtw = inttoptr i64 %addrint1_uxtw to i64*
-  %val8_uxtw = load volatile i64* %addr_uxtw
+  %val8_uxtw = load volatile i64, i64* %addr_uxtw
   %newval8 = add i64 %val8_uxtw, 1
   store volatile i64 %newval8, i64* @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
@@ -169,14 +169,14 @@ define void @ldst_64bit(i64* %base, i32
   %offset_sxtw = sext i32 %off32 to i64
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
   %addr_sxtw = inttoptr i64 %addrint_sxtw to i64*
-  %val64_sxtw = load volatile i64* %addr_sxtw
+  %val64_sxtw = load volatile i64, i64* %addr_sxtw
   store volatile i64 %val64_sxtw, i64* @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
 
   %base_lsl = ptrtoint i64* %base to i64
   %addrint_lsl = add i64 %base_lsl, %off64
   %addr_lsl = inttoptr i64 %addrint_lsl to i64*
-  %val64_lsl = load volatile i64* %addr_lsl
+  %val64_lsl = load volatile i64, i64* %addr_lsl
   store volatile i64 %val64_lsl, i64* @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
 
@@ -185,7 +185,7 @@ define void @ldst_64bit(i64* %base, i32
   %offset2_uxtwN = shl i64 %offset_uxtwN, 3
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
   %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i64*
-  %val64 = load volatile i64* @var_64bit
+  %val64 = load volatile i64, i64* @var_64bit
   store volatile i64 %val64, i64* %addr_uxtwN
 ; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #3]
    ret void
@@ -195,13 +195,13 @@ define void @ldst_float(float* %base, i3
 ; CHECK-LABEL: ldst_float:
 
    %addr_sxtwN = getelementptr float, float* %base, i32 %off32
-   %val_sxtwN = load volatile float* %addr_sxtwN
+   %val_sxtwN = load volatile float, float* %addr_sxtwN
    store volatile float %val_sxtwN, float* @var_float
 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #2]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
 
   %addr_lslN = getelementptr float, float* %base, i64 %off64
-  %val_lslN = load volatile float* %addr_lslN
+  %val_lslN = load volatile float, float* %addr_lslN
   store volatile float %val_lslN, float* @var_float
 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #2]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
@@ -210,7 +210,7 @@ define void @ldst_float(float* %base, i3
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
   %addr_uxtw = inttoptr i64 %addrint1_uxtw to float*
-  %val_uxtw = load volatile float* %addr_uxtw
+  %val_uxtw = load volatile float, float* %addr_uxtw
   store volatile float %val_uxtw, float* @var_float
 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
@@ -219,7 +219,7 @@ define void @ldst_float(float* %base, i3
   %offset_sxtw = sext i32 %off32 to i64
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
   %addr_sxtw = inttoptr i64 %addrint_sxtw to float*
-  %val64_sxtw = load volatile float* %addr_sxtw
+  %val64_sxtw = load volatile float, float* %addr_sxtw
   store volatile float %val64_sxtw, float* @var_float
 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
@@ -227,7 +227,7 @@ define void @ldst_float(float* %base, i3
   %base_lsl = ptrtoint float* %base to i64
   %addrint_lsl = add i64 %base_lsl, %off64
   %addr_lsl = inttoptr i64 %addrint_lsl to float*
-  %val64_lsl = load volatile float* %addr_lsl
+  %val64_lsl = load volatile float, float* %addr_lsl
   store volatile float %val64_lsl, float* @var_float
 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
@@ -237,7 +237,7 @@ define void @ldst_float(float* %base, i3
   %offset2_uxtwN = shl i64 %offset_uxtwN, 2
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
   %addr_uxtwN = inttoptr i64 %addrint_uxtwN to float*
-  %val64 = load volatile float* @var_float
+  %val64 = load volatile float, float* @var_float
   store volatile float %val64, float* %addr_uxtwN
 ; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #2]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
@@ -248,13 +248,13 @@ define void @ldst_double(double* %base,
 ; CHECK-LABEL: ldst_double:
 
    %addr_sxtwN = getelementptr double, double* %base, i32 %off32
-   %val_sxtwN = load volatile double* %addr_sxtwN
+   %val_sxtwN = load volatile double, double* %addr_sxtwN
    store volatile double %val_sxtwN, double* @var_double
 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #3]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
 
   %addr_lslN = getelementptr double, double* %base, i64 %off64
-  %val_lslN = load volatile double* %addr_lslN
+  %val_lslN = load volatile double, double* %addr_lslN
   store volatile double %val_lslN, double* @var_double
 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #3]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
@@ -263,7 +263,7 @@ define void @ldst_double(double* %base,
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
   %addr_uxtw = inttoptr i64 %addrint1_uxtw to double*
-  %val_uxtw = load volatile double* %addr_uxtw
+  %val_uxtw = load volatile double, double* %addr_uxtw
   store volatile double %val_uxtw, double* @var_double
 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
@@ -272,7 +272,7 @@ define void @ldst_double(double* %base,
   %offset_sxtw = sext i32 %off32 to i64
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
   %addr_sxtw = inttoptr i64 %addrint_sxtw to double*
-  %val64_sxtw = load volatile double* %addr_sxtw
+  %val64_sxtw = load volatile double, double* %addr_sxtw
   store volatile double %val64_sxtw, double* @var_double
 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
@@ -280,7 +280,7 @@ define void @ldst_double(double* %base,
   %base_lsl = ptrtoint double* %base to i64
   %addrint_lsl = add i64 %base_lsl, %off64
   %addr_lsl = inttoptr i64 %addrint_lsl to double*
-  %val64_lsl = load volatile double* %addr_lsl
+  %val64_lsl = load volatile double, double* %addr_lsl
   store volatile double %val64_lsl, double* @var_double
 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
@@ -290,7 +290,7 @@ define void @ldst_double(double* %base,
   %offset2_uxtwN = shl i64 %offset_uxtwN, 3
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
   %addr_uxtwN = inttoptr i64 %addrint_uxtwN to double*
-  %val64 = load volatile double* @var_double
+  %val64 = load volatile double, double* @var_double
   store volatile double %val64, double* %addr_uxtwN
 ; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #3]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
@@ -302,13 +302,13 @@ define void @ldst_128bit(fp128* %base, i
 ; CHECK-LABEL: ldst_128bit:
 
    %addr_sxtwN = getelementptr fp128, fp128* %base, i32 %off32
-   %val_sxtwN = load volatile fp128* %addr_sxtwN
+   %val_sxtwN = load volatile fp128, fp128* %addr_sxtwN
    store volatile fp128 %val_sxtwN, fp128* %base
 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
 ; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
 
   %addr_lslN = getelementptr fp128, fp128* %base, i64 %off64
-  %val_lslN = load volatile fp128* %addr_lslN
+  %val_lslN = load volatile fp128, fp128* %addr_lslN
   store volatile fp128 %val_lslN, fp128* %base
 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #4]
 ; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
@@ -317,7 +317,7 @@ define void @ldst_128bit(fp128* %base, i
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
   %addr_uxtw = inttoptr i64 %addrint1_uxtw to fp128*
-  %val_uxtw = load volatile fp128* %addr_uxtw
+  %val_uxtw = load volatile fp128, fp128* %addr_uxtw
   store volatile fp128 %val_uxtw, fp128* %base
 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
 ; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
@@ -326,7 +326,7 @@ define void @ldst_128bit(fp128* %base, i
   %offset_sxtw = sext i32 %off32 to i64
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
   %addr_sxtw = inttoptr i64 %addrint_sxtw to fp128*
-  %val64_sxtw = load volatile fp128* %addr_sxtw
+  %val64_sxtw = load volatile fp128, fp128* %addr_sxtw
   store volatile fp128 %val64_sxtw, fp128* %base
 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
 ; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
@@ -334,7 +334,7 @@ define void @ldst_128bit(fp128* %base, i
   %base_lsl = ptrtoint fp128* %base to i64
   %addrint_lsl = add i64 %base_lsl, %off64
   %addr_lsl = inttoptr i64 %addrint_lsl to fp128*
-  %val64_lsl = load volatile fp128* %addr_lsl
+  %val64_lsl = load volatile fp128, fp128* %addr_lsl
   store volatile fp128 %val64_lsl, fp128* %base
 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
 ; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
@@ -344,7 +344,7 @@ define void @ldst_128bit(fp128* %base, i
   %offset2_uxtwN = shl i64 %offset_uxtwN, 4
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
   %addr_uxtwN = inttoptr i64 %addrint_uxtwN to fp128*
-  %val64 = load volatile fp128* %base
+  %val64 = load volatile fp128, fp128* %base
   store volatile fp128 %val64, fp128* %addr_uxtwN
 ; CHECK: str {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #4]
 ; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]

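A note on the volatile loads above: the keyword order is unchanged, so the explicit type slots in directly after "volatile", giving "load volatile <ty>, <ty>* <ptr>". A small sketch under that reading (the global is made up for illustration):

  @g = global i32 0

  define i32 @sketch_volatile() {
    ; old syntax:  %v = load volatile i32* @g
    %v = load volatile i32, i32* @g
    ret i32 %v
  }
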
Modified: llvm/trunk/test/CodeGen/AArch64/ldst-unscaledimm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ldst-unscaledimm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ldst-unscaledimm.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/ldst-unscaledimm.ll Fri Feb 27 15:17:42 2015
@@ -16,32 +16,32 @@ define void @ldst_8bit() {
 
 ; No architectural support for loads to 16-bit or 8-bit since we
 ; promote i8 during lowering.
-  %addr_8bit = load i8** @varptr
+  %addr_8bit = load i8*, i8** @varptr
 
 ; match a sign-extending load 8-bit -> 32-bit
    %addr_sext32 = getelementptr i8, i8* %addr_8bit, i64 -256
-   %val8_sext32 = load volatile i8* %addr_sext32
+   %val8_sext32 = load volatile i8, i8* %addr_sext32
    %val32_signed = sext i8 %val8_sext32 to i32
    store volatile i32 %val32_signed, i32* @var_32bit
 ; CHECK: ldursb {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
 
 ; match a zero-extending load volatile 8-bit -> 32-bit
   %addr_zext32 = getelementptr i8, i8* %addr_8bit, i64 -12
-  %val8_zext32 = load volatile i8* %addr_zext32
+  %val8_zext32 = load volatile i8, i8* %addr_zext32
   %val32_unsigned = zext i8 %val8_zext32 to i32
   store volatile i32 %val32_unsigned, i32* @var_32bit
 ; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-12]
 
 ; match an any-extending load volatile 8-bit -> 32-bit
   %addr_anyext = getelementptr i8, i8* %addr_8bit, i64 -1
-  %val8_anyext = load volatile i8* %addr_anyext
+  %val8_anyext = load volatile i8, i8* %addr_anyext
   %newval8 = add i8 %val8_anyext, 1
   store volatile i8 %newval8, i8* @var_8bit
 ; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
 
 ; match a sign-extending load volatile 8-bit -> 64-bit
   %addr_sext64 = getelementptr i8, i8* %addr_8bit, i64 -5
-  %val8_sext64 = load volatile i8* %addr_sext64
+  %val8_sext64 = load volatile i8, i8* %addr_sext64
   %val64_signed = sext i8 %val8_sext64 to i64
   store volatile i64 %val64_signed, i64* @var_64bit
 ; CHECK: ldursb {{x[0-9]+}}, [{{x[0-9]+}}, #-5]
@@ -50,21 +50,21 @@ define void @ldst_8bit() {
 ; This uses the fact that ldrb w0, [x0] will zero out the high 32-bits
 ; of x0 so it's identical to a volatile load to 32-bits.
   %addr_zext64 = getelementptr i8, i8* %addr_8bit, i64 -9
-  %val8_zext64 = load volatile i8* %addr_zext64
+  %val8_zext64 = load volatile i8, i8* %addr_zext64
   %val64_unsigned = zext i8 %val8_zext64 to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
 ; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-9]
 
 ; truncating store volatile 32-bits to 8-bits
   %addr_trunc32 = getelementptr i8, i8* %addr_8bit, i64 -256
-  %val32 = load volatile i32* @var_32bit
+  %val32 = load volatile i32, i32* @var_32bit
   %val8_trunc32 = trunc i32 %val32 to i8
   store volatile i8 %val8_trunc32, i8* %addr_trunc32
 ; CHECK: sturb {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
 
 ; truncating store volatile 64-bits to 8-bits
   %addr_trunc64 = getelementptr i8, i8* %addr_8bit, i64 -1
-  %val64 = load volatile i64* @var_64bit
+  %val64 = load volatile i64, i64* @var_64bit
   %val8_trunc64 = trunc i64 %val64 to i8
   store volatile i8 %val8_trunc64, i8* %addr_trunc64
 ; CHECK: sturb {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
@@ -77,12 +77,12 @@ define void @ldst_16bit() {
 
 ; No architectural support for loads to 8-bit or 16-bit since we
 ; promote i16 during lowering.
-  %addr_8bit = load i8** @varptr
+  %addr_8bit = load i8*, i8** @varptr
 
 ; match a sign-extending load 16-bit -> 32-bit
    %addr8_sext32 = getelementptr i8, i8* %addr_8bit, i64 -256
    %addr_sext32 = bitcast i8* %addr8_sext32 to i16*
-   %val16_sext32 = load volatile i16* %addr_sext32
+   %val16_sext32 = load volatile i16, i16* %addr_sext32
    %val32_signed = sext i16 %val16_sext32 to i32
    store volatile i32 %val32_signed, i32* @var_32bit
 ; CHECK: ldursh {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
@@ -90,7 +90,7 @@ define void @ldst_16bit() {
 ; match a zero-extending load volatile 16-bit -> 32-bit, with an offset that would be unaligned.
   %addr8_zext32 = getelementptr i8, i8* %addr_8bit, i64 15
   %addr_zext32 = bitcast i8* %addr8_zext32 to i16*
-  %val16_zext32 = load volatile i16* %addr_zext32
+  %val16_zext32 = load volatile i16, i16* %addr_zext32
   %val32_unsigned = zext i16 %val16_zext32 to i32
   store volatile i32 %val32_unsigned, i32* @var_32bit
 ; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #15]
@@ -98,7 +98,7 @@ define void @ldst_16bit() {
 ; match an any-extending load volatile 16-bit -> 32-bit
   %addr8_anyext = getelementptr i8, i8* %addr_8bit, i64 -1
   %addr_anyext = bitcast i8* %addr8_anyext to i16*
-  %val16_anyext = load volatile i16* %addr_anyext
+  %val16_anyext = load volatile i16, i16* %addr_anyext
   %newval16 = add i16 %val16_anyext, 1
   store volatile i16 %newval16, i16* @var_16bit
 ; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
@@ -106,7 +106,7 @@ define void @ldst_16bit() {
 ; match a sign-extending load volatile 16-bit -> 64-bit
   %addr8_sext64 = getelementptr i8, i8* %addr_8bit, i64 -5
   %addr_sext64 = bitcast i8* %addr8_sext64 to i16*
-  %val16_sext64 = load volatile i16* %addr_sext64
+  %val16_sext64 = load volatile i16, i16* %addr_sext64
   %val64_signed = sext i16 %val16_sext64 to i64
   store volatile i64 %val64_signed, i64* @var_64bit
 ; CHECK: ldursh {{x[0-9]+}}, [{{x[0-9]+}}, #-5]
@@ -116,7 +116,7 @@ define void @ldst_16bit() {
 ; of x0 so it's identical to load volatileing to 32-bits.
   %addr8_zext64 = getelementptr i8, i8* %addr_8bit, i64 9
   %addr_zext64 = bitcast i8* %addr8_zext64 to i16*
-  %val16_zext64 = load volatile i16* %addr_zext64
+  %val16_zext64 = load volatile i16, i16* %addr_zext64
   %val64_unsigned = zext i16 %val16_zext64 to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
 ; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #9]
@@ -124,7 +124,7 @@ define void @ldst_16bit() {
 ; truncating store volatile 32-bits to 16-bits
   %addr8_trunc32 = getelementptr i8, i8* %addr_8bit, i64 -256
   %addr_trunc32 = bitcast i8* %addr8_trunc32 to i16*
-  %val32 = load volatile i32* @var_32bit
+  %val32 = load volatile i32, i32* @var_32bit
   %val16_trunc32 = trunc i32 %val32 to i16
   store volatile i16 %val16_trunc32, i16* %addr_trunc32
 ; CHECK: sturh {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
@@ -132,7 +132,7 @@ define void @ldst_16bit() {
 ; truncating store volatile 64-bits to 16-bits
   %addr8_trunc64 = getelementptr i8, i8* %addr_8bit, i64 -1
   %addr_trunc64 = bitcast i8* %addr8_trunc64 to i16*
-  %val64 = load volatile i64* @var_64bit
+  %val64 = load volatile i64, i64* @var_64bit
   %val16_trunc64 = trunc i64 %val64 to i16
   store volatile i16 %val16_trunc64, i16* %addr_trunc64
 ; CHECK: sturh {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
@@ -143,12 +143,12 @@ define void @ldst_16bit() {
 define void @ldst_32bit() {
 ; CHECK-LABEL: ldst_32bit:
 
-  %addr_8bit = load i8** @varptr
+  %addr_8bit = load i8*, i8** @varptr
 
 ; Straight 32-bit load/store
   %addr32_8_noext = getelementptr i8, i8* %addr_8bit, i64 1
   %addr32_noext = bitcast i8* %addr32_8_noext to i32*
-  %val32_noext = load volatile i32* %addr32_noext
+  %val32_noext = load volatile i32, i32* %addr32_noext
   store volatile i32 %val32_noext, i32* %addr32_noext
 ; CHECK: ldur {{w[0-9]+}}, [{{x[0-9]+}}, #1]
 ; CHECK: stur {{w[0-9]+}}, [{{x[0-9]+}}, #1]
@@ -156,7 +156,7 @@ define void @ldst_32bit() {
 ; Zero-extension to 64-bits
   %addr32_8_zext = getelementptr i8, i8* %addr_8bit, i64 -256
   %addr32_zext = bitcast i8* %addr32_8_zext to i32*
-  %val32_zext = load volatile i32* %addr32_zext
+  %val32_zext = load volatile i32, i32* %addr32_zext
   %val64_unsigned = zext i32 %val32_zext to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
 ; CHECK: ldur {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
@@ -165,7 +165,7 @@ define void @ldst_32bit() {
 ; Sign-extension to 64-bits
   %addr32_8_sext = getelementptr i8, i8* %addr_8bit, i64 -12
   %addr32_sext = bitcast i8* %addr32_8_sext to i32*
-  %val32_sext = load volatile i32* %addr32_sext
+  %val32_sext = load volatile i32, i32* %addr32_sext
   %val64_signed = sext i32 %val32_sext to i64
   store volatile i64 %val64_signed, i64* @var_64bit
 ; CHECK: ldursw {{x[0-9]+}}, [{{x[0-9]+}}, #-12]
@@ -177,7 +177,7 @@ define void @ldst_32bit() {
   %addr32_8_trunc = getelementptr i8, i8* %addr_8bit, i64 -20
   %addr32_trunc = bitcast i8* %addr32_8_trunc to i32*
 
-  %val64_trunc = load volatile i64* %addr64_trunc
+  %val64_trunc = load volatile i64, i64* %addr64_trunc
   %val32_trunc = trunc i64 %val64_trunc to i32
   store volatile i32 %val32_trunc, i32* %addr32_trunc
 ; CHECK: ldur {{x[0-9]+}}, [{{x[0-9]+}}, #255]
@@ -189,11 +189,11 @@ define void @ldst_32bit() {
 define void @ldst_float() {
 ; CHECK-LABEL: ldst_float:
 
-  %addr_8bit = load i8** @varptr
+  %addr_8bit = load i8*, i8** @varptr
   %addrfp_8 = getelementptr i8, i8* %addr_8bit, i64 -5
   %addrfp = bitcast i8* %addrfp_8 to float*
 
-  %valfp = load volatile float* %addrfp
+  %valfp = load volatile float, float* %addrfp
 ; CHECK: ldur {{s[0-9]+}}, [{{x[0-9]+}}, #-5]
 ; CHECK-NOFP-NOT: ldur {{s[0-9]+}},
 
@@ -207,11 +207,11 @@ define void @ldst_float() {
 define void @ldst_double() {
 ; CHECK-LABEL: ldst_double:
 
-  %addr_8bit = load i8** @varptr
+  %addr_8bit = load i8*, i8** @varptr
   %addrfp_8 = getelementptr i8, i8* %addr_8bit, i64 4
   %addrfp = bitcast i8* %addrfp_8 to double*
 
-  %valfp = load volatile double* %addrfp
+  %valfp = load volatile double, double* %addrfp
 ; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #4]
 ; CHECK-NOFP-NOT: ldur {{d[0-9]+}},
 

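Where the loaded value is itself a pointer, as with @varptr in this file, the explicit type is a pointer type and the pointer operand gains one more level of indirection. A sketch, not taken verbatim from the test:

  @varptr = global i8* null

  define i8 @sketch_indirect() {
    ; old syntax:  %addr = load i8** @varptr
    %addr = load i8*, i8** @varptr    ; result has type i8*
    %val = load i8, i8* %addr         ; result has type i8
    ret i8 %val
  }
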
Modified: llvm/trunk/test/CodeGen/AArch64/ldst-unsignedimm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ldst-unsignedimm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ldst-unsignedimm.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/ldst-unsignedimm.ll Fri Feb 27 15:17:42 2015
@@ -16,26 +16,26 @@ define void @ldst_8bit() {
 ; promote i8 during lowering.
 
 ; match a sign-extending load 8-bit -> 32-bit
-   %val8_sext32 = load volatile i8* @var_8bit
+   %val8_sext32 = load volatile i8, i8* @var_8bit
    %val32_signed = sext i8 %val8_sext32 to i32
    store volatile i32 %val32_signed, i32* @var_32bit
 ; CHECK: adrp {{x[0-9]+}}, var_8bit
 ; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; match a zero-extending load volatile 8-bit -> 32-bit
-  %val8_zext32 = load volatile i8* @var_8bit
+  %val8_zext32 = load volatile i8, i8* @var_8bit
   %val32_unsigned = zext i8 %val8_zext32 to i32
   store volatile i32 %val32_unsigned, i32* @var_32bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; match an any-extending load volatile 8-bit -> 32-bit
-  %val8_anyext = load volatile i8* @var_8bit
+  %val8_anyext = load volatile i8, i8* @var_8bit
   %newval8 = add i8 %val8_anyext, 1
   store volatile i8 %newval8, i8* @var_8bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; match a sign-extending load volatile 8-bit -> 64-bit
-  %val8_sext64 = load volatile i8* @var_8bit
+  %val8_sext64 = load volatile i8, i8* @var_8bit
   %val64_signed = sext i8 %val8_sext64 to i64
   store volatile i64 %val64_signed, i64* @var_64bit
 ; CHECK: ldrsb {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
@@ -43,19 +43,19 @@ define void @ldst_8bit() {
 ; match a zero-extending load volatile 8-bit -> 64-bit.
 ; This uses the fact that ldrb w0, [x0] will zero out the high 32-bits
 ; of x0 so it's identical to a volatile load to 32-bits.
-  %val8_zext64 = load volatile i8* @var_8bit
+  %val8_zext64 = load volatile i8, i8* @var_8bit
   %val64_unsigned = zext i8 %val8_zext64 to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; truncating store volatile 32-bits to 8-bits
-  %val32 = load volatile i32* @var_32bit
+  %val32 = load volatile i32, i32* @var_32bit
   %val8_trunc32 = trunc i32 %val32 to i8
   store volatile i8 %val8_trunc32, i8* @var_8bit
 ; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; truncating store volatile 64-bits to 8-bits
-  %val64 = load volatile i64* @var_64bit
+  %val64 = load volatile i64, i64* @var_64bit
   %val8_trunc64 = trunc i64 %val64 to i8
   store volatile i8 %val8_trunc64, i8* @var_8bit
 ; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
@@ -70,26 +70,26 @@ define void @ldst_16bit() {
 ; lowering.
 
 ; match a sign-extending load volatile 16-bit -> 32-bit
-  %val16_sext32 = load volatile i16* @var_16bit
+  %val16_sext32 = load volatile i16, i16* @var_16bit
   %val32_signed = sext i16 %val16_sext32 to i32
   store volatile i32 %val32_signed, i32* @var_32bit
 ; CHECK: adrp {{x[0-9]+}}, var_16bit
 ; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; match a zero-extending load volatile 16-bit -> 32-bit
-  %val16_zext32 = load volatile i16* @var_16bit
+  %val16_zext32 = load volatile i16, i16* @var_16bit
   %val32_unsigned = zext i16 %val16_zext32 to i32
   store volatile i32 %val32_unsigned, i32* @var_32bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; match an any-extending load volatile 16-bit -> 32-bit
-  %val16_anyext = load volatile i16* @var_16bit
+  %val16_anyext = load volatile i16, i16* @var_16bit
   %newval16 = add i16 %val16_anyext, 1
   store volatile i16 %newval16, i16* @var_16bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; match a sign-extending load volatile 16-bit -> 64-bit
-  %val16_sext64 = load volatile i16* @var_16bit
+  %val16_sext64 = load volatile i16, i16* @var_16bit
   %val64_signed = sext i16 %val16_sext64 to i64
   store volatile i64 %val64_signed, i64* @var_64bit
 ; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
@@ -97,19 +97,19 @@ define void @ldst_16bit() {
 ; match a zero-extending load volatile 16-bit -> 64-bit.
 ; This uses the fact that ldrh w0, [x0] will zero out the high 32-bits
 ; of x0 so it's identical to a volatile load to 32-bits.
-  %val16_zext64 = load volatile i16* @var_16bit
+  %val16_zext64 = load volatile i16, i16* @var_16bit
   %val64_unsigned = zext i16 %val16_zext64 to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; truncating store volatile 32-bits to 16-bits
-  %val32 = load volatile i32* @var_32bit
+  %val32 = load volatile i32, i32* @var_32bit
   %val16_trunc32 = trunc i32 %val32 to i16
   store volatile i16 %val16_trunc32, i16* @var_16bit
 ; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; truncating store volatile 64-bits to 16-bits
-  %val64 = load volatile i64* @var_64bit
+  %val64 = load volatile i64, i64* @var_64bit
   %val16_trunc64 = trunc i64 %val64 to i16
   store volatile i16 %val16_trunc64, i16* @var_16bit
 ; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
@@ -121,28 +121,28 @@ define void @ldst_32bit() {
 ; CHECK-LABEL: ldst_32bit:
 
 ; Straight 32-bit load/store
-  %val32_noext = load volatile i32* @var_32bit
+  %val32_noext = load volatile i32, i32* @var_32bit
   store volatile i32 %val32_noext, i32* @var_32bit
 ; CHECK: adrp {{x[0-9]+}}, var_32bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
 ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
 
 ; Zero-extension to 64-bits
-  %val32_zext = load volatile i32* @var_32bit
+  %val32_zext = load volatile i32, i32* @var_32bit
   %val64_unsigned = zext i32 %val32_zext to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
 ; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
 
 ; Sign-extension to 64-bits
-  %val32_sext = load volatile i32* @var_32bit
+  %val32_sext = load volatile i32, i32* @var_32bit
   %val64_signed = sext i32 %val32_sext to i64
   store volatile i64 %val64_signed, i64* @var_64bit
 ; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
 ; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
 
 ; Truncation from 64-bits
-  %val64_trunc = load volatile i64* @var_64bit
+  %val64_trunc = load volatile i64, i64* @var_64bit
   %val32_trunc = trunc i64 %val64_trunc to i32
   store volatile i32 %val32_trunc, i32* @var_32bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
@@ -163,62 +163,62 @@ define void @ldst_32bit() {
 
 define void @ldst_complex_offsets() {
 ; CHECK: ldst_complex_offsets
-  %arr8_addr = load volatile i8** @arr8
+  %arr8_addr = load volatile i8*, i8** @arr8
 ; CHECK: adrp {{x[0-9]+}}, arr8
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr8]
 
   %arr8_sub1_addr = getelementptr i8, i8* %arr8_addr, i64 1
-  %arr8_sub1 = load volatile i8* %arr8_sub1_addr
+  %arr8_sub1 = load volatile i8, i8* %arr8_sub1_addr
   store volatile i8 %arr8_sub1, i8* @var_8bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #1]
 
   %arr8_sub4095_addr = getelementptr i8, i8* %arr8_addr, i64 4095
-  %arr8_sub4095 = load volatile i8* %arr8_sub4095_addr
+  %arr8_sub4095 = load volatile i8, i8* %arr8_sub4095_addr
   store volatile i8 %arr8_sub4095, i8* @var_8bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #4095]
 
 
-  %arr16_addr = load volatile i16** @arr16
+  %arr16_addr = load volatile i16*, i16** @arr16
 ; CHECK: adrp {{x[0-9]+}}, arr16
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr16]
 
   %arr16_sub1_addr = getelementptr i16, i16* %arr16_addr, i64 1
-  %arr16_sub1 = load volatile i16* %arr16_sub1_addr
+  %arr16_sub1 = load volatile i16, i16* %arr16_sub1_addr
   store volatile i16 %arr16_sub1, i16* @var_16bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #2]
 
   %arr16_sub4095_addr = getelementptr i16, i16* %arr16_addr, i64 4095
-  %arr16_sub4095 = load volatile i16* %arr16_sub4095_addr
+  %arr16_sub4095 = load volatile i16, i16* %arr16_sub4095_addr
   store volatile i16 %arr16_sub4095, i16* @var_16bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #8190]
 
 
-  %arr32_addr = load volatile i32** @arr32
+  %arr32_addr = load volatile i32*, i32** @arr32
 ; CHECK: adrp {{x[0-9]+}}, arr32
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr32]
 
   %arr32_sub1_addr = getelementptr i32, i32* %arr32_addr, i64 1
-  %arr32_sub1 = load volatile i32* %arr32_sub1_addr
+  %arr32_sub1 = load volatile i32, i32* %arr32_sub1_addr
   store volatile i32 %arr32_sub1, i32* @var_32bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #4]
 
   %arr32_sub4095_addr = getelementptr i32, i32* %arr32_addr, i64 4095
-  %arr32_sub4095 = load volatile i32* %arr32_sub4095_addr
+  %arr32_sub4095 = load volatile i32, i32* %arr32_sub4095_addr
   store volatile i32 %arr32_sub4095, i32* @var_32bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #16380]
 
 
-  %arr64_addr = load volatile i64** @arr64
+  %arr64_addr = load volatile i64*, i64** @arr64
 ; CHECK: adrp {{x[0-9]+}}, arr64
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr64]
 
   %arr64_sub1_addr = getelementptr i64, i64* %arr64_addr, i64 1
-  %arr64_sub1 = load volatile i64* %arr64_sub1_addr
+  %arr64_sub1 = load volatile i64, i64* %arr64_sub1_addr
   store volatile i64 %arr64_sub1, i64* @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #8]
 
   %arr64_sub4095_addr = getelementptr i64, i64* %arr64_addr, i64 4095
-  %arr64_sub4095 = load volatile i64* %arr64_sub4095_addr
+  %arr64_sub4095 = load volatile i64, i64* %arr64_sub4095_addr
   store volatile i64 %arr64_sub4095, i64* @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #32760]
 
@@ -228,7 +228,7 @@ define void @ldst_complex_offsets() {
 define void @ldst_float() {
 ; CHECK-LABEL: ldst_float:
 
-   %valfp = load volatile float* @var_float
+   %valfp = load volatile float, float* @var_float
 ; CHECK: adrp {{x[0-9]+}}, var_float
 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_float]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
@@ -243,7 +243,7 @@ define void @ldst_float() {
 define void @ldst_double() {
 ; CHECK-LABEL: ldst_double:
 
-   %valfp = load volatile double* @var_double
+   %valfp = load volatile double, double* @var_double
 ; CHECK: adrp {{x[0-9]+}}, var_double
 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_double]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},

Modified: llvm/trunk/test/CodeGen/AArch64/literal_pools_float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/literal_pools_float.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/literal_pools_float.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/literal_pools_float.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@
 define void @floating_lits() {
 ; CHECK-LABEL: floating_lits:
 
-  %floatval = load float* @varfloat
+  %floatval = load float, float* @varfloat
   %newfloat = fadd float %floatval, 128.0
 ; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI[0-9]+_[0-9]+]]
 ; CHECK: ldr [[LIT128:s[0-9]+]], [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]]
@@ -26,7 +26,7 @@ define void @floating_lits() {
 
   store float %newfloat, float* @varfloat
 
-  %doubleval = load double* @vardouble
+  %doubleval = load double, double* @vardouble
   %newdouble = fadd double %doubleval, 129.0
 ; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI[0-9]+_[0-9]+]]
 ; CHECK: ldr [[LIT129:d[0-9]+]], [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]]

Modified: llvm/trunk/test/CodeGen/AArch64/local_vars.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/local_vars.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/local_vars.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/local_vars.ll Fri Feb 27 15:17:42 2015
@@ -49,7 +49,7 @@ define void @stack_local() {
 ; CHECK-LABEL: stack_local:
 ; CHECK: sub sp, sp, #16
 
-  %val = load i64* @var
+  %val = load i64, i64* @var
   store i64 %val, i64* %local_var
 ; CHECK-DAG: str {{x[0-9]+}}, [sp, #{{[0-9]+}}]
 

Modified: llvm/trunk/test/CodeGen/AArch64/logical_shifted_reg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/logical_shifted_reg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/logical_shifted_reg.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/logical_shifted_reg.ll Fri Feb 27 15:17:42 2015
@@ -8,8 +8,8 @@
 
 define void @logical_32bit() minsize {
 ; CHECK-LABEL: logical_32bit:
-  %val1 = load i32* @var1_32
-  %val2 = load i32* @var2_32
+  %val1 = load i32, i32* @var1_32
+  %val2 = load i32, i32* @var2_32
 
   ; First check basic and/bic/or/orn/eor/eon patterns with no shift
   %neg_val2 = xor i32 -1, %val2
@@ -98,8 +98,8 @@ define void @logical_32bit() minsize {
 
 define void @logical_64bit() minsize {
 ; CHECK-LABEL: logical_64bit:
-  %val1 = load i64* @var1_64
-  %val2 = load i64* @var2_64
+  %val1 = load i64, i64* @var1_64
+  %val2 = load i64, i64* @var2_64
 
   ; First check basic and/bic/or/orn/eor/eon patterns with no shift
   %neg_val2 = xor i64 -1, %val2
@@ -191,8 +191,8 @@ define void @logical_64bit() minsize {
 
 define void @flag_setting() {
 ; CHECK-LABEL: flag_setting:
-  %val1 = load i64* @var1_64
-  %val2 = load i64* @var2_64
+  %val1 = load i64, i64* @var1_64
+  %val2 = load i64, i64* @var2_64
 
 ; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}
 ; CHECK: b.gt .L

Modified: llvm/trunk/test/CodeGen/AArch64/machine_cse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/machine_cse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/machine_cse.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/machine_cse.ll Fri Feb 27 15:17:42 2015
@@ -14,11 +14,11 @@ define void @combine-sign-comparisons-by
 ; CHECK: b.le
 
 entry:
-  %a = load i32* @a, align 4
-  %b = load i32* @b, align 4
-  %c = load i32* @c, align 4
-  %d = load i32* @d, align 4
-  %e = load i32* @e, align 4
+  %a = load i32, i32* @a, align 4
+  %b = load i32, i32* @b, align 4
+  %c = load i32, i32* @c, align 4
+  %d = load i32, i32* @d, align 4
+  %e = load i32, i32* @e, align 4
 
   %cmp = icmp slt i32 %a, %e
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false

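Trailing attributes are unaffected by the new syntax; anything after the pointer operand, such as the align annotations in this file, stays exactly where it was. Sketch with an illustrative global:

  @a = global i32 0, align 4

  define i32 @sketch_align() {
    ; old syntax:  %v = load i32* @a, align 4
    %v = load i32, i32* @a, align 4
    ret i32 %v
  }
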
Modified: llvm/trunk/test/CodeGen/AArch64/neon-fpround_f128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/neon-fpround_f128.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/neon-fpround_f128.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/neon-fpround_f128.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 define <1 x double> @test_fpround_v1f128(<1 x fp128>* %a) {
 ; CHECK-LABEL: test_fpround_v1f128:
 ; CHECK: bl __trunctfdf2
-  %b = load <1 x fp128>* %a
+  %b = load <1 x fp128>, <1 x fp128>* %a
   %c = fptrunc <1 x fp128> %b to <1 x double>
   ret <1 x double> %c
 }
@@ -12,7 +12,7 @@ define <2 x double> @test_fpround_v2f128
 ; CHECK-LABEL: test_fpround_v2f128:
 ; CHECK: bl __trunctfdf2
 ; CHECK: bl __trunctfdf2
-  %b = load <2 x fp128>* %a
+  %b = load <2 x fp128>, <2 x fp128>* %a
   %c = fptrunc <2 x fp128> %b to <2 x double>
   ret <2 x double> %c
 }

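Vector loads repeat the whole first-class type, angle brackets included, ahead of the comma. A sketch with an arbitrary vector type (not one from this test):

  define <4 x float> @sketch_vector(<4 x float>* %p) {
    ; old syntax:  %v = load <4 x float>* %p
    %v = load <4 x float>, <4 x float>* %p
    ret <4 x float> %v
  }
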
Modified: llvm/trunk/test/CodeGen/AArch64/neon-truncStore-extLoad.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/neon-truncStore-extLoad.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/neon-truncStore-extLoad.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/neon-truncStore-extLoad.ll Fri Feb 27 15:17:42 2015
@@ -34,7 +34,7 @@ define void @truncStore.v8i16(<8 x i16>
 define <4 x i32> @loadSExt.v4i8(<4 x i8>* %ref) {
 ; CHECK-LABEL: loadSExt.v4i8:
 ; CHECK: ldrsb
-  %a = load <4 x i8>* %ref
+  %a = load <4 x i8>, <4 x i8>* %ref
   %conv = sext <4 x i8> %a to <4 x i32>
   ret <4 x i32> %conv
 }
@@ -42,7 +42,7 @@ define <4 x i32> @loadSExt.v4i8(<4 x i8>
 define <4 x i32> @loadZExt.v4i8(<4 x i8>* %ref) {
 ; CHECK-LABEL: loadZExt.v4i8:
 ; CHECK: ldrb
-  %a = load <4 x i8>* %ref
+  %a = load <4 x i8>, <4 x i8>* %ref
   %conv = zext <4 x i8> %a to <4 x i32>
   ret <4 x i32> %conv
 }
@@ -50,7 +50,7 @@ define <4 x i32> @loadZExt.v4i8(<4 x i8>
 define i32 @loadExt.i32(<4 x i8>* %ref) {
 ; CHECK-LABEL: loadExt.i32:
 ; CHECK: ldrb
-  %a = load <4 x i8>* %ref
+  %a = load <4 x i8>, <4 x i8>* %ref
   %vecext = extractelement <4 x i8> %a, i32 0
   %conv = zext i8 %vecext to i32
   ret i32 %conv

Modified: llvm/trunk/test/CodeGen/AArch64/nzcv-save.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/nzcv-save.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/nzcv-save.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/nzcv-save.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@
 ; must be saved for later.
 define void @f(i256* nocapture %a, i256* nocapture %b, i256* nocapture %cc, i256* nocapture %dd) nounwind uwtable noinline ssp {
 entry:
-  %c = load i256* %cc
-  %d = load i256* %dd
+  %c = load i256, i256* %cc
+  %d = load i256, i256* %dd
   %add = add nsw i256 %c, %d
   store i256 %add, i256* %a, align 8
   %or = or i256 %c, 1606938044258990275541962092341162602522202993782792835301376

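Arbitrary-width integer types such as the i256 values above need no special treatment either; the bit width is simply repeated:

  define i256 @sketch_wide(i256* %p) {
    ; old syntax:  %v = load i256* %p
    %v = load i256, i256* %p
    ret i256 %v
  }
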
Modified: llvm/trunk/test/CodeGen/AArch64/paired-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/paired-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/paired-load.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/paired-load.ll Fri Feb 27 15:17:42 2015
@@ -8,8 +8,8 @@ target triple = "aarch64-linux-gnu"
 define void @f(i64* %p, i64* %q) {
   %addr2 = getelementptr i64, i64* %q, i32 1
   %addr = getelementptr i64, i64* %p, i32 1
-  %x = load i64* %p
-  %y = load i64* %addr
+  %x = load i64, i64* %p
+  %y = load i64, i64* %addr
   store i64 %x, i64* %q
   store i64 %y, i64* %addr2
   ret void

Modified: llvm/trunk/test/CodeGen/AArch64/pic-eh-stubs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/pic-eh-stubs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/pic-eh-stubs.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/pic-eh-stubs.ll Fri Feb 27 15:17:42 2015
@@ -38,7 +38,7 @@ catch:
   %3 = extractvalue { i8*, i32 } %0, 0
   %4 = tail call i8* @__cxa_begin_catch(i8* %3) nounwind
   %5 = bitcast i8* %4 to i32*
-  %exn.scalar = load i32* %5, align 4
+  %exn.scalar = load i32, i32* %5, align 4
   tail call void @__cxa_end_catch() nounwind
   br label %return
 

Modified: llvm/trunk/test/CodeGen/AArch64/ragreedy-csr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ragreedy-csr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ragreedy-csr.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/ragreedy-csr.ll Fri Feb 27 15:17:42 2015
@@ -35,31 +35,31 @@ declare i32 @__maskrune(i32, i64) #7
 define fastcc i32 @prune_match(%struct.Connector_struct* nocapture readonly %a, %struct.Connector_struct* nocapture readonly %b) #9 {
 entry:
   %label56 = bitcast %struct.Connector_struct* %a to i16*
-  %0 = load i16* %label56, align 2
+  %0 = load i16, i16* %label56, align 2
   %label157 = bitcast %struct.Connector_struct* %b to i16*
-  %1 = load i16* %label157, align 2
+  %1 = load i16, i16* %label157, align 2
   %cmp = icmp eq i16 %0, %1
   br i1 %cmp, label %if.end, label %return, !prof !988
 if.end:
   %priority = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %a, i64 0, i32 2
-  %2 = load i8* %priority, align 1
+  %2 = load i8, i8* %priority, align 1
   %priority5 = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %b, i64 0, i32 2
-  %3 = load i8* %priority5, align 1
+  %3 = load i8, i8* %priority5, align 1
   %string = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %a, i64 0, i32 5
-  %4 = load i8** %string, align 8
+  %4 = load i8*, i8** %string, align 8
   %string7 = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %b, i64 0, i32 5
-  %5 = load i8** %string7, align 8
+  %5 = load i8*, i8** %string7, align 8
   br label %while.cond
 while.cond:
   %lsr.iv27 = phi i64 [ %lsr.iv.next28, %if.end17 ], [ 0, %if.end ]
   %scevgep55 = getelementptr i8, i8* %4, i64 %lsr.iv27
-  %6 = load i8* %scevgep55, align 1
+  %6 = load i8, i8* %scevgep55, align 1
   %idxprom.i.i = sext i8 %6 to i64
   %isascii.i.i224 = icmp sgt i8 %6, -1
   br i1 %isascii.i.i224, label %cond.true.i.i, label %cond.false.i.i, !prof !181
 cond.true.i.i:
   %arrayidx.i.i = getelementptr inbounds %struct._RuneLocale, %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i
-  %7 = load i32* %arrayidx.i.i, align 4
+  %7 = load i32, i32* %arrayidx.i.i, align 4
   %and.i.i = and i32 %7, 32768
   br label %isupper.exit
 cond.false.i.i:
@@ -75,13 +75,13 @@ lor.rhs:
   %sunkaddr = ptrtoint i8* %5 to i64
   %sunkaddr58 = add i64 %sunkaddr, %lsr.iv27
   %sunkaddr59 = inttoptr i64 %sunkaddr58 to i8*
-  %9 = load i8* %sunkaddr59, align 1
+  %9 = load i8, i8* %sunkaddr59, align 1
   %idxprom.i.i214 = sext i8 %9 to i64
   %isascii.i.i213225 = icmp sgt i8 %9, -1
   br i1 %isascii.i.i213225, label %cond.true.i.i217, label %cond.false.i.i219, !prof !181
 cond.true.i.i217:
   %arrayidx.i.i215 = getelementptr inbounds %struct._RuneLocale, %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i214
-  %10 = load i32* %arrayidx.i.i215, align 4
+  %10 = load i32, i32* %arrayidx.i.i215, align 4
   %and.i.i216 = and i32 %10, 32768
   br label %isupper.exit223
 cond.false.i.i219:
@@ -97,11 +97,11 @@ while.body:
   %sunkaddr60 = ptrtoint i8* %4 to i64
   %sunkaddr61 = add i64 %sunkaddr60, %lsr.iv27
   %sunkaddr62 = inttoptr i64 %sunkaddr61 to i8*
-  %12 = load i8* %sunkaddr62, align 1
+  %12 = load i8, i8* %sunkaddr62, align 1
   %sunkaddr63 = ptrtoint i8* %5 to i64
   %sunkaddr64 = add i64 %sunkaddr63, %lsr.iv27
   %sunkaddr65 = inttoptr i64 %sunkaddr64 to i8*
-  %13 = load i8* %sunkaddr65, align 1
+  %13 = load i8, i8* %sunkaddr65, align 1
   %cmp14 = icmp eq i8 %12, %13
   br i1 %cmp14, label %if.end17, label %return, !prof !991
 if.end17:
@@ -115,13 +115,13 @@ if.then23:
   %sunkaddr66 = ptrtoint %struct.Connector_struct* %a to i64
   %sunkaddr67 = add i64 %sunkaddr66, 16
   %sunkaddr68 = inttoptr i64 %sunkaddr67 to i8**
-  %16 = load i8** %sunkaddr68, align 8
-  %17 = load i8* %16, align 1
+  %16 = load i8*, i8** %sunkaddr68, align 8
+  %17 = load i8, i8* %16, align 1
   %cmp26 = icmp eq i8 %17, 83
   %sunkaddr69 = ptrtoint i8* %4 to i64
   %sunkaddr70 = add i64 %sunkaddr69, %lsr.iv27
   %sunkaddr71 = inttoptr i64 %sunkaddr70 to i8*
-  %18 = load i8* %sunkaddr71, align 1
+  %18 = load i8, i8* %sunkaddr71, align 1
   br i1 %cmp26, label %land.lhs.true28, label %while.cond59.preheader, !prof !993
 land.lhs.true28:
   switch i8 %18, label %land.rhs.preheader [
@@ -132,7 +132,7 @@ land.lhs.true35:
   %sunkaddr72 = ptrtoint i8* %5 to i64
   %sunkaddr73 = add i64 %sunkaddr72, %lsr.iv27
   %sunkaddr74 = inttoptr i64 %sunkaddr73 to i8*
-  %19 = load i8* %sunkaddr74, align 1
+  %19 = load i8, i8* %sunkaddr74, align 1
   switch i8 %19, label %land.rhs.preheader [
     i8 112, label %land.lhs.true43
   ], !prof !995
@@ -157,7 +157,7 @@ land.lhs.true52:
   %sunkaddr76 = add i64 %sunkaddr75, %lsr.iv27
   %sunkaddr77 = add i64 %sunkaddr76, -1
   %sunkaddr78 = inttoptr i64 %sunkaddr77 to i8*
-  %24 = load i8* %sunkaddr78, align 1
+  %24 = load i8, i8* %sunkaddr78, align 1
   %cmp55 = icmp eq i8 %24, 73
   %cmp61233 = icmp eq i8 %18, 0
   %or.cond265 = or i1 %cmp55, %cmp61233
@@ -173,7 +173,7 @@ land.rhs:
   %lsr.iv = phi i64 [ 0, %land.rhs.preheader ], [ %lsr.iv.next, %if.then83 ]
   %25 = phi i8 [ %27, %if.then83 ], [ %18, %land.rhs.preheader ]
   %scevgep34 = getelementptr i8, i8* %scevgep33, i64 %lsr.iv
-  %26 = load i8* %scevgep34, align 1
+  %26 = load i8, i8* %scevgep34, align 1
   %cmp64 = icmp eq i8 %26, 0
   br i1 %cmp64, label %return, label %while.body66, !prof !1000
 while.body66:
@@ -189,7 +189,7 @@ lor.lhs.false74:
 if.then83:
   %scevgep44 = getelementptr i8, i8* %scevgep43, i64 %lsr.iv
   %scevgep45 = getelementptr i8, i8* %scevgep44, i64 1
-  %27 = load i8* %scevgep45, align 1
+  %27 = load i8, i8* %scevgep45, align 1
   %cmp61 = icmp eq i8 %27, 0
   %lsr.iv.next = add i64 %lsr.iv, 1
   br i1 %cmp61, label %return, label %land.rhs, !prof !999
@@ -202,7 +202,7 @@ while.cond95.preheader:
   %sunkaddr79 = ptrtoint i8* %4 to i64
   %sunkaddr80 = add i64 %sunkaddr79, %lsr.iv27
   %sunkaddr81 = inttoptr i64 %sunkaddr80 to i8*
-  %28 = load i8* %sunkaddr81, align 1
+  %28 = load i8, i8* %sunkaddr81, align 1
   %cmp97238 = icmp eq i8 %28, 0
   br i1 %cmp97238, label %return, label %land.rhs99.preheader, !prof !1004
 land.rhs99.preheader:
@@ -213,7 +213,7 @@ land.rhs99:
   %lsr.iv17 = phi i64 [ 0, %land.rhs99.preheader ], [ %lsr.iv.next18, %if.then117 ]
   %29 = phi i8 [ %31, %if.then117 ], [ %28, %land.rhs99.preheader ]
   %scevgep32 = getelementptr i8, i8* %scevgep31, i64 %lsr.iv17
-  %30 = load i8* %scevgep32, align 1
+  %30 = load i8, i8* %scevgep32, align 1
   %cmp101 = icmp eq i8 %30, 0
   br i1 %cmp101, label %return, label %while.body104, !prof !1005
 while.body104:
@@ -226,7 +226,7 @@ while.body104:
 if.then117:
   %scevgep41 = getelementptr i8, i8* %scevgep40, i64 %lsr.iv17
   %scevgep42 = getelementptr i8, i8* %scevgep41, i64 1
-  %31 = load i8* %scevgep42, align 1
+  %31 = load i8, i8* %scevgep42, align 1
   %cmp97 = icmp eq i8 %31, 0
   %lsr.iv.next18 = add i64 %lsr.iv17, 1
   br i1 %cmp97, label %return, label %land.rhs99, !prof !1004
@@ -239,7 +239,7 @@ while.cond130.preheader:
   %sunkaddr82 = ptrtoint i8* %4 to i64
   %sunkaddr83 = add i64 %sunkaddr82, %lsr.iv27
   %sunkaddr84 = inttoptr i64 %sunkaddr83 to i8*
-  %32 = load i8* %sunkaddr84, align 1
+  %32 = load i8, i8* %sunkaddr84, align 1
   %cmp132244 = icmp eq i8 %32, 0
   br i1 %cmp132244, label %return, label %land.rhs134.preheader, !prof !1008
 land.rhs134.preheader:
@@ -250,7 +250,7 @@ land.rhs134:
   %lsr.iv22 = phi i64 [ 0, %land.rhs134.preheader ], [ %lsr.iv.next23, %if.then152 ]
   %33 = phi i8 [ %35, %if.then152 ], [ %32, %land.rhs134.preheader ]
   %scevgep30 = getelementptr i8, i8* %scevgep29, i64 %lsr.iv22
-  %34 = load i8* %scevgep30, align 1
+  %34 = load i8, i8* %scevgep30, align 1
   %cmp136 = icmp eq i8 %34, 0
   br i1 %cmp136, label %return, label %while.body139, !prof !1009
 while.body139:
@@ -263,7 +263,7 @@ while.body139:
 if.then152:
   %scevgep38 = getelementptr i8, i8* %scevgep37, i64 %lsr.iv22
   %scevgep39 = getelementptr i8, i8* %scevgep38, i64 1
-  %35 = load i8* %scevgep39, align 1
+  %35 = load i8, i8* %scevgep39, align 1
   %cmp132 = icmp eq i8 %35, 0
   %lsr.iv.next23 = add i64 %lsr.iv22, 1
   br i1 %cmp132, label %return, label %land.rhs134, !prof !1008
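
For readers skimming these hunks: every change in this file (and the files below) is the same mechanical rewrite, adding the pointee type as an explicit first operand of the load. A minimal illustration of the before/after pattern, using placeholder names %p and %v rather than any value from the tests above:

  ; old one-operand form: result type inferred from the pointer operand
  %v = load i32* %p, align 4
  ; new two-operand form: result type spelled out explicitly
  %v = load i32, i32* %p, align 4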

Modified: llvm/trunk/test/CodeGen/AArch64/regress-tail-livereg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/regress-tail-livereg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/regress-tail-livereg.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/regress-tail-livereg.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ declare void @bar()
 
 define void @foo() {
 ; CHECK-LABEL: foo:
-       %func = load void()** @var
+       %func = load void()*, void()** @var
 
        ; Calling a function encourages @foo to use a callee-saved register,
        ; which makes it a natural choice for the tail call itself. But we don't

Modified: llvm/trunk/test/CodeGen/AArch64/regress-tblgen-chains.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/regress-tblgen-chains.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/regress-tblgen-chains.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/regress-tblgen-chains.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ define i64 @test_chains() {
   call void @bar(i8* %locvar)
 ; CHECK: bl {{_?bar}}
 
-  %inc.1 = load i8* %locvar
+  %inc.1 = load i8, i8* %locvar
   %inc.2 = zext i8 %inc.1 to i64
   %inc.3 = add i64 %inc.2, 1
   %inc.4 = trunc i64 %inc.3 to i8
@@ -30,7 +30,7 @@ define i64 @test_chains() {
 ; CHECK: sturb {{w[0-9]+}}, [x29, [[LOCADDR]]]
 ; CHECK: ldurb {{w[0-9]+}}, [x29, [[LOCADDR]]]
 
-  %ret.1 = load i8* %locvar
+  %ret.1 = load i8, i8* %locvar
   %ret.2 = zext i8 %ret.1 to i64
   ret i64 %ret.2
 ; CHECK: ret

Modified: llvm/trunk/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll Fri Feb 27 15:17:42 2015
@@ -7,15 +7,15 @@ define void @test_w29_reserved() {
 ; CHECK-LABEL: test_w29_reserved:
 ; CHECK: add x29, sp, #{{[0-9]+}}
 
-  %val1 = load volatile i32* @var
-  %val2 = load volatile i32* @var
-  %val3 = load volatile i32* @var
-  %val4 = load volatile i32* @var
-  %val5 = load volatile i32* @var
-  %val6 = load volatile i32* @var
-  %val7 = load volatile i32* @var
-  %val8 = load volatile i32* @var
-  %val9 = load volatile i32* @var
+  %val1 = load volatile i32, i32* @var
+  %val2 = load volatile i32, i32* @var
+  %val3 = load volatile i32, i32* @var
+  %val4 = load volatile i32, i32* @var
+  %val5 = load volatile i32, i32* @var
+  %val6 = load volatile i32, i32* @var
+  %val7 = load volatile i32, i32* @var
+  %val8 = load volatile i32, i32* @var
+  %val9 = load volatile i32, i32* @var
 
 ; CHECK-NOT: ldr w29,
 

Modified: llvm/trunk/test/CodeGen/AArch64/rm_redundant_cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/rm_redundant_cmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/rm_redundant_cmp.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/rm_redundant_cmp.ll Fri Feb 27 15:17:42 2015
@@ -15,8 +15,8 @@ define void @test_i16_2cmp_signed_1() {
 ; CHECK-NOT: cmp
 ; CHECK: b.ne
 entry:
-  %0 = load i16* getelementptr inbounds (%struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 1), align 2
-  %1 = load i16* getelementptr inbounds (%struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 2), align 2
+  %0 = load i16, i16* getelementptr inbounds (%struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 1), align 2
+  %1 = load i16, i16* getelementptr inbounds (%struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 2), align 2
   %cmp = icmp sgt i16 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
@@ -43,8 +43,8 @@ define void @test_i16_2cmp_signed_2() {
 ; CHECK-NOT: cmp
 ; CHECK: b.ge
 entry:
-  %0 = load i16* getelementptr inbounds (%struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 1), align 2
-  %1 = load i16* getelementptr inbounds (%struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 2), align 2
+  %0 = load i16, i16* getelementptr inbounds (%struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 1), align 2
+  %1 = load i16, i16* getelementptr inbounds (%struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 2), align 2
   %cmp = icmp sgt i16 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
@@ -71,8 +71,8 @@ define void @test_i16_2cmp_unsigned_1()
 ; CHECK-NOT: cmp
 ; CHECK: b.ne
 entry:
-  %0 = load i16* getelementptr inbounds (%struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 1), align 2
-  %1 = load i16* getelementptr inbounds (%struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 2), align 2
+  %0 = load i16, i16* getelementptr inbounds (%struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 1), align 2
+  %1 = load i16, i16* getelementptr inbounds (%struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 2), align 2
   %cmp = icmp ugt i16 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
@@ -99,8 +99,8 @@ define void @test_i16_2cmp_unsigned_2()
 ; CHECK-NOT: cmp
 ; CHECK: b.hs
 entry:
-  %0 = load i16* getelementptr inbounds (%struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 1), align 2
-  %1 = load i16* getelementptr inbounds (%struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 2), align 2
+  %0 = load i16, i16* getelementptr inbounds (%struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 1), align 2
+  %1 = load i16, i16* getelementptr inbounds (%struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 2), align 2
   %cmp = icmp ugt i16 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
@@ -136,8 +136,8 @@ define void @test_i8_2cmp_signed_1() {
 ; CHECK-NOT: cmp
 ; CHECK: b.ne
 entry:
-  %0 = load i8* getelementptr inbounds (%struct.s_signed_i8* @cost_s, i64 0, i32 1), align 2
-  %1 = load i8* getelementptr inbounds (%struct.s_signed_i8* @cost_s, i64 0, i32 2), align 2
+  %0 = load i8, i8* getelementptr inbounds (%struct.s_signed_i8* @cost_s, i64 0, i32 1), align 2
+  %1 = load i8, i8* getelementptr inbounds (%struct.s_signed_i8* @cost_s, i64 0, i32 2), align 2
   %cmp = icmp sgt i8 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
@@ -164,8 +164,8 @@ define void @test_i8_2cmp_signed_2() {
 ; CHECK-NOT: cmp
 ; CHECK: b.ge
 entry:
-  %0 = load i8* getelementptr inbounds (%struct.s_signed_i8* @cost_s, i64 0, i32 1), align 2
-  %1 = load i8* getelementptr inbounds (%struct.s_signed_i8* @cost_s, i64 0, i32 2), align 2
+  %0 = load i8, i8* getelementptr inbounds (%struct.s_signed_i8* @cost_s, i64 0, i32 1), align 2
+  %1 = load i8, i8* getelementptr inbounds (%struct.s_signed_i8* @cost_s, i64 0, i32 2), align 2
   %cmp = icmp sgt i8 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
@@ -192,8 +192,8 @@ define void @test_i8_2cmp_unsigned_1() {
 ; CHECK-NOT: cmp
 ; CHECK: b.ne
 entry:
-  %0 = load i8* getelementptr inbounds (%struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 1), align 2
-  %1 = load i8* getelementptr inbounds (%struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 2), align 2
+  %0 = load i8, i8* getelementptr inbounds (%struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 1), align 2
+  %1 = load i8, i8* getelementptr inbounds (%struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 2), align 2
   %cmp = icmp ugt i8 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
@@ -220,8 +220,8 @@ define void @test_i8_2cmp_unsigned_2() {
 ; CHECK-NOT: cmp
 ; CHECK: b.hs
 entry:
-  %0 = load i8* getelementptr inbounds (%struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 1), align 2
-  %1 = load i8* getelementptr inbounds (%struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 2), align 2
+  %0 = load i8, i8* getelementptr inbounds (%struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 1), align 2
+  %1 = load i8, i8* getelementptr inbounds (%struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 2), align 2
   %cmp = icmp ugt i8 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 

Modified: llvm/trunk/test/CodeGen/AArch64/sibling-call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/sibling-call.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/sibling-call.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/sibling-call.ll Fri Feb 27 15:17:42 2015
@@ -88,7 +88,7 @@ define void @indirect_tail() {
 ; CHECK-LABEL: indirect_tail:
 ; CHECK-NOT: sub sp, sp
 
-  %fptr = load void(i32)** @func
+  %fptr = load void(i32)*, void(i32)** @func
   tail call void %fptr(i32 42)
   ret void
 ; CHECK: ldr [[FPTR:x[1-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:func]
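
The same rule applies when the loaded value is itself a function pointer: the first operand is the full pointee type, void(i32)*, and the pointer operand keeps its void(i32)** type, e.g. (copied from the hunk above):

  %fptr = load void(i32)*, void(i32)** @func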

Modified: llvm/trunk/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@
 define i32 @test_stack_guard_remat2() {
 entry:
   %StackGuardSlot = alloca i8*
-  %StackGuard = load i8** bitcast (i64** @__stack_chk_guard to i8**)
+  %StackGuard = load i8*, i8** bitcast (i64** @__stack_chk_guard to i8**)
   call void @llvm.stackprotector(i8* %StackGuard, i8** %StackGuardSlot)
   %container = alloca [32 x i8], align 1
   call void @llvm.stackprotectorcheck(i8** bitcast (i64** @__stack_chk_guard to i8**))

Modified: llvm/trunk/test/CodeGen/AArch64/tbz-tbnz.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/tbz-tbnz.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/tbz-tbnz.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/tbz-tbnz.ll Fri Feb 27 15:17:42 2015
@@ -211,7 +211,7 @@ define void @test11(i64 %val1, i64* %ptr
 ; CHECK-NOT: cmp
 ; CHECK: tbz [[CMP]], #63
 
-  %val = load i64* %ptr
+  %val = load i64, i64* %ptr
   %tst = icmp slt i64 %val, 0
   br i1 %tst, label %if.then, label %if.end
 

Modified: llvm/trunk/test/CodeGen/AArch64/tst-br.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/tst-br.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/tst-br.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/tst-br.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@
 define i32 @test_tbz() {
 ; CHECK-LABEL: test_tbz:
 
-  %val = load i32* @var32
-  %val64 = load i64* @var64
+  %val = load i32, i32* @var32
+  %val64 = load i64, i64* @var64
 
   %tbit0 = and i32 %val, 32768
   %tst0 = icmp ne i32 %tbit0, 0

Modified: llvm/trunk/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ bb169.i:                ; preds = %entry
         ret void
 
 cond_true11:            ; preds = %entry
-        %tmp.i32 = load %struct.layer_data** @ld                ; <%struct.layer_data*> [#uses=2]
+        %tmp.i32 = load %struct.layer_data*, %struct.layer_data** @ld                ; <%struct.layer_data*> [#uses=2]
         %tmp3.i35 = getelementptr %struct.layer_data, %struct.layer_data* %tmp.i32, i32 0, i32 1, i32 2048; <i8*> [#uses=2]
         %tmp.i36 = getelementptr %struct.layer_data, %struct.layer_data* %tmp.i32, i32 0, i32 2          ; <i8**> [#uses=1]
         store i8* %tmp3.i35, i8** %tmp.i36

Modified: llvm/trunk/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll Fri Feb 27 15:17:42 2015
@@ -15,15 +15,15 @@ entry:
 	br label %cond_next489
 
 cond_next489:		; preds = %cond_false, %bb471
-	%j.7.in = load i8* null		; <i8> [#uses=1]
-	%i.8.in = load i8* null		; <i8> [#uses=1]
+	%j.7.in = load i8, i8* null		; <i8> [#uses=1]
+	%i.8.in = load i8, i8* null		; <i8> [#uses=1]
 	%i.8 = zext i8 %i.8.in to i32		; <i32> [#uses=4]
 	%j.7 = zext i8 %j.7.in to i32		; <i32> [#uses=4]
 	%tmp495 = getelementptr [4 x [4 x i32]], [4 x [4 x i32]]* %predicted_block, i32 0, i32 %i.8, i32 %j.7		; <i32*> [#uses=2]
-	%tmp496 = load i32* %tmp495		; <i32> [#uses=2]
-	%tmp502 = load i32* null		; <i32> [#uses=1]
+	%tmp496 = load i32, i32* %tmp495		; <i32> [#uses=2]
+	%tmp502 = load i32, i32* null		; <i32> [#uses=1]
 	%tmp542 = getelementptr [6 x [4 x [4 x i32]]], [6 x [4 x [4 x i32]]]* @quant_coef, i32 0, i32 0, i32 %i.8, i32 %j.7		; <i32*> [#uses=1]
-	%tmp543 = load i32* %tmp542		; <i32> [#uses=1]
+	%tmp543 = load i32, i32* %tmp542		; <i32> [#uses=1]
 	%tmp548 = ashr i32 0, 0		; <i32> [#uses=3]
 	%tmp561 = sub i32 0, %tmp496		; <i32> [#uses=3]
 	%abscond563 = icmp sgt i32 %tmp561, -1		; <i1> [#uses=1]
@@ -36,9 +36,9 @@ cond_next489:		; preds = %cond_false, %b
 
 cond_next589:		; preds = %cond_next489
 	%tmp605 = getelementptr [6 x [4 x [4 x i32]]], [6 x [4 x [4 x i32]]]* @dequant_coef, i32 0, i32 0, i32 %i.8, i32 %j.7		; <i32*> [#uses=1]
-	%tmp606 = load i32* %tmp605		; <i32> [#uses=1]
-	%tmp612 = load i32* null		; <i32> [#uses=1]
-	%tmp629 = load i32* null		; <i32> [#uses=1]
+	%tmp606 = load i32, i32* %tmp605		; <i32> [#uses=1]
+	%tmp612 = load i32, i32* null		; <i32> [#uses=1]
+	%tmp629 = load i32, i32* null		; <i32> [#uses=1]
 	%tmp629a = sitofp i32 %tmp629 to double		; <double> [#uses=1]
 	%tmp631 = fmul double %tmp629a, 0.000000e+00		; <double> [#uses=1]
 	%tmp632 = fadd double 0.000000e+00, %tmp631		; <double> [#uses=1]
@@ -85,9 +85,9 @@ bb737:		; preds = %cond_false689
 
 cond_true740:		; preds = %bb737
 	%tmp761 = call fastcc i32 @sign( i32 %tmp576, i32 0 )		; <i32> [#uses=1]
-	%tmp780 = load i32* null		; <i32> [#uses=1]
+	%tmp780 = load i32, i32* null		; <i32> [#uses=1]
 	%tmp785 = getelementptr [4 x [4 x i32]], [4 x [4 x i32]]* @A, i32 0, i32 %i.8, i32 %j.7		; <i32*> [#uses=1]
-	%tmp786 = load i32* %tmp785		; <i32> [#uses=1]
+	%tmp786 = load i32, i32* %tmp785		; <i32> [#uses=1]
 	%tmp781 = mul i32 %tmp780, %tmp761		; <i32> [#uses=1]
 	%tmp787 = mul i32 %tmp781, %tmp786		; <i32> [#uses=1]
 	%tmp789 = shl i32 %tmp787, 0		; <i32> [#uses=1]
@@ -96,7 +96,7 @@ cond_true740:		; preds = %bb737
 
 cond_next791:		; preds = %cond_true740, %bb737
 	%ilev.1 = phi i32 [ %tmp790, %cond_true740 ], [ 0, %bb737 ]		; <i32> [#uses=1]
-	%tmp796 = load i32* %tmp495		; <i32> [#uses=1]
+	%tmp796 = load i32, i32* %tmp495		; <i32> [#uses=1]
 	%tmp798 = add i32 %tmp796, %ilev.1		; <i32> [#uses=1]
 	%tmp812 = mul i32 0, %tmp502		; <i32> [#uses=0]
 	%tmp818 = call fastcc i32 @sign( i32 0, i32 %tmp798 )		; <i32> [#uses=0]

Modified: llvm/trunk/test/CodeGen/ARM/2007-03-07-CombinerCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2007-03-07-CombinerCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2007-03-07-CombinerCrash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2007-03-07-CombinerCrash.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ define fastcc i8* @read_sleb128(i8* %p,
 
 bb:		; preds = %bb, %0
 	%p_addr.0 = getelementptr i8, i8* %p, i32 0		; <i8*> [#uses=1]
-	%tmp2 = load i8* %p_addr.0		; <i8> [#uses=2]
+	%tmp2 = load i8, i8* %p_addr.0		; <i8> [#uses=2]
 	%tmp4.rec = add i32 0, 1		; <i32> [#uses=1]
 	%tmp4 = getelementptr i8, i8* %p, i32 %tmp4.rec		; <i8*> [#uses=1]
 	%tmp56 = zext i8 %tmp2 to i32		; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/2007-03-13-InstrSched.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2007-03-13-InstrSched.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2007-03-13-InstrSched.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2007-03-13-InstrSched.ll Fri Feb 27 15:17:42 2015
@@ -23,25 +23,25 @@ bb74:		; preds = %bb26, %newFuncRoot
 	%d1.1 = phi i32 [ %tmp54, %bb26 ], [ 8192, %newFuncRoot ]		; <i32> [#uses=2]
 	%d2.1 = phi i32 [ %tmp64, %bb26 ], [ 8192, %newFuncRoot ]		; <i32> [#uses=2]
 	%d3.1 = phi i32 [ %tmp69, %bb26 ], [ 8192, %newFuncRoot ]		; <i32> [#uses=2]
-	%fm.1 = load i32* %fm.1.in		; <i32> [#uses=4]
+	%fm.1 = load i32, i32* %fm.1.in		; <i32> [#uses=4]
 	icmp eq i32 %fp.1.rec, %tmp8		; <i1>:0 [#uses=1]
 	br i1 %0, label %bb78.exitStub, label %bb26
 
 bb26:		; preds = %bb74
 	%tmp28 = getelementptr i32*, i32** %tmp1, i32 %fp.1.rec		; <i32**> [#uses=1]
-	%tmp30 = load i32** %tmp28		; <i32*> [#uses=4]
+	%tmp30 = load i32*, i32** %tmp28		; <i32*> [#uses=4]
 	%tmp33 = getelementptr i32, i32* %tmp30, i32 %i.0196.0.ph		; <i32*> [#uses=1]
-	%tmp34 = load i32* %tmp33		; <i32> [#uses=1]
+	%tmp34 = load i32, i32* %tmp33		; <i32> [#uses=1]
 	%tmp38 = getelementptr i32, i32* %tmp30, i32 %tmp36224		; <i32*> [#uses=1]
-	%tmp39 = load i32* %tmp38		; <i32> [#uses=1]
+	%tmp39 = load i32, i32* %tmp38		; <i32> [#uses=1]
 	%tmp42 = mul i32 %tmp34, %fm.1		; <i32> [#uses=1]
 	%tmp44 = add i32 %tmp42, %d0.1		; <i32> [#uses=1]
 	%tmp48 = getelementptr i32, i32* %tmp30, i32 %tmp46223		; <i32*> [#uses=1]
-	%tmp49 = load i32* %tmp48		; <i32> [#uses=1]
+	%tmp49 = load i32, i32* %tmp48		; <i32> [#uses=1]
 	%tmp52 = mul i32 %tmp39, %fm.1		; <i32> [#uses=1]
 	%tmp54 = add i32 %tmp52, %d1.1		; <i32> [#uses=1]
 	%tmp58 = getelementptr i32, i32* %tmp30, i32 %tmp56222		; <i32*> [#uses=1]
-	%tmp59 = load i32* %tmp58		; <i32> [#uses=1]
+	%tmp59 = load i32, i32* %tmp58		; <i32> [#uses=1]
 	%tmp62 = mul i32 %tmp49, %fm.1		; <i32> [#uses=1]
 	%tmp64 = add i32 %tmp62, %d2.1		; <i32> [#uses=1]
 	%tmp67 = mul i32 %tmp59, %fm.1		; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/2007-03-21-JoinIntervalsCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2007-03-21-JoinIntervalsCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2007-03-21-JoinIntervalsCrash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2007-03-21-JoinIntervalsCrash.ll Fri Feb 27 15:17:42 2015
@@ -71,7 +71,7 @@ cond_next856:		; preds = %cond_true851
 	ret void
 
 bb866:		; preds = %cond_true851
-	%tmp874 = load i32* %tmp2122		; <i32> [#uses=1]
+	%tmp874 = load i32, i32* %tmp2122		; <i32> [#uses=1]
 	%tmp876877 = trunc i32 %tmp874 to i8		; <i8> [#uses=1]
 	icmp eq i8 %tmp876877, 1		; <i1>:0 [#uses=1]
 	br i1 %0, label %cond_next881, label %cond_true878
@@ -82,7 +82,7 @@ cond_true878:		; preds = %bb866
 cond_next881:		; preds = %bb866
 	%tmp884885 = inttoptr i64 %tmp10959 to %struct.tree_identifier*		; <%struct.tree_identifier*> [#uses=1]
 	%tmp887 = getelementptr %struct.tree_identifier, %struct.tree_identifier* %tmp884885, i32 0, i32 1, i32 0		; <i8**> [#uses=1]
-	%tmp888 = load i8** %tmp887		; <i8*> [#uses=1]
+	%tmp888 = load i8*, i8** %tmp887		; <i8*> [#uses=1]
 	tail call void (i32, ...)* @error( i32 undef, i8* %tmp888 )
 	ret void
 

Modified: llvm/trunk/test/CodeGen/ARM/2007-03-27-RegScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2007-03-27-RegScavengerAssert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2007-03-27-RegScavengerAssert.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2007-03-27-RegScavengerAssert.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ cond_true340:		; preds = %entry
 	ret void
 
 cond_next416:		; preds = %entry
-	%tmp1085 = load %struct.rtx_def** %ad_addr		; <%struct.rtx_def*> [#uses=1]
+	%tmp1085 = load %struct.rtx_def*, %struct.rtx_def** %ad_addr		; <%struct.rtx_def*> [#uses=1]
 	br i1 false, label %bb1084, label %cond_true418
 
 cond_true418:		; preds = %cond_next416
@@ -25,7 +25,7 @@ bb1084:		; preds = %cond_next416
 cond_true1092:		; preds = %bb1084
 	%tmp1094 = getelementptr %struct.rtx_def, %struct.rtx_def* %tmp1085, i32 0, i32 3		; <%struct.u*> [#uses=1]
 	%tmp10981099 = bitcast %struct.u* %tmp1094 to %struct.rtx_def**		; <%struct.rtx_def**> [#uses=2]
-	%tmp1101 = load %struct.rtx_def** %tmp10981099		; <%struct.rtx_def*> [#uses=1]
+	%tmp1101 = load %struct.rtx_def*, %struct.rtx_def** %tmp10981099		; <%struct.rtx_def*> [#uses=1]
 	store %struct.rtx_def* %tmp1101, %struct.rtx_def** %ad_addr
 	br label %cond_next1102
 

Modified: llvm/trunk/test/CodeGen/ARM/2007-04-02-RegScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2007-04-02-RegScavengerAssert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2007-04-02-RegScavengerAssert.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2007-04-02-RegScavengerAssert.ll Fri Feb 27 15:17:42 2015
@@ -28,7 +28,7 @@ entry:
 	br i1 false, label %bb.preheader, label %return
 
 bb.preheader:		; preds = %entry
-	%tbl.014.us = load i32* null		; <i32> [#uses=1]
+	%tbl.014.us = load i32, i32* null		; <i32> [#uses=1]
 	br i1 false, label %cond_next.us, label %bb
 
 cond_next51.us:		; preds = %cond_next.us, %cond_true33.us.cond_true46.us_crit_edge
@@ -41,7 +41,7 @@ cond_true33.us.cond_true46.us_crit_edge:
 
 cond_next.us:		; preds = %bb.preheader
 	%tmp37.us = getelementptr %struct.X_Y, %struct.X_Y* %cinfo, i32 0, i32 17, i32 %tbl.014.us		; <%struct.H_TBL**> [#uses=3]
-	%tmp4524.us = load %struct.H_TBL** %tmp37.us		; <%struct.H_TBL*> [#uses=1]
+	%tmp4524.us = load %struct.H_TBL*, %struct.H_TBL** %tmp37.us		; <%struct.H_TBL*> [#uses=1]
 	icmp eq %struct.H_TBL* %tmp4524.us, null		; <i1>:0 [#uses=1]
 	br i1 %0, label %cond_true33.us.cond_true46.us_crit_edge, label %cond_next51.us
 

Modified: llvm/trunk/test/CodeGen/ARM/2007-04-03-UndefinedSymbol.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2007-04-03-UndefinedSymbol.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2007-04-03-UndefinedSymbol.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2007-04-03-UndefinedSymbol.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@
 define internal void @_ZN1B1iEv(%struct.B* %this) {
 entry:
 	%tmp1 = getelementptr %struct.B, %struct.B* %this, i32 0, i32 0		; <i32*> [#uses=1]
-	%tmp2 = load i32* %tmp1		; <i32> [#uses=1]
+	%tmp2 = load i32, i32* %tmp1		; <i32> [#uses=1]
 	%tmp4 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([7 x i8]* @str, i32 0, i32 0), i32 %tmp2 )		; <i32> [#uses=0]
 	ret void
 }
@@ -19,7 +19,7 @@ declare i32 @printf(i8*, ...)
 define internal void @_ZN1B1jEv(%struct.B* %this) {
 entry:
 	%tmp1 = getelementptr %struct.B, %struct.B* %this, i32 0, i32 0		; <i32*> [#uses=1]
-	%tmp2 = load i32* %tmp1		; <i32> [#uses=1]
+	%tmp2 = load i32, i32* %tmp1		; <i32> [#uses=1]
 	%tmp4 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([7 x i8]* @str1, i32 0, i32 0), i32 %tmp2 )		; <i32> [#uses=0]
 	ret void
 }
@@ -37,11 +37,11 @@ cond_true.i:		; preds = %entry
 	%b2.i = bitcast %struct.B* %b.i to i8*		; <i8*> [#uses=1]
 	%ctg23.i = getelementptr i8, i8* %b2.i, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1)		; <i8*> [#uses=1]
 	%tmp121314.i = bitcast i8* %ctg23.i to i32 (...)***		; <i32 (...)***> [#uses=1]
-	%tmp15.i = load i32 (...)*** %tmp121314.i		; <i32 (...)**> [#uses=1]
+	%tmp15.i = load i32 (...)**, i32 (...)*** %tmp121314.i		; <i32 (...)**> [#uses=1]
 	%tmp151.i = bitcast i32 (...)** %tmp15.i to i8*		; <i8*> [#uses=1]
 	%ctg2.i = getelementptr i8, i8* %tmp151.i, i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32)		; <i8*> [#uses=1]
 	%tmp2021.i = bitcast i8* %ctg2.i to i32 (...)**		; <i32 (...)**> [#uses=1]
-	%tmp22.i = load i32 (...)** %tmp2021.i		; <i32 (...)*> [#uses=1]
+	%tmp22.i = load i32 (...)*, i32 (...)** %tmp2021.i		; <i32 (...)*> [#uses=1]
 	%tmp2223.i = bitcast i32 (...)* %tmp22.i to void (%struct.B*)*		; <void (%struct.B*)*> [#uses=1]
 	br label %_Z3fooiM1BFvvE.exit
 
@@ -59,11 +59,11 @@ cond_true.i46:		; preds = %_Z3fooiM1BFvv
 	%b2.i35 = bitcast %struct.B* %b.i29 to i8*		; <i8*> [#uses=1]
 	%ctg23.i36 = getelementptr i8, i8* %b2.i35, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) to i64), i64 32) to i32), i32 1)		; <i8*> [#uses=1]
 	%tmp121314.i37 = bitcast i8* %ctg23.i36 to i32 (...)***		; <i32 (...)***> [#uses=1]
-	%tmp15.i38 = load i32 (...)*** %tmp121314.i37		; <i32 (...)**> [#uses=1]
+	%tmp15.i38 = load i32 (...)**, i32 (...)*** %tmp121314.i37		; <i32 (...)**> [#uses=1]
 	%tmp151.i41 = bitcast i32 (...)** %tmp15.i38 to i8*		; <i8*> [#uses=1]
 	%ctg2.i42 = getelementptr i8, i8* %tmp151.i41, i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32)		; <i8*> [#uses=1]
 	%tmp2021.i43 = bitcast i8* %ctg2.i42 to i32 (...)**		; <i32 (...)**> [#uses=1]
-	%tmp22.i44 = load i32 (...)** %tmp2021.i43		; <i32 (...)*> [#uses=1]
+	%tmp22.i44 = load i32 (...)*, i32 (...)** %tmp2021.i43		; <i32 (...)*> [#uses=1]
 	%tmp2223.i45 = bitcast i32 (...)* %tmp22.i44 to void (%struct.B*)*		; <void (%struct.B*)*> [#uses=1]
 	br label %_Z3fooiM1BFvvE.exit56
 
@@ -81,11 +81,11 @@ cond_true.i18:		; preds = %_Z3fooiM1BFvv
 	%b2.i7 = bitcast %struct.B* %b.i1 to i8*		; <i8*> [#uses=1]
 	%ctg23.i8 = getelementptr i8, i8* %b2.i7, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1)		; <i8*> [#uses=1]
 	%tmp121314.i9 = bitcast i8* %ctg23.i8 to i32 (...)***		; <i32 (...)***> [#uses=1]
-	%tmp15.i10 = load i32 (...)*** %tmp121314.i9		; <i32 (...)**> [#uses=1]
+	%tmp15.i10 = load i32 (...)**, i32 (...)*** %tmp121314.i9		; <i32 (...)**> [#uses=1]
 	%tmp151.i13 = bitcast i32 (...)** %tmp15.i10 to i8*		; <i8*> [#uses=1]
 	%ctg2.i14 = getelementptr i8, i8* %tmp151.i13, i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32)		; <i8*> [#uses=1]
 	%tmp2021.i15 = bitcast i8* %ctg2.i14 to i32 (...)**		; <i32 (...)**> [#uses=1]
-	%tmp22.i16 = load i32 (...)** %tmp2021.i15		; <i32 (...)*> [#uses=1]
+	%tmp22.i16 = load i32 (...)*, i32 (...)** %tmp2021.i15		; <i32 (...)*> [#uses=1]
 	%tmp2223.i17 = bitcast i32 (...)* %tmp22.i16 to void (%struct.B*)*		; <void (%struct.B*)*> [#uses=1]
 	br label %_Z3fooiM1BFvvE.exit28
 

Modified: llvm/trunk/test/CodeGen/ARM/2007-04-30-CombinerCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2007-04-30-CombinerCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2007-04-30-CombinerCrash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2007-04-30-CombinerCrash.ll Fri Feb 27 15:17:42 2015
@@ -9,15 +9,15 @@ target triple = "arm-apple-darwin8"
 
 define fastcc void @EvaluateDevelopment() {
 entry:
-        %tmp7 = load i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 7)         ; <i64> [#uses=1]
-        %tmp50 = load i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 0)                ; <i64> [#uses=1]
-        %tmp52 = load i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 1)                ; <i64> [#uses=1]
+        %tmp7 = load i64, i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 7)         ; <i64> [#uses=1]
+        %tmp50 = load i64, i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 0)                ; <i64> [#uses=1]
+        %tmp52 = load i64, i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 1)                ; <i64> [#uses=1]
         %tmp53 = or i64 %tmp52, %tmp50          ; <i64> [#uses=1]
-        %tmp57.b = load i1* @rank_mask.1.b              ; <i1> [#uses=1]
+        %tmp57.b = load i1, i1* @rank_mask.1.b              ; <i1> [#uses=1]
         %tmp57 = select i1 %tmp57.b, i64 71776119061217280, i64 0               ; <i64> [#uses=1]
         %tmp58 = and i64 %tmp57, %tmp7          ; <i64> [#uses=1]
         %tmp59 = lshr i64 %tmp58, 8             ; <i64> [#uses=1]
-        %tmp63 = load i64* getelementptr ([8 x i64]* @file_mask, i32 0, i32 4)          ; <i64> [#uses=1]
+        %tmp63 = load i64, i64* getelementptr ([8 x i64]* @file_mask, i32 0, i32 4)          ; <i64> [#uses=1]
         %tmp64 = or i64 %tmp63, 0               ; <i64> [#uses=1]
         %tmp65 = and i64 %tmp59, %tmp53         ; <i64> [#uses=1]
         %tmp66 = and i64 %tmp65, %tmp64         ; <i64> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/2007-05-03-BadPostIndexedLd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2007-05-03-BadPostIndexedLd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2007-05-03-BadPostIndexedLd.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2007-05-03-BadPostIndexedLd.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@
 
 define fastcc void @Draw7(i32 %Option, i32* %Status) {
 entry:
-	%tmp115.b = load i1* @FirstTime.4637.b		; <i1> [#uses=1]
+	%tmp115.b = load i1, i1* @FirstTime.4637.b		; <i1> [#uses=1]
 	br i1 %tmp115.b, label %cond_next239, label %cond_next.i
 
 cond_next.i:		; preds = %entry
@@ -88,11 +88,11 @@ cond_next1267:		; preds = %cond_next1235
 	br i1 %tmp1148, label %cond_next1275, label %cond_true1272
 
 cond_true1272:		; preds = %cond_next1267
-	%tmp1273 = load %struct.TestObj** null		; <%struct.TestObj*> [#uses=2]
+	%tmp1273 = load %struct.TestObj*, %struct.TestObj** null		; <%struct.TestObj*> [#uses=2]
 	%tmp2930.i = ptrtoint %struct.TestObj* %tmp1273 to i32		; <i32> [#uses=1]
 	%tmp42.i348 = sub i32 0, %tmp2930.i		; <i32> [#uses=1]
 	%tmp45.i = getelementptr %struct.TestObj, %struct.TestObj* %tmp1273, i32 0, i32 0		; <i8**> [#uses=2]
-	%tmp48.i = load i8** %tmp45.i		; <i8*> [#uses=1]
+	%tmp48.i = load i8*, i8** %tmp45.i		; <i8*> [#uses=1]
 	%tmp50.i350 = call i32 (i8*, i8*, ...)* @sprintf( i8* getelementptr ([256 x i8]* @Msg, i32 0, i32 0), i8* getelementptr ([48 x i8]* @.str53615, i32 0, i32 0), i8* null, i8** %tmp45.i, i8* %tmp48.i )		; <i32> [#uses=0]
 	br i1 false, label %cond_true.i632.i, label %Ut_TraceMsg.exit648.i
 

Modified: llvm/trunk/test/CodeGen/ARM/2007-05-07-tailmerge-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2007-05-07-tailmerge-1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2007-05-07-tailmerge-1.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2007-05-07-tailmerge-1.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ entry:
 	%retval = alloca i32, align 4		; <i32*> [#uses=1]
 	store i32 %i, i32* %i_addr
 	store i32 %q, i32* %q_addr
-	%tmp = load i32* %i_addr		; <i32> [#uses=1]
+	%tmp = load i32, i32* %i_addr		; <i32> [#uses=1]
 	%tmp1 = icmp ne i32 %tmp, 0		; <i1> [#uses=1]
 	%tmp12 = zext i1 %tmp1 to i8		; <i8> [#uses=1]
 	%toBool = icmp ne i8 %tmp12, 0		; <i1> [#uses=1]
@@ -34,7 +34,7 @@ cond_false:		; preds = %entry
 	br label %cond_next
 
 cond_next:		; preds = %cond_false, %cond_true
-	%tmp7 = load i32* %q_addr		; <i32> [#uses=1]
+	%tmp7 = load i32, i32* %q_addr		; <i32> [#uses=1]
 	%tmp8 = icmp ne i32 %tmp7, 0		; <i1> [#uses=1]
 	%tmp89 = zext i1 %tmp8 to i8		; <i8> [#uses=1]
 	%toBool10 = icmp ne i8 %tmp89, 0		; <i1> [#uses=1]
@@ -55,7 +55,7 @@ cond_next18:		; preds = %cond_false15, %
 	br label %return
 
 return:		; preds = %cond_next18
-	%retval20 = load i32* %retval		; <i32> [#uses=1]
+	%retval20 = load i32, i32* %retval		; <i32> [#uses=1]
 	ret i32 %retval20
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/2007-05-09-tailmerge-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2007-05-09-tailmerge-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2007-05-09-tailmerge-2.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2007-05-09-tailmerge-2.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ entry:
 	%retval = alloca i32, align 4		; <i32*> [#uses=1]
 	store i32 %i, i32* %i_addr
 	store i32 %q, i32* %q_addr
-	%tmp = load i32* %i_addr		; <i32> [#uses=1]
+	%tmp = load i32, i32* %i_addr		; <i32> [#uses=1]
 	%tmp1 = icmp ne i32 %tmp, 0		; <i1> [#uses=1]
 	%tmp12 = zext i1 %tmp1 to i8		; <i8> [#uses=1]
 	%toBool = icmp ne i8 %tmp12, 0		; <i1> [#uses=1]
@@ -28,7 +28,7 @@ entry:
 cond_true:		; preds = %entry
 	%tmp3 = call i32 (...)* @bar( )		; <i32> [#uses=0]
 	%tmp4 = call i32 (...)* @baz( i32 5, i32 6 )		; <i32> [#uses=0]
-	%tmp7 = load i32* %q_addr		; <i32> [#uses=1]
+	%tmp7 = load i32, i32* %q_addr		; <i32> [#uses=1]
 	%tmp8 = icmp ne i32 %tmp7, 0		; <i1> [#uses=1]
 	%tmp89 = zext i1 %tmp8 to i8		; <i8> [#uses=1]
 	%toBool10 = icmp ne i8 %tmp89, 0		; <i1> [#uses=1]
@@ -37,7 +37,7 @@ cond_true:		; preds = %entry
 cond_false:		; preds = %entry
 	%tmp5 = call i32 (...)* @foo( )		; <i32> [#uses=0]
 	%tmp6 = call i32 (...)* @baz( i32 5, i32 6 )		; <i32> [#uses=0]
-	%tmp27 = load i32* %q_addr		; <i32> [#uses=1]
+	%tmp27 = load i32, i32* %q_addr		; <i32> [#uses=1]
 	%tmp28 = icmp ne i32 %tmp27, 0		; <i1> [#uses=1]
 	%tmp289 = zext i1 %tmp28 to i8		; <i8> [#uses=1]
 	%toBool210 = icmp ne i8 %tmp289, 0		; <i1> [#uses=1]
@@ -58,7 +58,7 @@ cond_next18:		; preds = %cond_false15, %
 	br label %return
 
 return:		; preds = %cond_next18
-	%retval20 = load i32* %retval		; <i32> [#uses=1]
+	%retval20 = load i32, i32* %retval		; <i32> [#uses=1]
 	ret i32 %retval20
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll Fri Feb 27 15:17:42 2015
@@ -29,7 +29,7 @@ entry:
 	%retval = alloca i32, align 4		; <i32*> [#uses=1]
 	store i32 %i, i32* %i_addr
 	store i32 %q, i32* %q_addr
-	%tmp = load i32* %i_addr		; <i32> [#uses=1]
+	%tmp = load i32, i32* %i_addr		; <i32> [#uses=1]
 	%tmp1 = icmp ne i32 %tmp, 0		; <i1> [#uses=1]
 	%tmp12 = zext i1 %tmp1 to i8		; <i8> [#uses=1]
 	%toBool = icmp ne i8 %tmp12, 0		; <i1> [#uses=1]
@@ -38,7 +38,7 @@ entry:
 cond_true:		; preds = %entry
 	%tmp3 = call i32 (...)* @bar( )		; <i32> [#uses=0]
 	%tmp4 = call i32 (...)* @baz( i32 5, i32 6 )		; <i32> [#uses=0]
-	%tmp7 = load i32* %q_addr		; <i32> [#uses=1]
+	%tmp7 = load i32, i32* %q_addr		; <i32> [#uses=1]
 	%tmp8 = icmp ne i32 %tmp7, 0		; <i1> [#uses=1]
 	%tmp89 = zext i1 %tmp8 to i8		; <i8> [#uses=1]
 	%toBool10 = icmp ne i8 %tmp89, 0		; <i1> [#uses=1]
@@ -47,7 +47,7 @@ cond_true:		; preds = %entry
 cond_false:		; preds = %entry
 	%tmp5 = call i32 (...)* @foo( )		; <i32> [#uses=0]
 	%tmp6 = call i32 (...)* @baz( i32 5, i32 6 )		; <i32> [#uses=0]
-	%tmp27 = load i32* %q_addr		; <i32> [#uses=1]
+	%tmp27 = load i32, i32* %q_addr		; <i32> [#uses=1]
 	%tmp28 = icmp ne i32 %tmp27, 0		; <i1> [#uses=1]
 	%tmp289 = zext i1 %tmp28 to i8		; <i8> [#uses=1]
 	%toBool210 = icmp ne i8 %tmp289, 0		; <i1> [#uses=1]
@@ -68,7 +68,7 @@ cond_next18:		; preds = %cond_false15, %
 	br label %return
 
 return:		; preds = %cond_next18
-	%retval20 = load i32* %retval		; <i32> [#uses=1]
+	%retval20 = load i32, i32* %retval		; <i32> [#uses=1]
 	ret i32 %retval20
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/2007-08-15-ReuseBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2007-08-15-ReuseBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2007-08-15-ReuseBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2007-08-15-ReuseBug.ll Fri Feb 27 15:17:42 2015
@@ -45,7 +45,7 @@ bb102.i:		; preds = %cond_next212.i
 
 cond_true110.i:		; preds = %bb102.i
 	%tmp116.i = getelementptr i8*, i8** %argv_addr.2321.0.i, i32 2		; <i8**> [#uses=1]
-	%tmp117.i = load i8** %tmp116.i		; <i8*> [#uses=1]
+	%tmp117.i = load i8*, i8** %tmp116.i		; <i8*> [#uses=1]
 	%tmp126425.i = call %struct.FILE* @fopen( i8* %tmp117.i, i8* getelementptr ([2 x i8]* @.str44, i32 0, i32 0) )		; <%struct.FILE*> [#uses=0]
 	ret i32 0
 
@@ -60,7 +60,7 @@ C_addcmd.exit120.i:		; preds = %cond_nex
 	%tmp3.i.i.i.i105.i = call i8* @calloc( i32 15, i32 1 )		; <i8*> [#uses=1]
 	%tmp1.i108.i = getelementptr [100 x i8*], [100 x i8*]* @_C_cmds, i32 0, i32 0		; <i8**> [#uses=1]
 	store i8* %tmp3.i.i.i.i105.i, i8** %tmp1.i108.i, align 4
-	%tmp.i91.i = load i32* @_C_nextcmd, align 4		; <i32> [#uses=1]
+	%tmp.i91.i = load i32, i32* @_C_nextcmd, align 4		; <i32> [#uses=1]
 	store i32 0, i32* @_C_nextcmd, align 4
 	%tmp3.i.i.i.i95.i = call i8* @calloc( i32 15, i32 1 )		; <i8*> [#uses=1]
 	%tmp1.i98.i = getelementptr [100 x i8*], [100 x i8*]* @_C_cmds, i32 0, i32 %tmp.i91.i		; <i8**> [#uses=1]
@@ -78,7 +78,7 @@ cond_next212.i:		; preds = %cond_next212
 	%argv_addr.2321.0.i = phi i8** [ %argv, %entry ], [ %tmp214.i, %bb192.i ], [ %tmp214.i, %C_addcmd.exit120.i ], [ %tmp214.i, %bb30.i ], [ %tmp214.i, %bb21.i ], [ %tmp214.i, %cond_next212.i ], [ %tmp214.i, %cond_next212.i ], [ %tmp214.i, %cond_next212.i ], [ %tmp214.i, %cond_next212.i ]		; <i8**> [#uses=2]
 	%argc_addr.2358.0.i = phi i32 [ %argc, %entry ], [ %tmp205399.i, %bb30.i ], [ 0, %bb21.i ], [ 0, %C_addcmd.exit120.i ], [ 0, %bb192.i ], [ 0, %cond_next212.i ], [ 0, %cond_next212.i ], [ 0, %cond_next212.i ], [ 0, %cond_next212.i ]		; <i32> [#uses=1]
 	%tmp214.i = getelementptr i8*, i8** %argv_addr.2321.0.i, i32 1		; <i8**> [#uses=9]
-	%tmp215.i = load i8** %tmp214.i		; <i8*> [#uses=1]
+	%tmp215.i = load i8*, i8** %tmp214.i		; <i8*> [#uses=1]
 	%tmp1314.i = sext i8 0 to i32		; <i32> [#uses=1]
 	switch i32 %tmp1314.i, label %bb192.i [
 		 i32 76, label %C_addcmd.exit120.i

Modified: llvm/trunk/test/CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
 define i32 @vorbis_staticbook_pack(%struct.static_codebook* %c, %struct.oggpack_buffer* %opb) {
 entry:
 	%opb_addr = alloca %struct.oggpack_buffer*		; <%struct.oggpack_buffer**> [#uses=1]
-	%tmp1 = load %struct.oggpack_buffer** %opb_addr, align 4		; <%struct.oggpack_buffer*> [#uses=1]
+	%tmp1 = load %struct.oggpack_buffer*, %struct.oggpack_buffer** %opb_addr, align 4		; <%struct.oggpack_buffer*> [#uses=1]
 	call void @oggpack_write( %struct.oggpack_buffer* %tmp1, i32 5653314, i32 24 ) nounwind 
 	call void @oggpack_write( %struct.oggpack_buffer* null, i32 0, i32 24 ) nounwind 
 	unreachable

Modified: llvm/trunk/test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define i32 @main(i32 %argc, i8** %argv)
 entry:
 	br label %bb1
 bb1:		; preds = %entry
-	%tmp3.i.i = load i8* null, align 1		; <i8> [#uses=1]
+	%tmp3.i.i = load i8, i8* null, align 1		; <i8> [#uses=1]
 	%tmp4.i.i = icmp slt i8 %tmp3.i.i, 0		; <i1> [#uses=1]
 	br i1 %tmp4.i.i, label %bb2, label %bb3
 bb2:		; preds = %bb1

Modified: llvm/trunk/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ bb74.i:		; preds = %bb88.i, %bb74.i, %en
 bb88.i:		; preds = %bb74.i
 	br i1 false, label %mandel.exit, label %bb74.i
 mandel.exit:		; preds = %bb88.i
-	%tmp2 = load volatile double* getelementptr ({ double, double }* @accum, i32 0, i32 0), align 8		; <double> [#uses=1]
+	%tmp2 = load volatile double, double* getelementptr ({ double, double }* @accum, i32 0, i32 0), align 8		; <double> [#uses=1]
 	%tmp23 = fptosi double %tmp2 to i32		; <i32> [#uses=1]
 	%tmp5 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @.str, i32 0, i32 0), i32 %tmp23 )		; <i32> [#uses=0]
 	ret i32 0

Modified: llvm/trunk/test/CodeGen/ARM/2008-04-10-ScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2008-04-10-ScavengerAssert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2008-04-10-ScavengerAssert.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2008-04-10-ScavengerAssert.ll Fri Feb 27 15:17:42 2015
@@ -57,7 +57,7 @@ cond_false373.i.i:		; preds = %bb.i350.i
 	br i1 false, label %cond_true380.i.i, label %cond_next602.i.i
 cond_true380.i.i:		; preds = %cond_false373.i.i
 	%tmp394.i418.i = add i32 %cell.0.i.i, 1		; <i32> [#uses=1]
-	%tmp397.i420.i = load %struct.cellbox** null, align 4		; <%struct.cellbox*> [#uses=1]
+	%tmp397.i420.i = load %struct.cellbox*, %struct.cellbox** null, align 4		; <%struct.cellbox*> [#uses=1]
 	br label %bb398.i.i
 bb398.i.i:		; preds = %bb398.i.i, %cond_true380.i.i
 	br i1 false, label %bb414.i.i, label %bb398.i.i
@@ -74,7 +74,7 @@ bb609.i.i:		; preds = %cond_next602.i.i
 bb620.i.i:		; preds = %bb620.i.i, %bb609.i.i
 	%indvar166.i465.i = phi i32 [ %indvar.next167.i.i, %bb620.i.i ], [ 0, %bb609.i.i ]		; <i32> [#uses=1]
 	%tmp640.i.i = call i32 (%struct.FILE*, i8*, ...)* @fscanf( %struct.FILE* %tmp61, i8* getelementptr ([5 x i8]* @.str584, i32 0, i32 0), [1024 x i8]* null )		; <i32> [#uses=0]
-	%tmp648.i.i = load i32* null, align 4		; <i32> [#uses=1]
+	%tmp648.i.i = load i32, i32* null, align 4		; <i32> [#uses=1]
 	%tmp650.i468.i = icmp sgt i32 0, %tmp648.i.i		; <i1> [#uses=1]
 	%tmp624.i469.i = call i32 (%struct.FILE*, i8*, ...)* @fscanf( %struct.FILE* %tmp61, i8* getelementptr ([5 x i8]* @.str584, i32 0, i32 0), [1024 x i8]* null )		; <i32> [#uses=0]
 	%indvar.next167.i.i = add i32 %indvar166.i465.i, 1		; <i32> [#uses=1]
@@ -126,7 +126,7 @@ cond_true163:		; preds = %cond_next144
 bb.i53:		; preds = %cond_true163
 	ret void
 bb34.i:		; preds = %cond_true163
-	%tmp37.i55 = load i32* null, align 4		; <i32> [#uses=1]
+	%tmp37.i55 = load i32, i32* null, align 4		; <i32> [#uses=1]
 	br i1 false, label %bb65.preheader.i, label %bb78.i
 bb65.preheader.i:		; preds = %bb34.i
 	br label %bb65.outer.us.i
@@ -149,7 +149,7 @@ bb155.i:		; preds = %cond_next215.i, %bb
 	%indvar90.i = phi i32 [ %indvar.next91.i, %cond_next215.i ], [ 0, %bb151.i ]		; <i32> [#uses=2]
 	%sfb.3.reg2mem.0.i = add i32 %indvar90.i, %tmp37.i55		; <i32> [#uses=4]
 	%tmp161.i = getelementptr [4 x [21 x double]], [4 x [21 x double]]* null, i32 0, i32 %tmp15747.i, i32 %sfb.3.reg2mem.0.i		; <double*> [#uses=1]
-	%tmp162.i74 = load double* %tmp161.i, align 4		; <double> [#uses=0]
+	%tmp162.i74 = load double, double* %tmp161.i, align 4		; <double> [#uses=0]
 	br i1 false, label %cond_true167.i, label %cond_next215.i
 cond_true167.i:		; preds = %bb155.i
 	%tmp173.i = getelementptr %struct.III_scalefac_t, %struct.III_scalefac_t* null, i32 0, i32 1, i32 %sfb.3.reg2mem.0.i, i32 %i.154.i		; <i32*> [#uses=1]
@@ -157,7 +157,7 @@ cond_true167.i:		; preds = %bb155.i
 	%tmp182.1.i = getelementptr [14 x i32], [14 x i32]* @scalefac_band.1, i32 0, i32 %sfb.3.reg2mem.0.i		; <i32*> [#uses=0]
 	%tmp185.i78 = add i32 %sfb.3.reg2mem.0.i, 1		; <i32> [#uses=1]
 	%tmp187.1.i = getelementptr [14 x i32], [14 x i32]* @scalefac_band.1, i32 0, i32 %tmp185.i78		; <i32*> [#uses=1]
-	%tmp188.i = load i32* %tmp187.1.i, align 4		; <i32> [#uses=1]
+	%tmp188.i = load i32, i32* %tmp187.1.i, align 4		; <i32> [#uses=1]
 	%tmp21153.i = icmp slt i32 0, %tmp188.i		; <i1> [#uses=1]
 	br i1 %tmp21153.i, label %bb190.preheader.i, label %cond_next215.i
 bb190.preheader.i:		; preds = %cond_true167.i
@@ -224,7 +224,7 @@ cond_next144:		; preds = %cond_next104,
 	%over.1 = phi i32 [ 0, %bb ], [ 0, %cond_next104 ]		; <i32> [#uses=1]
 	%best_over.0 = phi i32 [ 0, %bb ], [ 0, %cond_next104 ]		; <i32> [#uses=1]
 	%notdone.0 = phi i32 [ 0, %bb ], [ 0, %cond_next104 ]		; <i32> [#uses=1]
-	%tmp147 = load i32* null, align 4		; <i32> [#uses=1]
+	%tmp147 = load i32, i32* null, align 4		; <i32> [#uses=1]
 	%tmp148 = icmp eq i32 %tmp147, 0		; <i1> [#uses=1]
 	%tmp153 = icmp eq i32 %over.1, 0		; <i1> [#uses=1]
 	%bothcond = and i1 %tmp148, %tmp153		; <i1> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/2008-05-19-LiveIntervalsBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2008-05-19-LiveIntervalsBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2008-05-19-LiveIntervalsBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2008-05-19-LiveIntervalsBug.ll Fri Feb 27 15:17:42 2015
@@ -21,8 +21,8 @@ entry:
 	br i1 false, label %init_orig_buffers.exit, label %cond_true.i29
 
 cond_true.i29:		; preds = %entry
-	%tmp17.i = load i32* getelementptr (%struct.ImageParameters* @images, i32 0, i32 20), align 8		; <i32> [#uses=1]
-	%tmp20.i27 = load i32* getelementptr (%struct.ImageParameters* @images, i32 0, i32 16), align 8		; <i32> [#uses=1]
+	%tmp17.i = load i32, i32* getelementptr (%struct.ImageParameters* @images, i32 0, i32 20), align 8		; <i32> [#uses=1]
+	%tmp20.i27 = load i32, i32* getelementptr (%struct.ImageParameters* @images, i32 0, i32 16), align 8		; <i32> [#uses=1]
 	%tmp8.i.i = select i1 false, i32 1, i32 0		; <i32> [#uses=1]
 	br label %bb.i8.us.i
 

Modified: llvm/trunk/test/CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll Fri Feb 27 15:17:42 2015
@@ -4,6 +4,6 @@
 define void @main({ i32 }*) {
 entry:
 	%sret1 = alloca { i32 }		; <{ i32 }*> [#uses=1]
-	load { i32 }* %sret1		; <{ i32 }>:1 [#uses=0]
+	load { i32 }, { i32 }* %sret1		; <{ i32 }>:1 [#uses=0]
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/2008-08-07-AsmPrintBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2008-08-07-AsmPrintBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2008-08-07-AsmPrintBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2008-08-07-AsmPrintBug.ll Fri Feb 27 15:17:42 2015
@@ -8,6 +8,6 @@
 
 define i32 @__gcov_close() nounwind {
 entry:
-	load i32* getelementptr (%struct.__gcov_var* @__gcov_var, i32 0, i32 5), align 4		; <i32>:0 [#uses=1]
+	load i32, i32* getelementptr (%struct.__gcov_var* @__gcov_var, i32 0, i32 5), align 4		; <i32>:0 [#uses=1]
 	ret i32 %0
 }

Modified: llvm/trunk/test/CodeGen/ARM/2009-02-16-SpillerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-02-16-SpillerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-02-16-SpillerBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-02-16-SpillerBug.ll Fri Feb 27 15:17:42 2015
@@ -87,7 +87,7 @@ bb394:		; preds = %bb122
 bb396:		; preds = %bb394, %bb131, %bb122, %bb122, %bb122, %bb122, %RESUME
 	%stop_link.3 = phi %struct.rec* [ null, %RESUME ], [ %stop_link.3, %bb394 ], [ %stop_link.3, %bb122 ], [ %stop_link.3, %bb122 ], [ %stop_link.3, %bb122 ], [ %stop_link.3, %bb122 ], [ %link.1, %bb131 ]		; <%struct.rec*> [#uses=7]
 	%headers_seen.1 = phi i32 [ 0, %RESUME ], [ %headers_seen.1, %bb394 ], [ 1, %bb122 ], [ 1, %bb122 ], [ 1, %bb122 ], [ 1, %bb122 ], [ %headers_seen.1, %bb131 ]		; <i32> [#uses=2]
-	%link.1 = load %struct.rec** null		; <%struct.rec*> [#uses=2]
+	%link.1 = load %struct.rec*, %struct.rec** null		; <%struct.rec*> [#uses=2]
 	%1 = icmp eq %struct.rec* %link.1, %hd		; <i1> [#uses=1]
 	br i1 %1, label %bb398, label %bb122
 

Modified: llvm/trunk/test/CodeGen/ARM/2009-02-22-SoftenFloatVaArg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-02-22-SoftenFloatVaArg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-02-22-SoftenFloatVaArg.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-02-22-SoftenFloatVaArg.ll Fri Feb 27 15:17:42 2015
@@ -15,6 +15,6 @@ entry:
 	br label %return
 
 return:		; preds = %entry
-	%2 = load i32* %retval		; <i32> [#uses=1]
+	%2 = load i32, i32* %retval		; <i32> [#uses=1]
 	ret i32 %2
 }

Modified: llvm/trunk/test/CodeGen/ARM/2009-02-27-SpillerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-02-27-SpillerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-02-27-SpillerBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-02-27-SpillerBug.ll Fri Feb 27 15:17:42 2015
@@ -28,12 +28,12 @@ bb53:		; preds = %bb52
 	br i1 %phitmp, label %bb55, label %bb52
 
 bb55:		; preds = %bb53
-	%4 = load double* @a, align 4		; <double> [#uses=10]
+	%4 = load double, double* @a, align 4		; <double> [#uses=10]
 	%5 = fadd double %4, 0.000000e+00		; <double> [#uses=16]
 	%6 = fcmp ogt double %k.4, 0.000000e+00		; <i1> [#uses=1]
 	%.pn404 = fmul double %4, %4		; <double> [#uses=4]
 	%.pn402 = fmul double %5, %5		; <double> [#uses=5]
-	%.pn165.in = load double* @N		; <double> [#uses=5]
+	%.pn165.in = load double, double* @N		; <double> [#uses=5]
 	%.pn198 = fmul double 0.000000e+00, %5		; <double> [#uses=1]
 	%.pn185 = fsub double -0.000000e+00, 0.000000e+00		; <double> [#uses=1]
 	%.pn147 = fsub double -0.000000e+00, 0.000000e+00		; <double> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/2009-03-07-SpillerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-03-07-SpillerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-03-07-SpillerBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-03-07-SpillerBug.ll Fri Feb 27 15:17:42 2015
@@ -42,7 +42,7 @@ bb3:
   %17 = fdiv double %16, %0
   %18 = fadd double 0.000000e+00, %17
   %19 = call double @acos(double %18) nounwind readonly
-  %20 = load double* null, align 4
+  %20 = load double, double* null, align 4
   %21 = fmul double %20, 0x401921FB54442D18
   %22 = call double @sin(double %19) nounwind readonly
   %23 = fmul double %22, 0.000000e+00

Modified: llvm/trunk/test/CodeGen/ARM/2009-04-06-AsmModifier.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-04-06-AsmModifier.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-04-06-AsmModifier.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-04-06-AsmModifier.ll Fri Feb 27 15:17:42 2015
@@ -8,14 +8,14 @@ entry:
 	store i32 0, i32* %retval
 	%res = alloca i32		; <i32*> [#uses=0]
 	%fh = alloca i32		; <i32*> [#uses=1]
-	%1 = load i32* %fh		; <i32> [#uses=1]
-	%2 = load i32* %ptr		; <i32> [#uses=1]
+	%1 = load i32, i32* %fh		; <i32> [#uses=1]
+	%2 = load i32, i32* %ptr		; <i32> [#uses=1]
 	%3 = call i32 asm "mov r0, $2; mov r1, $3; swi ${1:a}; mov $0, r0", "=r,i,r,r,~{r0},~{r1}"(i32 107, i32 %1, i32 %2) nounwind		; <i32> [#uses=1]
         store i32 %3, i32* %retval
 	br label %return
 
 return:		; preds = %entry
-	%4 = load i32* %retval		; <i32> [#uses=1]
+	%4 = load i32, i32* %retval		; <i32> [#uses=1]
 	ret i32 %4
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/2009-04-08-AggregateAddr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-04-08-AggregateAddr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-04-08-AggregateAddr.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-04-08-AggregateAddr.ll Fri Feb 27 15:17:42 2015
@@ -8,11 +8,11 @@ entry:
 	%b = alloca { double, double }		; <{ double, double }*> [#uses=1]
 	store { i32, { double, double }* } %d_arg, { i32, { double, double }* }* %d
 	store i32 %x_arg, i32* %x
-	%tmp = load i32* %x		; <i32> [#uses=1]
+	%tmp = load i32, i32* %x		; <i32> [#uses=1]
 	%tmp1 = getelementptr { i32, { double, double }* }, { i32, { double, double }* }* %d, i32 0, i32 1		; <{ double, double }**> [#uses=1]
-	%.ptr = load { double, double }** %tmp1		; <{ double, double }*> [#uses=1]
+	%.ptr = load { double, double }*, { double, double }** %tmp1		; <{ double, double }*> [#uses=1]
 	%tmp2 = getelementptr { double, double }, { double, double }* %.ptr, i32 %tmp		; <{ double, double }*> [#uses=1]
-	%tmp3 = load { double, double }* %tmp2		; <{ double, double }> [#uses=1]
+	%tmp3 = load { double, double }, { double, double }* %tmp2		; <{ double, double }> [#uses=1]
 	store { double, double } %tmp3, { double, double }* %b
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/2009-04-08-FloatUndef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-04-08-FloatUndef.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-04-08-FloatUndef.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-04-08-FloatUndef.ll Fri Feb 27 15:17:42 2015
@@ -2,7 +2,7 @@
 
 define void @execute_shader(<4 x float>* %OUT, <4 x float>* %IN, <4 x float>* %CONST) {
 entry:
-	%input2 = load <4 x float>* null, align 16		; <<4 x float>> [#uses=2]
+	%input2 = load <4 x float>, <4 x float>* null, align 16		; <<4 x float>> [#uses=2]
 	%shuffle7 = shufflevector <4 x float> %input2, <4 x float> <float 0.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00>, <4 x i32> <i32 2, i32 2, i32 2, i32 2>		; <<4 x float>> [#uses=1]
 	%mul1 = fmul <4 x float> %shuffle7, zeroinitializer		; <<4 x float>> [#uses=1]
 	%add2 = fadd <4 x float> %mul1, %input2		; <<4 x float>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 define void @foo(...) nounwind {
 entry:
 	%rr = alloca i32		; <i32*> [#uses=2]
-	%0 = load i32* %rr		; <i32> [#uses=1]
+	%0 = load i32, i32* %rr		; <i32> [#uses=1]
 	%1 = call i32 asm "nop", "=r,0"(i32 %0) nounwind		; <i32> [#uses=1]
 	store i32 %1, i32* %rr
 	br label %return

Modified: llvm/trunk/test/CodeGen/ARM/2009-05-11-CodePlacementCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-05-11-CodePlacementCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-05-11-CodePlacementCrash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-05-11-CodePlacementCrash.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ bb1:		; preds = %bb
 bb3:		; preds = %bb1, %bb
 	%iftmp.0.0 = phi i32 [ 0, %bb1 ], [ -1, %bb ]		; <i32> [#uses=1]
 	%1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([7 x i8]* @"\01LC", i32 0, i32 0), i32 0, i32 %iftmp.0.0) nounwind		; <i32> [#uses=0]
-	%2 = load %struct.List** null, align 4		; <%struct.List*> [#uses=2]
+	%2 = load %struct.List*, %struct.List** null, align 4		; <%struct.List*> [#uses=2]
 	%phitmp = icmp eq %struct.List* %2, null		; <i1> [#uses=1]
 	br i1 %phitmp, label %bb5, label %bb
 

Modified: llvm/trunk/test/CodeGen/ARM/2009-06-04-MissingLiveIn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-06-04-MissingLiveIn.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-06-04-MissingLiveIn.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-06-04-MissingLiveIn.ll Fri Feb 27 15:17:42 2015
@@ -136,7 +136,7 @@ bb138:		; preds = %bb77
 	br label %bb141
 
 bb139:		; preds = %bb141
-	%scevgep441442881 = load i16* undef		; <i16> [#uses=1]
+	%scevgep441442881 = load i16, i16* undef		; <i16> [#uses=1]
 	%1 = icmp ugt i16 %scevgep441442881, %0		; <i1> [#uses=1]
 	br i1 %1, label %bb141, label %bb142
 

Modified: llvm/trunk/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -5,9 +5,9 @@
 
 define void @simplify_unary_real(i8* nocapture %p) nounwind {
 entry:
-	%tmp121 = load i64* null, align 4		; <i64> [#uses=1]
+	%tmp121 = load i64, i64* null, align 4		; <i64> [#uses=1]
 	%0 = getelementptr %struct.rtx_def, %struct.rtx_def* null, i32 0, i32 3, i32 3, i32 0		; <i64*> [#uses=1]
-	%tmp122 = load i64* %0, align 4		; <i64> [#uses=1]
+	%tmp122 = load i64, i64* %0, align 4		; <i64> [#uses=1]
 	%1 = zext i64 undef to i192		; <i192> [#uses=2]
 	%2 = zext i64 %tmp121 to i192		; <i192> [#uses=1]
 	%3 = shl i192 %2, 64		; <i192> [#uses=2]

Modified: llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert.ll Fri Feb 27 15:17:42 2015
@@ -35,10 +35,10 @@ bb10:		; preds = %bb9
 	unreachable
 
 bb11:		; preds = %bb9
-	%0 = load i32* undef, align 4		; <i32> [#uses=2]
+	%0 = load i32, i32* undef, align 4		; <i32> [#uses=2]
 	%1 = add i32 %0, 1		; <i32> [#uses=2]
 	store i32 %1, i32* undef, align 4
-	%2 = load i32* undef, align 4		; <i32> [#uses=1]
+	%2 = load i32, i32* undef, align 4		; <i32> [#uses=1]
 	store i32 %2, i32* @nn, align 4
 	store i32 0, i32* @al_len, align 4
 	store i32 0, i32* @no_mat, align 4
@@ -48,9 +48,9 @@ bb11:		; preds = %bb9
 	%4 = sitofp i32 undef to double		; <double> [#uses=1]
 	%5 = fdiv double %4, 1.000000e+01		; <double> [#uses=1]
 	%6 = tail call  i32 (i8*, ...)* @printf(i8* getelementptr ([29 x i8]* @"\01LC12", i32 0, i32 0), double %5) nounwind		; <i32> [#uses=0]
-	%7 = load i32* @al_len, align 4		; <i32> [#uses=1]
-	%8 = load i32* @no_mat, align 4		; <i32> [#uses=1]
-	%9 = load i32* @no_mis, align 4		; <i32> [#uses=1]
+	%7 = load i32, i32* @al_len, align 4		; <i32> [#uses=1]
+	%8 = load i32, i32* @no_mat, align 4		; <i32> [#uses=1]
+	%9 = load i32, i32* @no_mis, align 4		; <i32> [#uses=1]
 	%10 = sub i32 %7, %8		; <i32> [#uses=1]
 	%11 = sub i32 %10, %9		; <i32> [#uses=1]
 	%12 = tail call  i32 (i8*, ...)* @printf(i8* getelementptr ([33 x i8]* @"\01LC16", i32 0, i32 0), i32 %11) nounwind		; <i32> [#uses=0]

Modified: llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll Fri Feb 27 15:17:42 2015
@@ -33,17 +33,17 @@ bb10:		; preds = %bb9
 	unreachable
 
 bb11:		; preds = %bb9
-	%0 = load i32* undef, align 4		; <i32> [#uses=3]
+	%0 = load i32, i32* undef, align 4		; <i32> [#uses=3]
 	%1 = add i32 %0, 1		; <i32> [#uses=2]
 	store i32 %1, i32* undef, align 4
-	%2 = load i32* undef, align 4		; <i32> [#uses=2]
+	%2 = load i32, i32* undef, align 4		; <i32> [#uses=2]
 	%3 = sub i32 %2, %0		; <i32> [#uses=1]
 	store i32 0, i32* @no_mat, align 4
 	store i32 0, i32* @no_mis, align 4
 	%4 = getelementptr i8, i8* %B, i32 %0		; <i8*> [#uses=1]
 	tail call  void @diff(i8* undef, i8* %4, i32 undef, i32 %3, i32 undef, i32 undef) nounwind
 	%5 = tail call  i32 (i8*, ...)* @printf(i8* getelementptr ([33 x i8]* @"\01LC11", i32 0, i32 0), i32 %tmp13) nounwind		; <i32> [#uses=0]
-	%6 = load i32* @no_mis, align 4		; <i32> [#uses=1]
+	%6 = load i32, i32* @no_mis, align 4		; <i32> [#uses=1]
 	%7 = tail call  i32 (i8*, ...)* @printf(i8* getelementptr ([33 x i8]* @"\01LC15", i32 0, i32 0), i32 %6) nounwind		; <i32> [#uses=0]
 	%8 = tail call  i32 (i8*, ...)* @printf(i8* getelementptr ([47 x i8]* @"\01LC17", i32 0, i32 0), i32 undef, i32 %1, i32 undef, i32 %2) nounwind		; <i32> [#uses=0]
 	br i1 undef, label %bb15, label %bb12

Modified: llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll Fri Feb 27 15:17:42 2015
@@ -28,7 +28,7 @@ bb11:		; preds = %bb9
 	br i1 undef, label %bb15, label %bb12
 
 bb12:		; preds = %bb11
-	%0 = load i32** @JJ, align 4		; <i32*> [#uses=1]
+	%0 = load i32*, i32** @JJ, align 4		; <i32*> [#uses=1]
 	br label %bb228.i
 
 bb74.i:		; preds = %bb228.i
@@ -85,9 +85,9 @@ bb168.i:		; preds = %bb167.i, %bb163.i,
 	%fi.5.i = phi i32 [ undef, %bb167.i ], [ %ci.910.i, %bb158.i ], [ undef, %bb160.i ], [ %ci.910.i, %bb161.i ], [ undef, %bb163.i ]		; <i32> [#uses=1]
 	%fj.4.i = phi i32 [ undef, %bb167.i ], [ undef, %bb158.i ], [ %fj.515.i, %bb160.i ], [ undef, %bb161.i ], [ %fj.515.i, %bb163.i ]		; <i32> [#uses=2]
 	%scevgep88.i = getelementptr i32, i32* null, i32 %i.121.i		; <i32*> [#uses=3]
-	%4 = load i32* %scevgep88.i, align 4		; <i32> [#uses=2]
+	%4 = load i32, i32* %scevgep88.i, align 4		; <i32> [#uses=2]
 	%scevgep89.i = getelementptr i32, i32* %0, i32 %i.121.i		; <i32*> [#uses=3]
-	%5 = load i32* %scevgep89.i, align 4		; <i32> [#uses=1]
+	%5 = load i32, i32* %scevgep89.i, align 4		; <i32> [#uses=1]
 	%ci.10.i = select i1 undef, i32 %pi.316.i, i32 %i.121.i		; <i32> [#uses=0]
 	%cj.9.i = select i1 undef, i32 %pj.317.i, i32 undef		; <i32> [#uses=0]
 	%6 = icmp slt i32 undef, 0		; <i1> [#uses=3]
@@ -95,8 +95,8 @@ bb168.i:		; preds = %bb167.i, %bb163.i,
 	%cj.11.i100 = select i1 %6, i32 %fj.4.i, i32 %5		; <i32> [#uses=1]
 	%c.14.i = select i1 %6, i32 0, i32 undef		; <i32> [#uses=2]
 	store i32 %c.14.i, i32* undef, align 4
-	%7 = load i32* %scevgep88.i, align 4		; <i32> [#uses=1]
-	%8 = load i32* %scevgep89.i, align 4		; <i32> [#uses=1]
+	%7 = load i32, i32* %scevgep88.i, align 4		; <i32> [#uses=1]
+	%8 = load i32, i32* %scevgep89.i, align 4		; <i32> [#uses=1]
 	store i32 %ci.12.i, i32* %scevgep88.i, align 4
 	store i32 %cj.11.i100, i32* %scevgep89.i, align 4
 	store i32 %4, i32* undef, align 4

Modified: llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ bb6:		; preds = %bb6, %bb5
 	br i1 undef, label %bb8, label %bb6
 
 bb8:		; preds = %bb6, %bb5
-	%0 = load i8** @name1, align 4		; <i8*> [#uses=0]
+	%0 = load i8*, i8** @name1, align 4		; <i8*> [#uses=0]
 	br label %bb15
 
 bb9:		; preds = %bb15
@@ -39,9 +39,9 @@ bb11:		; preds = %bb9
 	br i1 undef, label %bb15, label %bb12
 
 bb12:		; preds = %bb11
-	%3 = load i32** @II, align 4		; <i32*> [#uses=1]
-	%4 = load i32* @r, align 4		; <i32> [#uses=1]
-	%5 = load i32* @qr, align 4		; <i32> [#uses=1]
+	%3 = load i32*, i32** @II, align 4		; <i32*> [#uses=1]
+	%4 = load i32, i32* @r, align 4		; <i32> [#uses=1]
+	%5 = load i32, i32* @qr, align 4		; <i32> [#uses=1]
 	br label %bb228.i
 
 bb74.i:		; preds = %bb228.i
@@ -100,7 +100,7 @@ bb168.i:		; preds = %bb167.i, %bb163.i,
 	%ci.12.i = select i1 undef, i32 %fi.5.i, i32 undef		; <i32> [#uses=1]
 	%cj.11.i100 = select i1 undef, i32 %fj.4.i, i32 undef		; <i32> [#uses=1]
 	%c.14.i = select i1 undef, i32 %f.5.i, i32 undef		; <i32> [#uses=1]
-	%10 = load i32* %scevgep88.i, align 4		; <i32> [#uses=1]
+	%10 = load i32, i32* %scevgep88.i, align 4		; <i32> [#uses=1]
 	br i1 undef, label %bb211.i, label %bb218.i
 
 bb211.i:		; preds = %bb168.i

Modified: llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll Fri Feb 27 15:17:42 2015
@@ -28,7 +28,7 @@ bb11:		; preds = %bb9
 	br i1 undef, label %bb15, label %bb12
 
 bb12:		; preds = %bb11
-	%0 = load i32** @XX, align 4		; <i32*> [#uses=0]
+	%0 = load i32*, i32** @XX, align 4		; <i32*> [#uses=0]
 	br label %bb228.i
 
 bb74.i:		; preds = %bb228.i

Modified: llvm/trunk/test/CodeGen/ARM/2009-07-01-CommuteBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-07-01-CommuteBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-07-01-CommuteBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-07-01-CommuteBug.ll Fri Feb 27 15:17:42 2015
@@ -30,9 +30,9 @@ bb11:		; preds = %bb9
 	br i1 undef, label %bb15, label %bb12
 
 bb12:		; preds = %bb11
-	%0 = load i32** @II, align 4		; <i32*> [#uses=1]
-	%1 = load i32** @JJ, align 4		; <i32*> [#uses=1]
-	%2 = load i32* @qr, align 4		; <i32> [#uses=1]
+	%0 = load i32*, i32** @II, align 4		; <i32*> [#uses=1]
+	%1 = load i32*, i32** @JJ, align 4		; <i32*> [#uses=1]
+	%2 = load i32, i32* @qr, align 4		; <i32> [#uses=1]
 	br label %bb228.i
 
 bb74.i:		; preds = %bb228.i
@@ -97,8 +97,8 @@ bb168.i:		; preds = %bb167.i, %bb163.i,
 	%ci.12.i = select i1 undef, i32 %fi.5.i, i32 undef		; <i32> [#uses=2]
 	%cj.11.i100 = select i1 undef, i32 %fj.4.i, i32 undef		; <i32> [#uses=2]
 	%c.14.i = select i1 undef, i32 %f.5.i, i32 undef		; <i32> [#uses=1]
-	%6 = load i32* %scevgep88.i, align 4		; <i32> [#uses=1]
-	%7 = load i32* %scevgep89.i, align 4		; <i32> [#uses=1]
+	%6 = load i32, i32* %scevgep88.i, align 4		; <i32> [#uses=1]
+	%7 = load i32, i32* %scevgep89.i, align 4		; <i32> [#uses=1]
 	store i32 %ci.12.i, i32* %scevgep88.i, align 4
 	store i32 %cj.11.i100, i32* %scevgep89.i, align 4
 	br i1 undef, label %bb211.i, label %bb218.i

Modified: llvm/trunk/test/CodeGen/ARM/2009-07-18-RewriterBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-07-18-RewriterBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-07-18-RewriterBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-07-18-RewriterBug.ll Fri Feb 27 15:17:42 2015
@@ -17,42 +17,42 @@ entry:
 
 bb:		; preds = %entry
 	%1 = getelementptr %struct.VERTEX, %struct.VERTEX* %tree, i32 0, i32 2		; <%struct.VERTEX**> [#uses=1]
-	%2 = load %struct.VERTEX** %1, align 4		; <%struct.VERTEX*> [#uses=2]
+	%2 = load %struct.VERTEX*, %struct.VERTEX** %1, align 4		; <%struct.VERTEX*> [#uses=2]
 	%3 = icmp eq %struct.VERTEX* %2, null		; <i1> [#uses=1]
 	br i1 %3, label %bb7, label %bb1.i
 
 bb1.i:		; preds = %bb1.i, %bb
 	%tree_addr.0.i = phi %struct.VERTEX* [ %5, %bb1.i ], [ %tree, %bb ]		; <%struct.VERTEX*> [#uses=3]
 	%4 = getelementptr %struct.VERTEX, %struct.VERTEX* %tree_addr.0.i, i32 0, i32 1		; <%struct.VERTEX**> [#uses=1]
-	%5 = load %struct.VERTEX** %4, align 4		; <%struct.VERTEX*> [#uses=2]
+	%5 = load %struct.VERTEX*, %struct.VERTEX** %4, align 4		; <%struct.VERTEX*> [#uses=2]
 	%6 = icmp eq %struct.VERTEX* %5, null		; <i1> [#uses=1]
 	br i1 %6, label %get_low.exit, label %bb1.i
 
 get_low.exit:		; preds = %bb1.i
 	call  void @build_delaunay(%struct.EDGE_PAIR* noalias sret %delright, %struct.VERTEX* %2, %struct.VERTEX* %extra) nounwind
 	%7 = getelementptr %struct.VERTEX, %struct.VERTEX* %tree, i32 0, i32 1		; <%struct.VERTEX**> [#uses=1]
-	%8 = load %struct.VERTEX** %7, align 4		; <%struct.VERTEX*> [#uses=1]
+	%8 = load %struct.VERTEX*, %struct.VERTEX** %7, align 4		; <%struct.VERTEX*> [#uses=1]
 	call  void @build_delaunay(%struct.EDGE_PAIR* noalias sret %delleft, %struct.VERTEX* %8, %struct.VERTEX* %tree) nounwind
 	%9 = getelementptr %struct.EDGE_PAIR, %struct.EDGE_PAIR* %delleft, i32 0, i32 0		; <%struct.edge_rec**> [#uses=1]
-	%10 = load %struct.edge_rec** %9, align 8		; <%struct.edge_rec*> [#uses=2]
+	%10 = load %struct.edge_rec*, %struct.edge_rec** %9, align 8		; <%struct.edge_rec*> [#uses=2]
 	%11 = getelementptr %struct.EDGE_PAIR, %struct.EDGE_PAIR* %delleft, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%12 = load %struct.edge_rec** %11, align 4		; <%struct.edge_rec*> [#uses=1]
+	%12 = load %struct.edge_rec*, %struct.edge_rec** %11, align 4		; <%struct.edge_rec*> [#uses=1]
 	%13 = getelementptr %struct.EDGE_PAIR, %struct.EDGE_PAIR* %delright, i32 0, i32 0		; <%struct.edge_rec**> [#uses=1]
-	%14 = load %struct.edge_rec** %13, align 8		; <%struct.edge_rec*> [#uses=1]
+	%14 = load %struct.edge_rec*, %struct.edge_rec** %13, align 8		; <%struct.edge_rec*> [#uses=1]
 	%15 = getelementptr %struct.EDGE_PAIR, %struct.EDGE_PAIR* %delright, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%16 = load %struct.edge_rec** %15, align 4		; <%struct.edge_rec*> [#uses=2]
+	%16 = load %struct.edge_rec*, %struct.edge_rec** %15, align 4		; <%struct.edge_rec*> [#uses=2]
 	br label %bb.i
 
 bb.i:		; preds = %bb4.i, %get_low.exit
 	%rdi_addr.0.i = phi %struct.edge_rec* [ %14, %get_low.exit ], [ %72, %bb4.i ]		; <%struct.edge_rec*> [#uses=2]
 	%ldi_addr.1.i = phi %struct.edge_rec* [ %12, %get_low.exit ], [ %ldi_addr.0.i, %bb4.i ]		; <%struct.edge_rec*> [#uses=3]
 	%17 = getelementptr %struct.edge_rec, %struct.edge_rec* %rdi_addr.0.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%18 = load %struct.VERTEX** %17, align 4		; <%struct.VERTEX*> [#uses=3]
+	%18 = load %struct.VERTEX*, %struct.VERTEX** %17, align 4		; <%struct.VERTEX*> [#uses=3]
 	%19 = ptrtoint %struct.edge_rec* %ldi_addr.1.i to i32		; <i32> [#uses=1]
 	%20 = getelementptr %struct.VERTEX, %struct.VERTEX* %18, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%21 = load double* %20, align 4		; <double> [#uses=3]
+	%21 = load double, double* %20, align 4		; <double> [#uses=3]
 	%22 = getelementptr %struct.VERTEX, %struct.VERTEX* %18, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%23 = load double* %22, align 4		; <double> [#uses=3]
+	%23 = load double, double* %22, align 4		; <double> [#uses=3]
 	br label %bb2.i
 
 bb1.i1:		; preds = %bb2.i
@@ -63,7 +63,7 @@ bb1.i1:		; preds = %bb2.i
 	%28 = or i32 %26, %27		; <i32> [#uses=1]
 	%29 = inttoptr i32 %28 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%30 = getelementptr %struct.edge_rec, %struct.edge_rec* %29, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%31 = load %struct.edge_rec** %30, align 4		; <%struct.edge_rec*> [#uses=1]
+	%31 = load %struct.edge_rec*, %struct.edge_rec** %30, align 4		; <%struct.edge_rec*> [#uses=1]
 	%32 = ptrtoint %struct.edge_rec* %31 to i32		; <i32> [#uses=2]
 	%33 = add i32 %32, 16		; <i32> [#uses=1]
 	%34 = and i32 %33, 63		; <i32> [#uses=1]
@@ -80,16 +80,16 @@ bb2.i:		; preds = %bb1.i1, %bb.i
 	%.pn6.i = inttoptr i32 %.pn6.in.i to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%t1.0.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %ldi_addr.1.pn.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
 	%t2.0.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %.pn6.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%t1.0.i = load %struct.VERTEX** %t1.0.in.i		; <%struct.VERTEX*> [#uses=2]
-	%t2.0.i = load %struct.VERTEX** %t2.0.in.i		; <%struct.VERTEX*> [#uses=2]
+	%t1.0.i = load %struct.VERTEX*, %struct.VERTEX** %t1.0.in.i		; <%struct.VERTEX*> [#uses=2]
+	%t2.0.i = load %struct.VERTEX*, %struct.VERTEX** %t2.0.in.i		; <%struct.VERTEX*> [#uses=2]
 	%38 = getelementptr %struct.VERTEX, %struct.VERTEX* %t1.0.i, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%39 = load double* %38, align 4		; <double> [#uses=3]
+	%39 = load double, double* %38, align 4		; <double> [#uses=3]
 	%40 = getelementptr %struct.VERTEX, %struct.VERTEX* %t1.0.i, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%41 = load double* %40, align 4		; <double> [#uses=3]
+	%41 = load double, double* %40, align 4		; <double> [#uses=3]
 	%42 = getelementptr %struct.VERTEX, %struct.VERTEX* %t2.0.i, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%43 = load double* %42, align 4		; <double> [#uses=1]
+	%43 = load double, double* %42, align 4		; <double> [#uses=1]
 	%44 = getelementptr %struct.VERTEX, %struct.VERTEX* %t2.0.i, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%45 = load double* %44, align 4		; <double> [#uses=1]
+	%45 = load double, double* %44, align 4		; <double> [#uses=1]
 	%46 = fsub double %39, %21		; <double> [#uses=1]
 	%47 = fsub double %45, %23		; <double> [#uses=1]
 	%48 = fmul double %46, %47		; <double> [#uses=1]
@@ -105,11 +105,11 @@ bb3.i:		; preds = %bb2.i
 	%55 = xor i32 %54, 32		; <i32> [#uses=3]
 	%56 = inttoptr i32 %55 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=2]
 	%57 = getelementptr %struct.edge_rec, %struct.edge_rec* %56, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%58 = load %struct.VERTEX** %57, align 4		; <%struct.VERTEX*> [#uses=2]
+	%58 = load %struct.VERTEX*, %struct.VERTEX** %57, align 4		; <%struct.VERTEX*> [#uses=2]
 	%59 = getelementptr %struct.VERTEX, %struct.VERTEX* %58, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%60 = load double* %59, align 4		; <double> [#uses=1]
+	%60 = load double, double* %59, align 4		; <double> [#uses=1]
 	%61 = getelementptr %struct.VERTEX, %struct.VERTEX* %58, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%62 = load double* %61, align 4		; <double> [#uses=1]
+	%62 = load double, double* %61, align 4		; <double> [#uses=1]
 	%63 = fsub double %60, %39		; <double> [#uses=1]
 	%64 = fsub double %23, %41		; <double> [#uses=1]
 	%65 = fmul double %63, %64		; <double> [#uses=1]
@@ -122,7 +122,7 @@ bb3.i:		; preds = %bb2.i
 
 bb4.i:		; preds = %bb3.i
 	%71 = getelementptr %struct.edge_rec, %struct.edge_rec* %56, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%72 = load %struct.edge_rec** %71, align 4		; <%struct.edge_rec*> [#uses=1]
+	%72 = load %struct.edge_rec*, %struct.edge_rec** %71, align 4		; <%struct.edge_rec*> [#uses=1]
 	br label %bb.i
 
 bb5.i:		; preds = %bb3.i
@@ -132,7 +132,7 @@ bb5.i:		; preds = %bb3.i
 	%76 = or i32 %74, %75		; <i32> [#uses=1]
 	%77 = inttoptr i32 %76 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%78 = getelementptr %struct.edge_rec, %struct.edge_rec* %77, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%79 = load %struct.edge_rec** %78, align 4		; <%struct.edge_rec*> [#uses=1]
+	%79 = load %struct.edge_rec*, %struct.edge_rec** %78, align 4		; <%struct.edge_rec*> [#uses=1]
 	%80 = ptrtoint %struct.edge_rec* %79 to i32		; <i32> [#uses=2]
 	%81 = add i32 %80, 16		; <i32> [#uses=1]
 	%82 = and i32 %81, 63		; <i32> [#uses=1]
@@ -140,7 +140,7 @@ bb5.i:		; preds = %bb3.i
 	%84 = or i32 %82, %83		; <i32> [#uses=1]
 	%85 = inttoptr i32 %84 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%86 = getelementptr %struct.edge_rec, %struct.edge_rec* %ldi_addr.0.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%87 = load %struct.VERTEX** %86, align 4		; <%struct.VERTEX*> [#uses=1]
+	%87 = load %struct.VERTEX*, %struct.VERTEX** %86, align 4		; <%struct.VERTEX*> [#uses=1]
 	%88 = call  %struct.edge_rec* @alloc_edge() nounwind		; <%struct.edge_rec*> [#uses=6]
 	%89 = getelementptr %struct.edge_rec, %struct.edge_rec* %88, i32 0, i32 1		; <%struct.edge_rec**> [#uses=4]
 	store %struct.edge_rec* %88, %struct.edge_rec** %89, align 4
@@ -161,7 +161,7 @@ bb5.i:		; preds = %bb3.i
 	store %struct.VERTEX* %87, %struct.VERTEX** %100, align 4
 	%101 = getelementptr %struct.edge_rec, %struct.edge_rec* %95, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
 	store %struct.edge_rec* %93, %struct.edge_rec** %101, align 4
-	%102 = load %struct.edge_rec** %89, align 4		; <%struct.edge_rec*> [#uses=1]
+	%102 = load %struct.edge_rec*, %struct.edge_rec** %89, align 4		; <%struct.edge_rec*> [#uses=1]
 	%103 = ptrtoint %struct.edge_rec* %102 to i32		; <i32> [#uses=2]
 	%104 = add i32 %103, 16		; <i32> [#uses=1]
 	%105 = and i32 %104, 63		; <i32> [#uses=1]
@@ -169,7 +169,7 @@ bb5.i:		; preds = %bb3.i
 	%107 = or i32 %105, %106		; <i32> [#uses=1]
 	%108 = inttoptr i32 %107 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%109 = getelementptr %struct.edge_rec, %struct.edge_rec* %85, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%110 = load %struct.edge_rec** %109, align 4		; <%struct.edge_rec*> [#uses=1]
+	%110 = load %struct.edge_rec*, %struct.edge_rec** %109, align 4		; <%struct.edge_rec*> [#uses=1]
 	%111 = ptrtoint %struct.edge_rec* %110 to i32		; <i32> [#uses=2]
 	%112 = add i32 %111, 16		; <i32> [#uses=1]
 	%113 = and i32 %112, 63		; <i32> [#uses=1]
@@ -177,19 +177,19 @@ bb5.i:		; preds = %bb3.i
 	%115 = or i32 %113, %114		; <i32> [#uses=1]
 	%116 = inttoptr i32 %115 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%117 = getelementptr %struct.edge_rec, %struct.edge_rec* %116, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%118 = load %struct.edge_rec** %117, align 4		; <%struct.edge_rec*> [#uses=1]
+	%118 = load %struct.edge_rec*, %struct.edge_rec** %117, align 4		; <%struct.edge_rec*> [#uses=1]
 	%119 = getelementptr %struct.edge_rec, %struct.edge_rec* %108, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%120 = load %struct.edge_rec** %119, align 4		; <%struct.edge_rec*> [#uses=1]
+	%120 = load %struct.edge_rec*, %struct.edge_rec** %119, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %118, %struct.edge_rec** %119, align 4
 	store %struct.edge_rec* %120, %struct.edge_rec** %117, align 4
-	%121 = load %struct.edge_rec** %89, align 4		; <%struct.edge_rec*> [#uses=1]
-	%122 = load %struct.edge_rec** %109, align 4		; <%struct.edge_rec*> [#uses=1]
+	%121 = load %struct.edge_rec*, %struct.edge_rec** %89, align 4		; <%struct.edge_rec*> [#uses=1]
+	%122 = load %struct.edge_rec*, %struct.edge_rec** %109, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %121, %struct.edge_rec** %109, align 4
 	store %struct.edge_rec* %122, %struct.edge_rec** %89, align 4
 	%123 = xor i32 %91, 32		; <i32> [#uses=1]
 	%124 = inttoptr i32 %123 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=3]
 	%125 = getelementptr %struct.edge_rec, %struct.edge_rec* %124, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%126 = load %struct.edge_rec** %125, align 4		; <%struct.edge_rec*> [#uses=1]
+	%126 = load %struct.edge_rec*, %struct.edge_rec** %125, align 4		; <%struct.edge_rec*> [#uses=1]
 	%127 = ptrtoint %struct.edge_rec* %126 to i32		; <i32> [#uses=2]
 	%128 = add i32 %127, 16		; <i32> [#uses=1]
 	%129 = and i32 %128, 63		; <i32> [#uses=1]
@@ -197,7 +197,7 @@ bb5.i:		; preds = %bb3.i
 	%131 = or i32 %129, %130		; <i32> [#uses=1]
 	%132 = inttoptr i32 %131 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%133 = getelementptr %struct.edge_rec, %struct.edge_rec* %ldi_addr.0.i, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%134 = load %struct.edge_rec** %133, align 4		; <%struct.edge_rec*> [#uses=1]
+	%134 = load %struct.edge_rec*, %struct.edge_rec** %133, align 4		; <%struct.edge_rec*> [#uses=1]
 	%135 = ptrtoint %struct.edge_rec* %134 to i32		; <i32> [#uses=2]
 	%136 = add i32 %135, 16		; <i32> [#uses=1]
 	%137 = and i32 %136, 63		; <i32> [#uses=1]
@@ -205,13 +205,13 @@ bb5.i:		; preds = %bb3.i
 	%139 = or i32 %137, %138		; <i32> [#uses=1]
 	%140 = inttoptr i32 %139 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%141 = getelementptr %struct.edge_rec, %struct.edge_rec* %140, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%142 = load %struct.edge_rec** %141, align 4		; <%struct.edge_rec*> [#uses=1]
+	%142 = load %struct.edge_rec*, %struct.edge_rec** %141, align 4		; <%struct.edge_rec*> [#uses=1]
 	%143 = getelementptr %struct.edge_rec, %struct.edge_rec* %132, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%144 = load %struct.edge_rec** %143, align 4		; <%struct.edge_rec*> [#uses=1]
+	%144 = load %struct.edge_rec*, %struct.edge_rec** %143, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %142, %struct.edge_rec** %143, align 4
 	store %struct.edge_rec* %144, %struct.edge_rec** %141, align 4
-	%145 = load %struct.edge_rec** %125, align 4		; <%struct.edge_rec*> [#uses=1]
-	%146 = load %struct.edge_rec** %133, align 4		; <%struct.edge_rec*> [#uses=2]
+	%145 = load %struct.edge_rec*, %struct.edge_rec** %125, align 4		; <%struct.edge_rec*> [#uses=1]
+	%146 = load %struct.edge_rec*, %struct.edge_rec** %133, align 4		; <%struct.edge_rec*> [#uses=2]
 	store %struct.edge_rec* %145, %struct.edge_rec** %133, align 4
 	store %struct.edge_rec* %146, %struct.edge_rec** %125, align 4
 	%147 = and i32 %92, 63		; <i32> [#uses=1]
@@ -219,22 +219,22 @@ bb5.i:		; preds = %bb3.i
 	%149 = or i32 %147, %148		; <i32> [#uses=1]
 	%150 = inttoptr i32 %149 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%151 = getelementptr %struct.edge_rec, %struct.edge_rec* %150, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%152 = load %struct.edge_rec** %151, align 4		; <%struct.edge_rec*> [#uses=1]
+	%152 = load %struct.edge_rec*, %struct.edge_rec** %151, align 4		; <%struct.edge_rec*> [#uses=1]
 	%153 = ptrtoint %struct.edge_rec* %152 to i32		; <i32> [#uses=2]
 	%154 = add i32 %153, 16		; <i32> [#uses=1]
 	%155 = and i32 %154, 63		; <i32> [#uses=1]
 	%156 = and i32 %153, -64		; <i32> [#uses=1]
 	%157 = or i32 %155, %156		; <i32> [#uses=1]
 	%158 = inttoptr i32 %157 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
-	%159 = load %struct.VERTEX** %90, align 4		; <%struct.VERTEX*> [#uses=1]
+	%159 = load %struct.VERTEX*, %struct.VERTEX** %90, align 4		; <%struct.VERTEX*> [#uses=1]
 	%160 = getelementptr %struct.edge_rec, %struct.edge_rec* %124, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%161 = load %struct.VERTEX** %160, align 4		; <%struct.VERTEX*> [#uses=1]
+	%161 = load %struct.VERTEX*, %struct.VERTEX** %160, align 4		; <%struct.VERTEX*> [#uses=1]
 	%162 = getelementptr %struct.edge_rec, %struct.edge_rec* %16, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%163 = load %struct.VERTEX** %162, align 4		; <%struct.VERTEX*> [#uses=1]
+	%163 = load %struct.VERTEX*, %struct.VERTEX** %162, align 4		; <%struct.VERTEX*> [#uses=1]
 	%164 = icmp eq %struct.VERTEX* %163, %159		; <i1> [#uses=1]
 	%rdo_addr.0.i = select i1 %164, %struct.edge_rec* %88, %struct.edge_rec* %16		; <%struct.edge_rec*> [#uses=3]
 	%165 = getelementptr %struct.edge_rec, %struct.edge_rec* %10, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%166 = load %struct.VERTEX** %165, align 4		; <%struct.VERTEX*> [#uses=1]
+	%166 = load %struct.VERTEX*, %struct.VERTEX** %165, align 4		; <%struct.VERTEX*> [#uses=1]
 	%167 = icmp eq %struct.VERTEX* %166, %161		; <i1> [#uses=1]
 	%ldo_addr.0.ph.i = select i1 %167, %struct.edge_rec* %124, %struct.edge_rec* %10		; <%struct.edge_rec*> [#uses=3]
 	br label %bb9.i
@@ -244,31 +244,31 @@ bb9.i:		; preds = %bb25.i, %bb24.i, %bb5
 	%rcand.2.i = phi %struct.edge_rec* [ %158, %bb5.i ], [ %666, %bb24.i ], [ %rcand.1.i, %bb25.i ]		; <%struct.edge_rec*> [#uses=5]
 	%basel.0.i = phi %struct.edge_rec* [ %88, %bb5.i ], [ %595, %bb24.i ], [ %716, %bb25.i ]		; <%struct.edge_rec*> [#uses=2]
 	%168 = getelementptr %struct.edge_rec, %struct.edge_rec* %lcand.2.i, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%169 = load %struct.edge_rec** %168, align 4		; <%struct.edge_rec*> [#uses=3]
+	%169 = load %struct.edge_rec*, %struct.edge_rec** %168, align 4		; <%struct.edge_rec*> [#uses=3]
 	%170 = getelementptr %struct.edge_rec, %struct.edge_rec* %basel.0.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=3]
-	%171 = load %struct.VERTEX** %170, align 4		; <%struct.VERTEX*> [#uses=4]
+	%171 = load %struct.VERTEX*, %struct.VERTEX** %170, align 4		; <%struct.VERTEX*> [#uses=4]
 	%172 = ptrtoint %struct.edge_rec* %basel.0.i to i32		; <i32> [#uses=3]
 	%173 = xor i32 %172, 32		; <i32> [#uses=1]
 	%174 = inttoptr i32 %173 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=2]
 	%175 = getelementptr %struct.edge_rec, %struct.edge_rec* %174, i32 0, i32 0		; <%struct.VERTEX**> [#uses=3]
-	%176 = load %struct.VERTEX** %175, align 4		; <%struct.VERTEX*> [#uses=3]
+	%176 = load %struct.VERTEX*, %struct.VERTEX** %175, align 4		; <%struct.VERTEX*> [#uses=3]
 	%177 = ptrtoint %struct.edge_rec* %169 to i32		; <i32> [#uses=1]
 	%178 = xor i32 %177, 32		; <i32> [#uses=1]
 	%179 = inttoptr i32 %178 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%180 = getelementptr %struct.edge_rec, %struct.edge_rec* %179, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%181 = load %struct.VERTEX** %180, align 4		; <%struct.VERTEX*> [#uses=2]
+	%181 = load %struct.VERTEX*, %struct.VERTEX** %180, align 4		; <%struct.VERTEX*> [#uses=2]
 	%182 = getelementptr %struct.VERTEX, %struct.VERTEX* %171, i32 0, i32 0, i32 0		; <double*> [#uses=2]
-	%183 = load double* %182, align 4		; <double> [#uses=2]
+	%183 = load double, double* %182, align 4		; <double> [#uses=2]
 	%184 = getelementptr %struct.VERTEX, %struct.VERTEX* %171, i32 0, i32 0, i32 1		; <double*> [#uses=2]
-	%185 = load double* %184, align 4		; <double> [#uses=2]
+	%185 = load double, double* %184, align 4		; <double> [#uses=2]
 	%186 = getelementptr %struct.VERTEX, %struct.VERTEX* %181, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%187 = load double* %186, align 4		; <double> [#uses=1]
+	%187 = load double, double* %186, align 4		; <double> [#uses=1]
 	%188 = getelementptr %struct.VERTEX, %struct.VERTEX* %181, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%189 = load double* %188, align 4		; <double> [#uses=1]
+	%189 = load double, double* %188, align 4		; <double> [#uses=1]
 	%190 = getelementptr %struct.VERTEX, %struct.VERTEX* %176, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%191 = load double* %190, align 4		; <double> [#uses=2]
+	%191 = load double, double* %190, align 4		; <double> [#uses=2]
 	%192 = getelementptr %struct.VERTEX, %struct.VERTEX* %176, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%193 = load double* %192, align 4		; <double> [#uses=2]
+	%193 = load double, double* %192, align 4		; <double> [#uses=2]
 	%194 = fsub double %183, %191		; <double> [#uses=1]
 	%195 = fsub double %189, %193		; <double> [#uses=1]
 	%196 = fmul double %194, %195		; <double> [#uses=1]
@@ -281,7 +281,7 @@ bb9.i:		; preds = %bb25.i, %bb24.i, %bb5
 
 bb10.i:		; preds = %bb9.i
 	%202 = getelementptr %struct.VERTEX, %struct.VERTEX* %171, i32 0, i32 0, i32 2		; <double*> [#uses=1]
-	%avail_edge.promoted25 = load %struct.edge_rec** @avail_edge		; <%struct.edge_rec*> [#uses=1]
+	%avail_edge.promoted25 = load %struct.edge_rec*, %struct.edge_rec** @avail_edge		; <%struct.edge_rec*> [#uses=1]
 	br label %bb12.i
 
 bb11.i:		; preds = %bb12.i
@@ -292,7 +292,7 @@ bb11.i:		; preds = %bb12.i
 	%207 = or i32 %205, %206		; <i32> [#uses=1]
 	%208 = inttoptr i32 %207 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%209 = getelementptr %struct.edge_rec, %struct.edge_rec* %208, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%210 = load %struct.edge_rec** %209, align 4		; <%struct.edge_rec*> [#uses=1]
+	%210 = load %struct.edge_rec*, %struct.edge_rec** %209, align 4		; <%struct.edge_rec*> [#uses=1]
 	%211 = ptrtoint %struct.edge_rec* %210 to i32		; <i32> [#uses=2]
 	%212 = add i32 %211, 16		; <i32> [#uses=1]
 	%213 = and i32 %212, 63		; <i32> [#uses=1]
@@ -300,7 +300,7 @@ bb11.i:		; preds = %bb12.i
 	%215 = or i32 %213, %214		; <i32> [#uses=1]
 	%216 = inttoptr i32 %215 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%217 = getelementptr %struct.edge_rec, %struct.edge_rec* %lcand.0.i, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%218 = load %struct.edge_rec** %217, align 4		; <%struct.edge_rec*> [#uses=1]
+	%218 = load %struct.edge_rec*, %struct.edge_rec** %217, align 4		; <%struct.edge_rec*> [#uses=1]
 	%219 = ptrtoint %struct.edge_rec* %218 to i32		; <i32> [#uses=2]
 	%220 = add i32 %219, 16		; <i32> [#uses=1]
 	%221 = and i32 %220, 63		; <i32> [#uses=1]
@@ -308,7 +308,7 @@ bb11.i:		; preds = %bb12.i
 	%223 = or i32 %221, %222		; <i32> [#uses=1]
 	%224 = inttoptr i32 %223 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%225 = getelementptr %struct.edge_rec, %struct.edge_rec* %216, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%226 = load %struct.edge_rec** %225, align 4		; <%struct.edge_rec*> [#uses=1]
+	%226 = load %struct.edge_rec*, %struct.edge_rec** %225, align 4		; <%struct.edge_rec*> [#uses=1]
 	%227 = ptrtoint %struct.edge_rec* %226 to i32		; <i32> [#uses=2]
 	%228 = add i32 %227, 16		; <i32> [#uses=1]
 	%229 = and i32 %228, 63		; <i32> [#uses=1]
@@ -316,13 +316,13 @@ bb11.i:		; preds = %bb12.i
 	%231 = or i32 %229, %230		; <i32> [#uses=1]
 	%232 = inttoptr i32 %231 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%233 = getelementptr %struct.edge_rec, %struct.edge_rec* %232, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%234 = load %struct.edge_rec** %233, align 4		; <%struct.edge_rec*> [#uses=1]
+	%234 = load %struct.edge_rec*, %struct.edge_rec** %233, align 4		; <%struct.edge_rec*> [#uses=1]
 	%235 = getelementptr %struct.edge_rec, %struct.edge_rec* %224, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%236 = load %struct.edge_rec** %235, align 4		; <%struct.edge_rec*> [#uses=1]
+	%236 = load %struct.edge_rec*, %struct.edge_rec** %235, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %234, %struct.edge_rec** %235, align 4
 	store %struct.edge_rec* %236, %struct.edge_rec** %233, align 4
-	%237 = load %struct.edge_rec** %217, align 4		; <%struct.edge_rec*> [#uses=1]
-	%238 = load %struct.edge_rec** %225, align 4		; <%struct.edge_rec*> [#uses=1]
+	%237 = load %struct.edge_rec*, %struct.edge_rec** %217, align 4		; <%struct.edge_rec*> [#uses=1]
+	%238 = load %struct.edge_rec*, %struct.edge_rec** %225, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %237, %struct.edge_rec** %225, align 4
 	store %struct.edge_rec* %238, %struct.edge_rec** %217, align 4
 	%239 = xor i32 %203, 32		; <i32> [#uses=2]
@@ -331,7 +331,7 @@ bb11.i:		; preds = %bb12.i
 	%242 = or i32 %241, %206		; <i32> [#uses=1]
 	%243 = inttoptr i32 %242 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%244 = getelementptr %struct.edge_rec, %struct.edge_rec* %243, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%245 = load %struct.edge_rec** %244, align 4		; <%struct.edge_rec*> [#uses=1]
+	%245 = load %struct.edge_rec*, %struct.edge_rec** %244, align 4		; <%struct.edge_rec*> [#uses=1]
 	%246 = ptrtoint %struct.edge_rec* %245 to i32		; <i32> [#uses=2]
 	%247 = add i32 %246, 16		; <i32> [#uses=1]
 	%248 = and i32 %247, 63		; <i32> [#uses=1]
@@ -340,7 +340,7 @@ bb11.i:		; preds = %bb12.i
 	%251 = inttoptr i32 %250 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%252 = inttoptr i32 %239 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%253 = getelementptr %struct.edge_rec, %struct.edge_rec* %252, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%254 = load %struct.edge_rec** %253, align 4		; <%struct.edge_rec*> [#uses=1]
+	%254 = load %struct.edge_rec*, %struct.edge_rec** %253, align 4		; <%struct.edge_rec*> [#uses=1]
 	%255 = ptrtoint %struct.edge_rec* %254 to i32		; <i32> [#uses=2]
 	%256 = add i32 %255, 16		; <i32> [#uses=1]
 	%257 = and i32 %256, 63		; <i32> [#uses=1]
@@ -348,7 +348,7 @@ bb11.i:		; preds = %bb12.i
 	%259 = or i32 %257, %258		; <i32> [#uses=1]
 	%260 = inttoptr i32 %259 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%261 = getelementptr %struct.edge_rec, %struct.edge_rec* %251, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%262 = load %struct.edge_rec** %261, align 4		; <%struct.edge_rec*> [#uses=1]
+	%262 = load %struct.edge_rec*, %struct.edge_rec** %261, align 4		; <%struct.edge_rec*> [#uses=1]
 	%263 = ptrtoint %struct.edge_rec* %262 to i32		; <i32> [#uses=2]
 	%264 = add i32 %263, 16		; <i32> [#uses=1]
 	%265 = and i32 %264, 63		; <i32> [#uses=1]
@@ -356,22 +356,22 @@ bb11.i:		; preds = %bb12.i
 	%267 = or i32 %265, %266		; <i32> [#uses=1]
 	%268 = inttoptr i32 %267 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%269 = getelementptr %struct.edge_rec, %struct.edge_rec* %268, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%270 = load %struct.edge_rec** %269, align 4		; <%struct.edge_rec*> [#uses=1]
+	%270 = load %struct.edge_rec*, %struct.edge_rec** %269, align 4		; <%struct.edge_rec*> [#uses=1]
 	%271 = getelementptr %struct.edge_rec, %struct.edge_rec* %260, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%272 = load %struct.edge_rec** %271, align 4		; <%struct.edge_rec*> [#uses=1]
+	%272 = load %struct.edge_rec*, %struct.edge_rec** %271, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %270, %struct.edge_rec** %271, align 4
 	store %struct.edge_rec* %272, %struct.edge_rec** %269, align 4
-	%273 = load %struct.edge_rec** %253, align 4		; <%struct.edge_rec*> [#uses=1]
-	%274 = load %struct.edge_rec** %261, align 4		; <%struct.edge_rec*> [#uses=1]
+	%273 = load %struct.edge_rec*, %struct.edge_rec** %253, align 4		; <%struct.edge_rec*> [#uses=1]
+	%274 = load %struct.edge_rec*, %struct.edge_rec** %261, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %273, %struct.edge_rec** %261, align 4
 	store %struct.edge_rec* %274, %struct.edge_rec** %253, align 4
 	%275 = inttoptr i32 %206 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=2]
 	%276 = getelementptr %struct.edge_rec, %struct.edge_rec* %275, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
 	store %struct.edge_rec* %avail_edge.tmp.026, %struct.edge_rec** %276, align 4
 	%277 = getelementptr %struct.edge_rec, %struct.edge_rec* %t.0.i, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%278 = load %struct.edge_rec** %277, align 4		; <%struct.edge_rec*> [#uses=2]
-	%.pre.i = load double* %182, align 4		; <double> [#uses=1]
-	%.pre22.i = load double* %184, align 4		; <double> [#uses=1]
+	%278 = load %struct.edge_rec*, %struct.edge_rec** %277, align 4		; <%struct.edge_rec*> [#uses=2]
+	%.pre.i = load double, double* %182, align 4		; <double> [#uses=1]
+	%.pre22.i = load double, double* %184, align 4		; <double> [#uses=1]
 	br label %bb12.i
 
 bb12.i:		; preds = %bb11.i, %bb10.i
@@ -392,34 +392,34 @@ bb12.i:		; preds = %bb11.i, %bb10.i
 	%v1.0.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %.pn5.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
 	%v2.0.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %.pn4.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
 	%v3.0.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %lcand.2.pn.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%v1.0.i = load %struct.VERTEX** %v1.0.in.i		; <%struct.VERTEX*> [#uses=3]
-	%v2.0.i = load %struct.VERTEX** %v2.0.in.i		; <%struct.VERTEX*> [#uses=3]
-	%v3.0.i = load %struct.VERTEX** %v3.0.in.i		; <%struct.VERTEX*> [#uses=3]
-	%281 = load double* %202, align 4		; <double> [#uses=3]
+	%v1.0.i = load %struct.VERTEX*, %struct.VERTEX** %v1.0.in.i		; <%struct.VERTEX*> [#uses=3]
+	%v2.0.i = load %struct.VERTEX*, %struct.VERTEX** %v2.0.in.i		; <%struct.VERTEX*> [#uses=3]
+	%v3.0.i = load %struct.VERTEX*, %struct.VERTEX** %v3.0.in.i		; <%struct.VERTEX*> [#uses=3]
+	%281 = load double, double* %202, align 4		; <double> [#uses=3]
 	%282 = getelementptr %struct.VERTEX, %struct.VERTEX* %v1.0.i, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%283 = load double* %282, align 4		; <double> [#uses=1]
+	%283 = load double, double* %282, align 4		; <double> [#uses=1]
 	%284 = fsub double %283, %280		; <double> [#uses=2]
 	%285 = getelementptr %struct.VERTEX, %struct.VERTEX* %v1.0.i, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%286 = load double* %285, align 4		; <double> [#uses=1]
+	%286 = load double, double* %285, align 4		; <double> [#uses=1]
 	%287 = fsub double %286, %279		; <double> [#uses=2]
 	%288 = getelementptr %struct.VERTEX, %struct.VERTEX* %v1.0.i, i32 0, i32 0, i32 2		; <double*> [#uses=1]
-	%289 = load double* %288, align 4		; <double> [#uses=1]
+	%289 = load double, double* %288, align 4		; <double> [#uses=1]
 	%290 = getelementptr %struct.VERTEX, %struct.VERTEX* %v2.0.i, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%291 = load double* %290, align 4		; <double> [#uses=1]
+	%291 = load double, double* %290, align 4		; <double> [#uses=1]
 	%292 = fsub double %291, %280		; <double> [#uses=2]
 	%293 = getelementptr %struct.VERTEX, %struct.VERTEX* %v2.0.i, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%294 = load double* %293, align 4		; <double> [#uses=1]
+	%294 = load double, double* %293, align 4		; <double> [#uses=1]
 	%295 = fsub double %294, %279		; <double> [#uses=2]
 	%296 = getelementptr %struct.VERTEX, %struct.VERTEX* %v2.0.i, i32 0, i32 0, i32 2		; <double*> [#uses=1]
-	%297 = load double* %296, align 4		; <double> [#uses=1]
+	%297 = load double, double* %296, align 4		; <double> [#uses=1]
 	%298 = getelementptr %struct.VERTEX, %struct.VERTEX* %v3.0.i, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%299 = load double* %298, align 4		; <double> [#uses=1]
+	%299 = load double, double* %298, align 4		; <double> [#uses=1]
 	%300 = fsub double %299, %280		; <double> [#uses=2]
 	%301 = getelementptr %struct.VERTEX, %struct.VERTEX* %v3.0.i, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%302 = load double* %301, align 4		; <double> [#uses=1]
+	%302 = load double, double* %301, align 4		; <double> [#uses=1]
 	%303 = fsub double %302, %279		; <double> [#uses=2]
 	%304 = getelementptr %struct.VERTEX, %struct.VERTEX* %v3.0.i, i32 0, i32 0, i32 2		; <double*> [#uses=1]
-	%305 = load double* %304, align 4		; <double> [#uses=1]
+	%305 = load double, double* %304, align 4		; <double> [#uses=1]
 	%306 = fsub double %289, %281		; <double> [#uses=1]
 	%307 = fmul double %292, %303		; <double> [#uses=1]
 	%308 = fmul double %295, %300		; <double> [#uses=1]
@@ -442,8 +442,8 @@ bb12.i:		; preds = %bb11.i, %bb10.i
 
 bb13.loopexit.i:		; preds = %bb12.i
 	store %struct.edge_rec* %avail_edge.tmp.026, %struct.edge_rec** @avail_edge
-	%.pre23.i = load %struct.VERTEX** %170, align 4		; <%struct.VERTEX*> [#uses=1]
-	%.pre24.i = load %struct.VERTEX** %175, align 4		; <%struct.VERTEX*> [#uses=1]
+	%.pre23.i = load %struct.VERTEX*, %struct.VERTEX** %170, align 4		; <%struct.VERTEX*> [#uses=1]
+	%.pre24.i = load %struct.VERTEX*, %struct.VERTEX** %175, align 4		; <%struct.VERTEX*> [#uses=1]
 	br label %bb13.i
 
 bb13.i:		; preds = %bb13.loopexit.i, %bb9.i
@@ -457,7 +457,7 @@ bb13.i:		; preds = %bb13.loopexit.i, %bb
 	%330 = or i32 %328, %329		; <i32> [#uses=1]
 	%331 = inttoptr i32 %330 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%332 = getelementptr %struct.edge_rec, %struct.edge_rec* %331, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%333 = load %struct.edge_rec** %332, align 4		; <%struct.edge_rec*> [#uses=1]
+	%333 = load %struct.edge_rec*, %struct.edge_rec** %332, align 4		; <%struct.edge_rec*> [#uses=1]
 	%334 = ptrtoint %struct.edge_rec* %333 to i32		; <i32> [#uses=2]
 	%335 = add i32 %334, 16		; <i32> [#uses=1]
 	%336 = and i32 %335, 63		; <i32> [#uses=1]
@@ -466,19 +466,19 @@ bb13.i:		; preds = %bb13.loopexit.i, %bb
 	%339 = xor i32 %338, 32		; <i32> [#uses=1]
 	%340 = inttoptr i32 %339 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%341 = getelementptr %struct.edge_rec, %struct.edge_rec* %340, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%342 = load %struct.VERTEX** %341, align 4		; <%struct.VERTEX*> [#uses=2]
+	%342 = load %struct.VERTEX*, %struct.VERTEX** %341, align 4		; <%struct.VERTEX*> [#uses=2]
 	%343 = getelementptr %struct.VERTEX, %struct.VERTEX* %325, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%344 = load double* %343, align 4		; <double> [#uses=1]
+	%344 = load double, double* %343, align 4		; <double> [#uses=1]
 	%345 = getelementptr %struct.VERTEX, %struct.VERTEX* %325, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%346 = load double* %345, align 4		; <double> [#uses=1]
+	%346 = load double, double* %345, align 4		; <double> [#uses=1]
 	%347 = getelementptr %struct.VERTEX, %struct.VERTEX* %342, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%348 = load double* %347, align 4		; <double> [#uses=1]
+	%348 = load double, double* %347, align 4		; <double> [#uses=1]
 	%349 = getelementptr %struct.VERTEX, %struct.VERTEX* %342, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%350 = load double* %349, align 4		; <double> [#uses=1]
+	%350 = load double, double* %349, align 4		; <double> [#uses=1]
 	%351 = getelementptr %struct.VERTEX, %struct.VERTEX* %324, i32 0, i32 0, i32 0		; <double*> [#uses=2]
-	%352 = load double* %351, align 4		; <double> [#uses=3]
+	%352 = load double, double* %351, align 4		; <double> [#uses=3]
 	%353 = getelementptr %struct.VERTEX, %struct.VERTEX* %324, i32 0, i32 0, i32 1		; <double*> [#uses=2]
-	%354 = load double* %353, align 4		; <double> [#uses=3]
+	%354 = load double, double* %353, align 4		; <double> [#uses=3]
 	%355 = fsub double %344, %352		; <double> [#uses=1]
 	%356 = fsub double %350, %354		; <double> [#uses=1]
 	%357 = fmul double %355, %356		; <double> [#uses=1]
@@ -491,7 +491,7 @@ bb13.i:		; preds = %bb13.loopexit.i, %bb
 
 bb14.i:		; preds = %bb13.i
 	%363 = getelementptr %struct.VERTEX, %struct.VERTEX* %324, i32 0, i32 0, i32 2		; <double*> [#uses=1]
-	%avail_edge.promoted = load %struct.edge_rec** @avail_edge		; <%struct.edge_rec*> [#uses=1]
+	%avail_edge.promoted = load %struct.edge_rec*, %struct.edge_rec** @avail_edge		; <%struct.edge_rec*> [#uses=1]
 	br label %bb16.i
 
 bb15.i:		; preds = %bb16.i
@@ -502,7 +502,7 @@ bb15.i:		; preds = %bb16.i
 	%368 = or i32 %366, %367		; <i32> [#uses=1]
 	%369 = inttoptr i32 %368 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%370 = getelementptr %struct.edge_rec, %struct.edge_rec* %369, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%371 = load %struct.edge_rec** %370, align 4		; <%struct.edge_rec*> [#uses=1]
+	%371 = load %struct.edge_rec*, %struct.edge_rec** %370, align 4		; <%struct.edge_rec*> [#uses=1]
 	%372 = ptrtoint %struct.edge_rec* %371 to i32		; <i32> [#uses=2]
 	%373 = add i32 %372, 16		; <i32> [#uses=1]
 	%374 = and i32 %373, 63		; <i32> [#uses=1]
@@ -510,7 +510,7 @@ bb15.i:		; preds = %bb16.i
 	%376 = or i32 %374, %375		; <i32> [#uses=1]
 	%377 = inttoptr i32 %376 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%378 = getelementptr %struct.edge_rec, %struct.edge_rec* %rcand.0.i, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%379 = load %struct.edge_rec** %378, align 4		; <%struct.edge_rec*> [#uses=1]
+	%379 = load %struct.edge_rec*, %struct.edge_rec** %378, align 4		; <%struct.edge_rec*> [#uses=1]
 	%380 = ptrtoint %struct.edge_rec* %379 to i32		; <i32> [#uses=2]
 	%381 = add i32 %380, 16		; <i32> [#uses=1]
 	%382 = and i32 %381, 63		; <i32> [#uses=1]
@@ -518,7 +518,7 @@ bb15.i:		; preds = %bb16.i
 	%384 = or i32 %382, %383		; <i32> [#uses=1]
 	%385 = inttoptr i32 %384 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%386 = getelementptr %struct.edge_rec, %struct.edge_rec* %377, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%387 = load %struct.edge_rec** %386, align 4		; <%struct.edge_rec*> [#uses=1]
+	%387 = load %struct.edge_rec*, %struct.edge_rec** %386, align 4		; <%struct.edge_rec*> [#uses=1]
 	%388 = ptrtoint %struct.edge_rec* %387 to i32		; <i32> [#uses=2]
 	%389 = add i32 %388, 16		; <i32> [#uses=1]
 	%390 = and i32 %389, 63		; <i32> [#uses=1]
@@ -526,13 +526,13 @@ bb15.i:		; preds = %bb16.i
 	%392 = or i32 %390, %391		; <i32> [#uses=1]
 	%393 = inttoptr i32 %392 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%394 = getelementptr %struct.edge_rec, %struct.edge_rec* %393, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%395 = load %struct.edge_rec** %394, align 4		; <%struct.edge_rec*> [#uses=1]
+	%395 = load %struct.edge_rec*, %struct.edge_rec** %394, align 4		; <%struct.edge_rec*> [#uses=1]
 	%396 = getelementptr %struct.edge_rec, %struct.edge_rec* %385, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%397 = load %struct.edge_rec** %396, align 4		; <%struct.edge_rec*> [#uses=1]
+	%397 = load %struct.edge_rec*, %struct.edge_rec** %396, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %395, %struct.edge_rec** %396, align 4
 	store %struct.edge_rec* %397, %struct.edge_rec** %394, align 4
-	%398 = load %struct.edge_rec** %378, align 4		; <%struct.edge_rec*> [#uses=1]
-	%399 = load %struct.edge_rec** %386, align 4		; <%struct.edge_rec*> [#uses=1]
+	%398 = load %struct.edge_rec*, %struct.edge_rec** %378, align 4		; <%struct.edge_rec*> [#uses=1]
+	%399 = load %struct.edge_rec*, %struct.edge_rec** %386, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %398, %struct.edge_rec** %386, align 4
 	store %struct.edge_rec* %399, %struct.edge_rec** %378, align 4
 	%400 = xor i32 %364, 32		; <i32> [#uses=2]
@@ -541,7 +541,7 @@ bb15.i:		; preds = %bb16.i
 	%403 = or i32 %402, %367		; <i32> [#uses=1]
 	%404 = inttoptr i32 %403 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%405 = getelementptr %struct.edge_rec, %struct.edge_rec* %404, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%406 = load %struct.edge_rec** %405, align 4		; <%struct.edge_rec*> [#uses=1]
+	%406 = load %struct.edge_rec*, %struct.edge_rec** %405, align 4		; <%struct.edge_rec*> [#uses=1]
 	%407 = ptrtoint %struct.edge_rec* %406 to i32		; <i32> [#uses=2]
 	%408 = add i32 %407, 16		; <i32> [#uses=1]
 	%409 = and i32 %408, 63		; <i32> [#uses=1]
@@ -550,7 +550,7 @@ bb15.i:		; preds = %bb16.i
 	%412 = inttoptr i32 %411 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%413 = inttoptr i32 %400 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%414 = getelementptr %struct.edge_rec, %struct.edge_rec* %413, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%415 = load %struct.edge_rec** %414, align 4		; <%struct.edge_rec*> [#uses=1]
+	%415 = load %struct.edge_rec*, %struct.edge_rec** %414, align 4		; <%struct.edge_rec*> [#uses=1]
 	%416 = ptrtoint %struct.edge_rec* %415 to i32		; <i32> [#uses=2]
 	%417 = add i32 %416, 16		; <i32> [#uses=1]
 	%418 = and i32 %417, 63		; <i32> [#uses=1]
@@ -558,7 +558,7 @@ bb15.i:		; preds = %bb16.i
 	%420 = or i32 %418, %419		; <i32> [#uses=1]
 	%421 = inttoptr i32 %420 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%422 = getelementptr %struct.edge_rec, %struct.edge_rec* %412, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%423 = load %struct.edge_rec** %422, align 4		; <%struct.edge_rec*> [#uses=1]
+	%423 = load %struct.edge_rec*, %struct.edge_rec** %422, align 4		; <%struct.edge_rec*> [#uses=1]
 	%424 = ptrtoint %struct.edge_rec* %423 to i32		; <i32> [#uses=2]
 	%425 = add i32 %424, 16		; <i32> [#uses=1]
 	%426 = and i32 %425, 63		; <i32> [#uses=1]
@@ -566,13 +566,13 @@ bb15.i:		; preds = %bb16.i
 	%428 = or i32 %426, %427		; <i32> [#uses=1]
 	%429 = inttoptr i32 %428 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%430 = getelementptr %struct.edge_rec, %struct.edge_rec* %429, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%431 = load %struct.edge_rec** %430, align 4		; <%struct.edge_rec*> [#uses=1]
+	%431 = load %struct.edge_rec*, %struct.edge_rec** %430, align 4		; <%struct.edge_rec*> [#uses=1]
 	%432 = getelementptr %struct.edge_rec, %struct.edge_rec* %421, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%433 = load %struct.edge_rec** %432, align 4		; <%struct.edge_rec*> [#uses=1]
+	%433 = load %struct.edge_rec*, %struct.edge_rec** %432, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %431, %struct.edge_rec** %432, align 4
 	store %struct.edge_rec* %433, %struct.edge_rec** %430, align 4
-	%434 = load %struct.edge_rec** %414, align 4		; <%struct.edge_rec*> [#uses=1]
-	%435 = load %struct.edge_rec** %422, align 4		; <%struct.edge_rec*> [#uses=1]
+	%434 = load %struct.edge_rec*, %struct.edge_rec** %414, align 4		; <%struct.edge_rec*> [#uses=1]
+	%435 = load %struct.edge_rec*, %struct.edge_rec** %422, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %434, %struct.edge_rec** %422, align 4
 	store %struct.edge_rec* %435, %struct.edge_rec** %414, align 4
 	%436 = inttoptr i32 %367 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=2]
@@ -584,14 +584,14 @@ bb15.i:		; preds = %bb16.i
 	%441 = or i32 %439, %440		; <i32> [#uses=1]
 	%442 = inttoptr i32 %441 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%443 = getelementptr %struct.edge_rec, %struct.edge_rec* %442, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%444 = load %struct.edge_rec** %443, align 4		; <%struct.edge_rec*> [#uses=1]
+	%444 = load %struct.edge_rec*, %struct.edge_rec** %443, align 4		; <%struct.edge_rec*> [#uses=1]
 	%445 = ptrtoint %struct.edge_rec* %444 to i32		; <i32> [#uses=2]
 	%446 = add i32 %445, 16		; <i32> [#uses=1]
 	%447 = and i32 %446, 63		; <i32> [#uses=1]
 	%448 = and i32 %445, -64		; <i32> [#uses=1]
 	%449 = or i32 %447, %448		; <i32> [#uses=2]
-	%.pre25.i = load double* %351, align 4		; <double> [#uses=1]
-	%.pre26.i = load double* %353, align 4		; <double> [#uses=1]
+	%.pre25.i = load double, double* %351, align 4		; <double> [#uses=1]
+	%.pre26.i = load double, double* %353, align 4		; <double> [#uses=1]
 	br label %bb16.i
 
 bb16.i:		; preds = %bb15.i, %bb14.i
@@ -612,34 +612,34 @@ bb16.i:		; preds = %bb15.i, %bb14.i
 	%v1.1.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %.pn3.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
 	%v2.1.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %.pn.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
 	%v3.1.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %rcand.2.pn.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%v1.1.i = load %struct.VERTEX** %v1.1.in.i		; <%struct.VERTEX*> [#uses=3]
-	%v2.1.i = load %struct.VERTEX** %v2.1.in.i		; <%struct.VERTEX*> [#uses=3]
-	%v3.1.i = load %struct.VERTEX** %v3.1.in.i		; <%struct.VERTEX*> [#uses=3]
-	%452 = load double* %363, align 4		; <double> [#uses=3]
+	%v1.1.i = load %struct.VERTEX*, %struct.VERTEX** %v1.1.in.i		; <%struct.VERTEX*> [#uses=3]
+	%v2.1.i = load %struct.VERTEX*, %struct.VERTEX** %v2.1.in.i		; <%struct.VERTEX*> [#uses=3]
+	%v3.1.i = load %struct.VERTEX*, %struct.VERTEX** %v3.1.in.i		; <%struct.VERTEX*> [#uses=3]
+	%452 = load double, double* %363, align 4		; <double> [#uses=3]
 	%453 = getelementptr %struct.VERTEX, %struct.VERTEX* %v1.1.i, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%454 = load double* %453, align 4		; <double> [#uses=1]
+	%454 = load double, double* %453, align 4		; <double> [#uses=1]
 	%455 = fsub double %454, %451		; <double> [#uses=2]
 	%456 = getelementptr %struct.VERTEX, %struct.VERTEX* %v1.1.i, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%457 = load double* %456, align 4		; <double> [#uses=1]
+	%457 = load double, double* %456, align 4		; <double> [#uses=1]
 	%458 = fsub double %457, %450		; <double> [#uses=2]
 	%459 = getelementptr %struct.VERTEX, %struct.VERTEX* %v1.1.i, i32 0, i32 0, i32 2		; <double*> [#uses=1]
-	%460 = load double* %459, align 4		; <double> [#uses=1]
+	%460 = load double, double* %459, align 4		; <double> [#uses=1]
 	%461 = getelementptr %struct.VERTEX, %struct.VERTEX* %v2.1.i, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%462 = load double* %461, align 4		; <double> [#uses=1]
+	%462 = load double, double* %461, align 4		; <double> [#uses=1]
 	%463 = fsub double %462, %451		; <double> [#uses=2]
 	%464 = getelementptr %struct.VERTEX, %struct.VERTEX* %v2.1.i, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%465 = load double* %464, align 4		; <double> [#uses=1]
+	%465 = load double, double* %464, align 4		; <double> [#uses=1]
 	%466 = fsub double %465, %450		; <double> [#uses=2]
 	%467 = getelementptr %struct.VERTEX, %struct.VERTEX* %v2.1.i, i32 0, i32 0, i32 2		; <double*> [#uses=1]
-	%468 = load double* %467, align 4		; <double> [#uses=1]
+	%468 = load double, double* %467, align 4		; <double> [#uses=1]
 	%469 = getelementptr %struct.VERTEX, %struct.VERTEX* %v3.1.i, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%470 = load double* %469, align 4		; <double> [#uses=1]
+	%470 = load double, double* %469, align 4		; <double> [#uses=1]
 	%471 = fsub double %470, %451		; <double> [#uses=2]
 	%472 = getelementptr %struct.VERTEX, %struct.VERTEX* %v3.1.i, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%473 = load double* %472, align 4		; <double> [#uses=1]
+	%473 = load double, double* %472, align 4		; <double> [#uses=1]
 	%474 = fsub double %473, %450		; <double> [#uses=2]
 	%475 = getelementptr %struct.VERTEX, %struct.VERTEX* %v3.1.i, i32 0, i32 0, i32 2		; <double*> [#uses=1]
-	%476 = load double* %475, align 4		; <double> [#uses=1]
+	%476 = load double, double* %475, align 4		; <double> [#uses=1]
 	%477 = fsub double %460, %452		; <double> [#uses=1]
 	%478 = fmul double %463, %474		; <double> [#uses=1]
 	%479 = fmul double %466, %471		; <double> [#uses=1]
@@ -662,8 +662,8 @@ bb16.i:		; preds = %bb15.i, %bb14.i
 
 bb17.loopexit.i:		; preds = %bb16.i
 	store %struct.edge_rec* %avail_edge.tmp.0, %struct.edge_rec** @avail_edge
-	%.pre27.i = load %struct.VERTEX** %170, align 4		; <%struct.VERTEX*> [#uses=1]
-	%.pre28.i = load %struct.VERTEX** %175, align 4		; <%struct.VERTEX*> [#uses=1]
+	%.pre27.i = load %struct.VERTEX*, %struct.VERTEX** %170, align 4		; <%struct.VERTEX*> [#uses=1]
+	%.pre28.i = load %struct.VERTEX*, %struct.VERTEX** %175, align 4		; <%struct.VERTEX*> [#uses=1]
 	br label %bb17.i
 
 bb17.i:		; preds = %bb17.loopexit.i, %bb13.i
@@ -674,19 +674,19 @@ bb17.i:		; preds = %bb17.loopexit.i, %bb
 	%498 = xor i32 %497, 32		; <i32> [#uses=1]
 	%499 = inttoptr i32 %498 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=2]
 	%500 = getelementptr %struct.edge_rec, %struct.edge_rec* %499, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%501 = load %struct.VERTEX** %500, align 4		; <%struct.VERTEX*> [#uses=4]
+	%501 = load %struct.VERTEX*, %struct.VERTEX** %500, align 4		; <%struct.VERTEX*> [#uses=4]
 	%502 = getelementptr %struct.VERTEX, %struct.VERTEX* %496, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%503 = load double* %502, align 4		; <double> [#uses=1]
+	%503 = load double, double* %502, align 4		; <double> [#uses=1]
 	%504 = getelementptr %struct.VERTEX, %struct.VERTEX* %496, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%505 = load double* %504, align 4		; <double> [#uses=1]
+	%505 = load double, double* %504, align 4		; <double> [#uses=1]
 	%506 = getelementptr %struct.VERTEX, %struct.VERTEX* %501, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%507 = load double* %506, align 4		; <double> [#uses=2]
+	%507 = load double, double* %506, align 4		; <double> [#uses=2]
 	%508 = getelementptr %struct.VERTEX, %struct.VERTEX* %501, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%509 = load double* %508, align 4		; <double> [#uses=2]
+	%509 = load double, double* %508, align 4		; <double> [#uses=2]
 	%510 = getelementptr %struct.VERTEX, %struct.VERTEX* %495, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%511 = load double* %510, align 4		; <double> [#uses=3]
+	%511 = load double, double* %510, align 4		; <double> [#uses=3]
 	%512 = getelementptr %struct.VERTEX, %struct.VERTEX* %495, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%513 = load double* %512, align 4		; <double> [#uses=3]
+	%513 = load double, double* %512, align 4		; <double> [#uses=3]
 	%514 = fsub double %503, %511		; <double> [#uses=2]
 	%515 = fsub double %509, %513		; <double> [#uses=1]
 	%516 = fmul double %514, %515		; <double> [#uses=1]
@@ -699,11 +699,11 @@ bb17.i:		; preds = %bb17.loopexit.i, %bb
 	%523 = xor i32 %522, 32		; <i32> [#uses=1]
 	%524 = inttoptr i32 %523 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%525 = getelementptr %struct.edge_rec, %struct.edge_rec* %524, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%526 = load %struct.VERTEX** %525, align 4		; <%struct.VERTEX*> [#uses=4]
+	%526 = load %struct.VERTEX*, %struct.VERTEX** %525, align 4		; <%struct.VERTEX*> [#uses=4]
 	%527 = getelementptr %struct.VERTEX, %struct.VERTEX* %526, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%528 = load double* %527, align 4		; <double> [#uses=4]
+	%528 = load double, double* %527, align 4		; <double> [#uses=4]
 	%529 = getelementptr %struct.VERTEX, %struct.VERTEX* %526, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%530 = load double* %529, align 4		; <double> [#uses=4]
+	%530 = load double, double* %529, align 4		; <double> [#uses=4]
 	%531 = fsub double %530, %513		; <double> [#uses=1]
 	%532 = fmul double %514, %531		; <double> [#uses=1]
 	%533 = fsub double %528, %511		; <double> [#uses=1]
@@ -715,9 +715,9 @@ bb17.i:		; preds = %bb17.loopexit.i, %bb
 
 bb21.i:		; preds = %bb17.i
 	%538 = getelementptr %struct.edge_rec, %struct.edge_rec* %lcand.1.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%539 = load %struct.VERTEX** %538, align 4		; <%struct.VERTEX*> [#uses=3]
+	%539 = load %struct.VERTEX*, %struct.VERTEX** %538, align 4		; <%struct.VERTEX*> [#uses=3]
 	%540 = getelementptr %struct.edge_rec, %struct.edge_rec* %rcand.1.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%541 = load %struct.VERTEX** %540, align 4		; <%struct.VERTEX*> [#uses=3]
+	%541 = load %struct.VERTEX*, %struct.VERTEX** %540, align 4		; <%struct.VERTEX*> [#uses=3]
 	br i1 %521, label %bb22.i, label %bb24.i
 
 bb22.i:		; preds = %bb21.i
@@ -725,27 +725,27 @@ bb22.i:		; preds = %bb21.i
 
 bb23.i:		; preds = %bb22.i
 	%542 = getelementptr %struct.VERTEX, %struct.VERTEX* %526, i32 0, i32 0, i32 2		; <double*> [#uses=1]
-	%543 = load double* %542, align 4		; <double> [#uses=3]
+	%543 = load double, double* %542, align 4		; <double> [#uses=3]
 	%544 = fsub double %507, %528		; <double> [#uses=2]
 	%545 = fsub double %509, %530		; <double> [#uses=2]
 	%546 = getelementptr %struct.VERTEX, %struct.VERTEX* %501, i32 0, i32 0, i32 2		; <double*> [#uses=1]
-	%547 = load double* %546, align 4		; <double> [#uses=1]
+	%547 = load double, double* %546, align 4		; <double> [#uses=1]
 	%548 = getelementptr %struct.VERTEX, %struct.VERTEX* %539, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%549 = load double* %548, align 4		; <double> [#uses=1]
+	%549 = load double, double* %548, align 4		; <double> [#uses=1]
 	%550 = fsub double %549, %528		; <double> [#uses=2]
 	%551 = getelementptr %struct.VERTEX, %struct.VERTEX* %539, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%552 = load double* %551, align 4		; <double> [#uses=1]
+	%552 = load double, double* %551, align 4		; <double> [#uses=1]
 	%553 = fsub double %552, %530		; <double> [#uses=2]
 	%554 = getelementptr %struct.VERTEX, %struct.VERTEX* %539, i32 0, i32 0, i32 2		; <double*> [#uses=1]
-	%555 = load double* %554, align 4		; <double> [#uses=1]
+	%555 = load double, double* %554, align 4		; <double> [#uses=1]
 	%556 = getelementptr %struct.VERTEX, %struct.VERTEX* %541, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%557 = load double* %556, align 4		; <double> [#uses=1]
+	%557 = load double, double* %556, align 4		; <double> [#uses=1]
 	%558 = fsub double %557, %528		; <double> [#uses=2]
 	%559 = getelementptr %struct.VERTEX, %struct.VERTEX* %541, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%560 = load double* %559, align 4		; <double> [#uses=1]
+	%560 = load double, double* %559, align 4		; <double> [#uses=1]
 	%561 = fsub double %560, %530		; <double> [#uses=2]
 	%562 = getelementptr %struct.VERTEX, %struct.VERTEX* %541, i32 0, i32 0, i32 2		; <double*> [#uses=1]
-	%563 = load double* %562, align 4		; <double> [#uses=1]
+	%563 = load double, double* %562, align 4		; <double> [#uses=1]
 	%564 = fsub double %547, %543		; <double> [#uses=1]
 	%565 = fmul double %550, %561		; <double> [#uses=1]
 	%566 = fmul double %553, %558		; <double> [#uses=1]
@@ -773,7 +773,7 @@ bb24.i:		; preds = %bb23.i, %bb21.i
 	%585 = or i32 %583, %584		; <i32> [#uses=1]
 	%586 = inttoptr i32 %585 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%587 = getelementptr %struct.edge_rec, %struct.edge_rec* %586, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%588 = load %struct.edge_rec** %587, align 4		; <%struct.edge_rec*> [#uses=1]
+	%588 = load %struct.edge_rec*, %struct.edge_rec** %587, align 4		; <%struct.edge_rec*> [#uses=1]
 	%589 = ptrtoint %struct.edge_rec* %588 to i32		; <i32> [#uses=2]
 	%590 = add i32 %589, 16		; <i32> [#uses=1]
 	%591 = and i32 %590, 63		; <i32> [#uses=1]
@@ -800,7 +800,7 @@ bb24.i:		; preds = %bb23.i, %bb21.i
 	store %struct.VERTEX* %495, %struct.VERTEX** %607, align 4
 	%608 = getelementptr %struct.edge_rec, %struct.edge_rec* %602, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
 	store %struct.edge_rec* %600, %struct.edge_rec** %608, align 4
-	%609 = load %struct.edge_rec** %596, align 4		; <%struct.edge_rec*> [#uses=1]
+	%609 = load %struct.edge_rec*, %struct.edge_rec** %596, align 4		; <%struct.edge_rec*> [#uses=1]
 	%610 = ptrtoint %struct.edge_rec* %609 to i32		; <i32> [#uses=2]
 	%611 = add i32 %610, 16		; <i32> [#uses=1]
 	%612 = and i32 %611, 63		; <i32> [#uses=1]
@@ -808,7 +808,7 @@ bb24.i:		; preds = %bb23.i, %bb21.i
 	%614 = or i32 %612, %613		; <i32> [#uses=1]
 	%615 = inttoptr i32 %614 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%616 = getelementptr %struct.edge_rec, %struct.edge_rec* %594, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%617 = load %struct.edge_rec** %616, align 4		; <%struct.edge_rec*> [#uses=1]
+	%617 = load %struct.edge_rec*, %struct.edge_rec** %616, align 4		; <%struct.edge_rec*> [#uses=1]
 	%618 = ptrtoint %struct.edge_rec* %617 to i32		; <i32> [#uses=2]
 	%619 = add i32 %618, 16		; <i32> [#uses=1]
 	%620 = and i32 %619, 63		; <i32> [#uses=1]
@@ -816,19 +816,19 @@ bb24.i:		; preds = %bb23.i, %bb21.i
 	%622 = or i32 %620, %621		; <i32> [#uses=1]
 	%623 = inttoptr i32 %622 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%624 = getelementptr %struct.edge_rec, %struct.edge_rec* %623, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%625 = load %struct.edge_rec** %624, align 4		; <%struct.edge_rec*> [#uses=1]
+	%625 = load %struct.edge_rec*, %struct.edge_rec** %624, align 4		; <%struct.edge_rec*> [#uses=1]
 	%626 = getelementptr %struct.edge_rec, %struct.edge_rec* %615, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%627 = load %struct.edge_rec** %626, align 4		; <%struct.edge_rec*> [#uses=1]
+	%627 = load %struct.edge_rec*, %struct.edge_rec** %626, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %625, %struct.edge_rec** %626, align 4
 	store %struct.edge_rec* %627, %struct.edge_rec** %624, align 4
-	%628 = load %struct.edge_rec** %596, align 4		; <%struct.edge_rec*> [#uses=1]
-	%629 = load %struct.edge_rec** %616, align 4		; <%struct.edge_rec*> [#uses=1]
+	%628 = load %struct.edge_rec*, %struct.edge_rec** %596, align 4		; <%struct.edge_rec*> [#uses=1]
+	%629 = load %struct.edge_rec*, %struct.edge_rec** %616, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %628, %struct.edge_rec** %616, align 4
 	store %struct.edge_rec* %629, %struct.edge_rec** %596, align 4
 	%630 = xor i32 %598, 32		; <i32> [#uses=2]
 	%631 = inttoptr i32 %630 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%632 = getelementptr %struct.edge_rec, %struct.edge_rec* %631, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%633 = load %struct.edge_rec** %632, align 4		; <%struct.edge_rec*> [#uses=1]
+	%633 = load %struct.edge_rec*, %struct.edge_rec** %632, align 4		; <%struct.edge_rec*> [#uses=1]
 	%634 = ptrtoint %struct.edge_rec* %633 to i32		; <i32> [#uses=2]
 	%635 = add i32 %634, 16		; <i32> [#uses=1]
 	%636 = and i32 %635, 63		; <i32> [#uses=1]
@@ -836,7 +836,7 @@ bb24.i:		; preds = %bb23.i, %bb21.i
 	%638 = or i32 %636, %637		; <i32> [#uses=1]
 	%639 = inttoptr i32 %638 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%640 = getelementptr %struct.edge_rec, %struct.edge_rec* %174, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%641 = load %struct.edge_rec** %640, align 4		; <%struct.edge_rec*> [#uses=1]
+	%641 = load %struct.edge_rec*, %struct.edge_rec** %640, align 4		; <%struct.edge_rec*> [#uses=1]
 	%642 = ptrtoint %struct.edge_rec* %641 to i32		; <i32> [#uses=2]
 	%643 = add i32 %642, 16		; <i32> [#uses=1]
 	%644 = and i32 %643, 63		; <i32> [#uses=1]
@@ -844,13 +844,13 @@ bb24.i:		; preds = %bb23.i, %bb21.i
 	%646 = or i32 %644, %645		; <i32> [#uses=1]
 	%647 = inttoptr i32 %646 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%648 = getelementptr %struct.edge_rec, %struct.edge_rec* %647, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%649 = load %struct.edge_rec** %648, align 4		; <%struct.edge_rec*> [#uses=1]
+	%649 = load %struct.edge_rec*, %struct.edge_rec** %648, align 4		; <%struct.edge_rec*> [#uses=1]
 	%650 = getelementptr %struct.edge_rec, %struct.edge_rec* %639, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%651 = load %struct.edge_rec** %650, align 4		; <%struct.edge_rec*> [#uses=1]
+	%651 = load %struct.edge_rec*, %struct.edge_rec** %650, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %649, %struct.edge_rec** %650, align 4
 	store %struct.edge_rec* %651, %struct.edge_rec** %648, align 4
-	%652 = load %struct.edge_rec** %632, align 4		; <%struct.edge_rec*> [#uses=1]
-	%653 = load %struct.edge_rec** %640, align 4		; <%struct.edge_rec*> [#uses=1]
+	%652 = load %struct.edge_rec*, %struct.edge_rec** %632, align 4		; <%struct.edge_rec*> [#uses=1]
+	%653 = load %struct.edge_rec*, %struct.edge_rec** %640, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %652, %struct.edge_rec** %640, align 4
 	store %struct.edge_rec* %653, %struct.edge_rec** %632, align 4
 	%654 = add i32 %630, 48		; <i32> [#uses=1]
@@ -859,7 +859,7 @@ bb24.i:		; preds = %bb23.i, %bb21.i
 	%657 = or i32 %655, %656		; <i32> [#uses=1]
 	%658 = inttoptr i32 %657 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%659 = getelementptr %struct.edge_rec, %struct.edge_rec* %658, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%660 = load %struct.edge_rec** %659, align 4		; <%struct.edge_rec*> [#uses=1]
+	%660 = load %struct.edge_rec*, %struct.edge_rec** %659, align 4		; <%struct.edge_rec*> [#uses=1]
 	%661 = ptrtoint %struct.edge_rec* %660 to i32		; <i32> [#uses=2]
 	%662 = add i32 %661, 16		; <i32> [#uses=1]
 	%663 = and i32 %662, 63		; <i32> [#uses=1]
@@ -875,7 +875,7 @@ bb25.i:		; preds = %bb23.i, %bb22.i
 	%670 = or i32 %668, %669		; <i32> [#uses=1]
 	%671 = inttoptr i32 %670 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%672 = getelementptr %struct.edge_rec, %struct.edge_rec* %671, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%673 = load %struct.edge_rec** %672, align 4		; <%struct.edge_rec*> [#uses=1]
+	%673 = load %struct.edge_rec*, %struct.edge_rec** %672, align 4		; <%struct.edge_rec*> [#uses=1]
 	%674 = ptrtoint %struct.edge_rec* %673 to i32		; <i32> [#uses=2]
 	%675 = add i32 %674, 16		; <i32> [#uses=1]
 	%676 = and i32 %675, 63		; <i32> [#uses=1]
@@ -902,7 +902,7 @@ bb25.i:		; preds = %bb23.i, %bb22.i
 	store %struct.VERTEX* %496, %struct.VERTEX** %692, align 4
 	%693 = getelementptr %struct.edge_rec, %struct.edge_rec* %687, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
 	store %struct.edge_rec* %685, %struct.edge_rec** %693, align 4
-	%694 = load %struct.edge_rec** %681, align 4		; <%struct.edge_rec*> [#uses=1]
+	%694 = load %struct.edge_rec*, %struct.edge_rec** %681, align 4		; <%struct.edge_rec*> [#uses=1]
 	%695 = ptrtoint %struct.edge_rec* %694 to i32		; <i32> [#uses=2]
 	%696 = add i32 %695, 16		; <i32> [#uses=1]
 	%697 = and i32 %696, 63		; <i32> [#uses=1]
@@ -910,7 +910,7 @@ bb25.i:		; preds = %bb23.i, %bb22.i
 	%699 = or i32 %697, %698		; <i32> [#uses=1]
 	%700 = inttoptr i32 %699 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%701 = getelementptr %struct.edge_rec, %struct.edge_rec* %499, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%702 = load %struct.edge_rec** %701, align 4		; <%struct.edge_rec*> [#uses=1]
+	%702 = load %struct.edge_rec*, %struct.edge_rec** %701, align 4		; <%struct.edge_rec*> [#uses=1]
 	%703 = ptrtoint %struct.edge_rec* %702 to i32		; <i32> [#uses=2]
 	%704 = add i32 %703, 16		; <i32> [#uses=1]
 	%705 = and i32 %704, 63		; <i32> [#uses=1]
@@ -918,19 +918,19 @@ bb25.i:		; preds = %bb23.i, %bb22.i
 	%707 = or i32 %705, %706		; <i32> [#uses=1]
 	%708 = inttoptr i32 %707 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%709 = getelementptr %struct.edge_rec, %struct.edge_rec* %708, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%710 = load %struct.edge_rec** %709, align 4		; <%struct.edge_rec*> [#uses=1]
+	%710 = load %struct.edge_rec*, %struct.edge_rec** %709, align 4		; <%struct.edge_rec*> [#uses=1]
 	%711 = getelementptr %struct.edge_rec, %struct.edge_rec* %700, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%712 = load %struct.edge_rec** %711, align 4		; <%struct.edge_rec*> [#uses=1]
+	%712 = load %struct.edge_rec*, %struct.edge_rec** %711, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %710, %struct.edge_rec** %711, align 4
 	store %struct.edge_rec* %712, %struct.edge_rec** %709, align 4
-	%713 = load %struct.edge_rec** %681, align 4		; <%struct.edge_rec*> [#uses=1]
-	%714 = load %struct.edge_rec** %701, align 4		; <%struct.edge_rec*> [#uses=1]
+	%713 = load %struct.edge_rec*, %struct.edge_rec** %681, align 4		; <%struct.edge_rec*> [#uses=1]
+	%714 = load %struct.edge_rec*, %struct.edge_rec** %701, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %713, %struct.edge_rec** %701, align 4
 	store %struct.edge_rec* %714, %struct.edge_rec** %681, align 4
 	%715 = xor i32 %683, 32		; <i32> [#uses=1]
 	%716 = inttoptr i32 %715 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=2]
 	%717 = getelementptr %struct.edge_rec, %struct.edge_rec* %716, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%718 = load %struct.edge_rec** %717, align 4		; <%struct.edge_rec*> [#uses=1]
+	%718 = load %struct.edge_rec*, %struct.edge_rec** %717, align 4		; <%struct.edge_rec*> [#uses=1]
 	%719 = ptrtoint %struct.edge_rec* %718 to i32		; <i32> [#uses=2]
 	%720 = add i32 %719, 16		; <i32> [#uses=1]
 	%721 = and i32 %720, 63		; <i32> [#uses=1]
@@ -938,7 +938,7 @@ bb25.i:		; preds = %bb23.i, %bb22.i
 	%723 = or i32 %721, %722		; <i32> [#uses=1]
 	%724 = inttoptr i32 %723 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%725 = getelementptr %struct.edge_rec, %struct.edge_rec* %679, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%726 = load %struct.edge_rec** %725, align 4		; <%struct.edge_rec*> [#uses=1]
+	%726 = load %struct.edge_rec*, %struct.edge_rec** %725, align 4		; <%struct.edge_rec*> [#uses=1]
 	%727 = ptrtoint %struct.edge_rec* %726 to i32		; <i32> [#uses=2]
 	%728 = add i32 %727, 16		; <i32> [#uses=1]
 	%729 = and i32 %728, 63		; <i32> [#uses=1]
@@ -946,21 +946,21 @@ bb25.i:		; preds = %bb23.i, %bb22.i
 	%731 = or i32 %729, %730		; <i32> [#uses=1]
 	%732 = inttoptr i32 %731 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%733 = getelementptr %struct.edge_rec, %struct.edge_rec* %732, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%734 = load %struct.edge_rec** %733, align 4		; <%struct.edge_rec*> [#uses=1]
+	%734 = load %struct.edge_rec*, %struct.edge_rec** %733, align 4		; <%struct.edge_rec*> [#uses=1]
 	%735 = getelementptr %struct.edge_rec, %struct.edge_rec* %724, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%736 = load %struct.edge_rec** %735, align 4		; <%struct.edge_rec*> [#uses=1]
+	%736 = load %struct.edge_rec*, %struct.edge_rec** %735, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %734, %struct.edge_rec** %735, align 4
 	store %struct.edge_rec* %736, %struct.edge_rec** %733, align 4
-	%737 = load %struct.edge_rec** %717, align 4		; <%struct.edge_rec*> [#uses=1]
-	%738 = load %struct.edge_rec** %725, align 4		; <%struct.edge_rec*> [#uses=1]
+	%737 = load %struct.edge_rec*, %struct.edge_rec** %717, align 4		; <%struct.edge_rec*> [#uses=1]
+	%738 = load %struct.edge_rec*, %struct.edge_rec** %725, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %737, %struct.edge_rec** %725, align 4
 	store %struct.edge_rec* %738, %struct.edge_rec** %717, align 4
-	%739 = load %struct.edge_rec** %681, align 4		; <%struct.edge_rec*> [#uses=1]
+	%739 = load %struct.edge_rec*, %struct.edge_rec** %681, align 4		; <%struct.edge_rec*> [#uses=1]
 	br label %bb9.i
 
 do_merge.exit:		; preds = %bb17.i
 	%740 = getelementptr %struct.edge_rec, %struct.edge_rec* %ldo_addr.0.ph.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%741 = load %struct.VERTEX** %740, align 4		; <%struct.VERTEX*> [#uses=1]
+	%741 = load %struct.VERTEX*, %struct.VERTEX** %740, align 4		; <%struct.VERTEX*> [#uses=1]
 	%742 = icmp eq %struct.VERTEX* %741, %tree_addr.0.i		; <i1> [#uses=1]
 	br i1 %742, label %bb5.loopexit, label %bb2
 
@@ -970,28 +970,28 @@ bb2:		; preds = %bb2, %do_merge.exit
 	%744 = xor i32 %743, 32		; <i32> [#uses=1]
 	%745 = inttoptr i32 %744 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%746 = getelementptr %struct.edge_rec, %struct.edge_rec* %745, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%747 = load %struct.edge_rec** %746, align 4		; <%struct.edge_rec*> [#uses=3]
+	%747 = load %struct.edge_rec*, %struct.edge_rec** %746, align 4		; <%struct.edge_rec*> [#uses=3]
 	%748 = getelementptr %struct.edge_rec, %struct.edge_rec* %747, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%749 = load %struct.VERTEX** %748, align 4		; <%struct.VERTEX*> [#uses=1]
+	%749 = load %struct.VERTEX*, %struct.VERTEX** %748, align 4		; <%struct.VERTEX*> [#uses=1]
 	%750 = icmp eq %struct.VERTEX* %749, %tree_addr.0.i		; <i1> [#uses=1]
 	br i1 %750, label %bb5.loopexit, label %bb2
 
 bb4:		; preds = %bb5.loopexit, %bb4
 	%rdo.05 = phi %struct.edge_rec* [ %755, %bb4 ], [ %rdo_addr.0.i, %bb5.loopexit ]		; <%struct.edge_rec*> [#uses=1]
 	%751 = getelementptr %struct.edge_rec, %struct.edge_rec* %rdo.05, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%752 = load %struct.edge_rec** %751, align 4		; <%struct.edge_rec*> [#uses=1]
+	%752 = load %struct.edge_rec*, %struct.edge_rec** %751, align 4		; <%struct.edge_rec*> [#uses=1]
 	%753 = ptrtoint %struct.edge_rec* %752 to i32		; <i32> [#uses=1]
 	%754 = xor i32 %753, 32		; <i32> [#uses=1]
 	%755 = inttoptr i32 %754 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=3]
 	%756 = getelementptr %struct.edge_rec, %struct.edge_rec* %755, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%757 = load %struct.VERTEX** %756, align 4		; <%struct.VERTEX*> [#uses=1]
+	%757 = load %struct.VERTEX*, %struct.VERTEX** %756, align 4		; <%struct.VERTEX*> [#uses=1]
 	%758 = icmp eq %struct.VERTEX* %757, %extra		; <i1> [#uses=1]
 	br i1 %758, label %bb6, label %bb4
 
 bb5.loopexit:		; preds = %bb2, %do_merge.exit
 	%ldo.0.lcssa = phi %struct.edge_rec* [ %ldo_addr.0.ph.i, %do_merge.exit ], [ %747, %bb2 ]		; <%struct.edge_rec*> [#uses=1]
 	%759 = getelementptr %struct.edge_rec, %struct.edge_rec* %rdo_addr.0.i, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%760 = load %struct.VERTEX** %759, align 4		; <%struct.VERTEX*> [#uses=1]
+	%760 = load %struct.VERTEX*, %struct.VERTEX** %759, align 4		; <%struct.VERTEX*> [#uses=1]
 	%761 = icmp eq %struct.VERTEX* %760, %extra		; <i1> [#uses=1]
 	br i1 %761, label %bb6, label %bb4
 
@@ -1003,7 +1003,7 @@ bb6:		; preds = %bb5.loopexit, %bb4
 
 bb7:		; preds = %bb
 	%762 = getelementptr %struct.VERTEX, %struct.VERTEX* %tree, i32 0, i32 1		; <%struct.VERTEX**> [#uses=1]
-	%763 = load %struct.VERTEX** %762, align 4		; <%struct.VERTEX*> [#uses=4]
+	%763 = load %struct.VERTEX*, %struct.VERTEX** %762, align 4		; <%struct.VERTEX*> [#uses=4]
 	%764 = icmp eq %struct.VERTEX* %763, null		; <i1> [#uses=1]
 	%765 = call  %struct.edge_rec* @alloc_edge() nounwind		; <%struct.edge_rec*> [#uses=5]
 	%766 = getelementptr %struct.edge_rec, %struct.edge_rec* %765, i32 0, i32 1		; <%struct.edge_rec**> [#uses=4]
@@ -1076,14 +1076,14 @@ bb11:		; preds = %bb7
 	%806 = xor i32 %781, 32		; <i32> [#uses=1]
 	%807 = inttoptr i32 %806 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%808 = getelementptr %struct.edge_rec, %struct.edge_rec* %807, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%809 = load %struct.edge_rec** %808, align 4		; <%struct.edge_rec*> [#uses=1]
+	%809 = load %struct.edge_rec*, %struct.edge_rec** %808, align 4		; <%struct.edge_rec*> [#uses=1]
 	%810 = ptrtoint %struct.edge_rec* %809 to i32		; <i32> [#uses=2]
 	%811 = add i32 %810, 16		; <i32> [#uses=1]
 	%812 = and i32 %811, 63		; <i32> [#uses=1]
 	%813 = and i32 %810, -64		; <i32> [#uses=1]
 	%814 = or i32 %812, %813		; <i32> [#uses=1]
 	%815 = inttoptr i32 %814 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
-	%816 = load %struct.edge_rec** %793, align 4		; <%struct.edge_rec*> [#uses=1]
+	%816 = load %struct.edge_rec*, %struct.edge_rec** %793, align 4		; <%struct.edge_rec*> [#uses=1]
 	%817 = ptrtoint %struct.edge_rec* %816 to i32		; <i32> [#uses=2]
 	%818 = add i32 %817, 16		; <i32> [#uses=1]
 	%819 = and i32 %818, 63		; <i32> [#uses=1]
@@ -1091,32 +1091,32 @@ bb11:		; preds = %bb7
 	%821 = or i32 %819, %820		; <i32> [#uses=1]
 	%822 = inttoptr i32 %821 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%823 = getelementptr %struct.edge_rec, %struct.edge_rec* %822, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%824 = load %struct.edge_rec** %823, align 4		; <%struct.edge_rec*> [#uses=1]
+	%824 = load %struct.edge_rec*, %struct.edge_rec** %823, align 4		; <%struct.edge_rec*> [#uses=1]
 	%825 = getelementptr %struct.edge_rec, %struct.edge_rec* %815, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%826 = load %struct.edge_rec** %825, align 4		; <%struct.edge_rec*> [#uses=1]
+	%826 = load %struct.edge_rec*, %struct.edge_rec** %825, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %824, %struct.edge_rec** %825, align 4
 	store %struct.edge_rec* %826, %struct.edge_rec** %823, align 4
-	%827 = load %struct.edge_rec** %808, align 4		; <%struct.edge_rec*> [#uses=1]
-	%828 = load %struct.edge_rec** %793, align 4		; <%struct.edge_rec*> [#uses=1]
+	%827 = load %struct.edge_rec*, %struct.edge_rec** %808, align 4		; <%struct.edge_rec*> [#uses=1]
+	%828 = load %struct.edge_rec*, %struct.edge_rec** %793, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %827, %struct.edge_rec** %793, align 4
 	store %struct.edge_rec* %828, %struct.edge_rec** %808, align 4
 	%829 = xor i32 %795, 32		; <i32> [#uses=3]
 	%830 = inttoptr i32 %829 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%831 = getelementptr %struct.edge_rec, %struct.edge_rec* %830, i32 0, i32 0		; <%struct.VERTEX**> [#uses=1]
-	%832 = load %struct.VERTEX** %831, align 4		; <%struct.VERTEX*> [#uses=1]
+	%832 = load %struct.VERTEX*, %struct.VERTEX** %831, align 4		; <%struct.VERTEX*> [#uses=1]
 	%833 = and i32 %798, 63		; <i32> [#uses=1]
 	%834 = and i32 %795, -64		; <i32> [#uses=1]
 	%835 = or i32 %833, %834		; <i32> [#uses=1]
 	%836 = inttoptr i32 %835 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%837 = getelementptr %struct.edge_rec, %struct.edge_rec* %836, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%838 = load %struct.edge_rec** %837, align 4		; <%struct.edge_rec*> [#uses=1]
+	%838 = load %struct.edge_rec*, %struct.edge_rec** %837, align 4		; <%struct.edge_rec*> [#uses=1]
 	%839 = ptrtoint %struct.edge_rec* %838 to i32		; <i32> [#uses=2]
 	%840 = add i32 %839, 16		; <i32> [#uses=1]
 	%841 = and i32 %840, 63		; <i32> [#uses=1]
 	%842 = and i32 %839, -64		; <i32> [#uses=1]
 	%843 = or i32 %841, %842		; <i32> [#uses=1]
 	%844 = inttoptr i32 %843 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
-	%845 = load %struct.VERTEX** %767, align 4		; <%struct.VERTEX*> [#uses=1]
+	%845 = load %struct.VERTEX*, %struct.VERTEX** %767, align 4		; <%struct.VERTEX*> [#uses=1]
 	%846 = call  %struct.edge_rec* @alloc_edge() nounwind		; <%struct.edge_rec*> [#uses=4]
 	%847 = getelementptr %struct.edge_rec, %struct.edge_rec* %846, i32 0, i32 1		; <%struct.edge_rec**> [#uses=7]
 	store %struct.edge_rec* %846, %struct.edge_rec** %847, align 4
@@ -1137,7 +1137,7 @@ bb11:		; preds = %bb7
 	store %struct.VERTEX* %845, %struct.VERTEX** %858, align 4
 	%859 = getelementptr %struct.edge_rec, %struct.edge_rec* %853, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
 	store %struct.edge_rec* %851, %struct.edge_rec** %859, align 4
-	%860 = load %struct.edge_rec** %847, align 4		; <%struct.edge_rec*> [#uses=1]
+	%860 = load %struct.edge_rec*, %struct.edge_rec** %847, align 4		; <%struct.edge_rec*> [#uses=1]
 	%861 = ptrtoint %struct.edge_rec* %860 to i32		; <i32> [#uses=2]
 	%862 = add i32 %861, 16		; <i32> [#uses=1]
 	%863 = and i32 %862, 63		; <i32> [#uses=1]
@@ -1145,7 +1145,7 @@ bb11:		; preds = %bb7
 	%865 = or i32 %863, %864		; <i32> [#uses=1]
 	%866 = inttoptr i32 %865 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%867 = getelementptr %struct.edge_rec, %struct.edge_rec* %844, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%868 = load %struct.edge_rec** %867, align 4		; <%struct.edge_rec*> [#uses=1]
+	%868 = load %struct.edge_rec*, %struct.edge_rec** %867, align 4		; <%struct.edge_rec*> [#uses=1]
 	%869 = ptrtoint %struct.edge_rec* %868 to i32		; <i32> [#uses=2]
 	%870 = add i32 %869, 16		; <i32> [#uses=1]
 	%871 = and i32 %870, 63		; <i32> [#uses=1]
@@ -1153,26 +1153,26 @@ bb11:		; preds = %bb7
 	%873 = or i32 %871, %872		; <i32> [#uses=1]
 	%874 = inttoptr i32 %873 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%875 = getelementptr %struct.edge_rec, %struct.edge_rec* %874, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%876 = load %struct.edge_rec** %875, align 4		; <%struct.edge_rec*> [#uses=1]
+	%876 = load %struct.edge_rec*, %struct.edge_rec** %875, align 4		; <%struct.edge_rec*> [#uses=1]
 	%877 = getelementptr %struct.edge_rec, %struct.edge_rec* %866, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%878 = load %struct.edge_rec** %877, align 4		; <%struct.edge_rec*> [#uses=1]
+	%878 = load %struct.edge_rec*, %struct.edge_rec** %877, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %876, %struct.edge_rec** %877, align 4
 	store %struct.edge_rec* %878, %struct.edge_rec** %875, align 4
-	%879 = load %struct.edge_rec** %847, align 4		; <%struct.edge_rec*> [#uses=1]
-	%880 = load %struct.edge_rec** %867, align 4		; <%struct.edge_rec*> [#uses=1]
+	%879 = load %struct.edge_rec*, %struct.edge_rec** %847, align 4		; <%struct.edge_rec*> [#uses=1]
+	%880 = load %struct.edge_rec*, %struct.edge_rec** %867, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %879, %struct.edge_rec** %867, align 4
 	store %struct.edge_rec* %880, %struct.edge_rec** %847, align 4
 	%881 = xor i32 %849, 32		; <i32> [#uses=3]
 	%882 = inttoptr i32 %881 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%883 = getelementptr %struct.edge_rec, %struct.edge_rec* %882, i32 0, i32 1		; <%struct.edge_rec**> [#uses=6]
-	%884 = load %struct.edge_rec** %883, align 4		; <%struct.edge_rec*> [#uses=1]
+	%884 = load %struct.edge_rec*, %struct.edge_rec** %883, align 4		; <%struct.edge_rec*> [#uses=1]
 	%885 = ptrtoint %struct.edge_rec* %884 to i32		; <i32> [#uses=2]
 	%886 = add i32 %885, 16		; <i32> [#uses=1]
 	%887 = and i32 %886, 63		; <i32> [#uses=1]
 	%888 = and i32 %885, -64		; <i32> [#uses=1]
 	%889 = or i32 %887, %888		; <i32> [#uses=1]
 	%890 = inttoptr i32 %889 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
-	%891 = load %struct.edge_rec** %766, align 4		; <%struct.edge_rec*> [#uses=1]
+	%891 = load %struct.edge_rec*, %struct.edge_rec** %766, align 4		; <%struct.edge_rec*> [#uses=1]
 	%892 = ptrtoint %struct.edge_rec* %891 to i32		; <i32> [#uses=2]
 	%893 = add i32 %892, 16		; <i32> [#uses=1]
 	%894 = and i32 %893, 63		; <i32> [#uses=1]
@@ -1180,27 +1180,27 @@ bb11:		; preds = %bb7
 	%896 = or i32 %894, %895		; <i32> [#uses=1]
 	%897 = inttoptr i32 %896 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%898 = getelementptr %struct.edge_rec, %struct.edge_rec* %897, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%899 = load %struct.edge_rec** %898, align 4		; <%struct.edge_rec*> [#uses=1]
+	%899 = load %struct.edge_rec*, %struct.edge_rec** %898, align 4		; <%struct.edge_rec*> [#uses=1]
 	%900 = getelementptr %struct.edge_rec, %struct.edge_rec* %890, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%901 = load %struct.edge_rec** %900, align 4		; <%struct.edge_rec*> [#uses=1]
+	%901 = load %struct.edge_rec*, %struct.edge_rec** %900, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %899, %struct.edge_rec** %900, align 4
 	store %struct.edge_rec* %901, %struct.edge_rec** %898, align 4
-	%902 = load %struct.edge_rec** %883, align 4		; <%struct.edge_rec*> [#uses=1]
-	%903 = load %struct.edge_rec** %766, align 4		; <%struct.edge_rec*> [#uses=1]
+	%902 = load %struct.edge_rec*, %struct.edge_rec** %883, align 4		; <%struct.edge_rec*> [#uses=1]
+	%903 = load %struct.edge_rec*, %struct.edge_rec** %766, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %902, %struct.edge_rec** %766, align 4
 	store %struct.edge_rec* %903, %struct.edge_rec** %883, align 4
 	%904 = getelementptr %struct.VERTEX, %struct.VERTEX* %763, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%905 = load double* %904, align 4		; <double> [#uses=2]
+	%905 = load double, double* %904, align 4		; <double> [#uses=2]
 	%906 = getelementptr %struct.VERTEX, %struct.VERTEX* %763, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%907 = load double* %906, align 4		; <double> [#uses=2]
+	%907 = load double, double* %906, align 4		; <double> [#uses=2]
 	%908 = getelementptr %struct.VERTEX, %struct.VERTEX* %extra, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%909 = load double* %908, align 4		; <double> [#uses=3]
+	%909 = load double, double* %908, align 4		; <double> [#uses=3]
 	%910 = getelementptr %struct.VERTEX, %struct.VERTEX* %extra, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%911 = load double* %910, align 4		; <double> [#uses=3]
+	%911 = load double, double* %910, align 4		; <double> [#uses=3]
 	%912 = getelementptr %struct.VERTEX, %struct.VERTEX* %tree, i32 0, i32 0, i32 0		; <double*> [#uses=1]
-	%913 = load double* %912, align 4		; <double> [#uses=3]
+	%913 = load double, double* %912, align 4		; <double> [#uses=3]
 	%914 = getelementptr %struct.VERTEX, %struct.VERTEX* %tree, i32 0, i32 0, i32 1		; <double*> [#uses=1]
-	%915 = load double* %914, align 4		; <double> [#uses=3]
+	%915 = load double, double* %914, align 4		; <double> [#uses=3]
 	%916 = fsub double %905, %913		; <double> [#uses=1]
 	%917 = fsub double %911, %915		; <double> [#uses=1]
 	%918 = fmul double %916, %917		; <double> [#uses=1]
@@ -1228,14 +1228,14 @@ bb14:		; preds = %bb13
 	%934 = or i32 %932, %933		; <i32> [#uses=1]
 	%935 = inttoptr i32 %934 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%936 = getelementptr %struct.edge_rec, %struct.edge_rec* %935, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%937 = load %struct.edge_rec** %936, align 4		; <%struct.edge_rec*> [#uses=1]
+	%937 = load %struct.edge_rec*, %struct.edge_rec** %936, align 4		; <%struct.edge_rec*> [#uses=1]
 	%938 = ptrtoint %struct.edge_rec* %937 to i32		; <i32> [#uses=2]
 	%939 = add i32 %938, 16		; <i32> [#uses=1]
 	%940 = and i32 %939, 63		; <i32> [#uses=1]
 	%941 = and i32 %938, -64		; <i32> [#uses=1]
 	%942 = or i32 %940, %941		; <i32> [#uses=1]
 	%943 = inttoptr i32 %942 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
-	%944 = load %struct.edge_rec** %847, align 4		; <%struct.edge_rec*> [#uses=1]
+	%944 = load %struct.edge_rec*, %struct.edge_rec** %847, align 4		; <%struct.edge_rec*> [#uses=1]
 	%945 = ptrtoint %struct.edge_rec* %944 to i32		; <i32> [#uses=2]
 	%946 = add i32 %945, 16		; <i32> [#uses=1]
 	%947 = and i32 %946, 63		; <i32> [#uses=1]
@@ -1243,7 +1243,7 @@ bb14:		; preds = %bb13
 	%949 = or i32 %947, %948		; <i32> [#uses=1]
 	%950 = inttoptr i32 %949 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%951 = getelementptr %struct.edge_rec, %struct.edge_rec* %943, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%952 = load %struct.edge_rec** %951, align 4		; <%struct.edge_rec*> [#uses=1]
+	%952 = load %struct.edge_rec*, %struct.edge_rec** %951, align 4		; <%struct.edge_rec*> [#uses=1]
 	%953 = ptrtoint %struct.edge_rec* %952 to i32		; <i32> [#uses=2]
 	%954 = add i32 %953, 16		; <i32> [#uses=1]
 	%955 = and i32 %954, 63		; <i32> [#uses=1]
@@ -1251,13 +1251,13 @@ bb14:		; preds = %bb13
 	%957 = or i32 %955, %956		; <i32> [#uses=1]
 	%958 = inttoptr i32 %957 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%959 = getelementptr %struct.edge_rec, %struct.edge_rec* %958, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%960 = load %struct.edge_rec** %959, align 4		; <%struct.edge_rec*> [#uses=1]
+	%960 = load %struct.edge_rec*, %struct.edge_rec** %959, align 4		; <%struct.edge_rec*> [#uses=1]
 	%961 = getelementptr %struct.edge_rec, %struct.edge_rec* %950, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%962 = load %struct.edge_rec** %961, align 4		; <%struct.edge_rec*> [#uses=1]
+	%962 = load %struct.edge_rec*, %struct.edge_rec** %961, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %960, %struct.edge_rec** %961, align 4
 	store %struct.edge_rec* %962, %struct.edge_rec** %959, align 4
-	%963 = load %struct.edge_rec** %847, align 4		; <%struct.edge_rec*> [#uses=1]
-	%964 = load %struct.edge_rec** %951, align 4		; <%struct.edge_rec*> [#uses=1]
+	%963 = load %struct.edge_rec*, %struct.edge_rec** %847, align 4		; <%struct.edge_rec*> [#uses=1]
+	%964 = load %struct.edge_rec*, %struct.edge_rec** %951, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %963, %struct.edge_rec** %951, align 4
 	store %struct.edge_rec* %964, %struct.edge_rec** %847, align 4
 	%965 = add i32 %881, 16		; <i32> [#uses=1]
@@ -1265,14 +1265,14 @@ bb14:		; preds = %bb13
 	%967 = or i32 %966, %933		; <i32> [#uses=1]
 	%968 = inttoptr i32 %967 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%969 = getelementptr %struct.edge_rec, %struct.edge_rec* %968, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
-	%970 = load %struct.edge_rec** %969, align 4		; <%struct.edge_rec*> [#uses=1]
+	%970 = load %struct.edge_rec*, %struct.edge_rec** %969, align 4		; <%struct.edge_rec*> [#uses=1]
 	%971 = ptrtoint %struct.edge_rec* %970 to i32		; <i32> [#uses=2]
 	%972 = add i32 %971, 16		; <i32> [#uses=1]
 	%973 = and i32 %972, 63		; <i32> [#uses=1]
 	%974 = and i32 %971, -64		; <i32> [#uses=1]
 	%975 = or i32 %973, %974		; <i32> [#uses=1]
 	%976 = inttoptr i32 %975 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
-	%977 = load %struct.edge_rec** %883, align 4		; <%struct.edge_rec*> [#uses=1]
+	%977 = load %struct.edge_rec*, %struct.edge_rec** %883, align 4		; <%struct.edge_rec*> [#uses=1]
 	%978 = ptrtoint %struct.edge_rec* %977 to i32		; <i32> [#uses=2]
 	%979 = add i32 %978, 16		; <i32> [#uses=1]
 	%980 = and i32 %979, 63		; <i32> [#uses=1]
@@ -1280,7 +1280,7 @@ bb14:		; preds = %bb13
 	%982 = or i32 %980, %981		; <i32> [#uses=1]
 	%983 = inttoptr i32 %982 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%984 = getelementptr %struct.edge_rec, %struct.edge_rec* %976, i32 0, i32 1		; <%struct.edge_rec**> [#uses=3]
-	%985 = load %struct.edge_rec** %984, align 4		; <%struct.edge_rec*> [#uses=1]
+	%985 = load %struct.edge_rec*, %struct.edge_rec** %984, align 4		; <%struct.edge_rec*> [#uses=1]
 	%986 = ptrtoint %struct.edge_rec* %985 to i32		; <i32> [#uses=2]
 	%987 = add i32 %986, 16		; <i32> [#uses=1]
 	%988 = and i32 %987, 63		; <i32> [#uses=1]
@@ -1288,17 +1288,17 @@ bb14:		; preds = %bb13
 	%990 = or i32 %988, %989		; <i32> [#uses=1]
 	%991 = inttoptr i32 %990 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=1]
 	%992 = getelementptr %struct.edge_rec, %struct.edge_rec* %991, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%993 = load %struct.edge_rec** %992, align 4		; <%struct.edge_rec*> [#uses=1]
+	%993 = load %struct.edge_rec*, %struct.edge_rec** %992, align 4		; <%struct.edge_rec*> [#uses=1]
 	%994 = getelementptr %struct.edge_rec, %struct.edge_rec* %983, i32 0, i32 1		; <%struct.edge_rec**> [#uses=2]
-	%995 = load %struct.edge_rec** %994, align 4		; <%struct.edge_rec*> [#uses=1]
+	%995 = load %struct.edge_rec*, %struct.edge_rec** %994, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %993, %struct.edge_rec** %994, align 4
 	store %struct.edge_rec* %995, %struct.edge_rec** %992, align 4
-	%996 = load %struct.edge_rec** %883, align 4		; <%struct.edge_rec*> [#uses=1]
-	%997 = load %struct.edge_rec** %984, align 4		; <%struct.edge_rec*> [#uses=1]
+	%996 = load %struct.edge_rec*, %struct.edge_rec** %883, align 4		; <%struct.edge_rec*> [#uses=1]
+	%997 = load %struct.edge_rec*, %struct.edge_rec** %984, align 4		; <%struct.edge_rec*> [#uses=1]
 	store %struct.edge_rec* %996, %struct.edge_rec** %984, align 4
 	store %struct.edge_rec* %997, %struct.edge_rec** %883, align 4
 	%998 = inttoptr i32 %933 to %struct.edge_rec*		; <%struct.edge_rec*> [#uses=2]
-	%999 = load %struct.edge_rec** @avail_edge, align 4		; <%struct.edge_rec*> [#uses=1]
+	%999 = load %struct.edge_rec*, %struct.edge_rec** @avail_edge, align 4		; <%struct.edge_rec*> [#uses=1]
 	%1000 = getelementptr %struct.edge_rec, %struct.edge_rec* %998, i32 0, i32 1		; <%struct.edge_rec**> [#uses=1]
 	store %struct.edge_rec* %999, %struct.edge_rec** %1000, align 4
 	store %struct.edge_rec* %998, %struct.edge_rec** @avail_edge, align 4

Modified: llvm/trunk/test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll Fri Feb 27 15:17:42 2015
@@ -83,7 +83,7 @@ bb45:		; preds = %bb43.preheader, %cli_c
 	br i1 undef, label %cli_calloc.exit70.thread, label %cli_calloc.exit70
 
 bb52:		; preds = %cli_calloc.exit
-	%0 = load i16* undef, align 4		; <i16> [#uses=1]
+	%0 = load i16, i16* undef, align 4		; <i16> [#uses=1]
 	%1 = icmp eq i16 %0, 0		; <i1> [#uses=1]
 	%iftmp.20.0 = select i1 %1, i8* %hexsig, i8* null		; <i8*> [#uses=1]
 	%2 = tail call  i32 @strlen(i8* %iftmp.20.0) nounwind readonly		; <i32> [#uses=0]

Modified: llvm/trunk/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll Fri Feb 27 15:17:42 2015
@@ -67,7 +67,7 @@ bb18:		; preds = %bb18, %bb.nph
 bb22:		; preds = %bb18, %bb17
 	%0 = getelementptr i8, i8* null, i32 10		; <i8*> [#uses=1]
 	%1 = bitcast i8* %0 to i16*		; <i16*> [#uses=1]
-	%2 = load i16* %1, align 2		; <i16> [#uses=1]
+	%2 = load i16, i16* %1, align 2		; <i16> [#uses=1]
 	%3 = add i16 %2, 1		; <i16> [#uses=1]
 	%4 = zext i16 %3 to i32		; <i32> [#uses=1]
 	%5 = mul i32 %4, 3		; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ bb:		; preds = %bb, %entry
 	br i1 undef, label %bb28, label %bb
 
 bb28:		; preds = %bb
-	%0 = load double* @a, align 4		; <double> [#uses=2]
+	%0 = load double, double* @a, align 4		; <double> [#uses=2]
 	%1 = fadd double %0, undef		; <double> [#uses=2]
 	br i1 undef, label %bb59, label %bb60
 

Modified: llvm/trunk/test/CodeGen/ARM/2009-08-02-RegScavengerAssert-Neon.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-08-02-RegScavengerAssert-Neon.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-08-02-RegScavengerAssert-Neon.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-08-02-RegScavengerAssert-Neon.ll Fri Feb 27 15:17:42 2015
@@ -13,17 +13,17 @@ entry:
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
 	store <4 x i32> %v, <4 x i32>* %v_addr
 	store i32 %f, i32* %f_addr
-	%1 = load <4 x i32>* %v_addr, align 16		; <<4 x i32>> [#uses=1]
-	%2 = load i32* %f_addr, align 4		; <i32> [#uses=1]
+	%1 = load <4 x i32>, <4 x i32>* %v_addr, align 16		; <<4 x i32>> [#uses=1]
+	%2 = load i32, i32* %f_addr, align 4		; <i32> [#uses=1]
 	%3 = insertelement <4 x i32> undef, i32 %2, i32 0		; <<4 x i32>> [#uses=1]
 	%4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> zeroinitializer		; <<4 x i32>> [#uses=1]
 	%5 = mul <4 x i32> %1, %4		; <<4 x i32>> [#uses=1]
 	store <4 x i32> %5, <4 x i32>* %0, align 16
-	%6 = load <4 x i32>* %0, align 16		; <<4 x i32>> [#uses=1]
+	%6 = load <4 x i32>, <4 x i32>* %0, align 16		; <<4 x i32>> [#uses=1]
 	store <4 x i32> %6, <4 x i32>* %retval, align 16
 	br label %return
 
 return:		; preds = %entry
-	%retval1 = load <4 x i32>* %retval		; <<4 x i32>> [#uses=1]
+	%retval1 = load <4 x i32>, <4 x i32>* %retval		; <<4 x i32>> [#uses=1]
 	ret <4 x i32> %retval1
 }

Modified: llvm/trunk/test/CodeGen/ARM/2009-08-15-RegScavenger-EarlyClobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-08-15-RegScavenger-EarlyClobber.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-08-15-RegScavenger-EarlyClobber.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-08-15-RegScavenger-EarlyClobber.ll Fri Feb 27 15:17:42 2015
@@ -29,7 +29,7 @@ bb7:
 bb8:                                              ; preds = %bb7, %entry
   %2 = phi i32 [ 0, %entry ], [ %1, %bb7 ]        ; <i32> [#uses=3]
   %scevgep22 = getelementptr %struct.iovec, %struct.iovec* %iov, i32 %2, i32 0; <i8**> [#uses=0]
-  %3 = load i32* %nr_segs, align 4                ; <i32> [#uses=1]
+  %3 = load i32, i32* %nr_segs, align 4                ; <i32> [#uses=1]
   %4 = icmp ult i32 %2, %3                        ; <i1> [#uses=1]
   br i1 %4, label %bb, label %bb9
 

Modified: llvm/trunk/test/CodeGen/ARM/2009-08-21-PostRAKill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-08-21-PostRAKill.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-08-21-PostRAKill.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-08-21-PostRAKill.ll Fri Feb 27 15:17:42 2015
@@ -9,18 +9,18 @@ target triple = "armv7-apple-darwin9"
 
 define %struct.tree* @tsp(%struct.tree* %t, i32 %nproc) nounwind {
 entry:
-  %t.idx51.val.i = load double* null              ; <double> [#uses=1]
+  %t.idx51.val.i = load double, double* null              ; <double> [#uses=1]
   br i1 undef, label %bb4.i, label %bb.i
 
 bb.i:                                             ; preds = %entry
   unreachable
 
 bb4.i:                                            ; preds = %entry
-  %0 = load %struct.tree** @g, align 4         ; <%struct.tree*> [#uses=2]
+  %0 = load %struct.tree*, %struct.tree** @g, align 4         ; <%struct.tree*> [#uses=2]
   %.idx45.i = getelementptr %struct.tree, %struct.tree* %0, i32 0, i32 1 ; <double*> [#uses=1]
-  %.idx45.val.i = load double* %.idx45.i          ; <double> [#uses=1]
+  %.idx45.val.i = load double, double* %.idx45.i          ; <double> [#uses=1]
   %.idx46.i = getelementptr %struct.tree, %struct.tree* %0, i32 0, i32 2 ; <double*> [#uses=1]
-  %.idx46.val.i = load double* %.idx46.i          ; <double> [#uses=1]
+  %.idx46.val.i = load double, double* %.idx46.i          ; <double> [#uses=1]
   %1 = fsub double 0.000000e+00, %.idx45.val.i    ; <double> [#uses=2]
   %2 = fmul double %1, %1                         ; <double> [#uses=1]
   %3 = fsub double %t.idx51.val.i, %.idx46.val.i  ; <double> [#uses=2]

Modified: llvm/trunk/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll Fri Feb 27 15:17:42 2015
@@ -22,14 +22,14 @@ invcont:
   br label %return
 
 bb:                                               ; preds = %ppad
-  %eh_select = load i32* %eh_selector
+  %eh_select = load i32, i32* %eh_selector
   store i32 %eh_select, i32* %save_filt.1, align 4
-  %eh_value = load i8** %eh_exception
+  %eh_value = load i8*, i8** %eh_exception
   store i8* %eh_value, i8** %save_eptr.0, align 4
   call void @_ZN1AD1Ev(%struct.A* %a) nounwind
-  %0 = load i8** %save_eptr.0, align 4
+  %0 = load i8*, i8** %save_eptr.0, align 4
   store i8* %0, i8** %eh_exception, align 4
-  %1 = load i32* %save_filt.1, align 4
+  %1 = load i32, i32* %save_filt.1, align 4
   store i32 %1, i32* %eh_selector, align 4
   br label %Unwind
 
@@ -49,7 +49,7 @@ ppad:
   br label %bb
 
 Unwind:                                           ; preds = %bb
-  %eh_ptr3 = load i8** %eh_exception
+  %eh_ptr3 = load i8*, i8** %eh_exception
   call void @_Unwind_SjLj_Resume(i8* %eh_ptr3)
   unreachable
 }
@@ -61,7 +61,7 @@ entry:
   store %struct.A* %this, %struct.A** %this_addr
   %0 = call i8* @_Znwm(i32 4)
   %1 = bitcast i8* %0 to i32*
-  %2 = load %struct.A** %this_addr, align 4
+  %2 = load %struct.A*, %struct.A** %this_addr, align 4
   %3 = getelementptr inbounds %struct.A, %struct.A* %2, i32 0, i32 0
   store i32* %1, i32** %3, align 4
   br label %return
@@ -77,9 +77,9 @@ entry:
   %this_addr = alloca %struct.A*
   %"alloca point" = bitcast i32 0 to i32
   store %struct.A* %this, %struct.A** %this_addr
-  %0 = load %struct.A** %this_addr, align 4
+  %0 = load %struct.A*, %struct.A** %this_addr, align 4
   %1 = getelementptr inbounds %struct.A, %struct.A* %0, i32 0, i32 0
-  %2 = load i32** %1, align 4
+  %2 = load i32*, i32** %1, align 4
   %3 = bitcast i32* %2 to i8*
   call void @_ZdlPv(i8* %3) nounwind
   br label %bb

Modified: llvm/trunk/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 define <4 x i16> @v2regbug(<4 x i16>* %B) nounwind {
 ;CHECK-LABEL: v2regbug:
 ;CHECK: vzip.16
-	%tmp1 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %B
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32><i32 0, i32 0, i32 1, i32 1>
 	ret <4 x i16> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll Fri Feb 27 15:17:42 2015
@@ -2,8 +2,8 @@
 ; pr4939
 
 define void @test(double* %x, double* %y) nounwind {
-  %1 = load double* %x
-  %2 = load double* %y
+  %1 = load double, double* %x
+  %2 = load double, double* %y
   %3 = fsub double -0.000000e+00, %1
   %4 = fcmp ugt double %2, %3
   br i1 %4, label %bb1, label %bb2

Modified: llvm/trunk/test/CodeGen/ARM/2009-09-13-InvalidSubreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-09-13-InvalidSubreg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-09-13-InvalidSubreg.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-09-13-InvalidSubreg.ll Fri Feb 27 15:17:42 2015
@@ -13,10 +13,10 @@ declare <2 x float> @llvm.arm.neon.vpadd
 
 define arm_aapcs_vfpcc i8 @foo(%struct.fr* nocapture %this, %struct.obb* %box) nounwind {
 entry:
-  %val.i.i = load <4 x float>* undef              ; <<4 x float>> [#uses=1]
-  %val2.i.i = load <4 x float>* null              ; <<4 x float>> [#uses=1]
+  %val.i.i = load <4 x float>, <4 x float>* undef              ; <<4 x float>> [#uses=1]
+  %val2.i.i = load <4 x float>, <4 x float>* null              ; <<4 x float>> [#uses=1]
   %elt3.i.i = getelementptr inbounds %struct.obb, %struct.obb* %box, i32 0, i32 0, i32 2, i32 0 ; <<4 x float>*> [#uses=1]
-  %val4.i.i = load <4 x float>* %elt3.i.i         ; <<4 x float>> [#uses=1]
+  %val4.i.i = load <4 x float>, <4 x float>* %elt3.i.i         ; <<4 x float>> [#uses=1]
   %0 = shufflevector <2 x float> undef, <2 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
   %1 = fadd <4 x float> undef, zeroinitializer    ; <<4 x float>> [#uses=1]
   br label %bb33

Modified: llvm/trunk/test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll Fri Feb 27 15:17:42 2015
@@ -16,13 +16,13 @@ define arm_aapcs_vfpcc <4 x float> @foo(
   %tmp3738 = inttoptr i32 %tmp37 to float*
   %tmp39 = add i32 %1, 24
   %tmp3940 = inttoptr i32 %tmp39 to float*
-  %2 = load float* %lsr.iv2641, align 4
-  %3 = load float* %tmp2930, align 4
-  %4 = load float* %tmp3132, align 4
-  %5 = load float* %tmp3334, align 4
-  %6 = load float* %tmp3536, align 4
-  %7 = load float* %tmp3738, align 4
-  %8 = load float* %tmp3940, align 4
+  %2 = load float, float* %lsr.iv2641, align 4
+  %3 = load float, float* %tmp2930, align 4
+  %4 = load float, float* %tmp3132, align 4
+  %5 = load float, float* %tmp3334, align 4
+  %6 = load float, float* %tmp3536, align 4
+  %7 = load float, float* %tmp3738, align 4
+  %8 = load float, float* %tmp3940, align 4
   %9 = insertelement <4 x float> undef, float %6, i32 0
   %10 = shufflevector <4 x float> %9, <4 x float> undef, <4 x i32> zeroinitializer
   %11 = insertelement <4 x float> %10, float %7, i32 1

Modified: llvm/trunk/test/CodeGen/ARM/2009-09-23-LiveVariablesBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-09-23-LiveVariablesBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-09-23-LiveVariablesBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-09-23-LiveVariablesBug.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define arm_aapcs_vfpcc %struct.1* @hhh3(
 entry:
   %0 = call arm_aapcs_vfpcc  %struct.4* @sss1(%struct.4* undef, float 0.000000e+00) nounwind ; <%struct.4*> [#uses=0]
   %1 = call arm_aapcs_vfpcc  %struct.4* @qqq1(%struct.4* null, float 5.000000e-01) nounwind ; <%struct.4*> [#uses=0]
-  %val92 = load <4 x float>* null                 ; <<4 x float>> [#uses=1]
+  %val92 = load <4 x float>, <4 x float>* null                 ; <<4 x float>> [#uses=1]
   %2 = call arm_aapcs_vfpcc  %struct.4* @zzz2(%struct.4* undef, <4 x float> %val92) nounwind ; <%struct.4*> [#uses=0]
   ret %struct.1* %this
 }

Modified: llvm/trunk/test/CodeGen/ARM/2009-09-24-spill-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-09-24-spill-align.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-09-24-spill-align.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-09-24-spill-align.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ entry:
   %out_poly16_t = alloca i16                      ; <i16*> [#uses=1]
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
 ; CHECK: vldr
-  %0 = load <4 x i16>* %arg0_poly16x4_t, align 8  ; <<4 x i16>> [#uses=1]
+  %0 = load <4 x i16>, <4 x i16>* %arg0_poly16x4_t, align 8  ; <<4 x i16>> [#uses=1]
   %1 = extractelement <4 x i16> %0, i32 1         ; <i16> [#uses=1]
   store i16 %1, i16* %out_poly16_t, align 2
   br label %return

Modified: llvm/trunk/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@ entry:
   br i1 undef, label %return, label %bb
 
 bb:                                               ; preds = %bb, %entry
-  %0 = load float* undef, align 4                 ; <float> [#uses=1]
-  %1 = load float* null, align 4                  ; <float> [#uses=1]
+  %0 = load float, float* undef, align 4                 ; <float> [#uses=1]
+  %1 = load float, float* null, align 4                  ; <float> [#uses=1]
   %2 = insertelement <4 x float> undef, float undef, i32 1 ; <<4 x float>> [#uses=1]
   %3 = insertelement <4 x float> %2, float %1, i32 2 ; <<4 x float>> [#uses=2]
   %4 = insertelement <4 x float> undef, float %0, i32 2 ; <<4 x float>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/2009-11-02-NegativeLane.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-11-02-NegativeLane.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-11-02-NegativeLane.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-11-02-NegativeLane.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ entry:
 
 bb:                                               ; preds = %bb, %entry
 ; CHECK: vld1.16 {d16[], d17[]}
-  %0 = load i16* undef, align 2
+  %0 = load i16, i16* undef, align 2
   %1 = insertelement <8 x i16> undef, i16 %0, i32 2
   %2 = insertelement <8 x i16> %1, i16 undef, i32 3
   %3 = mul <8 x i16> %2, %2

Modified: llvm/trunk/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ target triple = "armv7-eabi"
 
 define arm_aapcs_vfpcc void @foo() {
 entry:
-  %0 = load float* null, align 4                  ; <float> [#uses=2]
+  %0 = load float, float* null, align 4                  ; <float> [#uses=2]
   %1 = fmul float %0, undef                       ; <float> [#uses=2]
   %2 = fmul float 0.000000e+00, %1                ; <float> [#uses=2]
   %3 = fmul float %0, %1                          ; <float> [#uses=1]
@@ -18,7 +18,7 @@ entry:
   %7 = fsub float %2, undef                       ; <float> [#uses=1]
   %8 = fsub float 0.000000e+00, undef             ; <float> [#uses=3]
   %9 = fadd float %2, undef                       ; <float> [#uses=3]
-  %10 = load float* undef, align 8                ; <float> [#uses=3]
+  %10 = load float, float* undef, align 8                ; <float> [#uses=3]
   %11 = fmul float %8, %10                        ; <float> [#uses=1]
   %12 = fadd float undef, %11                     ; <float> [#uses=2]
   %13 = fmul float undef, undef                   ; <float> [#uses=1]
@@ -30,10 +30,10 @@ entry:
   %19 = fadd float %18, 0.000000e+00              ; <float> [#uses=1]
   %20 = fmul float undef, %10                     ; <float> [#uses=1]
   %21 = fadd float %19, %20                       ; <float> [#uses=1]
-  %22 = load float* undef, align 8                ; <float> [#uses=1]
+  %22 = load float, float* undef, align 8                ; <float> [#uses=1]
   %23 = fmul float %5, %22                        ; <float> [#uses=1]
   %24 = fadd float %23, undef                     ; <float> [#uses=1]
-  %25 = load float* undef, align 8                ; <float> [#uses=2]
+  %25 = load float, float* undef, align 8                ; <float> [#uses=2]
   %26 = fmul float %8, %25                        ; <float> [#uses=1]
   %27 = fadd float %24, %26                       ; <float> [#uses=1]
   %28 = fmul float %9, %25                        ; <float> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/2009-11-13-ScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-11-13-ScavengerAssert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-11-13-ScavengerAssert.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-11-13-ScavengerAssert.ll Fri Feb 27 15:17:42 2015
@@ -14,10 +14,10 @@ entry:
 
 bb:                                               ; preds = %entry
   %0 = getelementptr inbounds %bar, %bar* null, i32 0, i32 0, i32 0, i32 2 ; <float*> [#uses=2]
-  %1 = load float* undef, align 4                 ; <float> [#uses=1]
+  %1 = load float, float* undef, align 4                 ; <float> [#uses=1]
   %2 = fsub float 0.000000e+00, undef             ; <float> [#uses=2]
   %3 = fmul float 0.000000e+00, undef             ; <float> [#uses=1]
-  %4 = load float* %0, align 4                    ; <float> [#uses=3]
+  %4 = load float, float* %0, align 4                    ; <float> [#uses=3]
   %5 = fmul float %4, %2                          ; <float> [#uses=1]
   %6 = fsub float %3, %5                          ; <float> [#uses=1]
   %7 = fmul float %4, undef                       ; <float> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/2009-11-13-ScavengerAssert2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-11-13-ScavengerAssert2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-11-13-ScavengerAssert2.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-11-13-ScavengerAssert2.ll Fri Feb 27 15:17:42 2015
@@ -22,19 +22,19 @@ bb3.i:
   %0 = getelementptr inbounds %quuz, %quuz* %a, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=0]
   %1 = fsub float 0.000000e+00, undef             ; <float> [#uses=1]
   %2 = getelementptr inbounds %quuz, %quuz* %b, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=2]
-  %3 = load float* %2, align 4                    ; <float> [#uses=1]
+  %3 = load float, float* %2, align 4                    ; <float> [#uses=1]
   %4 = getelementptr inbounds %quuz, %quuz* %a, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=1]
   %5 = fsub float %3, undef                       ; <float> [#uses=2]
   %6 = getelementptr inbounds %quuz, %quuz* %b, i32 0, i32 1, i32 0, i32 2 ; <float*> [#uses=2]
-  %7 = load float* %6, align 4                    ; <float> [#uses=1]
+  %7 = load float, float* %6, align 4                    ; <float> [#uses=1]
   %8 = fsub float %7, undef                       ; <float> [#uses=1]
   %9 = getelementptr inbounds %quuz, %quuz* %c, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=2]
-  %10 = load float* %9, align 4                   ; <float> [#uses=1]
+  %10 = load float, float* %9, align 4                   ; <float> [#uses=1]
   %11 = fsub float %10, undef                     ; <float> [#uses=2]
   %12 = getelementptr inbounds %quuz, %quuz* %c, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=2]
-  %13 = load float* %12, align 4                  ; <float> [#uses=1]
+  %13 = load float, float* %12, align 4                  ; <float> [#uses=1]
   %14 = fsub float %13, undef                     ; <float> [#uses=1]
-  %15 = load float* undef, align 4                ; <float> [#uses=1]
+  %15 = load float, float* undef, align 4                ; <float> [#uses=1]
   %16 = fsub float %15, undef                     ; <float> [#uses=1]
   %17 = fmul float %5, %16                        ; <float> [#uses=1]
   %18 = fsub float %17, 0.000000e+00              ; <float> [#uses=5]
@@ -52,11 +52,11 @@ bb3.i:
   %27 = fadd float %26, undef                     ; <float> [#uses=1]
   %28 = fadd float %27, undef                     ; <float> [#uses=1]
   %29 = call arm_aapcs_vfpcc  float @sqrtf(float %28) readnone ; <float> [#uses=1]
-  %30 = load float* null, align 4                 ; <float> [#uses=2]
-  %31 = load float* %4, align 4                   ; <float> [#uses=2]
-  %32 = load float* %2, align 4                   ; <float> [#uses=2]
-  %33 = load float* null, align 4                 ; <float> [#uses=3]
-  %34 = load float* %6, align 4                   ; <float> [#uses=2]
+  %30 = load float, float* null, align 4                 ; <float> [#uses=2]
+  %31 = load float, float* %4, align 4                   ; <float> [#uses=2]
+  %32 = load float, float* %2, align 4                   ; <float> [#uses=2]
+  %33 = load float, float* null, align 4                 ; <float> [#uses=3]
+  %34 = load float, float* %6, align 4                   ; <float> [#uses=2]
   %35 = fsub float %33, %34                       ; <float> [#uses=2]
   %36 = fmul float %20, %35                       ; <float> [#uses=1]
   %37 = fsub float %36, undef                     ; <float> [#uses=1]
@@ -71,12 +71,12 @@ bb3.i:
   %46 = fadd float %44, %45                       ; <float> [#uses=1]
   %47 = fmul float %33, %43                       ; <float> [#uses=1]
   %48 = fadd float %46, %47                       ; <float> [#uses=2]
-  %49 = load float* %9, align 4                   ; <float> [#uses=2]
+  %49 = load float, float* %9, align 4                   ; <float> [#uses=2]
   %50 = fsub float %30, %49                       ; <float> [#uses=1]
-  %51 = load float* %12, align 4                  ; <float> [#uses=3]
+  %51 = load float, float* %12, align 4                  ; <float> [#uses=3]
   %52 = fsub float %32, %51                       ; <float> [#uses=2]
-  %53 = load float* undef, align 4                ; <float> [#uses=2]
-  %54 = load float* %24, align 4                  ; <float> [#uses=2]
+  %53 = load float, float* undef, align 4                ; <float> [#uses=2]
+  %54 = load float, float* %24, align 4                  ; <float> [#uses=2]
   %55 = fmul float %54, undef                     ; <float> [#uses=1]
   %56 = fmul float undef, %52                     ; <float> [#uses=1]
   %57 = fsub float %55, %56                       ; <float> [#uses=1]
@@ -93,7 +93,7 @@ bb3.i:
   %68 = fsub float %51, %31                       ; <float> [#uses=1]
   %69 = fsub float %53, %33                       ; <float> [#uses=1]
   %70 = fmul float undef, %67                     ; <float> [#uses=1]
-  %71 = load float* undef, align 4                ; <float> [#uses=2]
+  %71 = load float, float* undef, align 4                ; <float> [#uses=2]
   %72 = fmul float %71, %69                       ; <float> [#uses=1]
   %73 = fsub float %70, %72                       ; <float> [#uses=1]
   %74 = fmul float %71, %68                       ; <float> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/2009-11-13-VRRewriterCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2009-11-13-VRRewriterCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2009-11-13-VRRewriterCrash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2009-11-13-VRRewriterCrash.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@
 
 define arm_aapcs_vfpcc %bar* @aaa(%foo* nocapture %this, %quuz* %a, %quuz* %b, %quuz* %c, i8 zeroext %forced) {
 entry:
-  %0 = load %bar** undef, align 4                 ; <%bar*> [#uses=2]
+  %0 = load %bar*, %bar** undef, align 4                 ; <%bar*> [#uses=2]
   br i1 false, label %bb85, label %bb
 
 bb:                                               ; preds = %entry
@@ -43,10 +43,10 @@ bb3.i:
   %18 = fadd float %17, undef                     ; <float> [#uses=1]
   %19 = call arm_aapcs_vfpcc  float @sqrtf(float %18) readnone ; <float> [#uses=2]
   %20 = fcmp ogt float %19, 0x3F1A36E2E0000000    ; <i1> [#uses=1]
-  %21 = load float* %1, align 4                   ; <float> [#uses=2]
-  %22 = load float* %3, align 4                   ; <float> [#uses=2]
-  %23 = load float* undef, align 4                ; <float> [#uses=2]
-  %24 = load float* %4, align 4                   ; <float> [#uses=2]
+  %21 = load float, float* %1, align 4                   ; <float> [#uses=2]
+  %22 = load float, float* %3, align 4                   ; <float> [#uses=2]
+  %23 = load float, float* undef, align 4                ; <float> [#uses=2]
+  %24 = load float, float* %4, align 4                   ; <float> [#uses=2]
   %25 = fsub float %23, %24                       ; <float> [#uses=2]
   %26 = fmul float 0.000000e+00, %25              ; <float> [#uses=1]
   %27 = fsub float %26, undef                     ; <float> [#uses=1]
@@ -59,11 +59,11 @@ bb3.i:
   %34 = fadd float %32, %33                       ; <float> [#uses=1]
   %35 = fmul float %23, %31                       ; <float> [#uses=1]
   %36 = fadd float %34, %35                       ; <float> [#uses=1]
-  %37 = load float* %6, align 4                   ; <float> [#uses=2]
-  %38 = load float* %7, align 4                   ; <float> [#uses=2]
+  %37 = load float, float* %6, align 4                   ; <float> [#uses=2]
+  %38 = load float, float* %7, align 4                   ; <float> [#uses=2]
   %39 = fsub float %22, %38                       ; <float> [#uses=2]
-  %40 = load float* undef, align 4                ; <float> [#uses=1]
-  %41 = load float* null, align 4                 ; <float> [#uses=2]
+  %40 = load float, float* undef, align 4                ; <float> [#uses=1]
+  %41 = load float, float* null, align 4                 ; <float> [#uses=2]
   %42 = fmul float %41, undef                     ; <float> [#uses=1]
   %43 = fmul float undef, %39                     ; <float> [#uses=1]
   %44 = fsub float %42, %43                       ; <float> [#uses=1]
@@ -80,7 +80,7 @@ bb3.i:
   %55 = fmul float undef, undef                   ; <float> [#uses=1]
   %56 = fsub float %54, %55                       ; <float> [#uses=1]
   %57 = fmul float undef, %53                     ; <float> [#uses=1]
-  %58 = load float* undef, align 4                ; <float> [#uses=2]
+  %58 = load float, float* undef, align 4                ; <float> [#uses=2]
   %59 = fmul float %58, undef                     ; <float> [#uses=1]
   %60 = fsub float %57, %59                       ; <float> [#uses=1]
   %61 = fmul float %58, undef                     ; <float> [#uses=1]
@@ -100,7 +100,7 @@ bb3.i:
   br i1 %72, label %bb4.i97, label %ccc.exit98
 
 bb4.i97:                                          ; preds = %bb3.i
-  %73 = load %bar** undef, align 4                ; <%bar*> [#uses=0]
+  %73 = load %bar*, %bar** undef, align 4                ; <%bar*> [#uses=0]
   br label %ccc.exit98
 
 ccc.exit98:                                       ; preds = %bb4.i97, %bb3.i

Modified: llvm/trunk/test/CodeGen/ARM/2010-03-04-eabi-fp-spill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-03-04-eabi-fp-spill.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-03-04-eabi-fp-spill.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-03-04-eabi-fp-spill.ll Fri Feb 27 15:17:42 2015
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=arm-unknown-linux-gnueabi
 
 define void @"java.lang.String::getChars"([84 x i8]* %method, i32 %base_pc, [788 x i8]* %thread) {
-  %1 = load i32* undef                            ; <i32> [#uses=1]
+  %1 = load i32, i32* undef                            ; <i32> [#uses=1]
   %2 = sub i32 %1, 48                             ; <i32> [#uses=1]
   br i1 undef, label %stack_overflow, label %no_overflow
 
@@ -10,13 +10,13 @@ stack_overflow:
 
 no_overflow:                                      ; preds = %0
   %frame = inttoptr i32 %2 to [17 x i32]*         ; <[17 x i32]*> [#uses=4]
-  %3 = load i32* undef                            ; <i32> [#uses=1]
-  %4 = load i32* null                             ; <i32> [#uses=1]
+  %3 = load i32, i32* undef                            ; <i32> [#uses=1]
+  %4 = load i32, i32* null                             ; <i32> [#uses=1]
   %5 = getelementptr inbounds [17 x i32], [17 x i32]* %frame, i32 0, i32 13 ; <i32*> [#uses=1]
   %6 = bitcast i32* %5 to [8 x i8]**              ; <[8 x i8]**> [#uses=1]
-  %7 = load [8 x i8]** %6                         ; <[8 x i8]*> [#uses=1]
+  %7 = load [8 x i8]*, [8 x i8]** %6                         ; <[8 x i8]*> [#uses=1]
   %8 = getelementptr inbounds [17 x i32], [17 x i32]* %frame, i32 0, i32 12 ; <i32*> [#uses=1]
-  %9 = load i32* %8                               ; <i32> [#uses=1]
+  %9 = load i32, i32* %8                               ; <i32> [#uses=1]
   br i1 undef, label %bci_13, label %bci_4
 
 bci_13:                                           ; preds = %no_overflow
@@ -27,13 +27,13 @@ bci_30:
 
 bci_46:                                           ; preds = %bci_30
   %10 = sub i32 %4, %3                            ; <i32> [#uses=1]
-  %11 = load [8 x i8]** null                      ; <[8 x i8]*> [#uses=1]
+  %11 = load [8 x i8]*, [8 x i8]** null                      ; <[8 x i8]*> [#uses=1]
   %callee = bitcast [8 x i8]* %11 to [84 x i8]*   ; <[84 x i8]*> [#uses=1]
   %12 = bitcast i8* undef to i32*                 ; <i32*> [#uses=1]
-  %base_pc7 = load i32* %12                       ; <i32> [#uses=2]
+  %base_pc7 = load i32, i32* %12                       ; <i32> [#uses=2]
   %13 = add i32 %base_pc7, 0                      ; <i32> [#uses=1]
   %14 = inttoptr i32 %13 to void ([84 x i8]*, i32, [788 x i8]*)** ; <void ([84 x i8]*, i32, [788 x i8]*)**> [#uses=1]
-  %entry_point = load void ([84 x i8]*, i32, [788 x i8]*)** %14 ; <void ([84 x i8]*, i32, [788 x i8]*)*> [#uses=1]
+  %entry_point = load void ([84 x i8]*, i32, [788 x i8]*)*, void ([84 x i8]*, i32, [788 x i8]*)** %14 ; <void ([84 x i8]*, i32, [788 x i8]*)*> [#uses=1]
   %15 = getelementptr inbounds [17 x i32], [17 x i32]* %frame, i32 0, i32 1 ; <i32*> [#uses=1]
   %16 = ptrtoint i32* %15 to i32                  ; <i32> [#uses=1]
   %stack_pointer_addr9 = bitcast i8* undef to i32* ; <i32*> [#uses=1]
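
[As the `%entry_point` hunk above shows, the new syntax gets verbose for function-pointer loads, because the full function type is now written twice: once as the loaded value's type and once inside the pointer type. A hedged sketch of the same shape with a simpler, illustrative signature:

    define i32 @dispatch(i32 (i32)** %slot) {
    entry:
      ; was: %fp = load i32 (i32)** %slot
      %fp = load i32 (i32)*, i32 (i32)** %slot
      %r = tail call i32 %fp(i32 7)
      ret i32 %r
    }
]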

Modified: llvm/trunk/test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll Fri Feb 27 15:17:42 2015
@@ -9,10 +9,10 @@ stack_overflow:
 
 no_overflow:                                      ; preds = %0
   %frame = inttoptr i32 %1 to [17 x i32]*         ; <[17 x i32]*> [#uses=4]
-  %2 = load i32* null                             ; <i32> [#uses=2]
+  %2 = load i32, i32* null                             ; <i32> [#uses=2]
   %3 = getelementptr inbounds [17 x i32], [17 x i32]* %frame, i32 0, i32 14 ; <i32*> [#uses=1]
-  %4 = load i32* %3                               ; <i32> [#uses=2]
-  %5 = load [8 x i8]** undef                      ; <[8 x i8]*> [#uses=2]
+  %4 = load i32, i32* %3                               ; <i32> [#uses=2]
+  %5 = load [8 x i8]*, [8 x i8]** undef                      ; <[8 x i8]*> [#uses=2]
   br i1 undef, label %bci_13, label %bci_4
 
 bci_13:                                           ; preds = %no_overflow

Modified: llvm/trunk/test/CodeGen/ARM/2010-05-17-FastAllocCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-05-17-FastAllocCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-05-17-FastAllocCrash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-05-17-FastAllocCrash.ll Fri Feb 27 15:17:42 2015
@@ -82,9 +82,9 @@ cond_true1369.preheader:
   ret void
 
 bb1567:                                           ; preds = %cond_true1254
-  %tmp1591 = load i64* getelementptr inbounds (%struct.CHESS_POSITION* @search, i32 0, i32 4) ; <i64> [#uses=1]
+  %tmp1591 = load i64, i64* getelementptr inbounds (%struct.CHESS_POSITION* @search, i32 0, i32 4) ; <i64> [#uses=1]
   %tmp1572 = tail call fastcc i32 @FirstOne()     ; <i32> [#uses=1]
-  %tmp1594 = load i32* undef                      ; <i32> [#uses=1]
+  %tmp1594 = load i32, i32* undef                      ; <i32> [#uses=1]
   %tmp1594.upgrd.5 = trunc i32 %tmp1594 to i8     ; <i8> [#uses=1]
   %shift.upgrd.6 = zext i8 %tmp1594.upgrd.5 to i64 ; <i64> [#uses=1]
   %tmp1595 = lshr i64 %tmp1591, %shift.upgrd.6    ; <i64> [#uses=1]
@@ -92,7 +92,7 @@ bb1567:
   %tmp1596 = and i32 %tmp1595.upgrd.7, 255        ; <i32> [#uses=1]
   %gep.upgrd.8 = zext i32 %tmp1596 to i64         ; <i64> [#uses=1]
   %tmp1598 = getelementptr [64 x [256 x i32]], [64 x [256 x i32]]* @bishop_mobility_rr45, i32 0, i32 %tmp1572, i64 %gep.upgrd.8 ; <i32*> [#uses=1]
-  %tmp1599 = load i32* %tmp1598                   ; <i32> [#uses=1]
+  %tmp1599 = load i32, i32* %tmp1598                   ; <i32> [#uses=1]
   %tmp1602 = sub i32 0, %tmp1599                  ; <i32> [#uses=1]
   br i1 undef, label %cond_next1637, label %cond_true1607
 

Modified: llvm/trunk/test/CodeGen/ARM/2010-05-18-LocalAllocCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-05-18-LocalAllocCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-05-18-LocalAllocCrash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-05-18-LocalAllocCrash.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ entry:
   %vla10 = alloca i8, i32 undef, align 1          ; <i8*> [#uses=1]
   %vla14 = alloca i8, i32 undef, align 1          ; <i8*> [#uses=1]
   %vla18 = alloca i8, i32 undef, align 1          ; <i8*> [#uses=1]
-  %tmp21 = load i32* undef                        ; <i32> [#uses=1]
+  %tmp21 = load i32, i32* undef                        ; <i32> [#uses=1]
   %0 = mul i32 1, %tmp21                          ; <i32> [#uses=1]
   %vla22 = alloca i8, i32 %0, align 1             ; <i8*> [#uses=1]
   call  void (...)* @zz(i8* getelementptr inbounds ([1 x i8]* @.str, i32 0, i32 0), i32 2, i32 1)

Modified: llvm/trunk/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ entry:
   br i1 %tst, label %bb.nph96, label %bb3
 
 bb3:                                              ; preds = %entry
-  %1 = load i64* %0, align 4                      ; <i64> [#uses=0]
+  %1 = load i64, i64* %0, align 4                      ; <i64> [#uses=0]
   ret i8 42
 
 bb.nph96:                                         ; preds = %entry

Modified: llvm/trunk/test/CodeGen/ARM/2010-05-19-Shuffles.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-05-19-Shuffles.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-05-19-Shuffles.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-05-19-Shuffles.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ define <8 x i8> @f2(<8 x i8> %x) nounwin
 }
 
 define void @f3(<4 x i64>* %xp) nounwind {
-  %x = load <4 x i64>* %xp
+  %x = load <4 x i64>, <4 x i64>* %xp
   %y = shufflevector <4 x i64> %x, <4 x i64> undef, <4 x i32> <i32 0, i32 3, i32 2, i32 1>
   store <4 x i64> %y, <4 x i64>* %xp
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/2010-05-21-BuildVector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-05-21-BuildVector.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-05-21-BuildVector.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-05-21-BuildVector.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 
 define void @test(float* %fltp, i32 %packedValue, float* %table) nounwind {
 entry:
-  %0 = load float* %fltp
+  %0 = load float, float* %fltp
   %1 = insertelement <4 x float> undef, float %0, i32 0
   %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> zeroinitializer
   %3 = shl i32 %packedValue, 16
@@ -11,28 +11,28 @@ entry:
   %.sum = add i32 %4, 4
   %5 = getelementptr inbounds float, float* %table, i32 %.sum
 ;CHECK: vldr s
-  %6 = load float* %5, align 4
+  %6 = load float, float* %5, align 4
   %tmp11 = insertelement <4 x float> undef, float %6, i32 0
   %7 = shl i32 %packedValue, 18
   %8 = ashr i32 %7, 30
   %.sum12 = add i32 %8, 4
   %9 = getelementptr inbounds float, float* %table, i32 %.sum12
 ;CHECK: vldr s
-  %10 = load float* %9, align 4
+  %10 = load float, float* %9, align 4
   %tmp9 = insertelement <4 x float> %tmp11, float %10, i32 1
   %11 = shl i32 %packedValue, 20
   %12 = ashr i32 %11, 30
   %.sum13 = add i32 %12, 4
   %13 = getelementptr inbounds float, float* %table, i32 %.sum13
 ;CHECK: vldr s
-  %14 = load float* %13, align 4
+  %14 = load float, float* %13, align 4
   %tmp7 = insertelement <4 x float> %tmp9, float %14, i32 2
   %15 = shl i32 %packedValue, 22
   %16 = ashr i32 %15, 30
   %.sum14 = add i32 %16, 4
   %17 = getelementptr inbounds float, float* %table, i32 %.sum14
 ;CHECK: vldr s
-  %18 = load float* %17, align 4
+  %18 = load float, float* %17, align 4
   %tmp5 = insertelement <4 x float> %tmp7, float %18, i32 3
   %19 = fmul <4 x float> %tmp5, %2
   %20 = bitcast float* %fltp to i8*

Modified: llvm/trunk/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 define void @foo(%struct.__int8x8x2_t* nocapture %a, i8* %b) nounwind {
 entry:
  %0 = bitcast %struct.__int8x8x2_t* %a to i128*  ; <i128*> [#uses=1]
- %srcval = load i128* %0, align 8                ; <i128> [#uses=2]
+ %srcval = load i128, i128* %0, align 8                ; <i128> [#uses=2]
  %tmp6 = trunc i128 %srcval to i64               ; <i64> [#uses=1]
  %tmp8 = lshr i128 %srcval, 64                   ; <i128> [#uses=1]
  %tmp9 = trunc i128 %tmp8 to i64                 ; <i64> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/2010-06-21-LdStMultipleBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-06-21-LdStMultipleBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-06-21-LdStMultipleBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-06-21-LdStMultipleBug.ll Fri Feb 27 15:17:42 2015
@@ -18,10 +18,10 @@ entry:
   br i1 %1, label %bb, label %return
 
 bb:                                               ; preds = %bb445, %entry
-  %2 = load %struct.cellbox** undef, align 4      ; <%struct.cellbox*> [#uses=2]
+  %2 = load %struct.cellbox*, %struct.cellbox** undef, align 4      ; <%struct.cellbox*> [#uses=2]
   %3 = getelementptr inbounds %struct.cellbox, %struct.cellbox* %2, i32 0, i32 3 ; <i32*> [#uses=1]
   store i32 undef, i32* %3, align 4
-  %4 = load i32* undef, align 4                   ; <i32> [#uses=3]
+  %4 = load i32, i32* undef, align 4                   ; <i32> [#uses=3]
   %5 = icmp eq i32 undef, 1                       ; <i1> [#uses=1]
   br i1 %5, label %bb10, label %bb445
 
@@ -29,12 +29,12 @@ bb10:
   br i1 undef, label %bb11, label %bb445
 
 bb11:                                             ; preds = %bb10
-  %6 = load %struct.tilebox** undef, align 4      ; <%struct.tilebox*> [#uses=3]
-  %7 = load %struct.termbox** null, align 4       ; <%struct.termbox*> [#uses=1]
+  %6 = load %struct.tilebox*, %struct.tilebox** undef, align 4      ; <%struct.tilebox*> [#uses=3]
+  %7 = load %struct.termbox*, %struct.termbox** null, align 4       ; <%struct.termbox*> [#uses=1]
   %8 = getelementptr inbounds %struct.tilebox, %struct.tilebox* %6, i32 0, i32 13 ; <i32*> [#uses=1]
-  %9 = load i32* %8, align 4                      ; <i32> [#uses=3]
+  %9 = load i32, i32* %8, align 4                      ; <i32> [#uses=3]
   %10 = getelementptr inbounds %struct.tilebox, %struct.tilebox* %6, i32 0, i32 15 ; <i32*> [#uses=1]
-  %11 = load i32* %10, align 4                    ; <i32> [#uses=1]
+  %11 = load i32, i32* %10, align 4                    ; <i32> [#uses=1]
   br i1 false, label %bb12, label %bb13
 
 bb12:                                             ; preds = %bb11
@@ -77,7 +77,7 @@ bb21:
 
 bb36:                                             ; preds = %bb43.loopexit, %bb36
   %termptr.0478 = phi %struct.termbox* [ %42, %bb36 ], [ %7, %bb43.loopexit ] ; <%struct.termbox*> [#uses=1]
-  %30 = load i32* undef, align 4                  ; <i32> [#uses=1]
+  %30 = load i32, i32* undef, align 4                  ; <i32> [#uses=1]
   %31 = sub nsw i32 %30, %9                       ; <i32> [#uses=1]
   %32 = sitofp i32 %31 to double                  ; <double> [#uses=1]
   %33 = fdiv double %32, 0.000000e+00             ; <double> [#uses=1]
@@ -93,7 +93,7 @@ bb36:
   %40 = add i32 %iftmp.47.0, 0                    ; <i32> [#uses=1]
   store i32 %40, i32* undef, align 4
   %41 = getelementptr inbounds %struct.termbox, %struct.termbox* %termptr.0478, i32 0, i32 0 ; <%struct.termbox**> [#uses=1]
-  %42 = load %struct.termbox** %41, align 4       ; <%struct.termbox*> [#uses=2]
+  %42 = load %struct.termbox*, %struct.termbox** %41, align 4       ; <%struct.termbox*> [#uses=2]
   %43 = icmp eq %struct.termbox* %42, null        ; <i1> [#uses=1]
   br i1 %43, label %bb52.loopexit, label %bb36
 
@@ -128,7 +128,7 @@ bb248:
 
 bb249:                                            ; preds = %bb248
   %46 = getelementptr inbounds %struct.cellbox, %struct.cellbox* %2, i32 0, i32 21, i32 undef ; <%struct.tilebox**> [#uses=1]
-  %47 = load %struct.tilebox** %46, align 4       ; <%struct.tilebox*> [#uses=1]
+  %47 = load %struct.tilebox*, %struct.tilebox** %46, align 4       ; <%struct.tilebox*> [#uses=1]
   %48 = getelementptr inbounds %struct.tilebox, %struct.tilebox* %47, i32 0, i32 11 ; <i32*> [#uses=1]
   store i32 undef, i32* %48, align 4
   unreachable
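
[Loads of pointers to named struct types, which dominate the hunks above, follow the same shape, and they compose with the explicit-type `getelementptr` syntax already visible throughout this diff. An illustrative chain, with a made-up type not taken from this test:

    %pair = type { i32, i32 }

    define i32 @first_field(%pair** %pp) {
    entry:
      ; was: %p = load %pair** %pp, align 4
      %p = load %pair*, %pair** %pp, align 4
      %f = getelementptr inbounds %pair, %pair* %p, i32 0, i32 0
      ; was: %v = load i32* %f, align 4
      %v = load i32, i32* %f, align 4
      ret i32 %v
    }
]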

Modified: llvm/trunk/test/CodeGen/ARM/2010-06-21-nondarwin-tc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-06-21-nondarwin-tc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-06-21-nondarwin-tc.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-06-21-nondarwin-tc.ll Fri Feb 27 15:17:42 2015
@@ -35,7 +35,7 @@ entry:
   %.loc = alloca i32                              ; <i32*> [#uses=2]
   %tmp.i = getelementptr inbounds %"class.llvm::StringInit", %"class.llvm::StringInit"* %this, i32 0, i32 0, i32 4 ; <i8*> [#uses=1]
   %0 = bitcast i8* %tmp.i to %"struct.llvm::Init"** ; <%"struct.llvm::Init"**> [#uses=1]
-  %tmp2.i = load %"struct.llvm::Init"** %0        ; <%"struct.llvm::Init"*> [#uses=2]
+  %tmp2.i = load %"struct.llvm::Init"*, %"struct.llvm::Init"** %0        ; <%"struct.llvm::Init"*> [#uses=2]
   %1 = icmp eq %"struct.llvm::Init"* %tmp2.i, null ; <i1> [#uses=1]
   br i1 %1, label %entry.return_crit_edge, label %tmpbb
 
@@ -53,16 +53,16 @@ tmpbb:
 
 if.then:                                          ; preds = %tmpbb
   %tmp2.i.i.i.i = getelementptr inbounds %"class.llvm::StringInit", %"class.llvm::StringInit"* %this, i32 0, i32 1, i32 0, i32 0 ; <i8**> [#uses=1]
-  %tmp3.i.i.i.i = load i8** %tmp2.i.i.i.i         ; <i8*> [#uses=2]
+  %tmp3.i.i.i.i = load i8*, i8** %tmp2.i.i.i.i         ; <i8*> [#uses=2]
   %arrayidx.i.i.i.i = getelementptr inbounds i8, i8* %tmp3.i.i.i.i, i32 -12 ; <i8*> [#uses=1]
   %tmp.i.i.i = bitcast i8* %arrayidx.i.i.i.i to i32* ; <i32*> [#uses=1]
-  %tmp2.i.i.i = load i32* %tmp.i.i.i              ; <i32> [#uses=1]
+  %tmp2.i.i.i = load i32, i32* %tmp.i.i.i              ; <i32> [#uses=1]
   %tmp.i5 = getelementptr inbounds %"class.llvm::Record", %"class.llvm::Record"* %R, i32 0, i32 4 ; <%"class.std::vector"*> [#uses=1]
   %tmp2.i.i = getelementptr inbounds %"class.llvm::Record", %"class.llvm::Record"* %R, i32 0, i32 4, i32 0, i32 4 ; <i8*> [#uses=1]
   %4 = bitcast i8* %tmp2.i.i to %"class.llvm::RecordVal"** ; <%"class.llvm::RecordVal"**> [#uses=1]
-  %tmp3.i.i6 = load %"class.llvm::RecordVal"** %4 ; <%"class.llvm::RecordVal"*> [#uses=1]
+  %tmp3.i.i6 = load %"class.llvm::RecordVal"*, %"class.llvm::RecordVal"** %4 ; <%"class.llvm::RecordVal"*> [#uses=1]
   %tmp5.i.i = bitcast %"class.std::vector"* %tmp.i5 to %"class.llvm::RecordVal"** ; <%"class.llvm::RecordVal"**> [#uses=1]
-  %tmp6.i.i = load %"class.llvm::RecordVal"** %tmp5.i.i ; <%"class.llvm::RecordVal"*> [#uses=5]
+  %tmp6.i.i = load %"class.llvm::RecordVal"*, %"class.llvm::RecordVal"** %tmp5.i.i ; <%"class.llvm::RecordVal"*> [#uses=5]
   %sub.ptr.lhs.cast.i.i = ptrtoint %"class.llvm::RecordVal"* %tmp3.i.i6 to i32 ; <i32> [#uses=1]
   %sub.ptr.rhs.cast.i.i = ptrtoint %"class.llvm::RecordVal"* %tmp6.i.i to i32 ; <i32> [#uses=1]
   %sub.ptr.sub.i.i = sub i32 %sub.ptr.lhs.cast.i.i, %sub.ptr.rhs.cast.i.i ; <i32> [#uses=1]
@@ -71,7 +71,7 @@ if.then:
 
 codeRepl:                                         ; preds = %if.then
   %targetBlock = call i1 @_ZNK4llvm7VarInit12getFieldInitERNS_6RecordEPKNS_9RecordValERKSs_for.cond.i(i32 %sub.ptr.div.i.i, %"class.llvm::RecordVal"* %tmp6.i.i, i32 %tmp2.i.i.i, i8* %tmp3.i.i.i.i, i32* %.loc) ; <i1> [#uses=1]
-  %.reload = load i32* %.loc                      ; <i32> [#uses=3]
+  %.reload = load i32, i32* %.loc                      ; <i32> [#uses=3]
   br i1 %targetBlock, label %for.cond.i.return_crit_edge, label %_ZN4llvm6Record8getValueENS_9StringRefE.exit
 
 for.cond.i.return_crit_edge:                      ; preds = %codeRepl
@@ -101,7 +101,7 @@ land.lhs.true.return_crit_edge:
 
 lor.lhs.false:                                    ; preds = %land.lhs.true
   %tmp.i3 = getelementptr inbounds %"class.llvm::RecordVal", %"class.llvm::RecordVal"* %tmp6.i.i, i32 %.reload, i32 3 ; <%"struct.llvm::Init"**> [#uses=1]
-  %tmp2.i4 = load %"struct.llvm::Init"** %tmp.i3  ; <%"struct.llvm::Init"*> [#uses=2]
+  %tmp2.i4 = load %"struct.llvm::Init"*, %"struct.llvm::Init"** %tmp.i3  ; <%"struct.llvm::Init"*> [#uses=2]
   %5 = icmp eq %"struct.llvm::Init"* %tmp2.i4, null ; <i1> [#uses=1]
   br i1 %5, label %lor.lhs.false.if.end_crit_edge, label %tmpbb1
 
@@ -122,7 +122,7 @@ tmpbb1:
 
 if.end:                                           ; preds = %.if.end_crit_edge, %lor.lhs.false.if.end_crit_edge, %if.then6.if.end_crit_edge
   %tmp.i1 = getelementptr inbounds %"class.llvm::RecordVal", %"class.llvm::RecordVal"* %tmp6.i.i, i32 %.reload, i32 3 ; <%"struct.llvm::Init"**> [#uses=1]
-  %tmp2.i2 = load %"struct.llvm::Init"** %tmp.i1  ; <%"struct.llvm::Init"*> [#uses=3]
+  %tmp2.i2 = load %"struct.llvm::Init"*, %"struct.llvm::Init"** %tmp.i1  ; <%"struct.llvm::Init"*> [#uses=3]
   %8 = bitcast %"class.llvm::StringInit"* %this to %"struct.llvm::Init"* ; <%"struct.llvm::Init"*> [#uses=1]
   %cmp19 = icmp eq %"struct.llvm::Init"* %tmp2.i2, %8 ; <i1> [#uses=1]
   br i1 %cmp19, label %cond.false, label %cond.end
@@ -133,9 +133,9 @@ cond.false:
 
 cond.end:                                         ; preds = %if.end
   %9 = bitcast %"struct.llvm::Init"* %tmp2.i2 to %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)*** ; <%"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)***> [#uses=1]
-  %10 = load %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)*** %9 ; <%"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)**> [#uses=1]
+  %10 = load %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)**, %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)*** %9 ; <%"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)**> [#uses=1]
   %vfn = getelementptr inbounds %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)*, %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)** %10, i32 8 ; <%"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)**> [#uses=1]
-  %11 = load %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)** %vfn ; <%"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)*> [#uses=1]
+  %11 = load %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)*, %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)** %vfn ; <%"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)*> [#uses=1]
   %call25 = tail call %"struct.llvm::Init"* %11(%"struct.llvm::Init"* %tmp2.i2, %"class.llvm::Record"* %R, %"class.llvm::RecordVal"* %RV, %"class.std::basic_string"* %FieldName) ; <%"struct.llvm::Init"*> [#uses=1]
   ret %"struct.llvm::Init"* %call25
 

Modified: llvm/trunk/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define void @x0(i8* nocapture %buf, i32
 entry:
   tail call void @llvm.dbg.value(metadata i8* %buf, i64 0, metadata !0, metadata !{!"0x102"}), !dbg !15
   tail call void @llvm.dbg.value(metadata i32 %nbytes, i64 0, metadata !8, metadata !{!"0x102"}), !dbg !16
-  %tmp = load i32* @length, !dbg !17              ; <i32> [#uses=3]
+  %tmp = load i32, i32* @length, !dbg !17              ; <i32> [#uses=3]
   %cmp = icmp eq i32 %tmp, -1, !dbg !17           ; <i1> [#uses=1]
   %cmp.not = xor i1 %cmp, true                    ; <i1> [#uses=1]
   %cmp3 = icmp ult i32 %tmp, %nbytes, !dbg !17    ; <i1> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/2010-06-29-SubregImpDefs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-06-29-SubregImpDefs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-06-29-SubregImpDefs.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-06-29-SubregImpDefs.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ define i32 @main(i32 %argc, i8** %argv)
 entry:
   %0 = shufflevector <2 x i64> undef, <2 x i64> zeroinitializer, <2 x i32> <i32 1, i32 2> ; <<2 x i64>> [#uses=1]
   store <2 x i64> %0, <2 x i64>* undef, align 16
-  %val4723 = load <8 x i16>* undef                ; <<8 x i16>> [#uses=1]
+  %val4723 = load <8 x i16>, <8 x i16>* undef                ; <<8 x i16>> [#uses=1]
   call void @PrintShortX(i8* getelementptr inbounds ([21 x i8]* @.str271, i32 0, i32 0), <8 x i16> %val4723, i32 0) nounwind
   ret i32 undef
 }

Modified: llvm/trunk/test/CodeGen/ARM/2010-07-26-GlobalMerge.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-07-26-GlobalMerge.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-07-26-GlobalMerge.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-07-26-GlobalMerge.ll Fri Feb 27 15:17:42 2015
@@ -30,9 +30,9 @@ declare void @_Unwind_SjLj_Resume(i8*)
 define internal void @_ZN1AD1Ev(%struct.A* nocapture %this) nounwind ssp align 2 {
 entry:
   %tmp.i = getelementptr inbounds %struct.A, %struct.A* %this, i32 0, i32 0 ; <i32*> [#uses=1]
-  %tmp2.i = load i32* %tmp.i                      ; <i32> [#uses=1]
+  %tmp2.i = load i32, i32* %tmp.i                      ; <i32> [#uses=1]
   %call.i = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str4, i32 0, i32 0), i32 %tmp2.i) nounwind ; <i32> [#uses=0]
-  %tmp3.i = load i32* @d                          ; <i32> [#uses=1]
+  %tmp3.i = load i32, i32* @d                          ; <i32> [#uses=1]
   %inc.i = add nsw i32 %tmp3.i, 1                 ; <i32> [#uses=1]
   store i32 %inc.i, i32* @d
   ret void
@@ -59,13 +59,13 @@ try.cont:
   %1 = tail call i8* @__cxa_begin_catch(i8* %exn) nounwind ; <i8*> [#uses=0]
   %puts = tail call i32 @puts(i8* getelementptr inbounds ([8 x i8]* @str1, i32 0, i32 0)) ; <i32> [#uses=0]
   %call.i.i3 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str4, i32 0, i32 0), i32 2) nounwind ; <i32> [#uses=0]
-  %tmp3.i.i = load i32* @d                        ; <i32> [#uses=1]
+  %tmp3.i.i = load i32, i32* @d                        ; <i32> [#uses=1]
   %inc.i.i4 = add nsw i32 %tmp3.i.i, 1            ; <i32> [#uses=1]
   store i32 %inc.i.i4, i32* @d
   tail call void @__cxa_end_catch()
-  %tmp13 = load i32* @d                           ; <i32> [#uses=1]
+  %tmp13 = load i32, i32* @d                           ; <i32> [#uses=1]
   %call14 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([18 x i8]* @.str2, i32 0, i32 0), i32 2, i32 %tmp13) ; <i32> [#uses=0]
-  %tmp16 = load i32* @d                           ; <i32> [#uses=1]
+  %tmp16 = load i32, i32* @d                           ; <i32> [#uses=1]
   %cmp = icmp ne i32 %tmp16, 2                    ; <i1> [#uses=1]
   %conv = zext i1 %cmp to i32                     ; <i32> [#uses=1]
   ret i32 %conv

Modified: llvm/trunk/test/CodeGen/ARM/2010-08-04-EHCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-08-04-EHCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-08-04-EHCrash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-08-04-EHCrash.ll Fri Feb 27 15:17:42 2015
@@ -12,17 +12,17 @@ entry:
   br label %return
 
 bb:                                               ; No predecessors!
-  %eh_select = load i32* %eh_selector             ; <i32> [#uses=1]
+  %eh_select = load i32, i32* %eh_selector             ; <i32> [#uses=1]
   store i32 %eh_select, i32* %save_filt.936, align 4
-  %eh_value = load i8** %eh_exception             ; <i8*> [#uses=1]
+  %eh_value = load i8*, i8** %eh_exception             ; <i8*> [#uses=1]
   store i8* %eh_value, i8** %save_eptr.935, align 4
   invoke arm_apcscc  void @func3()
           to label %invcont unwind label %lpad
 
 invcont:                                          ; preds = %bb
-  %tmp6 = load i8** %save_eptr.935, align 4          ; <i8*> [#uses=1]
+  %tmp6 = load i8*, i8** %save_eptr.935, align 4          ; <i8*> [#uses=1]
   store i8* %tmp6, i8** %eh_exception, align 4
-  %tmp7 = load i32* %save_filt.936, align 4          ; <i32> [#uses=1]
+  %tmp7 = load i32, i32* %save_filt.936, align 4          ; <i32> [#uses=1]
   store i32 %tmp7, i32* %eh_selector, align 4
   br label %Unwind
 
@@ -38,7 +38,7 @@ lpad:
               cleanup
   %exn = extractvalue { i8*, i32 } %eh_ptr, 0
   store i8* %exn, i8** %eh_exception
-  %eh_ptr13 = load i8** %eh_exception             ; <i8*> [#uses=1]
+  %eh_ptr13 = load i8*, i8** %eh_exception             ; <i8*> [#uses=1]
   %eh_select14 = extractvalue { i8*, i32 } %eh_ptr, 1
   store i32 %eh_select14, i32* %eh_selector
   br label %ppad
@@ -47,7 +47,7 @@ ppad:
   br label %bb12
 
 Unwind:
-  %eh_ptr15 = load i8** %eh_exception
+  %eh_ptr15 = load i8*, i8** %eh_exception
   call arm_apcscc  void @_Unwind_SjLj_Resume(i8* %eh_ptr15)
   unreachable
 }

Modified: llvm/trunk/test/CodeGen/ARM/2010-08-04-StackVariable.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-08-04-StackVariable.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-08-04-StackVariable.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-08-04-StackVariable.ll Fri Feb 27 15:17:42 2015
@@ -13,13 +13,13 @@ entry:
 
 bb:                                               ; preds = %entry
   %1 = getelementptr inbounds %struct.SVal, %struct.SVal* %location, i32 0, i32 1, !dbg !29 ; <i32*> [#uses=1]
-  %2 = load i32* %1, align 8, !dbg !29            ; <i32> [#uses=1]
+  %2 = load i32, i32* %1, align 8, !dbg !29            ; <i32> [#uses=1]
   %3 = add i32 %2, %i, !dbg !29                   ; <i32> [#uses=1]
   br label %bb2, !dbg !29
 
 bb1:                                              ; preds = %entry
   %4 = getelementptr inbounds %struct.SVal, %struct.SVal* %location, i32 0, i32 1, !dbg !30 ; <i32*> [#uses=1]
-  %5 = load i32* %4, align 8, !dbg !30            ; <i32> [#uses=1]
+  %5 = load i32, i32* %4, align 8, !dbg !30            ; <i32> [#uses=1]
   %6 = sub i32 %5, 1, !dbg !30                    ; <i32> [#uses=1]
   br label %bb2, !dbg !30
 
@@ -58,11 +58,11 @@ entry:
   store i32 1, i32* %1, align 8, !dbg !42
   %2 = getelementptr inbounds %struct.SVal, %struct.SVal* %0, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
   %3 = getelementptr inbounds %struct.SVal, %struct.SVal* %v, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
-  %4 = load i8** %3, align 8, !dbg !43            ; <i8*> [#uses=1]
+  %4 = load i8*, i8** %3, align 8, !dbg !43            ; <i8*> [#uses=1]
   store i8* %4, i8** %2, align 8, !dbg !43
   %5 = getelementptr inbounds %struct.SVal, %struct.SVal* %0, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
   %6 = getelementptr inbounds %struct.SVal, %struct.SVal* %v, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
-  %7 = load i32* %6, align 8, !dbg !43            ; <i32> [#uses=1]
+  %7 = load i32, i32* %6, align 8, !dbg !43            ; <i32> [#uses=1]
   store i32 %7, i32* %5, align 8, !dbg !43
   %8 = call i32 @_Z3fooi4SVal(i32 2, %struct.SVal* noalias %0) nounwind, !dbg !43 ; <i32> [#uses=0]
   call void @llvm.dbg.value(metadata i32 %8, i64 0, metadata !44, metadata !{!"0x102"}), !dbg !43

Modified: llvm/trunk/test/CodeGen/ARM/2010-11-15-SpillEarlyClobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-11-15-SpillEarlyClobber.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-11-15-SpillEarlyClobber.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-11-15-SpillEarlyClobber.ll Fri Feb 27 15:17:42 2015
@@ -39,29 +39,29 @@ presymmetry.exit:
   %scevgep97.i = getelementptr i32, i32* %in, i32 undef
   %tmp198410 = or i32 undef, 1
   %scevgep.i48 = getelementptr i32, i32* %in, i32 undef
-  %0 = load i32* %scevgep.i48, align 4
+  %0 = load i32, i32* %scevgep.i48, align 4
   %1 = add nsw i32 %0, 0
   store i32 %1, i32* undef, align 4
   %asmtmp.i.i33.i.i.i = tail call %0 asm "smull\09$0, $1, $2, $3", "=&r,=&r,%r,r,~{cc}"(i32 undef, i32 1518500250) nounwind
   %asmresult1.i.i34.i.i.i = extractvalue %0 %asmtmp.i.i33.i.i.i, 1
   %2 = shl i32 %asmresult1.i.i34.i.i.i, 1
-  %3 = load i32* null, align 4
-  %4 = load i32* undef, align 4
+  %3 = load i32, i32* null, align 4
+  %4 = load i32, i32* undef, align 4
   %5 = sub nsw i32 %3, %4
-  %6 = load i32* undef, align 4
-  %7 = load i32* null, align 4
+  %6 = load i32, i32* undef, align 4
+  %7 = load i32, i32* null, align 4
   %8 = sub nsw i32 %6, %7
-  %9 = load i32* %scevgep97.i, align 4
-  %10 = load i32* undef, align 4
+  %9 = load i32, i32* %scevgep97.i, align 4
+  %10 = load i32, i32* undef, align 4
   %11 = sub nsw i32 %9, %10
-  %12 = load i32* null, align 4
-  %13 = load i32* %scevgep101.i, align 4
+  %12 = load i32, i32* null, align 4
+  %13 = load i32, i32* %scevgep101.i, align 4
   %14 = sub nsw i32 %12, %13
-  %15 = load i32* %scevgep.i48, align 4
-  %16 = load i32* null, align 4
+  %15 = load i32, i32* %scevgep.i48, align 4
+  %16 = load i32, i32* null, align 4
   %17 = add nsw i32 %16, %15
   %18 = sub nsw i32 %15, %16
-  %19 = load i32* undef, align 4
+  %19 = load i32, i32* undef, align 4
   %20 = add nsw i32 %19, %2
   %21 = sub nsw i32 %19, %2
   %22 = add nsw i32 %14, %5

Modified: llvm/trunk/test/CodeGen/ARM/2010-12-08-tpsoft.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-12-08-tpsoft.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-12-08-tpsoft.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-12-08-tpsoft.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@
 
 define arm_aapcs_vfpcc i32 @main() nounwind {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   switch i32 %0, label %bb2 [
     i32 12, label %bb
     i32 13, label %bb1

Modified: llvm/trunk/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll Fri Feb 27 15:17:42 2015
@@ -31,9 +31,9 @@
 ; OBJ-NEXT:     Section: .bss
 
 define i32 @main(i32 %argc) nounwind {
-  %1 = load i32* @sum, align 4
+  %1 = load i32, i32* @sum, align 4
   %2 = getelementptr [80 x i8], [80 x i8]* @array00, i32 0, i32 %argc
-  %3 = load i8* %2
+  %3 = load i8, i8* %2
   %4 = zext i8 %3 to i32
   %5 = add i32 %1, %4
   ret i32 %5

Modified: llvm/trunk/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll Fri Feb 27 15:17:42 2015
@@ -31,7 +31,7 @@ target triple = "thumbv7-apple-darwin10"
 define zeroext i8 @get1(i8 zeroext %a) nounwind optsize {
 entry:
   tail call void @llvm.dbg.value(metadata i8 %a, i64 0, metadata !10, metadata !{!"0x102"}), !dbg !30
-  %0 = load i8* @x1, align 4, !dbg !30
+  %0 = load i8, i8* @x1, align 4, !dbg !30
   tail call void @llvm.dbg.value(metadata i8 %0, i64 0, metadata !11, metadata !{!"0x102"}), !dbg !30
   store i8 %a, i8* @x1, align 4, !dbg !30
   ret i8 %0, !dbg !31
@@ -42,7 +42,7 @@ declare void @llvm.dbg.value(metadata, i
 define zeroext i8 @get2(i8 zeroext %a) nounwind optsize {
 entry:
   tail call void @llvm.dbg.value(metadata i8 %a, i64 0, metadata !18, metadata !{!"0x102"}), !dbg !32
-  %0 = load i8* @x2, align 4, !dbg !32
+  %0 = load i8, i8* @x2, align 4, !dbg !32
   tail call void @llvm.dbg.value(metadata i8 %0, i64 0, metadata !19, metadata !{!"0x102"}), !dbg !32
   store i8 %a, i8* @x2, align 4, !dbg !32
   ret i8 %0, !dbg !33
@@ -51,7 +51,7 @@ entry:
 define zeroext i8 @get3(i8 zeroext %a) nounwind optsize {
 entry:
   tail call void @llvm.dbg.value(metadata i8 %a, i64 0, metadata !21, metadata !{!"0x102"}), !dbg !34
-  %0 = load i8* @x3, align 4, !dbg !34
+  %0 = load i8, i8* @x3, align 4, !dbg !34
   tail call void @llvm.dbg.value(metadata i8 %0, i64 0, metadata !22, metadata !{!"0x102"}), !dbg !34
   store i8 %a, i8* @x3, align 4, !dbg !34
   ret i8 %0, !dbg !35
@@ -60,7 +60,7 @@ entry:
 define zeroext i8 @get4(i8 zeroext %a) nounwind optsize {
 entry:
   tail call void @llvm.dbg.value(metadata i8 %a, i64 0, metadata !24, metadata !{!"0x102"}), !dbg !36
-  %0 = load i8* @x4, align 4, !dbg !36
+  %0 = load i8, i8* @x4, align 4, !dbg !36
   tail call void @llvm.dbg.value(metadata i8 %0, i64 0, metadata !25, metadata !{!"0x102"}), !dbg !36
   store i8 %a, i8* @x4, align 4, !dbg !36
   ret i8 %0, !dbg !37
@@ -69,7 +69,7 @@ entry:
 define zeroext i8 @get5(i8 zeroext %a) nounwind optsize {
 entry:
   tail call void @llvm.dbg.value(metadata i8 %a, i64 0, metadata !27, metadata !{!"0x102"}), !dbg !38
-  %0 = load i8* @x5, align 4, !dbg !38
+  %0 = load i8, i8* @x5, align 4, !dbg !38
   tail call void @llvm.dbg.value(metadata i8 %0, i64 0, metadata !28, metadata !{!"0x102"}), !dbg !38
   store i8 %a, i8* @x5, align 4, !dbg !38
   ret i8 %0, !dbg !39

Modified: llvm/trunk/test/CodeGen/ARM/2011-02-07-AntidepClobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-02-07-AntidepClobber.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-02-07-AntidepClobber.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-02-07-AntidepClobber.ll Fri Feb 27 15:17:42 2015
@@ -39,10 +39,10 @@ bb134:
   unreachable
 
 bb135:                                            ; preds = %meshBB396
-  %uriHash.1.phi.load = load i32* undef
-  %.load120 = load i8*** %.SV116
-  %.phi24 = load i8* null
-  %.phi26 = load i8** null
+  %uriHash.1.phi.load = load i32, i32* undef
+  %.load120 = load i8**, i8*** %.SV116
+  %.phi24 = load i8, i8* null
+  %.phi26 = load i8*, i8** null
   store i8 %.phi24, i8* %.phi26, align 1
   %0 = getelementptr inbounds i8, i8* %.phi26, i32 1
   store i8* %0, i8** %.load120, align 4
@@ -52,7 +52,7 @@ bb135:
   %1 = mul i32 %uriHash.1.phi.load, 1000003
   %2 = xor i32 0, %1
   store i32 %2, i32* null
-  %3 = load i8* null, align 1
+  %3 = load i8, i8* null, align 1
   %4 = icmp eq i8 %3, 0
   store i8* %0, i8** undef
   br i1 %4, label %meshBB472, label %bb131
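
For reference, the bb135 hunk above shows how the rewrite treats multi-level pointers: the new leading type is the pointee, so each level of indirection drops exactly one * between the value type and the pointer-operand type. A minimal sketch in the new syntax (the %pp value is hypothetical, not taken from this test):

  %p = load i8**, i8*** %pp   ; %p has type i8**
  %c = load i8*, i8** %p      ; %c has type i8*
  %v = load i8, i8* %c        ; %v has type i8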

Modified: llvm/trunk/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ bb1:
   %1 = getelementptr inbounds %struct.ui, %struct.ui* %0, i32 0, i32 0
   store %struct.mo* undef, %struct.mo** %1, align 4
   %2 = getelementptr inbounds %struct.ui, %struct.ui* %0, i32 0, i32 5
-  %3 = load i64* %2, align 4
+  %3 = load i64, i64* %2, align 4
   %4 = call i32 @mo_create_nnm(%struct.mo* undef, i64 %3, i32** undef) nounwind
   br i1 undef, label %bb3, label %bb2
 

Modified: llvm/trunk/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll Fri Feb 27 15:17:42 2015
@@ -21,8 +21,8 @@ for.body:
   %x = getelementptr %struct.Outer, %struct.Outer* @oStruct, i32 0, i32 1, i32 %i.022, i32 0
   %y = getelementptr %struct.Outer, %struct.Outer* @oStruct, i32 0, i32 1, i32 %i.022, i32 1
   %inc = add i32 %i.022, 1
-  %tmp8 = load i32* %x, align 4
-  %tmp11 = load i32* %y, align 4
+  %tmp8 = load i32, i32* %x, align 4
+  %tmp11 = load i32, i32* %y, align 4
   %mul = mul nsw i32 %tmp11, %tmp8
   %tobool.i14 = icmp eq i32 %mul, 0
   br i1 %tobool.i14, label %_Z14printIsNotZeroi.exit17, label %if.then.i16
@@ -35,7 +35,7 @@ _Z14printIsNotZeroi.exit17:
 
 _Z14printIsNotZeroi.exit17.for.body_crit_edge:    ; preds = %_Z14printIsNotZeroi.exit17
   %b.phi.trans.insert = getelementptr %struct.Outer, %struct.Outer* @oStruct, i32 0, i32 1, i32 %inc, i32 3
-  %tmp3.pre = load i8* %b.phi.trans.insert, align 1
+  %tmp3.pre = load i8, i8* %b.phi.trans.insert, align 1
   %phitmp27 = icmp eq i8 %val8, 0
   br label %for.body
 

Modified: llvm/trunk/test/CodeGen/ARM/2011-04-07-schediv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-04-07-schediv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-04-07-schediv.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-04-07-schediv.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ bb:
   %uglygep = getelementptr i8, i8* %src_copy_start6, i32 %tmp
   %src_copy_start_addr.04 = bitcast i8* %uglygep to float*
   %dst_copy_start_addr.03 = getelementptr float, float* %dst_copy_start, i32 %j.05
-  %1 = load float* %src_copy_start_addr.04, align 4
+  %1 = load float, float* %src_copy_start_addr.04, align 4
   store float %1, float* %dst_copy_start_addr.03, align 4
   %2 = add i32 %j.05, 1
   %exitcond = icmp eq i32 %2, %src_width

Modified: llvm/trunk/test/CodeGen/ARM/2011-04-11-MachineLICMBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-04-11-MachineLICMBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-04-11-MachineLICMBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-04-11-MachineLICMBug.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ for.body:
 ; CHECK: mov{{.*}} r{{[0-9]+}}, #{{[01]}}
 ; CHECK-NOT: mov r{{[0-9]+}}, #{{[01]}}
   %arrayidx = getelementptr i32, i32* %A, i32 %0
-  %tmp4 = load i32* %arrayidx, align 4
+  %tmp4 = load i32, i32* %arrayidx, align 4
   %cmp6 = icmp eq i32 %tmp4, %value
   br i1 %cmp6, label %return, label %for.inc
 

Modified: llvm/trunk/test/CodeGen/ARM/2011-04-12-FastRegAlloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-04-12-FastRegAlloc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-04-12-FastRegAlloc.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-04-12-FastRegAlloc.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ target triple = "thumbv7-apple-darwin10.
 define void @_Z8TestCasev() nounwind ssp {
 entry:
   %a = alloca float, align 4
-  %tmp = load float* %a, align 4
+  %tmp = load float, float* %a, align 4
   call void asm sideeffect "", "w,~{s0},~{s16}"(float %tmp) nounwind, !srcloc !0
   ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/2011-04-26-SchedTweak.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-04-26-SchedTweak.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-04-26-SchedTweak.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-04-26-SchedTweak.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ entry:
   %block_count = alloca i32, align 4
   %index_cache = alloca i32, align 4
   store i32 0, i32* %index_cache, align 4
-  %tmp = load i32* @G, align 4
+  %tmp = load i32, i32* @G, align 4
   %tmp1 = call i32 @bar(i32 0, i32 0, i32 %tmp) nounwind
   switch i32 %tmp1, label %bb8 [
     i32 0, label %bb
@@ -31,7 +31,7 @@ entry:
   ]
 
 bb:
-  %tmp2 = load i32* @G, align 4
+  %tmp2 = load i32, i32* @G, align 4
   %tmp4 = icmp eq i32 %tmp2, 0
   br i1 %tmp4, label %bb1, label %bb8
 
@@ -41,8 +41,8 @@ bb1:
 ; CHECK: blx _Get
 ; CHECK: umull
 ; CHECK: blx _foo
-  %tmp5 = load i32* %block_size, align 4
-  %tmp6 = load i32* %block_count, align 4
+  %tmp5 = load i32, i32* %block_size, align 4
+  %tmp6 = load i32, i32* %block_count, align 4
   %tmp7 = call %struct.FF* @Get() nounwind
   store %struct.FF* %tmp7, %struct.FF** @FuncPtr, align 4
   %tmp10 = zext i32 %tmp6 to i64

Modified: llvm/trunk/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll Fri Feb 27 15:17:42 2015
@@ -30,7 +30,7 @@ target triple = "thumbv7-apple-macosx10.
 
 define i32 @get1(i32 %a) nounwind optsize ssp {
   tail call void @llvm.dbg.value(metadata i32 %a, i64 0, metadata !10, metadata !{!"0x102"}), !dbg !30
-  %1 = load i32* @x1, align 4, !dbg !31
+  %1 = load i32, i32* @x1, align 4, !dbg !31
   tail call void @llvm.dbg.value(metadata i32 %1, i64 0, metadata !11, metadata !{!"0x102"}), !dbg !31
   store i32 %a, i32* @x1, align 4, !dbg !31
   ret i32 %1, !dbg !31
@@ -38,7 +38,7 @@ define i32 @get1(i32 %a) nounwind optsiz
 
 define i32 @get2(i32 %a) nounwind optsize ssp {
   tail call void @llvm.dbg.value(metadata i32 %a, i64 0, metadata !13, metadata !{!"0x102"}), !dbg !32
-  %1 = load i32* @x2, align 4, !dbg !33
+  %1 = load i32, i32* @x2, align 4, !dbg !33
   tail call void @llvm.dbg.value(metadata i32 %1, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !33
   store i32 %a, i32* @x2, align 4, !dbg !33
   ret i32 %1, !dbg !33
@@ -46,7 +46,7 @@ define i32 @get2(i32 %a) nounwind optsiz
 
 define i32 @get3(i32 %a) nounwind optsize ssp {
   tail call void @llvm.dbg.value(metadata i32 %a, i64 0, metadata !16, metadata !{!"0x102"}), !dbg !34
-  %1 = load i32* @x3, align 4, !dbg !35
+  %1 = load i32, i32* @x3, align 4, !dbg !35
   tail call void @llvm.dbg.value(metadata i32 %1, i64 0, metadata !17, metadata !{!"0x102"}), !dbg !35
   store i32 %a, i32* @x3, align 4, !dbg !35
   ret i32 %1, !dbg !35
@@ -54,7 +54,7 @@ define i32 @get3(i32 %a) nounwind optsiz
 
 define i32 @get4(i32 %a) nounwind optsize ssp {
   tail call void @llvm.dbg.value(metadata i32 %a, i64 0, metadata !19, metadata !{!"0x102"}), !dbg !36
-  %1 = load i32* @x4, align 4, !dbg !37
+  %1 = load i32, i32* @x4, align 4, !dbg !37
   tail call void @llvm.dbg.value(metadata i32 %1, i64 0, metadata !20, metadata !{!"0x102"}), !dbg !37
   store i32 %a, i32* @x4, align 4, !dbg !37
   ret i32 %1, !dbg !37
@@ -62,7 +62,7 @@ define i32 @get4(i32 %a) nounwind optsiz
 
 define i32 @get5(i32 %a) nounwind optsize ssp {
   tail call void @llvm.dbg.value(metadata i32 %a, i64 0, metadata !27, metadata !{!"0x102"}), !dbg !38
-  %1 = load i32* @x5, align 4, !dbg !39
+  %1 = load i32, i32* @x5, align 4, !dbg !39
   tail call void @llvm.dbg.value(metadata i32 %1, i64 0, metadata !28, metadata !{!"0x102"}), !dbg !39
   store i32 %a, i32* @x5, align 4, !dbg !39
   ret i32 %1, !dbg !39
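
As the get1..get5 hunks above show, trailing attachments are untouched by the rewrite: align and !dbg metadata still follow the pointer operand, and only the comma-separated value type is new. Reduced from the get1 hunk:

  %1 = load i32, i32* @x1, align 4, !dbg !31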

Modified: llvm/trunk/test/CodeGen/ARM/2011-08-29-SchedCycle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-08-29-SchedCycle.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-08-29-SchedCycle.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-08-29-SchedCycle.ll Fri Feb 27 15:17:42 2015
@@ -32,7 +32,7 @@
 
 define void @t() nounwind {
 entry:
-  %tmp = load i64* undef, align 4
+  %tmp = load i64, i64* undef, align 4
   %tmp5 = udiv i64 %tmp, 30
   %tmp13 = and i64 %tmp5, 64739244643450880
   %tmp16 = sub i64 0, %tmp13

Modified: llvm/trunk/test/CodeGen/ARM/2011-08-29-ldr_pre_imm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-08-29-ldr_pre_imm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-08-29-ldr_pre_imm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-08-29-ldr_pre_imm.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ bb.i:
   %1 = shl nsw i32 %k_addr.0.i, 1
   %.sum8.i = add i32 %1, -1
   %2 = getelementptr inbounds [256 x i32], [256 x i32]* %heap, i32 0, i32 %.sum8.i
-  %3 = load i32* %2, align 4
+  %3 = load i32, i32* %2, align 4
   br i1 false, label %bb5.i, label %bb4.i
 
 bb4.i:                                            ; preds = %bb.i

Modified: llvm/trunk/test/CodeGen/ARM/2011-09-09-OddVectorDivision.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-09-09-OddVectorDivision.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-09-09-OddVectorDivision.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-09-09-OddVectorDivision.ll Fri Feb 27 15:17:42 2015
@@ -11,12 +11,12 @@ target triple = "armv7-none-linux-gnueab
 @z2 = common global <4 x i16> zeroinitializer
 
 define void @f() {
-  %1 = load <3 x i16>* @x1
-  %2 = load <3 x i16>* @y1
+  %1 = load <3 x i16>, <3 x i16>* @x1
+  %2 = load <3 x i16>, <3 x i16>* @y1
   %3 = sdiv <3 x i16> %1, %2
   store <3 x i16> %3, <3 x i16>* @z1
-  %4 = load <4 x i16>* @x2
-  %5 = load <4 x i16>* @y2
+  %4 = load <4 x i16>, <4 x i16>* @x2
+  %5 = load <4 x i16>, <4 x i16>* @y2
   %6 = sdiv <4 x i16> %4, %5
   store <4 x i16> %6, <4 x i16>* @z2
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/2011-09-28-CMovCombineBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-09-28-CMovCombineBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-09-28-CMovCombineBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-09-28-CMovCombineBug.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 ; ARM target specific dag combine created a cycle in DAG.
 
 define void @t() nounwind ssp {
-  %1 = load i64* undef, align 4
+  %1 = load i64, i64* undef, align 4
   %2 = shl i32 5, 0
   %3 = zext i32 %2 to i64
   %4 = and i64 %1, %3

Modified: llvm/trunk/test/CodeGen/ARM/2011-10-26-ExpandUnalignedLoadCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-10-26-ExpandUnalignedLoadCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-10-26-ExpandUnalignedLoadCrash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-10-26-ExpandUnalignedLoadCrash.ll Fri Feb 27 15:17:42 2015
@@ -9,11 +9,11 @@ L.entry:
   %2 = mul i32 %0, 6
   %3 = getelementptr i8, i8* %1, i32 %2
   %4 = bitcast i8* %3 to <3 x i16>*
-  %5 = load <3 x i16>* %4, align 1
+  %5 = load <3 x i16>, <3 x i16>* %4, align 1
   %6 = bitcast i16* %sourceA to i8*
   %7 = getelementptr i8, i8* %6, i32 %2
   %8 = bitcast i8* %7 to <3 x i16>*
-  %9 = load <3 x i16>* %8, align 1
+  %9 = load <3 x i16>, <3 x i16>* %8, align 1
   %10 = or <3 x i16> %9, %5
   store <3 x i16> %10, <3 x i16>* %4, align 1
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@
 
 define void @test_neon_vector_add_2xi8() nounwind {
 ; CHECK-LABEL: test_neon_vector_add_2xi8:
-  %1 = load <2 x i8>* @i8_src1
-  %2 = load <2 x i8>* @i8_src2
+  %1 = load <2 x i8>, <2 x i8>* @i8_src1
+  %2 = load <2 x i8>, <2 x i8>* @i8_src2
   %3 = add <2 x i8> %1, %2
   store <2 x i8> %3, <2 x i8>* @i8_res
   ret void
@@ -16,8 +16,8 @@ define void @test_neon_vector_add_2xi8()
 
 define void @test_neon_ld_st_volatile_with_ashr_2xi8() {
 ; CHECK-LABEL: test_neon_ld_st_volatile_with_ashr_2xi8:
-  %1 = load volatile <2 x i8>* @i8_src1
-  %2 = load volatile <2 x i8>* @i8_src2
+  %1 = load volatile <2 x i8>, <2 x i8>* @i8_src1
+  %2 = load volatile <2 x i8>, <2 x i8>* @i8_src2
   %3 = ashr <2 x i8> %1, %2
   store volatile <2 x i8> %3, <2 x i8>* @i8_res
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ declare <2 x i16> @foo_v2i16(<2 x i16>)
 
 define void @test_neon_call_return_v2i16() {
 ; CHECK-LABEL: test_neon_call_return_v2i16:
-  %1 = load <2 x i16>* @src1_v2i16
+  %1 = load <2 x i16>, <2 x i16>* @src1_v2i16
   %2 = call <2 x i16> @foo_v2i16(<2 x i16> %1) nounwind
   store <2 x i16> %2, <2 x i16>* @res_v2i16
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/2011-11-09-IllegalVectorFPIntConvert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-11-09-IllegalVectorFPIntConvert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-11-09-IllegalVectorFPIntConvert.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-11-09-IllegalVectorFPIntConvert.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define <2 x i32> @test1(<2 x double>* %A
 ; CHECK: test1
 ; CHECK: vcvt.s32.f64
 ; CHECK: vcvt.s32.f64
-  %tmp1 = load <2 x double>* %A
+  %tmp1 = load <2 x double>, <2 x double>* %A
 	%tmp2 = fptosi <2 x double> %tmp1 to <2 x i32>
 	ret <2 x i32> %tmp2
 }
@@ -13,7 +13,7 @@ define <2 x i32> @test2(<2 x double>* %A
 ; CHECK: test2
 ; CHECK: vcvt.u32.f64
 ; CHECK: vcvt.u32.f64
-  %tmp1 = load <2 x double>* %A
+  %tmp1 = load <2 x double>, <2 x double>* %A
 	%tmp2 = fptoui <2 x double> %tmp1 to <2 x i32>
 	ret <2 x i32> %tmp2
 }
@@ -22,7 +22,7 @@ define <2 x double> @test3(<2 x i32>* %A
 ; CHECK: test3
 ; CHECK: vcvt.f64.s32
 ; CHECK: vcvt.f64.s32
-  %tmp1 = load <2 x i32>* %A
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = sitofp <2 x i32> %tmp1 to <2 x double>
 	ret <2 x double> %tmp2
 }
@@ -31,7 +31,7 @@ define <2 x double> @test4(<2 x i32>* %A
 ; CHECK: test4
 ; CHECK: vcvt.f64.u32
 ; CHECK: vcvt.f64.u32
-  %tmp1 = load <2 x i32>* %A
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = uitofp <2 x i32> %tmp1 to <2 x double>
 	ret <2 x double> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll Fri Feb 27 15:17:42 2015
@@ -31,25 +31,25 @@ entry:
   store double 0.000000e+00, double* null, align 4
   %call = tail call double @cos(double %angle) nounwind readnone
   %call1 = tail call double @sin(double %angle) nounwind readnone
-  %0 = load double* %V1, align 4
+  %0 = load double, double* %V1, align 4
   %arrayidx2 = getelementptr inbounds double, double* %V1, i32 1
-  %1 = load double* %arrayidx2, align 4
+  %1 = load double, double* %arrayidx2, align 4
   %mul = fmul double %0, %1
   %sub = fsub double 1.000000e+00, %call
   %mul3 = fmul double %mul, %sub
-  %2 = load double* undef, align 4
+  %2 = load double, double* undef, align 4
   %mul5 = fmul double %2, %call1
   %add = fadd double %mul3, %mul5
   store double %add, double* %arrayidx5.1.i, align 4
-  %3 = load double* %V1, align 4
+  %3 = load double, double* %V1, align 4
   %mul11 = fmul double %3, undef
   %mul13 = fmul double %mul11, %sub
-  %4 = load double* %arrayidx2, align 4
+  %4 = load double, double* %arrayidx2, align 4
   %mul15 = fmul double %4, %call1
   %sub16 = fsub double %mul13, %mul15
   store double %sub16, double* %arrayidx5.2.i, align 4
-  %5 = load double* %V1, align 4
-  %6 = load double* %arrayidx2, align 4
+  %5 = load double, double* %V1, align 4
+  %6 = load double, double* %arrayidx2, align 4
   %mul22 = fmul double %5, %6
   %mul24 = fmul double %mul22, %sub
   %sub27 = fsub double %mul24, undef

Modified: llvm/trunk/test/CodeGen/ARM/2011-11-28-DAGCombineBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-11-28-DAGCombineBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-11-28-DAGCombineBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-11-28-DAGCombineBug.ll Fri Feb 27 15:17:42 2015
@@ -15,14 +15,14 @@ define hidden void @foo() {
 ; CHECK: ldr.w
 ; CHECK-NOT: ldm
 entry:
-  %tmp13 = load i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 1, i32 0, i32 0), align 1
-  %tmp15 = load i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 1, i32 0, i32 1), align 1
-  %tmp17 = load i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 1, i32 0, i32 2), align 1
-  %tmp19 = load i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 1, i32 0, i32 3), align 1
-  %tmp = load i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 2, i32 0, i32 0), align 1
-  %tmp3 = load i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 2, i32 0, i32 1), align 1
-  %tmp4 = load i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 2, i32 0, i32 2), align 1
-  %tmp5 = load i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 2, i32 0, i32 3), align 1
+  %tmp13 = load i32, i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 1, i32 0, i32 0), align 1
+  %tmp15 = load i32, i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 1, i32 0, i32 1), align 1
+  %tmp17 = load i32, i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 1, i32 0, i32 2), align 1
+  %tmp19 = load i32, i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 1, i32 0, i32 3), align 1
+  %tmp = load i32, i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 2, i32 0, i32 0), align 1
+  %tmp3 = load i32, i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 2, i32 0, i32 1), align 1
+  %tmp4 = load i32, i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 2, i32 0, i32 2), align 1
+  %tmp5 = load i32, i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 2, i32 0, i32 3), align 1
   %insert21 = insertvalue [4 x i32] undef, i32 %tmp13, 0
   %insert23 = insertvalue [4 x i32] %insert21, i32 %tmp15, 1
   %insert25 = insertvalue [4 x i32] %insert23, i32 %tmp17, 2
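
Note that only the load instruction itself gains the explicit value type here; getelementptr constant expressions used as pointer operands keep their old one-type spelling in this commit, as the hunk above shows. Reduced sketch over the same %struct.InformationBlock layout:

  %t = load i32, i32* getelementptr inbounds (%struct.InformationBlock* @infoBlock, i32 0, i32 1, i32 0, i32 0), align 1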

Modified: llvm/trunk/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ define void @test_sqrt(<4 x float>* %X)
 ; CHECK:      vst1.64  {{.*}}
 
 L.entry:
-  %0 = load <4 x float>* @A, align 16
+  %0 = load <4 x float>, <4 x float>* @A, align 16
   %1 = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0)
   store <4 x float> %1, <4 x float>* %X, align 16
   ret void
@@ -48,7 +48,7 @@ define void @test_cos(<4 x float>* %X) n
 ; CHECK:      vst1.64
 
 L.entry:
-  %0 = load <4 x float>* @A, align 16
+  %0 = load <4 x float>, <4 x float>* @A, align 16
   %1 = call <4 x float> @llvm.cos.v4f32(<4 x float> %0)
   store <4 x float> %1, <4 x float>* %X, align 16
   ret void
@@ -79,7 +79,7 @@ define void @test_exp(<4 x float>* %X) n
 ; CHECK:      vst1.64
 
 L.entry:
-  %0 = load <4 x float>* @A, align 16
+  %0 = load <4 x float>, <4 x float>* @A, align 16
   %1 = call <4 x float> @llvm.exp.v4f32(<4 x float> %0)
   store <4 x float> %1, <4 x float>* %X, align 16
   ret void
@@ -110,7 +110,7 @@ define void @test_exp2(<4 x float>* %X)
 ; CHECK:      vst1.64
 
 L.entry:
-  %0 = load <4 x float>* @A, align 16
+  %0 = load <4 x float>, <4 x float>* @A, align 16
   %1 = call <4 x float> @llvm.exp2.v4f32(<4 x float> %0)
   store <4 x float> %1, <4 x float>* %X, align 16
   ret void
@@ -141,7 +141,7 @@ define void @test_log10(<4 x float>* %X)
 ; CHECK:      vst1.64
 
 L.entry:
-  %0 = load <4 x float>* @A, align 16
+  %0 = load <4 x float>, <4 x float>* @A, align 16
   %1 = call <4 x float> @llvm.log10.v4f32(<4 x float> %0)
   store <4 x float> %1, <4 x float>* %X, align 16
   ret void
@@ -172,7 +172,7 @@ define void @test_log(<4 x float>* %X) n
 ; CHECK:      vst1.64
 
 L.entry:
-  %0 = load <4 x float>* @A, align 16
+  %0 = load <4 x float>, <4 x float>* @A, align 16
   %1 = call <4 x float> @llvm.log.v4f32(<4 x float> %0)
   store <4 x float> %1, <4 x float>* %X, align 16
   ret void
@@ -203,7 +203,7 @@ define void @test_log2(<4 x float>* %X)
 ; CHECK:      vst1.64
 
 L.entry:
-  %0 = load <4 x float>* @A, align 16
+  %0 = load <4 x float>, <4 x float>* @A, align 16
   %1 = call <4 x float> @llvm.log2.v4f32(<4 x float> %0)
   store <4 x float> %1, <4 x float>* %X, align 16
   ret void
@@ -236,7 +236,7 @@ define void @test_pow(<4 x float>* %X) n
 
 L.entry:
 
-  %0 = load <4 x float>* @A, align 16
+  %0 = load <4 x float>, <4 x float>* @A, align 16
   %1 = call <4 x float> @llvm.pow.v4f32(<4 x float> %0, <4 x float> <float 2., float 2., float 2., float 2.>)
 
   store <4 x float> %1, <4 x float>* %X, align 16
@@ -259,7 +259,7 @@ define void @test_powi(<4 x float>* %X)
 
 L.entry:
 
-  %0 = load <4 x float>* @A, align 16
+  %0 = load <4 x float>, <4 x float>* @A, align 16
   %1 = call <4 x float> @llvm.powi.v4f32(<4 x float> %0, i32 2)
 
   store <4 x float> %1, <4 x float>* %X, align 16
@@ -292,7 +292,7 @@ define void @test_sin(<4 x float>* %X) n
 ; CHECK:      vst1.64
 
 L.entry:
-  %0 = load <4 x float>* @A, align 16
+  %0 = load <4 x float>, <4 x float>* @A, align 16
   %1 = call <4 x float> @llvm.sin.v4f32(<4 x float> %0)
   store <4 x float> %1, <4 x float>* %X, align 16
   ret void
@@ -323,7 +323,7 @@ define void @test_floor(<4 x float>* %X)
 ; CHECK:      vst1.64
 
 L.entry:
-  %0 = load <4 x float>* @A, align 16
+  %0 = load <4 x float>, <4 x float>* @A, align 16
   %1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %0)
   store <4 x float> %1, <4 x float>* %X, align 16
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/2011-11-30-MergeAlignment.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-11-30-MergeAlignment.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-11-30-MergeAlignment.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-11-30-MergeAlignment.ll Fri Feb 27 15:17:42 2015
@@ -8,9 +8,9 @@ target triple = "thumbv7-apple-darwin10"
 @x2 = internal global i64 12
 
 define i64 @f() {
-  %ax = load i32* @x1
+  %ax = load i32, i32* @x1
   %a = zext i32 %ax to i64
-  %b = load i64* @x2
+  %b = load i64, i64* @x2
   %c = add i64 %a, %b
   ret i64 %c
 }

Modified: llvm/trunk/test/CodeGen/ARM/2011-12-14-machine-sink.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-12-14-machine-sink.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-12-14-machine-sink.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-12-14-machine-sink.ll Fri Feb 27 15:17:42 2015
@@ -15,13 +15,13 @@ for.cond:
 
 for.body:                                         ; preds = %for.cond
   %v.5 = select i1 undef, i32 undef, i32 0
-  %0 = load i8* undef, align 1
+  %0 = load i8, i8* undef, align 1
   %conv88 = zext i8 %0 to i32
   %sub89 = sub nsw i32 0, %conv88
   %v.8 = select i1 undef, i32 undef, i32 %sub89
-  %1 = load i8* null, align 1
+  %1 = load i8, i8* null, align 1
   %conv108 = zext i8 %1 to i32
-  %2 = load i8* undef, align 1
+  %2 = load i8, i8* undef, align 1
   %conv110 = zext i8 %2 to i32
   %sub111 = sub nsw i32 %conv108, %conv110
   %cmp112 = icmp slt i32 %sub111, 0

Modified: llvm/trunk/test/CodeGen/ARM/2011-12-19-sjlj-clobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-12-19-sjlj-clobber.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-12-19-sjlj-clobber.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-12-19-sjlj-clobber.ll Fri Feb 27 15:17:42 2015
@@ -22,13 +22,13 @@ bb:
   store i32 %b, i32* %tmp1, align 4
   store i8* %d, i8** %tmp2, align 4
   store i1 false, i1* %tmp3
-  %tmp7 = load i8** %c
+  %tmp7 = load i8*, i8** %c
   %tmp10 = invoke %0* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %0* (i8*, i8*, %0*)*)(i8* %tmp7, i8* %d, %0* null)
           to label %bb11 unwind label %bb15
 
 bb11:                                             ; preds = %bb
   store %0* %tmp10, %0** %myException, align 4
-  %tmp12 = load %0** %myException, align 4
+  %tmp12 = load %0*, %0** %myException, align 4
   %tmp13 = bitcast %0* %tmp12 to i8*
   invoke void @objc_exception_throw(i8* %tmp13) noreturn
           to label %bb14 unwind label %bb15

Modified: llvm/trunk/test/CodeGen/ARM/2012-01-23-PostRA-LICM.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2012-01-23-PostRA-LICM.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2012-01-23-PostRA-LICM.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2012-01-23-PostRA-LICM.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ bb3:
   br i1 %tmp, label %bb4, label %bb67
 
 bb4:                                              ; preds = %bb3
-  %tmp5 = load <4 x i32>* undef, align 16
+  %tmp5 = load <4 x i32>, <4 x i32>* undef, align 16
   %tmp6 = and <4 x i32> %tmp5, <i32 8388607, i32 8388607, i32 8388607, i32 8388607>
   %tmp7 = or <4 x i32> %tmp6, <i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216>
   %tmp8 = bitcast <4 x i32> %tmp7 to <4 x float>
@@ -41,9 +41,9 @@ bb4:
   %tmp24 = trunc i128 %tmp23 to i64
   %tmp25 = insertvalue [2 x i64] undef, i64 %tmp24, 0
   %tmp26 = insertvalue [2 x i64] %tmp25, i64 0, 1
-  %tmp27 = load float* undef, align 4
+  %tmp27 = load float, float* undef, align 4
   %tmp28 = insertelement <4 x float> undef, float %tmp27, i32 3
-  %tmp29 = load <4 x i32>* undef, align 16
+  %tmp29 = load <4 x i32>, <4 x i32>* undef, align 16
   %tmp30 = and <4 x i32> %tmp29, <i32 8388607, i32 8388607, i32 8388607, i32 8388607>
   %tmp31 = or <4 x i32> %tmp30, <i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216>
   %tmp32 = bitcast <4 x i32> %tmp31 to <4 x float>
@@ -52,10 +52,10 @@ bb4:
   %tmp35 = fmul <4 x float> %tmp34, undef
   %tmp36 = fmul <4 x float> %tmp35, undef
   %tmp37 = call arm_aapcs_vfpcc  i8* undef(i8* undef) nounwind
-  %tmp38 = load float* undef, align 4
+  %tmp38 = load float, float* undef, align 4
   %tmp39 = insertelement <2 x float> undef, float %tmp38, i32 0
   %tmp40 = call arm_aapcs_vfpcc  i8* undef(i8* undef) nounwind
-  %tmp41 = load float* undef, align 4
+  %tmp41 = load float, float* undef, align 4
   %tmp42 = insertelement <4 x float> undef, float %tmp41, i32 3
   %tmp43 = shufflevector <2 x float> %tmp39, <2 x float> undef, <4 x i32> zeroinitializer
   %tmp44 = fmul <4 x float> %tmp33, %tmp43
@@ -64,10 +64,10 @@ bb4:
   %tmp47 = fmul <4 x float> %tmp46, %tmp36
   %tmp48 = fadd <4 x float> undef, %tmp47
   %tmp49 = call arm_aapcs_vfpcc  i8* undef(i8* undef) nounwind
-  %tmp50 = load float* undef, align 4
+  %tmp50 = load float, float* undef, align 4
   %tmp51 = insertelement <4 x float> undef, float %tmp50, i32 3
   %tmp52 = call arm_aapcs_vfpcc float* null(i8* undef) nounwind
-  %tmp54 = load float* %tmp52, align 4
+  %tmp54 = load float, float* %tmp52, align 4
   %tmp55 = insertelement <4 x float> undef, float %tmp54, i32 3
   %tmp56 = fsub <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %tmp22
   %tmp57 = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %tmp56, <4 x float> %tmp55) nounwind

Modified: llvm/trunk/test/CodeGen/ARM/2012-01-24-RegSequenceLiveRange.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2012-01-24-RegSequenceLiveRange.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2012-01-24-RegSequenceLiveRange.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2012-01-24-RegSequenceLiveRange.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ target triple = "armv7-none-linux-eabi"
 ; This test case is exercising REG_SEQUENCE, and chains of REG_SEQUENCE.
 define arm_aapcs_vfpcc void @foo(i8* nocapture %arg, i8* %arg1) nounwind align 2 {
 bb:
-  %tmp = load <2 x float>* undef, align 8
+  %tmp = load <2 x float>, <2 x float>* undef, align 8
   %tmp2 = extractelement <2 x float> %tmp, i32 0
   %tmp3 = insertelement <4 x float> undef, float %tmp2, i32 0
   %tmp4 = insertelement <4 x float> %tmp3, float 0.000000e+00, i32 1

Modified: llvm/trunk/test/CodeGen/ARM/2012-01-26-CopyPropKills.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2012-01-26-CopyPropKills.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2012-01-26-CopyPropKills.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2012-01-26-CopyPropKills.ll Fri Feb 27 15:17:42 2015
@@ -56,9 +56,9 @@ bb3:
   %tmp39 = shufflevector <2 x i64> %tmp38, <2 x i64> undef, <1 x i32> zeroinitializer
   %tmp40 = bitcast <1 x i64> %tmp39 to <2 x float>
   %tmp41 = shufflevector <2 x float> %tmp40, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
-  %tmp42 = load <4 x float>* null, align 16
+  %tmp42 = load <4 x float>, <4 x float>* null, align 16
   %tmp43 = fmul <4 x float> %tmp42, %tmp41
-  %tmp44 = load <4 x float>* undef, align 16
+  %tmp44 = load <4 x float>, <4 x float>* undef, align 16
   %tmp45 = fadd <4 x float> undef, %tmp43
   %tmp46 = fadd <4 x float> undef, %tmp45
   %tmp47 = bitcast <4 x float> %tmp36 to <2 x i64>
@@ -76,7 +76,7 @@ bb3:
   %tmp59 = fmul <4 x float> undef, %tmp58
   %tmp60 = fadd <4 x float> %tmp59, undef
   %tmp61 = fadd <4 x float> %tmp60, zeroinitializer
-  %tmp62 = load void (i8*, i8*)** undef, align 4
+  %tmp62 = load void (i8*, i8*)*, void (i8*, i8*)** undef, align 4
   call arm_aapcs_vfpcc  void %tmp62(i8* sret undef, i8* undef) nounwind
   %tmp63 = bitcast <4 x float> %tmp46 to i128
   %tmp64 = bitcast <4 x float> %tmp54 to i128
@@ -96,7 +96,7 @@ bb3:
   call arm_aapcs_vfpcc  void @bar(i8* sret null, [8 x i64] %tmp77) nounwind
   %tmp78 = call arm_aapcs_vfpcc  i8* null(i8* null) nounwind
   %tmp79 = bitcast i8* %tmp78 to i512*
-  %tmp80 = load i512* %tmp79, align 16
+  %tmp80 = load i512, i512* %tmp79, align 16
   %tmp81 = lshr i512 %tmp80, 128
   %tmp82 = trunc i512 %tmp80 to i128
   %tmp83 = trunc i512 %tmp81 to i128
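
Function-pointer loads follow the same pattern as the %tmp62 line above: the full function type is spelled once as the result type and once, with one more *, as the pointer-operand type. Sketch in the new syntax (the @callback global is hypothetical):

  @callback = external global void (i8*, i8*)*

  %f = load void (i8*, i8*)*, void (i8*, i8*)** @callback
  call void %f(i8* null, i8* null)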

Modified: llvm/trunk/test/CodeGen/ARM/2012-02-01-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2012-02-01-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2012-02-01-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2012-02-01-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ target triple = "armv7-none-linux-gnueab
 @foo = external global %0, align 16
 
 define arm_aapcs_vfpcc void @bar(float, i1 zeroext, i1 zeroext) nounwind {
-  %4 = load <4 x float>* getelementptr inbounds (%0* @foo, i32 0, i32 0), align 16
+  %4 = load <4 x float>, <4 x float>* getelementptr inbounds (%0* @foo, i32 0, i32 0), align 16
   %5 = extractelement <4 x float> %4, i32 0
   %6 = extractelement <4 x float> %4, i32 1
   %7 = extractelement <4 x float> %4, i32 2

Modified: llvm/trunk/test/CodeGen/ARM/2012-03-13-DAGCombineBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2012-03-13-DAGCombineBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2012-03-13-DAGCombineBug.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2012-03-13-DAGCombineBug.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 define void @test_hi_short3(<3 x i16> * nocapture %srcA, <2 x i16> * nocapture %dst) nounwind {
 entry:
 ; CHECK: vst1.32
-  %0 = load <3 x i16> * %srcA, align 8
+  %0 = load <3 x i16> , <3 x i16> * %srcA, align 8
   %1 = shufflevector <3 x i16> %0, <3 x i16> undef, <2 x i32> <i32 2, i32 undef>
   store <2 x i16> %1, <2 x i16> * %dst, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll Fri Feb 27 15:17:42 2015
@@ -27,13 +27,13 @@
 define i32 @f1(i32* nocapture %p1, i32* nocapture %p2) nounwind {
 entry:
   store volatile i32 65540, i32* %p1, align 4
-  %0 = load volatile i32* %p2, align 4
+  %0 = load volatile i32, i32* %p2, align 4
   ret i32 %0
 }
 
 define i32 @f2(i32* nocapture %p1, i32* nocapture %p2) nounwind {
 entry:
   store i32 65540, i32* %p1, align 4
-  %0 = load i32* %p2, align 4
+  %0 = load i32, i32* %p2, align 4
   ret i32 %0
 }
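
The qualifier order is unchanged: volatile still sits between the opcode and the (now explicit) value type, so f1 and f2 above differ only in that keyword. Sketch, assuming an ordinary i32* %p:

  %x = load volatile i32, i32* %p, align 4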

Modified: llvm/trunk/test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll Fri Feb 27 15:17:42 2015
@@ -129,7 +129,7 @@ define arm_aapcs_vfpcc void @foo(float,
   %45 = fmul <4 x float> undef, undef
   %46 = fmul <4 x float> %45, %43
   %47 = fmul <4 x float> undef, %44
-  %48 = load <4 x float>* undef, align 8
+  %48 = load <4 x float>, <4 x float>* undef, align 8
   %49 = bitcast <4 x float> %48 to <2 x i64>
   %50 = shufflevector <2 x i64> %49, <2 x i64> undef, <1 x i32> <i32 1>
   %51 = bitcast <1 x i64> %50 to <2 x float>

Modified: llvm/trunk/test/CodeGen/ARM/2012-08-08-legalize-unaligned.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2012-08-08-legalize-unaligned.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2012-08-08-legalize-unaligned.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2012-08-08-legalize-unaligned.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ target triple = "armv7-none-linux-gnueab
 
 define void @test_hi_char8() noinline {
 entry:
-  %0 = load <4 x i8>* undef, align 1
+  %0 = load <4 x i8>, <4 x i8>* undef, align 1
   store <4 x i8> %0, <4 x i8>* null, align 4
   ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/2012-08-09-neon-extload.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2012-08-09-neon-extload.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2012-08-09-neon-extload.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2012-08-09-neon-extload.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@
 define void @test_v2i8tov2i32() {
 ; CHECK-LABEL: test_v2i8tov2i32:
 
-  %i8val = load <2 x i8>* @var_v2i8
+  %i8val = load <2 x i8>, <2 x i8>* @var_v2i8
 
   %i32val = sext <2 x i8> %i8val to <2 x i32>
   store <2 x i32> %i32val, <2 x i32>* @var_v2i32
@@ -28,7 +28,7 @@ define void @test_v2i8tov2i32() {
 define void @test_v2i8tov2i64() {
 ; CHECK-LABEL: test_v2i8tov2i64:
 
-  %i8val = load <2 x i8>* @var_v2i8
+  %i8val = load <2 x i8>, <2 x i8>* @var_v2i8
 
   %i64val = sext <2 x i8> %i8val to <2 x i64>
   store <2 x i64> %i64val, <2 x i64>* @var_v2i64
@@ -46,7 +46,7 @@ define void @test_v2i8tov2i64() {
 define void @test_v4i8tov4i16() {
 ; CHECK-LABEL: test_v4i8tov4i16:
 
-  %i8val = load <4 x i8>* @var_v4i8
+  %i8val = load <4 x i8>, <4 x i8>* @var_v4i8
 
   %i16val = sext <4 x i8> %i8val to <4 x i16>
   store <4 x i16> %i16val, <4 x i16>* @var_v4i16
@@ -61,7 +61,7 @@ define void @test_v4i8tov4i16() {
 define void @test_v4i8tov4i32() {
 ; CHECK-LABEL: test_v4i8tov4i32:
 
-  %i8val = load <4 x i8>* @var_v4i8
+  %i8val = load <4 x i8>, <4 x i8>* @var_v4i8
 
   %i16val = sext <4 x i8> %i8val to <4 x i32>
   store <4 x i32> %i16val, <4 x i32>* @var_v4i32
@@ -75,7 +75,7 @@ define void @test_v4i8tov4i32() {
 define void @test_v2i16tov2i32() {
 ; CHECK-LABEL: test_v2i16tov2i32:
 
-  %i16val = load <2 x i16>* @var_v2i16
+  %i16val = load <2 x i16>, <2 x i16>* @var_v2i16
 
   %i32val = sext <2 x i16> %i16val to <2 x i32>
   store <2 x i32> %i32val, <2 x i32>* @var_v2i32
@@ -90,7 +90,7 @@ define void @test_v2i16tov2i32() {
 define void @test_v2i16tov2i64() {
 ; CHECK-LABEL: test_v2i16tov2i64:
 
-  %i16val = load <2 x i16>* @var_v2i16
+  %i16val = load <2 x i16>, <2 x i16>* @var_v2i16
 
   %i64val = sext <2 x i16> %i16val to <2 x i64>
   store <2 x i64> %i64val, <2 x i64>* @var_v2i64

Modified: llvm/trunk/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@
 define void @sextload_v4i8_c(<4 x i8>* %v) nounwind {
 ;CHECK-LABEL: sextload_v4i8_c:
 entry:
-  %0 = load <4 x i8>* %v, align 8
+  %0 = load <4 x i8>, <4 x i8>* %v, align 8
   %v0  = sext <4 x i8> %0 to <4 x i32>
 ;CHECK: vmull
   %v1 = mul <4 x i32>  %v0, <i32 3, i32 3, i32 3, i32 3>
@@ -28,7 +28,7 @@ entry:
 define void @sextload_v2i8_c(<2 x i8>* %v) nounwind {
 ;CHECK-LABEL: sextload_v2i8_c:
 entry:
-  %0   = load <2 x i8>* %v, align 8
+  %0   = load <2 x i8>, <2 x i8>* %v, align 8
   %v0  = sext <2 x i8>  %0 to <2 x i64>
 ;CHECK: vmull
   %v1  = mul <2 x i64>  %v0, <i64 3, i64 3>
@@ -41,7 +41,7 @@ entry:
 define void @sextload_v2i16_c(<2 x i16>* %v) nounwind {
 ;CHECK-LABEL: sextload_v2i16_c:
 entry:
-  %0   = load <2 x i16>* %v, align 8
+  %0   = load <2 x i16>, <2 x i16>* %v, align 8
   %v0  = sext <2 x i16>  %0 to <2 x i64>
 ;CHECK: vmull
   %v1  = mul <2 x i64>  %v0, <i64 3, i64 3>
@@ -56,10 +56,10 @@ entry:
 define void @sextload_v4i8_v(<4 x i8>* %v, <4 x i8>* %p) nounwind {
 ;CHECK-LABEL: sextload_v4i8_v:
 entry:
-  %0 = load <4 x i8>* %v, align 8
+  %0 = load <4 x i8>, <4 x i8>* %v, align 8
   %v0  = sext <4 x i8> %0 to <4 x i32>
 
-  %1  = load <4 x i8>* %p, align 8
+  %1  = load <4 x i8>, <4 x i8>* %p, align 8
   %v2 = sext <4 x i8> %1 to <4 x i32>
 ;CHECK: vmull
   %v1 = mul <4 x i32>  %v0, %v2
@@ -72,10 +72,10 @@ entry:
 define void @sextload_v2i8_v(<2 x i8>* %v, <2 x i8>* %p) nounwind {
 ;CHECK-LABEL: sextload_v2i8_v:
 entry:
-  %0 = load <2 x i8>* %v, align 8
+  %0 = load <2 x i8>, <2 x i8>* %v, align 8
   %v0  = sext <2 x i8> %0 to <2 x i64>
 
-  %1  = load <2 x i8>* %p, align 8
+  %1  = load <2 x i8>, <2 x i8>* %p, align 8
   %v2 = sext <2 x i8> %1 to <2 x i64>
 ;CHECK: vmull
   %v1 = mul <2 x i64>  %v0, %v2
@@ -88,10 +88,10 @@ entry:
 define void @sextload_v2i16_v(<2 x i16>* %v, <2 x i16>* %p) nounwind {
 ;CHECK-LABEL: sextload_v2i16_v:
 entry:
-  %0 = load <2 x i16>* %v, align 8
+  %0 = load <2 x i16>, <2 x i16>* %v, align 8
   %v0  = sext <2 x i16> %0 to <2 x i64>
 
-  %1  = load <2 x i16>* %p, align 8
+  %1  = load <2 x i16>, <2 x i16>* %p, align 8
   %v2 = sext <2 x i16> %1 to <2 x i64>
 ;CHECK: vmull
   %v1 = mul <2 x i64>  %v0, %v2
@@ -106,10 +106,10 @@ entry:
 define void @sextload_v4i8_vs(<4 x i8>* %v, <4 x i16>* %p) nounwind {
 ;CHECK-LABEL: sextload_v4i8_vs:
 entry:
-  %0 = load <4 x i8>* %v, align 8
+  %0 = load <4 x i8>, <4 x i8>* %v, align 8
   %v0  = sext <4 x i8> %0 to <4 x i32>
 
-  %1  = load <4 x i16>* %p, align 8
+  %1  = load <4 x i16>, <4 x i16>* %p, align 8
   %v2 = sext <4 x i16> %1 to <4 x i32>
 ;CHECK: vmull
   %v1 = mul <4 x i32>  %v0, %v2
@@ -122,10 +122,10 @@ entry:
 define void @sextload_v2i8_vs(<2 x i8>* %v, <2 x i16>* %p) nounwind {
 ;CHECK-LABEL: sextload_v2i8_vs:
 entry:
-  %0 = load <2 x i8>* %v, align 8
+  %0 = load <2 x i8>, <2 x i8>* %v, align 8
   %v0  = sext <2 x i8> %0 to <2 x i64>
 
-  %1  = load <2 x i16>* %p, align 8
+  %1  = load <2 x i16>, <2 x i16>* %p, align 8
   %v2 = sext <2 x i16> %1 to <2 x i64>
 ;CHECK: vmull
   %v1 = mul <2 x i64>  %v0, %v2
@@ -138,10 +138,10 @@ entry:
 define void @sextload_v2i16_vs(<2 x i16>* %v, <2 x i32>* %p) nounwind {
 ;CHECK-LABEL: sextload_v2i16_vs:
 entry:
-  %0 = load <2 x i16>* %v, align 8
+  %0 = load <2 x i16>, <2 x i16>* %v, align 8
   %v0  = sext <2 x i16> %0 to <2 x i64>
 
-  %1  = load <2 x i32>* %p, align 8
+  %1  = load <2 x i32>, <2 x i32>* %p, align 8
   %v2 = sext <2 x i32> %1 to <2 x i64>
 ;CHECK: vmull
   %v1 = mul <2 x i64>  %v0, %v2

Modified: llvm/trunk/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll Fri Feb 27 15:17:42 2015
@@ -45,7 +45,7 @@ declare void @f(double);
 define void @test_byval_8_bytes_alignment_fixed_arg(i32 %n1, %struct_t* byval %val) nounwind {
 entry:
   %a = getelementptr inbounds %struct_t, %struct_t* %val, i32 0, i32 0
-  %0 = load double* %a
+  %0 = load double, double* %a
   call void (double)* @f(double %0)
   ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ declare i32 @printf(i8*, ...)
 define void @test_byval_usage_scheduling(i32 %n1, i32 %n2, %struct_t* byval %val) nounwind {
 entry:
   %a = getelementptr inbounds %struct_t, %struct_t* %val, i32 0, i32 0
-  %0 = load double* %a
+  %0 = load double, double* %a
   %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), double %0)
   ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/2013-01-21-PR14992.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2013-01-21-PR14992.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2013-01-21-PR14992.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2013-01-21-PR14992.ll Fri Feb 27 15:17:42 2015
@@ -6,11 +6,11 @@
 ;CHECK-LABEL: foo:
 define i32 @foo(i32* %a) nounwind optsize {
 entry:
-  %0 = load i32* %a, align 4
+  %0 = load i32, i32* %a, align 4
   %arrayidx1 = getelementptr inbounds i32, i32* %a, i32 1
-  %1 = load i32* %arrayidx1, align 4
+  %1 = load i32, i32* %arrayidx1, align 4
   %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 2
-  %2 = load i32* %arrayidx2, align 4
+  %2 = load i32, i32* %arrayidx2, align 4
   %add.ptr = getelementptr inbounds i32, i32* %a, i32 3
 ;Make sure we do not have a duplicated register in the front of the reg list
 ;EXPECTED:  ldm [[BASE:r[0-9]+]]!, {[[REG:r[0-9]+]], {{r[0-9]+}},

Modified: llvm/trunk/test/CodeGen/ARM/2013-04-18-load-overlap-PR14824.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2013-04-18-load-overlap-PR14824.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2013-04-18-load-overlap-PR14824.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2013-04-18-load-overlap-PR14824.ll Fri Feb 27 15:17:42 2015
@@ -9,9 +9,9 @@ define void @sample_test(<8 x i64> * %se
 entry:
 
 ; Load %source
-  %s0 = load <8 x i64> * %source, align 64
+  %s0 = load <8 x i64> , <8 x i64> * %source, align 64
   %arrayidx64 = getelementptr inbounds <8 x i64>, <8 x i64> * %source, i32 6
-  %s120 = load <8 x i64> * %arrayidx64, align 64
+  %s120 = load <8 x i64> , <8 x i64> * %arrayidx64, align 64
   %s122 = bitcast <8 x i64> %s120 to i512
   %data.i.i677.48.extract.shift = lshr i512 %s122, 384
   %data.i.i677.48.extract.trunc = trunc i512 %data.i.i677.48.extract.shift to i64
@@ -33,9 +33,9 @@ entry:
   %s130 = insertelement <8 x i64> %s129, i64 %data.i.i677.56.extract.trunc, i32 7
 
 ; Load %secondSource
-  %s1 = load <8 x i64> * %secondSource, align 64
+  %s1 = load <8 x i64> , <8 x i64> * %secondSource, align 64
   %arrayidx67 = getelementptr inbounds <8 x i64>, <8 x i64> * %secondSource, i32 6
-  %s121 = load <8 x i64> * %arrayidx67, align 64
+  %s121 = load <8 x i64> , <8 x i64> * %arrayidx67, align 64
   %s131 = bitcast <8 x i64> %s121 to i512
   %data.i1.i676.48.extract.shift = lshr i512 %s131, 384
   %data.i1.i676.48.extract.trunc = trunc i512 %data.i1.i676.48.extract.shift to i64
@@ -64,7 +64,7 @@ entry:
   %arrayidx72 = getelementptr inbounds <8 x i64>, <8 x i64> * %dest, i32 6
   store <8 x i64> %vecinit49.i.i702, <8 x i64> * %arrayidx72, align 64
   %arrayidx78 = getelementptr inbounds <8 x i64>, <8 x i64> * %secondSource, i32 7
-  %s141 = load <8 x i64> * %arrayidx78, align 64
+  %s141 = load <8 x i64> , <8 x i64> * %arrayidx78, align 64
   %s151 = bitcast <8 x i64> %s141 to i512
   %data.i1.i649.32.extract.shift = lshr i512 %s151, 256
   %data.i1.i649.32.extract.trunc = trunc i512 %data.i1.i649.32.extract.shift to i64

Modified: llvm/trunk/test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll Fri Feb 27 15:17:42 2015
@@ -8,45 +8,45 @@ entry:
         %sub19 = sub i32 %add, %Width
         %sub20 = add i32 %sub19, -1
         %arrayidx21 = getelementptr inbounds i8, i8* %call1, i32 %sub20
-        %0 = load i8* %arrayidx21, align 1
+        %0 = load i8, i8* %arrayidx21, align 1
         %conv22 = zext i8 %0 to i32
         %arrayidx25 = getelementptr inbounds i8, i8* %call1, i32 %sub19
-        %1 = load i8* %arrayidx25, align 1
+        %1 = load i8, i8* %arrayidx25, align 1
         %conv26 = zext i8 %1 to i32
         %mul23189 = add i32 %conv26, %conv22
         %add30 = add i32 %sub19, 1
         %arrayidx31 = getelementptr inbounds i8, i8* %call1, i32 %add30
-        %2 = load i8* %arrayidx31, align 1
+        %2 = load i8, i8* %arrayidx31, align 1
         %conv32 = zext i8 %2 to i32
 ; CHECK: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #-1]
 ; CHECK-NEXT: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #1]
         %add28190 = add i32 %mul23189, %conv32
         %sub35 = add i32 %add, -1
         %arrayidx36 = getelementptr inbounds i8, i8* %call1, i32 %sub35
-        %3 = load i8* %arrayidx36, align 1
+        %3 = load i8, i8* %arrayidx36, align 1
         %conv37 = zext i8 %3 to i32
         %add34191 = add i32 %add28190, %conv37
         %arrayidx40 = getelementptr inbounds i8, i8* %call1, i32 %add
-        %4 = load i8* %arrayidx40, align 1
+        %4 = load i8, i8* %arrayidx40, align 1
         %conv41 = zext i8 %4 to i32
         %mul42 = mul nsw i32 %conv41, 255
         %add44 = add i32 %add, 1
         %arrayidx45 = getelementptr inbounds i8, i8* %call1, i32 %add44
-        %5 = load i8* %arrayidx45, align 1
+        %5 = load i8, i8* %arrayidx45, align 1
         %conv46 = zext i8 %5 to i32
 ; CHECK: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #-1]
 ; CHECK-NEXT: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #1]
         %add49 = add i32 %add, %Width
         %sub50 = add i32 %add49, -1
         %arrayidx51 = getelementptr inbounds i8, i8* %call1, i32 %sub50
-        %6 = load i8* %arrayidx51, align 1
+        %6 = load i8, i8* %arrayidx51, align 1
         %conv52 = zext i8 %6 to i32
         %arrayidx56 = getelementptr inbounds i8, i8* %call1, i32 %add49
-        %7 = load i8* %arrayidx56, align 1
+        %7 = load i8, i8* %arrayidx56, align 1
         %conv57 = zext i8 %7 to i32
         %add61 = add i32 %add49, 1
         %arrayidx62 = getelementptr inbounds i8, i8* %call1, i32 %add61
-        %8 = load i8* %arrayidx62, align 1
+        %8 = load i8, i8* %arrayidx62, align 1
         %conv63 = zext i8 %8 to i32
 ; CHECK: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #-1]
 ; CHECK-NEXT: ldrb{{[.w]*}} r{{[0-9]*}}, [r{{[0-9]*}}, #1]

Modified: llvm/trunk/test/CodeGen/ARM/2013-05-31-char-shift-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2013-05-31-char-shift-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2013-05-31-char-shift-crash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2013-05-31-char-shift-crash.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ define arm_aapcscc void @f2(i8 signext %
 entry:
   %a.addr = alloca i8, align 1
   store i8 %a, i8* %a.addr, align 1
-  %0 = load i8* %a.addr, align 1
+  %0 = load i8, i8* %a.addr, align 1
   %conv = sext i8 %0 to i32
   %shr = ashr i32 %conv, 56
   %conv1 = trunc i32 %shr to i8

Modified: llvm/trunk/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ entry:
 ; CHECK: vorr q9, q9, q10
 ; CHECK: vst1.32 {d18, d19}, [r0]
 vector.body:
-  %wide.load = load <4 x i32>* undef, align 4
+  %wide.load = load <4 x i32>, <4 x i32>* undef, align 4
   %0 = and <4 x i32> %wide.load, <i32 -16711936, i32 -16711936, i32 -16711936, i32 -16711936>
   %1 = sub <4 x i32> %wide.load, zeroinitializer
   %2 = and <4 x i32> %1, <i32 16711680, i32 16711680, i32 16711680, i32 16711680>

Modified: llvm/trunk/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll Fri Feb 27 15:17:42 2015
@@ -38,13 +38,13 @@ entry:
 define <8 x i8> @vtbx4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B, <8 x i8>* %C) nounwind {
 ; CHECK: vtbx4:
 ; CHECK: VTBX4 {{.*}}, pred:14, pred:%noreg, %Q{{[0-9]+}}_Q{{[0-9]+}}<imp-use>
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load %struct.__neon_int8x8x4_t* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load %struct.__neon_int8x8x4_t, %struct.__neon_int8x8x4_t* %B
         %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
         %tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
-	%tmp7 = load <8 x i8>* %C
+	%tmp7 = load <8 x i8>, <8 x i8>* %C
 	%tmp8 = call <8 x i8> @llvm.arm.neon.vtbx4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7)
   call void @bar2(%struct.__neon_int8x8x4_t %tmp2, <8 x i8> %tmp8)
 	ret <8 x i8> %tmp8
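
First-class aggregate loads are rewritten the same way, with the full struct type spelled out as the value type, as in the %tmp2 line above. Minimal self-contained sketch (the %pair type and @first function are hypothetical):

  %pair = type { i32, float }

  define i32 @first(%pair* %p) {
  entry:
    %v = load %pair, %pair* %p
    %i = extractvalue %pair %v, 0
    ret i32 %i
  }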

Modified: llvm/trunk/test/CodeGen/ARM/2015-01-21-thumbv4t-ldstr-opt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2015-01-21-thumbv4t-ldstr-opt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2015-01-21-thumbv4t-ldstr-opt.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2015-01-21-thumbv4t-ldstr-opt.ll Fri Feb 27 15:17:42 2015
@@ -12,10 +12,10 @@ define i32 @test1(i32* %p) {
   %4 = getelementptr inbounds i32, i32* %p, i32 4
 
 ; CHECK-NEXT: ldm [[NEWBASE]],
-  %5 = load i32* %1, align 4
-  %6 = load i32* %2, align 4
-  %7 = load i32* %3, align 4
-  %8 = load i32* %4, align 4
+  %5 = load i32, i32* %1, align 4
+  %6 = load i32, i32* %2, align 4
+  %7 = load i32, i32* %3, align 4
+  %8 = load i32, i32* %4, align 4
 
   %9 = add nsw i32 %5, %6
   %10 = add nsw i32 %9, %7
@@ -36,10 +36,10 @@ define i32 @test2(i32* %p) {
   %4 = getelementptr inbounds i32, i32* %p, i32 5
 
 ; CHECK-NEXT: ldm [[NEWBASE]],
-  %5 = load i32* %1, align 4
-  %6 = load i32* %2, align 4
-  %7 = load i32* %3, align 4
-  %8 = load i32* %4, align 4
+  %5 = load i32, i32* %1, align 4
+  %6 = load i32, i32* %2, align 4
+  %7 = load i32, i32* %3, align 4
+  %8 = load i32, i32* %4, align 4
 
   %9 = add nsw i32 %5, %6
   %10 = add nsw i32 %9, %7
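
The getelementptr lines in this test already lead with an explicit source element type; the load rewrite brings the two instructions into the same shape, with the type first and the typed pointer second. A sketch pairing them:

    %1 = getelementptr inbounds i32, i32* %p, i32 1  ; explicit element type
    %5 = load i32, i32* %1, align 4                  ; explicit result type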

Modified: llvm/trunk/test/CodeGen/ARM/MergeConsecutiveStores.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/MergeConsecutiveStores.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/MergeConsecutiveStores.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/MergeConsecutiveStores.ll Fri Feb 27 15:17:42 2015
@@ -13,12 +13,12 @@ define void @MergeLoadStoreBaseIndexOffs
   %.08 = phi i8* [ %b, %0 ], [ %10, %1 ]
   %.0 = phi i32* [ %a, %0 ], [ %2, %1 ]
   %2 = getelementptr inbounds i32, i32* %.0, i32 1
-  %3 = load i32* %.0, align 1
+  %3 = load i32, i32* %.0, align 1
   %4 = getelementptr inbounds i8, i8* %c, i32 %3
-  %5 = load i8* %4, align 1
+  %5 = load i8, i8* %4, align 1
   %6 = add i32 %3, 1
   %7 = getelementptr inbounds i8, i8* %c, i32 %6
-  %8 = load i8* %7, align 1
+  %8 = load i8, i8* %7, align 1
   store i8 %5, i8* %.08, align 1
   %9 = getelementptr inbounds i8, i8* %.08, i32 1
   store i8 %8, i8* %9, align 1
@@ -45,13 +45,13 @@ define void @MergeLoadStoreBaseIndexOffs
   %.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
   %.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
   %2 = getelementptr inbounds i8, i8* %.0, i32 1
-  %3 = load i8* %.0, align 1
+  %3 = load i8, i8* %.0, align 1
   %4 = sext i8 %3 to i32
   %5 = getelementptr inbounds i8, i8* %c, i32 %4
-  %6 = load i8* %5, align 1
+  %6 = load i8, i8* %5, align 1
   %7 = add i32 %4, 1
   %8 = getelementptr inbounds i8, i8* %c, i32 %7
-  %9 = load i8* %8, align 1
+  %9 = load i8, i8* %8, align 1
   store i8 %6, i8* %.08, align 1
   %10 = getelementptr inbounds i8, i8* %.08, i32 1
   store i8 %9, i8* %10, align 1
@@ -77,14 +77,14 @@ define void @loadStoreBaseIndexOffsetSex
   %.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
   %.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
   %2 = getelementptr inbounds i8, i8* %.0, i32 1
-  %3 = load i8* %.0, align 1
+  %3 = load i8, i8* %.0, align 1
   %4 = sext i8 %3 to i32
   %5 = getelementptr inbounds i8, i8* %c, i32 %4
-  %6 = load i8* %5, align 1
+  %6 = load i8, i8* %5, align 1
   %7 = add i8 %3, 1
   %wrap.4 = sext i8 %7 to i32
   %8 = getelementptr inbounds i8, i8* %c, i32 %wrap.4
-  %9 = load i8* %8, align 1
+  %9 = load i8, i8* %8, align 1
   store i8 %6, i8* %.08, align 1
   %10 = getelementptr inbounds i8, i8* %.08, i32 1
   store i8 %9, i8* %10, align 1

Modified: llvm/trunk/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll Fri Feb 27 15:17:42 2015
@@ -10,10 +10,10 @@ entry:
   %i.addr = alloca i32, align 4
   %buffer = alloca [4096 x i8], align 1
   store i32 %i, i32* %i.addr, align 4
-  %0 = load i32* %i.addr, align 4
+  %0 = load i32, i32* %i.addr, align 4
   %rem = urem i32 %0, 4096
   %arrayidx = getelementptr inbounds [4096 x i8], [4096 x i8]* %buffer, i32 0, i32 %rem
-  %1 = load volatile i8* %arrayidx, align 1
+  %1 = load volatile i8, i8* %arrayidx, align 1
   ret i8 %1
 }
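
Qualifiers are unaffected by the rewrite: volatile stays immediately after the load keyword, and only the type list changes. A sketch using the operand names from the chkstk hunk above:

    ; 'volatile' precedes the explicit result type
    %1 = load volatile i8, i8* %arrayidx, align 1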
 

Modified: llvm/trunk/test/CodeGen/ARM/Windows/dllimport.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/Windows/dllimport.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/Windows/dllimport.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/Windows/dllimport.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ declare dllimport arm_aapcs_vfpcc i32 @e
 declare arm_aapcs_vfpcc i32 @internal()
 
 define arm_aapcs_vfpcc i32 @get_var() {
-  %1 = load i32* @var, align 4
+  %1 = load i32, i32* @var, align 4
   ret i32 %1
 }
 
@@ -20,7 +20,7 @@ define arm_aapcs_vfpcc i32 @get_var() {
 ; CHECK: bx lr
 
 define arm_aapcs_vfpcc i32 @get_ext() {
-  %1 = load i32* @ext, align 4
+  %1 = load i32, i32* @ext, align 4
   ret i32 %1
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/Windows/frame-register.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/Windows/frame-register.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/Windows/frame-register.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/Windows/frame-register.ll Fri Feb 27 15:17:42 2015
@@ -8,12 +8,12 @@ entry:
   %i.addr = alloca i32, align 4
   %j = alloca i32, align 4
   store i32 %i, i32* %i.addr, align 4
-  %0 = load i32* %i.addr, align 4
+  %0 = load i32, i32* %i.addr, align 4
   %add = add nsw i32 %0, 1
   store i32 %add, i32* %j, align 4
-  %1 = load i32* %j, align 4
+  %1 = load i32, i32* %j, align 4
   call void @callee(i32 %1)
-  %2 = load i32* %j, align 4
+  %2 = load i32, i32* %j, align 4
   %add1 = add nsw i32 %2, 1
   ret i32 %add1
 }

Modified: llvm/trunk/test/CodeGen/ARM/Windows/movw-movt-relocations.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/Windows/movw-movt-relocations.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/Windows/movw-movt-relocations.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/Windows/movw-movt-relocations.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@
 ; Function Attrs: nounwind optsize readonly
 define i32 @relocation(i32 %j, i32 %k) {
 entry:
-  %0 = load i32* @i, align 4
-  %1 = load i32* @j, align 4
+  %0 = load i32, i32* @i, align 4
+  %1 = load i32, i32* @j, align 4
   %add = add nsw i32 %1, %0
   ret i32 %add
 }

Modified: llvm/trunk/test/CodeGen/ARM/Windows/pic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/Windows/pic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/Windows/pic.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/Windows/pic.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 
 define arm_aapcs_vfpcc i8 @return_external() {
 entry:
-  %0 = load i8* @external, align 1
+  %0 = load i8, i8* @external, align 1
   ret i8 %0
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/Windows/stack-probe-non-default.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/Windows/stack-probe-non-default.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/Windows/stack-probe-non-default.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/Windows/stack-probe-non-default.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ entry:
   %0 = getelementptr inbounds [4096 x i8], [4096 x i8]* %buffer, i32 0, i32 0
   call arm_aapcs_vfpcc void @initialise(i8* %0)
   %arrayidx = getelementptr inbounds [4096 x i8], [4096 x i8]* %buffer, i32 0, i32 %offset
-  %1 = load i8* %arrayidx, align 1
+  %1 = load i8, i8* %arrayidx, align 1
   ret i8 %1
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/Windows/vla.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/Windows/vla.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/Windows/vla.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/Windows/vla.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define arm_aapcs_vfpcc i8 @function(i32
 entry:
   %vla = alloca i8, i32 %sz, align 1
   %arrayidx = getelementptr inbounds i8, i8* %vla, i32 %idx
-  %0 = load volatile i8* %arrayidx, align 1
+  %0 = load volatile i8, i8* %arrayidx, align 1
   ret i8 %0
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/a15-partial-update.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/a15-partial-update.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/a15-partial-update.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/a15-partial-update.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ define <2 x float> @t1(float* %A, <2 x f
 ; generated.
 
 ; CHECK-NOT: vmov.{{.*}} d{{[0-9]+}},
-  %tmp2 = load float* %A, align 4
+  %tmp2 = load float, float* %A, align 4
   %tmp3 = insertelement <2 x float> %B, float %tmp2, i32 1
   ret <2 x float> %tmp3
 }
@@ -29,7 +29,7 @@ loop:
   %newcount = add i32 %oldcount, 1
   %p1 = getelementptr <4 x i8>, <4 x i8> *%in, i32 %newcount
   %p2 = getelementptr <4 x i8>, <4 x i8> *%out, i32 %newcount
-  %tmp1 = load <4 x i8> *%p1, align 4
+  %tmp1 = load <4 x i8> , <4 x i8> *%p1, align 4
   store <4 x i8> %tmp1, <4 x i8> *%p2
   %cmp = icmp eq i32 %newcount, %n
   br i1 %cmp, label %loop, label %ret
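
Vector loads are rewritten exactly like scalar ones. Whitespace between tokens is not significant in the textual IR, which is why the spelling 'load <4 x i8> , <4 x i8> *%p1' in the hunk above still parses; a normalized form of the same line:

    ; vector load: explicit vector result type, then the pointer operand
    %tmp1 = load <4 x i8>, <4 x i8>* %p1, align 4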

Modified: llvm/trunk/test/CodeGen/ARM/addrmode.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/addrmode.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/addrmode.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/addrmode.ll Fri Feb 27 15:17:42 2015
@@ -4,14 +4,14 @@
 define i32 @t1(i32 %a) {
 	%b = mul i32 %a, 9
         %c = inttoptr i32 %b to i32*
-        %d = load i32* %c
+        %d = load i32, i32* %c
 	ret i32 %d
 }
 
 define i32 @t2(i32 %a) {
 	%b = mul i32 %a, -7
         %c = inttoptr i32 %b to i32*
-        %d = load i32* %c
+        %d = load i32, i32* %c
 	ret i32 %d
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/aliases.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/aliases.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/aliases.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/aliases.ll Fri Feb 27 15:17:42 2015
@@ -33,9 +33,9 @@ define i32 @foo_f() {
 
 define i32 @test() {
 entry:
-   %tmp = load i32* @foo1
-   %tmp1 = load i32* @foo2
-   %tmp0 = load i32* @bar_i
+   %tmp = load i32, i32* @foo1
+   %tmp1 = load i32, i32* @foo2
+   %tmp0 = load i32, i32* @bar_i
    %tmp2 = call i32 @foo_f()
    %tmp3 = add i32 %tmp, %tmp2
    %tmp4 = call %FunTy* @bar_f()

Modified: llvm/trunk/test/CodeGen/ARM/alloc-no-stack-realign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/alloc-no-stack-realign.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/alloc-no-stack-realign.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/alloc-no-stack-realign.ll Fri Feb 27 15:17:42 2015
@@ -31,9 +31,9 @@ entry:
 ; NO-REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]!
 ; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]
  %retval = alloca <16 x float>, align 16
- %0 = load <16 x float>* @T3_retval, align 16
+ %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
- %1 = load <16 x float>* %retval
+ %1 = load <16 x float>, <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ret void
 }
@@ -66,9 +66,9 @@ entry:
 ; REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]!
 ; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]
  %retval = alloca <16 x float>, align 16
- %0 = load <16 x float>* @T3_retval, align 16
+ %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
- %1 = load <16 x float>* %retval
+ %1 = load <16 x float>, <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/arm-and-tst-peephole.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/arm-and-tst-peephole.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/arm-and-tst-peephole.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/arm-and-tst-peephole.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ tailrecurse:
   %acc.tr = phi i32 [ %or, %sw.bb ], [ %acc, %entry ]
   %lsr.iv24 = bitcast %struct.Foo* %lsr.iv2 to i8**
   %scevgep5 = getelementptr i8*, i8** %lsr.iv24, i32 -1
-  %tmp2 = load i8** %scevgep5
+  %tmp2 = load i8*, i8** %scevgep5
   %0 = ptrtoint i8* %tmp2 to i32
 
 ; ARM:      ands {{r[0-9]+}}, {{r[0-9]+}}, #3
@@ -90,7 +90,7 @@ sw.epilog:
 define internal zeroext i8 @bar(%struct.S* %x, %struct.S* nocapture %y) nounwind readonly {
 entry:
   %0 = getelementptr inbounds %struct.S, %struct.S* %x, i32 0, i32 1, i32 0
-  %1 = load i8* %0, align 1
+  %1 = load i8, i8* %0, align 1
   %2 = zext i8 %1 to i32
 ; ARM: ands
 ; THUMB: ands
@@ -104,7 +104,7 @@ entry:
 bb:                                               ; preds = %entry
 ; V8-NEXT: %bb
   %5 = getelementptr inbounds %struct.S, %struct.S* %y, i32 0, i32 1, i32 0
-  %6 = load i8* %5, align 1
+  %6 = load i8, i8* %5, align 1
   %7 = zext i8 %6 to i32
 ; ARM: andsne
 ; THUMB: ands
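
When the loaded value is itself a pointer, the explicit type is the pointee of the operand's type, so one level of indirection is peeled off. A sketch using the names from the peephole hunk above:

    ; result type i8*, operand type i8**
    %tmp2 = load i8*, i8** %scevgep5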

Modified: llvm/trunk/test/CodeGen/ARM/arm-modifier.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/arm-modifier.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/arm-modifier.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/arm-modifier.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@ entry:
   %scale2.addr = alloca float, align 4
   store float %scale, float* %scale.addr, align 4
   store float %scale2, float* %scale2.addr, align 4
-  %tmp = load float* %scale.addr, align 4
-  %tmp1 = load float* %scale2.addr, align 4
+  %tmp = load float, float* %scale.addr, align 4
+  %tmp1 = load float, float* %scale2.addr, align 4
   call void asm sideeffect "vmul.f32    q0, q0, ${0:y} \0A\09vmul.f32    q1, q1, ${0:y} \0A\09vmul.f32    q1, q0, ${1:y} \0A\09", "w,w,~{q0},~{q1}"(float %tmp, float %tmp1) nounwind
   ret i32 0
 }
@@ -49,8 +49,8 @@ entry:
 ; CHECK: stm {{lr|r[0-9]+}}, {[[REG1:(r[0-9]+)]], r{{[0-9]+}}}
 ; CHECK: adds {{lr|r[0-9]+}}, [[REG1]]
 ; CHECK: ldm {{lr|r[0-9]+}}, {r{{[0-9]+}}, r{{[0-9]+}}}
-%tmp = load i64* @f3_var, align 4
-%tmp1 = load i64* @f3_var2, align 4
+%tmp = load i64, i64* @f3_var, align 4
+%tmp1 = load i64, i64* @f3_var2, align 4
 %0 = call i64 asm sideeffect "stm ${0:m}, ${1:M}\0A\09adds $3, $1\0A\09", "=*m,=r,1,r"(i64** @f3_ptr, i64 %tmp, i64 %tmp1) nounwind
 store i64 %0, i64* @f3_var, align 4
 %1 = call i64 asm sideeffect "ldm ${1:m}, ${0:M}\0A\09", "=r,*m"(i64** @f3_ptr) nounwind

Modified: llvm/trunk/test/CodeGen/ARM/atomic-64bit.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/atomic-64bit.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/atomic-64bit.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/atomic-64bit.ll Fri Feb 27 15:17:42 2015
@@ -214,7 +214,7 @@ define i64 @test8(i64* %ptr) {
 ; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
 ; CHECK-THUMB: dmb {{ish$}}
 
-  %r = load atomic i64* %ptr seq_cst, align 8
+  %r = load atomic i64, i64* %ptr seq_cst, align 8
   ret i64 %r
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/atomic-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/atomic-load-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/atomic-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/atomic-load-store.ll Fri Feb 27 15:17:42 2015
@@ -44,7 +44,7 @@ define i32 @test2(i32* %ptr) {
 ; THUMBM-LABEL: test2
 ; THUMBM: ldr
 ; THUMBM: dmb sy
-  %val = load atomic i32* %ptr seq_cst, align 4
+  %val = load atomic i32, i32* %ptr seq_cst, align 4
   ret i32 %val
 }
 
@@ -76,7 +76,7 @@ define void @test3(i8* %ptr1, i8* %ptr2)
 ; ARMV6-NOT: mcr
 ; THUMBM-LABEL: test3
 ; THUMBM-NOT: dmb sy
-  %val = load atomic i8* %ptr1 unordered, align 1
+  %val = load atomic i8, i8* %ptr1 unordered, align 1
   store atomic i8 %val, i8* %ptr2 unordered, align 1
   ret void
 }
@@ -87,7 +87,7 @@ define void @test4(i8* %ptr1, i8* %ptr2)
 ; THUMBONE: ___sync_lock_test_and_set_1
 ; ARMV6-LABEL: test4
 ; THUMBM-LABEL: test4
-  %val = load atomic i8* %ptr1 seq_cst, align 1
+  %val = load atomic i8, i8* %ptr1 seq_cst, align 1
   store atomic i8 %val, i8* %ptr2 seq_cst, align 1
   ret void
 }
@@ -95,7 +95,7 @@ define void @test4(i8* %ptr1, i8* %ptr2)
 define i64 @test_old_load_64bit(i64* %p) {
 ; ARMV4-LABEL: test_old_load_64bit
 ; ARMV4: ___sync_val_compare_and_swap_8
-  %1 = load atomic i64* %p seq_cst, align 8
+  %1 = load atomic i64, i64* %p seq_cst, align 8
   ret i64 %1
 }
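
Atomic loads keep the atomic keyword and the trailing ordering; the explicit type slots in between them and the pointer operand. A sketch combining forms exercised in this file (%v2 is a hypothetical name; the tests use %val throughout):

    ; load atomic <ty>, <ty>* <ptr> <ordering>, align <n>
    %val = load atomic i32, i32* %ptr seq_cst, align 4
    %v2 = load atomic i8, i8* %ptr1 unordered, align 1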
 

Modified: llvm/trunk/test/CodeGen/ARM/atomic-op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/atomic-op.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/atomic-op.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/atomic-op.ll Fri Feb 27 15:17:42 2015
@@ -25,7 +25,7 @@ entry:
 	store i32 3855, i32* %ort
 	store i32 3855, i32* %xort
 	store i32 4, i32* %temp
-	%tmp = load i32* %temp
+	%tmp = load i32, i32* %temp
   ; CHECK: ldrex
   ; CHECK: add
   ; CHECK: strex
@@ -308,8 +308,8 @@ define i32 @test_cmpxchg_fail_order1(i32
 
 define i32 @load_load_add_acquire(i32* %mem1, i32* %mem2) nounwind {
 ; CHECK-LABEL: load_load_add_acquire
-  %val1 = load atomic i32* %mem1 acquire, align 4
-  %val2 = load atomic i32* %mem2 acquire, align 4
+  %val1 = load atomic i32, i32* %mem1 acquire, align 4
+  %val2 = load atomic i32, i32* %mem2 acquire, align 4
   %tmp = add i32 %val1, %val2
 
 ; CHECK: ldr {{r[0-9]}}, [r0]
@@ -353,7 +353,7 @@ define void @store_store_release(i32* %m
 
 define void @load_fence_store_monotonic(i32* %mem1, i32* %mem2) {
 ; CHECK-LABEL: load_fence_store_monotonic
-  %val = load atomic i32* %mem1 monotonic, align 4
+  %val = load atomic i32, i32* %mem1 monotonic, align 4
   fence seq_cst
   store atomic i32 %val, i32* %mem2 monotonic, align 4
 

Modified: llvm/trunk/test/CodeGen/ARM/atomic-ops-v8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/atomic-ops-v8.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/atomic-ops-v8.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/atomic-ops-v8.ll Fri Feb 27 15:17:42 2015
@@ -1166,7 +1166,7 @@ define void @test_atomic_cmpxchg_i64(i64
 
 define i8 @test_atomic_load_monotonic_i8() nounwind {
 ; CHECK-LABEL: test_atomic_load_monotonic_i8:
-  %val = load atomic i8* @var8 monotonic, align 1
+  %val = load atomic i8, i8* @var8 monotonic, align 1
 ; CHECK-NOT: dmb
 ; CHECK-NOT: mcr
 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
@@ -1183,7 +1183,7 @@ define i8 @test_atomic_load_monotonic_re
   %addr_int = add i64 %base, %off
   %addr = inttoptr i64 %addr_int to i8*
 
-  %val = load atomic i8* %addr monotonic, align 1
+  %val = load atomic i8, i8* %addr monotonic, align 1
 ; CHECK-NOT: dmb
 ; CHECK-NOT: mcr
 ; CHECK-LE: ldrb r0, [r0, r2]
@@ -1196,7 +1196,7 @@ define i8 @test_atomic_load_monotonic_re
 
 define i8 @test_atomic_load_acquire_i8() nounwind {
 ; CHECK-LABEL: test_atomic_load_acquire_i8:
-  %val = load atomic i8* @var8 acquire, align 1
+  %val = load atomic i8, i8* @var8 acquire, align 1
 ; CHECK-NOT: dmb
 ; CHECK-NOT: mcr
 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
@@ -1213,7 +1213,7 @@ define i8 @test_atomic_load_acquire_i8()
 
 define i8 @test_atomic_load_seq_cst_i8() nounwind {
 ; CHECK-LABEL: test_atomic_load_seq_cst_i8:
-  %val = load atomic i8* @var8 seq_cst, align 1
+  %val = load atomic i8, i8* @var8 seq_cst, align 1
 ; CHECK-NOT: dmb
 ; CHECK-NOT: mcr
 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
@@ -1230,7 +1230,7 @@ define i8 @test_atomic_load_seq_cst_i8()
 
 define i16 @test_atomic_load_monotonic_i16() nounwind {
 ; CHECK-LABEL: test_atomic_load_monotonic_i16:
-  %val = load atomic i16* @var16 monotonic, align 2
+  %val = load atomic i16, i16* @var16 monotonic, align 2
 ; CHECK-NOT: dmb
 ; CHECK-NOT: mcr
 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
@@ -1251,7 +1251,7 @@ define i32 @test_atomic_load_monotonic_r
   %addr_int = add i64 %base, %off
   %addr = inttoptr i64 %addr_int to i32*
 
-  %val = load atomic i32* %addr monotonic, align 4
+  %val = load atomic i32, i32* %addr monotonic, align 4
 ; CHECK-NOT: dmb
 ; CHECK-NOT: mcr
 ; CHECK-LE: ldr r0, [r0, r2]
@@ -1264,7 +1264,7 @@ define i32 @test_atomic_load_monotonic_r
 
 define i64 @test_atomic_load_seq_cst_i64() nounwind {
 ; CHECK-LABEL: test_atomic_load_seq_cst_i64:
-  %val = load atomic i64* @var64 seq_cst, align 8
+  %val = load atomic i64, i64* @var64 seq_cst, align 8
 ; CHECK-NOT: dmb
 ; CHECK-NOT: mcr
 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
@@ -1399,7 +1399,7 @@ define i32 @not.barriers(i32* %var, i1 %
 ; CHECK-LABEL: not.barriers:
   br i1 %cond, label %atomic_ver, label %simple_ver
 simple_ver:
-  %oldval = load i32* %var
+  %oldval = load i32, i32* %var
   %newval = add nsw i32 %oldval, -1
   store i32 %newval, i32* %var
   br label %somewhere

Modified: llvm/trunk/test/CodeGen/ARM/available_externally.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/available_externally.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/available_externally.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/available_externally.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 @B = external hidden constant i32
 
 define i32 @t1() {
-  %tmp = load i32* @A
+  %tmp = load i32, i32* @A
   store i32 %tmp, i32* @B
   ret i32 %tmp
 }

Modified: llvm/trunk/test/CodeGen/ARM/avoid-cpsr-rmw.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/avoid-cpsr-rmw.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/avoid-cpsr-rmw.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/avoid-cpsr-rmw.ll Fri Feb 27 15:17:42 2015
@@ -30,13 +30,13 @@ while.body:
 ; CHECK-NOT: muls
   %ptr1.addr.09 = phi i32* [ %add.ptr, %while.body ], [ %ptr1, %entry ]
   %ptr2.addr.08 = phi i32* [ %incdec.ptr, %while.body ], [ %ptr2, %entry ]
-  %0 = load i32* %ptr1.addr.09, align 4
+  %0 = load i32, i32* %ptr1.addr.09, align 4
   %arrayidx1 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 1
-  %1 = load i32* %arrayidx1, align 4
+  %1 = load i32, i32* %arrayidx1, align 4
   %arrayidx3 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 2
-  %2 = load i32* %arrayidx3, align 4
+  %2 = load i32, i32* %arrayidx3, align 4
   %arrayidx4 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 3
-  %3 = load i32* %arrayidx4, align 4
+  %3 = load i32, i32* %arrayidx4, align 4
   %add.ptr = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 4
   %mul = mul i32 %1, %0
   %mul5 = mul i32 %mul, %2
@@ -64,13 +64,13 @@ while.body:
 ; CHECK: muls
   %ptr1.addr.09 = phi i32* [ %add.ptr, %while.body ], [ %ptr1, %entry ]
   %ptr2.addr.08 = phi i32* [ %incdec.ptr, %while.body ], [ %ptr2, %entry ]
-  %0 = load i32* %ptr1.addr.09, align 4
+  %0 = load i32, i32* %ptr1.addr.09, align 4
   %arrayidx1 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 1
-  %1 = load i32* %arrayidx1, align 4
+  %1 = load i32, i32* %arrayidx1, align 4
   %arrayidx3 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 2
-  %2 = load i32* %arrayidx3, align 4
+  %2 = load i32, i32* %arrayidx3, align 4
   %arrayidx4 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 3
-  %3 = load i32* %arrayidx4, align 4
+  %3 = load i32, i32* %arrayidx4, align 4
   %add.ptr = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 4
   %mul = mul i32 %1, %0
   %mul5 = mul i32 %mul, %2
@@ -92,7 +92,7 @@ entry:
 ; CHECK: vmrs APSR_nzcv, fpscr
 ; CHECK: if.then
 ; CHECK-NOT: movs
-  %0 = load double* %q, align 4
+  %0 = load double, double* %q, align 4
   %cmp = fcmp olt double %0, 1.000000e+01
   %incdec.ptr1 = getelementptr inbounds i32, i32* %p, i32 1
   br i1 %cmp, label %if.then, label %if.else

Modified: llvm/trunk/test/CodeGen/ARM/bfi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/bfi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/bfi.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/bfi.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ entry:
 ; CHECK: f1
 ; CHECK: mov r2, #10
 ; CHECK: bfi r1, r2, #22, #4
-  %0 = load i32* bitcast (%struct.F* @X to i32*), align 4 ; <i32> [#uses=1]
+  %0 = load i32, i32* bitcast (%struct.F* @X to i32*), align 4 ; <i32> [#uses=1]
   %1 = and i32 %0, -62914561                      ; <i32> [#uses=1]
   %2 = or i32 %1, 41943040                        ; <i32> [#uses=1]
   store i32 %2, i32* bitcast (%struct.F* @X to i32*), align 4
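
Constant-expression pointer operands are handled the same way: the explicit result type precedes the entire constant expression. From the bfi hunk above:

    ; the explicit i32 comes before the bitcast constant expression
    %0 = load i32, i32* bitcast (%struct.F* @X to i32*), align 4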

Modified: llvm/trunk/test/CodeGen/ARM/bfx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/bfx.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/bfx.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/bfx.ll Fri Feb 27 15:17:42 2015
@@ -42,12 +42,12 @@ entry:
   %shr2 = and i32 %and1, 255
   %shr4 = lshr i32 %x, 24
   %arrayidx = getelementptr inbounds i32, i32* %ctx, i32 %shr4
-  %0 = load i32* %arrayidx, align 4
+  %0 = load i32, i32* %arrayidx, align 4
   %arrayidx5 = getelementptr inbounds i32, i32* %ctx, i32 %shr2
-  %1 = load i32* %arrayidx5, align 4
+  %1 = load i32, i32* %arrayidx5, align 4
   %add = add i32 %1, %0
   %arrayidx6 = getelementptr inbounds i32, i32* %ctx, i32 %shr
-  %2 = load i32* %arrayidx6, align 4
+  %2 = load i32, i32* %arrayidx6, align 4
   %add7 = add i32 %add, %2
   ret i32 %add7
 }

Modified: llvm/trunk/test/CodeGen/ARM/big-endian-neon-bitconv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/big-endian-neon-bitconv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/big-endian-neon-bitconv.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/big-endian-neon-bitconv.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ define void @conv_i64_to_v8i8( i64 %val,
 ; CHECK-LABEL: conv_i64_to_v8i8:
 ; CHECK: vrev64.8
   %v = bitcast i64 %val to <8 x i8>
-  %w = load <8 x i8>* @v8i8
+  %w = load <8 x i8>, <8 x i8>* @v8i8
   %a = add <8 x i8> %v, %w
   store <8 x i8> %a, <8 x i8>* %store
   ret void
@@ -28,8 +28,8 @@ define void @conv_i64_to_v8i8( i64 %val,
 define void @conv_v8i8_to_i64( <8 x i8>* %load, <8 x i8>* %store ) {
 ; CHECK-LABEL: conv_v8i8_to_i64:
 ; CHECK: vrev64.8
-  %v = load <8 x i8>* %load
-  %w = load <8 x i8>* @v8i8
+  %v = load <8 x i8>, <8 x i8>* %load
+  %w = load <8 x i8>, <8 x i8>* @v8i8
   %a = add <8 x i8> %v, %w
   %f = bitcast <8 x i8> %a to i64
   call void @conv_i64_to_v8i8( i64 %f, <8 x i8>* %store )
@@ -40,7 +40,7 @@ define void @conv_i64_to_v4i16( i64 %val
 ; CHECK-LABEL: conv_i64_to_v4i16:
 ; CHECK: vrev64.16
   %v = bitcast i64 %val to <4 x i16>
-  %w = load <4 x i16>* @v4i16
+  %w = load <4 x i16>, <4 x i16>* @v4i16
   %a = add <4 x i16> %v, %w
   store <4 x i16> %a, <4 x i16>* %store
   ret void
@@ -49,8 +49,8 @@ define void @conv_i64_to_v4i16( i64 %val
 define void @conv_v4i16_to_i64( <4 x i16>* %load, <4 x i16>* %store ) {
 ; CHECK-LABEL: conv_v4i16_to_i64:
 ; CHECK: vrev64.16
-  %v = load <4 x i16>* %load
-  %w = load <4 x i16>* @v4i16
+  %v = load <4 x i16>, <4 x i16>* %load
+  %w = load <4 x i16>, <4 x i16>* @v4i16
   %a = add <4 x i16> %v, %w
   %f = bitcast <4 x i16> %a to i64
   call void @conv_i64_to_v4i16( i64 %f, <4 x i16>* %store )
@@ -61,7 +61,7 @@ define void @conv_i64_to_v2i32( i64 %val
 ; CHECK-LABEL: conv_i64_to_v2i32:
 ; CHECK: vrev64.32
   %v = bitcast i64 %val to <2 x i32>
-  %w = load <2 x i32>* @v2i32
+  %w = load <2 x i32>, <2 x i32>* @v2i32
   %a = add <2 x i32> %v, %w
   store <2 x i32> %a, <2 x i32>* %store
   ret void
@@ -70,8 +70,8 @@ define void @conv_i64_to_v2i32( i64 %val
 define void @conv_v2i32_to_i64( <2 x i32>* %load, <2 x i32>* %store ) {
 ; CHECK-LABEL: conv_v2i32_to_i64:
 ; CHECK: vrev64.32
-  %v = load <2 x i32>* %load
-  %w = load <2 x i32>* @v2i32
+  %v = load <2 x i32>, <2 x i32>* %load
+  %w = load <2 x i32>, <2 x i32>* @v2i32
   %a = add <2 x i32> %v, %w
   %f = bitcast <2 x i32> %a to i64
   call void @conv_i64_to_v2i32( i64 %f, <2 x i32>* %store )
@@ -82,7 +82,7 @@ define void @conv_i64_to_v2f32( i64 %val
 ; CHECK-LABEL: conv_i64_to_v2f32:
 ; CHECK: vrev64.32
   %v = bitcast i64 %val to <2 x float>
-  %w = load <2 x float>* @v2f32
+  %w = load <2 x float>, <2 x float>* @v2f32
   %a = fadd <2 x float> %v, %w
   store <2 x float> %a, <2 x float>* %store
   ret void
@@ -91,8 +91,8 @@ define void @conv_i64_to_v2f32( i64 %val
 define void @conv_v2f32_to_i64( <2 x float>* %load, <2 x float>* %store ) {
 ; CHECK-LABEL: conv_v2f32_to_i64:
 ; CHECK: vrev64.32
-  %v = load <2 x float>* %load
-  %w = load <2 x float>* @v2f32
+  %v = load <2 x float>, <2 x float>* %load
+  %w = load <2 x float>, <2 x float>* @v2f32
   %a = fadd <2 x float> %v, %w
   %f = bitcast <2 x float> %a to i64
   call void @conv_i64_to_v2f32( i64 %f, <2 x float>* %store )
@@ -103,7 +103,7 @@ define void @conv_f64_to_v8i8( double %v
 ; CHECK-LABEL: conv_f64_to_v8i8:
 ; CHECK: vrev64.8
   %v = bitcast double %val to <8 x i8>
-  %w = load <8 x i8>* @v8i8
+  %w = load <8 x i8>, <8 x i8>* @v8i8
   %a = add <8 x i8> %v, %w
   store <8 x i8> %a, <8 x i8>* %store
   ret void
@@ -112,8 +112,8 @@ define void @conv_f64_to_v8i8( double %v
 define void @conv_v8i8_to_f64( <8 x i8>* %load, <8 x i8>* %store ) {
 ; CHECK-LABEL: conv_v8i8_to_f64:
 ; CHECK: vrev64.8
-  %v = load <8 x i8>* %load
-  %w = load <8 x i8>* @v8i8
+  %v = load <8 x i8>, <8 x i8>* %load
+  %w = load <8 x i8>, <8 x i8>* @v8i8
   %a = add <8 x i8> %v, %w
   %f = bitcast <8 x i8> %a to double
   call void @conv_f64_to_v8i8( double %f, <8 x i8>* %store )
@@ -124,7 +124,7 @@ define void @conv_f64_to_v4i16( double %
 ; CHECK-LABEL: conv_f64_to_v4i16:
 ; CHECK: vrev64.16
   %v = bitcast double %val to <4 x i16>
-  %w = load <4 x i16>* @v4i16
+  %w = load <4 x i16>, <4 x i16>* @v4i16
   %a = add <4 x i16> %v, %w
   store <4 x i16> %a, <4 x i16>* %store
   ret void
@@ -133,8 +133,8 @@ define void @conv_f64_to_v4i16( double %
 define void @conv_v4i16_to_f64( <4 x i16>* %load, <4 x i16>* %store ) {
 ; CHECK-LABEL: conv_v4i16_to_f64:
 ; CHECK: vrev64.16
-  %v = load <4 x i16>* %load
-  %w = load <4 x i16>* @v4i16
+  %v = load <4 x i16>, <4 x i16>* %load
+  %w = load <4 x i16>, <4 x i16>* @v4i16
   %a = add <4 x i16> %v, %w
   %f = bitcast <4 x i16> %a to double
   call void @conv_f64_to_v4i16( double %f, <4 x i16>* %store )
@@ -145,7 +145,7 @@ define void @conv_f64_to_v2i32( double %
 ; CHECK-LABEL: conv_f64_to_v2i32:
 ; CHECK: vrev64.32
   %v = bitcast double %val to <2 x i32>
-  %w = load <2 x i32>* @v2i32
+  %w = load <2 x i32>, <2 x i32>* @v2i32
   %a = add <2 x i32> %v, %w
   store <2 x i32> %a, <2 x i32>* %store
   ret void
@@ -154,8 +154,8 @@ define void @conv_f64_to_v2i32( double %
 define void @conv_v2i32_to_f64( <2 x i32>* %load, <2 x i32>* %store ) {
 ; CHECK-LABEL: conv_v2i32_to_f64:
 ; CHECK: vrev64.32
-  %v = load <2 x i32>* %load
-  %w = load <2 x i32>* @v2i32
+  %v = load <2 x i32>, <2 x i32>* %load
+  %w = load <2 x i32>, <2 x i32>* @v2i32
   %a = add <2 x i32> %v, %w
   %f = bitcast <2 x i32> %a to double
   call void @conv_f64_to_v2i32( double %f, <2 x i32>* %store )
@@ -166,7 +166,7 @@ define void @conv_f64_to_v2f32( double %
 ; CHECK-LABEL: conv_f64_to_v2f32:
 ; CHECK: vrev64.32
   %v = bitcast double %val to <2 x float>
-  %w = load <2 x float>* @v2f32
+  %w = load <2 x float>, <2 x float>* @v2f32
   %a = fadd <2 x float> %v, %w
   store <2 x float> %a, <2 x float>* %store
   ret void
@@ -175,8 +175,8 @@ define void @conv_f64_to_v2f32( double %
 define void @conv_v2f32_to_f64( <2 x float>* %load, <2 x float>* %store ) {
 ; CHECK-LABEL: conv_v2f32_to_f64:
 ; CHECK: vrev64.32
-  %v = load <2 x float>* %load
-  %w = load <2 x float>* @v2f32
+  %v = load <2 x float>, <2 x float>* %load
+  %w = load <2 x float>, <2 x float>* @v2f32
   %a = fadd <2 x float> %v, %w
   %f = bitcast <2 x float> %a to double
   call void @conv_f64_to_v2f32( double %f, <2 x float>* %store )
@@ -190,7 +190,7 @@ define void @conv_i128_to_v16i8( i128 %v
 ; CHECK-LABEL: conv_i128_to_v16i8:
 ; CHECK: vrev32.8
   %v = bitcast i128 %val to <16 x i8>
-  %w = load  <16 x i8>* @v16i8
+  %w = load  <16 x i8>,  <16 x i8>* @v16i8
   %a = add <16 x i8> %v, %w
   store <16 x i8> %a, <16 x i8>* %store
   ret void
@@ -199,8 +199,8 @@ define void @conv_i128_to_v16i8( i128 %v
 define void @conv_v16i8_to_i128( <16 x i8>* %load, <16 x i8>* %store ) {
 ; CHECK-LABEL: conv_v16i8_to_i128:
 ; CHECK: vrev32.8
-  %v = load <16 x i8>* %load
-  %w = load <16 x i8>* @v16i8
+  %v = load <16 x i8>, <16 x i8>* %load
+  %w = load <16 x i8>, <16 x i8>* @v16i8
   %a = add <16 x i8> %v, %w
   %f = bitcast <16 x i8> %a to i128
   call void @conv_i128_to_v16i8( i128 %f, <16 x i8>* %store )
@@ -211,7 +211,7 @@ define void @conv_i128_to_v8i16( i128 %v
 ; CHECK-LABEL: conv_i128_to_v8i16:
 ; CHECK: vrev32.16
   %v = bitcast i128 %val to <8 x i16>
-  %w = load  <8 x i16>* @v8i16
+  %w = load  <8 x i16>,  <8 x i16>* @v8i16
   %a = add <8 x i16> %v, %w
   store <8 x i16> %a, <8 x i16>* %store
   ret void
@@ -220,8 +220,8 @@ define void @conv_i128_to_v8i16( i128 %v
 define void @conv_v8i16_to_i128( <8 x i16>* %load, <8 x i16>* %store ) {
 ; CHECK-LABEL: conv_v8i16_to_i128:
 ; CHECK: vrev32.16
-  %v = load <8 x i16>* %load
-  %w = load <8 x i16>* @v8i16
+  %v = load <8 x i16>, <8 x i16>* %load
+  %w = load <8 x i16>, <8 x i16>* @v8i16
   %a = add <8 x i16> %v, %w
   %f = bitcast <8 x i16> %a to i128
   call void @conv_i128_to_v8i16( i128 %f, <8 x i16>* %store )
@@ -232,7 +232,7 @@ define void @conv_i128_to_v4i32( i128 %v
 ; CHECK-LABEL: conv_i128_to_v4i32:
 ; CHECK: vrev64.32
   %v = bitcast i128 %val to <4 x i32>
-  %w = load <4 x i32>* @v4i32
+  %w = load <4 x i32>, <4 x i32>* @v4i32
   %a = add <4 x i32> %v, %w
   store <4 x i32> %a, <4 x i32>* %store
   ret void
@@ -241,8 +241,8 @@ define void @conv_i128_to_v4i32( i128 %v
 define void @conv_v4i32_to_i128( <4 x i32>* %load, <4 x i32>* %store ) {
 ; CHECK-LABEL: conv_v4i32_to_i128:
 ; CHECK: vrev64.32
-  %v = load <4 x i32>* %load
-  %w = load <4 x i32>* @v4i32
+  %v = load <4 x i32>, <4 x i32>* %load
+  %w = load <4 x i32>, <4 x i32>* @v4i32
   %a = add <4 x i32> %v, %w
   %f = bitcast <4 x i32> %a to i128
   call void @conv_i128_to_v4i32( i128 %f, <4 x i32>* %store )
@@ -253,7 +253,7 @@ define void @conv_i128_to_v4f32( i128 %v
 ; CHECK-LABEL: conv_i128_to_v4f32:
 ; CHECK: vrev64.32
   %v = bitcast i128 %val to <4 x float>
-  %w = load <4 x float>* @v4f32
+  %w = load <4 x float>, <4 x float>* @v4f32
   %a = fadd <4 x float> %v, %w
   store <4 x float> %a, <4 x float>* %store
   ret void
@@ -262,8 +262,8 @@ define void @conv_i128_to_v4f32( i128 %v
 define void @conv_v4f32_to_i128( <4 x float>* %load, <4 x float>* %store ) {
 ; CHECK-LABEL: conv_v4f32_to_i128:
 ; CHECK: vrev64.32
-  %v = load <4 x float>* %load
-  %w = load <4 x float>* @v4f32
+  %v = load <4 x float>, <4 x float>* %load
+  %w = load <4 x float>, <4 x float>* @v4f32
   %a = fadd <4 x float> %v, %w
   %f = bitcast <4 x float> %a to i128
   call void @conv_i128_to_v4f32( i128 %f, <4 x float>* %store )
@@ -274,7 +274,7 @@ define void @conv_f128_to_v2f64( fp128 %
 ; CHECK-LABEL: conv_f128_to_v2f64:
 ; CHECK: vrev64.32
   %v = bitcast fp128 %val to <2 x double>
-  %w = load <2 x double>* @v2f64
+  %w = load <2 x double>, <2 x double>* @v2f64
   %a = fadd <2 x double> %v, %w
   store <2 x double> %a, <2 x double>* %store
   ret void
@@ -283,8 +283,8 @@ define void @conv_f128_to_v2f64( fp128 %
 define void @conv_v2f64_to_f128( <2 x double>* %load, <2 x double>* %store ) {
 ; CHECK-LABEL: conv_v2f64_to_f128:
 ; CHECK: vrev64.32
-  %v = load <2 x double>* %load
-  %w = load <2 x double>* @v2f64
+  %v = load <2 x double>, <2 x double>* %load
+  %w = load <2 x double>, <2 x double>* @v2f64
   %a = fadd <2 x double> %v, %w
   %f = bitcast <2 x double> %a to fp128
   call void @conv_f128_to_v2f64( fp128 %f, <2 x double>* %store )
@@ -295,7 +295,7 @@ define void @conv_f128_to_v16i8( fp128 %
 ; CHECK-LABEL: conv_f128_to_v16i8:
 ; CHECK: vrev32.8
   %v = bitcast fp128 %val to <16 x i8>
-  %w = load  <16 x i8>* @v16i8
+  %w = load  <16 x i8>,  <16 x i8>* @v16i8
   %a = add <16 x i8> %v, %w
   store <16 x i8> %a, <16 x i8>* %store
   ret void
@@ -304,8 +304,8 @@ define void @conv_f128_to_v16i8( fp128 %
 define void @conv_v16i8_to_f128( <16 x i8>* %load, <16 x i8>* %store ) {
 ; CHECK-LABEL: conv_v16i8_to_f128:
 ; CHECK: vrev32.8
-  %v = load <16 x i8>* %load
-  %w = load <16 x i8>* @v16i8
+  %v = load <16 x i8>, <16 x i8>* %load
+  %w = load <16 x i8>, <16 x i8>* @v16i8
   %a = add <16 x i8> %v, %w
   %f = bitcast <16 x i8> %a to fp128
   call void @conv_f128_to_v16i8( fp128 %f, <16 x i8>* %store )
@@ -316,7 +316,7 @@ define void @conv_f128_to_v8i16( fp128 %
 ; CHECK-LABEL: conv_f128_to_v8i16:
 ; CHECK: vrev32.16
   %v = bitcast fp128 %val to <8 x i16>
-  %w = load  <8 x i16>* @v8i16
+  %w = load  <8 x i16>,  <8 x i16>* @v8i16
   %a = add <8 x i16> %v, %w
   store <8 x i16> %a, <8 x i16>* %store
   ret void
@@ -325,8 +325,8 @@ define void @conv_f128_to_v8i16( fp128 %
 define void @conv_v8i16_to_f128( <8 x i16>* %load, <8 x i16>* %store ) {
 ; CHECK-LABEL: conv_v8i16_to_f128:
 ; CHECK: vrev32.16
-  %v = load <8 x i16>* %load
-  %w = load <8 x i16>* @v8i16
+  %v = load <8 x i16>, <8 x i16>* %load
+  %w = load <8 x i16>, <8 x i16>* @v8i16
   %a = add <8 x i16> %v, %w
   %f = bitcast <8 x i16> %a to fp128
   call void @conv_f128_to_v8i16( fp128 %f, <8 x i16>* %store )
@@ -337,7 +337,7 @@ define void @conv_f128_to_v4f32( fp128 %
 ; CHECK-LABEL: conv_f128_to_v4f32:
 ; CHECK: vrev64.32
   %v = bitcast fp128 %val to <4 x float>
-  %w = load <4 x float>* @v4f32
+  %w = load <4 x float>, <4 x float>* @v4f32
   %a = fadd <4 x float> %v, %w
   store <4 x float> %a, <4 x float>* %store
   ret void
@@ -346,8 +346,8 @@ define void @conv_f128_to_v4f32( fp128 %
 define void @conv_v4f32_to_f128( <4 x float>* %load, <4 x float>* %store ) {
 ; CHECK-LABEL: conv_v4f32_to_f128:
 ; CHECK: vrev64.32
-  %v = load <4 x float>* %load
-  %w = load <4 x float>* @v4f32
+  %v = load <4 x float>, <4 x float>* %load
+  %w = load <4 x float>, <4 x float>* @v4f32
   %a = fadd <4 x float> %v, %w
   %f = bitcast <4 x float> %a to fp128
   call void @conv_f128_to_v4f32( fp128 %f, <4 x float>* %store )

Modified: llvm/trunk/test/CodeGen/ARM/big-endian-neon-extend.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/big-endian-neon-extend.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/big-endian-neon-extend.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/big-endian-neon-extend.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ define void @vector_ext_2i8_to_2i64( <2
 ; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
 ; CHECK-NEXT: vst1.64   {[[REG]], {{d[0-9]+}}}, [r1]
 ; CHECK-NEXT: bx        lr
-  %1 = load <2 x i8>* %loadaddr
+  %1 = load <2 x i8>, <2 x i8>* %loadaddr
   %2 = zext <2 x i8> %1 to <2 x i64>
   store <2 x i64> %2, <2 x i64>* %storeaddr
   ret void
@@ -33,7 +33,7 @@ define void @vector_ext_2i16_to_2i64( <2
 ; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
 ; CHECK-NEXT: vst1.64   {[[REG]], {{d[0-9]+}}}, [r1]
 ; CHECK-NEXT: bx        lr
-  %1 = load <2 x i16>* %loadaddr
+  %1 = load <2 x i16>, <2 x i16>* %loadaddr
   %2 = zext <2 x i16> %1 to <2 x i64>
   store <2 x i64> %2, <2 x i64>* %storeaddr
   ret void
@@ -49,7 +49,7 @@ define void @vector_ext_2i8_to_2i32( <2
 ; CHECK-NEXT: vrev64.32 [[REG]], [[REG]]
 ; CHECK-NEXT: vstr      [[REG]], [r1]
 ; CHECK-NEXT: bx        lr
-  %1 = load <2 x i8>* %loadaddr
+  %1 = load <2 x i8>, <2 x i8>* %loadaddr
   %2 = zext <2 x i8> %1 to <2 x i32>
   store <2 x i32> %2, <2 x i32>* %storeaddr
   ret void
@@ -63,7 +63,7 @@ define void @vector_ext_2i16_to_2i32( <2
 ; CHECK-NEXT: vrev64.32 [[REG]], [[REG]]
 ; CHECK-NEXT: vstr      [[REG]], [r1]
 ; CHECK-NEXT: bx        lr
-  %1 = load <2 x i16>* %loadaddr
+  %1 = load <2 x i16>, <2 x i16>* %loadaddr
   %2 = zext <2 x i16> %1 to <2 x i32>
   store <2 x i32> %2, <2 x i32>* %storeaddr
   ret void
@@ -80,7 +80,7 @@ define void @vector_ext_2i8_to_2i16( <2
 ; CHECK-NEXT: vrev32.16 [[REG]], {{d[0-9]+}}
 ; CHECK-NEXT: vst1.32   {[[REG]][0]}, [r1:32]
 ; CHECK-NEXT: bx        lr
-  %1 = load <2 x i8>* %loadaddr
+  %1 = load <2 x i8>, <2 x i8>* %loadaddr
   %2 = zext <2 x i8> %1 to <2 x i16>
   store <2 x i16> %2, <2 x i16>* %storeaddr
   ret void
@@ -95,7 +95,7 @@ define void @vector_ext_4i8_to_4i32( <4
 ; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
 ; CHECK-NEXT: vst1.64   {[[REG]], {{d[0-9]+}}}, [r1]
 ; CHECK-NEXT: bx        lr
-  %1 = load <4 x i8>* %loadaddr
+  %1 = load <4 x i8>, <4 x i8>* %loadaddr
   %2 = zext <4 x i8> %1 to <4 x i32>
   store <4 x i32> %2, <4 x i32>* %storeaddr
   ret void
@@ -109,7 +109,7 @@ define void @vector_ext_4i8_to_4i16( <4
 ; CHECK-NEXT: vrev64.16 [[REG]], [[REG]]
 ; CHECK-NEXT: vstr      [[REG]], [r1]
 ; CHECK-NEXT: bx        lr
-  %1 = load <4 x i8>* %loadaddr
+  %1 = load <4 x i8>, <4 x i8>* %loadaddr
   %2 = zext <4 x i8> %1 to <4 x i16>
   store <4 x i16> %2, <4 x i16>* %storeaddr
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/big-endian-neon-trunc-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/big-endian-neon-trunc-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/big-endian-neon-trunc-store.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/big-endian-neon-trunc-store.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define void @vector_trunc_store_2i64_to_
 ; CHECK:       vrev32.16  [[REG]], [[REG]]
 ; CHECK:       vuzp.16    [[REG]], [[REG2:d[0-9]+]]
 ; CHECK:       vrev32.16  [[REG]], [[REG2]]
-  %1 = load <2 x i64>* %loadaddr
+  %1 = load <2 x i64>, <2 x i64>* %loadaddr
   %2 = trunc <2 x i64> %1 to <2 x i16>
   store <2 x i16> %2, <2 x i16>* %storeaddr
   ret void
@@ -18,7 +18,7 @@ define void @vector_trunc_store_4i32_to_
 ; CHECK:       vrev16.8  [[REG]], [[REG]]
 ; CHECK:       vuzp.8    [[REG]], [[REG2:d[0-9]+]]
 ; CHECK:       vrev32.8  [[REG]], [[REG2]]
-  %1 = load <4 x i32>* %loadaddr
+  %1 = load <4 x i32>, <4 x i32>* %loadaddr
   %2 = trunc <4 x i32> %1 to <4 x i8>
   store <4 x i8> %2, <4 x i8>* %storeaddr
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/big-endian-ret-f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/big-endian-ret-f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/big-endian-ret-f64.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/big-endian-ret-f64.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define double @fn() {
 ; CHECK: ldr r0, [sp]
 ; CHECK: ldr r1, [sp, #4]
   %r = alloca double, align 8
-  %1 = load double* %r, align 8
+  %1 = load double, double* %r, align 8
   ret double %1
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/big-endian-vector-caller.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/big-endian-vector-caller.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/big-endian-vector-caller.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/big-endian-vector-caller.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ define void @test_i64_f64(double* %p, i6
 ; SOFT: vadd.f64 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vadd.f64 d0
-    %1 = load double* %p
+    %1 = load double, double* %p
     %2 = fadd double %1, %1
     %3 = call i64 @test_i64_f64_helper(double %2)
     %4 = add i64 %3, %3
@@ -23,7 +23,7 @@ define void @test_i64_v1i64(<1 x i64>* %
 ; SOFT: vadd.i64 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vadd.i64 d0
-    %1 = load <1 x i64>* %p
+    %1 = load <1 x i64>, <1 x i64>* %p
     %2 = add <1 x i64> %1, %1
     %3 = call i64 @test_i64_v1i64_helper(<1 x i64> %2)
     %4 = add i64 %3, %3
@@ -39,7 +39,7 @@ define void @test_i64_v2f32(<2 x float>*
 ; SOFT: vrev64.32 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.32 d0
-    %1 = load <2 x float>* %p
+    %1 = load <2 x float>, <2 x float>* %p
     %2 = fadd <2 x float> %1, %1
     %3 = call i64 @test_i64_v2f32_helper(<2 x float> %2)
     %4 = add i64 %3, %3
@@ -55,7 +55,7 @@ define void @test_i64_v2i32(<2 x i32>* %
 ; SOFT: vrev64.32 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.32 d0
-    %1 = load <2 x i32>* %p
+    %1 = load <2 x i32>, <2 x i32>* %p
     %2 = add <2 x i32> %1, %1
     %3 = call i64 @test_i64_v2i32_helper(<2 x i32> %2)
     %4 = add i64 %3, %3
@@ -71,7 +71,7 @@ define void @test_i64_v4i16(<4 x i16>* %
 ; SOFT: vrev64.16 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.16 d0
-    %1 = load <4 x i16>* %p
+    %1 = load <4 x i16>, <4 x i16>* %p
     %2 = add <4 x i16> %1, %1
     %3 = call i64 @test_i64_v4i16_helper(<4 x i16> %2)
     %4 = add i64 %3, %3
@@ -87,7 +87,7 @@ define void @test_i64_v8i8(<8 x i8>* %p,
 ; SOFT: vrev64.8 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.8 d0
-    %1 = load <8 x i8>* %p
+    %1 = load <8 x i8>, <8 x i8>* %p
     %2 = add <8 x i8> %1, %1
     %3 = call i64 @test_i64_v8i8_helper(<8 x i8> %2)
     %4 = add i64 %3, %3
@@ -102,7 +102,7 @@ declare double @test_f64_i64_helper(i64
 define void @test_f64_i64(i64* %p, double* %q) {
 ; CHECK: adds r1
 ; CHECK: adc r0
-    %1 = load i64* %p
+    %1 = load i64, i64* %p
     %2 = add i64 %1, %1
     %3 = call double @test_f64_i64_helper(i64 %2)
     %4 = fadd double %3, %3
@@ -119,7 +119,7 @@ define void @test_f64_v1i64(<1 x i64>* %
 ; SOFT: vadd.i64 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vadd.i64 d0
-    %1 = load <1 x i64>* %p
+    %1 = load <1 x i64>, <1 x i64>* %p
     %2 = add <1 x i64> %1, %1
     %3 = call double @test_f64_v1i64_helper(<1 x i64> %2)
     %4 = fadd double %3, %3
@@ -136,7 +136,7 @@ define void @test_f64_v2f32(<2 x float>*
 ; SOFT: vrev64.32 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.32 d0
-    %1 = load <2 x float>* %p
+    %1 = load <2 x float>, <2 x float>* %p
     %2 = fadd <2 x float> %1, %1
     %3 = call double @test_f64_v2f32_helper(<2 x float> %2)
     %4 = fadd double %3, %3
@@ -153,7 +153,7 @@ define void @test_f64_v2i32(<2 x i32>* %
 ; SOFT: vrev64.32 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.32 d0
-    %1 = load <2 x i32>* %p
+    %1 = load <2 x i32>, <2 x i32>* %p
     %2 = add <2 x i32> %1, %1
     %3 = call double @test_f64_v2i32_helper(<2 x i32> %2)
     %4 = fadd double %3, %3
@@ -170,7 +170,7 @@ define void @test_f64_v4i16(<4 x i16>* %
 ; SOFT: vrev64.16 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.16 d0
-    %1 = load <4 x i16>* %p
+    %1 = load <4 x i16>, <4 x i16>* %p
     %2 = add <4 x i16> %1, %1
     %3 = call double @test_f64_v4i16_helper(<4 x i16> %2)
     %4 = fadd double %3, %3
@@ -187,7 +187,7 @@ define void @test_f64_v8i8(<8 x i8>* %p,
 ; SOFT: vrev64.8 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.8 d0
-    %1 = load <8 x i8>* %p
+    %1 = load <8 x i8>, <8 x i8>* %p
     %2 = add <8 x i8> %1, %1
     %3 = call double @test_f64_v8i8_helper(<8 x i8> %2)
     %4 = fadd double %3, %3
@@ -203,7 +203,7 @@ declare <1 x i64> @test_v1i64_i64_helper
 define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) {
 ; CHECK: adds r1
 ; CHECK: adc r0
-    %1 = load i64* %p
+    %1 = load i64, i64* %p
     %2 = add i64 %1, %1
     %3 = call <1 x i64> @test_v1i64_i64_helper(i64 %2)
     %4 = add <1 x i64> %3, %3
@@ -220,7 +220,7 @@ define void @test_v1i64_f64(double* %p,
 ; SOFT: vadd.f64 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vadd.f64 d0
-    %1 = load double* %p
+    %1 = load double, double* %p
     %2 = fadd double %1, %1
     %3 = call <1 x i64> @test_v1i64_f64_helper(double %2)
     %4 = add <1 x i64> %3, %3
@@ -237,7 +237,7 @@ define void @test_v1i64_v2f32(<2 x float
 ; HARD: vrev64.32 d0
 ; SOFT: vadd.f32 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
-    %1 = load <2 x float>* %p
+    %1 = load <2 x float>, <2 x float>* %p
     %2 = fadd <2 x float> %1, %1
     %3 = call <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %2)
     %4 = add <1 x i64> %3, %3
@@ -255,7 +255,7 @@ define void @test_v1i64_v2i32(<2 x i32>*
 ; SOFT: vadd.i32 [[REG:d[0-9]+]]
 ; SOFT: vrev64.32 [[REG]]
 ; SOFT: vmov r1, r0, [[REG]]
-    %1 = load <2 x i32>* %p
+    %1 = load <2 x i32>, <2 x i32>* %p
     %2 = add <2 x i32> %1, %1
     %3 = call <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %2)
     %4 = add <1 x i64> %3, %3
@@ -272,7 +272,7 @@ define void @test_v1i64_v4i16(<4 x i16>*
 ; SOFT: vrev64.16 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.16 d0
-    %1 = load <4 x i16>* %p
+    %1 = load <4 x i16>, <4 x i16>* %p
     %2 = add <4 x i16> %1, %1
     %3 = call <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %2)
     %4 = add <1 x i64> %3, %3
@@ -289,7 +289,7 @@ define void @test_v1i64_v8i8(<8 x i8>* %
 ; SOFT: vrev64.8 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.8 d0
-    %1 = load <8 x i8>* %p
+    %1 = load <8 x i8>, <8 x i8>* %p
     %2 = add <8 x i8> %1, %1
     %3 = call <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %2)
     %4 = add <1 x i64> %3, %3
@@ -305,7 +305,7 @@ declare <2 x float> @test_v2f32_i64_help
 define void @test_v2f32_i64(i64* %p, <2 x float>* %q) {
 ; CHECK: adds r1
 ; CHECK: adc r0
-    %1 = load i64* %p
+    %1 = load i64, i64* %p
     %2 = add i64 %1, %1
     %3 = call <2 x float> @test_v2f32_i64_helper(i64 %2)
     %4 = fadd <2 x float> %3, %3
@@ -322,7 +322,7 @@ define void @test_v2f32_f64(double* %p,
 ; SOFT: vadd.f64 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vadd.f64 d0
-    %1 = load double* %p
+    %1 = load double, double* %p
     %2 = fadd double %1, %1
     %3 = call <2 x float> @test_v2f32_f64_helper(double %2)
     %4 = fadd <2 x float> %3, %3
@@ -339,7 +339,7 @@ define void @test_v2f32_v1i64(<1 x i64>*
 ; SOFT: vadd.i64 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vadd.i64 d0
-    %1 = load <1 x i64>* %p
+    %1 = load <1 x i64>, <1 x i64>* %p
     %2 = add <1 x i64> %1, %1
     %3 = call <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %2)
     %4 = fadd <2 x float> %3, %3
@@ -357,7 +357,7 @@ define void @test_v2f32_v2i32(<2 x i32>*
 ; SOFT: vadd.i32 [[REG:d[0-9]+]]
 ; SOFT: vrev64.32 [[REG]]
 ; SOFT: vmov r1, r0, [[REG]]
-    %1 = load <2 x i32>* %p
+    %1 = load <2 x i32>, <2 x i32>* %p
     %2 = add <2 x i32> %1, %1
     %3 = call <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %2)
     %4 = fadd <2 x float> %3, %3
@@ -374,7 +374,7 @@ define void @test_v2f32_v4i16(<4 x i16>*
 ; SOFT: vrev64.16 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.16 d0
-    %1 = load <4 x i16>* %p
+    %1 = load <4 x i16>, <4 x i16>* %p
     %2 = add <4 x i16> %1, %1
     %3 = call <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %2)
     %4 = fadd <2 x float> %3, %3
@@ -391,7 +391,7 @@ define void @test_v2f32_v8i8(<8 x i8>* %
 ; SOFT: vrev64.8 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.8 d0
-    %1 = load <8 x i8>* %p
+    %1 = load <8 x i8>, <8 x i8>* %p
     %2 = add <8 x i8> %1, %1
     %3 = call <2 x float> @test_v2f32_v8i8_helper(<8 x i8> %2)
     %4 = fadd <2 x float> %3, %3
@@ -407,7 +407,7 @@ declare <2 x i32> @test_v2i32_i64_helper
 define void @test_v2i32_i64(i64* %p, <2 x i32>* %q) {
 ; CHECK: adds r1
 ; CHECK: adc r0
-    %1 = load i64* %p
+    %1 = load i64, i64* %p
     %2 = add i64 %1, %1
     %3 = call <2 x i32> @test_v2i32_i64_helper(i64 %2)
     %4 = add <2 x i32> %3, %3
@@ -424,7 +424,7 @@ define void @test_v2i32_f64(double* %p,
 ; SOFT: vadd.f64 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vadd.f64 d0
-    %1 = load double* %p
+    %1 = load double, double* %p
     %2 = fadd double %1, %1
     %3 = call <2 x i32> @test_v2i32_f64_helper(double %2)
     %4 = add <2 x i32> %3, %3
@@ -441,7 +441,7 @@ define void @test_v2i32_v1i64(<1 x i64>*
 ; SOFT: vadd.i64 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vadd.i64 d0
-    %1 = load <1 x i64>* %p
+    %1 = load <1 x i64>, <1 x i64>* %p
     %2 = add <1 x i64> %1, %1
     %3 = call <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %2)
     %4 = add <2 x i32> %3, %3
@@ -460,7 +460,7 @@ define void @test_v2i32_v2f32(<2 x float
 ; SOFT: vadd.f32 [[REG:d[0-9]+]]
 ; SOFT: vrev64.32 [[REG]]
 ; SOFT: vmov r1, r0, [[REG]]
-    %1 = load <2 x float>* %p
+    %1 = load <2 x float>, <2 x float>* %p
     %2 = fadd <2 x float> %1, %1
     %3 = call <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %2)
     %4 = add <2 x i32> %3, %3
@@ -477,7 +477,7 @@ define void @test_v2i32_v4i16(<4 x i16>*
 ; SOFT: vrev64.16 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.16 d0
-    %1 = load <4 x i16>* %p
+    %1 = load <4 x i16>, <4 x i16>* %p
     %2 = add <4 x i16> %1, %1
     %3 = call <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %2)
     %4 = add <2 x i32> %3, %3
@@ -494,7 +494,7 @@ define void @test_v2i32_v8i8(<8 x i8>* %
 ; SOFT: vrev64.8 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.8 d0
-    %1 = load <8 x i8>* %p
+    %1 = load <8 x i8>, <8 x i8>* %p
     %2 = add <8 x i8> %1, %1
     %3 = call <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %2)
     %4 = add <2 x i32> %3, %3
@@ -510,7 +510,7 @@ declare <4 x i16> @test_v4i16_i64_helper
 define void @test_v4i16_i64(i64* %p, <4 x i16>* %q) {
 ; CHECK: adds r1
 ; CHECK: adc r0
-    %1 = load i64* %p
+    %1 = load i64, i64* %p
     %2 = add i64 %1, %1
     %3 = call <4 x i16> @test_v4i16_i64_helper(i64 %2)
     %4 = add <4 x i16> %3, %3
@@ -527,7 +527,7 @@ define void @test_v4i16_f64(double* %p,
 ; SOFT: vadd.f64 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vadd.f64 d0
-    %1 = load double* %p
+    %1 = load double, double* %p
     %2 = fadd double %1, %1
     %3 = call <4 x i16> @test_v4i16_f64_helper(double %2)
     %4 = add <4 x i16> %3, %3
@@ -544,7 +544,7 @@ define void @test_v4i16_v1i64(<1 x i64>*
 ; SOFT: vadd.i64 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vadd.i64 d0
-    %1 = load <1 x i64>* %p
+    %1 = load <1 x i64>, <1 x i64>* %p
     %2 = add <1 x i64> %1, %1
     %3 = call <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %2)
     %4 = add <4 x i16> %3, %3
@@ -563,7 +563,7 @@ define void @test_v4i16_v2f32(<2 x float
 ; SOFT: vadd.f32 [[REG:d[0-9]+]]
 ; SOFT: vrev64.32 [[REG]]
 ; SOFT: vmov r1, r0, [[REG]]
-    %1 = load <2 x float>* %p
+    %1 = load <2 x float>, <2 x float>* %p
     %2 = fadd <2 x float> %1, %1
     %3 = call <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %2)
     %4 = add <4 x i16> %3, %3
@@ -582,7 +582,7 @@ define void @test_v4i16_v2i32(<2 x i32>*
 ; SOFT: vadd.i32 [[REG:d[0-9]+]]
 ; SOFT: vrev64.32 [[REG]]
 ; SOFT: vmov r1, r0, [[REG]]
-    %1 = load <2 x i32>* %p
+    %1 = load <2 x i32>, <2 x i32>* %p
     %2 = add <2 x i32> %1, %1
     %3 = call <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %2)
     %4 = add <4 x i16> %3, %3
@@ -599,7 +599,7 @@ define void @test_v4i16_v8i8(<8 x i8>* %
 ; SOFT: vrev64.8 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.8 d0
-    %1 = load <8 x i8>* %p
+    %1 = load <8 x i8>, <8 x i8>* %p
     %2 = add <8 x i8> %1, %1
     %3 = call <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %2)
     %4 = add <4 x i16> %3, %3
@@ -615,7 +615,7 @@ declare <8 x i8> @test_v8i8_i64_helper(i
 define void @test_v8i8_i64(i64* %p, <8 x i8>* %q) {
 ; CHECK: adds r1
 ; CHECK: adc r0
-    %1 = load i64* %p
+    %1 = load i64, i64* %p
     %2 = add i64 %1, %1
     %3 = call <8 x i8> @test_v8i8_i64_helper(i64 %2)
     %4 = add <8 x i8> %3, %3
@@ -632,7 +632,7 @@ define void @test_v8i8_f64(double* %p, <
 ; SOFT: vadd.f64 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vadd.f64 d0
-    %1 = load double* %p
+    %1 = load double, double* %p
     %2 = fadd double %1, %1
     %3 = call <8 x i8> @test_v8i8_f64_helper(double %2)
     %4 = add <8 x i8> %3, %3
@@ -649,7 +649,7 @@ define void @test_v8i8_v1i64(<1 x i64>*
 ; SOFT: vadd.i64 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vadd.i64 d0
-    %1 = load <1 x i64>* %p
+    %1 = load <1 x i64>, <1 x i64>* %p
     %2 = add <1 x i64> %1, %1
     %3 = call <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %2)
     %4 = add <8 x i8> %3, %3
@@ -666,7 +666,7 @@ define void @test_v8i8_v2f32(<2 x float>
 ; SOFT: vrev64.32 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.32 d0
-    %1 = load <2 x float>* %p
+    %1 = load <2 x float>, <2 x float>* %p
     %2 = fadd <2 x float> %1, %1
     %3 = call <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %2)
     %4 = add <8 x i8> %3, %3
@@ -683,7 +683,7 @@ define void @test_v8i8_v2i32(<2 x i32>*
 ; SOFT: vrev64.32 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.32 d0
-    %1 = load <2 x i32>* %p
+    %1 = load <2 x i32>, <2 x i32>* %p
     %2 = add <2 x i32> %1, %1
     %3 = call <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %2)
     %4 = add <8 x i8> %3, %3
@@ -700,7 +700,7 @@ define void @test_v8i8_v4i16(<4 x i16>*
 ; SOFT: vrev64.16 [[REG:d[0-9]+]]
 ; SOFT: vmov r1, r0, [[REG]]
 ; HARD: vrev64.16 d0
-    %1 = load <4 x i16>* %p
+    %1 = load <4 x i16>, <4 x i16>* %p
     %2 = add <4 x i16> %1, %1
     %3 = call <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %2)
     %4 = add <8 x i8> %3, %3
@@ -720,7 +720,7 @@ define void @test_f128_v2f64(<2 x double
 ; SOFT: vmov r3, r2, [[REG2]]
 ; HARD: vadd.f64 d1
 ; HARD: vadd.f64 d0
-    %1 = load <2 x double>* %p
+    %1 = load <2 x double>, <2 x double>* %p
     %2 = fadd <2 x double> %1, %1
     %3 = call fp128 @test_f128_v2f64_helper(<2 x double> %2)
     %4 = fadd fp128 %3, %3
@@ -735,7 +735,7 @@ define void @test_f128_v2i64(<2 x i64>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vadd.i64 q0
-    %1 = load <2 x i64>* %p
+    %1 = load <2 x i64>, <2 x i64>* %p
     %2 = add <2 x i64> %1, %1
     %3 = call fp128 @test_f128_v2i64_helper(<2 x i64> %2)
     %4 = fadd fp128 %3, %3
@@ -750,7 +750,7 @@ define void @test_f128_v4f32(<4 x float>
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.32 q0
-    %1 = load <4 x float>* %p
+    %1 = load <4 x float>, <4 x float>* %p
     %2 = fadd <4 x float> %1, %1
     %3 = call fp128 @test_f128_v4f32_helper(<4 x float> %2)
     %4 = fadd fp128 %3, %3
@@ -765,7 +765,7 @@ define void @test_f128_v4i32(<4 x i32>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.32 q0
-    %1 = load <4 x i32>* %p
+    %1 = load <4 x i32>, <4 x i32>* %p
     %2 = add <4 x i32> %1, %1
     %3 = call fp128 @test_f128_v4i32_helper(<4 x i32> %2)
     %4 = fadd fp128 %3, %3
@@ -780,7 +780,7 @@ define void @test_f128_v8i16(<8 x i16>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.16 q0
-    %1 = load <8 x i16>* %p
+    %1 = load <8 x i16>, <8 x i16>* %p
     %2 = add <8 x i16> %1, %1
     %3 = call fp128 @test_f128_v8i16_helper(<8 x i16> %2)
     %4 = fadd fp128 %3, %3
@@ -795,7 +795,7 @@ define void @test_f128_v16i8(<16 x i8>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.8 q0
-    %1 = load <16 x i8>* %p
+    %1 = load <16 x i8>, <16 x i8>* %p
     %2 = add <16 x i8> %1, %1
     %3 = call fp128 @test_f128_v16i8_helper(<16 x i8> %2)
     %4 = fadd fp128 %3, %3
@@ -807,7 +807,7 @@ define void @test_f128_v16i8(<16 x i8>*
 ; CHECK-LABEL: test_v2f64_f128:
 declare <2 x double> @test_v2f64_f128_helper(fp128 %p)
 define void @test_v2f64_f128(fp128* %p, <2 x double>* %q) {
-    %1 = load fp128* %p
+    %1 = load fp128, fp128* %p
     %2 = fadd fp128 %1, %1
     %3 = call <2 x double> @test_v2f64_f128_helper(fp128 %2)
     %4 = fadd <2 x double> %3, %3
@@ -824,7 +824,7 @@ define void @test_v2f64_v2i64(<2 x i64>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vadd.i64 q0
-    %1 = load <2 x i64>* %p
+    %1 = load <2 x i64>, <2 x i64>* %p
     %2 = add <2 x i64> %1, %1
     %3 = call <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %2)
     %4 = fadd <2 x double> %3, %3
@@ -840,7 +840,7 @@ define void @test_v2f64_v4f32(<4 x float
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.32 q0
-    %1 = load <4 x float>* %p
+    %1 = load <4 x float>, <4 x float>* %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <2 x double> @test_v2f64_v4f32_helper(<4 x float> %2)
     %4 = fadd <2 x double> %3, %3
@@ -856,7 +856,7 @@ define void @test_v2f64_v4i32(<4 x i32>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.32 q0
-    %1 = load <4 x i32>* %p
+    %1 = load <4 x i32>, <4 x i32>* %p
     %2 = add <4 x i32> %1, %1
     %3 = call <2 x double> @test_v2f64_v4i32_helper(<4 x i32> %2)
     %4 = fadd <2 x double> %3, %3
@@ -872,7 +872,7 @@ define void @test_v2f64_v8i16(<8 x i16>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.16 q0
-    %1 = load <8 x i16>* %p
+    %1 = load <8 x i16>, <8 x i16>* %p
     %2 = add <8 x i16> %1, %1
     %3 = call <2 x double> @test_v2f64_v8i16_helper(<8 x i16> %2)
     %4 = fadd <2 x double> %3, %3
@@ -888,7 +888,7 @@ define void @test_v2f64_v16i8(<16 x i8>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.8 q0
-    %1 = load <16 x i8>* %p
+    %1 = load <16 x i8>, <16 x i8>* %p
     %2 = add <16 x i8> %1, %1
     %3 = call <2 x double> @test_v2f64_v16i8_helper(<16 x i8> %2)
     %4 = fadd <2 x double> %3, %3
@@ -901,7 +901,7 @@ define void @test_v2f64_v16i8(<16 x i8>*
 ; CHECK-LABEL: test_v2i64_f128:
 declare <2 x i64> @test_v2i64_f128_helper(fp128 %p)
 define void @test_v2i64_f128(fp128* %p, <2 x i64>* %q) {
-    %1 = load fp128* %p
+    %1 = load fp128, fp128* %p
     %2 = fadd fp128 %1, %1
     %3 = call <2 x i64> @test_v2i64_f128_helper(fp128 %2)
     %4 = add <2 x i64> %3, %3
@@ -918,7 +918,7 @@ define void @test_v2i64_v2f64(<2 x doubl
 ; SOFT: vmov r3, r2, [[REG2]]
 ; HARD: vadd.f64 d1
 ; HARD: vadd.f64 d0
-    %1 = load <2 x double>* %p
+    %1 = load <2 x double>, <2 x double>* %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %2)
     %4 = add <2 x i64> %3, %3
@@ -934,7 +934,7 @@ define void @test_v2i64_v4f32(<4 x float
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.32 q0
-    %1 = load <4 x float>* %p
+    %1 = load <4 x float>, <4 x float>* %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <2 x i64> @test_v2i64_v4f32_helper(<4 x float> %2)
     %4 = add <2 x i64> %3, %3
@@ -950,7 +950,7 @@ define void @test_v2i64_v4i32(<4 x i32>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.32 q0
-    %1 = load <4 x i32>* %p
+    %1 = load <4 x i32>, <4 x i32>* %p
     %2 = add <4 x i32> %1, %1
     %3 = call <2 x i64> @test_v2i64_v4i32_helper(<4 x i32> %2)
     %4 = add <2 x i64> %3, %3
@@ -966,7 +966,7 @@ define void @test_v2i64_v8i16(<8 x i16>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.16 q0
-    %1 = load <8 x i16>* %p
+    %1 = load <8 x i16>, <8 x i16>* %p
     %2 = add <8 x i16> %1, %1
     %3 = call <2 x i64> @test_v2i64_v8i16_helper(<8 x i16> %2)
     %4 = add <2 x i64> %3, %3
@@ -982,7 +982,7 @@ define void @test_v2i64_v16i8(<16 x i8>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.8 q0
-    %1 = load <16 x i8>* %p
+    %1 = load <16 x i8>, <16 x i8>* %p
     %2 = add <16 x i8> %1, %1
     %3 = call <2 x i64> @test_v2i64_v16i8_helper(<16 x i8> %2)
     %4 = add <2 x i64> %3, %3
@@ -995,7 +995,7 @@ define void @test_v2i64_v16i8(<16 x i8>*
 ; CHECK-LABEL: test_v4f32_f128:
 declare <4 x float> @test_v4f32_f128_helper(fp128 %p)
 define void @test_v4f32_f128(fp128* %p, <4 x float>* %q) {
-    %1 = load fp128* %p
+    %1 = load fp128, fp128* %p
     %2 = fadd fp128 %1, %1
     %3 = call <4 x float> @test_v4f32_f128_helper(fp128 %2)
     %4 = fadd <4 x float> %3, %3
@@ -1012,7 +1012,7 @@ define void @test_v4f32_v2f64(<2 x doubl
 ; SOFT: vmov r3, r2
 ; HARD: vadd.f64  d1
 ; HARD: vadd.f64  d0
-    %1 = load <2 x double>* %p
+    %1 = load <2 x double>, <2 x double>* %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <4 x float> @test_v4f32_v2f64_helper(<2 x double> %2)
     %4 = fadd <4 x float> %3, %3
@@ -1028,7 +1028,7 @@ define void @test_v4f32_v2i64(<2 x i64>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vadd.i64 q0
-    %1 = load <2 x i64>* %p
+    %1 = load <2 x i64>, <2 x i64>* %p
     %2 = add <2 x i64> %1, %1
     %3 = call <4 x float> @test_v4f32_v2i64_helper(<2 x i64> %2)
     %4 = fadd <4 x float> %3, %3
@@ -1044,7 +1044,7 @@ define void @test_v4f32_v4i32(<4 x i32>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.32 q0
-    %1 = load <4 x i32>* %p
+    %1 = load <4 x i32>, <4 x i32>* %p
     %2 = add <4 x i32> %1, %1
     %3 = call <4 x float> @test_v4f32_v4i32_helper(<4 x i32> %2)
     %4 = fadd <4 x float> %3, %3
@@ -1060,7 +1060,7 @@ define void @test_v4f32_v8i16(<8 x i16>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.16 q0
-    %1 = load <8 x i16>* %p
+    %1 = load <8 x i16>, <8 x i16>* %p
     %2 = add <8 x i16> %1, %1
     %3 = call <4 x float> @test_v4f32_v8i16_helper(<8 x i16> %2)
     %4 = fadd <4 x float> %3, %3
@@ -1076,7 +1076,7 @@ define void @test_v4f32_v16i8(<16 x i8>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.8 q0
-    %1 = load <16 x i8>* %p
+    %1 = load <16 x i8>, <16 x i8>* %p
     %2 = add <16 x i8> %1, %1
     %3 = call <4 x float> @test_v4f32_v16i8_helper(<16 x i8> %2)
     %4 = fadd <4 x float> %3, %3
@@ -1089,7 +1089,7 @@ define void @test_v4f32_v16i8(<16 x i8>*
 ; CHECK-LABEL: test_v4i32_f128:
 declare <4 x i32> @test_v4i32_f128_helper(fp128 %p)
 define void @test_v4i32_f128(fp128* %p, <4 x i32>* %q) {
-    %1 = load fp128* %p
+    %1 = load fp128, fp128* %p
     %2 = fadd fp128 %1, %1
     %3 = call <4 x i32> @test_v4i32_f128_helper(fp128 %2)
     %4 = add <4 x i32> %3, %3
@@ -1106,7 +1106,7 @@ define void @test_v4i32_v2f64(<2 x doubl
 ; SOFT: vmov r3, r2
 ; HARD: vadd.f64 d1
 ; HARD: vadd.f64 d0
-    %1 = load <2 x double>* %p
+    %1 = load <2 x double>, <2 x double>* %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <4 x i32> @test_v4i32_v2f64_helper(<2 x double> %2)
     %4 = add <4 x i32> %3, %3
@@ -1122,7 +1122,7 @@ define void @test_v4i32_v2i64(<2 x i64>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vadd.i64 q0
-    %1 = load <2 x i64>* %p
+    %1 = load <2 x i64>, <2 x i64>* %p
     %2 = add <2 x i64> %1, %1
     %3 = call <4 x i32> @test_v4i32_v2i64_helper(<2 x i64> %2)
     %4 = add <4 x i32> %3, %3
@@ -1138,7 +1138,7 @@ define void @test_v4i32_v4f32(<4 x float
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.32 q0
-    %1 = load <4 x float>* %p
+    %1 = load <4 x float>, <4 x float>* %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <4 x i32> @test_v4i32_v4f32_helper(<4 x float> %2)
     %4 = add <4 x i32> %3, %3
@@ -1154,7 +1154,7 @@ define void @test_v4i32_v8i16(<8 x i16>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.16 q0
-    %1 = load <8 x i16>* %p
+    %1 = load <8 x i16>, <8 x i16>* %p
     %2 = add <8 x i16> %1, %1
     %3 = call <4 x i32> @test_v4i32_v8i16_helper(<8 x i16> %2)
     %4 = add <4 x i32> %3, %3
@@ -1170,7 +1170,7 @@ define void @test_v4i32_v16i8(<16 x i8>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.8 q0
-    %1 = load <16 x i8>* %p
+    %1 = load <16 x i8>, <16 x i8>* %p
     %2 = add <16 x i8> %1, %1
     %3 = call <4 x i32> @test_v4i32_v16i8_helper(<16 x i8> %2)
     %4 = add <4 x i32> %3, %3
@@ -1183,7 +1183,7 @@ define void @test_v4i32_v16i8(<16 x i8>*
 ; CHECK-LABEL: test_v8i16_f128:
 declare <8 x i16> @test_v8i16_f128_helper(fp128 %p)
 define void @test_v8i16_f128(fp128* %p, <8 x i16>* %q) {
-    %1 = load fp128* %p
+    %1 = load fp128, fp128* %p
     %2 = fadd fp128 %1, %1
     %3 = call <8 x i16> @test_v8i16_f128_helper(fp128 %2)
     %4 = add <8 x i16> %3, %3
@@ -1200,7 +1200,7 @@ define void @test_v8i16_v2f64(<2 x doubl
 ; SOFT: vmov r3, r2
 ; HARD: vadd.f64 d1
 ; HARD: vadd.f64 d0
-    %1 = load <2 x double>* %p
+    %1 = load <2 x double>, <2 x double>* %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <8 x i16> @test_v8i16_v2f64_helper(<2 x double> %2)
     %4 = add <8 x i16> %3, %3
@@ -1216,7 +1216,7 @@ define void @test_v8i16_v2i64(<2 x i64>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vadd.i64 q0
-    %1 = load <2 x i64>* %p
+    %1 = load <2 x i64>, <2 x i64>* %p
     %2 = add <2 x i64> %1, %1
     %3 = call <8 x i16> @test_v8i16_v2i64_helper(<2 x i64> %2)
     %4 = add <8 x i16> %3, %3
@@ -1232,7 +1232,7 @@ define void @test_v8i16_v4f32(<4 x float
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.32 q0
-    %1 = load <4 x float>* %p
+    %1 = load <4 x float>, <4 x float>* %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <8 x i16> @test_v8i16_v4f32_helper(<4 x float> %2)
     %4 = add <8 x i16> %3, %3
@@ -1248,7 +1248,7 @@ define void @test_v8i16_v4i32(<4 x i32>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.32 q0
-    %1 = load <4 x i32>* %p
+    %1 = load <4 x i32>, <4 x i32>* %p
     %2 = add <4 x i32> %1, %1
     %3 = call <8 x i16> @test_v8i16_v4i32_helper(<4 x i32> %2)
     %4 = add <8 x i16> %3, %3
@@ -1264,7 +1264,7 @@ define void @test_v8i16_v16i8(<16 x i8>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.8 q0
-    %1 = load <16 x i8>* %p
+    %1 = load <16 x i8>, <16 x i8>* %p
     %2 = add <16 x i8> %1, %1
     %3 = call <8 x i16> @test_v8i16_v16i8_helper(<16 x i8> %2)
     %4 = add <8 x i16> %3, %3
@@ -1277,7 +1277,7 @@ define void @test_v8i16_v16i8(<16 x i8>*
 ; CHECK-LABEL: test_v16i8_f128:
 declare <16 x i8> @test_v16i8_f128_helper(fp128 %p)
 define void @test_v16i8_f128(fp128* %p, <16 x i8>* %q) {
-    %1 = load fp128* %p
+    %1 = load fp128, fp128* %p
     %2 = fadd fp128 %1, %1
     %3 = call <16 x i8> @test_v16i8_f128_helper(fp128 %2)
     %4 = add <16 x i8> %3, %3
@@ -1294,7 +1294,7 @@ define void @test_v16i8_v2f64(<2 x doubl
 ; SOFT: vmov r3, r2
 ; HARD: vadd.f64 d1
 ; HARD: vadd.f64 d0
-    %1 = load <2 x double>* %p
+    %1 = load <2 x double>, <2 x double>* %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <16 x i8> @test_v16i8_v2f64_helper(<2 x double> %2)
     %4 = add <16 x i8> %3, %3
@@ -1310,7 +1310,7 @@ define void @test_v16i8_v2i64(<2 x i64>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vadd.i64 q0
-    %1 = load <2 x i64>* %p
+    %1 = load <2 x i64>, <2 x i64>* %p
     %2 = add <2 x i64> %1, %1
     %3 = call <16 x i8> @test_v16i8_v2i64_helper(<2 x i64> %2)
     %4 = add <16 x i8> %3, %3
@@ -1326,7 +1326,7 @@ define void @test_v16i8_v4f32(<4 x float
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.32 q0
-    %1 = load <4 x float>* %p
+    %1 = load <4 x float>, <4 x float>* %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <16 x i8> @test_v16i8_v4f32_helper(<4 x float> %2)
     %4 = add <16 x i8> %3, %3
@@ -1342,7 +1342,7 @@ define void @test_v16i8_v4i32(<4 x i32>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.32 q0
-    %1 = load <4 x i32>* %p
+    %1 = load <4 x i32>, <4 x i32>* %p
     %2 = add <4 x i32> %1, %1
     %3 = call <16 x i8> @test_v16i8_v4i32_helper(<4 x i32> %2)
     %4 = add <16 x i8> %3, %3
@@ -1358,7 +1358,7 @@ define void @test_v16i8_v8i16(<8 x i16>*
 ; SOFT: vmov r1, r0
 ; SOFT: vmov r3, r2
 ; HARD: vrev64.16 q0
-    %1 = load <8 x i16>* %p
+    %1 = load <8 x i16>, <8 x i16>* %p
     %2 = add <8 x i16> %1, %1
     %3 = call <16 x i8> @test_v16i8_v8i16_helper(<8 x i16> %2)
     %4 = add <16 x i8> %3, %3

Modified: llvm/trunk/test/CodeGen/ARM/bswap16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/bswap16.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/bswap16.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/bswap16.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 
 define void @test1(i16* nocapture %data) {
 entry:
-  %0 = load i16* %data, align 2
+  %0 = load i16, i16* %data, align 2
   %1 = tail call i16 @llvm.bswap.i16(i16 %0)
   store i16 %1, i16* %data, align 2
   ret void
@@ -30,7 +30,7 @@ entry:
 
 define i16 @test3(i16* nocapture %data) {
 entry:
-  %0 = load i16* %data, align 2
+  %0 = load i16, i16* %data, align 2
   %1 = tail call i16 @llvm.bswap.i16(i16 %0)
   ret i16 %1
 

Modified: llvm/trunk/test/CodeGen/ARM/call-tc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/call-tc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/call-tc.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/call-tc.ll Fri Feb 27 15:17:42 2015
@@ -24,7 +24,7 @@ define void @t2() {
 ; CHECKT2D: ldr
 ; CHECKT2D-NEXT: ldr
 ; CHECKT2D-NEXT: bx r0
-        %tmp = load i32 ()** @t         ; <i32 ()*> [#uses=1]
+        %tmp = load i32 ()*, i32 ()** @t         ; <i32 ()*> [#uses=1]
         %tmp.upgrd.2 = tail call i32 %tmp( )            ; <i32> [#uses=0]
         ret void
 }
@@ -153,7 +153,7 @@ define i32 @t9() nounwind {
 ; CHECKT2D: b.w ___divsi3
   %lock = alloca %class.MutexLock, align 1
   %1 = call %class.MutexLock* @_ZN9MutexLockC1Ev(%class.MutexLock* %lock)
-  %2 = load i32* @x, align 4
+  %2 = load i32, i32* @x, align 4
   %3 = sdiv i32 1000, %2
   %4 = call %class.MutexLock* @_ZN9MutexLockD1Ev(%class.MutexLock* %lock)
   ret i32 %3
@@ -170,7 +170,7 @@ define float @libcall_tc_test2(float* no
 ; CHECKT2D-LABEL: libcall_tc_test2:
 ; CHECKT2D: blx _floorf
 ; CHECKT2D: b.w _truncf
-  %1 = load float* %a, align 4
+  %1 = load float, float* %a, align 4
   %call = tail call float @floorf(float %1)
   store float %call, float* %a, align 4
   %call1 = tail call float @truncf(float %b)

Modified: llvm/trunk/test/CodeGen/ARM/call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/call.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/call.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/call.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ define void @f() {
 define void @g.upgrd.1() {
 ; CHECKV4: mov lr, pc
 ; CHECKV5: blx
-        %tmp = load i32 ()** @t         ; <i32 ()*> [#uses=1]
+        %tmp = load i32 ()*, i32 ()** @t         ; <i32 ()*> [#uses=1]
         %tmp.upgrd.2 = call i32 %tmp( )            ; <i32> [#uses=0]
         ret void
 }
@@ -30,10 +30,10 @@ define i32* @m_231b(i32, i32, i32*, i32*
 ; CHECKV4: bx r{{.*}}
 BB0:
   %5 = inttoptr i32 %0 to i32*                    ; <i32*> [#uses=1]
-  %t35 = load volatile i32* %5                    ; <i32> [#uses=1]
+  %t35 = load volatile i32, i32* %5                    ; <i32> [#uses=1]
   %6 = inttoptr i32 %t35 to i32**                 ; <i32**> [#uses=1]
   %7 = getelementptr i32*, i32** %6, i32 86             ; <i32**> [#uses=1]
-  %8 = load i32** %7                              ; <i32*> [#uses=1]
+  %8 = load i32*, i32** %7                              ; <i32*> [#uses=1]
   %9 = bitcast i32* %8 to i32* (i32, i32*, i32, i32*, i32*, i32*)* ; <i32* (i32, i32*, i32, i32*, i32*, i32*)*> [#uses=1]
   %10 = call i32* %9(i32 %0, i32* null, i32 %1, i32* %2, i32* %3, i32* %4) ; <i32*> [#uses=1]
   ret i32* %10

Modified: llvm/trunk/test/CodeGen/ARM/call_nolink.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/call_nolink.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/call_nolink.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/call_nolink.ll Fri Feb 27 15:17:42 2015
@@ -23,31 +23,31 @@ bb115.i.i.bb115.i.i_crit_edge:		; preds
 bb115.i.i:		; preds = %bb115.i.i.bb115.i.i_crit_edge, %newFuncRoot
 	%i_addr.3210.0.i.i = phi i32 [ %tmp166.i.i, %bb115.i.i.bb115.i.i_crit_edge ], [ 0, %newFuncRoot ]		; <i32> [#uses=7]
 	%tmp124.i.i = getelementptr [2 x { i32, [3 x i32] }], [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 1, i32 0		; <i32*> [#uses=1]
-	%tmp125.i.i = load i32* %tmp124.i.i		; <i32> [#uses=1]
+	%tmp125.i.i = load i32, i32* %tmp124.i.i		; <i32> [#uses=1]
 	%tmp126.i.i = getelementptr [14 x i32], [14 x i32]* @r, i32 0, i32 %tmp125.i.i		; <i32*> [#uses=1]
-	%tmp127.i.i = load i32* %tmp126.i.i		; <i32> [#uses=1]
+	%tmp127.i.i = load i32, i32* %tmp126.i.i		; <i32> [#uses=1]
 	%tmp131.i.i = getelementptr [2 x { i32, [3 x i32] }], [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 1, i32 1		; <i32*> [#uses=1]
-	%tmp132.i.i = load i32* %tmp131.i.i		; <i32> [#uses=1]
+	%tmp132.i.i = load i32, i32* %tmp131.i.i		; <i32> [#uses=1]
 	%tmp133.i.i = getelementptr [14 x i32], [14 x i32]* @r, i32 0, i32 %tmp132.i.i		; <i32*> [#uses=1]
-	%tmp134.i.i = load i32* %tmp133.i.i		; <i32> [#uses=1]
+	%tmp134.i.i = load i32, i32* %tmp133.i.i		; <i32> [#uses=1]
 	%tmp138.i.i = getelementptr [2 x { i32, [3 x i32] }], [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 1, i32 2		; <i32*> [#uses=1]
-	%tmp139.i.i = load i32* %tmp138.i.i		; <i32> [#uses=1]
+	%tmp139.i.i = load i32, i32* %tmp138.i.i		; <i32> [#uses=1]
 	%tmp140.i.i = getelementptr [14 x i32], [14 x i32]* @r, i32 0, i32 %tmp139.i.i		; <i32*> [#uses=1]
-	%tmp141.i.i = load i32* %tmp140.i.i		; <i32> [#uses=1]
+	%tmp141.i.i = load i32, i32* %tmp140.i.i		; <i32> [#uses=1]
 	%tmp143.i.i = add i32 %i_addr.3210.0.i.i, 12		; <i32> [#uses=1]
 	%tmp146.i.i = getelementptr [2 x { i32, [3 x i32] }], [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 0		; <i32*> [#uses=1]
-	%tmp147.i.i = load i32* %tmp146.i.i		; <i32> [#uses=1]
+	%tmp147.i.i = load i32, i32* %tmp146.i.i		; <i32> [#uses=1]
 	%tmp149.i.i = getelementptr [13 x %struct.anon], [13 x %struct.anon]* @isa, i32 0, i32 %tmp147.i.i, i32 0		; <i32 (i32, i32, i32)**> [#uses=1]
-	%tmp150.i.i = load i32 (i32, i32, i32)** %tmp149.i.i		; <i32 (i32, i32, i32)*> [#uses=1]
+	%tmp150.i.i = load i32 (i32, i32, i32)*, i32 (i32, i32, i32)** %tmp149.i.i		; <i32 (i32, i32, i32)*> [#uses=1]
 	%tmp154.i.i = tail call i32 %tmp150.i.i( i32 %tmp127.i.i, i32 %tmp134.i.i, i32 %tmp141.i.i )		; <i32> [#uses=1]
 	%tmp155.i.i = getelementptr [14 x i32], [14 x i32]* @r, i32 0, i32 %tmp143.i.i		; <i32*> [#uses=1]
 	store i32 %tmp154.i.i, i32* %tmp155.i.i
 	%tmp159.i.i = getelementptr [2 x i32], [2 x i32]* @counter, i32 0, i32 %i_addr.3210.0.i.i		; <i32*> [#uses=2]
-	%tmp160.i.i = load i32* %tmp159.i.i		; <i32> [#uses=1]
+	%tmp160.i.i = load i32, i32* %tmp159.i.i		; <i32> [#uses=1]
 	%tmp161.i.i = add i32 %tmp160.i.i, 1		; <i32> [#uses=1]
 	store i32 %tmp161.i.i, i32* %tmp159.i.i
 	%tmp166.i.i = add i32 %i_addr.3210.0.i.i, 1		; <i32> [#uses=2]
-	%tmp168.i.i = load i32* @numi		; <i32> [#uses=1]
+	%tmp168.i.i = load i32, i32* @numi		; <i32> [#uses=1]
 	icmp slt i32 %tmp166.i.i, %tmp168.i.i		; <i1>:0 [#uses=1]
 	br i1 %0, label %bb115.i.i.bb115.i.i_crit_edge, label %bb115.i.i.bb170.i.i_crit_edge.exitStub
 }

Modified: llvm/trunk/test/CodeGen/ARM/coalesce-dbgvalue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/coalesce-dbgvalue.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/coalesce-dbgvalue.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/coalesce-dbgvalue.ll Fri Feb 27 15:17:42 2015
@@ -28,7 +28,7 @@ for.cond1:
 for.body2:                                        ; preds = %for.cond1
   store i32 %storemerge11, i32* @b, align 4, !dbg !26
   tail call void @llvm.dbg.value(metadata i32* null, i64 0, metadata !11, metadata !{!"0x102"}), !dbg !28
-  %0 = load i64* @a, align 8, !dbg !29
+  %0 = load i64, i64* @a, align 8, !dbg !29
   %xor = xor i64 %0, %e.1.ph, !dbg !29
   %conv3 = trunc i64 %xor to i32, !dbg !29
   tail call void @llvm.dbg.value(metadata i32 %conv3, i64 0, metadata !10, metadata !{!"0x102"}), !dbg !29
@@ -44,7 +44,7 @@ land.end:
   %1 = phi i1 [ false, %for.body2 ], [ %tobool5, %land.rhs ]
   %land.ext = zext i1 %1 to i32
   %call6 = tail call i32 bitcast (i32 (...)* @fn2 to i32 (i32, i32*)*)(i32 %land.ext, i32* null) #3
-  %2 = load i32* @b, align 4, !dbg !26
+  %2 = load i32, i32* @b, align 4, !dbg !26
   %inc8 = add nsw i32 %2, 1, !dbg !26
   %phitmp = and i64 %xor, 4294967295, !dbg !26
   br label %for.cond1.outer, !dbg !26
@@ -52,7 +52,7 @@ land.end:
 for.cond1.outer:                                  ; preds = %land.end, %for.cond1.preheader
   %storemerge11.ph = phi i32 [ %inc8, %land.end ], [ 0, %for.cond1.preheader ]
   %e.1.ph = phi i64 [ %phitmp, %land.end ], [ 0, %for.cond1.preheader ]
-  %3 = load i32* @d, align 4, !dbg !31
+  %3 = load i32, i32* @d, align 4, !dbg !31
   %tobool10 = icmp eq i32 %3, 0, !dbg !31
   br label %for.cond1
 

Modified: llvm/trunk/test/CodeGen/ARM/coalesce-subregs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/coalesce-subregs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/coalesce-subregs.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/coalesce-subregs.ll Fri Feb 27 15:17:42 2015
@@ -86,22 +86,22 @@ declare void @llvm.arm.neon.vst2.v4f32(i
 define void @f3(float* %p, float* %q) nounwind ssp {
 entry:
   %arrayidx = getelementptr inbounds float, float* %p, i32 3
-  %0 = load float* %arrayidx, align 4
+  %0 = load float, float* %arrayidx, align 4
   %vecins = insertelement <2 x float> undef, float %0, i32 1
   %tobool = icmp eq float* %q, null
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = load float* %q, align 4
+  %1 = load float, float* %q, align 4
   %arrayidx2 = getelementptr inbounds float, float* %q, i32 1
-  %2 = load float* %arrayidx2, align 4
+  %2 = load float, float* %arrayidx2, align 4
   %add = fadd float %1, %2
   %vecins3 = insertelement <2 x float> %vecins, float %add, i32 0
   br label %if.end
 
 if.else:                                          ; preds = %entry
   %arrayidx4 = getelementptr inbounds float, float* %p, i32 2
-  %3 = load float* %arrayidx4, align 4
+  %3 = load float, float* %arrayidx4, align 4
   %vecins5 = insertelement <2 x float> %vecins, float %3, i32 0
   br label %if.end
 
@@ -129,9 +129,9 @@ entry:
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = load float* %q, align 4
+  %1 = load float, float* %q, align 4
   %arrayidx1 = getelementptr inbounds float, float* %q, i32 1
-  %2 = load float* %arrayidx1, align 4
+  %2 = load float, float* %arrayidx1, align 4
   %add = fadd float %1, %2
   %vecins = insertelement <2 x float> %vld1, float %add, i32 1
   br label %if.end
@@ -165,12 +165,12 @@ entry:
 
 if.then:                                          ; preds = %entry
   %arrayidx = getelementptr inbounds float, float* %q, i32 1
-  %1 = load float* %arrayidx, align 4
+  %1 = load float, float* %arrayidx, align 4
   %add4 = fadd float %vecext, %1
-  %2 = load float* %q, align 4
+  %2 = load float, float* %q, align 4
   %add6 = fadd float %vecext1, %2
   %arrayidx7 = getelementptr inbounds float, float* %q, i32 2
-  %3 = load float* %arrayidx7, align 4
+  %3 = load float, float* %arrayidx7, align 4
   %add8 = fadd float %vecext2, %3
   br label %if.end
 
@@ -231,7 +231,7 @@ bb3:
   br i1 undef, label %bb10, label %bb12
 
 bb10:                                             ; preds = %bb3
-  %tmp11 = load <4 x float>* undef, align 8
+  %tmp11 = load <4 x float>, <4 x float>* undef, align 8
   br label %bb12
 
 bb12:                                             ; preds = %bb10, %bb3
@@ -333,7 +333,7 @@ for.body:
   br i1 undef, label %for.body29, label %for.end
 
 for.body29:                                       ; preds = %for.body29, %for.body
-  %0 = load <2 x double>* null, align 1
+  %0 = load <2 x double>, <2 x double>* null, align 1
   %splat40 = shufflevector <2 x double> %0, <2 x double> undef, <2 x i32> zeroinitializer
   %mul41 = fmul <2 x double> undef, %splat40
   %add42 = fadd <2 x double> undef, %mul41

Modified: llvm/trunk/test/CodeGen/ARM/code-placement.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/code-placement.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/code-placement.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/code-placement.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ bb:
   %list_addr.05 = phi %struct.list_head* [ %2, %bb ], [ %list, %entry ]
   %next.04 = phi %struct.list_head* [ %list_addr.05, %bb ], [ null, %entry ]
   %1 = getelementptr inbounds %struct.list_head, %struct.list_head* %list_addr.05, i32 0, i32 0
-  %2 = load %struct.list_head** %1, align 4
+  %2 = load %struct.list_head*, %struct.list_head** %1, align 4
   store %struct.list_head* %next.04, %struct.list_head** %1, align 4
   %3 = icmp eq %struct.list_head* %2, null
   br i1 %3, label %bb2, label %bb
@@ -46,7 +46,7 @@ bb1:
   %sum.08 = phi i32 [ %2, %bb1 ], [ %sum.110, %bb2.preheader ] ; <i32> [#uses=1]
   %tmp17 = sub i32 %i.07, %indvar                 ; <i32> [#uses=1]
   %scevgep = getelementptr i32, i32* %src, i32 %tmp17  ; <i32*> [#uses=1]
-  %1 = load i32* %scevgep, align 4                ; <i32> [#uses=1]
+  %1 = load i32, i32* %scevgep, align 4                ; <i32> [#uses=1]
   %2 = add nsw i32 %1, %sum.08                    ; <i32> [#uses=2]
   %indvar.next = add i32 %indvar, 1               ; <i32> [#uses=2]
   %exitcond = icmp eq i32 %indvar.next, %size     ; <i1> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/commute-movcc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/commute-movcc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/commute-movcc.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/commute-movcc.ll Fri Feb 27 15:17:42 2015
@@ -32,7 +32,7 @@ for.body:
   %BestCost.011 = phi i32 [ -1, %entry ], [ %BestCost.1, %if.end8 ]
   %BestIdx.010 = phi i32 [ 0, %entry ], [ %BestIdx.1, %if.end8 ]
   %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.012
-  %0 = load i32* %arrayidx, align 4
+  %0 = load i32, i32* %arrayidx, align 4
   %mul = mul i32 %0, %0
   %sub = add nsw i32 %i.012, -5
   %cmp2 = icmp eq i32 %sub, %Pref

Modified: llvm/trunk/test/CodeGen/ARM/compare-call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/compare-call.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/compare-call.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/compare-call.ll Fri Feb 27 15:17:42 2015
@@ -2,9 +2,9 @@
 
 define void @test3(float* %glob, i32 %X) {
 entry:
-        %tmp = load float* %glob                ; <float> [#uses=1]
+        %tmp = load float, float* %glob                ; <float> [#uses=1]
         %tmp2 = getelementptr float, float* %glob, i32 2               ; <float*> [#uses=1]
-        %tmp3 = load float* %tmp2               ; <float> [#uses=1]
+        %tmp3 = load float, float* %tmp2               ; <float> [#uses=1]
         %tmp.upgrd.1 = fcmp ogt float %tmp, %tmp3               ; <i1> [#uses=1]
         br i1 %tmp.upgrd.1, label %cond_true, label %UnifiedReturnBlock
 

Modified: llvm/trunk/test/CodeGen/ARM/copy-paired-reg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/copy-paired-reg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/copy-paired-reg.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/copy-paired-reg.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ define void @f() {
   store atomic i64 0, i64* %c seq_cst, align 8
   store atomic i64 0, i64* %d seq_cst, align 8
 
-  %e = load atomic i64* %d seq_cst, align 8
+  %e = load atomic i64, i64* %d seq_cst, align 8
 
   ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/crash-greedy-v6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/crash-greedy-v6.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/crash-greedy-v6.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/crash-greedy-v6.ll Fri Feb 27 15:17:42 2015
@@ -38,7 +38,7 @@ for.body:
   %arrayidx22 = getelementptr i8, i8* %green, i32 %i.031
   %arrayidx25 = getelementptr i8, i8* %blue, i32 %i.031
   %arrayidx28 = getelementptr i8, i8* %alpha, i32 %i.031
-  %tmp12 = load float* %arrayidx11, align 4
+  %tmp12 = load float, float* %arrayidx11, align 4
   tail call fastcc void @sample_3d_nearest(i8* %tObj, i8* undef, float undef, float %tmp12, float undef, i8* %arrayidx19, i8* %arrayidx22, i8* %arrayidx25, i8* %arrayidx28)
   %0 = add i32 %i.031, 1
   %exitcond = icmp eq i32 %0, %n

Modified: llvm/trunk/test/CodeGen/ARM/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/crash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/crash.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 
 define void @func() nounwind {
 entry:
-  %tmp = load i32* undef, align 4
+  %tmp = load i32, i32* undef, align 4
   br label %bb1
 
 bb1:

Modified: llvm/trunk/test/CodeGen/ARM/cse-ldrlit.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/cse-ldrlit.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/cse-ldrlit.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/cse-ldrlit.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@
 declare void @bar(i32*)
 
 define void @foo() {
-  %flag = load i32* getelementptr inbounds([16 x i32]* @var, i32 0, i32 1)
+  %flag = load i32, i32* getelementptr inbounds([16 x i32]* @var, i32 0, i32 1)
   %tst = icmp eq i32 %flag, 0
   br i1 %tst, label %true, label %false
 true:

Modified: llvm/trunk/test/CodeGen/ARM/cse-libcalls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/cse-libcalls.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/cse-libcalls.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/cse-libcalls.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ target triple = "i386-apple-darwin8"
 
 define double @u_f_nonbon(double %lambda) nounwind {
 entry:
-	%tmp19.i.i = load double* null, align 4		; <double> [#uses=2]
+	%tmp19.i.i = load double, double* null, align 4		; <double> [#uses=2]
 	%tmp6.i = fcmp olt double %tmp19.i.i, 1.000000e+00		; <i1> [#uses=1]
 	%dielectric.0.i = select i1 %tmp6.i, double 1.000000e+00, double %tmp19.i.i		; <double> [#uses=1]
 	%tmp10.i4 = fdiv double 0x4074C2D71F36262D, %dielectric.0.i		; <double> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ define float @f(<4 x i16>* nocapture %in
   ; CHECK: vldr
   ; CHECK: vmovl.u16
   ; CHECK-NOT: vand
-  %1 = load <4 x i16>* %in
+  %1 = load <4 x i16>, <4 x i16>* %in
   ; CHECK: vcvt.f32.u32
   %2 = uitofp <4 x i16> %1 to <4 x float>
   %3 = extractelement <4 x float> %2, i32 0
@@ -21,7 +21,7 @@ define float @f(<4 x i16>* nocapture %in
 
 define float @g(<4 x i16>* nocapture %in) {
   ; CHECK: vldr
-  %1 = load <4 x i16>* %in
+  %1 = load <4 x i16>, <4 x i16>* %in
   ; CHECK-NOT: uxth
   %2 = extractelement <4 x i16> %1, i32 0
   ; CHECK: vcvt.f32.u32

Modified: llvm/trunk/test/CodeGen/ARM/debug-frame-large-stack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/debug-frame-large-stack.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/debug-frame-large-stack.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/debug-frame-large-stack.ll Fri Feb 27 15:17:42 2015
@@ -48,7 +48,7 @@ define i32 @test3() {
 	%tmp = alloca i32, align 4
 	%a = alloca [805306369 x i8], align 16
 	store i32 0, i32* %tmp
-	%tmp1 = load i32* %tmp
+	%tmp1 = load i32, i32* %tmp
         ret i32 %tmp1
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/debug-frame-vararg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/debug-frame-vararg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/debug-frame-vararg.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/debug-frame-vararg.ll Fri Feb 27 15:17:42 2015
@@ -118,11 +118,11 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %i.05 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
-  %ap.cur = load i8** %vl, align 4
+  %ap.cur = load i8*, i8** %vl, align 4
   %ap.next = getelementptr i8, i8* %ap.cur, i32 4
   store i8* %ap.next, i8** %vl, align 4
   %0 = bitcast i8* %ap.cur to i32*
-  %1 = load i32* %0, align 4
+  %1 = load i32, i32* %0, align 4
   %call = call i32 @foo(i32 %1) #1
   %inc = add nsw i32 %i.05, 1
   %exitcond = icmp eq i32 %inc, %count

Modified: llvm/trunk/test/CodeGen/ARM/debug-info-blocks.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/debug-info-blocks.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/debug-info-blocks.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/debug-info-blocks.ll Fri Feb 27 15:17:42 2015
@@ -47,21 +47,21 @@ define hidden void @foobar_func_block_in
   call void @llvm.dbg.declare(metadata %2* %6, metadata !136, metadata !163), !dbg !137
   call void @llvm.dbg.declare(metadata %2* %6, metadata !138, metadata !164), !dbg !137
   call void @llvm.dbg.declare(metadata %2* %6, metadata !139, metadata !165), !dbg !140
-  %8 = load %0** %1, align 4, !dbg !141
-  %9 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_13", !dbg !141
+  %8 = load %0*, %0** %1, align 4, !dbg !141
+  %9 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_13", !dbg !141
   %10 = bitcast %0* %8 to i8*, !dbg !141
   %11 = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %10, i8* %9), !dbg !141
   %12 = bitcast i8* %11 to %0*, !dbg !141
   %13 = getelementptr inbounds %2, %2* %6, i32 0, i32 5, !dbg !141
-  %14 = load i8** %13, !dbg !141
+  %14 = load i8*, i8** %13, !dbg !141
   %15 = bitcast i8* %14 to %struct.__block_byref_mydata*, !dbg !141
   %16 = getelementptr inbounds %struct.__block_byref_mydata, %struct.__block_byref_mydata* %15, i32 0, i32 1, !dbg !141
-  %17 = load %struct.__block_byref_mydata** %16, !dbg !141
+  %17 = load %struct.__block_byref_mydata*, %struct.__block_byref_mydata** %16, !dbg !141
   %18 = getelementptr inbounds %struct.__block_byref_mydata, %struct.__block_byref_mydata* %17, i32 0, i32 6, !dbg !141
   store %0* %12, %0** %18, align 4, !dbg !141
   %19 = getelementptr inbounds %2, %2* %6, i32 0, i32 6, !dbg !143
-  %20 = load %3** %19, align 4, !dbg !143
-  %21 = load i32* @"OBJC_IVAR_$_MyWork._data", !dbg !143
+  %20 = load %3*, %3** %19, align 4, !dbg !143
+  %21 = load i32, i32* @"OBJC_IVAR_$_MyWork._data", !dbg !143
   %22 = bitcast %3* %20 to i8*, !dbg !143
   %23 = getelementptr inbounds i8, i8* %22, i32 %21, !dbg !143
   %24 = bitcast i8* %23 to %struct.CR*, !dbg !143
@@ -69,8 +69,8 @@ define hidden void @foobar_func_block_in
   %26 = bitcast %struct.CR* %data to i8*, !dbg !143
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %25, i8* %26, i32 16, i32 4, i1 false), !dbg !143
   %27 = getelementptr inbounds %2, %2* %6, i32 0, i32 6, !dbg !144
-  %28 = load %3** %27, align 4, !dbg !144
-  %29 = load i32* @"OBJC_IVAR_$_MyWork._bounds", !dbg !144
+  %28 = load %3*, %3** %27, align 4, !dbg !144
+  %29 = load i32, i32* @"OBJC_IVAR_$_MyWork._bounds", !dbg !144
   %30 = bitcast %3* %28 to i8*, !dbg !144
   %31 = getelementptr inbounds i8, i8* %30, i32 %29, !dbg !144
   %32 = bitcast i8* %31 to %struct.CR*, !dbg !144
@@ -78,15 +78,15 @@ define hidden void @foobar_func_block_in
   %34 = bitcast %struct.CR* %bounds to i8*, !dbg !144
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %33, i8* %34, i32 16, i32 4, i1 false), !dbg !144
   %35 = getelementptr inbounds %2, %2* %6, i32 0, i32 6, !dbg !145
-  %36 = load %3** %35, align 4, !dbg !145
+  %36 = load %3*, %3** %35, align 4, !dbg !145
   %37 = getelementptr inbounds %2, %2* %6, i32 0, i32 5, !dbg !145
-  %38 = load i8** %37, !dbg !145
+  %38 = load i8*, i8** %37, !dbg !145
   %39 = bitcast i8* %38 to %struct.__block_byref_mydata*, !dbg !145
   %40 = getelementptr inbounds %struct.__block_byref_mydata, %struct.__block_byref_mydata* %39, i32 0, i32 1, !dbg !145
-  %41 = load %struct.__block_byref_mydata** %40, !dbg !145
+  %41 = load %struct.__block_byref_mydata*, %struct.__block_byref_mydata** %40, !dbg !145
   %42 = getelementptr inbounds %struct.__block_byref_mydata, %struct.__block_byref_mydata* %41, i32 0, i32 6, !dbg !145
-  %43 = load %0** %42, align 4, !dbg !145
-  %44 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_222", !dbg !145
+  %43 = load %0*, %0** %42, align 4, !dbg !145
+  %44 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_222", !dbg !145
   %45 = bitcast %3* %36 to i8*, !dbg !145
   call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, %0*)*)(i8* %45, i8* %44, %0* %43), !dbg !145
   ret void, !dbg !146

Modified: llvm/trunk/test/CodeGen/ARM/divmod.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/divmod.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/divmod.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/divmod.ll Fri Feb 27 15:17:42 2015
@@ -47,7 +47,7 @@ define void @do_indent(i32 %cols) nounwi
 entry:
 ; A8-LABEL: do_indent:
 ; SWIFT-LABEL: do_indent:
-  %0 = load i32* @flags, align 4
+  %0 = load i32, i32* @flags, align 4
   %1 = and i32 %0, 67108864
   %2 = icmp eq i32 %1, 0
   br i1 %2, label %bb1, label %bb
@@ -57,7 +57,7 @@ bb:
 ; SWIFT: sdiv
 ; SWIFT: mls
 ; SWIFT-NOT: bl __divmodsi4
-  %3 = load i32* @tabsize, align 4
+  %3 = load i32, i32* @tabsize, align 4
   %4 = srem i32 %cols, %3
   %5 = sdiv i32 %cols, %3
   %6 = tail call i32 @llvm.objectsize.i32.p0i8(i8* null, i1 false)

Modified: llvm/trunk/test/CodeGen/ARM/dwarf-eh.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/dwarf-eh.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/dwarf-eh.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/dwarf-eh.ll Fri Feb 27 15:17:42 2015
@@ -34,12 +34,12 @@ define void @f() uwtable {
   store i32 %7, i32* %2
   br label %8
 
-  %9 = load i32* %2
+  %9 = load i32, i32* %2
   %10 = call i32 @llvm.eh.typeid.for(i8* bitcast ({ i8*, i8* }* @_ZTI9exception to i8*)) nounwind
   %11 = icmp eq i32 %9, %10
   br i1 %11, label %12, label %17
 
-  %13 = load i8** %1
+  %13 = load i8*, i8** %1
   %14 = call i8* @__cxa_begin_catch(i8* %13) #3
   %15 = bitcast i8* %14 to %struct.exception*
   store %struct.exception* %15, %struct.exception** %e
@@ -48,8 +48,8 @@ define void @f() uwtable {
 
   ret void
 
-  %18 = load i8** %1
-  %19 = load i32* %2
+  %18 = load i8*, i8** %1
+  %19 = load i32, i32* %2
   %20 = insertvalue { i8*, i32 } undef, i8* %18, 0
   %21 = insertvalue { i8*, i32 } %20, i32 %19, 1
   resume { i8*, i32 } %21

Modified: llvm/trunk/test/CodeGen/ARM/dyn-stackalloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/dyn-stackalloc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/dyn-stackalloc.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/dyn-stackalloc.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ define void @t1(%struct.state* %v) {
 ; CHECK-NOT: sub r{{[0-9]+}}, sp, [[REG1]]
 ; CHECK: sub sp, sp, [[REG1]]
 
-  %tmp6 = load i32* null
+  %tmp6 = load i32, i32* null
   %tmp8 = alloca float, i32 %tmp6
   store i32 1, i32* null
   br i1 false, label %bb123.preheader, label %return
@@ -29,7 +29,7 @@ bb123.preheader:
 
 bb43:                                             ; preds = %bb123.preheader
   call fastcc void @f1(float* %tmp8, float* null, i32 0)
-  %tmp70 = load i32* null
+  %tmp70 = load i32, i32* null
   %tmp85 = getelementptr float, float* %tmp8, i32 0
   call fastcc void @f2(float* null, float* null, float* %tmp85, i32 %tmp70)
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/emit-big-cst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/emit-big-cst.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/emit-big-cst.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/emit-big-cst.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@
 
 define void @accessBig(i64* %storage) {
   %addr = bitcast i64* %storage to i82*
-  %bigLoadedCst = load volatile i82* @bigCst
+  %bigLoadedCst = load volatile i82, i82* @bigCst
   %tmp = add i82 %bigLoadedCst, 1
   store i82 %tmp, i82* %addr
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/extload-knownzero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/extload-knownzero.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/extload-knownzero.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/extload-knownzero.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ entry:
   br i1 %tmp1, label %bb1, label %bb2
 bb1:
 ; CHECK: ldrh
-  %tmp2 = load i16* %ptr, align 2
+  %tmp2 = load i16, i16* %ptr, align 2
   br label %bb2
 bb2:
 ; CHECK-NOT: uxth

Modified: llvm/trunk/test/CodeGen/ARM/extloadi1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/extloadi1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/extloadi1.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/extloadi1.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 
 define void @__mf_sigusr1_respond() {
 entry:
-        %tmp8.b = load i1* @handler_installed.6144.b            ; <i1> [#uses=1]
+        %tmp8.b = load i1, i1* @handler_installed.6144.b            ; <i1> [#uses=1]
         br i1 false, label %cond_true7, label %cond_next
 
 cond_next:              ; preds = %entry

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-GEP-coalesce.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-GEP-coalesce.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-GEP-coalesce.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-GEP-coalesce.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ entry:
   store i32* getelementptr inbounds ([2 x [2 x [2 x [2 x [2 x i32]]]]]* @arr, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1), i32** %addr, align 4
 ; ARM: add r0, r0, #124
 ; THUMB: adds r0, #124
-  %0 = load i32** %addr, align 4
+  %0 = load i32*, i32** %addr, align 4
   ret i32* %0
 }
 
@@ -30,7 +30,7 @@ entry:
 ; ARM: movw [[R:r[0-9]+]], #1148
 ; ARM: add r0, r{{[0-9]+}}, [[R]]
 ; THUMB: addw r0, r0, #1148
-  %0 = load i32** %addr, align 4
+  %0 = load i32*, i32** %addr, align 4
   ret i32* %0
 }
 
@@ -42,7 +42,7 @@ entry:
   store i32* getelementptr inbounds ([3 x [3 x %struct.A]]* @A, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1), i32** %addr, align 4
 ; ARM: add r0, r0, #140
 ; THUMB: adds r0, #140
-  %0 = load i32** %addr, align 4
+  %0 = load i32*, i32** %addr, align 4
   ret i32* %0
 }
 
@@ -61,6 +61,6 @@ entry:
 ; ARM-NOT: add r{{[0-9]}}, r{{[0-9]}}, #4
 ; ARM: movw r{{[0-9]}}, #1284
 ; THUMB: addw r{{[0-9]}}, r{{[0-9]}}, #1284
-  %0 = load i32** %addr, align 4
+  %0 = load i32*, i32** %addr, align 4
   ret i32* %0
 }

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-align.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-align.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-align.ll Fri Feb 27 15:17:42 2015
@@ -34,7 +34,7 @@ entry:
 ; THUMB: str r1, [r0]
 
   %add = fadd float %x, %y
-  %0 = load %struct.anon** @a, align 4
+  %0 = load %struct.anon*, %struct.anon** @a, align 4
   %x1 = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 0, i32 0
   store float %add, float* %x1, align 1
   ret void
@@ -66,9 +66,9 @@ entry:
 ; THUMB: @unaligned_f32_load
   %0 = alloca %class.TAlignTest*, align 4
   store %class.TAlignTest* %this, %class.TAlignTest** %0, align 4
-  %1 = load %class.TAlignTest** %0
+  %1 = load %class.TAlignTest*, %class.TAlignTest** %0
   %2 = getelementptr inbounds %class.TAlignTest, %class.TAlignTest* %1, i32 0, i32 1
-  %3 = load float* %2, align 1
+  %3 = load float, float* %2, align 1
   %4 = fcmp une float %3, 0.000000e+00
 ; ARM: ldr r[[R:[0-9]+]], [r0, #2]
 ; ARM: vmov s0, r[[R]]
@@ -103,7 +103,7 @@ entry:
 ; THUMB-STRICT-ALIGN: ldrb
 ; THUMB-STRICT-ALIGN: ldrb
 
-  %0 = load i16* %x, align 1
+  %0 = load i16, i16* %x, align 1
   ret i16 %0
 }
 
@@ -139,6 +139,6 @@ entry:
 ; THUMB-STRICT-ALIGN: ldrb
 ; THUMB-STRICT-ALIGN: ldrb
 
-  %0 = load i32* %x, align 1
+  %0 = load i32, i32* %x, align 1
   ret i32 %0
 }

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-call.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-call.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-call.ll Fri Feb 27 15:17:42 2015
@@ -157,7 +157,7 @@ define void @foo3() uwtable {
 ; THUMB: blx     r1
   %fptr = alloca i32 (i32)*, align 8
   store i32 (i32)* @bar0, i32 (i32)** %fptr, align 8
-  %1 = load i32 (i32)** %fptr, align 8
+  %1 = load i32 (i32)*, i32 (i32)** %fptr, align 8
   %call = call i32 %1(i32 0)
   ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-fold.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-fold.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-fold.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ define void @t1() nounwind uwtable ssp {
 ; THUMB: ldrb
 ; THUMB-NOT: uxtb
 ; THUMB-NOT: and{{.*}}, #255
-  %1 = load i8* @a, align 1
+  %1 = load i8, i8* @a, align 1
   call void @foo1(i8 zeroext %1)
   ret void
 }
@@ -26,7 +26,7 @@ define void @t2() nounwind uwtable ssp {
 ; THUMB: t2
 ; THUMB: ldrh
 ; THUMB-NOT: uxth
-  %1 = load i16* @b, align 2
+  %1 = load i16, i16* @b, align 2
   call void @foo2(i16 zeroext %1)
   ret void
 }
@@ -43,7 +43,7 @@ define i32 @t3() nounwind uwtable ssp {
 ; THUMB: ldrb
 ; THUMB-NOT: uxtb
 ; THUMB-NOT: and{{.*}}, #255
-  %1 = load i8* @a, align 1
+  %1 = load i8, i8* @a, align 1
   %2 = zext i8 %1 to i32
   ret i32 %2
 }
@@ -55,7 +55,7 @@ define i32 @t4() nounwind uwtable ssp {
 ; THUMB: t4
 ; THUMB: ldrh
 ; THUMB-NOT: uxth
-  %1 = load i16* @b, align 2
+  %1 = load i16, i16* @b, align 2
   %2 = zext i16 %1 to i32
   ret i32 %2
 }
@@ -67,7 +67,7 @@ define i32 @t5() nounwind uwtable ssp {
 ; THUMB: t5
 ; THUMB: ldrsh
 ; THUMB-NOT: sxth
-  %1 = load i16* @b, align 2
+  %1 = load i16, i16* @b, align 2
   %2 = sext i16 %1 to i32
   ret i32 %2
 }
@@ -79,7 +79,7 @@ define i32 @t6() nounwind uwtable ssp {
 ; THUMB: t6
 ; THUMB: ldrsb
 ; THUMB-NOT: sxtb
-  %1 = load i8* @a, align 2
+  %1 = load i8, i8* @a, align 2
   %2 = sext i8 %1 to i32
   ret i32 %2
 }

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-ldr-str-arm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-ldr-str-arm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-ldr-str-arm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-ldr-str-arm.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define i32 @t1(i32* nocapture %ptr) noun
 entry:
 ; ARM: t1
   %add.ptr = getelementptr inbounds i32, i32* %ptr, i32 1
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
 ; ARM: ldr r{{[0-9]}}, [r0, #4]
   ret i32 %0
 }
@@ -13,7 +13,7 @@ define i32 @t2(i32* nocapture %ptr) noun
 entry:
 ; ARM: t2
   %add.ptr = getelementptr inbounds i32, i32* %ptr, i32 63
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
 ; ARM: ldr.w r{{[0-9]}}, [r0, #252]
   ret i32 %0
 }
@@ -22,7 +22,7 @@ define zeroext i16 @t3(i16* nocapture %p
 entry:
 ; ARM: t3
   %add.ptr = getelementptr inbounds i16, i16* %ptr, i16 1
-  %0 = load i16* %add.ptr, align 4
+  %0 = load i16, i16* %add.ptr, align 4
 ; ARM: ldrh r{{[0-9]}}, [r0, #2]
   ret i16 %0
 }
@@ -31,7 +31,7 @@ define zeroext i16 @t4(i16* nocapture %p
 entry:
 ; ARM: t4
   %add.ptr = getelementptr inbounds i16, i16* %ptr, i16 63
-  %0 = load i16* %add.ptr, align 4
+  %0 = load i16, i16* %add.ptr, align 4
 ; ARM: ldrh.w r{{[0-9]}}, [r0, #126]
   ret i16 %0
 }
@@ -40,7 +40,7 @@ define zeroext i8 @t5(i8* nocapture %ptr
 entry:
 ; ARM: t5
   %add.ptr = getelementptr inbounds i8, i8* %ptr, i8 1
-  %0 = load i8* %add.ptr, align 4
+  %0 = load i8, i8* %add.ptr, align 4
 ; ARM: ldrb r{{[0-9]}}, [r0, #1]
   ret i8 %0
 }
@@ -49,7 +49,7 @@ define zeroext i8 @t6(i8* nocapture %ptr
 entry:
 ; ARM: t6
   %add.ptr = getelementptr inbounds i8, i8* %ptr, i8 63
-  %0 = load i8* %add.ptr, align 4
+  %0 = load i8, i8* %add.ptr, align 4
 ; ARM: ldrb.w r{{[0-9]}}, [r0, #63]
   ret i8 %0
 }

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define i32 @t1(i32* nocapture %ptr) noun
 entry:
 ; THUMB: t1
   %add.ptr = getelementptr inbounds i32, i32* %ptr, i32 -1
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
 ; THUMB: ldr r{{[0-9]}}, [r0, #-4]
   ret i32 %0
 }
@@ -13,7 +13,7 @@ define i32 @t2(i32* nocapture %ptr) noun
 entry:
 ; THUMB: t2
   %add.ptr = getelementptr inbounds i32, i32* %ptr, i32 -63
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
 ; THUMB: ldr r{{[0-9]}}, [r0, #-252]
   ret i32 %0
 }
@@ -22,7 +22,7 @@ define i32 @t3(i32* nocapture %ptr) noun
 entry:
 ; THUMB: t3
   %add.ptr = getelementptr inbounds i32, i32* %ptr, i32 -64
-  %0 = load i32* %add.ptr, align 4
+  %0 = load i32, i32* %add.ptr, align 4
 ; THUMB: ldr r{{[0-9]}}, [r0]
   ret i32 %0
 }
@@ -31,7 +31,7 @@ define zeroext i16 @t4(i16* nocapture %p
 entry:
 ; THUMB: t4
   %add.ptr = getelementptr inbounds i16, i16* %ptr, i32 -1
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
 ; THUMB: ldrh r{{[0-9]}}, [r0, #-2]
   ret i16 %0
 }
@@ -40,7 +40,7 @@ define zeroext i16 @t5(i16* nocapture %p
 entry:
 ; THUMB: t5
   %add.ptr = getelementptr inbounds i16, i16* %ptr, i32 -127
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
 ; THUMB: ldrh r{{[0-9]}}, [r0, #-254]
   ret i16 %0
 }
@@ -49,7 +49,7 @@ define zeroext i16 @t6(i16* nocapture %p
 entry:
 ; THUMB: t6
   %add.ptr = getelementptr inbounds i16, i16* %ptr, i32 -128
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
 ; THUMB: ldrh r{{[0-9]}}, [r0]
   ret i16 %0
 }
@@ -58,7 +58,7 @@ define zeroext i8 @t7(i8* nocapture %ptr
 entry:
 ; THUMB: t7
   %add.ptr = getelementptr inbounds i8, i8* %ptr, i32 -1
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
 ; THUMB: ldrb r{{[0-9]}}, [r0, #-1]
   ret i8 %0
 }
@@ -67,7 +67,7 @@ define zeroext i8 @t8(i8* nocapture %ptr
 entry:
 ; THUMB: t8
   %add.ptr = getelementptr inbounds i8, i8* %ptr, i32 -255
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
 ; THUMB: ldrb r{{[0-9]}}, [r0, #-255]
   ret i8 %0
 }
@@ -76,7 +76,7 @@ define zeroext i8 @t9(i8* nocapture %ptr
 entry:
 ; THUMB: t9
   %add.ptr = getelementptr inbounds i8, i8* %ptr, i32 -256
-  %0 = load i8* %add.ptr, align 1
+  %0 = load i8, i8* %add.ptr, align 1
 ; THUMB: ldrb r{{[0-9]}}, [r0]
   ret i8 %0
 }

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define zeroext i16 @t1(i16* nocapture %a
 entry:
 ; ARM: t1
   %add.ptr = getelementptr inbounds i16, i16* %a, i64 -8
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
 ; ARM: ldrh r0, [r0, #-16]
   ret i16 %0
 }
@@ -15,7 +15,7 @@ define zeroext i16 @t2(i16* nocapture %a
 entry:
 ; ARM: t2
   %add.ptr = getelementptr inbounds i16, i16* %a, i64 -16
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
 ; ARM: ldrh r0, [r0, #-32]
   ret i16 %0
 }
@@ -24,7 +24,7 @@ define zeroext i16 @t3(i16* nocapture %a
 entry:
 ; ARM: t3
   %add.ptr = getelementptr inbounds i16, i16* %a, i64 -127
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
 ; ARM: ldrh r0, [r0, #-254]
   ret i16 %0
 }
@@ -33,7 +33,7 @@ define zeroext i16 @t4(i16* nocapture %a
 entry:
 ; ARM: t4
   %add.ptr = getelementptr inbounds i16, i16* %a, i64 -128
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
 ; ARM: mvn r{{[1-9]}}, #255
 ; ARM: add r0, r0, r{{[1-9]}}
 ; ARM: ldrh r0, [r0]
@@ -44,7 +44,7 @@ define zeroext i16 @t5(i16* nocapture %a
 entry:
 ; ARM: t5
   %add.ptr = getelementptr inbounds i16, i16* %a, i64 8
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
 ; ARM: ldrh r0, [r0, #16]
   ret i16 %0
 }
@@ -53,7 +53,7 @@ define zeroext i16 @t6(i16* nocapture %a
 entry:
 ; ARM: t6
   %add.ptr = getelementptr inbounds i16, i16* %a, i64 16
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
 ; ARM: ldrh r0, [r0, #32]
   ret i16 %0
 }
@@ -62,7 +62,7 @@ define zeroext i16 @t7(i16* nocapture %a
 entry:
 ; ARM: t7
   %add.ptr = getelementptr inbounds i16, i16* %a, i64 127
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
 ; ARM: ldrh r0, [r0, #254]
   ret i16 %0
 }
@@ -71,7 +71,7 @@ define zeroext i16 @t8(i16* nocapture %a
 entry:
 ; ARM: t8
   %add.ptr = getelementptr inbounds i16, i16* %a, i64 128
-  %0 = load i16* %add.ptr, align 2
+  %0 = load i16, i16* %add.ptr, align 2
 ; ARM: add r0, r0, #256
 ; ARM: ldrh r0, [r0]
   ret i16 %0
@@ -124,7 +124,7 @@ define signext i8 @t13(i8* nocapture %a)
 entry:
 ; ARM: t13
   %add.ptr = getelementptr inbounds i8, i8* %a, i64 -8
-  %0 = load i8* %add.ptr, align 2
+  %0 = load i8, i8* %add.ptr, align 2
 ; ARM: ldrsb r0, [r0, #-8]
   ret i8 %0
 }
@@ -133,7 +133,7 @@ define signext i8 @t14(i8* nocapture %a)
 entry:
 ; ARM: t14
   %add.ptr = getelementptr inbounds i8, i8* %a, i64 -255
-  %0 = load i8* %add.ptr, align 2
+  %0 = load i8, i8* %add.ptr, align 2
 ; ARM: ldrsb r0, [r0, #-255]
   ret i8 %0
 }
@@ -142,7 +142,7 @@ define signext i8 @t15(i8* nocapture %a)
 entry:
 ; ARM: t15
   %add.ptr = getelementptr inbounds i8, i8* %a, i64 -256
-  %0 = load i8* %add.ptr, align 2
+  %0 = load i8, i8* %add.ptr, align 2
 ; ARM: mvn r{{[1-9]}}, #255
 ; ARM: add r0, r0, r{{[1-9]}}
 ; ARM: ldrsb r0, [r0]

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-load-store-verify.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-load-store-verify.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-load-store-verify.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-load-store-verify.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ define i8 @t1() nounwind uwtable ssp {
 ; ALL: @t1
 ; ALL: ldrb
 ; ALL: add
-  %1 = load i8* @a, align 1
+  %1 = load i8, i8* @a, align 1
   %2 = add nsw i8 %1, 1
   ret i8 %2
 }
@@ -26,7 +26,7 @@ define i16 @t2() nounwind uwtable ssp {
 ; ALL: @t2
 ; ALL: ldrh
 ; ALL: add
-  %1 = load i16* @b, align 2
+  %1 = load i16, i16* @b, align 2
   %2 = add nsw i16 %1, 1
   ret i16 %2
 }
@@ -35,7 +35,7 @@ define i32 @t3() nounwind uwtable ssp {
 ; ALL: @t3
 ; ALL: ldr
 ; ALL: add
-  %1 = load i32* @c, align 4
+  %1 = load i32, i32* @c, align 4
   %2 = add nsw i32 %1, 1
   ret i32 %2
 }

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-pic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-pic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-pic.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-pic.ll Fri Feb 27 15:17:42 2015
@@ -29,7 +29,7 @@ entry:
 ; ARMv7-ELF-NEXT: add r[[reg2]], pc
 ; ARMv7-ELF: ldr r[[reg3:[0-9]+]],
 ; ARMv7-ELF: ldr r[[reg2]], [r[[reg3]], r[[reg2]]]
-  %tmp = load i32* @g
+  %tmp = load i32, i32* @g
   ret i32 %tmp
 }
 
@@ -60,6 +60,6 @@ entry:
 ; ARMv7-ELF-NEXT: add r[[reg5]], pc
 ; ARMv7-ELF: ldr r[[reg6:[0-9]+]],
 ; ARMv7-ELF: ldr r[[reg5]], [r[[reg6]], r[[reg5]]]
-  %tmp = load i32* @i
+  %tmp = load i32, i32* @i
   ret i32 %tmp
 }

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-pred.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-pred.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-pred.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-pred.ll Fri Feb 27 15:17:42 2015
@@ -7,9 +7,9 @@ entry:
   %X = alloca <4 x i32>, align 16
   %Y = alloca <4 x float>, align 16
   store i32 0, i32* %retval
-  %tmp = load <4 x i32>* %X, align 16
+  %tmp = load <4 x i32>, <4 x i32>* %X, align 16
   call void @__aa(<4 x i32> %tmp, i8* null, i32 3, <4 x float>* %Y)
-  %0 = load i32* %retval
+  %0 = load i32, i32* %retval
   ret i32 %0
 }
 
@@ -24,15 +24,15 @@ entry:
   store i8* %p, i8** %p.addr, align 4
   store i32 %offset, i32* %offset.addr, align 4
   store <4 x float>* %constants, <4 x float>** %constants.addr, align 4
-  %tmp = load <4 x i32>* %v.addr, align 16
+  %tmp = load <4 x i32>, <4 x i32>* %v.addr, align 16
   store <4 x i32> %tmp, <4 x i32>* %__a.addr.i, align 16
-  %tmp.i = load <4 x i32>* %__a.addr.i, align 16
+  %tmp.i = load <4 x i32>, <4 x i32>* %__a.addr.i, align 16
   %0 = bitcast <4 x i32> %tmp.i to <16 x i8>
   %1 = bitcast <16 x i8> %0 to <4 x i32>
   %vcvt.i = sitofp <4 x i32> %1 to <4 x float>
-  %tmp1 = load i8** %p.addr, align 4
-  %tmp2 = load i32* %offset.addr, align 4
-  %tmp3 = load <4 x float>** %constants.addr, align 4
+  %tmp1 = load i8*, i8** %p.addr, align 4
+  %tmp2 = load i32, i32* %offset.addr, align 4
+  %tmp3 = load <4 x float>*, <4 x float>** %constants.addr, align 4
   call void @__bb(<4 x float> %vcvt.i, i8* %tmp1, i32 %tmp2, <4 x float>* %tmp3)
   ret void
 }
@@ -48,9 +48,9 @@ entry:
   store i8* %p, i8** %p.addr, align 4
   store i32 %offset, i32* %offset.addr, align 4
   store <4 x float>* %constants, <4 x float>** %constants.addr, align 4
-  %tmp = load i64* %data, align 4
-  %tmp1 = load i8** %p.addr, align 4
-  %tmp2 = load i32* %offset.addr, align 4
+  %tmp = load i64, i64* %data, align 4
+  %tmp1 = load i8*, i8** %p.addr, align 4
+  %tmp2 = load i32, i32* %offset.addr, align 4
   %add.ptr = getelementptr i8, i8* %tmp1, i32 %tmp2
   %0 = bitcast i8* %add.ptr to i64*
   %arrayidx = getelementptr inbounds i64, i64* %0, i32 0

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-redefinition.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-redefinition.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-redefinition.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-redefinition.ll Fri Feb 27 15:17:42 2015
@@ -6,6 +6,6 @@ target triple = "thumbv7-apple-macosx10.
 
 define i32 @f(i32* %x) nounwind ssp {
   %y = getelementptr inbounds i32, i32* %x, i32 5000
-  %tmp103 = load i32* %y, align 4
+  %tmp103 = load i32, i32* %y, align 4
   ret i32 %tmp103
 }

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-static.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-static.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-static.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-static.ll Fri Feb 27 15:17:42 2015
@@ -9,12 +9,12 @@ entry:
   %addend.addr = alloca float*, align 4
   store float* %sum, float** %sum.addr, align 4
   store float* %addend, float** %addend.addr, align 4
-  %tmp = load float** %sum.addr, align 4
-  %tmp1 = load float* %tmp
-  %tmp2 = load float** %addend.addr, align 4
-  %tmp3 = load float* %tmp2
+  %tmp = load float*, float** %sum.addr, align 4
+  %tmp1 = load float, float* %tmp
+  %tmp2 = load float*, float** %addend.addr, align 4
+  %tmp3 = load float, float* %tmp2
   %add = fadd float %tmp1, %tmp3
-  %tmp4 = load float** %sum.addr, align 4
+  %tmp4 = load float*, float** %sum.addr, align 4
   store float %add, float* %tmp4
   ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-vararg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-vararg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-vararg.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-vararg.ll Fri Feb 27 15:17:42 2015
@@ -10,11 +10,11 @@ entry:
   %m = alloca i32, align 4
   %n = alloca i32, align 4
   %tmp = alloca i32, align 4
-  %0 = load i32* %i, align 4
-  %1 = load i32* %j, align 4
-  %2 = load i32* %k, align 4
-  %3 = load i32* %m, align 4
-  %4 = load i32* %n, align 4
+  %0 = load i32, i32* %i, align 4
+  %1 = load i32, i32* %j, align 4
+  %2 = load i32, i32* %k, align 4
+  %3 = load i32, i32* %m, align 4
+  %4 = load i32, i32* %n, align 4
 ; ARM: VarArg
 ; ARM: mov [[FP:r[0-9]+]], sp
 ; ARM: sub sp, sp, #32
@@ -39,7 +39,7 @@ entry:
 ; THUMB: bl {{_?}}CallVariadic
   %call = call i32 (i32, ...)* @CallVariadic(i32 5, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4)
   store i32 %call, i32* %tmp, align 4
-  %5 = load i32* %tmp, align 4
+  %5 = load i32, i32* %tmp, align 4
   ret i32 %5
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@ entry:
   %b.addr = alloca i32, align 4
   store i32 %a, i32* %a.addr
   store i32 %b, i32* %b.addr
-  %tmp = load i32* %a.addr
-  %tmp1 = load i32* %b.addr
+  %tmp = load i32, i32* %a.addr
+  %tmp1 = load i32, i32* %b.addr
   %add = add nsw i32 %tmp, %tmp1
   ret i32 %add
 }
@@ -110,9 +110,9 @@ bb2:
 ; ARM: sxth
 
 bb3:
-  %c1 = load i8* %ptr3
-  %c2 = load i16* %ptr2
-  %c3 = load i32* %ptr1
+  %c1 = load i8, i8* %ptr3
+  %c2 = load i16, i16* %ptr2
+  %c3 = load i32, i32* %ptr1
   %c4 = zext i8 %c1 to i32
   %c5 = sext i16 %c2 to i32
   %c6 = add i32 %c4, %c5
@@ -138,7 +138,7 @@ bb3:
 @test4g = external global i32
 
 define void @test4() {
-  %a = load i32* @test4g
+  %a = load i32, i32* @test4g
   %b = add i32 %a, 1
   store i32 %b, i32* @test4g
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/fastisel-gep-promote-before-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fastisel-gep-promote-before-add.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fastisel-gep-promote-before-add.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fastisel-gep-promote-before-add.ll Fri Feb 27 15:17:42 2015
@@ -6,13 +6,13 @@ define zeroext i8 @gep_promotion(i8* %pt
 entry:
   %ptr.addr = alloca i8*, align 8
   %add = add i8 64, 64 ; 0x40 + 0x40
-  %0 = load i8** %ptr.addr, align 8
+  %0 = load i8*, i8** %ptr.addr, align 8
 
   ; CHECK-LABEL: _gep_promotion:
   ; CHECK: ldrb {{r[0-9]+}}, {{\[r[0-9]+\]}}
   %arrayidx = getelementptr inbounds i8, i8* %0, i8 %add
 
-  %1 = load i8* %arrayidx, align 1
+  %1 = load i8, i8* %arrayidx, align 1
   ret i8 %1
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/flag-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/flag-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/flag-crash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/flag-crash.ll Fri Feb 27 15:17:42 2015
@@ -6,12 +6,12 @@
 define fastcc void @func(%struct.gs_matrix* nocapture %pm1) nounwind {
 entry:
   %0 = getelementptr inbounds %struct.gs_matrix, %struct.gs_matrix* %pm1, i32 0, i32 6
-  %1 = load float* %0, align 4
+  %1 = load float, float* %0, align 4
   %2 = getelementptr inbounds %struct.gs_matrix, %struct.gs_matrix* %pm1, i32 0, i32 8
-  %3 = load float* %2, align 4
+  %3 = load float, float* %2, align 4
   %4 = getelementptr inbounds %struct.gs_matrix, %struct.gs_matrix* %pm1, i32 0, i32 2
   %5 = bitcast float* %4 to i32*
-  %6 = load i32* %5, align 4
+  %6 = load i32, i32* %5, align 4
   %7 = or i32 0, %6
   %.mask = and i32 %7, 2147483647
   %8 = icmp eq i32 %.mask, 0

Modified: llvm/trunk/test/CodeGen/ARM/fnegs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fnegs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fnegs.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fnegs.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@
 
 define float @test1(float* %a) {
 entry:
-	%0 = load float* %a, align 4		; <float> [#uses=2]
+	%0 = load float, float* %a, align 4		; <float> [#uses=2]
 	%1 = fsub float -0.000000e+00, %0		; <float> [#uses=2]
 	%2 = fpext float %1 to double		; <double> [#uses=1]
 	%3 = fcmp olt double %2, 1.234000e+00		; <i1> [#uses=1]
@@ -48,7 +48,7 @@ entry:
 
 define float @test2(float* %a) {
 entry:
-	%0 = load float* %a, align 4		; <float> [#uses=2]
+	%0 = load float, float* %a, align 4		; <float> [#uses=2]
 	%1 = fmul float -1.000000e+00, %0		; <float> [#uses=2]
 	%2 = fpext float %1 to double		; <double> [#uses=1]
 	%3 = fcmp olt double %2, 1.234000e+00		; <i1> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/fold-stack-adjust.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fold-stack-adjust.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fold-stack-adjust.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fold-stack-adjust.ll Fri Feb 27 15:17:42 2015
@@ -82,7 +82,7 @@ define void @check_vfp_fold() minsize {
 
   %var = alloca i8, i32 16
 
-  %tmp = load %bigVec* @var
+  %tmp = load %bigVec, %bigVec* @var
   call void @bar(i8* %var)
   store %bigVec %tmp, %bigVec* @var
 
@@ -119,7 +119,7 @@ define arm_aapcs_vfpcc double @check_vfp
 
   %var = alloca i8, i32 64
 
-  %tmp = load %bigVec* @var
+  %tmp = load %bigVec, %bigVec* @var
   call void @bar(i8* %var)
   store %bigVec %tmp, %bigVec* @var
 
@@ -152,7 +152,7 @@ define void @test_fold_point(i1 %tst) mi
 
   ; We want a long-lived floating register so that a callee-saved dN is used and
   ; there's both a vpop and a pop.
-  %live_val = load double* @dbl
+  %live_val = load double, double* @dbl
   br i1 %tst, label %true, label %end
 true:
   call void @bar(i8* %var)

Modified: llvm/trunk/test/CodeGen/ARM/fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fp.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fp.ll Fri Feb 27 15:17:42 2015
@@ -45,7 +45,7 @@ define double @h(double* %v) {
 ;CHECK: vldr
 ;CHECK-NEXT: vmov
 entry:
-        %tmp = load double* %v          ; <double> [#uses=1]
+        %tmp = load double, double* %v          ; <double> [#uses=1]
         ret double %tmp
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/fp16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fp16.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fp16.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fp16.ll Fri Feb 27 15:17:42 2015
@@ -16,8 +16,8 @@ define void @foo() nounwind {
 ; CHECK-ARMV8-LABEL: foo:
 ; CHECK-SOFTFLOAT-LABEL: foo:
 entry:
-  %0 = load i16* @x, align 2
-  %1 = load i16* @y, align 2
+  %0 = load i16, i16* @x, align 2
+  %1 = load i16, i16* @y, align 2
   %2 = tail call float @llvm.convert.from.fp16.f32(i16 %0)
 ; CHECK: __gnu_h2f_ieee
 ; CHECK-FP16: vcvtb.f32.f16

Modified: llvm/trunk/test/CodeGen/ARM/fpcmp-opt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fpcmp-opt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fpcmp-opt.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fpcmp-opt.ll Fri Feb 27 15:17:42 2015
@@ -13,8 +13,8 @@ entry:
 ; CHECK: vcmpe.f32 [[S1]], [[S0]]
 ; CHECK: vmrs APSR_nzcv, fpscr
 ; CHECK: beq
-  %0 = load float* %a
-  %1 = load float* %b
+  %0 = load float, float* %a
+  %1 = load float, float* %b
   %2 = fcmp une float %0, %1
   br i1 %2, label %bb1, label %bb2
 
@@ -41,7 +41,7 @@ entry:
 ; CHECK-NOT: vcmpe.f32
 ; CHECK-NOT: vmrs
 ; CHECK: bne
-  %0 = load double* %a
+  %0 = load double, double* %a
   %1 = fcmp oeq double %0, 0.000000e+00
   br i1 %1, label %bb1, label %bb2
 
@@ -64,7 +64,7 @@ entry:
 ; CHECK-NOT: vcmpe.f32
 ; CHECK-NOT: vmrs
 ; CHECK: bne
-  %0 = load float* %a
+  %0 = load float, float* %a
   %1 = fcmp oeq float %0, 0.000000e+00
   br i1 %1, label %bb1, label %bb2
 

Modified: llvm/trunk/test/CodeGen/ARM/fpmem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fpmem.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fpmem.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fpmem.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define float @f1(float %a) {
 define float @f2(float* %v, float %u) {
 ; CHECK-LABEL: f2:
 ; CHECK: vldr{{.*}}[
-        %tmp = load float* %v           ; <float> [#uses=1]
+        %tmp = load float, float* %v           ; <float> [#uses=1]
         %tmp1 = fadd float %tmp, %u              ; <float> [#uses=1]
         ret float %tmp1
 }
@@ -18,7 +18,7 @@ define float @f2offset(float* %v, float
 ; CHECK-LABEL: f2offset:
 ; CHECK: vldr{{.*}}, #4]
         %addr = getelementptr float, float* %v, i32 1
-        %tmp = load float* %addr
+        %tmp = load float, float* %addr
         %tmp1 = fadd float %tmp, %u
         ret float %tmp1
 }
@@ -27,7 +27,7 @@ define float @f2noffset(float* %v, float
 ; CHECK-LABEL: f2noffset:
 ; CHECK: vldr{{.*}}, #-4]
         %addr = getelementptr float, float* %v, i32 -1
-        %tmp = load float* %addr
+        %tmp = load float, float* %addr
         %tmp1 = fadd float %tmp, %u
         ret float %tmp1
 }

Modified: llvm/trunk/test/CodeGen/ARM/fptoint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fptoint.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fptoint.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fptoint.ll Fri Feb 27 15:17:42 2015
@@ -4,13 +4,13 @@
 @u = weak global i32 0		; <i32*> [#uses=2]
 
 define i32 @foo1(float *%x) {
-        %tmp1 = load float* %x
+        %tmp1 = load float, float* %x
 	%tmp2 = bitcast float %tmp1 to i32
 	ret i32 %tmp2
 }
 
 define i64 @foo2(double *%x) {
-        %tmp1 = load double* %x
+        %tmp1 = load double, double* %x
 	%tmp2 = bitcast double %tmp1 to i64
 	ret i64 %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/frame-register.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/frame-register.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/frame-register.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/frame-register.ll Fri Feb 27 15:17:42 2015
@@ -17,12 +17,12 @@ entry:
   %i.addr = alloca i32, align 4
   %j = alloca i32, align 4
   store i32 %i, i32* %i.addr, align 4
-  %0 = load i32* %i.addr, align 4
+  %0 = load i32, i32* %i.addr, align 4
   %add = add nsw i32 %0, 1
   store i32 %add, i32* %j, align 4
-  %1 = load i32* %j, align 4
+  %1 = load i32, i32* %j, align 4
   call void @callee(i32 %1)
-  %2 = load i32* %j, align 4
+  %2 = load i32, i32* %j, align 4
   %add1 = add nsw i32 %2, 1
   ret i32 %add1
 }

Modified: llvm/trunk/test/CodeGen/ARM/fusedMAC.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fusedMAC.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fusedMAC.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fusedMAC.ll Fri Feb 27 15:17:42 2015
@@ -144,7 +144,7 @@ entry:
 define float @test_fnms_f32(float %a, float %b, float* %c) nounwind readnone ssp {
 ; CHECK: test_fnms_f32
 ; CHECK: vfnms.f32
-  %tmp1 = load float* %c, align 4
+  %tmp1 = load float, float* %c, align 4
   %tmp2 = fsub float -0.0, %tmp1
   %tmp3 = tail call float @llvm.fma.f32(float %a, float %b, float %tmp2) nounwind readnone
   ret float %tmp3 

Modified: llvm/trunk/test/CodeGen/ARM/ghc-tcreturn-lowered.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ghc-tcreturn-lowered.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ghc-tcreturn-lowered.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ghc-tcreturn-lowered.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ define ghccc void @test_direct_tail() {
 define ghccc void @test_indirect_tail() {
 ; CHECK-LABEL: test_indirect_tail:
 ; CHECK: bx {{r[0-9]+}}
-  %func = load void()** @ind_func
+  %func = load void()*, void()** @ind_func
   tail call ghccc void()* %func()
   ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/global-merge-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/global-merge-1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/global-merge-1.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/global-merge-1.ll Fri Feb 27 15:17:42 2015
@@ -55,12 +55,12 @@ declare i32 @calc(...) #1
 
 ; Function Attrs: nounwind ssp
 define internal void @calculate() #0 {
-  %1 = load <4 x i32>* bitcast ([5 x i32]* @bar to <4 x i32>*), align 4
-  %2 = load <4 x i32>* bitcast ([5 x i32]* @baz to <4 x i32>*), align 4
+  %1 = load <4 x i32>, <4 x i32>* bitcast ([5 x i32]* @bar to <4 x i32>*), align 4
+  %2 = load <4 x i32>, <4 x i32>* bitcast ([5 x i32]* @baz to <4 x i32>*), align 4
   %3 = mul <4 x i32> %2, %1
   store <4 x i32> %3, <4 x i32>* bitcast ([5 x i32]* @foo to <4 x i32>*), align 4
-  %4 = load i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 4), align 4, !tbaa !1
-  %5 = load i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 4), align 4, !tbaa !1
+  %4 = load i32, i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 4), align 4, !tbaa !1
+  %5 = load i32, i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 4), align 4, !tbaa !1
   %6 = mul nsw i32 %5, %4
   store i32 %6, i32* getelementptr inbounds ([5 x i32]* @foo, i32 0, i32 4), align 4, !tbaa !1
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/globals.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/globals.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/globals.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/globals.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 @G = external global i32
 
 define i32 @test1() {
-	%tmp = load i32* @G
+	%tmp = load i32, i32* @G
 	ret i32 %tmp
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/gv-stubs-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/gv-stubs-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/gv-stubs-crash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/gv-stubs-crash.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 @Exn = external hidden unnamed_addr constant { i8*, i8* }
 
 define hidden void @func(i32* %this, i32* %e) optsize align 2 {
-  %e.ld = load i32* %e, align 4
+  %e.ld = load i32, i32* %e, align 4
   %inv = invoke zeroext i1 @func2(i32* %this, i32 %e.ld) optsize
           to label %ret unwind label %lpad
 

Modified: llvm/trunk/test/CodeGen/ARM/half.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/half.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/half.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/half.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define void @test_load_store(half* %in,
 ; CHECK-LABEL: test_load_store:
 ; CHECK: ldrh [[TMP:r[0-9]+]], [r0]
 ; CHECK: strh [[TMP]], [r1]
-  %val = load half* %in
+  %val = load half, half* %in
   store half %val, half* %out
   ret void
 }
@@ -14,7 +14,7 @@ define void @test_load_store(half* %in,
 define i16 @test_bitcast_from_half(half* %addr) {
 ; CHECK-LABEL: test_bitcast_from_half:
 ; CHECK: ldrh r0, [r0]
-  %val = load half* %addr
+  %val = load half, half* %addr
   %val_int = bitcast half %val to i16
   ret i16 %val_int
 }
@@ -33,7 +33,7 @@ define float @test_extend32(half* %addr)
 ; CHECK-OLD: b.w ___gnu_h2f_ieee
 ; CHECK-F16: vcvtb.f32.f16
 ; CHECK-V8: vcvtb.f32.f16
-  %val16 = load half* %addr
+  %val16 = load half, half* %addr
   %val32 = fpext half %val16 to float
   ret float %val32
 }
@@ -46,7 +46,7 @@ define double @test_extend64(half* %addr
 ; CHECK-F16: vcvtb.f32.f16
 ; CHECK-F16: vcvt.f64.f32
 ; CHECK-V8: vcvtb.f64.f16
-  %val16 = load half* %addr
+  %val16 = load half, half* %addr
   %val32 = fpext half %val16 to double
   ret double %val32
 }

Modified: llvm/trunk/test/CodeGen/ARM/hidden-vis-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/hidden-vis-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/hidden-vis-2.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/hidden-vis-2.ll Fri Feb 27 15:17:42 2015
@@ -7,6 +7,6 @@ entry:
 ; CHECK-LABEL: t:
 ; CHECK: ldr
 ; CHECK-NEXT: ldr
-	%0 = load i32* @x, align 4		; <i32> [#uses=1]
+	%0 = load i32, i32* @x, align 4		; <i32> [#uses=1]
 	ret i32 %0
 }

Modified: llvm/trunk/test/CodeGen/ARM/hidden-vis-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/hidden-vis-3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/hidden-vis-3.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/hidden-vis-3.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ entry:
 ; CHECK: LCPI0_1:
 ; CHECK-NEXT: .long _y
 
-	%0 = load i32* @x, align 4		; <i32> [#uses=1]
-	%1 = load i32* @y, align 4		; <i32> [#uses=1]
+	%0 = load i32, i32* @x, align 4		; <i32> [#uses=1]
+	%1 = load i32, i32* @y, align 4		; <i32> [#uses=1]
 	%2 = add i32 %1, %0		; <i32> [#uses=1]
 	ret i32 %2
 }

Modified: llvm/trunk/test/CodeGen/ARM/ifconv-kills.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ifconv-kills.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ifconv-kills.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ifconv-kills.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ entry:
 ; present something which can be easily if-converted
 if.then:
   ; %R0 should be killed here
-  %valt = load i32* %ptr, align 4
+  %valt = load i32, i32* %ptr, align 4
   br label %return
 
 if.else:
@@ -18,7 +18,7 @@ if.else:
   ; has to be removed because if.then will follow after this and still
   ; read it.
   %addr = getelementptr inbounds i32, i32* %ptr, i32 4
-  %vale = load i32* %addr, align 4
+  %vale = load i32, i32* %addr, align 4
   br label %return
 
 return:

Modified: llvm/trunk/test/CodeGen/ARM/ifconv-regmask.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ifconv-regmask.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ifconv-regmask.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ifconv-regmask.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 ; Function Attrs: nounwind ssp
 define i32 @sfu() {
 entry:
-  %bf.load = load i32* getelementptr inbounds (%union.opcode* @opcode, i32 0, i32 0), align 4
+  %bf.load = load i32, i32* getelementptr inbounds (%union.opcode* @opcode, i32 0, i32 0), align 4
   %bf.lshr = lshr i32 %bf.load, 26
   %bf.clear = and i32 %bf.lshr, 7
   switch i32 %bf.clear, label %return [

Modified: llvm/trunk/test/CodeGen/ARM/ifcvt-branch-weight.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ifcvt-branch-weight.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ifcvt-branch-weight.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ifcvt-branch-weight.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 define internal zeroext i8 @bar(%struct.S* %x, %struct.S* nocapture %y) nounwind readonly {
 entry:
   %0 = getelementptr inbounds %struct.S, %struct.S* %x, i32 0, i32 1, i32 0
-  %1 = load i8* %0, align 1
+  %1 = load i8, i8* %0, align 1
   %2 = zext i8 %1 to i32
   %3 = and i32 %2, 112
   %4 = icmp eq i32 %3, 0
@@ -12,7 +12,7 @@ entry:
 
 bb:
   %5 = getelementptr inbounds %struct.S, %struct.S* %y, i32 0, i32 1, i32 0
-  %6 = load i8* %5, align 1
+  %6 = load i8, i8* %5, align 1
   %7 = zext i8 %6 to i32
   %8 = and i32 %7, 112
   %9 = icmp eq i32 %8, 0

Modified: llvm/trunk/test/CodeGen/ARM/ifcvt11.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ifcvt11.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ifcvt11.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ifcvt11.ll Fri Feb 27 15:17:42 2015
@@ -23,8 +23,8 @@ bb:
   %n.08 = phi i32 [ 0, %bb.nph ], [ %10, %bb4 ]
   %scevgep10 = getelementptr inbounds %struct.xyz_t, %struct.xyz_t* %p, i32 %n.08, i32 0
   %scevgep11 = getelementptr %struct.xyz_t, %struct.xyz_t* %p, i32 %n.08, i32 1
-  %3 = load double* %scevgep10, align 4
-  %4 = load double* %scevgep11, align 4
+  %3 = load double, double* %scevgep10, align 4
+  %4 = load double, double* %scevgep11, align 4
   %5 = fcmp uge double %3, %4
   br i1 %5, label %bb3, label %bb1
 
@@ -35,7 +35,7 @@ bb1:
 ; CHECK: vcmpe.f64
 ; CHECK: vmrs APSR_nzcv, fpscr
   %scevgep12 = getelementptr %struct.xyz_t, %struct.xyz_t* %p, i32 %n.08, i32 2
-  %6 = load double* %scevgep12, align 4
+  %6 = load double, double* %scevgep12, align 4
   %7 = fcmp uge double %3, %6
   br i1 %7, label %bb3, label %bb2
 

Modified: llvm/trunk/test/CodeGen/ARM/ifcvt5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ifcvt5.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ifcvt5.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ifcvt5.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 
 define void @foo(i32 %a) {
 entry:
-	%tmp = load i32** @x		; <i32*> [#uses=1]
+	%tmp = load i32*, i32** @x		; <i32*> [#uses=1]
 	store i32 %a, i32* %tmp
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/ifcvt7.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ifcvt7.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ifcvt7.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ifcvt7.ll Fri Feb 27 15:17:42 2015
@@ -11,9 +11,9 @@ entry:
 	br label %tailrecurse
 
 tailrecurse:		; preds = %bb, %entry
-	%tmp6 = load %struct.quad_struct** null		; <%struct.quad_struct*> [#uses=1]
-	%tmp9 = load %struct.quad_struct** null		; <%struct.quad_struct*> [#uses=2]
-	%tmp12 = load %struct.quad_struct** null		; <%struct.quad_struct*> [#uses=1]
+	%tmp6 = load %struct.quad_struct*, %struct.quad_struct** null		; <%struct.quad_struct*> [#uses=1]
+	%tmp9 = load %struct.quad_struct*, %struct.quad_struct** null		; <%struct.quad_struct*> [#uses=2]
+	%tmp12 = load %struct.quad_struct*, %struct.quad_struct** null		; <%struct.quad_struct*> [#uses=1]
 	%tmp14 = icmp eq %struct.quad_struct* null, null		; <i1> [#uses=1]
 	%tmp17 = icmp eq %struct.quad_struct* %tmp6, null		; <i1> [#uses=1]
 	%tmp23 = icmp eq %struct.quad_struct* %tmp9, null		; <i1> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/illegal-vector-bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/illegal-vector-bitcast.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/illegal-vector-bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/illegal-vector-bitcast.ll Fri Feb 27 15:17:42 2015
@@ -3,10 +3,10 @@
 
 define void @foo(<8 x float>* %f, <8 x float>* %g, <4 x i64>* %y)
 {
-  %h = load <8 x float>* %f
+  %h = load <8 x float>, <8 x float>* %f
   %i = fmul <8 x float> %h, <float 0x3FF19999A0000000, float 0x400A666660000000, float 0x40119999A0000000, float 0x40159999A0000000, float 0.5, float 0x3FE3333340000000, float 0x3FE6666660000000, float 0x3FE99999A0000000>
   %m = bitcast <8 x float> %i to <4 x i64>
-  %z = load <4 x i64>* %y
+  %z = load <4 x i64>, <4 x i64>* %y
   %n = mul <4 x i64> %z, %m
   %p = bitcast <4 x i64> %n to <8 x float>
   store <8 x float> %p, <8 x float>* %g

Modified: llvm/trunk/test/CodeGen/ARM/indirectbr-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/indirectbr-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/indirectbr-2.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/indirectbr-2.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@
 
 define i32 @func() nounwind ssp {
   %1 = alloca i32, align 4
-  %2 = load i32* @foo, align 4
+  %2 = load i32, i32* @foo, align 4
   %3 = icmp eq i32 %2, 34879
   br label %4
 
@@ -24,7 +24,7 @@ define i32 @func() nounwind ssp {
   %6 = mul i32 %5, 287
   %7 = add i32 %6, 2
   %8 = getelementptr [2 x i32], [2 x i32]* @DWJumpTable2808, i32 0, i32 %5
-  %9 = load i32* %8
+  %9 = load i32, i32* %8
   %10 = add i32 %9, ptrtoint (i8* blockaddress(@func, %4) to i32)
   %11 = inttoptr i32 %10 to i8*
   %12 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([45 x i8]* @0, i32 0, i32 0))
@@ -33,7 +33,7 @@ define i32 @func() nounwind ssp {
 ; <label>:13                                      ; preds = %4
   %tmp14 = phi i32 [ %7, %4 ]
   store i32 23958, i32* @foo, align 4
-  %tmp15 = load i32* %1, align 4
+  %tmp15 = load i32, i32* %1, align 4
   %tmp16 = icmp eq i32 %tmp15, 0
   %tmp17 = zext i1 %tmp16 to i32
   %tmp21 = add i32 %tmp17, %tmp14

Modified: llvm/trunk/test/CodeGen/ARM/indirectbr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/indirectbr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/indirectbr.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/indirectbr.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ entry:
 ; THUMB: [[NEXTADDR_PCBASE:LPC0_[0-9]]]:
 ; THUMB: add r[[NEXTADDR_REG]], pc
 
-  %0 = load i8** @nextaddr, align 4               ; <i8*> [#uses=2]
+  %0 = load i8*, i8** @nextaddr, align 4               ; <i8*> [#uses=2]
   %1 = icmp eq i8* %0, null                       ; <i1> [#uses=1]
 ; indirect branch gets duplicated here
 ; ARM: bx
@@ -32,7 +32,7 @@ bb2:
 
 bb3:                                              ; preds = %entry
   %2 = getelementptr inbounds [5 x i8*], [5 x i8*]* @C.0.2070, i32 0, i32 %i ; <i8**> [#uses=1]
-  %gotovar.4.0.pre = load i8** %2, align 4        ; <i8*> [#uses=1]
+  %gotovar.4.0.pre = load i8*, i8** %2, align 4        ; <i8*> [#uses=1]
   br label %bb2
 
 L5:                                               ; preds = %bb2

Modified: llvm/trunk/test/CodeGen/ARM/inline-diagnostics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/inline-diagnostics.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/inline-diagnostics.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/inline-diagnostics.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define float @inline_func(float %f1, flo
   %c3 = alloca %struct.float4, align 4
   call void asm sideeffect "vmul.f32 ${2:q}, ${0:q}, ${1:q}", "=*r,=*r,*w"(%struct.float4* %c1, %struct.float4* %c2, %struct.float4* %c3) #1, !srcloc !1
   %x = getelementptr inbounds %struct.float4, %struct.float4* %c3, i32 0, i32 0
-  %1 = load float* %x, align 4
+  %1 = load float, float* %x, align 4
   ret float %1
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/interrupt-attr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/interrupt-attr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/interrupt-attr.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/interrupt-attr.ll Fri Feb 27 15:17:42 2015
@@ -65,7 +65,7 @@ define arm_aapcscc void @fiq_fn() aligns
 
 ; CHECK-A-THUMB-LABEL: fiq_fn:
 ; CHECK-M-LABEL: fiq_fn:
-  %val = load volatile [16 x i32]* @bigvar
+  %val = load volatile [16 x i32], [16 x i32]* @bigvar
   store volatile [16 x i32] %val, [16 x i32]* @bigvar
   ret void
 }
@@ -81,7 +81,7 @@ define arm_aapcscc void @swi_fn() aligns
 ; CHECK-A: pop {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}
 ; CHECK-A: subs pc, lr, #0
 
-  %val = load volatile [16 x i32]* @bigvar
+  %val = load volatile [16 x i32], [16 x i32]* @bigvar
   store volatile [16 x i32] %val, [16 x i32]* @bigvar
   ret void
 }
@@ -126,8 +126,8 @@ define arm_aapcscc void @floating_fn() a
 ; CHECK-A-NOT: vstr
 ; CHECK-A-NOT: vstm
 ; CHECK-A: vadd.f64 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-  %lhs = load volatile double* @var
-  %rhs = load volatile double* @var
+  %lhs = load volatile double, double* @var
+  %rhs = load volatile double, double* @var
   %sum = fadd double %lhs, %rhs
   store double %sum, double* @var
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/intrinsics-crypto.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/intrinsics-crypto.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/intrinsics-crypto.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/intrinsics-crypto.ll Fri Feb 27 15:17:42 2015
@@ -1,8 +1,8 @@
 ; RUN: llc < %s -mtriple=armv8 -mattr=+crypto | FileCheck %s
 
 define arm_aapcs_vfpcc <16 x i8> @test_aesde(<16 x i8>* %a, <16 x i8> *%b) {
-  %tmp = load <16 x i8>* %a
-  %tmp2 = load <16 x i8>* %b
+  %tmp = load <16 x i8>, <16 x i8>* %a
+  %tmp2 = load <16 x i8>, <16 x i8>* %b
   %tmp3 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %tmp, <16 x i8> %tmp2)
   ; CHECK: aesd.8 q{{[0-9]+}}, q{{[0-9]+}}
   %tmp4 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %tmp3, <16 x i8> %tmp2)
@@ -15,9 +15,9 @@ define arm_aapcs_vfpcc <16 x i8> @test_a
 }
 
 define arm_aapcs_vfpcc <4 x i32> @test_sha(<4 x i32> *%a, <4 x i32> *%b, <4 x i32> *%c) {
-  %tmp = load <4 x i32>* %a
-  %tmp2 = load <4 x i32>* %b
-  %tmp3 = load <4 x i32>* %c
+  %tmp = load <4 x i32>, <4 x i32>* %a
+  %tmp2 = load <4 x i32>, <4 x i32>* %b
+  %tmp3 = load <4 x i32>, <4 x i32>* %c
   %scalar = extractelement <4 x i32> %tmp, i32 0
   %resscalar = call i32 @llvm.arm.neon.sha1h(i32 %scalar)
   %res1 = insertelement <4 x i32> undef, i32 %resscalar, i32 0

Modified: llvm/trunk/test/CodeGen/ARM/invoke-donothing-assert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/invoke-donothing-assert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/invoke-donothing-assert.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/invoke-donothing-assert.ll Fri Feb 27 15:17:42 2015
@@ -46,7 +46,7 @@ new.notnull.i.i:
   br label %cleanup
 
 cleanup:
-  %0 = load i32* %a, align 4
+  %0 = load i32, i32* %a, align 4
   %inc294 = add nsw i32 %0, 4
   store i32 %inc294, i32* %a, align 4
   br i1 false, label %_ZN3lol5ArrayIivvvvvvvED1Ev.exit, label %delete.notnull.i.i.i1409

Modified: llvm/trunk/test/CodeGen/ARM/isel-v8i32-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/isel-v8i32-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/isel-v8i32-crash.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/isel-v8i32-crash.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ target datalayout = "e-m:e-p:32:32-i64:6
 define void @func(i16* nocapture %pb, float* nocapture readonly %pf) #0 {
 entry:
   %0 = bitcast float* %pf to <8 x float>*
-  %1 = load <8 x float>* %0, align 4
+  %1 = load <8 x float>, <8 x float>* %0, align 4
   %2 = fmul <8 x float> %1, <float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00>
   %3 = fptosi <8 x float> %2 to <8 x i16>
   %4 = bitcast i16* %pb to <8 x i16>*

Modified: llvm/trunk/test/CodeGen/ARM/krait-cpu-div-attribute.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/krait-cpu-div-attribute.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/krait-cpu-div-attribute.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/krait-cpu-div-attribute.ll Fri Feb 27 15:17:42 2015
@@ -28,8 +28,8 @@ entry:
   store i32 0, i32* %retval
   store volatile i32 100, i32* %b, align 4
   store volatile i32 32, i32* %c, align 4
-  %0 = load volatile i32* %b, align 4
-  %1 = load volatile i32* %c, align 4
+  %0 = load volatile i32, i32* %b, align 4
+  %1 = load volatile i32, i32* %c, align 4
   %div = sdiv i32 %0, %1
   store volatile i32 %div, i32* %a, align 4
   ret i32 0

Modified: llvm/trunk/test/CodeGen/ARM/large-stack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/large-stack.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/large-stack.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/large-stack.ll Fri Feb 27 15:17:42 2015
@@ -15,6 +15,6 @@ define i32 @test3() {
 	%tmp = alloca i32, align 4
 	%a = alloca [805306369 x i8], align 16
 	store i32 0, i32* %tmp
-	%tmp1 = load i32* %tmp
+	%tmp1 = load i32, i32* %tmp
         ret i32 %tmp1
 }

Modified: llvm/trunk/test/CodeGen/ARM/ldm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ldm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ldm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ldm.ll Fri Feb 27 15:17:42 2015
@@ -8,8 +8,8 @@ define i32 @t1() {
 ; CHECK: pop
 ; V4T-LABEL: t1:
 ; V4T: pop
-        %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 0)            ; <i32> [#uses=1]
-        %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1)           ; <i32> [#uses=1]
+        %tmp = load i32, i32* getelementptr ([0 x i32]* @X, i32 0, i32 0)            ; <i32> [#uses=1]
+        %tmp3 = load i32, i32* getelementptr ([0 x i32]* @X, i32 0, i32 1)           ; <i32> [#uses=1]
         %tmp4 = tail call i32 @f1( i32 %tmp, i32 %tmp3 )                ; <i32> [#uses=1]
         ret i32 %tmp4
 }
@@ -19,9 +19,9 @@ define i32 @t2() {
 ; CHECK: pop
 ; V4T-LABEL: t2:
 ; V4T: pop
-        %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2)            ; <i32> [#uses=1]
-        %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3)           ; <i32> [#uses=1]
-        %tmp5 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 4)           ; <i32> [#uses=1]
+        %tmp = load i32, i32* getelementptr ([0 x i32]* @X, i32 0, i32 2)            ; <i32> [#uses=1]
+        %tmp3 = load i32, i32* getelementptr ([0 x i32]* @X, i32 0, i32 3)           ; <i32> [#uses=1]
+        %tmp5 = load i32, i32* getelementptr ([0 x i32]* @X, i32 0, i32 4)           ; <i32> [#uses=1]
         %tmp6 = tail call i32 @f2( i32 %tmp, i32 %tmp3, i32 %tmp5 )             ; <i32> [#uses=1]
         ret i32 %tmp6
 }
@@ -34,9 +34,9 @@ define i32 @t3() {
 ; V4T: ldmib
 ; V4T: pop
 ; V4T-NEXT: bx lr
-        %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1)            ; <i32> [#uses=1]
-        %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2)           ; <i32> [#uses=1]
-        %tmp5 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3)           ; <i32> [#uses=1]
+        %tmp = load i32, i32* getelementptr ([0 x i32]* @X, i32 0, i32 1)            ; <i32> [#uses=1]
+        %tmp3 = load i32, i32* getelementptr ([0 x i32]* @X, i32 0, i32 2)           ; <i32> [#uses=1]
+        %tmp5 = load i32, i32* getelementptr ([0 x i32]* @X, i32 0, i32 3)           ; <i32> [#uses=1]
         %tmp6 = call i32 @f2( i32 %tmp, i32 %tmp3, i32 %tmp5 )             ; <i32> [#uses=1]
         ret i32 %tmp6
 }

Modified: llvm/trunk/test/CodeGen/ARM/ldr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ldr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ldr.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ldr.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define i32 @f1(i32* %v) {
 ; CHECK-LABEL: f1:
 ; CHECK: ldr r0
 entry:
-        %tmp = load i32* %v
+        %tmp = load i32, i32* %v
         ret i32 %tmp
 }
 
@@ -13,7 +13,7 @@ define i32 @f2(i32* %v) {
 ; CHECK: ldr r0
 entry:
         %tmp2 = getelementptr i32, i32* %v, i32 1023
-        %tmp = load i32* %tmp2
+        %tmp = load i32, i32* %tmp2
         ret i32 %tmp
 }
 
@@ -23,7 +23,7 @@ define i32 @f3(i32* %v) {
 ; CHECK: ldr r0
 entry:
         %tmp2 = getelementptr i32, i32* %v, i32 1024
-        %tmp = load i32* %tmp2
+        %tmp = load i32, i32* %tmp2
         ret i32 %tmp
 }
 
@@ -34,7 +34,7 @@ define i32 @f4(i32 %base) {
 entry:
         %tmp1 = sub i32 %base, 128
         %tmp2 = inttoptr i32 %tmp1 to i32*
-        %tmp3 = load i32* %tmp2
+        %tmp3 = load i32, i32* %tmp2
         ret i32 %tmp3
 }
 
@@ -44,7 +44,7 @@ define i32 @f5(i32 %base, i32 %offset) {
 entry:
         %tmp1 = add i32 %base, %offset
         %tmp2 = inttoptr i32 %tmp1 to i32*
-        %tmp3 = load i32* %tmp2
+        %tmp3 = load i32, i32* %tmp2
         ret i32 %tmp3
 }
 
@@ -55,7 +55,7 @@ entry:
         %tmp1 = shl i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
         %tmp3 = inttoptr i32 %tmp2 to i32*
-        %tmp4 = load i32* %tmp3
+        %tmp4 = load i32, i32* %tmp3
         ret i32 %tmp4
 }
 
@@ -66,6 +66,6 @@ entry:
         %tmp1 = lshr i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
         %tmp3 = inttoptr i32 %tmp2 to i32*
-        %tmp4 = load i32* %tmp3
+        %tmp4 = load i32, i32* %tmp3
         ret i32 %tmp4
 }

Modified: llvm/trunk/test/CodeGen/ARM/ldr_ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ldr_ext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ldr_ext.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ldr_ext.ll Fri Feb 27 15:17:42 2015
@@ -2,28 +2,28 @@
 
 define i32 @test1(i8* %t1) nounwind {
 ; CHECK: ldrb
-    %tmp.u = load i8* %t1
+    %tmp.u = load i8, i8* %t1
     %tmp1.s = zext i8 %tmp.u to i32
     ret i32 %tmp1.s
 }
 
 define i32 @test2(i16* %t1) nounwind {
 ; CHECK: ldrh
-    %tmp.u = load i16* %t1
+    %tmp.u = load i16, i16* %t1
     %tmp1.s = zext i16 %tmp.u to i32
     ret i32 %tmp1.s
 }
 
 define i32 @test3(i8* %t0) nounwind {
 ; CHECK: ldrsb
-    %tmp.s = load i8* %t0
+    %tmp.s = load i8, i8* %t0
     %tmp1.s = sext i8 %tmp.s to i32
     ret i32 %tmp1.s
 }
 
 define i32 @test4(i16* %t0) nounwind {
 ; CHECK: ldrsh
-    %tmp.s = load i16* %t0
+    %tmp.s = load i16, i16* %t0
     %tmp1.s = sext i16 %tmp.s to i32
     ret i32 %tmp1.s
 }
@@ -31,7 +31,7 @@ define i32 @test4(i16* %t0) nounwind {
 define i32 @test5() nounwind {
 ; CHECK: mov r0, #0
 ; CHECK: ldrsh
-    %tmp.s = load i16* null
+    %tmp.s = load i16, i16* null
     %tmp1.s = sext i16 %tmp.s to i32
     ret i32 %tmp1.s
 }

Modified: llvm/trunk/test/CodeGen/ARM/ldr_frame.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ldr_frame.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ldr_frame.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ldr_frame.ll Fri Feb 27 15:17:42 2015
@@ -3,14 +3,14 @@
 define i32 @f1() {
 	%buf = alloca [32 x i32], align 4
 	%tmp = getelementptr [32 x i32], [32 x i32]* %buf, i32 0, i32 0
-	%tmp1 = load i32* %tmp
+	%tmp1 = load i32, i32* %tmp
 	ret i32 %tmp1
 }
 
 define i32 @f2() {
 	%buf = alloca [32 x i8], align 4
 	%tmp = getelementptr [32 x i8], [32 x i8]* %buf, i32 0, i32 0
-	%tmp1 = load i8* %tmp
+	%tmp1 = load i8, i8* %tmp
         %tmp2 = zext i8 %tmp1 to i32
 	ret i32 %tmp2
 }
@@ -18,14 +18,14 @@ define i32 @f2() {
 define i32 @f3() {
 	%buf = alloca [32 x i32], align 4
 	%tmp = getelementptr [32 x i32], [32 x i32]* %buf, i32 0, i32 32
-	%tmp1 = load i32* %tmp
+	%tmp1 = load i32, i32* %tmp
 	ret i32 %tmp1
 }
 
 define i32 @f4() {
 	%buf = alloca [32 x i8], align 4
 	%tmp = getelementptr [32 x i8], [32 x i8]* %buf, i32 0, i32 2
-	%tmp1 = load i8* %tmp
+	%tmp1 = load i8, i8* %tmp
         %tmp2 = zext i8 %tmp1 to i32
 	ret i32 %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/ldr_post.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ldr_post.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ldr_post.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ldr_post.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 define i32 @test1(i32 %a, i32 %b, i32 %c) {
         %tmp1 = mul i32 %a, %b          ; <i32> [#uses=2]
         %tmp2 = inttoptr i32 %tmp1 to i32*              ; <i32*> [#uses=1]
-        %tmp3 = load i32* %tmp2         ; <i32> [#uses=1]
+        %tmp3 = load i32, i32* %tmp2         ; <i32> [#uses=1]
         %tmp4 = sub i32 %tmp1, %c               ; <i32> [#uses=1]
         %tmp5 = mul i32 %tmp4, %tmp3            ; <i32> [#uses=1]
         ret i32 %tmp5
@@ -19,7 +19,7 @@ define i32 @test1(i32 %a, i32 %b, i32 %c
 define i32 @test2(i32 %a, i32 %b) {
         %tmp1 = mul i32 %a, %b          ; <i32> [#uses=2]
         %tmp2 = inttoptr i32 %tmp1 to i32*              ; <i32*> [#uses=1]
-        %tmp3 = load i32* %tmp2         ; <i32> [#uses=1]
+        %tmp3 = load i32, i32* %tmp2         ; <i32> [#uses=1]
         %tmp4 = sub i32 %tmp1, 16               ; <i32> [#uses=1]
         %tmp5 = mul i32 %tmp4, %tmp3            ; <i32> [#uses=1]
         ret i32 %tmp5

Modified: llvm/trunk/test/CodeGen/ARM/ldr_pre.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ldr_pre.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ldr_pre.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ldr_pre.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 ; CHECK-NOT: ldr
 define i32* @test1(i32* %X, i32* %dest) {
         %Y = getelementptr i32, i32* %X, i32 4               ; <i32*> [#uses=2]
-        %A = load i32* %Y               ; <i32> [#uses=1]
+        %A = load i32, i32* %Y               ; <i32> [#uses=1]
         store i32 %A, i32* %dest
         ret i32* %Y
 }
@@ -17,7 +17,7 @@ define i32* @test1(i32* %X, i32* %dest)
 define i32 @test2(i32 %a, i32 %b, i32 %c) {
         %tmp1 = sub i32 %a, %b          ; <i32> [#uses=2]
         %tmp2 = inttoptr i32 %tmp1 to i32*              ; <i32*> [#uses=1]
-        %tmp3 = load i32* %tmp2         ; <i32> [#uses=1]
+        %tmp3 = load i32, i32* %tmp2         ; <i32> [#uses=1]
         %tmp4 = sub i32 %tmp1, %c               ; <i32> [#uses=1]
         %tmp5 = add i32 %tmp4, %tmp3            ; <i32> [#uses=1]
         ret i32 %tmp5

Modified: llvm/trunk/test/CodeGen/ARM/ldrd-memoper.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ldrd-memoper.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ldrd-memoper.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ldrd-memoper.ll Fri Feb 27 15:17:42 2015
@@ -8,8 +8,8 @@
 ; CHECK: Formed {{.*}} t2LDRD{{.*}} mem:LD4[%0] LD4[%0+4]
 define i64 @t(i64 %a) nounwind readonly {
 entry:
-	%0 = load i64** @b, align 4
-	%1 = load i64* %0, align 4
+	%0 = load i64*, i64** @b, align 4
+	%1 = load i64, i64* %0, align 4
 	%2 = mul i64 %1, %a
 	ret i64 %2
 }

Modified: llvm/trunk/test/CodeGen/ARM/ldrd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ldrd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ldrd.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ldrd.ll Fri Feb 27 15:17:42 2015
@@ -19,8 +19,8 @@ entry:
 ; M3-LABEL: t:
 ; M3-NOT: ldrd
 
-	%0 = load i64** @b, align 4
-	%1 = load i64* %0, align 4
+	%0 = load i64*, i64** @b, align 4
+	%1 = load i64, i64* %0, align 4
 	%2 = mul i64 %1, %a
 	ret i64 %2
 }
@@ -57,8 +57,8 @@ bb:
   %scevgep4 = getelementptr i32, i32* %b, i32 %i.03    ; <i32*> [#uses=1]
   %tmp = add i32 %i.03, 1                         ; <i32> [#uses=3]
   %scevgep5 = getelementptr i32, i32* %a, i32 %tmp     ; <i32*> [#uses=1]
-  %2 = load i32* %scevgep, align 4                ; <i32> [#uses=1]
-  %3 = load i32* %scevgep5, align 4               ; <i32> [#uses=1]
+  %2 = load i32, i32* %scevgep, align 4                ; <i32> [#uses=1]
+  %3 = load i32, i32* %scevgep5, align 4               ; <i32> [#uses=1]
   %4 = add nsw i32 %3, %2                         ; <i32> [#uses=1]
   store i32 %4, i32* %scevgep4, align 4
   %exitcond = icmp eq i32 %tmp, %0                ; <i1> [#uses=1]
@@ -84,8 +84,8 @@ entry:
 ; A8-NEXT: str [[FIELD1]], {{\[}}[[BASE]]{{\]}}
   %orig_blocks = alloca [256 x i16], align 2
   %0 = bitcast [256 x i16]* %orig_blocks to i8*call void @llvm.lifetime.start(i64 512, i8* %0) nounwind
-  %tmp1 = load i32* getelementptr inbounds (%struct.Test* @TestVar, i32 0, i32 1), align 4
-  %tmp2 = load i32* getelementptr inbounds (%struct.Test* @TestVar, i32 0, i32 2), align 4
+  %tmp1 = load i32, i32* getelementptr inbounds (%struct.Test* @TestVar, i32 0, i32 1), align 4
+  %tmp2 = load i32, i32* getelementptr inbounds (%struct.Test* @TestVar, i32 0, i32 2), align 4
   %add = add nsw i32 %tmp2, %tmp1
   store i32 %add, i32* getelementptr inbounds (%struct.Test* @TestVar, i32 0, i32 0), align 4
   call void @llvm.lifetime.end(i64 512, i8* %0) nounwind

Modified: llvm/trunk/test/CodeGen/ARM/ldst-f32-2-i32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ldst-f32-2-i32.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ldst-f32-2-i32.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ldst-f32-2-i32.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ bb:
   %uglygep = getelementptr i8, i8* %src6, i32 %tmp
   %src_addr.04 = bitcast i8* %uglygep to float*
   %dst_addr.03 = getelementptr float, float* %dst, i32 %j.05
-  %1 = load float* %src_addr.04, align 4
+  %1 = load float, float* %src_addr.04, align 4
   store float %1, float* %dst_addr.03, align 4
   %2 = add i32 %j.05, 1
   %exitcond = icmp eq i32 %2, %width

Modified: llvm/trunk/test/CodeGen/ARM/ldstrex-m.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ldstrex-m.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ldstrex-m.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ldstrex-m.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 ; CHECK-NOT: ldrexd
 define i64 @f0(i64* %p) nounwind readonly {
 entry:
-  %0 = load atomic i64* %p seq_cst, align 8
+  %0 = load atomic i64, i64* %p seq_cst, align 8
   ret i64 %0
 }
 
@@ -29,7 +29,7 @@ entry:
 ; CHECK: ldr
 define i32 @f3(i32* %p) nounwind readonly {
 entry:
-  %0 = load atomic i32* %p seq_cst, align 4
+  %0 = load atomic i32, i32* %p seq_cst, align 4
   ret i32 %0
 }
 
@@ -37,7 +37,7 @@ entry:
 ; CHECK: ldrb
 define i8 @f4(i8* %p) nounwind readonly {
 entry:
-  %0 = load atomic i8* %p seq_cst, align 4
+  %0 = load atomic i8, i8* %p seq_cst, align 4
   ret i8 %0
 }
 

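Atomic loads follow the same pattern; the ordering and alignment clauses are untouched, and the explicit value type slots in between the "atomic" keyword and the pointer operand, as in the f0/f3/f4 hunks above:

    ; before
    %0 = load atomic i32* %p seq_cst, align 4
    ; after: only the explicit i32 value type is new
    %0 = load atomic i32, i32* %p seq_cst, align 4
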
Modified: llvm/trunk/test/CodeGen/ARM/ldstrex.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ldstrex.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ldstrex.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ldstrex.ll Fri Feb 27 15:17:42 2015
@@ -106,14 +106,14 @@ declare void @llvm.arm.clrex() nounwind
 
 define void @excl_addrmode() {
 ; CHECK-T2ADDRMODE-LABEL: excl_addrmode:
-  %base1020 = load i32** @base
+  %base1020 = load i32*, i32** @base
   %offset1020 = getelementptr i32, i32* %base1020, i32 255
   call i32 @llvm.arm.ldrex.p0i32(i32* %offset1020)
   call i32 @llvm.arm.strex.p0i32(i32 0, i32* %offset1020)
 ; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [{{r[0-9]+}}, #1020]
 ; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [{{r[0-9]+}}, #1020]
 
-  %base1024 = load i32** @base
+  %base1024 = load i32*, i32** @base
   %offset1024 = getelementptr i32, i32* %base1024, i32 256
   call i32 @llvm.arm.ldrex.p0i32(i32* %offset1024)
   call i32 @llvm.arm.strex.p0i32(i32 0, i32* %offset1024)
@@ -121,7 +121,7 @@ define void @excl_addrmode() {
 ; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [r[[ADDR]]]
 ; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
 
-  %base1 = load i32** @base
+  %base1 = load i32*, i32** @base
   %addr8 = bitcast i32* %base1 to i8*
   %offset1_8 = getelementptr i8, i8* %addr8, i32 1
   %offset1 = bitcast i8* %offset1_8 to i32*

Modified: llvm/trunk/test/CodeGen/ARM/load-global.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/load-global.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/load-global.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/load-global.ll Fri Feb 27 15:17:42 2015
@@ -45,6 +45,6 @@ define i32 @test1() {
 ; LINUX: ldr r0, [r1, r0]
 ; LINUX: ldr r0, [r0]
 ; LINUX: .long G(GOT)
-	%tmp = load i32* @G
+	%tmp = load i32, i32* @G
 	ret i32 %tmp
 }

Modified: llvm/trunk/test/CodeGen/ARM/load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/load.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/load.ll Fri Feb 27 15:17:42 2015
@@ -2,28 +2,28 @@
 
 define i32 @f1(i8* %p) {
 entry:
-        %tmp = load i8* %p              ; <i8> [#uses=1]
+        %tmp = load i8, i8* %p              ; <i8> [#uses=1]
         %tmp1 = sext i8 %tmp to i32              ; <i32> [#uses=1]
         ret i32 %tmp1
 }
 
 define i32 @f2(i8* %p) {
 entry:
-        %tmp = load i8* %p              ; <i8> [#uses=1]
+        %tmp = load i8, i8* %p              ; <i8> [#uses=1]
         %tmp2 = zext i8 %tmp to i32              ; <i32> [#uses=1]
         ret i32 %tmp2
 }
 
 define i32 @f3(i16* %p) {
 entry:
-        %tmp = load i16* %p             ; <i16> [#uses=1]
+        %tmp = load i16, i16* %p             ; <i16> [#uses=1]
         %tmp3 = sext i16 %tmp to i32             ; <i32> [#uses=1]
         ret i32 %tmp3
 }
 
 define i32 @f4(i16* %p) {
 entry:
-        %tmp = load i16* %p             ; <i16> [#uses=1]
+        %tmp = load i16, i16* %p             ; <i16> [#uses=1]
         %tmp4 = zext i16 %tmp to i32             ; <i32> [#uses=1]
         ret i32 %tmp4
 }

Modified: llvm/trunk/test/CodeGen/ARM/load_i1_select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/load_i1_select.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/load_i1_select.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/load_i1_select.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ target triple = "thumbv7-apple-ios0.0.0"
 ; CHECK: tst.w r[[R0]], #1
 define void @foo(i8* %call, double* %p) nounwind {
 entry:
-  %tmp2 = load i8* %call
+  %tmp2 = load i8, i8* %call
   %tmp3 = trunc i8 %tmp2 to i1
   %cond = select i1 %tmp3, double 2.000000e+00, double 1.000000e+00
   store double %cond, double* %p

Modified: llvm/trunk/test/CodeGen/ARM/long.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/long.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/long.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/long.ll Fri Feb 27 15:17:42 2015
@@ -85,6 +85,6 @@ define i64 @f10() {
 ; CHECK-LABEL: f10:
 entry:
         %a = alloca i64, align 8                ; <i64*> [#uses=1]
-        %retval = load i64* %a          ; <i64> [#uses=1]
+        %retval = load i64, i64* %a          ; <i64> [#uses=1]
         ret i64 %retval
 }

Modified: llvm/trunk/test/CodeGen/ARM/lsr-code-insertion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/lsr-code-insertion.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/lsr-code-insertion.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/lsr-code-insertion.ll Fri Feb 27 15:17:42 2015
@@ -23,16 +23,16 @@ bb:		; preds = %cond_next59, %entry
 	%indvar = phi i32 [ 0, %entry ], [ %k.069.0, %cond_next59 ]		; <i32> [#uses=6]
 	%k.069.0 = add i32 %indvar, 1		; <i32> [#uses=3]
 	%tmp3 = getelementptr i32, i32* %mpp, i32 %indvar		; <i32*> [#uses=1]
-	%tmp4 = load i32* %tmp3		; <i32> [#uses=1]
+	%tmp4 = load i32, i32* %tmp3		; <i32> [#uses=1]
 	%tmp8 = getelementptr i32, i32* %tpmm, i32 %indvar		; <i32*> [#uses=1]
-	%tmp9 = load i32* %tmp8		; <i32> [#uses=1]
+	%tmp9 = load i32, i32* %tmp8		; <i32> [#uses=1]
 	%tmp10 = add i32 %tmp9, %tmp4		; <i32> [#uses=2]
 	%tmp13 = getelementptr i32, i32* %mc, i32 %k.069.0		; <i32*> [#uses=5]
 	store i32 %tmp10, i32* %tmp13
 	%tmp17 = getelementptr i32, i32* %ip, i32 %indvar		; <i32*> [#uses=1]
-	%tmp18 = load i32* %tmp17		; <i32> [#uses=1]
+	%tmp18 = load i32, i32* %tmp17		; <i32> [#uses=1]
 	%tmp22 = getelementptr i32, i32* %tpim, i32 %indvar		; <i32*> [#uses=1]
-	%tmp23 = load i32* %tmp22		; <i32> [#uses=1]
+	%tmp23 = load i32, i32* %tmp22		; <i32> [#uses=1]
 	%tmp24 = add i32 %tmp23, %tmp18		; <i32> [#uses=2]
 	%tmp30 = icmp sgt i32 %tmp24, %tmp10		; <i1> [#uses=1]
 	br i1 %tmp30, label %cond_true, label %cond_next
@@ -42,9 +42,9 @@ cond_true:		; preds = %bb
 	br label %cond_next
 
 cond_next:		; preds = %cond_true, %bb
-	%tmp39 = load i32* %tmp13		; <i32> [#uses=1]
+	%tmp39 = load i32, i32* %tmp13		; <i32> [#uses=1]
 	%tmp42 = getelementptr i32, i32* %ms, i32 %k.069.0		; <i32*> [#uses=1]
-	%tmp43 = load i32* %tmp42		; <i32> [#uses=1]
+	%tmp43 = load i32, i32* %tmp42		; <i32> [#uses=1]
 	%tmp44 = add i32 %tmp43, %tmp39		; <i32> [#uses=2]
 	store i32 %tmp44, i32* %tmp13
 	%tmp52 = icmp slt i32 %tmp44, -987654321		; <i1> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/lsr-icmp-imm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/lsr-icmp-imm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/lsr-icmp-imm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/lsr-icmp-imm.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ for.body:
   %i.addr.05 = phi i32 [ %sub, %for.body ], [ %i, %entry ]
   %b.04 = phi i32 [ %.b.0, %for.body ], [ 0, %entry ]
   %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.addr.05
-  %0 = load i32* %arrayidx, align 4
+  %0 = load i32, i32* %arrayidx, align 4
   %cmp1 = icmp sgt i32 %0, %b.04
   %.b.0 = select i1 %cmp1, i32 %0, i32 %b.04
   %i.addr.0.bi.0 = select i1 %cmp1, i32 %i.addr.05, i32 %bi.06

Modified: llvm/trunk/test/CodeGen/ARM/lsr-unfolded-offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/lsr-unfolded-offset.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/lsr-unfolded-offset.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/lsr-unfolded-offset.ll Fri Feb 27 15:17:42 2015
@@ -25,8 +25,8 @@ outer.loop:
   %0 = phi i32 [ %inc71, %for.inc69 ], [ 0, %entry ]
   %offset = getelementptr %struct.partition_entry, %struct.partition_entry* %part, i32 %0, i32 2
   %len = getelementptr %struct.partition_entry, %struct.partition_entry* %part, i32 %0, i32 3
-  %tmp5 = load i64* %offset, align 4
-  %tmp15 = load i64* %len, align 4
+  %tmp5 = load i64, i64* %offset, align 4
+  %tmp15 = load i64, i64* %len, align 4
   %add = add nsw i64 %tmp15, %tmp5
   br label %inner.loop
 
@@ -39,8 +39,8 @@ inner.loop:
 if.end:                                           ; preds = %inner.loop
   %len39 = getelementptr %struct.partition_entry, %struct.partition_entry* %part, i32 %1, i32 3
   %offset28 = getelementptr %struct.partition_entry, %struct.partition_entry* %part, i32 %1, i32 2
-  %tmp29 = load i64* %offset28, align 4
-  %tmp40 = load i64* %len39, align 4
+  %tmp29 = load i64, i64* %offset28, align 4
+  %tmp40 = load i64, i64* %len39, align 4
   %add41 = add nsw i64 %tmp40, %tmp29
   %cmp44 = icmp sge i64 %tmp29, %tmp5
   %cmp47 = icmp slt i64 %tmp29, %add

Modified: llvm/trunk/test/CodeGen/ARM/machine-cse-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/machine-cse-cmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/machine-cse-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/machine-cse-cmp.ll Fri Feb 27 15:17:42 2015
@@ -30,7 +30,7 @@ entry:
 ; CHECK: poplt
 ; CHECK-NOT: cmp
 ; CHECK: movle
-  %0 = load i32* @foo, align 4
+  %0 = load i32, i32* @foo, align 4
   %cmp28 = icmp sgt i32 %0, 0
   br i1 %cmp28, label %for.body.lr.ph, label %for.cond1.preheader
 
@@ -53,7 +53,7 @@ entry:
 ; CHECK-NOT: sub
 ; CHECK: cmp
 ; CHECK: blt
-%0 = load i32* %offset, align 4
+%0 = load i32, i32* %offset, align 4
 %cmp = icmp slt i32 %0, %size
 %s = sub nsw i32 %0, %size
 %size2 = sub nsw i32 %size, 0

Modified: llvm/trunk/test/CodeGen/ARM/machine-licm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/machine-licm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/machine-licm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/machine-licm.ll Fri Feb 27 15:17:42 2015
@@ -39,14 +39,14 @@ bb.nph:
 ; THUMB: LCPI0_0:
 ; THUMB-NOT: LCPI0_1:
 ; THUMB: .section
-  %.pre = load i32* @GV, align 4                  ; <i32> [#uses=1]
+  %.pre = load i32, i32* @GV, align 4                  ; <i32> [#uses=1]
   br label %bb
 
 bb:                                               ; preds = %bb, %bb.nph
   %1 = phi i32 [ %.pre, %bb.nph ], [ %3, %bb ]    ; <i32> [#uses=1]
   %i.03 = phi i32 [ 0, %bb.nph ], [ %4, %bb ]     ; <i32> [#uses=2]
   %scevgep = getelementptr i32, i32* %vals, i32 %i.03  ; <i32*> [#uses=1]
-  %2 = load i32* %scevgep, align 4                ; <i32> [#uses=1]
+  %2 = load i32, i32* %scevgep, align 4                ; <i32> [#uses=1]
   %3 = add nsw i32 %1, %2                         ; <i32> [#uses=2]
   store i32 %3, i32* @GV, align 4
   %4 = add i32 %i.03, 1                           ; <i32> [#uses=2]

Modified: llvm/trunk/test/CodeGen/ARM/minsize-litpools.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/minsize-litpools.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/minsize-litpools.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/minsize-litpools.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ define i32 @small_global() minsize {
 ; CHECK: ldr r[[GLOBDEST:[0-9]+]], {{.?LCPI0_0}}
 ; CHECK: ldr r0, [r[[GLOBDEST]]]
 
-  %val = load i32* @var
+  %val = load i32, i32* @var
   ret i32 %val
 }
 
@@ -21,6 +21,6 @@ define i32 @big_global() {
 ; CHECK: movw [[GLOBDEST:r[0-9]+]], :lower16:var
 ; CHECK: movt [[GLOBDEST]], :upper16:var
 
-  %val = load i32* @var
+  %val = load i32, i32* @var
   ret i32 %val
 }

Modified: llvm/trunk/test/CodeGen/ARM/misched-copy-arm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/misched-copy-arm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/misched-copy-arm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/misched-copy-arm.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ for.body:
   %s.05 = phi i32 [ %mul, %for.body ], [ 0, %entry ]
   %indvars.iv.next = add i32 %indvars.iv, %s
   %arrayidx = getelementptr inbounds i32, i32* %d, i32 %indvars.iv
-  %0 = load i32* %arrayidx, align 4
+  %0 = load i32, i32* %arrayidx, align 4
   %mul = mul nsw i32 %0, %s.05
   %exitcond = icmp eq i32 %indvars.iv.next, %a
   br i1 %exitcond, label %for.end, label %for.body
@@ -65,7 +65,7 @@ if.end28:
   %dst.1 = phi %struct.rtx_def* [ undef, %if.then24 ], [ %dst.0, %while.cond ], [ %dst.0, %while.cond ]
   %arrayidx30 = getelementptr inbounds %struct.rtx_def, %struct.rtx_def* %dst.1, i32 0, i32 1, i32 0
   %rtx31 = bitcast %union.rtunion_def* %arrayidx30 to %struct.rtx_def**
-  %0 = load %struct.rtx_def** %rtx31, align 4
+  %0 = load %struct.rtx_def*, %struct.rtx_def** %rtx31, align 4
   br label %while.cond
 
 if.then46:                                        ; preds = %while.cond

Modified: llvm/trunk/test/CodeGen/ARM/mult-alt-generic-arm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/mult-alt-generic-arm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/mult-alt-generic-arm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/mult-alt-generic-arm.ll Fri Feb 27 15:17:42 2015
@@ -33,10 +33,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,<r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* %in1, align 4
+  %tmp1 = load i32, i32* %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,r<"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   ret void
@@ -48,10 +48,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,>r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* %in1, align 4
+  %tmp1 = load i32, i32* %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,r>"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   ret void
@@ -63,7 +63,7 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
   ret void
@@ -120,10 +120,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* @min1, align 4
+  %tmp1 = load i32, i32* @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r,imr"(i32 1) nounwind
@@ -137,10 +137,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* @min1, align 4
+  %tmp1 = load i32, i32* @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r,X"(i32 1) nounwind
@@ -166,7 +166,7 @@ entry:
 
 define arm_aapcscc void @multi_m() nounwind {
 entry:
-  %tmp = load i32* @min1, align 4
+  %tmp = load i32, i32* @min1, align 4
   call void asm "foo $1,$0", "=*m|r,m|r"(i32* @mout0, i32 %tmp) nounwind
   ret void
 }
@@ -191,10 +191,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|<r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* %in1, align 4
+  %tmp1 = load i32, i32* %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|r<"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   ret void
@@ -206,10 +206,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|>r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* %in1, align 4
+  %tmp1 = load i32, i32* %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|r>"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   ret void
@@ -221,7 +221,7 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|m"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
   ret void
@@ -278,10 +278,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* @min1, align 4
+  %tmp1 = load i32, i32* @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 1) nounwind
@@ -295,10 +295,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* @min1, align 4
+  %tmp1 = load i32, i32* @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 1) nounwind

Modified: llvm/trunk/test/CodeGen/ARM/negative-offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/negative-offset.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/negative-offset.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/negative-offset.ll Fri Feb 27 15:17:42 2015
@@ -8,9 +8,9 @@ entry:
 ;CHECK: ldr r{{.*}}, [r0, #-16]
 ;CHECK: ldr r{{.*}}, [r0, #-8]
   %arrayidx = getelementptr inbounds i32, i32* %p, i32 -4
-  %0 = load i32* %arrayidx, align 4
+  %0 = load i32, i32* %arrayidx, align 4
   %arrayidx1 = getelementptr inbounds i32, i32* %p, i32 -2
-  %1 = load i32* %arrayidx1, align 4
+  %1 = load i32, i32* %arrayidx1, align 4
   %add = add nsw i32 %1, %0
   ret i32 %add
 }

Modified: llvm/trunk/test/CodeGen/ARM/neon_cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/neon_cmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/neon_cmp.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/neon_cmp.ll Fri Feb 27 15:17:42 2015
@@ -4,8 +4,8 @@
 ; radar://13191881
 ; CHECK: vfcmp
 define void @vfcmp(<2 x double>* %a, <2 x double>* %b) {
-  %wide.load = load <2 x double>* %a, align 4
-  %wide.load2 = load <2 x double>* %b, align 4
+  %wide.load = load <2 x double>, <2 x double>* %a, align 4
+  %wide.load2 = load <2 x double>, <2 x double>* %b, align 4
 ; CHECK-NOT: vdup.32
 ; CHECK-NOT: vmovn.i64
   %v1 = fcmp olt <2 x double> %wide.load, %wide.load2

Modified: llvm/trunk/test/CodeGen/ARM/neon_div.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/neon_div.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/neon_div.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/neon_div.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@ define <8 x i8> @sdivi8(<8 x i8>* %A, <8
 ;CHECK: vrecpe.f32
 ;CHECK: vmovn.i32
 ;CHECK: vmovn.i16
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = sdiv <8 x i8> %tmp1, %tmp2
 	ret <8 x i8> %tmp3
 }
@@ -21,8 +21,8 @@ define <8 x i8> @udivi8(<8 x i8>* %A, <8
 ;CHECK: vrecps.f32
 ;CHECK: vmovn.i32
 ;CHECK: vqmovun.s16
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = udiv <8 x i8> %tmp1, %tmp2
 	ret <8 x i8> %tmp3
 }
@@ -31,8 +31,8 @@ define <4 x i16> @sdivi16(<4 x i16>* %A,
 ;CHECK: vrecpe.f32
 ;CHECK: vrecps.f32
 ;CHECK: vmovn.i32
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = sdiv <4 x i16> %tmp1, %tmp2
 	ret <4 x i16> %tmp3
 }
@@ -42,8 +42,8 @@ define <4 x i16> @udivi16(<4 x i16>* %A,
 ;CHECK: vrecps.f32
 ;CHECK: vrecps.f32
 ;CHECK: vmovn.i32
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = udiv <4 x i16> %tmp1, %tmp2
 	ret <4 x i16> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/ARM/neon_fpconv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/neon_fpconv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/neon_fpconv.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/neon_fpconv.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ define <2 x double> @vextend(<2 x float>
 ; CHECK: vsitofp_double
 define void @vsitofp_double(<2 x i32>* %loadaddr,
                             <2 x double>* %storeaddr) {
-  %v0 = load <2 x i32>* %loadaddr
+  %v0 = load <2 x i32>, <2 x i32>* %loadaddr
 ; CHECK:      vldr
 ; CHECK-NEXT:	vcvt.f64.s32
 ; CHECK-NEXT:	vcvt.f64.s32
@@ -31,7 +31,7 @@ define void @vsitofp_double(<2 x i32>* %
 ; CHECK: vuitofp_double
 define void @vuitofp_double(<2 x i32>* %loadaddr,
                             <2 x double>* %storeaddr) {
-  %v0 = load <2 x i32>* %loadaddr
+  %v0 = load <2 x i32>, <2 x i32>* %loadaddr
 ; CHECK:      vldr
 ; CHECK-NEXT:	vcvt.f64.u32
 ; CHECK-NEXT:	vcvt.f64.u32

Modified: llvm/trunk/test/CodeGen/ARM/neon_ld1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/neon_ld1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/neon_ld1.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/neon_ld1.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@
 ; CHECK: vstr d
 define void @t1(<2 x i32>* %r, <4 x i16>* %a, <4 x i16>* %b) nounwind {
 entry:
-	%0 = load <4 x i16>* %a, align 8		; <<4 x i16>> [#uses=1]
-	%1 = load <4 x i16>* %b, align 8		; <<4 x i16>> [#uses=1]
+	%0 = load <4 x i16>, <4 x i16>* %a, align 8		; <<4 x i16>> [#uses=1]
+	%1 = load <4 x i16>, <4 x i16>* %b, align 8		; <<4 x i16>> [#uses=1]
 	%2 = add <4 x i16> %0, %1		; <<4 x i16>> [#uses=1]
 	%3 = bitcast <4 x i16> %2 to <2 x i32>		; <<2 x i32>> [#uses=1]
 	store <2 x i32> %3, <2 x i32>* %r, align 8
@@ -22,8 +22,8 @@ entry:
 ; CHECK: vmov r0, r1, d
 define <2 x i32> @t2(<4 x i16>* %a, <4 x i16>* %b) nounwind readonly {
 entry:
-	%0 = load <4 x i16>* %a, align 8		; <<4 x i16>> [#uses=1]
-	%1 = load <4 x i16>* %b, align 8		; <<4 x i16>> [#uses=1]
+	%0 = load <4 x i16>, <4 x i16>* %a, align 8		; <<4 x i16>> [#uses=1]
+	%1 = load <4 x i16>, <4 x i16>* %b, align 8		; <<4 x i16>> [#uses=1]
 	%2 = sub <4 x i16> %0, %1		; <<4 x i16>> [#uses=1]
 	%3 = bitcast <4 x i16> %2 to <2 x i32>		; <<2 x i32>> [#uses=1]
 	ret <2 x i32> %3

Modified: llvm/trunk/test/CodeGen/ARM/neon_ld2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/neon_ld2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/neon_ld2.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/neon_ld2.ll Fri Feb 27 15:17:42 2015
@@ -13,8 +13,8 @@
 ; SWIFT: vst1.64 {{.d[0-9]+, d[0-9]+}, \[r[0-9]+:128\]}}
 define void @t1(<4 x i32>* %r, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 entry:
-	%0 = load <2 x i64>* %a, align 16		; <<2 x i64>> [#uses=1]
-	%1 = load <2 x i64>* %b, align 16		; <<2 x i64>> [#uses=1]
+	%0 = load <2 x i64>, <2 x i64>* %a, align 16		; <<2 x i64>> [#uses=1]
+	%1 = load <2 x i64>, <2 x i64>* %b, align 16		; <<2 x i64>> [#uses=1]
 	%2 = add <2 x i64> %0, %1		; <<2 x i64>> [#uses=1]
 	%3 = bitcast <2 x i64> %2 to <4 x i32>		; <<4 x i32>> [#uses=1]
 	store <4 x i32> %3, <4 x i32>* %r, align 16
@@ -35,8 +35,8 @@ entry:
 ; SWIFT: vmov r2, r3, d
 define <4 x i32> @t2(<2 x i64>* %a, <2 x i64>* %b) nounwind readonly {
 entry:
-	%0 = load <2 x i64>* %a, align 16		; <<2 x i64>> [#uses=1]
-	%1 = load <2 x i64>* %b, align 16		; <<2 x i64>> [#uses=1]
+	%0 = load <2 x i64>, <2 x i64>* %a, align 16		; <<2 x i64>> [#uses=1]
+	%1 = load <2 x i64>, <2 x i64>* %b, align 16		; <<2 x i64>> [#uses=1]
 	%2 = sub <2 x i64> %0, %1		; <<2 x i64>> [#uses=1]
 	%3 = bitcast <2 x i64> %2 to <4 x i32>		; <<4 x i32>> [#uses=1]
 	ret <4 x i32> %3
@@ -50,8 +50,8 @@ entry:
 ; SWIFT: vst1.64 {{.d[0-9]+, d[0-9]+}, \[r[0-9]+}}
 define void @t3(<4 x i32>* %r, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 entry:
-	%0 = load <2 x i64>* %a, align 8
-	%1 = load <2 x i64>* %b, align 8
+	%0 = load <2 x i64>, <2 x i64>* %a, align 8
+	%1 = load <2 x i64>, <2 x i64>* %b, align 8
 	%2 = add <2 x i64> %0, %1
 	%3 = bitcast <2 x i64> %2 to <4 x i32>
 	store <4 x i32> %3, <4 x i32>* %r, align 8

Modified: llvm/trunk/test/CodeGen/ARM/neon_spill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/neon_spill.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/neon_spill.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/neon_spill.ll Fri Feb 27 15:17:42 2015
@@ -24,7 +24,7 @@ declare arm_aapcs_vfpcc %2** @func4()
 define arm_aapcs_vfpcc void @foo(%3* nocapture) nounwind align 2 {
   call void @llvm.arm.neon.vst4.v4i32(i8* undef, <4 x i32> <i32 0, i32 1065353216, i32 1073741824, i32 1077936128>, <4 x i32> <i32 1082130432, i32 1084227584, i32 1086324736, i32 1088421888>, <4 x i32> <i32 1090519040, i32 1091567616, i32 1092616192, i32 1093664768>, <4 x i32> <i32 1094713344, i32 1095761920, i32 1096810496, i32 1097859072>, i32 16) nounwind
   %2 = call arm_aapcs_vfpcc  %0** @func2() nounwind
-  %3 = load %0** %2, align 4
+  %3 = load %0*, %0** %2, align 4
   store float 0.000000e+00, float* undef, align 4
   %4 = call arm_aapcs_vfpcc  %2* @func3(%2* undef, %2* undef, i32 2956) nounwind
   call arm_aapcs_vfpcc  void @func1(%0* %3, float* undef, float* undef, %2* undef)

Modified: llvm/trunk/test/CodeGen/ARM/no-fpu.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/no-fpu.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/no-fpu.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/no-fpu.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 ; Check no NEON instructions are selected when feature is disabled.
 define void @neonop(i64* nocapture readonly %a, i64* nocapture %b) #0 {
   %1 = bitcast i64* %a to <2 x i64>*
-  %wide.load = load <2 x i64>* %1, align 8
+  %wide.load = load <2 x i64>, <2 x i64>* %1, align 8
   ; NONEON-NOVFP-NOT: vld1.64
   ; NONEON-NOT: vld1.64
   %add = add <2 x i64> %wide.load, %wide.load

Modified: llvm/trunk/test/CodeGen/ARM/no-tail-call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/no-tail-call.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/no-tail-call.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/no-tail-call.ll Fri Feb 27 15:17:42 2015
@@ -38,7 +38,7 @@ entry:
   %5 = call float @llvm.ceil.f32(float 5.000000e+00)
   %.native3 = getelementptr inbounds %foo, %foo* %1, i32 0, i32 0
   %.native3.value = getelementptr inbounds %Sf, %Sf* %.native3, i32 0, i32 0
-  %6 = load float* %.native3.value, align 4
+  %6 = load float, float* %.native3.value, align 4
   %7 = call float @llvm.ceil.f32(float %6)
   %8 = insertvalue { float, float, float } { float 0.000000e+00, float undef, float undef }, float %5, 1
   %9 = insertvalue { float, float, float } %8, float %7, 2
@@ -76,7 +76,7 @@ entry:
   %5 = call float @llvm.ceil.f32(float 5.000000e+00)
   %.native3 = getelementptr inbounds %foo, %foo* %1, i32 0, i32 0
   %.native3.value = getelementptr inbounds %Sf, %Sf* %.native3, i32 0, i32 0
-  %6 = load float* %.native3.value, align 4
+  %6 = load float, float* %.native3.value, align 4
   %7 = call float @llvm.ceil.f32(float %6)
   %8 = insertvalue { float, float } { float 0.000000e+00, float undef }, float %7, 1
   ret { float, float } %8

Modified: llvm/trunk/test/CodeGen/ARM/none-macho.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/none-macho.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/none-macho.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/none-macho.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@
 
 define i32 @test_litpool() minsize {
 ; CHECK-LABEL: test_litpool:
-  %val = load i32* @var
+  %val = load i32, i32* @var
   ret i32 %val
 
   ; Lit-pool entries need to produce a "$non_lazy_ptr" version of the symbol.
@@ -21,7 +21,7 @@ define i32 @test_litpool() minsize {
 
 define i32 @test_movw_movt() {
 ; CHECK-LABEL: test_movw_movt:
-  %val = load i32* @var
+  %val = load i32, i32* @var
   ret i32 %val
 
   ; movw/movt should also address their symbols MachO-style
@@ -56,7 +56,7 @@ define i32 @test_frame_ptr() {
 %big_arr = type [8 x i32]
 define void @test_two_areas(%big_arr* %addr) {
 ; CHECK-LABEL: test_two_areas:
-  %val = load %big_arr* %addr
+  %val = load %big_arr, %big_arr* %addr
   call void @test_trap()
   store %big_arr %val, %big_arr* %addr
 

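The explicit value type is not limited to scalars; vector and first-class aggregate loads are rewritten the same way, as the %big_arr hunk above shows:

    ; %big_arr = type [8 x i32]; the value type may be any loadable first-class type
    %val = load %big_arr, %big_arr* %addr
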
Modified: llvm/trunk/test/CodeGen/ARM/nop_concat_vectors.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/nop_concat_vectors.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/nop_concat_vectors.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/nop_concat_vectors.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 ;CHECK-NOT: vst1.32
 ;CHECK: bx
 define void @foo(<16 x i8>* %J) {
-  %A = load <16 x i8>* %J
+  %A = load <16 x i8>, <16 x i8>* %J
   %T1 = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %T2 = shufflevector <8 x i8>  %T1, <8 x i8> undef, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   store <16 x i8> %T2, <16 x i8>* %J

Modified: llvm/trunk/test/CodeGen/ARM/optselect-regclass.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/optselect-regclass.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/optselect-regclass.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/optselect-regclass.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@
 ; Function Attrs: nounwind ssp
 define void @xfr() {
 entry:
-  %bf.load4 = load i32* getelementptr inbounds (%union.opcode.0.2.5.8.15.28* @opcode, i32 0, i32 0), align 4
+  %bf.load4 = load i32, i32* getelementptr inbounds (%union.opcode.0.2.5.8.15.28* @opcode, i32 0, i32 0), align 4
   %bf.clear10 = and i32 %bf.load4, 65535
   %and11 = and i32 %bf.load4, 32768
   %tobool12 = icmp ne i32 %and11, 0

Modified: llvm/trunk/test/CodeGen/ARM/phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/phi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/phi.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/phi.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ rblock:
 end:
 ; CHECK: ldr	r0, [r1, #4]
   %gep = phi i32* [%lbranch, %lblock], [%rbranch, %rblock]
-  %r = load i32* %gep
+  %r = load i32, i32* %gep
 ; CHECK-NEXT: bx	lr
   ret i32 %r
 }

Modified: llvm/trunk/test/CodeGen/ARM/popcnt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/popcnt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/popcnt.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/popcnt.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vcnt8:
 ;CHECK: vcnt.8 {{d[0-9]+}}, {{d[0-9]+}}
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)
 	ret <8 x i8> %tmp2
 }
@@ -12,7 +12,7 @@ define <8 x i8> @vcnt8(<8 x i8>* %A) nou
 define <16 x i8> @vcntQ8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vcntQ8:
 ;CHECK: vcnt.8 {{q[0-9]+}}, {{q[0-9]+}}
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
 	ret <16 x i8> %tmp2
 }
@@ -24,7 +24,7 @@ define <4 x i16> @vcnt16(<4 x i16>* %A)
 ; CHECK: vadd.i8 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
 ; CHECK: vuzp.8 {{d[0-9]+}}, {{d[0-9]+}}
 ; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> %tmp1)
 	ret <4 x i16> %tmp2
 }
@@ -36,7 +36,7 @@ define <8 x i16> @vcntQ16(<8 x i16>* %A)
 ; CHECK: vadd.i8 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
 ; CHECK: vuzp.8 {{q[0-9]+}}, {{q[0-9]+}}
 ; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %tmp1)
 	ret <8 x i16> %tmp2
 }
@@ -51,7 +51,7 @@ define <2 x i32> @vcnt32(<2 x i32>* %A)
 ; CHECK: vrev32.16 {{d[0-9]+}}, {{d[0-9]+}}
 ; CHECK: vuzp.16 {{d[0-9]+}}, {{d[0-9]+}}
 ; CHECK: vmovl.u16 {{q[0-9]+}}, {{d[0-9]+}}
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp2
 }
@@ -66,7 +66,7 @@ define <4 x i32> @vcntQ32(<4 x i32>* %A)
 ; CHECK: vrev32.16 {{q[0-9]+}}, {{q[0-9]+}}
 ; CHECK: vuzp.16 {{q[0-9]+}}, {{q[0-9]+}}
 ; CHECK: vmovl.u16 {{q[0-9]+}}, {{d[0-9]+}}
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp2
 }
@@ -81,7 +81,7 @@ declare <4 x i32> @llvm.ctpop.v4i32(<4 x
 define <8 x i8> @vclz8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vclz8:
 ;CHECK: vclz.i8 {{d[0-9]+}}, {{d[0-9]+}}
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)
 	ret <8 x i8> %tmp2
 }
@@ -89,7 +89,7 @@ define <8 x i8> @vclz8(<8 x i8>* %A) nou
 define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vclz16:
 ;CHECK: vclz.i16 {{d[0-9]+}}, {{d[0-9]+}}
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)
 	ret <4 x i16> %tmp2
 }
@@ -97,7 +97,7 @@ define <4 x i16> @vclz16(<4 x i16>* %A)
 define <2 x i32> @vclz32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vclz32:
 ;CHECK: vclz.i32 {{d[0-9]+}}, {{d[0-9]+}}
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)
 	ret <2 x i32> %tmp2
 }
@@ -105,7 +105,7 @@ define <2 x i32> @vclz32(<2 x i32>* %A)
 define <16 x i8> @vclzQ8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vclzQ8:
 ;CHECK: vclz.i8 {{q[0-9]+}}, {{q[0-9]+}}
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %tmp1, i1 0)
 	ret <16 x i8> %tmp2
 }
@@ -113,7 +113,7 @@ define <16 x i8> @vclzQ8(<16 x i8>* %A)
 define <8 x i16> @vclzQ16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vclzQ16:
 ;CHECK: vclz.i16 {{q[0-9]+}}, {{q[0-9]+}}
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %tmp1, i1 0)
 	ret <8 x i16> %tmp2
 }
@@ -121,7 +121,7 @@ define <8 x i16> @vclzQ16(<8 x i16>* %A)
 define <4 x i32> @vclzQ32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vclzQ32:
 ;CHECK: vclz.i32 {{q[0-9]+}}, {{q[0-9]+}}
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %tmp1, i1 0)
 	ret <4 x i32> %tmp2
 }
@@ -137,7 +137,7 @@ declare <4 x i32> @llvm.ctlz.v4i32(<4 x
 define <8 x i8> @vclss8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vclss8:
 ;CHECK: vcls.s8
-	%tmp1 = load <8 x i8>* %A
+	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vcls.v8i8(<8 x i8> %tmp1)
 	ret <8 x i8> %tmp2
 }
@@ -145,7 +145,7 @@ define <8 x i8> @vclss8(<8 x i8>* %A) no
 define <4 x i16> @vclss16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vclss16:
 ;CHECK: vcls.s16
-	%tmp1 = load <4 x i16>* %A
+	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16> %tmp1)
 	ret <4 x i16> %tmp2
 }
@@ -153,7 +153,7 @@ define <4 x i16> @vclss16(<4 x i16>* %A)
 define <2 x i32> @vclss32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vclss32:
 ;CHECK: vcls.s32
-	%tmp1 = load <2 x i32>* %A
+	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vcls.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp2
 }
@@ -161,7 +161,7 @@ define <2 x i32> @vclss32(<2 x i32>* %A)
 define <16 x i8> @vclsQs8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vclsQs8:
 ;CHECK: vcls.s8
-	%tmp1 = load <16 x i8>* %A
+	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %tmp1)
 	ret <16 x i8> %tmp2
 }
@@ -169,7 +169,7 @@ define <16 x i8> @vclsQs8(<16 x i8>* %A)
 define <8 x i16> @vclsQs16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vclsQs16:
 ;CHECK: vcls.s16
-	%tmp1 = load <8 x i16>* %A
+	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16> %tmp1)
 	ret <8 x i16> %tmp2
 }
@@ -177,7 +177,7 @@ define <8 x i16> @vclsQs16(<8 x i16>* %A
 define <4 x i32> @vclsQs32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vclsQs32:
 ;CHECK: vcls.s32
-	%tmp1 = load <4 x i32>* %A
+	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vcls.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp2
 }

Modified: llvm/trunk/test/CodeGen/ARM/pr13249.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/pr13249.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/pr13249.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/pr13249.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ bb:
 
 bb3:                                              ; preds = %bb3, %bb
   %tmp = phi i8* [ %tmp5, %bb3 ], [ %arg, %bb ]
-  %tmp4 = load i8* %tmp, align 1
+  %tmp4 = load i8, i8* %tmp, align 1
   %tmp5 = getelementptr inbounds i8, i8* %tmp, i32 1
   br i1 undef, label %bb3, label %bb7
 
@@ -18,7 +18,7 @@ bb7:
   br i1 %tmp10, label %bb13, label %bb15
 
 bb13:                                             ; preds = %bb7
-  %tmp14 = load i8* %tmp12, align 1
+  %tmp14 = load i8, i8* %tmp12, align 1
   br label %bb7
 
 bb15:                                             ; preds = %bb7

Modified: llvm/trunk/test/CodeGen/ARM/pr18364-movw.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/pr18364-movw.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/pr18364-movw.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/pr18364-movw.ll Fri Feb 27 15:17:42 2015
@@ -14,8 +14,8 @@ entry:
   %z = alloca i64, align 8
   store i64 1, i64* %y, align 8
   store i64 11579764786944, i64* %z, align 8
-  %0 = load i64* %y, align 8
-  %1 = load i64* %z, align 8
+  %0 = load i64, i64* %y, align 8
+  %1 = load i64, i64* %z, align 8
   %sub = sub i64 %0, %1
   ret i64 %sub
 }

Modified: llvm/trunk/test/CodeGen/ARM/pr3502.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/pr3502.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/pr3502.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/pr3502.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ define void @SomeCall(i32 %num) nounwind
 entry:
 	tail call void asm sideeffect "mcr p15, 0, $0, c7, c10, 4 \0A\09", "r,~{memory}"(i32 0) nounwind
 	tail call void asm sideeffect "mcr p15,0,$0,c7,c14,0", "r,~{memory}"(i32 0) nounwind
-	%0 = load %struct.SHARED_AREA** null, align 4		; <%struct.SHARED_AREA*> [#uses=1]
+	%0 = load %struct.SHARED_AREA*, %struct.SHARED_AREA** null, align 4		; <%struct.SHARED_AREA*> [#uses=1]
 	%1 = ptrtoint %struct.SHARED_AREA* %0 to i32		; <i32> [#uses=1]
 	%2 = lshr i32 %1, 20		; <i32> [#uses=1]
 	%3 = tail call i32 @SetCurrEntry(i32 %2, i32 0) nounwind		; <i32> [#uses=0]

Modified: llvm/trunk/test/CodeGen/ARM/private.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/private.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/private.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/private.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ define private void @foo() {
 
 define i32 @bar() {
         call void @foo()
-	%1 = load i32* @baz, align 4
+	%1 = load i32, i32* @baz, align 4
         ret i32 %1
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/reg_sequence.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/reg_sequence.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/reg_sequence.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/reg_sequence.ll Fri Feb 27 15:17:42 2015
@@ -20,9 +20,9 @@ entry:
 ; CHECK-NOT:    vmov d
 ; CHECK-NEXT:   vst1.16
   %0 = getelementptr inbounds %struct.int32x4_t, %struct.int32x4_t* %vT0ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
-  %1 = load <4 x i32>* %0, align 16               ; <<4 x i32>> [#uses=1]
+  %1 = load <4 x i32>, <4 x i32>* %0, align 16               ; <<4 x i32>> [#uses=1]
   %2 = getelementptr inbounds %struct.int32x4_t, %struct.int32x4_t* %vT1ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
-  %3 = load <4 x i32>* %2, align 16               ; <<4 x i32>> [#uses=1]
+  %3 = load <4 x i32>, <4 x i32>* %2, align 16               ; <<4 x i32>> [#uses=1]
   %4 = bitcast i16* %i_ptr to i8*                 ; <i8*> [#uses=1]
   %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4, i32 1) ; <<8 x i16>> [#uses=1]
   %6 = bitcast <8 x i16> %5 to <2 x double>       ; <<2 x double>> [#uses=2]
@@ -56,9 +56,9 @@ entry:
 ; CHECK:        vst1.16
 ; CHECK:        vst1.16
   %0 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %vT0ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
-  %1 = load <8 x i16>* %0, align 16               ; <<8 x i16>> [#uses=1]
+  %1 = load <8 x i16>, <8 x i16>* %0, align 16               ; <<8 x i16>> [#uses=1]
   %2 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %vT1ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
-  %3 = load <8 x i16>* %2, align 16               ; <<8 x i16>> [#uses=1]
+  %3 = load <8 x i16>, <8 x i16>* %2, align 16               ; <<8 x i16>> [#uses=1]
   %4 = bitcast i16* %i_ptr to i8*                 ; <i8*> [#uses=1]
   %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4, i32 1) ; <<8 x i16>> [#uses=1]
   %6 = getelementptr inbounds i16, i16* %i_ptr, i32 8  ; <i16*> [#uses=1]
@@ -146,7 +146,7 @@ define <8 x i16> @t5(i16* %A, <8 x i16>*
 ; CHECK-NOT:    vmov
 ; CHECK:        vadd.i16
   %tmp0 = bitcast i16* %A to i8*                  ; <i8*> [#uses=1]
-  %tmp1 = load <8 x i16>* %B                      ; <<8 x i16>> [#uses=2]
+  %tmp1 = load <8 x i16>, <8 x i16>* %B                      ; <<8 x i16>> [#uses=2]
   %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 1) ; <%struct.__neon_int16x8x2_t> [#uses=2]
   %tmp3 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 0 ; <<8 x i16>> [#uses=1]
   %tmp4 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 1 ; <<8 x i16>> [#uses=1]
@@ -159,7 +159,7 @@ define <8 x i8> @t6(i8* %A, <8 x i8>* %B
 ; CHECK:        vldr
 ; CHECK:        vorr d[[D0:[0-9]+]], d[[D1:[0-9]+]]
 ; CHECK-NEXT:   vld2.8 {d[[D1]][1], d[[D0]][1]}
-  %tmp1 = load <8 x i8>* %B                       ; <<8 x i8>> [#uses=2]
+  %tmp1 = load <8 x i8>, <8 x i8>* %B                       ; <<8 x i8>> [#uses=2]
   %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1) ; <%struct.__neon_int8x8x2_t> [#uses=2]
   %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0 ; <<8 x i8>> [#uses=1]
   %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1 ; <<8 x i8>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/saxpy10-a9.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/saxpy10-a9.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/saxpy10-a9.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/saxpy10-a9.ll Fri Feb 27 15:17:42 2015
@@ -63,72 +63,72 @@ target datalayout = "e-p:32:32:32-i1:8:3
 ; This accumulates a sum rather than storing each result.
 define float @saxpy10(float* nocapture readonly %data1, float* nocapture readonly %data2, float %a) {
 entry:
-  %0 = load float* %data1, align 4
+  %0 = load float, float* %data1, align 4
   %mul = fmul float %0, %a
-  %1 = load float* %data2, align 4
+  %1 = load float, float* %data2, align 4
   %add = fadd float %mul, %1
   %add2 = fadd float %add, 0.000000e+00
   %arrayidx.1 = getelementptr inbounds float, float* %data1, i32 1
-  %2 = load float* %arrayidx.1, align 4
+  %2 = load float, float* %arrayidx.1, align 4
   %mul.1 = fmul float %2, %a
   %arrayidx1.1 = getelementptr inbounds float, float* %data2, i32 1
-  %3 = load float* %arrayidx1.1, align 4
+  %3 = load float, float* %arrayidx1.1, align 4
   %add.1 = fadd float %mul.1, %3
   %add2.1 = fadd float %add2, %add.1
   %arrayidx.2 = getelementptr inbounds float, float* %data1, i32 2
-  %4 = load float* %arrayidx.2, align 4
+  %4 = load float, float* %arrayidx.2, align 4
   %mul.2 = fmul float %4, %a
   %arrayidx1.2 = getelementptr inbounds float, float* %data2, i32 2
-  %5 = load float* %arrayidx1.2, align 4
+  %5 = load float, float* %arrayidx1.2, align 4
   %add.2 = fadd float %mul.2, %5
   %add2.2 = fadd float %add2.1, %add.2
   %arrayidx.3 = getelementptr inbounds float, float* %data1, i32 3
-  %6 = load float* %arrayidx.3, align 4
+  %6 = load float, float* %arrayidx.3, align 4
   %mul.3 = fmul float %6, %a
   %arrayidx1.3 = getelementptr inbounds float, float* %data2, i32 3
-  %7 = load float* %arrayidx1.3, align 4
+  %7 = load float, float* %arrayidx1.3, align 4
   %add.3 = fadd float %mul.3, %7
   %add2.3 = fadd float %add2.2, %add.3
   %arrayidx.4 = getelementptr inbounds float, float* %data1, i32 4
-  %8 = load float* %arrayidx.4, align 4
+  %8 = load float, float* %arrayidx.4, align 4
   %mul.4 = fmul float %8, %a
   %arrayidx1.4 = getelementptr inbounds float, float* %data2, i32 4
-  %9 = load float* %arrayidx1.4, align 4
+  %9 = load float, float* %arrayidx1.4, align 4
   %add.4 = fadd float %mul.4, %9
   %add2.4 = fadd float %add2.3, %add.4
   %arrayidx.5 = getelementptr inbounds float, float* %data1, i32 5
-  %10 = load float* %arrayidx.5, align 4
+  %10 = load float, float* %arrayidx.5, align 4
   %mul.5 = fmul float %10, %a
   %arrayidx1.5 = getelementptr inbounds float, float* %data2, i32 5
-  %11 = load float* %arrayidx1.5, align 4
+  %11 = load float, float* %arrayidx1.5, align 4
   %add.5 = fadd float %mul.5, %11
   %add2.5 = fadd float %add2.4, %add.5
   %arrayidx.6 = getelementptr inbounds float, float* %data1, i32 6
-  %12 = load float* %arrayidx.6, align 4
+  %12 = load float, float* %arrayidx.6, align 4
   %mul.6 = fmul float %12, %a
   %arrayidx1.6 = getelementptr inbounds float, float* %data2, i32 6
-  %13 = load float* %arrayidx1.6, align 4
+  %13 = load float, float* %arrayidx1.6, align 4
   %add.6 = fadd float %mul.6, %13
   %add2.6 = fadd float %add2.5, %add.6
   %arrayidx.7 = getelementptr inbounds float, float* %data1, i32 7
-  %14 = load float* %arrayidx.7, align 4
+  %14 = load float, float* %arrayidx.7, align 4
   %mul.7 = fmul float %14, %a
   %arrayidx1.7 = getelementptr inbounds float, float* %data2, i32 7
-  %15 = load float* %arrayidx1.7, align 4
+  %15 = load float, float* %arrayidx1.7, align 4
   %add.7 = fadd float %mul.7, %15
   %add2.7 = fadd float %add2.6, %add.7
   %arrayidx.8 = getelementptr inbounds float, float* %data1, i32 8
-  %16 = load float* %arrayidx.8, align 4
+  %16 = load float, float* %arrayidx.8, align 4
   %mul.8 = fmul float %16, %a
   %arrayidx1.8 = getelementptr inbounds float, float* %data2, i32 8
-  %17 = load float* %arrayidx1.8, align 4
+  %17 = load float, float* %arrayidx1.8, align 4
   %add.8 = fadd float %mul.8, %17
   %add2.8 = fadd float %add2.7, %add.8
   %arrayidx.9 = getelementptr inbounds float, float* %data1, i32 9
-  %18 = load float* %arrayidx.9, align 4
+  %18 = load float, float* %arrayidx.9, align 4
   %mul.9 = fmul float %18, %a
   %arrayidx1.9 = getelementptr inbounds float, float* %data2, i32 9
-  %19 = load float* %arrayidx1.9, align 4
+  %19 = load float, float* %arrayidx1.9, align 4
   %add.9 = fadd float %mul.9, %19
   %add2.9 = fadd float %add2.8, %add.9
   ret float %add2.9

Modified: llvm/trunk/test/CodeGen/ARM/segmented-stacks.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/segmented-stacks.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/segmented-stacks.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/segmented-stacks.ll Fri Feb 27 15:17:42 2015
@@ -55,7 +55,7 @@ define void @test_basic() #0 {
 }
 
 define i32 @test_nested(i32 * nest %closure, i32 %other) #0 {
-       %addend = load i32 * %closure
+       %addend = load i32 , i32 * %closure
        %result = add i32 %other, %addend
        %mem = alloca i32, i32 10
        call void @dummy_use (i32* %mem, i32 10)

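One cosmetic side effect is visible in the test_nested hunk above: the rewrite appears to splice the value type in front of the existing operand text without re-normalizing whitespace, so the original file's unusual "i32 *" spelling survives:

    ; spacing carried over verbatim from the original operand text
    %addend = load i32 , i32 * %closure

The IR parser accepts either spacing, so the test is unaffected.
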
Modified: llvm/trunk/test/CodeGen/ARM/select_xform.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/select_xform.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/select_xform.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/select_xform.ll Fri Feb 27 15:17:42 2015
@@ -183,7 +183,7 @@ define i32 @t12(i32 %a, i32 %b) nounwind
 ; Handle frame index operands.
 define void @pr13628() nounwind uwtable align 2 {
   %x3 = alloca i8, i32 256, align 8
-  %x4 = load i8* undef, align 1
+  %x4 = load i8, i8* undef, align 1
   %x5 = icmp ne i8 %x4, 0
   %x6 = select i1 %x5, i8* %x3, i8* null
   call void @bar(i8* %x6) nounwind

Modified: llvm/trunk/test/CodeGen/ARM/shifter_operand.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/shifter_operand.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/shifter_operand.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/shifter_operand.ll Fri Feb 27 15:17:42 2015
@@ -43,8 +43,8 @@ entry:
         %tmp3 = inttoptr i32 %tmp2 to i32*
         %tmp4 = add i32 %base2, %tmp1
         %tmp5 = inttoptr i32 %tmp4 to i32*
-        %tmp6 = load i32* %tmp3
-        %tmp7 = load i32* %tmp5
+        %tmp6 = load i32, i32* %tmp3
+        %tmp7 = load i32, i32* %tmp5
         %tmp8 = add i32 %tmp7, %tmp6
         ret i32 %tmp8
 }
@@ -68,7 +68,7 @@ entry:
   %1 = bitcast i8* %0 to i32*
   %2 = sext i16 %addr to i32
   %3 = getelementptr inbounds i32, i32* %1, i32 %2
-  %4 = load i32* %3, align 4
+  %4 = load i32, i32* %3, align 4
   %5 = add nsw i32 %4, 1
   store i32 %5, i32* %3, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/ARM/smul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smul.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/smul.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 define i32 @f1(i32 %y) {
 ; CHECK: f1
 ; CHECK: smulbt
-        %tmp = load i16* @x             ; <i16> [#uses=1]
+        %tmp = load i16, i16* @x             ; <i16> [#uses=1]
         %tmp1 = add i16 %tmp, 2         ; <i16> [#uses=1]
         %tmp2 = sext i16 %tmp1 to i32           ; <i32> [#uses=1]
         %tmp3 = ashr i32 %y, 16         ; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/space-directive.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/space-directive.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/space-directive.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/space-directive.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ define i32 @test_space() minsize {
 ; CHECK: [[PAST_CP]]:
 ; CHECK: .zero 10000
   %addr = inttoptr i32 12345678 to i32*
-  %val = load i32* %addr
+  %val = load i32, i32* %addr
   call i32 @llvm.arm.space(i32 10000, i32 undef)
   ret i32 %val
 }

Modified: llvm/trunk/test/CodeGen/ARM/spill-q.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/spill-q.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/spill-q.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/spill-q.ll Fri Feb 27 15:17:42 2015
@@ -43,7 +43,7 @@ entry:
   store float 0.000000e+00, float* undef, align 4
   %ld12 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
   store float 0.000000e+00, float* undef, align 4
-  %val173 = load <4 x float>* undef               ; <<4 x float>> [#uses=1]
+  %val173 = load <4 x float>, <4 x float>* undef               ; <<4 x float>> [#uses=1]
   br label %bb4
 
 bb4:                                              ; preds = %bb193, %entry

Modified: llvm/trunk/test/CodeGen/ARM/ssp-data-layout.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/ssp-data-layout.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/ssp-data-layout.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/ssp-data-layout.ll Fri Feb 27 15:17:42 2015
@@ -149,18 +149,18 @@ entry:
   %arraydecay22 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i32 0
   %arraydecay23 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i32 0
   %arraydecay24 = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i32 0
-  %0 = load i32* %x, align 4
-  %1 = load i32* %y, align 4
-  %2 = load i32* %z, align 4
+  %0 = load i32, i32* %x, align 4
+  %1 = load i32, i32* %y, align 4
+  %2 = load i32, i32* %z, align 4
   %coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
   %3 = bitcast [8 x i8]* %coerce.dive to i64*
-  %4 = load i64* %3, align 1
+  %4 = load i64, i64* %3, align 1
   %coerce.dive25 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
   %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
-  %6 = load i16* %5, align 1
+  %6 = load i16, i16* %5, align 1
   %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
   %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
-  %8 = load i32* %7, align 1
+  %8 = load i32, i32* %7, align 1
   call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
   ret void
 }
@@ -296,18 +296,18 @@ entry:
   %arraydecay22 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i32 0
   %arraydecay23 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i32 0
   %arraydecay24 = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i32 0
-  %0 = load i32* %x, align 4
-  %1 = load i32* %y, align 4
-  %2 = load i32* %z, align 4
+  %0 = load i32, i32* %x, align 4
+  %1 = load i32, i32* %y, align 4
+  %2 = load i32, i32* %z, align 4
   %coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
   %3 = bitcast [8 x i8]* %coerce.dive to i64*
-  %4 = load i64* %3, align 1
+  %4 = load i64, i64* %3, align 1
   %coerce.dive25 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
   %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
-  %6 = load i16* %5, align 1
+  %6 = load i16, i16* %5, align 1
   %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
   %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
-  %8 = load i32* %7, align 1
+  %8 = load i32, i32* %7, align 1
   call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
   ret void
 }
@@ -431,18 +431,18 @@ entry:
   %arraydecay22 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i32 0
   %arraydecay23 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i32 0
   %arraydecay24 = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i32 0
-  %0 = load i32* %x, align 4
-  %1 = load i32* %y, align 4
-  %2 = load i32* %z, align 4
+  %0 = load i32, i32* %x, align 4
+  %1 = load i32, i32* %y, align 4
+  %2 = load i32, i32* %z, align 4
   %coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
   %3 = bitcast [8 x i8]* %coerce.dive to i64*
-  %4 = load i64* %3, align 1
+  %4 = load i64, i64* %3, align 1
   %coerce.dive25 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
   %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
-  %6 = load i16* %5, align 1
+  %6 = load i16, i16* %5, align 1
   %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
   %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
-  %8 = load i32* %7, align 1
+  %8 = load i32, i32* %7, align 1
   call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
   ret void
 }
@@ -475,13 +475,13 @@ entry:
   %0 = bitcast %struct.struct_large_char2* %b to %struct.struct_large_char*
   %coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %0, i32 0, i32 0
   %1 = bitcast [8 x i8]* %coerce.dive to i64*
-  %2 = load i64* %1, align 1
+  %2 = load i64, i64* %1, align 1
   %coerce.dive4 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %a, i32 0, i32 0
   %3 = bitcast [2 x i8]* %coerce.dive4 to i16*
-  %4 = load i16* %3, align 1
+  %4 = load i16, i16* %3, align 1
   %coerce.dive5 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d2, i32 0, i32 0
   %5 = bitcast [2 x i16]* %coerce.dive5 to i32*
-  %6 = load i32* %5, align 1
+  %6 = load i32, i32* %5, align 1
   call void @takes_all(i64 %2, i16 %4, %struct.struct_large_nonchar* byval align 8 %d1, i32 %6, i8* null, i8* null, i32* null, i16* null, i32* null, i32 0, i32 0, i32 0)
   ret void
 }

Modified: llvm/trunk/test/CodeGen/ARM/stack-alignment.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/stack-alignment.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/stack-alignment.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/stack-alignment.ll Fri Feb 27 15:17:42 2015
@@ -40,65 +40,65 @@ entry:
 ; CHECK-THUMB2:      bfc	r4, #0, #12
 ; CHECK-THUMB2-NEXT: mov	sp, r4
   %a = alloca i8, align 4096
-  %0 = load double* %d, align 4
+  %0 = load double, double* %d, align 4
   %arrayidx1 = getelementptr inbounds double, double* %d, i32 1
-  %1 = load double* %arrayidx1, align 4
+  %1 = load double, double* %arrayidx1, align 4
   %arrayidx2 = getelementptr inbounds double, double* %d, i32 2
-  %2 = load double* %arrayidx2, align 4
+  %2 = load double, double* %arrayidx2, align 4
   %arrayidx3 = getelementptr inbounds double, double* %d, i32 3
-  %3 = load double* %arrayidx3, align 4
+  %3 = load double, double* %arrayidx3, align 4
   %arrayidx4 = getelementptr inbounds double, double* %d, i32 4
-  %4 = load double* %arrayidx4, align 4
+  %4 = load double, double* %arrayidx4, align 4
   %arrayidx5 = getelementptr inbounds double, double* %d, i32 5
-  %5 = load double* %arrayidx5, align 4
+  %5 = load double, double* %arrayidx5, align 4
   %arrayidx6 = getelementptr inbounds double, double* %d, i32 6
-  %6 = load double* %arrayidx6, align 4
+  %6 = load double, double* %arrayidx6, align 4
   %arrayidx7 = getelementptr inbounds double, double* %d, i32 7
-  %7 = load double* %arrayidx7, align 4
+  %7 = load double, double* %arrayidx7, align 4
   %arrayidx8 = getelementptr inbounds double, double* %d, i32 8
-  %8 = load double* %arrayidx8, align 4
+  %8 = load double, double* %arrayidx8, align 4
   %arrayidx9 = getelementptr inbounds double, double* %d, i32 9
-  %9 = load double* %arrayidx9, align 4
+  %9 = load double, double* %arrayidx9, align 4
   %arrayidx10 = getelementptr inbounds double, double* %d, i32 10
-  %10 = load double* %arrayidx10, align 4
+  %10 = load double, double* %arrayidx10, align 4
   %arrayidx11 = getelementptr inbounds double, double* %d, i32 11
-  %11 = load double* %arrayidx11, align 4
+  %11 = load double, double* %arrayidx11, align 4
   %arrayidx12 = getelementptr inbounds double, double* %d, i32 12
-  %12 = load double* %arrayidx12, align 4
+  %12 = load double, double* %arrayidx12, align 4
   %arrayidx13 = getelementptr inbounds double, double* %d, i32 13
-  %13 = load double* %arrayidx13, align 4
+  %13 = load double, double* %arrayidx13, align 4
   %arrayidx14 = getelementptr inbounds double, double* %d, i32 14
-  %14 = load double* %arrayidx14, align 4
+  %14 = load double, double* %arrayidx14, align 4
   %arrayidx15 = getelementptr inbounds double, double* %d, i32 15
-  %15 = load double* %arrayidx15, align 4
+  %15 = load double, double* %arrayidx15, align 4
   %arrayidx16 = getelementptr inbounds double, double* %d, i32 16
-  %16 = load double* %arrayidx16, align 4
+  %16 = load double, double* %arrayidx16, align 4
   %arrayidx17 = getelementptr inbounds double, double* %d, i32 17
-  %17 = load double* %arrayidx17, align 4
+  %17 = load double, double* %arrayidx17, align 4
   %arrayidx18 = getelementptr inbounds double, double* %d, i32 18
-  %18 = load double* %arrayidx18, align 4
+  %18 = load double, double* %arrayidx18, align 4
   %arrayidx19 = getelementptr inbounds double, double* %d, i32 19
-  %19 = load double* %arrayidx19, align 4
+  %19 = load double, double* %arrayidx19, align 4
   %arrayidx20 = getelementptr inbounds double, double* %d, i32 20
-  %20 = load double* %arrayidx20, align 4
+  %20 = load double, double* %arrayidx20, align 4
   %arrayidx21 = getelementptr inbounds double, double* %d, i32 21
-  %21 = load double* %arrayidx21, align 4
+  %21 = load double, double* %arrayidx21, align 4
   %arrayidx22 = getelementptr inbounds double, double* %d, i32 22
-  %22 = load double* %arrayidx22, align 4
+  %22 = load double, double* %arrayidx22, align 4
   %arrayidx23 = getelementptr inbounds double, double* %d, i32 23
-  %23 = load double* %arrayidx23, align 4
+  %23 = load double, double* %arrayidx23, align 4
   %arrayidx24 = getelementptr inbounds double, double* %d, i32 24
-  %24 = load double* %arrayidx24, align 4
+  %24 = load double, double* %arrayidx24, align 4
   %arrayidx25 = getelementptr inbounds double, double* %d, i32 25
-  %25 = load double* %arrayidx25, align 4
+  %25 = load double, double* %arrayidx25, align 4
   %arrayidx26 = getelementptr inbounds double, double* %d, i32 26
-  %26 = load double* %arrayidx26, align 4
+  %26 = load double, double* %arrayidx26, align 4
   %arrayidx27 = getelementptr inbounds double, double* %d, i32 27
-  %27 = load double* %arrayidx27, align 4
+  %27 = load double, double* %arrayidx27, align 4
   %arrayidx28 = getelementptr inbounds double, double* %d, i32 28
-  %28 = load double* %arrayidx28, align 4
+  %28 = load double, double* %arrayidx28, align 4
   %arrayidx29 = getelementptr inbounds double, double* %d, i32 29
-  %29 = load double* %arrayidx29, align 4
+  %29 = load double, double* %arrayidx29, align 4
   %div = fdiv double %29, %28
   %div30 = fdiv double %div, %27
   %div31 = fdiv double %div30, %26

Modified: llvm/trunk/test/CodeGen/ARM/str_post.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/str_post.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/str_post.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/str_post.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 define i16 @test1(i32* %X, i16* %A) {
 ; CHECK-LABEL: test1:
 ; CHECK: strh {{.*}}[{{.*}}], #-4
-        %Y = load i32* %X               ; <i32> [#uses=1]
+        %Y = load i32, i32* %X               ; <i32> [#uses=1]
         %tmp1 = trunc i32 %Y to i16             ; <i16> [#uses=1]
         store i16 %tmp1, i16* %A
         %tmp2 = ptrtoint i16* %A to i16         ; <i16> [#uses=1]
@@ -14,7 +14,7 @@ define i16 @test1(i32* %X, i16* %A) {
 define i32 @test2(i32* %X, i32* %A) {
 ; CHECK-LABEL: test2:
 ; CHECK: str {{.*}}[{{.*}}],
-        %Y = load i32* %X               ; <i32> [#uses=1]
+        %Y = load i32, i32* %X               ; <i32> [#uses=1]
         store i32 %Y, i32* %A
         %tmp1 = ptrtoint i32* %A to i32         ; <i32> [#uses=1]
         %tmp2 = sub i32 %tmp1, 4                ; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/ARM/str_pre-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/str_pre-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/str_pre-2.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/str_pre-2.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@ entry:
 ; CHECK: push {r4, r5, lr}
 ; CHECK: pop {r4, r5, pc}
         call void asm sideeffect "", "~{r4},~{r5}"() nounwind
-	%0 = load i64** @b, align 4
-	%1 = load i64* %0, align 4
+	%0 = load i64*, i64** @b, align 4
+	%1 = load i64, i64* %0, align 4
 	%2 = mul i64 %1, %a
 	ret i64 %2
 }

Modified: llvm/trunk/test/CodeGen/ARM/str_pre.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/str_pre.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/str_pre.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/str_pre.ll Fri Feb 27 15:17:42 2015
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple=arm-eabi %s -o -  | FileCheck %s
 
 define void @test1(i32* %X, i32* %A, i32** %dest) {
-        %B = load i32* %A               ; <i32> [#uses=1]
+        %B = load i32, i32* %A               ; <i32> [#uses=1]
         %Y = getelementptr i32, i32* %X, i32 4               ; <i32*> [#uses=2]
         store i32 %B, i32* %Y
         store i32* %Y, i32** %dest
@@ -9,7 +9,7 @@ define void @test1(i32* %X, i32* %A, i32
 }
 
 define i16* @test2(i16* %X, i32* %A) {
-        %B = load i32* %A               ; <i32> [#uses=1]
+        %B = load i32, i32* %A               ; <i32> [#uses=1]
         %Y = getelementptr i16, i16* %X, i32 4               ; <i16*> [#uses=2]
         %tmp = trunc i32 %B to i16              ; <i16> [#uses=1]
         store i16 %tmp, i16* %Y

Modified: llvm/trunk/test/CodeGen/ARM/struct-byval-frame-index.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/struct-byval-frame-index.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/struct-byval-frame-index.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/struct-byval-frame-index.ll Fri Feb 27 15:17:42 2015
@@ -72,10 +72,10 @@ declare void @SetMotionVectorsMB(%struct
 ; Function Attrs: nounwind
 define void @set_stored_macroblock_parameters() #1 {
 entry:
-  %0 = load %structB** @img, align 4
-  %1 = load i32* undef, align 4
+  %0 = load %structB*, %structB** @img, align 4
+  %1 = load i32, i32* undef, align 4
   %mb_data = getelementptr inbounds %structB, %structB* %0, i32 0, i32 61
-  %2 = load %structK** %mb_data, align 4
+  %2 = load %structK*, %structK** %mb_data, align 4
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %entry
@@ -134,7 +134,7 @@ if.then233:
 
 if.end236:                                        ; preds = %if.end230
   %cmp242 = icmp ne i16 undef, 8
-  %4 = load i32* @luma_transform_size_8x8_flag, align 4
+  %4 = load i32, i32* @luma_transform_size_8x8_flag, align 4
   %tobool245 = icmp ne i32 %4, 0
   %or.cond812 = or i1 %cmp242, %tobool245
   br i1 %or.cond812, label %if.end249, label %land.lhs.true246
@@ -150,11 +150,11 @@ if.then248:
   br label %if.end249
 
 if.end249:                                        ; preds = %if.then248, %land.lhs.true246, %if.end236
-  %5 = load i32* @luma_transform_size_8x8_flag, align 4
-  %6 = load %structA** @rdopt, align 4
+  %5 = load i32, i32* @luma_transform_size_8x8_flag, align 4
+  %6 = load %structA*, %structA** @rdopt, align 4
   %luma_transform_size_8x8_flag264 = getelementptr inbounds %structA, %structA* %6, i32 0, i32 21
   store i32 %5, i32* %luma_transform_size_8x8_flag264, align 4
-  %7 = load i32* undef, align 4
+  %7 = load i32, i32* undef, align 4
   %add281 = add nsw i32 %7, 0
   br label %for.body285
 
@@ -162,36 +162,36 @@ for.body285:
   %8 = phi %structB* [ undef, %if.end249 ], [ %.pre1155, %for.inc503 ]
   %i.21103 = phi i32 [ 0, %if.end249 ], [ %inc504, %for.inc503 ]
   %block_x286 = getelementptr inbounds %structB, %structB* %8, i32 0, i32 37
-  %9 = load i32* %block_x286, align 4
+  %9 = load i32, i32* %block_x286, align 4
   %add287 = add nsw i32 %9, %i.21103
   %shr289 = ashr i32 %i.21103, 1
   %add290 = add nsw i32 %shr289, 0
   %arrayidx292 = getelementptr inbounds %structK, %structK* %2, i32 %1, i32 15, i32 %add290
-  %10 = load %structM** @enc_picture, align 4
+  %10 = load %structM*, %structM** @enc_picture, align 4
   %ref_idx = getelementptr inbounds %structM, %structM* %10, i32 0, i32 35
-  %11 = load i8**** %ref_idx, align 4
-  %12 = load i8*** %11, align 4
+  %11 = load i8***, i8**** %ref_idx, align 4
+  %12 = load i8**, i8*** %11, align 4
   %arrayidx313 = getelementptr inbounds i8*, i8** %12, i32 %add281
-  %13 = load i8** %arrayidx313, align 4
+  %13 = load i8*, i8** %arrayidx313, align 4
   %arrayidx314 = getelementptr inbounds i8, i8* %13, i32 %add287
   store i8 -1, i8* %arrayidx314, align 1
-  %14 = load %structB** @img, align 4
+  %14 = load %structB*, %structB** @img, align 4
   %MbaffFrameFlag327 = getelementptr inbounds %structB, %structB* %14, i32 0, i32 100
-  %15 = load i32* %MbaffFrameFlag327, align 4
+  %15 = load i32, i32* %MbaffFrameFlag327, align 4
   %tobool328 = icmp eq i32 %15, 0
   br i1 %tobool328, label %if.end454, label %if.then329
 
 if.then329:                                       ; preds = %for.body285
-  %16 = load %structA** @rdopt, align 4
+  %16 = load %structA*, %structA** @rdopt, align 4
   br label %if.end454
 
 if.end454:                                        ; preds = %if.then329, %for.body285
-  %17 = load i32* %arrayidx292, align 4
+  %17 = load i32, i32* %arrayidx292, align 4
   %cmp457 = icmp eq i32 %17, 0
   br i1 %cmp457, label %if.then475, label %lor.lhs.false459
 
 lor.lhs.false459:                                 ; preds = %if.end454
-  %18 = load i32* %mb_type, align 4
+  %18 = load i32, i32* %mb_type, align 4
   switch i32 %18, label %for.inc503 [
     i32 9, label %if.then475
     i32 10, label %if.then475
@@ -205,7 +205,7 @@ if.then475:
 
 for.inc503:                                       ; preds = %if.then475, %lor.lhs.false459
   %inc504 = add nsw i32 %i.21103, 1
-  %.pre1155 = load %structB** @img, align 4
+  %.pre1155 = load %structB*, %structB** @img, align 4
   br label %for.body285
 }
 

Modified: llvm/trunk/test/CodeGen/ARM/sub-cmp-peephole.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/sub-cmp-peephole.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/sub-cmp-peephole.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/sub-cmp-peephole.ll Fri Feb 27 15:17:42 2015
@@ -169,7 +169,7 @@ entry:
 ; CHECK: sub
 ; CHECK: cmp
 ; CHECK: bge
-  %load = load i32* @t, align 4
+  %load = load i32, i32* @t, align 4
   %sub = sub i32 %load, 17
   %cmp = icmp slt i32 %sub, 0
   br i1 %cmp, label %if.then, label %if.else
@@ -191,7 +191,7 @@ entry:
 ; CHECK: sub
 ; CHECK: cmp
 ; CHECK: bhs
-  %load = load i32* @t, align 4
+  %load = load i32, i32* @t, align 4
   %sub = sub i32 %load, 17
   %cmp = icmp ult i32 %sub, 0
   br i1 %cmp, label %if.then, label %if.else

Modified: llvm/trunk/test/CodeGen/ARM/swift-atomics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/swift-atomics.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/swift-atomics.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/swift-atomics.ll Fri Feb 27 15:17:42 2015
@@ -33,7 +33,7 @@ define i32 @test_seq_cst(i32* %p, i32 %v
 ; CHECK-STRICT-ATOMIC: dmb {{ish$}}
 
   store atomic i32 %v, i32* %p seq_cst, align 4
-  %val = load atomic i32* %p seq_cst, align 4
+  %val = load atomic i32, i32* %p seq_cst, align 4
   ret i32 %val
 }
 
@@ -46,6 +46,6 @@ define i32 @test_acq(i32* %addr) {
 
 ; CHECK-STRICT-ATOMIC-LABEL: test_acq:
 ; CHECK-STRICT-ATOMIC: dmb {{ish$}}
-  %val = load atomic i32* %addr acquire, align 4
+  %val = load atomic i32, i32* %addr acquire, align 4
   ret i32 %val
 }

Modified: llvm/trunk/test/CodeGen/ARM/swift-vldm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/swift-vldm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/swift-vldm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/swift-vldm.ll Fri Feb 27 15:17:42 2015
@@ -15,11 +15,11 @@ entry:
   %addr1 = getelementptr double, double * %x, i32 1
   %addr2 = getelementptr double, double * %x, i32 2
   %addr3 = getelementptr double, double * %x, i32 3
-  %d0 = load double * %y
-  %d1 = load double * %x
-  %d2 = load double * %addr1
-  %d3 = load double * %addr2
-  %d4 = load double * %addr3
+  %d0 = load double , double * %y
+  %d1 = load double , double * %x
+  %d2 = load double , double * %addr1
+  %d3 = load double , double * %addr2
+  %d4 = load double , double * %addr3
   ; We are trying to force x[0-3] in registers d1 to d4 so that we can test we
   ; don't form a "vldmia rX, {d1, d2, d3, d4}".
   ; We are relying on the calling convention and that register allocation

Modified: llvm/trunk/test/CodeGen/ARM/tail-dup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/tail-dup.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/tail-dup.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/tail-dup.ll Fri Feb 27 15:17:42 2015
@@ -11,19 +11,19 @@
 
 define i32 @fn(i32* nocapture %opcodes) nounwind readonly ssp {
 entry:
-  %0 = load i32* %opcodes, align 4
+  %0 = load i32, i32* %opcodes, align 4
   %arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @fn.codetable, i32 0, i32 %0
   br label %indirectgoto
 
 INCREMENT:                                        ; preds = %indirectgoto
   %inc = add nsw i32 %result.0, 1
-  %1 = load i32* %opcodes.addr.0, align 4
+  %1 = load i32, i32* %opcodes.addr.0, align 4
   %arrayidx2 = getelementptr inbounds [3 x i8*], [3 x i8*]* @fn.codetable, i32 0, i32 %1
   br label %indirectgoto
 
 DECREMENT:                                        ; preds = %indirectgoto
   %dec = add nsw i32 %result.0, -1
-  %2 = load i32* %opcodes.addr.0, align 4
+  %2 = load i32, i32* %opcodes.addr.0, align 4
   %arrayidx4 = getelementptr inbounds [3 x i8*], [3 x i8*]* @fn.codetable, i32 0, i32 %2
   br label %indirectgoto
 
@@ -32,7 +32,7 @@ indirectgoto:
   %opcodes.pn = phi i32* [ %opcodes, %entry ], [ %opcodes.addr.0, %DECREMENT ], [ %opcodes.addr.0, %INCREMENT ]
   %indirect.goto.dest.in = phi i8** [ %arrayidx, %entry ], [ %arrayidx4, %DECREMENT ], [ %arrayidx2, %INCREMENT ]
   %opcodes.addr.0 = getelementptr inbounds i32, i32* %opcodes.pn, i32 1
-  %indirect.goto.dest = load i8** %indirect.goto.dest.in, align 4
+  %indirect.goto.dest = load i8*, i8** %indirect.goto.dest.in, align 4
   indirectbr i8* %indirect.goto.dest, [label %RETURN, label %INCREMENT, label %DECREMENT]
 
 RETURN:                                           ; preds = %indirectgoto

Modified: llvm/trunk/test/CodeGen/ARM/test-sharedidx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/test-sharedidx.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/test-sharedidx.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/test-sharedidx.ll Fri Feb 27 15:17:42 2015
@@ -24,10 +24,10 @@ for.body:
 ; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
   %i.09 = phi i32 [ %add5.3, %for.body.3 ], [ 0, %entry ]
   %arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.09
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv6 = zext i8 %0 to i32
   %arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.09
-  %1 = load i8* %arrayidx1, align 1
+  %1 = load i8, i8* %arrayidx1, align 1
   %conv27 = zext i8 %1 to i32
   %add = add nsw i32 %conv27, %conv6
   %conv3 = trunc i32 %add to i8
@@ -45,10 +45,10 @@ for.body.1:
 ; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
 ; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
   %arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %add5
-  %2 = load i8* %arrayidx.1, align 1
+  %2 = load i8, i8* %arrayidx.1, align 1
   %conv6.1 = zext i8 %2 to i32
   %arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %add5
-  %3 = load i8* %arrayidx1.1, align 1
+  %3 = load i8, i8* %arrayidx1.1, align 1
   %conv27.1 = zext i8 %3 to i32
   %add.1 = add nsw i32 %conv27.1, %conv6.1
   %conv3.1 = trunc i32 %add.1 to i8
@@ -63,10 +63,10 @@ for.body.2:
 ; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
 ; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
   %arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %add5.1
-  %4 = load i8* %arrayidx.2, align 1
+  %4 = load i8, i8* %arrayidx.2, align 1
   %conv6.2 = zext i8 %4 to i32
   %arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %add5.1
-  %5 = load i8* %arrayidx1.2, align 1
+  %5 = load i8, i8* %arrayidx1.2, align 1
   %conv27.2 = zext i8 %5 to i32
   %add.2 = add nsw i32 %conv27.2, %conv6.2
   %conv3.2 = trunc i32 %add.2 to i8
@@ -81,10 +81,10 @@ for.body.3:
 ; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
 ; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
   %arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %add5.2
-  %6 = load i8* %arrayidx.3, align 1
+  %6 = load i8, i8* %arrayidx.3, align 1
   %conv6.3 = zext i8 %6 to i32
   %arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %add5.2
-  %7 = load i8* %arrayidx1.3, align 1
+  %7 = load i8, i8* %arrayidx1.3, align 1
   %conv27.3 = zext i8 %7 to i32
   %add.3 = add nsw i32 %conv27.3, %conv6.3
   %conv3.3 = trunc i32 %add.3 to i8

Modified: llvm/trunk/test/CodeGen/ARM/thumb1-varalloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/thumb1-varalloc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/thumb1-varalloc.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/thumb1-varalloc.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ entry:
 ; CHECK-LABEL: foo:
 
 	%size = alloca i32, align 4
-	%0 = load i8** @__bar, align 4
+	%0 = load i8*, i8** @__bar, align 4
 	%1 = icmp eq i8* %0, null
 	br i1 %1, label %bb1, label %bb3
 ; CHECK: bne

Modified: llvm/trunk/test/CodeGen/ARM/thumb1_return_sequence.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/thumb1_return_sequence.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/thumb1_return_sequence.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/thumb1_return_sequence.ll Fri Feb 27 15:17:42 2015
@@ -13,10 +13,10 @@ entry:
 
   %b = alloca <6 x i32>, align 16
   %a = alloca <4 x i32>, align 16
-  %stuff = load <6 x i32>* %p, align 16
+  %stuff = load <6 x i32>, <6 x i32>* %p, align 16
   store <6 x i32> %stuff, <6 x i32>* %b, align 16
   store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32>* %a, align 16
-  %0 = load <4 x i32>* %a, align 16
+  %0 = load <4 x i32>, <4 x i32>* %a, align 16
   ret <4 x i32> %0
 
 ; Epilogue
@@ -46,7 +46,7 @@ entry:
   %a = alloca <4 x i32>, align 16
   store <4 x i32> <i32 42, i32 42, i32 42, i32 42>, <4 x i32>* %b, align 16
   store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32>* %a, align 16
-  %0 = load <4 x i32>* %a, align 16
+  %0 = load <4 x i32>, <4 x i32>* %a, align 16
   call void @llvm.va_start(i8* null)
   ret <4 x i32> %0
 
@@ -78,7 +78,7 @@ entry:
 ; CHECK-V4T:    push    {[[SAVED:(r[4567](, )?)+]], lr}
 ; CHECK-V5T:    push    {[[SAVED:(r[4567](, )?)+]], lr}
 
-  %0 = load <6 x i32>* %p, align 16
+  %0 = load <6 x i32>, <6 x i32>* %p, align 16
   %1 = extractelement <6 x i32> %0, i32 0
   %2 = extractelement <6 x i32> %0, i32 1
   %3 = extractelement <6 x i32> %0, i32 2
@@ -121,24 +121,24 @@ entry:
   store i32 2, i32* %b, align 4
   store i32 3, i32* %c, align 4
   store i32 4, i32* %d, align 4
-  %0 = load i32* %a, align 4
+  %0 = load i32, i32* %a, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* %a, align 4
-  %1 = load i32* %b, align 4
+  %1 = load i32, i32* %b, align 4
   %inc1 = add nsw i32 %1, 1
   store i32 %inc1, i32* %b, align 4
-  %2 = load i32* %c, align 4
+  %2 = load i32, i32* %c, align 4
   %inc2 = add nsw i32 %2, 1
   store i32 %inc2, i32* %c, align 4
-  %3 = load i32* %d, align 4
+  %3 = load i32, i32* %d, align 4
   %inc3 = add nsw i32 %3, 1
   store i32 %inc3, i32* %d, align 4
-  %4 = load i32* %a, align 4
-  %5 = load i32* %b, align 4
+  %4 = load i32, i32* %a, align 4
+  %5 = load i32, i32* %b, align 4
   %add = add nsw i32 %4, %5
-  %6 = load i32* %c, align 4
+  %6 = load i32, i32* %c, align 4
   %add4 = add nsw i32 %add, %6
-  %7 = load i32* %d, align 4
+  %7 = load i32, i32* %d, align 4
   %add5 = add nsw i32 %add4, %7
   %add6 = add nsw i32 %add5, %i
   call void @llvm.va_start(i8* null)
