[llvm] r230794 - [opaque pointer type] Add textual IR support for explicit type parameter to load instruction

David Blaikie dblaikie at gmail.com
Fri Feb 27 13:18:04 PST 2015
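
The change being tested is purely syntactic: a load instruction now
states its result type explicitly rather than deriving it from the
pointee type of its pointer operand. A minimal illustration
(hypothetical IR, not taken from the tests below):

  ; old form: result type implied by the i32* pointer operand
  %v = load i32* %p, align 4
  ; new form: explicit result type, followed by the pointer type
  %v = load i32, i32* %p, align 4

Every hunk below applies this same mechanical rewrite to the PowerPC
CodeGen tests.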


Modified: llvm/trunk/test/CodeGen/PowerPC/cr-spills.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/cr-spills.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/cr-spills.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/cr-spills.ll Fri Feb 27 15:17:42 2015
@@ -31,7 +31,7 @@ land.rhs:
 
 land.end:                                         ; preds = %land.rhs, %land.lhs.true, %entry
   %0 = phi i1 [ %tobool21, %land.rhs ], [ false, %land.lhs.true ], [ false, %entry ]
-  %cond = load i32** undef, align 8
+  %cond = load i32*, i32** undef, align 8
   br i1 undef, label %if.then95, label %for.body.lr.ph
 
 if.then95:                                        ; preds = %land.end
@@ -53,11 +53,11 @@ for.cond286.preheader:
 
 for.cond290.preheader:                            ; preds = %for.end520, %for.cond286.preheader
   %srcptr.31595 = phi i16* [ getelementptr inbounds ([768 x i16]* @SetupFastFullPelSearch.orig_pels, i64 0, i64 0), %for.cond286.preheader ], [ null, %for.end520 ]
-  %1 = load i32* undef, align 4
-  %2 = load i32* @weight_luma, align 4
-  %3 = load i32* @wp_luma_round, align 4
-  %4 = load i32* @luma_log_weight_denom, align 4
-  %5 = load i32* @offset_luma, align 4
+  %1 = load i32, i32* undef, align 4
+  %2 = load i32, i32* @weight_luma, align 4
+  %3 = load i32, i32* @wp_luma_round, align 4
+  %4 = load i32, i32* @luma_log_weight_denom, align 4
+  %5 = load i32, i32* @offset_luma, align 4
   %incdec.ptr502.sum = add i64 undef, 16
   br label %for.body293
 
@@ -68,7 +68,7 @@ for.body293:
   %LineSadBlk1.01587 = phi i32 [ 0, %for.cond290.preheader ], [ %add402, %for.body293 ]
   %LineSadBlk3.01586 = phi i32 [ 0, %for.cond290.preheader ], [ %add514, %for.body293 ]
   %LineSadBlk2.01585 = phi i32 [ 0, %for.cond290.preheader ], [ %add458, %for.body293 ]
-  %6 = load i16* %refptr.11590, align 2
+  %6 = load i16, i16* %refptr.11590, align 2
   %conv294 = zext i16 %6 to i32
   %mul295 = mul nsw i32 %conv294, %2
   %add296 = add nsw i32 %mul295, %3
@@ -78,16 +78,16 @@ for.body293:
   %cond.i.i1514 = select i1 %cmp.i.i1513, i32 %add297, i32 0
   %cmp.i4.i1515 = icmp slt i32 %cond.i.i1514, %1
   %cond.i5.i1516 = select i1 %cmp.i4.i1515, i32 %cond.i.i1514, i32 %1
-  %7 = load i16* %srcptr.41591, align 2
+  %7 = load i16, i16* %srcptr.41591, align 2
   %conv300 = zext i16 %7 to i32
   %sub301 = sub nsw i32 %cond.i5.i1516, %conv300
   %idxprom302 = sext i32 %sub301 to i64
   %arrayidx303 = getelementptr inbounds i32, i32* %cond, i64 %idxprom302
-  %8 = load i32* %arrayidx303, align 4
+  %8 = load i32, i32* %arrayidx303, align 4
   %add304 = add nsw i32 %8, %LineSadBlk0.01588
-  %9 = load i32* undef, align 4
+  %9 = load i32, i32* undef, align 4
   %add318 = add nsw i32 %add304, %9
-  %10 = load i16* undef, align 2
+  %10 = load i16, i16* undef, align 2
   %conv321 = zext i16 %10 to i32
   %mul322 = mul nsw i32 %conv321, %2
   %add323 = add nsw i32 %mul322, %3
@@ -100,22 +100,22 @@ for.body293:
   %sub329 = sub nsw i32 %cond.i5.i1508, 0
   %idxprom330 = sext i32 %sub329 to i64
   %arrayidx331 = getelementptr inbounds i32, i32* %cond, i64 %idxprom330
-  %11 = load i32* %arrayidx331, align 4
+  %11 = load i32, i32* %arrayidx331, align 4
   %add332 = add nsw i32 %add318, %11
   %cmp.i.i1501 = icmp sgt i32 undef, 0
   %cond.i.i1502 = select i1 %cmp.i.i1501, i32 undef, i32 0
   %cmp.i4.i1503 = icmp slt i32 %cond.i.i1502, %1
   %cond.i5.i1504 = select i1 %cmp.i4.i1503, i32 %cond.i.i1502, i32 %1
   %incdec.ptr341 = getelementptr inbounds i16, i16* %srcptr.41591, i64 4
-  %12 = load i16* null, align 2
+  %12 = load i16, i16* null, align 2
   %conv342 = zext i16 %12 to i32
   %sub343 = sub nsw i32 %cond.i5.i1504, %conv342
   %idxprom344 = sext i32 %sub343 to i64
   %arrayidx345 = getelementptr inbounds i32, i32* %cond, i64 %idxprom344
-  %13 = load i32* %arrayidx345, align 4
+  %13 = load i32, i32* %arrayidx345, align 4
   %add346 = add nsw i32 %add332, %13
   %incdec.ptr348 = getelementptr inbounds i16, i16* %refptr.11590, i64 5
-  %14 = load i16* null, align 2
+  %14 = load i16, i16* null, align 2
   %conv349 = zext i16 %14 to i32
   %mul350 = mul nsw i32 %conv349, %2
   %add351 = add nsw i32 %mul350, %3
@@ -126,15 +126,15 @@ for.body293:
   %cmp.i4.i1499 = icmp slt i32 %cond.i.i1498, %1
   %cond.i5.i1500 = select i1 %cmp.i4.i1499, i32 %cond.i.i1498, i32 %1
   %incdec.ptr355 = getelementptr inbounds i16, i16* %srcptr.41591, i64 5
-  %15 = load i16* %incdec.ptr341, align 2
+  %15 = load i16, i16* %incdec.ptr341, align 2
   %conv356 = zext i16 %15 to i32
   %sub357 = sub nsw i32 %cond.i5.i1500, %conv356
   %idxprom358 = sext i32 %sub357 to i64
   %arrayidx359 = getelementptr inbounds i32, i32* %cond, i64 %idxprom358
-  %16 = load i32* %arrayidx359, align 4
+  %16 = load i32, i32* %arrayidx359, align 4
   %add360 = add nsw i32 %16, %LineSadBlk1.01587
   %incdec.ptr362 = getelementptr inbounds i16, i16* %refptr.11590, i64 6
-  %17 = load i16* %incdec.ptr348, align 2
+  %17 = load i16, i16* %incdec.ptr348, align 2
   %conv363 = zext i16 %17 to i32
   %mul364 = mul nsw i32 %conv363, %2
   %add365 = add nsw i32 %mul364, %3
@@ -145,15 +145,15 @@ for.body293:
   %cmp.i4.i1495 = icmp slt i32 %cond.i.i1494, %1
   %cond.i5.i1496 = select i1 %cmp.i4.i1495, i32 %cond.i.i1494, i32 %1
   %incdec.ptr369 = getelementptr inbounds i16, i16* %srcptr.41591, i64 6
-  %18 = load i16* %incdec.ptr355, align 2
+  %18 = load i16, i16* %incdec.ptr355, align 2
   %conv370 = zext i16 %18 to i32
   %sub371 = sub nsw i32 %cond.i5.i1496, %conv370
   %idxprom372 = sext i32 %sub371 to i64
   %arrayidx373 = getelementptr inbounds i32, i32* %cond, i64 %idxprom372
-  %19 = load i32* %arrayidx373, align 4
+  %19 = load i32, i32* %arrayidx373, align 4
   %add374 = add nsw i32 %add360, %19
   %incdec.ptr376 = getelementptr inbounds i16, i16* %refptr.11590, i64 7
-  %20 = load i16* %incdec.ptr362, align 2
+  %20 = load i16, i16* %incdec.ptr362, align 2
   %conv377 = zext i16 %20 to i32
   %mul378 = mul nsw i32 %conv377, %2
   %add379 = add nsw i32 %mul378, %3
@@ -164,14 +164,14 @@ for.body293:
   %cmp.i4.i1491 = icmp slt i32 %cond.i.i1490, %1
   %cond.i5.i1492 = select i1 %cmp.i4.i1491, i32 %cond.i.i1490, i32 %1
   %incdec.ptr383 = getelementptr inbounds i16, i16* %srcptr.41591, i64 7
-  %21 = load i16* %incdec.ptr369, align 2
+  %21 = load i16, i16* %incdec.ptr369, align 2
   %conv384 = zext i16 %21 to i32
   %sub385 = sub nsw i32 %cond.i5.i1492, %conv384
   %idxprom386 = sext i32 %sub385 to i64
   %arrayidx387 = getelementptr inbounds i32, i32* %cond, i64 %idxprom386
-  %22 = load i32* %arrayidx387, align 4
+  %22 = load i32, i32* %arrayidx387, align 4
   %add388 = add nsw i32 %add374, %22
-  %23 = load i16* %incdec.ptr376, align 2
+  %23 = load i16, i16* %incdec.ptr376, align 2
   %conv391 = zext i16 %23 to i32
   %mul392 = mul nsw i32 %conv391, %2
   %add395 = add nsw i32 0, %5
@@ -180,25 +180,25 @@ for.body293:
   %cmp.i4.i1487 = icmp slt i32 %cond.i.i1486, %1
   %cond.i5.i1488 = select i1 %cmp.i4.i1487, i32 %cond.i.i1486, i32 %1
   %incdec.ptr397 = getelementptr inbounds i16, i16* %srcptr.41591, i64 8
-  %24 = load i16* %incdec.ptr383, align 2
+  %24 = load i16, i16* %incdec.ptr383, align 2
   %conv398 = zext i16 %24 to i32
   %sub399 = sub nsw i32 %cond.i5.i1488, %conv398
   %idxprom400 = sext i32 %sub399 to i64
   %arrayidx401 = getelementptr inbounds i32, i32* %cond, i64 %idxprom400
-  %25 = load i32* %arrayidx401, align 4
+  %25 = load i32, i32* %arrayidx401, align 4
   %add402 = add nsw i32 %add388, %25
   %incdec.ptr404 = getelementptr inbounds i16, i16* %refptr.11590, i64 9
   %cmp.i4.i1483 = icmp slt i32 undef, %1
   %cond.i5.i1484 = select i1 %cmp.i4.i1483, i32 undef, i32 %1
-  %26 = load i16* %incdec.ptr397, align 2
+  %26 = load i16, i16* %incdec.ptr397, align 2
   %conv412 = zext i16 %26 to i32
   %sub413 = sub nsw i32 %cond.i5.i1484, %conv412
   %idxprom414 = sext i32 %sub413 to i64
   %arrayidx415 = getelementptr inbounds i32, i32* %cond, i64 %idxprom414
-  %27 = load i32* %arrayidx415, align 4
+  %27 = load i32, i32* %arrayidx415, align 4
   %add416 = add nsw i32 %27, %LineSadBlk2.01585
   %incdec.ptr418 = getelementptr inbounds i16, i16* %refptr.11590, i64 10
-  %28 = load i16* %incdec.ptr404, align 2
+  %28 = load i16, i16* %incdec.ptr404, align 2
   %conv419 = zext i16 %28 to i32
   %mul420 = mul nsw i32 %conv419, %2
   %add421 = add nsw i32 %mul420, %3
@@ -212,10 +212,10 @@ for.body293:
   %sub427 = sub nsw i32 %cond.i5.i1480, 0
   %idxprom428 = sext i32 %sub427 to i64
   %arrayidx429 = getelementptr inbounds i32, i32* %cond, i64 %idxprom428
-  %29 = load i32* %arrayidx429, align 4
+  %29 = load i32, i32* %arrayidx429, align 4
   %add430 = add nsw i32 %add416, %29
   %incdec.ptr432 = getelementptr inbounds i16, i16* %refptr.11590, i64 11
-  %30 = load i16* %incdec.ptr418, align 2
+  %30 = load i16, i16* %incdec.ptr418, align 2
   %conv433 = zext i16 %30 to i32
   %mul434 = mul nsw i32 %conv433, %2
   %add435 = add nsw i32 %mul434, %3
@@ -225,15 +225,15 @@ for.body293:
   %cond.i.i1474 = select i1 %cmp.i.i1473, i32 %add437, i32 0
   %cmp.i4.i1475 = icmp slt i32 %cond.i.i1474, %1
   %cond.i5.i1476 = select i1 %cmp.i4.i1475, i32 %cond.i.i1474, i32 %1
-  %31 = load i16* %incdec.ptr425, align 2
+  %31 = load i16, i16* %incdec.ptr425, align 2
   %conv440 = zext i16 %31 to i32
   %sub441 = sub nsw i32 %cond.i5.i1476, %conv440
   %idxprom442 = sext i32 %sub441 to i64
   %arrayidx443 = getelementptr inbounds i32, i32* %cond, i64 %idxprom442
-  %32 = load i32* %arrayidx443, align 4
+  %32 = load i32, i32* %arrayidx443, align 4
   %add444 = add nsw i32 %add430, %32
   %incdec.ptr446 = getelementptr inbounds i16, i16* %refptr.11590, i64 12
-  %33 = load i16* %incdec.ptr432, align 2
+  %33 = load i16, i16* %incdec.ptr432, align 2
   %conv447 = zext i16 %33 to i32
   %mul448 = mul nsw i32 %conv447, %2
   %add449 = add nsw i32 %mul448, %3
@@ -244,15 +244,15 @@ for.body293:
   %cmp.i4.i1471 = icmp slt i32 %cond.i.i1470, %1
   %cond.i5.i1472 = select i1 %cmp.i4.i1471, i32 %cond.i.i1470, i32 %1
   %incdec.ptr453 = getelementptr inbounds i16, i16* %srcptr.41591, i64 12
-  %34 = load i16* undef, align 2
+  %34 = load i16, i16* undef, align 2
   %conv454 = zext i16 %34 to i32
   %sub455 = sub nsw i32 %cond.i5.i1472, %conv454
   %idxprom456 = sext i32 %sub455 to i64
   %arrayidx457 = getelementptr inbounds i32, i32* %cond, i64 %idxprom456
-  %35 = load i32* %arrayidx457, align 4
+  %35 = load i32, i32* %arrayidx457, align 4
   %add458 = add nsw i32 %add444, %35
   %incdec.ptr460 = getelementptr inbounds i16, i16* %refptr.11590, i64 13
-  %36 = load i16* %incdec.ptr446, align 2
+  %36 = load i16, i16* %incdec.ptr446, align 2
   %conv461 = zext i16 %36 to i32
   %mul462 = mul nsw i32 %conv461, %2
   %add463 = add nsw i32 %mul462, %3
@@ -263,12 +263,12 @@ for.body293:
   %cmp.i4.i1467 = icmp slt i32 %cond.i.i1466, %1
   %cond.i5.i1468 = select i1 %cmp.i4.i1467, i32 %cond.i.i1466, i32 %1
   %incdec.ptr467 = getelementptr inbounds i16, i16* %srcptr.41591, i64 13
-  %37 = load i16* %incdec.ptr453, align 2
+  %37 = load i16, i16* %incdec.ptr453, align 2
   %conv468 = zext i16 %37 to i32
   %sub469 = sub nsw i32 %cond.i5.i1468, %conv468
   %idxprom470 = sext i32 %sub469 to i64
   %arrayidx471 = getelementptr inbounds i32, i32* %cond, i64 %idxprom470
-  %38 = load i32* %arrayidx471, align 4
+  %38 = load i32, i32* %arrayidx471, align 4
   %add472 = add nsw i32 %38, %LineSadBlk3.01586
   %incdec.ptr474 = getelementptr inbounds i16, i16* %refptr.11590, i64 14
   %add477 = add nsw i32 0, %3
@@ -279,15 +279,15 @@ for.body293:
   %cmp.i4.i1463 = icmp slt i32 %cond.i.i1462, %1
   %cond.i5.i1464 = select i1 %cmp.i4.i1463, i32 %cond.i.i1462, i32 %1
   %incdec.ptr481 = getelementptr inbounds i16, i16* %srcptr.41591, i64 14
-  %39 = load i16* %incdec.ptr467, align 2
+  %39 = load i16, i16* %incdec.ptr467, align 2
   %conv482 = zext i16 %39 to i32
   %sub483 = sub nsw i32 %cond.i5.i1464, %conv482
   %idxprom484 = sext i32 %sub483 to i64
   %arrayidx485 = getelementptr inbounds i32, i32* %cond, i64 %idxprom484
-  %40 = load i32* %arrayidx485, align 4
+  %40 = load i32, i32* %arrayidx485, align 4
   %add486 = add nsw i32 %add472, %40
   %incdec.ptr488 = getelementptr inbounds i16, i16* %refptr.11590, i64 15
-  %41 = load i16* %incdec.ptr474, align 2
+  %41 = load i16, i16* %incdec.ptr474, align 2
   %conv489 = zext i16 %41 to i32
   %mul490 = mul nsw i32 %conv489, %2
   %add491 = add nsw i32 %mul490, %3
@@ -298,14 +298,14 @@ for.body293:
   %cmp.i4.i1459 = icmp slt i32 %cond.i.i1458, %1
   %cond.i5.i1460 = select i1 %cmp.i4.i1459, i32 %cond.i.i1458, i32 %1
   %incdec.ptr495 = getelementptr inbounds i16, i16* %srcptr.41591, i64 15
-  %42 = load i16* %incdec.ptr481, align 2
+  %42 = load i16, i16* %incdec.ptr481, align 2
   %conv496 = zext i16 %42 to i32
   %sub497 = sub nsw i32 %cond.i5.i1460, %conv496
   %idxprom498 = sext i32 %sub497 to i64
   %arrayidx499 = getelementptr inbounds i32, i32* %cond, i64 %idxprom498
-  %43 = load i32* %arrayidx499, align 4
+  %43 = load i32, i32* %arrayidx499, align 4
   %add500 = add nsw i32 %add486, %43
-  %44 = load i16* %incdec.ptr488, align 2
+  %44 = load i16, i16* %incdec.ptr488, align 2
   %conv503 = zext i16 %44 to i32
   %mul504 = mul nsw i32 %conv503, %2
   %add505 = add nsw i32 %mul504, %3
@@ -315,12 +315,12 @@ for.body293:
   %cond.i.i1454 = select i1 %cmp.i.i1453, i32 %add507, i32 0
   %cmp.i4.i1455 = icmp slt i32 %cond.i.i1454, %1
   %cond.i5.i1456 = select i1 %cmp.i4.i1455, i32 %cond.i.i1454, i32 %1
-  %45 = load i16* %incdec.ptr495, align 2
+  %45 = load i16, i16* %incdec.ptr495, align 2
   %conv510 = zext i16 %45 to i32
   %sub511 = sub nsw i32 %cond.i5.i1456, %conv510
   %idxprom512 = sext i32 %sub511 to i64
   %arrayidx513 = getelementptr inbounds i32, i32* %cond, i64 %idxprom512
-  %46 = load i32* %arrayidx513, align 4
+  %46 = load i32, i32* %arrayidx513, align 4
   %add514 = add nsw i32 %add500, %46
   %add.ptr517 = getelementptr inbounds i16, i16* %refptr.11590, i64 %incdec.ptr502.sum
   %exitcond1692 = icmp eq i32 undef, 4

Modified: llvm/trunk/test/CodeGen/PowerPC/crbits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/crbits.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/crbits.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/crbits.ll Fri Feb 27 15:17:42 2015
@@ -145,7 +145,7 @@ entry:
 
 define zeroext i32 @exttest8() #0 {
 entry:
-  %v0 = load i64* undef, align 8
+  %v0 = load i64, i64* undef, align 8
   %sub = sub i64 80, %v0
   %div = lshr i64 %sub, 1
   %conv13 = trunc i64 %div to i32

Modified: llvm/trunk/test/CodeGen/PowerPC/crsave.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/crsave.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/crsave.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/crsave.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ entry:
   %0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmpw 2,$2,$1\0A\09mfcr $0", "=r,r,r,r,r,~{cr2}"(i32 1, i32 2, i32 3, i32 0) nounwind
   store i32 %0, i32* %ret, align 4
   call void @foo()
-  %1 = load i32* %ret, align 4
+  %1 = load i32, i32* %ret, align 4
   ret i32 %1
 }
 
@@ -38,7 +38,7 @@ entry:
   %0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmpw 2,$2,$1\0A\09cmpw 3,$2,$2\0A\09cmpw 4,$2,$3\0A\09mfcr $0", "=r,r,r,r,r,~{cr2},~{cr3},~{cr4}"(i32 1, i32 2, i32 3, i32 0) nounwind
   store i32 %0, i32* %ret, align 4
   call void @foo()
-  %1 = load i32* %ret, align 4
+  %1 = load i32, i32* %ret, align 4
   ret i32 %1
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/ctrloop-cpsgn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ctrloop-cpsgn.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ctrloop-cpsgn.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ctrloop-cpsgn.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ for.body:
   %i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %x.05 = phi ppc_fp128 [ %d, %entry ], [ %conv, %for.body ]
   %arrayidx = getelementptr inbounds ppc_fp128, ppc_fp128* %n, i32 %i.06
-  %0 = load ppc_fp128* %arrayidx, align 8
+  %0 = load ppc_fp128, ppc_fp128* %arrayidx, align 8
   %conv = tail call ppc_fp128 @copysignl(ppc_fp128 %x.05, ppc_fp128 %d) nounwind readonly
   %inc = add nsw i32 %i.06, 1
   %exitcond = icmp eq i32 %inc, 2048

Modified: llvm/trunk/test/CodeGen/PowerPC/ctrloop-fp64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ctrloop-fp64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ctrloop-fp64.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ctrloop-fp64.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ for.body:
   %i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
   %arrayidx = getelementptr inbounds double, double* %n, i32 %i.06
-  %0 = load double* %arrayidx, align 8
+  %0 = load double, double* %arrayidx, align 8
   %conv = sitofp i64 %x.05 to double
   %add = fadd double %conv, %0
   %conv1 = fptosi double %add to i64
@@ -31,7 +31,7 @@ for.end:
 
 define i32 @main(i32 %argc, i8** nocapture %argv) {
 entry:
-  %0 = load double* @init_value, align 8
+  %0 = load double, double* @init_value, align 8
   %conv = fptosi double %0 to i64
   %broadcast.splatinsert.i = insertelement <2 x i64> undef, i64 %conv, i32 0
   %broadcast.splat.i = shufflevector <2 x i64> %broadcast.splatinsert.i, <2 x i64> undef, <2 x i32> zeroinitializer

Modified: llvm/trunk/test/CodeGen/PowerPC/ctrloop-i64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ctrloop-i64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ctrloop-i64.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ctrloop-i64.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ for.body:
   %i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
   %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
-  %0 = load i64* %arrayidx, align 8
+  %0 = load i64, i64* %arrayidx, align 8
   %conv = udiv i64 %x.05, %d
   %conv1 = add i64 %conv, %0
   %inc = add nsw i32 %i.06, 1
@@ -33,7 +33,7 @@ for.body:
   %i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
   %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
-  %0 = load i64* %arrayidx, align 8
+  %0 = load i64, i64* %arrayidx, align 8
   %conv = sdiv i64 %x.05, %d
   %conv1 = add i64 %conv, %0
   %inc = add nsw i32 %i.06, 1
@@ -55,7 +55,7 @@ for.body:
   %i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
   %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
-  %0 = load i64* %arrayidx, align 8
+  %0 = load i64, i64* %arrayidx, align 8
   %conv = urem i64 %x.05, %d
   %conv1 = add i64 %conv, %0
   %inc = add nsw i32 %i.06, 1
@@ -77,7 +77,7 @@ for.body:
   %i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
   %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
-  %0 = load i64* %arrayidx, align 8
+  %0 = load i64, i64* %arrayidx, align 8
   %conv = srem i64 %x.05, %d
   %conv1 = add i64 %conv, %0
   %inc = add nsw i32 %i.06, 1

Modified: llvm/trunk/test/CodeGen/PowerPC/ctrloop-le.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ctrloop-le.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ctrloop-le.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ctrloop-le.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 28395, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -48,7 +48,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 9073, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -77,7 +77,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 21956, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -106,7 +106,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 16782, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -135,7 +135,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 19097, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -164,7 +164,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -193,7 +193,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -222,7 +222,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -251,7 +251,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -280,7 +280,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -310,7 +310,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -340,7 +340,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -370,7 +370,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -400,7 +400,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -430,7 +430,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8

Modified: llvm/trunk/test/CodeGen/PowerPC/ctrloop-lt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ctrloop-lt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ctrloop-lt.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ctrloop-lt.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 8531, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -49,7 +49,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 9152, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -79,7 +79,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 18851, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -108,7 +108,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 25466, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -137,7 +137,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 9295, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -166,7 +166,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -195,7 +195,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -224,7 +224,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -253,7 +253,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -282,7 +282,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -311,7 +311,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -340,7 +340,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -369,7 +369,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -398,7 +398,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -427,7 +427,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8

Modified: llvm/trunk/test/CodeGen/PowerPC/ctrloop-ne.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ctrloop-ne.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ctrloop-ne.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ctrloop-ne.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 32623, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -46,7 +46,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 29554, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -76,7 +76,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 15692, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -106,7 +106,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 10449, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -136,7 +136,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ 32087, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -165,7 +165,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -195,7 +195,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -225,7 +225,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -255,7 +255,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -285,7 +285,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -314,7 +314,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -344,7 +344,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -374,7 +374,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -404,7 +404,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
@@ -434,7 +434,7 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
   %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8* %arrayidx, align 1
+  %0 = load i8, i8* %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8

Modified: llvm/trunk/test/CodeGen/PowerPC/ctrloop-s000.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ctrloop-s000.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ctrloop-s000.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ctrloop-s000.ll Fri Feb 27 15:17:42 2015
@@ -36,97 +36,97 @@ for.cond1.preheader:
 for.body3:                                        ; preds = %for.body3, %for.cond1.preheader
   %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next.15, %for.body3 ]
   %arrayidx = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv
-  %0 = load double* %arrayidx, align 32
+  %0 = load double, double* %arrayidx, align 32
   %add = fadd double %0, 1.000000e+00
   %arrayidx5 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv
   store double %add, double* %arrayidx5, align 32
   %indvars.iv.next11 = or i64 %indvars.iv, 1
   %arrayidx.1 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next11
-  %1 = load double* %arrayidx.1, align 8
+  %1 = load double, double* %arrayidx.1, align 8
   %add.1 = fadd double %1, 1.000000e+00
   %arrayidx5.1 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next11
   store double %add.1, double* %arrayidx5.1, align 8
   %indvars.iv.next.112 = or i64 %indvars.iv, 2
   %arrayidx.2 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.112
-  %2 = load double* %arrayidx.2, align 16
+  %2 = load double, double* %arrayidx.2, align 16
   %add.2 = fadd double %2, 1.000000e+00
   %arrayidx5.2 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.112
   store double %add.2, double* %arrayidx5.2, align 16
   %indvars.iv.next.213 = or i64 %indvars.iv, 3
   %arrayidx.3 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.213
-  %3 = load double* %arrayidx.3, align 8
+  %3 = load double, double* %arrayidx.3, align 8
   %add.3 = fadd double %3, 1.000000e+00
   %arrayidx5.3 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.213
   store double %add.3, double* %arrayidx5.3, align 8
   %indvars.iv.next.314 = or i64 %indvars.iv, 4
   %arrayidx.4 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.314
-  %4 = load double* %arrayidx.4, align 32
+  %4 = load double, double* %arrayidx.4, align 32
   %add.4 = fadd double %4, 1.000000e+00
   %arrayidx5.4 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.314
   store double %add.4, double* %arrayidx5.4, align 32
   %indvars.iv.next.415 = or i64 %indvars.iv, 5
   %arrayidx.5 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.415
-  %5 = load double* %arrayidx.5, align 8
+  %5 = load double, double* %arrayidx.5, align 8
   %add.5 = fadd double %5, 1.000000e+00
   %arrayidx5.5 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.415
   store double %add.5, double* %arrayidx5.5, align 8
   %indvars.iv.next.516 = or i64 %indvars.iv, 6
   %arrayidx.6 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.516
-  %6 = load double* %arrayidx.6, align 16
+  %6 = load double, double* %arrayidx.6, align 16
   %add.6 = fadd double %6, 1.000000e+00
   %arrayidx5.6 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.516
   store double %add.6, double* %arrayidx5.6, align 16
   %indvars.iv.next.617 = or i64 %indvars.iv, 7
   %arrayidx.7 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.617
-  %7 = load double* %arrayidx.7, align 8
+  %7 = load double, double* %arrayidx.7, align 8
   %add.7 = fadd double %7, 1.000000e+00
   %arrayidx5.7 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.617
   store double %add.7, double* %arrayidx5.7, align 8
   %indvars.iv.next.718 = or i64 %indvars.iv, 8
   %arrayidx.8 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.718
-  %8 = load double* %arrayidx.8, align 32
+  %8 = load double, double* %arrayidx.8, align 32
   %add.8 = fadd double %8, 1.000000e+00
   %arrayidx5.8 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.718
   store double %add.8, double* %arrayidx5.8, align 32
   %indvars.iv.next.819 = or i64 %indvars.iv, 9
   %arrayidx.9 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.819
-  %9 = load double* %arrayidx.9, align 8
+  %9 = load double, double* %arrayidx.9, align 8
   %add.9 = fadd double %9, 1.000000e+00
   %arrayidx5.9 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.819
   store double %add.9, double* %arrayidx5.9, align 8
   %indvars.iv.next.920 = or i64 %indvars.iv, 10
   %arrayidx.10 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.920
-  %10 = load double* %arrayidx.10, align 16
+  %10 = load double, double* %arrayidx.10, align 16
   %add.10 = fadd double %10, 1.000000e+00
   %arrayidx5.10 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.920
   store double %add.10, double* %arrayidx5.10, align 16
   %indvars.iv.next.1021 = or i64 %indvars.iv, 11
   %arrayidx.11 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1021
-  %11 = load double* %arrayidx.11, align 8
+  %11 = load double, double* %arrayidx.11, align 8
   %add.11 = fadd double %11, 1.000000e+00
   %arrayidx5.11 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1021
   store double %add.11, double* %arrayidx5.11, align 8
   %indvars.iv.next.1122 = or i64 %indvars.iv, 12
   %arrayidx.12 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1122
-  %12 = load double* %arrayidx.12, align 32
+  %12 = load double, double* %arrayidx.12, align 32
   %add.12 = fadd double %12, 1.000000e+00
   %arrayidx5.12 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1122
   store double %add.12, double* %arrayidx5.12, align 32
   %indvars.iv.next.1223 = or i64 %indvars.iv, 13
   %arrayidx.13 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1223
-  %13 = load double* %arrayidx.13, align 8
+  %13 = load double, double* %arrayidx.13, align 8
   %add.13 = fadd double %13, 1.000000e+00
   %arrayidx5.13 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1223
   store double %add.13, double* %arrayidx5.13, align 8
   %indvars.iv.next.1324 = or i64 %indvars.iv, 14
   %arrayidx.14 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1324
-  %14 = load double* %arrayidx.14, align 16
+  %14 = load double, double* %arrayidx.14, align 16
   %add.14 = fadd double %14, 1.000000e+00
   %arrayidx5.14 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1324
   store double %add.14, double* %arrayidx5.14, align 16
   %indvars.iv.next.1425 = or i64 %indvars.iv, 15
   %arrayidx.15 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1425
-  %15 = load double* %arrayidx.15, align 8
+  %15 = load double, double* %arrayidx.15, align 8
   %add.15 = fadd double %15, 1.000000e+00
   %arrayidx5.15 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1425
   store double %add.15, double* %arrayidx5.15, align 8

Modified: llvm/trunk/test/CodeGen/PowerPC/ctrloop-sh.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ctrloop-sh.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ctrloop-sh.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ctrloop-sh.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@ entry:
 
 for.body:                                         ; preds = %for.body, %entry
   %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
-  %0 = load i128* %b, align 16
-  %1 = load i128* %c, align 16
+  %0 = load i128, i128* %b, align 16
+  %1 = load i128, i128* %c, align 16
   %shl = shl i128 %0, %1
   store i128 %shl, i128* %a, align 16
   %inc = add nsw i32 %i.02, 1
@@ -31,8 +31,8 @@ entry:
 
 for.body:                                         ; preds = %for.body, %entry
   %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
-  %0 = load i128* %b, align 16
-  %1 = load i128* %c, align 16
+  %0 = load i128, i128* %b, align 16
+  %1 = load i128, i128* %c, align 16
   %shl = ashr i128 %0, %1
   store i128 %shl, i128* %a, align 16
   %inc = add nsw i32 %i.02, 1
@@ -53,8 +53,8 @@ entry:
 
 for.body:                                         ; preds = %for.body, %entry
   %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
-  %0 = load i128* %b, align 16
-  %1 = load i128* %c, align 16
+  %0 = load i128, i128* %b, align 16
+  %1 = load i128, i128* %c, align 16
   %shl = lshr i128 %0, %1
   store i128 %shl, i128* %a, align 16
   %inc = add nsw i32 %i.02, 1

Modified: llvm/trunk/test/CodeGen/PowerPC/ctrloop-sums.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ctrloop-sums.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ctrloop-sums.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ctrloop-sums.ll Fri Feb 27 15:17:42 2015
@@ -24,7 +24,7 @@ for.body3.us:
   %indvars.iv = phi i64 [ 0, %for.body3.lr.ph.us ], [ %indvars.iv.next, %for.body3.us ]
   %Result.111.us = phi i32 [ %Result.014.us, %for.body3.lr.ph.us ], [ %add.us, %for.body3.us ]
   %arrayidx5.us = getelementptr inbounds [100 x i32], [100 x i32]* %Array, i64 %indvars.iv16, i64 %indvars.iv
-  %0 = load i32* %arrayidx5.us, align 4
+  %0 = load i32, i32* %arrayidx5.us, align 4
   %add.us = add nsw i32 %0, %Result.111.us
   %indvars.iv.next = add i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -106,7 +106,7 @@ for.body3.us.i:
   %indvars.iv.i = phi i64 [ 0, %for.body3.lr.ph.us.i ], [ %indvars.iv.next.i, %for.body3.us.i ]
   %Result.111.us.i = phi i32 [ %Result.014.us.i, %for.body3.lr.ph.us.i ], [ %add.us.i, %for.body3.us.i ]
   %arrayidx5.us.i = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv16.i, i64 %indvars.iv.i
-  %5 = load i32* %arrayidx5.us.i, align 4
+  %5 = load i32, i32* %arrayidx5.us.i, align 4
   %add.us.i = add nsw i32 %5, %Result.111.us.i
   %indvars.iv.next.i = add i64 %indvars.iv.i, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next.i to i32

Modified: llvm/trunk/test/CodeGen/PowerPC/ctrloops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ctrloops.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ctrloops.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ctrloops.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ entry:
 
 for.body:                                         ; preds = %for.body, %entry
   %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
-  %0 = load volatile i32* @a, align 4
+  %0 = load volatile i32, i32* @a, align 4
   %add = add nsw i32 %0, %c
   store volatile i32 %add, i32* @a, align 4
   %inc = add nsw i32 %i.01, 1
@@ -34,7 +34,7 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
-  %0 = load volatile i32* @a, align 4
+  %0 = load volatile i32, i32* @a, align 4
   %add = add nsw i32 %0, %c
   store volatile i32 %add, i32* @a, align 4
   %inc = add nsw i32 %i.02, 1
@@ -58,7 +58,7 @@ entry:
 for.body:                                         ; preds = %entry, %for.body
   %i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
   %mul = mul nsw i32 %i.02, %c
-  %0 = load volatile i32* @a, align 4
+  %0 = load volatile i32, i32* @a, align 4
   %add = add nsw i32 %0, %mul
   store volatile i32 %add, i32* @a, align 4
   %inc = add nsw i32 %i.02, 1

Modified: llvm/trunk/test/CodeGen/PowerPC/dcbt-sched.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/dcbt-sched.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/dcbt-sched.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/dcbt-sched.ll Fri Feb 27 15:17:42 2015
@@ -4,9 +4,9 @@ target triple = "powerpc64-unknown-linux
 
 define i8 @test1(i8* noalias %a, i8* noalias %b, i8* noalias %c) nounwind {
 entry:
-  %q = load i8* %b
+  %q = load i8, i8* %b
   call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 1)
-  %r = load i8* %c
+  %r = load i8, i8* %c
   %s = add i8 %q, %r
   ret i8 %s
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/delete-node.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/delete-node.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/delete-node.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/delete-node.ll Fri Feb 27 15:17:42 2015
@@ -9,11 +9,11 @@ entry:
       	br label %bb1
 
 bb1:            ; preds = %bb1, %entry
-        %0 = load i16* null, align 2            ; <i16> [#uses=1]
+        %0 = load i16, i16* null, align 2            ; <i16> [#uses=1]
         %1 = ashr i16 %0, 4             ; <i16> [#uses=1]
         %2 = sext i16 %1 to i32         ; <i32> [#uses=1]
         %3 = getelementptr i8, i8* null, i32 %2             ; <i8*> [#uses=1]
-        %4 = load i8* %3, align 1               ; <i8> [#uses=1]
+        %4 = load i8, i8* %3, align 1               ; <i8> [#uses=1]
         %5 = zext i8 %4 to i32          ; <i32> [#uses=1]
         %6 = shl i32 %5, 24             ; <i32> [#uses=1]
         %7 = or i32 0, %6               ; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/PowerPC/dyn-alloca-aligned.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/dyn-alloca-aligned.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/dyn-alloca-aligned.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/dyn-alloca-aligned.ll Fri Feb 27 15:17:42 2015
@@ -12,10 +12,10 @@ entry:
   %vla = alloca i32, i64 %0, align 128
   %vla1 = alloca i32, i64 %0, align 128
   %a2 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
-  %1 = load i32* %a2, align 4
+  %1 = load i32, i32* %a2, align 4
   store i32 %1, i32* %vla1, align 128
   %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
-  %2 = load i32* %b, align 4
+  %2 = load i32, i32* %b, align 4
   %arrayidx3 = getelementptr inbounds i32, i32* %vla1, i64 1
   store i32 %2, i32* %arrayidx3, align 4
   call void @bar(i32* %vla1, i32* %vla) #0

Modified: llvm/trunk/test/CodeGen/PowerPC/emptystruct.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/emptystruct.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/emptystruct.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/emptystruct.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ define void @callee(%struct.empty* noali
 entry:
   %a2.addr = alloca %struct.empty*, align 8
   store %struct.empty* %a2, %struct.empty** %a2.addr, align 8
-  %0 = load %struct.empty** %a2.addr, align 8
+  %0 = load %struct.empty*, %struct.empty** %a2.addr, align 8
   %1 = bitcast %struct.empty* %agg.result to i8*
   %2 = bitcast %struct.empty* %0 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 0, i32 1, i1 false)

Modified: llvm/trunk/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll Fri Feb 27 15:17:42 2015
@@ -69,9 +69,9 @@ define i32 @NAND1(i32 %X, i32 %Y) nounwi
 }
 
 define void @VNOR(<4 x float>* %P, <4 x float>* %Q) nounwind {
-	%tmp = load <4 x float>* %P		; <<4 x float>> [#uses=1]
+	%tmp = load <4 x float>, <4 x float>* %P		; <<4 x float>> [#uses=1]
 	%tmp.upgrd.1 = bitcast <4 x float> %tmp to <4 x i32>		; <<4 x i32>> [#uses=1]
-	%tmp2 = load <4 x float>* %Q		; <<4 x float>> [#uses=1]
+	%tmp2 = load <4 x float>, <4 x float>* %Q		; <<4 x float>> [#uses=1]
 	%tmp2.upgrd.2 = bitcast <4 x float> %tmp2 to <4 x i32>		; <<4 x i32>> [#uses=1]
 	%tmp3 = or <4 x i32> %tmp.upgrd.1, %tmp2.upgrd.2		; <<4 x i32>> [#uses=1]
 	%tmp4 = xor <4 x i32> %tmp3, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
@@ -81,9 +81,9 @@ define void @VNOR(<4 x float>* %P, <4 x
 }
 
 define void @VANDC(<4 x float>* %P, <4 x float>* %Q) nounwind {
-	%tmp = load <4 x float>* %P		; <<4 x float>> [#uses=1]
+	%tmp = load <4 x float>, <4 x float>* %P		; <<4 x float>> [#uses=1]
 	%tmp.upgrd.4 = bitcast <4 x float> %tmp to <4 x i32>		; <<4 x i32>> [#uses=1]
-	%tmp2 = load <4 x float>* %Q		; <<4 x float>> [#uses=1]
+	%tmp2 = load <4 x float>, <4 x float>* %Q		; <<4 x float>> [#uses=1]
 	%tmp2.upgrd.5 = bitcast <4 x float> %tmp2 to <4 x i32>		; <<4 x i32>> [#uses=1]
 	%tmp4 = xor <4 x i32> %tmp2.upgrd.5, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
 	%tmp3 = and <4 x i32> %tmp.upgrd.4, %tmp4		; <<4 x i32>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/PowerPC/fast-isel-GEP-coalesce.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/fast-isel-GEP-coalesce.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/fast-isel-GEP-coalesce.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/fast-isel-GEP-coalesce.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ entry:
   %addr = alloca i32*, align 4
   store i32* getelementptr inbounds ([2 x [2 x [2 x [2 x [2 x i32]]]]]* @arr, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1), i32** %addr, align 4
 ; ELF64: addi {{[0-9]+}}, {{[0-9]+}}, 124
-  %0 = load i32** %addr, align 4
+  %0 = load i32*, i32** %addr, align 4
   ret i32* %0
 }
 
@@ -23,7 +23,7 @@ entry:
   %addr = alloca i32*, align 4
   store i32* getelementptr inbounds ([3 x [3 x %struct.A]]* @A, i32 0, i32 2, i32 2, i32 3, i32 1, i32 2, i32 2), i32** %addr, align 4
 ; ELF64: addi {{[0-9]+}}, {{[0-9]+}}, 1148
-  %0 = load i32** %addr, align 4
+  %0 = load i32*, i32** %addr, align 4
   ret i32* %0
 }
 
@@ -33,7 +33,7 @@ entry:
   %addr = alloca i32*, align 4
   store i32* getelementptr inbounds ([3 x [3 x %struct.A]]* @A, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1), i32** %addr, align 4
 ; ELF64: addi {{[0-9]+}}, {{[0-9]+}}, 140
-  %0 = load i32** %addr, align 4
+  %0 = load i32*, i32** %addr, align 4
   ret i32* %0
 }
 
@@ -43,6 +43,6 @@ entry:
   %addr = alloca i32*, align 4
   store i32* getelementptr inbounds ([2 x [2 x [2 x %struct.B]]]* @B, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 3, i32 1, i32 2, i32 1), i32** %addr, align 4
 ; ELF64: addi {{[0-9]+}}, {{[0-9]+}}, 1284
-  %0 = load i32** %addr, align 4
+  %0 = load i32*, i32** %addr, align 4
   ret i32* %0
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/fast-isel-call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/fast-isel-call.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/fast-isel-call.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/fast-isel-call.ll Fri Feb 27 15:17:42 2015
@@ -85,7 +85,7 @@ define i32 @bar0(i32 %i) nounwind {
 ;define void @foo3() uwtable {
 ;  %fptr = alloca i32 (i32)*, align 8
 ;  store i32 (i32)* @bar0, i32 (i32)** %fptr, align 8
-;  %1 = load i32 (i32)** %fptr, align 8
+;  %1 = load i32 (i32)*, i32 (i32)** %fptr, align 8
 ;  %call = call i32 %1(i32 0)
 ;  ret void
 ;}

Modified: llvm/trunk/test/CodeGen/PowerPC/fast-isel-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/fast-isel-fold.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/fast-isel-fold.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/fast-isel-fold.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 
 define void @t1() nounwind uwtable ssp {
 ; ELF64: t1
-  %1 = load i8* @a, align 1
+  %1 = load i8, i8* @a, align 1
   call void @foo1(i8 zeroext %1)
 ; ELF64: lbz
 ; ELF64-NOT: rldicl
@@ -16,7 +16,7 @@ define void @t1() nounwind uwtable ssp {
 
 define void @t2() nounwind uwtable ssp {
 ; ELF64: t2
-  %1 = load i16* @b, align 2
+  %1 = load i16, i16* @b, align 2
   call void @foo2(i16 zeroext %1)
 ; ELF64: lhz
 ; ELF64-NOT: rldicl
@@ -26,7 +26,7 @@ define void @t2() nounwind uwtable ssp {
 
 define void @t2a() nounwind uwtable ssp {
 ; ELF64: t2a
-  %1 = load i32* @c, align 4
+  %1 = load i32, i32* @c, align 4
   call void @foo3(i32 zeroext %1)
 ; ELF64: lwz
 ; ELF64-NOT: rldicl
@@ -40,7 +40,7 @@ declare void @foo3(i32 zeroext)
 
 define i32 @t3() nounwind uwtable ssp {
 ; ELF64: t3
-  %1 = load i8* @a, align 1
+  %1 = load i8, i8* @a, align 1
   %2 = zext i8 %1 to i32
 ; ELF64: lbz
 ; ELF64-NOT: rlwinm
@@ -49,7 +49,7 @@ define i32 @t3() nounwind uwtable ssp {
 
 define i32 @t4() nounwind uwtable ssp {
 ; ELF64: t4
-  %1 = load i16* @b, align 2
+  %1 = load i16, i16* @b, align 2
   %2 = zext i16 %1 to i32
 ; ELF64: lhz
 ; ELF64-NOT: rlwinm
@@ -58,7 +58,7 @@ define i32 @t4() nounwind uwtable ssp {
 
 define i32 @t5() nounwind uwtable ssp {
 ; ELF64: t5
-  %1 = load i16* @b, align 2
+  %1 = load i16, i16* @b, align 2
   %2 = sext i16 %1 to i32
 ; ELF64: lha
 ; ELF64-NOT: rlwinm
@@ -67,7 +67,7 @@ define i32 @t5() nounwind uwtable ssp {
 
 define i32 @t6() nounwind uwtable ssp {
 ; ELF64: t6
-  %1 = load i8* @a, align 2
+  %1 = load i8, i8* @a, align 2
   %2 = sext i8 %1 to i32
 ; ELF64: lbz
 ; ELF64-NOT: rlwinm
@@ -76,7 +76,7 @@ define i32 @t6() nounwind uwtable ssp {
 
 define i64 @t7() nounwind uwtable ssp {
 ; ELF64: t7
-  %1 = load i8* @a, align 1
+  %1 = load i8, i8* @a, align 1
   %2 = zext i8 %1 to i64
 ; ELF64: lbz
 ; ELF64-NOT: rldicl
@@ -85,7 +85,7 @@ define i64 @t7() nounwind uwtable ssp {
 
 define i64 @t8() nounwind uwtable ssp {
 ; ELF64: t8
-  %1 = load i16* @b, align 2
+  %1 = load i16, i16* @b, align 2
   %2 = zext i16 %1 to i64
 ; ELF64: lhz
 ; ELF64-NOT: rldicl
@@ -94,7 +94,7 @@ define i64 @t8() nounwind uwtable ssp {
 
 define i64 @t9() nounwind uwtable ssp {
 ; ELF64: t9
-  %1 = load i16* @b, align 2
+  %1 = load i16, i16* @b, align 2
   %2 = sext i16 %1 to i64
 ; ELF64: lha
 ; ELF64-NOT: extsh
@@ -103,7 +103,7 @@ define i64 @t9() nounwind uwtable ssp {
 
 define i64 @t10() nounwind uwtable ssp {
 ; ELF64: t10
-  %1 = load i8* @a, align 2
+  %1 = load i8, i8* @a, align 2
   %2 = sext i8 %1 to i64
 ; ELF64: lbz
 ; ELF64: extsb
@@ -112,7 +112,7 @@ define i64 @t10() nounwind uwtable ssp {
 
 define i64 @t11() nounwind uwtable ssp {
 ; ELF64: t11
-  %1 = load i32* @c, align 4
+  %1 = load i32, i32* @c, align 4
   %2 = zext i32 %1 to i64
 ; ELF64: lwz
 ; ELF64-NOT: rldicl
@@ -121,7 +121,7 @@ define i64 @t11() nounwind uwtable ssp {
 
 define i64 @t12() nounwind uwtable ssp {
 ; ELF64: t12
-  %1 = load i32* @c, align 4
+  %1 = load i32, i32* @c, align 4
   %2 = sext i32 %1 to i64
 ; ELF64: lwa
 ; ELF64-NOT: extsw

Modified: llvm/trunk/test/CodeGen/PowerPC/fast-isel-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/fast-isel-load-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/fast-isel-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/fast-isel-load-store.ll Fri Feb 27 15:17:42 2015
@@ -26,7 +26,7 @@
 
 define i8 @t1() nounwind uwtable ssp {
 ; ELF64: t1
-  %1 = load i8* @a, align 1
+  %1 = load i8, i8* @a, align 1
 ; ELF64: lbz
   %2 = add nsw i8 %1, 1
 ; ELF64: addi
@@ -35,7 +35,7 @@ define i8 @t1() nounwind uwtable ssp {
 
 define i16 @t2() nounwind uwtable ssp {
 ; ELF64: t2
-  %1 = load i16* @b, align 2
+  %1 = load i16, i16* @b, align 2
 ; ELF64: lhz
   %2 = add nsw i16 %1, 1
 ; ELF64: addi
@@ -44,7 +44,7 @@ define i16 @t2() nounwind uwtable ssp {
 
 define i32 @t3() nounwind uwtable ssp {
 ; ELF64: t3
-  %1 = load i32* @c, align 4
+  %1 = load i32, i32* @c, align 4
 ; ELF64: lwz
   %2 = add nsw i32 %1, 1
 ; ELF64: addi
@@ -53,7 +53,7 @@ define i32 @t3() nounwind uwtable ssp {
 
 define i64 @t4() nounwind uwtable ssp {
 ; ELF64: t4
-  %1 = load i64* @d, align 4
+  %1 = load i64, i64* @d, align 4
 ; ELF64: ld
   %2 = add nsw i64 %1, 1
 ; ELF64: addi
@@ -62,7 +62,7 @@ define i64 @t4() nounwind uwtable ssp {
 
 define float @t5() nounwind uwtable ssp {
 ; ELF64: t5
-  %1 = load float* @e, align 4
+  %1 = load float, float* @e, align 4
 ; ELF64: lfs
   %2 = fadd float %1, 1.0
 ; ELF64: fadds
@@ -71,7 +71,7 @@ define float @t5() nounwind uwtable ssp
 
 define double @t6() nounwind uwtable ssp {
 ; ELF64: t6
-  %1 = load double* @f, align 8
+  %1 = load double, double* @f, align 8
 ; ELF64: lfd
   %2 = fadd double %1, 1.0
 ; ELF64: fadd
@@ -145,7 +145,7 @@ define void @t12(double %v) nounwind uwt
 ;; lwa requires an offset divisible by 4, so we need lwax here.
 define i64 @t13() nounwind uwtable ssp {
 ; ELF64: t13
-  %1 = load i32* getelementptr inbounds (%struct.s* @g, i32 0, i32 1), align 1
+  %1 = load i32, i32* getelementptr inbounds (%struct.s* @g, i32 0, i32 1), align 1
   %2 = sext i32 %1 to i64
 ; ELF64: li
 ; ELF64: lwax
@@ -157,7 +157,7 @@ define i64 @t13() nounwind uwtable ssp {
 ;; ld requires an offset divisible by 4, so we need ldx here.
 define i64 @t14() nounwind uwtable ssp {
 ; ELF64: t14
-  %1 = load i64* getelementptr inbounds (%struct.t* @h, i32 0, i32 1), align 1
+  %1 = load i64, i64* getelementptr inbounds (%struct.t* @h, i32 0, i32 1), align 1
 ; ELF64: li
 ; ELF64: ldx
   %2 = add nsw i64 %1, 1
@@ -181,7 +181,7 @@ define void @t15(i64 %v) nounwind uwtabl
 ;; ld requires an offset that fits in 16 bits, so we need ldx here.
 define i64 @t16() nounwind uwtable ssp {
 ; ELF64: t16
-  %1 = load i64* getelementptr inbounds ([8192 x i64]* @i, i32 0, i64 5000), align 8
+  %1 = load i64, i64* getelementptr inbounds ([8192 x i64]* @i, i32 0, i64 5000), align 8
 ; ELF64: lis
 ; ELF64: ori
 ; ELF64: ldx

Modified: llvm/trunk/test/CodeGen/PowerPC/fast-isel-redefinition.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/fast-isel-redefinition.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/fast-isel-redefinition.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/fast-isel-redefinition.ll Fri Feb 27 15:17:42 2015
@@ -5,6 +5,6 @@
 
 define i32 @f(i32* %x) nounwind ssp {
   %y = getelementptr inbounds i32, i32* %x, i32 5000
-  %tmp103 = load i32* %y, align 4
+  %tmp103 = load i32, i32* %y, align 4
   ret i32 %tmp103
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll Fri Feb 27 15:17:42 2015
@@ -6,12 +6,12 @@ define zeroext i8 @gep_promotion(i8* %pt
 entry:
   %ptr.addr = alloca i8*, align 8
   %add = add i8 64, 64 ; 0x40 + 0x40
-  %0 = load i8** %ptr.addr, align 8
+  %0 = load i8*, i8** %ptr.addr, align 8
 
   ; CHECK-LABEL: gep_promotion:
   ; CHECK: lbz {{[0-9]+}}, 0({{.*}})
   %arrayidx = getelementptr inbounds i8, i8* %0, i8 %add
 
-  %1 = load i8* %arrayidx, align 1
+  %1 = load i8, i8* %arrayidx, align 1
   ret i8 %1
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/floatPSA.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/floatPSA.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/floatPSA.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/floatPSA.ll Fri Feb 27 15:17:42 2015
@@ -37,7 +37,7 @@ entry:
   store float %l, float* %l.addr, align 4
   store float %m, float* %m.addr, align 4
   store float %n, float* %n.addr, align 4
-  %0 = load float* %n.addr, align 4
+  %0 = load float, float* %n.addr, align 4
   ret float %0
 }
 
@@ -73,20 +73,20 @@ entry:
   store float 1.200000e+01, float* %l, align 4
   store float 1.300000e+01, float* %m, align 4
   store float 1.400000e+01, float* %n, align 4
-  %0 = load float* %a, align 4
-  %1 = load float* %b, align 4
-  %2 = load float* %c, align 4
-  %3 = load float* %d, align 4
-  %4 = load float* %e, align 4
-  %5 = load float* %f, align 4
-  %6 = load float* %g, align 4
-  %7 = load float* %h, align 4
-  %8 = load float* %i, align 4
-  %9 = load float* %j, align 4
-  %10 = load float* %k, align 4
-  %11 = load float* %l, align 4
-  %12 = load float* %m, align 4
-  %13 = load float* %n, align 4
+  %0 = load float, float* %a, align 4
+  %1 = load float, float* %b, align 4
+  %2 = load float, float* %c, align 4
+  %3 = load float, float* %d, align 4
+  %4 = load float, float* %e, align 4
+  %5 = load float, float* %f, align 4
+  %6 = load float, float* %g, align 4
+  %7 = load float, float* %h, align 4
+  %8 = load float, float* %i, align 4
+  %9 = load float, float* %j, align 4
+  %10 = load float, float* %k, align 4
+  %11 = load float, float* %l, align 4
+  %12 = load float, float* %m, align 4
+  %13 = load float, float* %n, align 4
   %call = call float @bar(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8, float %9, float %10, float %11, float %12, float %13)
   ret float %call
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/flt-preinc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/flt-preinc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/flt-preinc.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/flt-preinc.ll Fri Feb 27 15:17:42 2015
@@ -7,10 +7,10 @@ define float @tf(float* nocapture readon
 entry:
   %idx.ext = sext i32 %o to i64
   %add.ptr = getelementptr inbounds float, float* %i, i64 %idx.ext
-  %0 = load float* %add.ptr, align 4
+  %0 = load float, float* %add.ptr, align 4
   %add.ptr.sum = add nsw i64 %idx.ext, 1
   %add.ptr3 = getelementptr inbounds float, float* %i, i64 %add.ptr.sum
-  %1 = load float* %add.ptr3, align 4
+  %1 = load float, float* %add.ptr3, align 4
   %add = fadd float %0, %1
   ret float %add
 
@@ -24,10 +24,10 @@ define double @td(double* nocapture read
 entry:
   %idx.ext = sext i32 %o to i64
   %add.ptr = getelementptr inbounds double, double* %i, i64 %idx.ext
-  %0 = load double* %add.ptr, align 8
+  %0 = load double, double* %add.ptr, align 8
   %add.ptr.sum = add nsw i64 %idx.ext, 1
   %add.ptr3 = getelementptr inbounds double, double* %i, i64 %add.ptr.sum
-  %1 = load double* %add.ptr3, align 8
+  %1 = load double, double* %add.ptr3, align 8
   %add = fadd double %0, %1
   ret double %add
 

Modified: llvm/trunk/test/CodeGen/PowerPC/fp-to-int-ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/fp-to-int-ext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/fp-to-int-ext.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/fp-to-int-ext.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ target triple = "powerpc64-unknown-linux
 ; Function Attrs: nounwind
 define double @foo1(i32* %x) #0 {
 entry:
-  %0 = load i32* %x, align 4
+  %0 = load i32, i32* %x, align 4
   %conv = sext i32 %0 to i64
   %conv1 = sitofp i64 %conv to double
   ret double %conv1
@@ -18,7 +18,7 @@ entry:
 
 define double @foo2(i32* %x) #0 {
 entry:
-  %0 = load i32* %x, align 4
+  %0 = load i32, i32* %x, align 4
   %conv = zext i32 %0 to i64
   %conv1 = sitofp i64 %conv to double
   ret double %conv1
@@ -31,7 +31,7 @@ entry:
 
 define double @foo3(i32* %x) #0 {
 entry:
-  %0 = load i32* %x, align 4
+  %0 = load i32, i32* %x, align 4
   %1 = add i32 %0, 8
   %conv = zext i32 %1 to i64
   %conv1 = sitofp i64 %conv to double
@@ -49,7 +49,7 @@ entry:
 
 define double @foo4(i32* %x) #0 {
 entry:
-  %0 = load i32* %x, align 4
+  %0 = load i32, i32* %x, align 4
   %1 = add i32 %0, 8
   %conv = sext i32 %1 to i64
   %conv1 = sitofp i64 %conv to double

Modified: llvm/trunk/test/CodeGen/PowerPC/frounds.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/frounds.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/frounds.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/frounds.ll Fri Feb 27 15:17:42 2015
@@ -7,12 +7,12 @@ entry:
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
 	%tmp1 = call i32 @llvm.flt.rounds( )		; <i32> [#uses=1]
 	store i32 %tmp1, i32* %tmp, align 4
-	%tmp2 = load i32* %tmp, align 4		; <i32> [#uses=1]
+	%tmp2 = load i32, i32* %tmp, align 4		; <i32> [#uses=1]
 	store i32 %tmp2, i32* %retval, align 4
 	br label %return
 
 return:		; preds = %entry
-	%retval3 = load i32* %retval		; <i32> [#uses=1]
+	%retval3 = load i32, i32* %retval		; <i32> [#uses=1]
 	ret i32 %retval3
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/glob-comp-aa-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/glob-comp-aa-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/glob-comp-aa-crash.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/glob-comp-aa-crash.ll Fri Feb 27 15:17:42 2015
@@ -65,12 +65,12 @@ lpad3:
   br label %ehcleanup
 
 if.end:                                           ; preds = %invoke.cont
-  %7 = load i8* %__owns_.i.i, align 8, !tbaa !6, !range !4
+  %7 = load i8, i8* %__owns_.i.i, align 8, !tbaa !6, !range !4
   %tobool.i.i = icmp eq i8 %7, 0
   br i1 %tobool.i.i, label %_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit, label %if.then.i.i
 
 if.then.i.i:                                      ; preds = %if.end
-  %8 = load %"class.std::__1::mutex"** %__m_.i.i, align 8, !tbaa !5
+  %8 = load %"class.std::__1::mutex"*, %"class.std::__1::mutex"** %__m_.i.i, align 8, !tbaa !5
   call void @_ZNSt3__15mutex6unlockEv(%"class.std::__1::mutex"* %8) #5
   br label %_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit
 
@@ -80,12 +80,12 @@ _ZNSt3__111unique_lockINS_5mutexEED1Ev.e
 ehcleanup:                                        ; preds = %lpad3, %lpad
   %exn.slot.0 = phi i8* [ %5, %lpad3 ], [ %2, %lpad ]
   %ehselector.slot.0 = phi i32 [ %6, %lpad3 ], [ %3, %lpad ]
-  %9 = load i8* %__owns_.i.i, align 8, !tbaa !6, !range !4
+  %9 = load i8, i8* %__owns_.i.i, align 8, !tbaa !6, !range !4
   %tobool.i.i9 = icmp eq i8 %9, 0
   br i1 %tobool.i.i9, label %_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit12, label %if.then.i.i11
 
 if.then.i.i11:                                    ; preds = %ehcleanup
-  %10 = load %"class.std::__1::mutex"** %__m_.i.i, align 8, !tbaa !5
+  %10 = load %"class.std::__1::mutex"*, %"class.std::__1::mutex"** %__m_.i.i, align 8, !tbaa !5
   call void @_ZNSt3__15mutex6unlockEv(%"class.std::__1::mutex"* %10) #5
   br label %_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit12
 

Modified: llvm/trunk/test/CodeGen/PowerPC/hidden-vis-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/hidden-vis-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/hidden-vis-2.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/hidden-vis-2.ll Fri Feb 27 15:17:42 2015
@@ -5,8 +5,8 @@
 
 define i32 @t() nounwind readonly {
 entry:
-	%0 = load i32* @x, align 4		; <i32> [#uses=1]
-	%1 = load i32* @y, align 4		; <i32> [#uses=1]
+	%0 = load i32, i32* @x, align 4		; <i32> [#uses=1]
+	%1 = load i32, i32* @y, align 4		; <i32> [#uses=1]
 	%2 = add i32 %1, %0		; <i32> [#uses=1]
 	ret i32 %2
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/hidden-vis.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/hidden-vis.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/hidden-vis.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/hidden-vis.ll Fri Feb 27 15:17:42 2015
@@ -4,6 +4,6 @@
 
 define i32 @t() nounwind readonly {
 entry:
-	%0 = load i32* @x, align 4		; <i32> [#uses=1]
+	%0 = load i32, i32* @x, align 4		; <i32> [#uses=1]
 	ret i32 %0
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/ia-mem-r0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ia-mem-r0.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ia-mem-r0.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ia-mem-r0.ll Fri Feb 27 15:17:42 2015
@@ -84,10 +84,10 @@ define void @test1({ i8*, void (i8*, i8*
   %52 = bitcast i8* %51 to i64*
   call void asm sideeffect "std  31, $0", "=*m"(i64* %52)
   %53 = getelementptr { i8*, void (i8*, i8*)* }, { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 1
-  %.funcptr = load void (i8*, i8*)** %53
+  %.funcptr = load void (i8*, i8*)*, void (i8*, i8*)** %53
   %54 = getelementptr { i8*, void (i8*, i8*)* }, { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 0
-  %.ptr = load i8** %54
-  %55 = load i8** %sp
+  %.ptr = load i8*, i8** %54
+  %55 = load i8*, i8** %sp
   call void %.funcptr(i8* %.ptr, i8* %55)
   ret void
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/indexed-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/indexed-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/indexed-load.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/indexed-load.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ define void @f(%class.test* %this) {
 entry:
   %Subminor.i.i = getelementptr inbounds %class.test, %class.test* %this, i64 0, i32 1
   %0 = bitcast [5 x i8]* %Subminor.i.i to i40*
-  %bf.load2.i.i = load i40* %0, align 4
+  %bf.load2.i.i = load i40, i40* %0, align 4
   %bf.clear7.i.i = and i40 %bf.load2.i.i, -8589934592
   store i40 %bf.clear7.i.i, i40* %0, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/PowerPC/indirectbr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/indirectbr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/indirectbr.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/indirectbr.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ define internal i32 @foo(i32 %i) nounwin
 ; STATIC-LABEL: foo:
 ; PPC64-LABEL: foo:
 entry:
-  %0 = load i8** @nextaddr, align 4               ; <i8*> [#uses=2]
+  %0 = load i8*, i8** @nextaddr, align 4               ; <i8*> [#uses=2]
   %1 = icmp eq i8* %0, null                       ; <i1> [#uses=1]
   br i1 %1, label %bb3, label %bb2
 
@@ -38,7 +38,7 @@ bb2:
 
 bb3:                                              ; preds = %entry
   %2 = getelementptr inbounds [5 x i8*], [5 x i8*]* @C.0.2070, i32 0, i32 %i ; <i8**> [#uses=1]
-  %gotovar.4.0.pre = load i8** %2, align 4        ; <i8*> [#uses=1]
+  %gotovar.4.0.pre = load i8*, i8** %2, align 4        ; <i8*> [#uses=1]
   br label %bb2
 
 L5:                                               ; preds = %bb2

Modified: llvm/trunk/test/CodeGen/PowerPC/inlineasm-i64-reg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/inlineasm-i64-reg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/inlineasm-i64-reg.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/inlineasm-i64-reg.ll Fri Feb 27 15:17:42 2015
@@ -19,18 +19,18 @@ entry:
   store %struct.BG_CoordinateMapping_t* %map, %struct.BG_CoordinateMapping_t** %map.addr, align 8
   store i64* %numentries, i64** %numentries.addr, align 8
   store i64 1055, i64* %r0, align 8
-  %0 = load i64* %mapsize.addr, align 8
+  %0 = load i64, i64* %mapsize.addr, align 8
   store i64 %0, i64* %r3, align 8
-  %1 = load %struct.BG_CoordinateMapping_t** %map.addr, align 8
+  %1 = load %struct.BG_CoordinateMapping_t*, %struct.BG_CoordinateMapping_t** %map.addr, align 8
   %2 = ptrtoint %struct.BG_CoordinateMapping_t* %1 to i64
   store i64 %2, i64* %r4, align 8
-  %3 = load i64** %numentries.addr, align 8
+  %3 = load i64*, i64** %numentries.addr, align 8
   %4 = ptrtoint i64* %3 to i64
   store i64 %4, i64* %r5, align 8
-  %5 = load i64* %r0, align 8
-  %6 = load i64* %r3, align 8
-  %7 = load i64* %r4, align 8
-  %8 = load i64* %r5, align 8
+  %5 = load i64, i64* %r0, align 8
+  %6 = load i64, i64* %r3, align 8
+  %7 = load i64, i64* %r4, align 8
+  %8 = load i64, i64* %r5, align 8
   %9 = call { i64, i64, i64, i64 } asm sideeffect "sc", "={r0},={r3},={r4},={r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 %5, i64 %6, i64 %7, i64 %8) #1, !srcloc !0
 
 ; CHECK-LABEL: @Kernel_RanksToCoords
@@ -52,9 +52,9 @@ entry:
   store i64 %asmresult1, i64* %r3, align 8
   store i64 %asmresult2, i64* %r4, align 8
   store i64 %asmresult3, i64* %r5, align 8
-  %10 = load i64* %r3, align 8
+  %10 = load i64, i64* %r3, align 8
   store i64 %10, i64* %tmp
-  %11 = load i64* %tmp
+  %11 = load i64, i64* %tmp
   %conv = trunc i64 %11 to i32
   ret i32 %conv
 }
@@ -87,7 +87,7 @@ entry:
 
 if.then:                                          ; preds = %entry
   call void @mtrace()
-  %.pre = load i32* %argc.addr, align 4
+  %.pre = load i32, i32* %argc.addr, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry

Modified: llvm/trunk/test/CodeGen/PowerPC/isel-rc-nox0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/isel-rc-nox0.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/isel-rc-nox0.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/isel-rc-nox0.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ crc32_gentab.exit:
 
 for.cond1.preheader.i2961.i:                      ; preds = %for.inc44.i2977.i, %crc32_gentab.exit
   call void @llvm.memset.p0i8.i64(i8* bitcast ([1 x [9 x i32]]* @g_62 to i8*), i8 -1, i64 36, i32 4, i1 false) #1
-  %0 = load i32* %retval.0.i.i.i, align 4
+  %0 = load i32, i32* %retval.0.i.i.i, align 4
   %tobool.i2967.i = icmp eq i32 %0, 0
   br label %for.body21.i2968.i
 

Modified: llvm/trunk/test/CodeGen/PowerPC/lbz-from-ld-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/lbz-from-ld-shift.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/lbz-from-ld-shift.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/lbz-from-ld-shift.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ target triple = "powerpc64-unknown-linux
 ; Function Attrs: nounwind readonly
 define signext i32 @test(i32* nocapture readonly %P) #0 {
 entry:
-  %0 = load i32* %P, align 4
+  %0 = load i32, i32* %P, align 4
   %shr = lshr i32 %0, 24
   ret i32 %shr
 

Modified: llvm/trunk/test/CodeGen/PowerPC/lbzux.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/lbzux.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/lbzux.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/lbzux.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ target triple = "powerpc64-unknown-linux
 
 define fastcc void @allocateSpace(i1 %cond1, i1 %cond2) nounwind {
 entry:
-  %0 = load i8** undef, align 8
+  %0 = load i8*, i8** undef, align 8
   br i1 undef, label %return, label %lor.lhs.false
 
 lor.lhs.false:                                    ; preds = %entry
@@ -30,7 +30,7 @@ if.then45:
   %arrayidx49 = getelementptr inbounds i8, i8* %0, i64 %idxprom48139
   %1 = bitcast i8* %arrayidx49 to i16*
   %2 = bitcast i8* %arrayidx18 to i16*
-  %3 = load i16* %1, align 1
+  %3 = load i16, i16* %1, align 1
   store i16 %3, i16* %2, align 1
   br label %return
 

Modified: llvm/trunk/test/CodeGen/PowerPC/ld-st-upd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ld-st-upd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ld-st-upd.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ld-st-upd.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ target triple = "powerpc-unknown-linux-g
 ; Function Attrs: nounwind
 define i32* @test4(i32* readonly %X, i32* nocapture %dest) #0 {
   %Y = getelementptr i32, i32* %X, i64 4
-  %A = load i32* %Y, align 4
+  %A = load i32, i32* %Y, align 4
   store i32 %A, i32* %dest, align 4
   ret i32* %Y
 

Modified: llvm/trunk/test/CodeGen/PowerPC/ldtoc-inv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ldtoc-inv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ldtoc-inv.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ldtoc-inv.ll Fri Feb 27 15:17:42 2015
@@ -24,7 +24,7 @@ for.body:
   %idxprom2 = sext i32 %shl1 to i64
   %arrayidx.sum = add nsw i64 %idxprom2, %idxprom
   %arrayidx3 = getelementptr inbounds [4096 x i32], [4096 x i32]* @phasor, i64 0, i64 %arrayidx.sum
-  %1 = load i32* %arrayidx3, align 4
+  %1 = load i32, i32* %arrayidx3, align 4
   %arrayidx5 = getelementptr inbounds i32, i32* %out, i64 %indvars.iv
   store i32 %1, i32* %arrayidx5, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 4

Modified: llvm/trunk/test/CodeGen/PowerPC/lha.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/lha.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/lha.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/lha.ll Fri Feb 27 15:17:42 2015
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=ppc32 | grep lha
 
 define i32 @test(i16* %a) {
-        %tmp.1 = load i16* %a           ; <i16> [#uses=1]
+        %tmp.1 = load i16, i16* %a           ; <i16> [#uses=1]
         %tmp.2 = sext i16 %tmp.1 to i32         ; <i32> [#uses=1]
         ret i32 %tmp.2
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/load-constant-addr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/load-constant-addr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/load-constant-addr.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/load-constant-addr.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 ; RUN: llc < %s -march=ppc32 | not grep ori
 
 define float @test() {
-        %tmp.i = load float* inttoptr (i32 186018016 to float*)         ; <float> [#uses=1]
+        %tmp.i = load float, float* inttoptr (i32 186018016 to float*)         ; <float> [#uses=1]
         ret float %tmp.i
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/load-shift-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/load-shift-combine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/load-shift-combine.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/load-shift-combine.ll Fri Feb 27 15:17:42 2015
@@ -16,17 +16,17 @@
 define void @test1847() nounwind {
 entry:
   %j = alloca i32, align 4
-  %0 = load i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 8), align 8
-  %1 = load i32* @fails, align 4
-  %bf.load1 = load i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
+  %0 = load i64, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 8), align 8
+  %1 = load i32, i32* @fails, align 4
+  %bf.load1 = load i96, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
   %bf.clear2 = and i96 %bf.load1, 302231454903657293676543
   %bf.set3 = or i96 %bf.clear2, -38383394772764476296921088
   store i96 %bf.set3, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
-  %2 = load i32* %j, align 4
-  %3 = load i32* %j, align 4
+  %2 = load i32, i32* %j, align 4
+  %3 = load i32, i32* %j, align 4
   %inc11 = add nsw i32 %3, 1
   store i32 %inc11, i32* %j, align 4
-  %bf.load15 = load i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
+  %bf.load15 = load i96, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
   %bf.clear16 = and i96 %bf.load15, -18446744069414584321
   %bf.set17 = or i96 %bf.clear16, 18446743532543672320
   store i96 %bf.set17, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8

Modified: llvm/trunk/test/CodeGen/PowerPC/loop-data-prefetch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/loop-data-prefetch.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/loop-data-prefetch.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/loop-data-prefetch.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds double, double* %b, i64 %indvars.iv
-  %0 = load double* %arrayidx, align 8
+  %0 = load double, double* %arrayidx, align 8
   %add = fadd double %0, 1.000000e+00
   %arrayidx2 = getelementptr inbounds double, double* %a, i64 %indvars.iv
   store double %add, double* %arrayidx2, align 8

Modified: llvm/trunk/test/CodeGen/PowerPC/lsa.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/lsa.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/lsa.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/lsa.ll Fri Feb 27 15:17:42 2015
@@ -17,9 +17,9 @@ entry:
   %arraydecay1 = getelementptr inbounds [8200 x i32], [8200 x i32]* %v, i64 0, i64 0
   %arraydecay2 = getelementptr inbounds [8200 x i32], [8200 x i32]* %w, i64 0, i64 0
   call void @bar(i32* %arraydecay, i32* %arraydecay1, i32* %arraydecay2) #0
-  %3 = load i32* %arraydecay2, align 4
+  %3 = load i32, i32* %arraydecay2, align 4
   %arrayidx3 = getelementptr inbounds [8200 x i32], [8200 x i32]* %w, i64 0, i64 1
-  %4 = load i32* %arrayidx3, align 4
+  %4 = load i32, i32* %arrayidx3, align 4
 
 ; CHECK: @foo
 ; CHECK-NOT: lwzx

Modified: llvm/trunk/test/CodeGen/PowerPC/lsr-postinc-pos.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/lsr-postinc-pos.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/lsr-postinc-pos.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/lsr-postinc-pos.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 ; The icmp is a post-inc use, and the increment is in %bb11, but the
 ; scevgep needs to be inserted in %bb so that it is dominated by %t.
 
-; CHECK: %t = load i8** undef
+; CHECK: %t = load i8*, i8** undef
 ; CHECK: %scevgep = getelementptr i8, i8* %t, i32 %lsr.iv.next
 ; CHECK: %c1 = icmp ult i8* %scevgep, undef
 
@@ -21,7 +21,7 @@ bb11:
   br i1 %c0, label %bb13, label %bb
 
 bb:
-  %t = load i8** undef, align 16                ; <i8*> [#uses=1]
+  %t = load i8*, i8** undef, align 16                ; <i8*> [#uses=1]
   %p = getelementptr i8, i8* %t, i32 %ii ; <i8*> [#uses=1]
   %c1 = icmp ult i8* %p, undef          ; <i1> [#uses=1]
   %i.next = add i32 %i, 1                        ; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/PowerPC/mask64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mask64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mask64.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mask64.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@ entry:
 	br i1 false, label %bb16, label %bb49
 
 bb16:		; preds = %entry
-	%tmp19 = load i8** null, align 1		; <i8*> [#uses=1]
-	%tmp21 = load i8* %tmp19, align 1		; <i8> [#uses=1]
+	%tmp19 = load i8*, i8** null, align 1		; <i8*> [#uses=1]
+	%tmp21 = load i8, i8* %tmp19, align 1		; <i8> [#uses=1]
 	switch i8 %tmp21, label %bb49 [
 		 i8 0, label %bb45
 		 i8 1, label %bb34

Modified: llvm/trunk/test/CodeGen/PowerPC/mcm-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mcm-1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mcm-1.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mcm-1.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux
 
 define signext i32 @test_external() nounwind {
 entry:
-  %0 = load i32* @ei, align 4
+  %0 = load i32, i32* @ei, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @ei, align 4
   ret i32 %0

Modified: llvm/trunk/test/CodeGen/PowerPC/mcm-10.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mcm-10.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mcm-10.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mcm-10.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ target triple = "powerpc64-unknown-linux
 
 define signext i32 @test_fn_static() nounwind {
 entry:
-  %0 = load i32* @test_fn_static.si, align 4
+  %0 = load i32, i32* @test_fn_static.si, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @test_fn_static.si, align 4
   ret i32 %0

Modified: llvm/trunk/test/CodeGen/PowerPC/mcm-11.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mcm-11.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mcm-11.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mcm-11.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ target triple = "powerpc64-unknown-linux
 
 define signext i32 @test_file_static() nounwind {
 entry:
-  %0 = load i32* @gi, align 4
+  %0 = load i32, i32* @gi, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @gi, align 4
   ret i32 %0

Modified: llvm/trunk/test/CodeGen/PowerPC/mcm-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mcm-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mcm-2.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mcm-2.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux
 
 define signext i32 @test_fn_static() nounwind {
 entry:
-  %0 = load i32* @test_fn_static.si, align 4
+  %0 = load i32, i32* @test_fn_static.si, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @test_fn_static.si, align 4
   ret i32 %0

Modified: llvm/trunk/test/CodeGen/PowerPC/mcm-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mcm-3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mcm-3.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mcm-3.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux
 
 define signext i32 @test_file_static() nounwind {
 entry:
-  %0 = load i32* @gi, align 4
+  %0 = load i32, i32* @gi, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @gi, align 4
   ret i32 %0

Modified: llvm/trunk/test/CodeGen/PowerPC/mcm-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mcm-5.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mcm-5.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mcm-5.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ define signext i32 @test_jump_table(i32
 entry:
   %i.addr = alloca i32, align 4
   store i32 %i, i32* %i.addr, align 4
-  %0 = load i32* %i.addr, align 4
+  %0 = load i32, i32* %i.addr, align 4
   switch i32 %0, label %sw.default [
     i32 3, label %sw.bb
     i32 4, label %sw.bb1
@@ -23,31 +23,31 @@ sw.default:
   br label %sw.epilog
 
 sw.bb:                                            ; preds = %entry
-  %1 = load i32* %i.addr, align 4
+  %1 = load i32, i32* %i.addr, align 4
   %mul = mul nsw i32 %1, 7
   store i32 %mul, i32* %i.addr, align 4
   br label %sw.bb1
 
 sw.bb1:                                           ; preds = %entry, %sw.bb
-  %2 = load i32* %i.addr, align 4
+  %2 = load i32, i32* %i.addr, align 4
   %dec = add nsw i32 %2, -1
   store i32 %dec, i32* %i.addr, align 4
   br label %sw.bb2
 
 sw.bb2:                                           ; preds = %entry, %sw.bb1
-  %3 = load i32* %i.addr, align 4
+  %3 = load i32, i32* %i.addr, align 4
   %add = add nsw i32 %3, 3
   store i32 %add, i32* %i.addr, align 4
   br label %sw.bb3
 
 sw.bb3:                                           ; preds = %entry, %sw.bb2
-  %4 = load i32* %i.addr, align 4
+  %4 = load i32, i32* %i.addr, align 4
   %shl = shl i32 %4, 1
   store i32 %shl, i32* %i.addr, align 4
   br label %sw.epilog
 
 sw.epilog:                                        ; preds = %sw.bb3, %sw.default
-  %5 = load i32* %i.addr, align 4
+  %5 = load i32, i32* %i.addr, align 4
   ret i32 %5
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/mcm-6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mcm-6.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mcm-6.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mcm-6.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux
 
 define signext i32 @test_tentative() nounwind {
 entry:
-  %0 = load i32* @ti, align 4
+  %0 = load i32, i32* @ti, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @ti, align 4
   ret i32 %0

Modified: llvm/trunk/test/CodeGen/PowerPC/mcm-7.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mcm-7.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mcm-7.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mcm-7.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ define i8* @test_fnaddr() nounwind {
 entry:
   %func = alloca i32 (i32)*, align 8
   store i32 (i32)* @foo, i32 (i32)** %func, align 8
-  %0 = load i32 (i32)** %func, align 8
+  %0 = load i32 (i32)*, i32 (i32)** %func, align 8
   %1 = bitcast i32 (i32)* %0 to i8*
   ret i8* %1
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/mcm-8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mcm-8.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mcm-8.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mcm-8.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ target triple = "powerpc64-unknown-linux
 define signext i8 @test_avext() nounwind {
 entry:
   %0 = getelementptr inbounds [13 x i8], [13 x i8]* @x, i32 0, i32 0
-  %1 = load i8* %0, align 1
+  %1 = load i8, i8* %0, align 1
   ret i8 %1
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/mcm-9.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mcm-9.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mcm-9.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mcm-9.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux
 
 define signext i32 @test_external() nounwind {
 entry:
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @a, align 4
   ret i32 %0

Modified: llvm/trunk/test/CodeGen/PowerPC/mcm-default.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mcm-default.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mcm-default.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mcm-default.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ target triple = "powerpc64-unknown-linux
 
 define signext i32 @test_external() nounwind {
 entry:
-  %0 = load i32* @ei, align 4
+  %0 = load i32, i32* @ei, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @ei, align 4
   ret i32 %0

Modified: llvm/trunk/test/CodeGen/PowerPC/mcm-obj-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mcm-obj-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mcm-obj-2.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mcm-obj-2.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ target triple = "powerpc64-unknown-linux
 
 define signext i32 @test_fn_static() nounwind {
 entry:
-  %0 = load i32* @test_fn_static.si, align 4
+  %0 = load i32, i32* @test_fn_static.si, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @test_fn_static.si, align 4
   ret i32 %0
@@ -29,7 +29,7 @@ entry:
 
 define signext i32 @test_file_static() nounwind {
 entry:
-  %0 = load i32* @gi, align 4
+  %0 = load i32, i32* @gi, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @gi, align 4
   ret i32 %0

Modified: llvm/trunk/test/CodeGen/PowerPC/mcm-obj.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mcm-obj.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mcm-obj.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mcm-obj.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ target triple = "powerpc64-unknown-linux
 
 define signext i32 @test_external() nounwind {
 entry:
-  %0 = load i32* @ei, align 4
+  %0 = load i32, i32* @ei, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @ei, align 4
   ret i32 %0
@@ -35,7 +35,7 @@ entry:
 
 define signext i32 @test_fn_static() nounwind {
 entry:
-  %0 = load i32* @test_fn_static.si, align 4
+  %0 = load i32, i32* @test_fn_static.si, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @test_fn_static.si, align 4
   ret i32 %0
@@ -57,7 +57,7 @@ entry:
 
 define signext i32 @test_file_static() nounwind {
 entry:
-  %0 = load i32* @gi, align 4
+  %0 = load i32, i32* @gi, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @gi, align 4
   ret i32 %0
@@ -96,7 +96,7 @@ define signext i32 @test_jump_table(i32
 entry:
   %i.addr = alloca i32, align 4
   store i32 %i, i32* %i.addr, align 4
-  %0 = load i32* %i.addr, align 4
+  %0 = load i32, i32* %i.addr, align 4
   switch i32 %0, label %sw.default [
     i32 3, label %sw.bb
     i32 4, label %sw.bb1
@@ -108,31 +108,31 @@ sw.default:
   br label %sw.epilog
 
 sw.bb:                                            ; preds = %entry
-  %1 = load i32* %i.addr, align 4
+  %1 = load i32, i32* %i.addr, align 4
   %mul = mul nsw i32 %1, 7
   store i32 %mul, i32* %i.addr, align 4
   br label %sw.bb1
 
 sw.bb1:                                           ; preds = %entry, %sw.bb
-  %2 = load i32* %i.addr, align 4
+  %2 = load i32, i32* %i.addr, align 4
   %dec = add nsw i32 %2, -1
   store i32 %dec, i32* %i.addr, align 4
   br label %sw.bb2
 
 sw.bb2:                                           ; preds = %entry, %sw.bb1
-  %3 = load i32* %i.addr, align 4
+  %3 = load i32, i32* %i.addr, align 4
   %add = add nsw i32 %3, 3
   store i32 %add, i32* %i.addr, align 4
   br label %sw.bb3
 
 sw.bb3:                                           ; preds = %entry, %sw.bb2
-  %4 = load i32* %i.addr, align 4
+  %4 = load i32, i32* %i.addr, align 4
   %shl = shl i32 %4, 1
   store i32 %shl, i32* %i.addr, align 4
   br label %sw.epilog
 
 sw.epilog:                                        ; preds = %sw.bb3, %sw.default
-  %5 = load i32* %i.addr, align 4
+  %5 = load i32, i32* %i.addr, align 4
   ret i32 %5
 }
 
@@ -149,7 +149,7 @@ sw.epilog:
 
 define signext i32 @test_tentative() nounwind {
 entry:
-  %0 = load i32* @ti, align 4
+  %0 = load i32, i32* @ti, align 4
   %inc = add nsw i32 %0, 1
   store i32 %inc, i32* @ti, align 4
   ret i32 %0
@@ -168,7 +168,7 @@ define i8* @test_fnaddr() nounwind {
 entry:
   %func = alloca i32 (i32)*, align 8
   store i32 (i32)* @foo, i32 (i32)** %func, align 8
-  %0 = load i32 (i32)** %func, align 8
+  %0 = load i32 (i32)*, i32 (i32)** %func, align 8
   %1 = bitcast i32 (i32)* %0 to i8*
   ret i8* %1
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/mem-rr-addr-mode.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mem-rr-addr-mode.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mem-rr-addr-mode.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mem-rr-addr-mode.ll Fri Feb 27 15:17:42 2015
@@ -6,11 +6,11 @@
 
 define void @func(<4 x float>* %a, <4 x float>* %b) {
         %tmp1 = getelementptr <4 x float>, <4 x float>* %b, i32 1            ; <<4 x float>*> [#uses=1]
-        %tmp = load <4 x float>* %tmp1          ; <<4 x float>> [#uses=1]
+        %tmp = load <4 x float>, <4 x float>* %tmp1          ; <<4 x float>> [#uses=1]
         %tmp3 = getelementptr <4 x float>, <4 x float>* %a, i32 1            ; <<4 x float>*> [#uses=1]
-        %tmp4 = load <4 x float>* %tmp3         ; <<4 x float>> [#uses=1]
+        %tmp4 = load <4 x float>, <4 x float>* %tmp3         ; <<4 x float>> [#uses=1]
         %tmp5 = fmul <4 x float> %tmp, %tmp4             ; <<4 x float>> [#uses=1]
-        %tmp8 = load <4 x float>* %b            ; <<4 x float>> [#uses=1]
+        %tmp8 = load <4 x float>, <4 x float>* %b            ; <<4 x float>> [#uses=1]
         %tmp9 = fadd <4 x float> %tmp5, %tmp8            ; <<4 x float>> [#uses=1]
         store <4 x float> %tmp9, <4 x float>* %a
         ret void

Modified: llvm/trunk/test/CodeGen/PowerPC/mem_update.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mem_update.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mem_update.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mem_update.ll Fri Feb 27 15:17:42 2015
@@ -7,21 +7,21 @@
 
 define i32* @test0(i32* %X, i32* %dest) nounwind {
 	%Y = getelementptr i32, i32* %X, i32 4
-	%A = load i32* %Y
+	%A = load i32, i32* %Y
 	store i32 %A, i32* %dest
 	ret i32* %Y
 }
 
 define i32* @test1(i32* %X, i32* %dest) nounwind {
 	%Y = getelementptr i32, i32* %X, i32 4
-	%A = load i32* %Y
+	%A = load i32, i32* %Y
 	store i32 %A, i32* %dest
 	ret i32* %Y
 }
 
 define i16* @test2(i16* %X, i32* %dest) nounwind {
 	%Y = getelementptr i16, i16* %X, i32 4
-	%A = load i16* %Y
+	%A = load i16, i16* %Y
 	%B = sext i16 %A to i32
 	store i32 %B, i32* %dest
 	ret i16* %Y
@@ -29,7 +29,7 @@ define i16* @test2(i16* %X, i32* %dest)
 
 define i16* @test3(i16* %X, i32* %dest) nounwind {
 	%Y = getelementptr i16, i16* %X, i32 4
-	%A = load i16* %Y
+	%A = load i16, i16* %Y
 	%B = zext i16 %A to i32
 	store i32 %B, i32* %dest
 	ret i16* %Y
@@ -37,7 +37,7 @@ define i16* @test3(i16* %X, i32* %dest)
 
 define i16* @test3a(i16* %X, i64* %dest) nounwind {
 	%Y = getelementptr i16, i16* %X, i32 4
-	%A = load i16* %Y
+	%A = load i16, i16* %Y
 	%B = sext i16 %A to i64
 	store i64 %B, i64* %dest
 	ret i16* %Y
@@ -45,7 +45,7 @@ define i16* @test3a(i16* %X, i64* %dest)
 
 define i64* @test4(i64* %X, i64* %dest) nounwind {
 	%Y = getelementptr i64, i64* %X, i32 4
-	%A = load i64* %Y
+	%A = load i64, i64* %Y
 	store i64 %A, i64* %dest
 	ret i64* %Y
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/misched-inorder-latency.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/misched-inorder-latency.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/misched-inorder-latency.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/misched-inorder-latency.ll Fri Feb 27 15:17:42 2015
@@ -15,13 +15,13 @@ target triple = "powerpc64-bgq-linux"
 define i32 @testload(i32 *%ptr, i32 %sumin) {
 entry:
   %sum1 = add i32 %sumin, 1
-  %val1 = load i32* %ptr
+  %val1 = load i32, i32* %ptr
   %p = icmp eq i32 %sumin, 0
   br i1 %p, label %true, label %end
 true:
   %sum2 = add i32 %sum1, 1
   %ptr2 = getelementptr i32, i32* %ptr, i32 1
-  %val = load i32* %ptr2
+  %val = load i32, i32* %ptr2
   %val2 = add i32 %val1, %val
   br label %end
 end:

Modified: llvm/trunk/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll Fri Feb 27 15:17:42 2015
@@ -33,10 +33,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,<r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* %in1, align 4
+  %tmp1 = load i32, i32* %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,r<"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   ret void
@@ -48,10 +48,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,>r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* %in1, align 4
+  %tmp1 = load i32, i32* %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,r>"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   ret void
@@ -63,7 +63,7 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
   ret void
@@ -120,10 +120,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* @min1, align 4
+  %tmp1 = load i32, i32* @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r,imr"(i32 1) nounwind
@@ -137,10 +137,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* @min1, align 4
+  %tmp1 = load i32, i32* @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r,X"(i32 1) nounwind
@@ -165,7 +165,7 @@ entry:
 
 define void @multi_m() nounwind {
 entry:
-  %tmp = load i32* @min1, align 4
+  %tmp = load i32, i32* @min1, align 4
   call void asm "foo $1,$0", "=*m|r,m|r"(i32* @mout0, i32 %tmp) nounwind
   ret void
 }
@@ -190,10 +190,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|<r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* %in1, align 4
+  %tmp1 = load i32, i32* %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|r<"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   ret void
@@ -205,10 +205,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|>r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* %in1, align 4
+  %tmp1 = load i32, i32* %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|r>"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   ret void
@@ -220,7 +220,7 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|m"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
   ret void
@@ -277,10 +277,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* @min1, align 4
+  %tmp1 = load i32, i32* @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 1) nounwind
@@ -294,10 +294,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* @min1, align 4
+  %tmp1 = load i32, i32* @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 1) nounwind

Modified: llvm/trunk/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll Fri Feb 27 15:17:42 2015
@@ -33,10 +33,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,<r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* %in1, align 4
+  %tmp1 = load i32, i32* %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,r<"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   ret void
@@ -48,10 +48,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,>r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* %in1, align 4
+  %tmp1 = load i32, i32* %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,r>"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   ret void
@@ -63,7 +63,7 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
   ret void
@@ -120,10 +120,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* @min1, align 4
+  %tmp1 = load i32, i32* @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r,imr"(i32 1) nounwind
@@ -137,10 +137,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* @min1, align 4
+  %tmp1 = load i32, i32* @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r,X"(i32 1) nounwind
@@ -165,7 +165,7 @@ entry:
 
 define void @multi_m() nounwind {
 entry:
-  %tmp = load i32* @min1, align 4
+  %tmp = load i32, i32* @min1, align 4
   call void asm "foo $1,$0", "=*m|r,m|r"(i32* @mout0, i32 %tmp) nounwind
   ret void
 }
@@ -190,10 +190,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|<r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* %in1, align 4
+  %tmp1 = load i32, i32* %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|r<"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   ret void
@@ -205,10 +205,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|>r"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* %in1, align 4
+  %tmp1 = load i32, i32* %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|r>"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   ret void
@@ -220,7 +220,7 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|m"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
   ret void
@@ -277,10 +277,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* @min1, align 4
+  %tmp1 = load i32, i32* @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 1) nounwind
@@ -294,10 +294,10 @@ entry:
   %in1 = alloca i32, align 4
   store i32 0, i32* %out0, align 4
   store i32 1, i32* %in1, align 4
-  %tmp = load i32* %in1, align 4
+  %tmp = load i32, i32* %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp) nounwind
   store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32* @min1, align 4
+  %tmp1 = load i32, i32* @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp1) nounwind
   store i32 %1, i32* %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 1) nounwind

Modified: llvm/trunk/test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ target triple = "powerpc64-unknown-linux
 ; Function Attrs: nounwind readonly
 define double @test1(i64* nocapture readonly %x) #0 {
 entry:
-  %0 = load i64* %x, align 8
+  %0 = load i64, i64* %x, align 8
   %conv = sitofp i64 %0 to double
   ret double %conv
 
@@ -18,7 +18,7 @@ entry:
 ; Function Attrs: nounwind readonly
 define double @test2(i32* nocapture readonly %x) #0 {
 entry:
-  %0 = load i32* %x, align 4
+  %0 = load i32, i32* %x, align 4
   %conv = sitofp i32 %0 to double
   ret double %conv
 

Modified: llvm/trunk/test/CodeGen/PowerPC/novrsave.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/novrsave.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/novrsave.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/novrsave.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ define <4 x float> @bar(<4 x float> %v)
 entry:
   %v.addr = alloca <4 x float>, align 16
   store <4 x float> %v, <4 x float>* %v.addr, align 16
-  %0 = load <4 x float>* %v.addr, align 16
+  %0 = load <4 x float>, <4 x float>* %v.addr, align 16
   ret <4 x float> %0
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/or-addressing-mode.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/or-addressing-mode.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/or-addressing-mode.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/or-addressing-mode.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ define i32 @test1(i8* %P) {
         %tmp.11.i = and i32 %tmp.10.i, 2040             ; <i32> [#uses=1]
         %tmp.13.i = or i32 %tmp.11.i, %tmp.4.i          ; <i32> [#uses=1]
         %tmp.14.i = inttoptr i32 %tmp.13.i to i32*              ; <i32*> [#uses=1]
-        %tmp.3 = load i32* %tmp.14.i            ; <i32> [#uses=1]
+        %tmp.3 = load i32, i32* %tmp.14.i            ; <i32> [#uses=1]
         ret i32 %tmp.3
 }
 
@@ -16,7 +16,7 @@ define i32 @test2(i32 %P) {
         %tmp.2 = shl i32 %P, 4          ; <i32> [#uses=1]
         %tmp.3 = or i32 %tmp.2, 2               ; <i32> [#uses=1]
         %tmp.4 = inttoptr i32 %tmp.3 to i32*            ; <i32*> [#uses=1]
-        %tmp.5 = load i32* %tmp.4               ; <i32> [#uses=1]
+        %tmp.5 = load i32, i32* %tmp.4               ; <i32> [#uses=1]
         ret i32 %tmp.5
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/post-ra-ec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/post-ra-ec.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/post-ra-ec.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/post-ra-ec.ll Fri Feb 27 15:17:42 2015
@@ -16,9 +16,9 @@ entry:
   br i1 undef, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %0 = load i64* undef, align 8
+  %0 = load i64, i64* undef, align 8
   %conv.i = trunc i64 %0 to i32
-  %1 = load i32* null, align 4
+  %1 = load i32, i32* null, align 4
   %add = add i32 %1, %conv.i
   store i32 %add, i32* null, align 4
   %counter.i.i = getelementptr inbounds %struct.task_struct.4.16.124, %struct.task_struct.4.16.124* %call1.i, i64 0, i32 1, i32 0

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc-prologue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc-prologue.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc-prologue.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc-prologue.ll Fri Feb 27 15:17:42 2015
@@ -14,12 +14,12 @@ entry:
   store i32 %a, i32* %a_addr
   %1 = call i32 @_Z3barPi(i32* %a_addr)           ; <i32> [#uses=1]
   store i32 %1, i32* %0, align 4
-  %2 = load i32* %0, align 4                      ; <i32> [#uses=1]
+  %2 = load i32, i32* %0, align 4                      ; <i32> [#uses=1]
   store i32 %2, i32* %retval, align 4
   br label %return
 
 return:                                           ; preds = %entry
-  %retval1 = load i32* %retval                    ; <i32> [#uses=1]
+  %retval1 = load i32, i32* %retval                    ; <i32> [#uses=1]
   ret i32 %retval1
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc32-lshrti3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc32-lshrti3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc32-lshrti3.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc32-lshrti3.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ target triple = "powerpc--netbsd"
 ; Function Attrs: nounwind uwtable
 define i32 @fn1() #0 {
 entry:
-  %.promoted = load i72* inttoptr (i32 1 to i72*), align 4
+  %.promoted = load i72, i72* inttoptr (i32 1 to i72*), align 4
   br label %while.cond
 
 while.cond:                                       ; preds = %while.cond, %entry

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc32-pic-large.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc32-pic-large.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc32-pic-large.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc32-pic-large.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ declare i32 @call_foo(i32, ...)
 
 define i32 @foo() {
 entry:
-  %0 = load i32* @bar, align 4
+  %0 = load i32, i32* @bar, align 4
   %call = call i32 (i32, ...)* @call_foo(i32 %0, i32 0, i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64)
   ret i32 %0
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc32-pic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc32-pic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc32-pic.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc32-pic.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ declare i32 @call_foo(i32, ...)
 
 define i32 @foo() {
 entry:
-  %0 = load i32* @bar, align 4
+  %0 = load i32, i32* @bar, align 4
   %call = call i32 (i32, ...)* @call_foo(i32 %0, i32 0, i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64)
   ret i32 0
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc440-fp-basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc440-fp-basic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc440-fp-basic.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc440-fp-basic.ll Fri Feb 27 15:17:42 2015
@@ -5,13 +5,13 @@
 define void @maybe_an_fma(%0* sret %agg.result, %0* byval %a, %0* byval %b, %0* byval %c) nounwind {
 entry:
   %a.realp = getelementptr inbounds %0, %0* %a, i32 0, i32 0
-  %a.real = load double* %a.realp
+  %a.real = load double, double* %a.realp
   %a.imagp = getelementptr inbounds %0, %0* %a, i32 0, i32 1
-  %a.imag = load double* %a.imagp
+  %a.imag = load double, double* %a.imagp
   %b.realp = getelementptr inbounds %0, %0* %b, i32 0, i32 0
-  %b.real = load double* %b.realp
+  %b.real = load double, double* %b.realp
   %b.imagp = getelementptr inbounds %0, %0* %b, i32 0, i32 1
-  %b.imag = load double* %b.imagp
+  %b.imag = load double, double* %b.imagp
   %mul.rl = fmul double %a.real, %b.real
   %mul.rr = fmul double %a.imag, %b.imag
   %mul.r = fsub double %mul.rl, %mul.rr
@@ -19,9 +19,9 @@ entry:
   %mul.ir = fmul double %a.real, %b.imag
   %mul.i = fadd double %mul.il, %mul.ir
   %c.realp = getelementptr inbounds %0, %0* %c, i32 0, i32 0
-  %c.real = load double* %c.realp
+  %c.real = load double, double* %c.realp
   %c.imagp = getelementptr inbounds %0, %0* %c, i32 0, i32 1
-  %c.imag = load double* %c.imagp
+  %c.imag = load double, double* %c.imagp
   %add.r = fadd double %mul.r, %c.real
   %add.i = fadd double %mul.i, %c.imag
   %real = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 0

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc64-abi-extend.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc64-abi-extend.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc64-abi-extend.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc64-abi-extend.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ declare zeroext i32 @ret_ui()
 
 define void @pass_arg_si() nounwind {
 entry:
-  %0 = load i32* @si, align 4
+  %0 = load i32, i32* @si, align 4
   tail call void @arg_si(i32 signext %0) nounwind
   ret void
 }
@@ -25,7 +25,7 @@ entry:
 
 define void @pass_arg_ui() nounwind {
 entry:
-  %0 = load i32* @ui, align 4
+  %0 = load i32, i32* @ui, align 4
   tail call void @arg_ui(i32 zeroext %0) nounwind
   ret void
 }
@@ -53,7 +53,7 @@ entry:
 
 define signext i32 @pass_ret_si() nounwind readonly {
 entry:
-  %0 = load i32* @si, align 4
+  %0 = load i32, i32* @si, align 4
   ret i32 %0
 }
 ; CHECK: @pass_ret_si
@@ -62,7 +62,7 @@ entry:
 
 define zeroext i32 @pass_ret_ui() nounwind readonly {
 entry:
-  %0 = load i32* @ui, align 4
+  %0 = load i32, i32* @ui, align 4
   ret i32 %0
 }
 ; CHECK: @pass_ret_ui

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc64-align-long-double.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc64-align-long-double.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc64-align-long-double.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc64-align-long-double.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ target triple = "powerpc64-unknown-linux
 define ppc_fp128 @test(%struct.S* byval %x) nounwind {
 entry:
   %b = getelementptr inbounds %struct.S, %struct.S* %x, i32 0, i32 1
-  %0 = load ppc_fp128* %b, align 16
+  %0 = load ppc_fp128, ppc_fp128* %b, align 16
   ret ppc_fp128 %0
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc64-byval-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc64-byval-align.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc64-byval-align.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc64-byval-align.ll Fri Feb 27 15:17:42 2015
@@ -31,7 +31,7 @@ entry:
 define i64 @callee2(%struct.pad* byval nocapture readnone %x, i32 signext %y, %struct.test* byval align 16 nocapture readonly %z) {
 entry:
   %x1 = getelementptr inbounds %struct.test, %struct.test* %z, i64 0, i32 0
-  %0 = load i64* %x1, align 16
+  %0 = load i64, i64* %x1, align 16
   ret i64 %0
 }
 ; CHECK-LABEL: @callee2

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc64-calls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc64-calls.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc64-calls.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc64-calls.ll Fri Feb 27 15:17:42 2015
@@ -73,7 +73,7 @@ define double @test_external(double %x)
 @g = external global void ()*
 declare void @h(i64)
 define void @test_indir_toc_reload(i64 %x) {
-  %1 = load void ()** @g
+  %1 = load void ()*, void ()** @g
   call void %1()
   call void @h(i64 %x)
   ret void

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc64-gep-opt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc64-gep-opt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc64-gep-opt.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc64-gep-opt.ll Fri Feb 27 15:17:42 2015
@@ -15,13 +15,13 @@ target triple = "powerpc64-unknown-linux
 ; eliminate the common subexpression for the second use.
 define void @test_GEP_CSE([240 x %struct]* %string, i32* %adj, i32 %lib, i64 %idxprom) {
   %liberties = getelementptr [240 x %struct], [240 x %struct]* %string, i64 1, i64 %idxprom, i32 3
-  %1 = load i32* %liberties, align 4
+  %1 = load i32, i32* %liberties, align 4
   %cmp = icmp eq i32 %1, %lib
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
   %origin = getelementptr [240 x %struct], [240 x %struct]* %string, i64 1, i64 %idxprom, i32 2
-  %2 = load i32* %origin, align 4
+  %2 = load i32, i32* %origin, align 4
   store i32 %2, i32* %adj, align 4
   br label %if.end
 
@@ -60,9 +60,9 @@ if.end:
 ; use.
 define void @test_GEP_across_BB(%class.my* %this, i64 %idx) {
   %1 = getelementptr %class.my, %class.my* %this, i64 0, i32 3, i64 %idx, i32 1
-  %2 = load i32* %1, align 4
+  %2 = load i32, i32* %1, align 4
   %3 = getelementptr %class.my, %class.my* %this, i64 0, i32 3, i64 %idx, i32 2
-  %4 = load i32* %3, align 4
+  %4 = load i32, i32* %3, align 4
   %5 = icmp eq i32 %2, %4
   br i1 %5, label %if.true, label %exit
 

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc64-patchpoint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc64-patchpoint.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc64-patchpoint.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc64-patchpoint.ll Fri Feb 27 15:17:42 2015
@@ -63,13 +63,13 @@ define i64 @testLowerConstant(i64 %arg,
 entry:
   %tmp80 = add i64 %tmp79, -16
   %tmp81 = inttoptr i64 %tmp80 to i64*
-  %tmp82 = load i64* %tmp81, align 8
+  %tmp82 = load i64, i64* %tmp81, align 8
   tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
   tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 15, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
-  %tmp83 = load i64* %tmp33, align 8
+  %tmp83 = load i64, i64* %tmp33, align 8
   %tmp84 = add i64 %tmp83, -24
   %tmp85 = inttoptr i64 %tmp84 to i64*
-  %tmp86 = load i64* %tmp85, align 8
+  %tmp86 = load i64, i64* %tmp85, align 8
   tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
   tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 18, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
   ret i64 10

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc64-smallarg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc64-smallarg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc64-smallarg.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc64-smallarg.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ define void @callee1(%struct.small_arg*
 entry:
   %0 = bitcast %struct.small_arg* %x to i32*
   %1 = bitcast %struct.small_arg* %agg.result to i32*
-  %2 = load i32* %0, align 2
+  %2 = load i32, i32* %0, align 2
   store i32 %2, i32* %1, align 2
   ret void
 }
@@ -47,7 +47,7 @@ entry:
 
 define void @caller2() {
 entry:
-  %0 = load float* @gf, align 4
+  %0 = load float, float* @gf, align 4
   %call = tail call float @test2(float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float %0)
   ret void
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc64-toc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc64-toc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc64-toc.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc64-toc.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ entry:
 ; CHECK-NEXT: .quad   .TOC.@tocbase
 ; CHECK-NEXT: .quad   0
 ; CHECK-NEXT: .text
-  %0 = load i64* @number64, align 8
+  %0 = load i64, i64* @number64, align 8
 ; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2)
   %cmp = icmp eq i64 %0, %a
   %conv1 = zext i1 %cmp to i64 
@@ -25,7 +25,7 @@ define i64 @internal_static_var(i64 %a)
 entry:
 ; CHECK-LABEL: internal_static_var:
 ; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2)
-  %0 = load i64* @internal_static_var.x, align 8
+  %0 = load i64, i64* @internal_static_var.x, align 8
   %cmp = icmp eq i64 %0, %a
   %conv1 = zext i1 %cmp to i64 
   ret i64 %conv1 
@@ -46,7 +46,7 @@ entry:
 ; CHECK-LABEL: access_double_array:
   %idxprom = sext i32 %i to i64
   %arrayidx = getelementptr inbounds [32 x double], [32 x double]* @double_array, i64 0, i64 %idxprom
-  %0 = load double* %arrayidx, align 8
+  %0 = load double, double* %arrayidx, align 8
 ; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2)
   %cmp = fcmp oeq double %0, %a
   %conv = zext i1 %cmp to i32

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc64le-aggregates.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc64le-aggregates.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc64le-aggregates.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc64le-aggregates.ll Fri Feb 27 15:17:42 2015
@@ -257,9 +257,9 @@ entry:
 
 define void @caller2() {
 entry:
-  %0 = load [8 x float]* getelementptr inbounds (%struct.float8* @g8, i64 0, i32 0), align 4
-  %1 = load [5 x float]* getelementptr inbounds (%struct.float5* @g5, i64 0, i32 0), align 4
-  %2 = load [2 x float]* getelementptr inbounds (%struct.float2* @g2, i64 0, i32 0), align 4
+  %0 = load [8 x float], [8 x float]* getelementptr inbounds (%struct.float8* @g8, i64 0, i32 0), align 4
+  %1 = load [5 x float], [5 x float]* getelementptr inbounds (%struct.float5* @g5, i64 0, i32 0), align 4
+  %2 = load [2 x float], [2 x float]* getelementptr inbounds (%struct.float2* @g2, i64 0, i32 0), align 4
   tail call void @test2([8 x float] %0, [5 x float] %1, [2 x float] %2)
   ret void
 }
@@ -299,8 +299,8 @@ entry:
 
 define void @caller3(double %d) {
 entry:
-  %0 = load [8 x float]* getelementptr inbounds (%struct.float8* @g8, i64 0, i32 0), align 4
-  %1 = load [5 x float]* getelementptr inbounds (%struct.float5* @g5, i64 0, i32 0), align 4
+  %0 = load [8 x float], [8 x float]* getelementptr inbounds (%struct.float8* @g8, i64 0, i32 0), align 4
+  %1 = load [5 x float], [5 x float]* getelementptr inbounds (%struct.float5* @g5, i64 0, i32 0), align 4
   tail call void @test3([8 x float] %0, [5 x float] %1, double %d)
   ret void
 }
@@ -322,8 +322,8 @@ entry:
 
 define void @caller4(float %f) {
 entry:
-  %0 = load [8 x float]* getelementptr inbounds (%struct.float8* @g8, i64 0, i32 0), align 4
-  %1 = load [5 x float]* getelementptr inbounds (%struct.float5* @g5, i64 0, i32 0), align 4
+  %0 = load [8 x float], [8 x float]* getelementptr inbounds (%struct.float8* @g8, i64 0, i32 0), align 4
+  %1 = load [5 x float], [5 x float]* getelementptr inbounds (%struct.float5* @g5, i64 0, i32 0), align 4
   tail call void @test4([8 x float] %0, [5 x float] %1, float %f)
   ret void
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc64le-localentry.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc64le-localentry.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc64le-localentry.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc64le-localentry.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ entry:
 ; CHECK-NEXT: .Ltmp[[TMP2:[0-9]+]]:
 ; CHECK-NEXT: .localentry use_toc, .Ltmp[[TMP2]]-.Ltmp[[TMP1]]
 ; CHECK-NEXT: %entry
-  %0 = load i64* @number64, align 8
+  %0 = load i64, i64* @number64, align 8
   %cmp = icmp eq i64 %0, %a
   %conv1 = zext i1 %cmp to i64
   ret i64 %conv1

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc64le-smallarg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc64le-smallarg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc64le-smallarg.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc64le-smallarg.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ define void @callee1(%struct.small_arg*
 entry:
   %0 = bitcast %struct.small_arg* %x to i32*
   %1 = bitcast %struct.small_arg* %agg.result to i32*
-  %2 = load i32* %0, align 2
+  %2 = load i32, i32* %0, align 2
   store i32 %2, i32* %1, align 2
   ret void
 }
@@ -47,7 +47,7 @@ entry:
 
 define void @caller2() {
 entry:
-  %0 = load float* @gf, align 4
+  %0 = load float, float* @gf, align 4
   %call = tail call float @test2(float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float %0)
   ret void
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/ppcf128-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppcf128-1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppcf128-1.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppcf128-1.ll Fri Feb 27 15:17:42 2015
@@ -12,16 +12,16 @@ entry:
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
 	store ppc_fp128 %x, ppc_fp128* %x_addr
 	store ppc_fp128 %y, ppc_fp128* %y_addr
-	%tmp1 = load ppc_fp128* %x_addr, align 16		; <ppc_fp128> [#uses=1]
-	%tmp2 = load ppc_fp128* %y_addr, align 16		; <ppc_fp128> [#uses=1]
+	%tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16		; <ppc_fp128> [#uses=1]
+	%tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16		; <ppc_fp128> [#uses=1]
 	%tmp3 = fadd ppc_fp128 %tmp1, %tmp2		; <ppc_fp128> [#uses=1]
 	store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
-	%tmp4 = load ppc_fp128* %tmp, align 16		; <ppc_fp128> [#uses=1]
+	%tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16		; <ppc_fp128> [#uses=1]
 	store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
 	br label %return
 
 return:		; preds = %entry
-	%retval5 = load ppc_fp128* %retval		; <ppc_fp128> [#uses=1]
+	%retval5 = load ppc_fp128, ppc_fp128* %retval		; <ppc_fp128> [#uses=1]
 	ret ppc_fp128 %retval5
 }
 
@@ -34,16 +34,16 @@ entry:
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
 	store ppc_fp128 %x, ppc_fp128* %x_addr
 	store ppc_fp128 %y, ppc_fp128* %y_addr
-	%tmp1 = load ppc_fp128* %x_addr, align 16		; <ppc_fp128> [#uses=1]
-	%tmp2 = load ppc_fp128* %y_addr, align 16		; <ppc_fp128> [#uses=1]
+	%tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16		; <ppc_fp128> [#uses=1]
+	%tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16		; <ppc_fp128> [#uses=1]
 	%tmp3 = fsub ppc_fp128 %tmp1, %tmp2		; <ppc_fp128> [#uses=1]
 	store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
-	%tmp4 = load ppc_fp128* %tmp, align 16		; <ppc_fp128> [#uses=1]
+	%tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16		; <ppc_fp128> [#uses=1]
 	store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
 	br label %return
 
 return:		; preds = %entry
-	%retval5 = load ppc_fp128* %retval		; <ppc_fp128> [#uses=1]
+	%retval5 = load ppc_fp128, ppc_fp128* %retval		; <ppc_fp128> [#uses=1]
 	ret ppc_fp128 %retval5
 }
 
@@ -56,16 +56,16 @@ entry:
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
 	store ppc_fp128 %x, ppc_fp128* %x_addr
 	store ppc_fp128 %y, ppc_fp128* %y_addr
-	%tmp1 = load ppc_fp128* %x_addr, align 16		; <ppc_fp128> [#uses=1]
-	%tmp2 = load ppc_fp128* %y_addr, align 16		; <ppc_fp128> [#uses=1]
+	%tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16		; <ppc_fp128> [#uses=1]
+	%tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16		; <ppc_fp128> [#uses=1]
 	%tmp3 = fmul ppc_fp128 %tmp1, %tmp2		; <ppc_fp128> [#uses=1]
 	store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
-	%tmp4 = load ppc_fp128* %tmp, align 16		; <ppc_fp128> [#uses=1]
+	%tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16		; <ppc_fp128> [#uses=1]
 	store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
 	br label %return
 
 return:		; preds = %entry
-	%retval5 = load ppc_fp128* %retval		; <ppc_fp128> [#uses=1]
+	%retval5 = load ppc_fp128, ppc_fp128* %retval		; <ppc_fp128> [#uses=1]
 	ret ppc_fp128 %retval5
 }
 
@@ -78,15 +78,15 @@ entry:
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
 	store ppc_fp128 %x, ppc_fp128* %x_addr
 	store ppc_fp128 %y, ppc_fp128* %y_addr
-	%tmp1 = load ppc_fp128* %x_addr, align 16		; <ppc_fp128> [#uses=1]
-	%tmp2 = load ppc_fp128* %y_addr, align 16		; <ppc_fp128> [#uses=1]
+	%tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16		; <ppc_fp128> [#uses=1]
+	%tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16		; <ppc_fp128> [#uses=1]
 	%tmp3 = fdiv ppc_fp128 %tmp1, %tmp2		; <ppc_fp128> [#uses=1]
 	store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
-	%tmp4 = load ppc_fp128* %tmp, align 16		; <ppc_fp128> [#uses=1]
+	%tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16		; <ppc_fp128> [#uses=1]
 	store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
 	br label %return
 
 return:		; preds = %entry
-	%retval5 = load ppc_fp128* %retval		; <ppc_fp128> [#uses=1]
+	%retval5 = load ppc_fp128, ppc_fp128* %retval		; <ppc_fp128> [#uses=1]
 	ret ppc_fp128 %retval5
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/ppcf128-endian.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppcf128-endian.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppcf128-endian.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppcf128-endian.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define void @callee(ppc_fp128 %x) {
 entry:
   %x.addr = alloca ppc_fp128, align 16
   store ppc_fp128 %x, ppc_fp128* %x.addr, align 16
-  %0 = load ppc_fp128* %x.addr, align 16
+  %0 = load ppc_fp128, ppc_fp128* %x.addr, align 16
   store ppc_fp128 %0, ppc_fp128* @g, align 16
   ret void
 }
@@ -21,7 +21,7 @@ entry:
 
 define void @caller() {
 entry:
-  %0 = load ppc_fp128* @g, align 16
+  %0 = load ppc_fp128, ppc_fp128* @g, align 16
   call void @test(ppc_fp128 %0)
   ret void
 }
@@ -51,7 +51,7 @@ entry:
 
 define ppc_fp128 @result() {
 entry:
-  %0 = load ppc_fp128* @g, align 16
+  %0 = load ppc_fp128, ppc_fp128* @g, align 16
   ret ppc_fp128 %0
 }
 ; CHECK: @result

Modified: llvm/trunk/test/CodeGen/PowerPC/pr13891.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/pr13891.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/pr13891.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/pr13891.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ define void @_Z5check3foos(%struct.foo*
 ; CHECK: lha {{[0-9]+}}, {{[0-9]+}}(1)
 entry:
   %0 = bitcast %struct.foo* %f to i16*
-  %1 = load i16* %0, align 2
+  %1 = load i16, i16* %0, align 2
   %bf.val.sext = ashr i16 %1, 8
   %cmp = icmp eq i16 %bf.val.sext, %i
   br i1 %cmp, label %if.end, label %if.then

Modified: llvm/trunk/test/CodeGen/PowerPC/pr15031.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/pr15031.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/pr15031.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/pr15031.ll Fri Feb 27 15:17:42 2015
@@ -300,7 +300,7 @@ define void @_ZN4llvm14MachineOperand12s
 entry:
   %SubReg_TargetFlags.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 1
   %0 = bitcast [3 x i8]* %SubReg_TargetFlags.i to i24*
-  %bf.load.i = load i24* %0, align 1
+  %bf.load.i = load i24, i24* %0, align 1
   %bf.lshr.i = lshr i24 %bf.load.i, 12
   %tobool = icmp eq i24 %bf.lshr.i, 0
   br i1 %tobool, label %if.end, label %if.then
@@ -309,7 +309,7 @@ if.then:
   %bf.cast.i = zext i24 %bf.lshr.i to i32
   %add.ptr = getelementptr inbounds %"class.llvm::TargetRegisterInfo", %"class.llvm::TargetRegisterInfo"* %TRI, i64 0, i32 1
   %call3 = tail call zeroext i32 @_ZNK4llvm14MCRegisterInfo9getSubRegEjj(%"class.llvm::MCRegisterInfo"* %add.ptr, i32 zeroext %Reg, i32 zeroext %bf.cast.i)
-  %bf.load.i10 = load i24* %0, align 1
+  %bf.load.i10 = load i24, i24* %0, align 1
   %bf.clear.i = and i24 %bf.load.i10, 4095
   store i24 %bf.clear.i, i24* %0, align 1
   br label %if.end
@@ -317,31 +317,31 @@ if.then:
 if.end:                                           ; preds = %entry, %if.then
   %Reg.addr.0 = phi i32 [ %call3, %if.then ], [ %Reg, %entry ]
   %RegNo.i.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 2, i32 0
-  %1 = load i32* %RegNo.i.i, align 4
+  %1 = load i32, i32* %RegNo.i.i, align 4
   %cmp.i = icmp eq i32 %1, %Reg.addr.0
   br i1 %cmp.i, label %_ZN4llvm14MachineOperand6setRegEj.exit, label %if.end.i
 
 if.end.i:                                         ; preds = %if.end
   %ParentMI.i.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 3
-  %2 = load %"class.llvm::MachineInstr"** %ParentMI.i.i, align 8
+  %2 = load %"class.llvm::MachineInstr"*, %"class.llvm::MachineInstr"** %ParentMI.i.i, align 8
   %tobool.i = icmp eq %"class.llvm::MachineInstr"* %2, null
   br i1 %tobool.i, label %if.end13.i, label %if.then3.i
 
 if.then3.i:                                       ; preds = %if.end.i
   %Parent.i.i = getelementptr inbounds %"class.llvm::MachineInstr", %"class.llvm::MachineInstr"* %2, i64 0, i32 2
-  %3 = load %"class.llvm::MachineBasicBlock"** %Parent.i.i, align 8
+  %3 = load %"class.llvm::MachineBasicBlock"*, %"class.llvm::MachineBasicBlock"** %Parent.i.i, align 8
   %tobool5.i = icmp eq %"class.llvm::MachineBasicBlock"* %3, null
   br i1 %tobool5.i, label %if.end13.i, label %if.then6.i
 
 if.then6.i:                                       ; preds = %if.then3.i
   %xParent.i.i = getelementptr inbounds %"class.llvm::MachineBasicBlock", %"class.llvm::MachineBasicBlock"* %3, i64 0, i32 4
-  %4 = load %"class.llvm::MachineFunction"** %xParent.i.i, align 8
+  %4 = load %"class.llvm::MachineFunction"*, %"class.llvm::MachineFunction"** %xParent.i.i, align 8
   %tobool8.i = icmp eq %"class.llvm::MachineFunction"* %4, null
   br i1 %tobool8.i, label %if.end13.i, label %if.then9.i
 
 if.then9.i:                                       ; preds = %if.then6.i
   %RegInfo.i.i = getelementptr inbounds %"class.llvm::MachineFunction", %"class.llvm::MachineFunction"* %4, i64 0, i32 5
-  %5 = load %"class.llvm::MachineRegisterInfo"** %RegInfo.i.i, align 8
+  %5 = load %"class.llvm::MachineRegisterInfo"*, %"class.llvm::MachineRegisterInfo"** %RegInfo.i.i, align 8
   tail call void @_ZN4llvm19MachineRegisterInfo27removeRegOperandFromUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"* %5, %"class.llvm::MachineOperand"* %this)
   store i32 %Reg.addr.0, i32* %RegNo.i.i, align 4
   tail call void @_ZN4llvm19MachineRegisterInfo22addRegOperandToUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"* %5, %"class.llvm::MachineOperand"* %this)

Modified: llvm/trunk/test/CodeGen/PowerPC/pr15630.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/pr15630.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/pr15630.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/pr15630.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ entry:
   %newval = alloca i8
   %ordering = alloca i32, align 4
   store i8 %newval_arg, i8* %newval
-  %tmp = load i8* %newval
+  %tmp = load i8, i8* %newval
   store atomic volatile i8 %tmp, i8* %val_arg seq_cst, align 1
   ret void
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/pr16556-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/pr16556-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/pr16556-2.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/pr16556-2.ll Fri Feb 27 15:17:42 2015
@@ -23,15 +23,15 @@ entry:
   br i1 %tmp, label %noassert, label %assert
 
 assert:                                           ; preds = %entry
-  %tmp1 = load { i32, i8* }* @.modulefilename
+  %tmp1 = load { i32, i8* }, { i32, i8* }* @.modulefilename
   %0 = call i8* @_d_assert_msg({ i32, i8* } { i32 9, i8* getelementptr inbounds ([10 x i8]* @.str83, i32 0, i32 0) }, { i32, i8* } %tmp1, i32 1586)
   unreachable
 
 noassert:                                         ; preds = %entry
   %tmp2 = getelementptr %core.time.TickDuration, %core.time.TickDuration* %.this_arg, i32 0, i32 0
-  %tmp3 = load i64* %tmp2
+  %tmp3 = load i64, i64* %tmp2
   %tmp4 = sitofp i64 %tmp3 to ppc_fp128
-  %tmp5 = load i64* @_D4core4time12TickDuration11ticksPerSecyl
+  %tmp5 = load i64, i64* @_D4core4time12TickDuration11ticksPerSecyl
   %tmp6 = sitofp i64 %tmp5 to ppc_fp128
   %tmp7 = fdiv ppc_fp128 %tmp6, 0xM80000000000000000000000000000000
   %tmp8 = fdiv ppc_fp128 %tmp4, %tmp7

Modified: llvm/trunk/test/CodeGen/PowerPC/pr17168.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/pr17168.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/pr17168.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/pr17168.ll Fri Feb 27 15:17:42 2015
@@ -24,7 +24,7 @@ for.cond968.preheader:
 
 for.end1042:                                      ; preds = %for.cond968.preheader, %for.cond964.preheader, %entry
   %0 = phi i32 [ undef, %for.cond964.preheader ], [ undef, %for.cond968.preheader ], [ undef, %entry ]
-  %1 = load i32* getelementptr inbounds ([3 x i32]* @grid_points, i64 0, i64 0), align 4, !dbg !443, !tbaa !444
+  %1 = load i32, i32* getelementptr inbounds ([3 x i32]* @grid_points, i64 0, i64 0), align 4, !dbg !443, !tbaa !444
   tail call void @llvm.dbg.value(metadata i32 1, i64 0, metadata !119, metadata !{!"0x102"}), !dbg !448
   %sub10454270 = add nsw i32 %0, -1, !dbg !448
   %cmp10464271 = icmp sgt i32 %sub10454270, 1, !dbg !448

Modified: llvm/trunk/test/CodeGen/PowerPC/pr18663.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/pr18663.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/pr18663.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/pr18663.ll Fri Feb 27 15:17:42 2015
@@ -61,21 +61,21 @@
 
 define void @_ZNK18TriaObjectAccessorILi3ELi3EE10barycenterEv(%class.Point.1* noalias nocapture sret %agg.result, %class.TriaObjectAccessor.57* %this) #0 align 2 {
 entry:
-  %0 = load double* null, align 8
-  %1 = load double* undef, align 8
+  %0 = load double, double* null, align 8
+  %1 = load double, double* undef, align 8
   %call18 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 6)
-  %2 = load double* undef, align 8
+  %2 = load double, double* undef, align 8
   %call21 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 7)
-  %3 = load double* undef, align 8
+  %3 = load double, double* undef, align 8
   %call33 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 3)
-  %4 = load double* null, align 8
-  %5 = load double* undef, align 8
+  %4 = load double, double* null, align 8
+  %5 = load double, double* undef, align 8
   %call45 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 7)
-  %6 = load double* undef, align 8
+  %6 = load double, double* undef, align 8
   %call48 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 0)
-  %7 = load double* undef, align 8
+  %7 = load double, double* undef, align 8
   %call66 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 6)
-  %8 = load double* undef, align 8
+  %8 = load double, double* undef, align 8
   %mul334 = fmul double undef, 2.000000e+00
   %mul579 = fmul double %2, %5
   %mul597 = fmul double undef, %mul579

Modified: llvm/trunk/test/CodeGen/PowerPC/pr20442.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/pr20442.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/pr20442.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/pr20442.ll Fri Feb 27 15:17:42 2015
@@ -20,15 +20,15 @@ target triple = "powerpc-unknown-linux-g
 ; Function Attrs: nounwind readonly uwtable
 define i32 @fn1() #0 {
 entry:
-  %0 = load %struct.anon** @b, align 4
+  %0 = load %struct.anon*, %struct.anon** @b, align 4
   %1 = ptrtoint %struct.anon* %0 to i32
   %cmp = icmp sgt %struct.anon* %0, null
-  %2 = load %struct.anon.0** @a, align 4
+  %2 = load %struct.anon.0*, %struct.anon.0** @a, align 4
   br i1 %cmp, label %for.bodythread-pre-split, label %if.end8
 
 for.bodythread-pre-split:                         ; preds = %entry
   %aclass = getelementptr inbounds %struct.anon.0, %struct.anon.0* %2, i32 0, i32 0
-  %.pr = load i32* %aclass, align 4
+  %.pr = load i32, i32* %aclass, align 4
   br label %for.body
 
 for.body:                                         ; preds = %for.bodythread-pre-split, %for.body
@@ -52,9 +52,9 @@ while.cond:
 while.body:                                       ; preds = %while.body.lr.ph, %while.cond
   %j.110 = phi i32 [ %j.1.ph13, %while.body.lr.ph ], [ %inc7, %while.cond ]
   %aclass_index = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 %j.110, i32 0
-  %3 = load i32* %aclass_index, align 4
+  %3 = load i32, i32* %aclass_index, align 4
   %aclass5 = getelementptr inbounds %struct.anon.0, %struct.anon.0* %2, i32 %3, i32 0
-  %4 = load i32* %aclass5, align 4
+  %4 = load i32, i32* %aclass5, align 4
   %tobool = icmp eq i32 %4, 0
   %inc7 = add nsw i32 %j.110, 1
   br i1 %tobool, label %while.cond, label %if.then6

Modified: llvm/trunk/test/CodeGen/PowerPC/preincprep-invoke.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/preincprep-invoke.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/preincprep-invoke.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/preincprep-invoke.ll Fri Feb 27 15:17:42 2015
@@ -36,7 +36,7 @@ invoke.cont4:
 for.cond.i.i30:                                   ; preds = %for.cond.i.i30, %invoke.cont4
   %indvars.iv.i.i26 = phi i64 [ %indvars.iv.next.i.i29, %for.cond.i.i30 ], [ 0, %invoke.cont4 ]
   %arrayidx.i.i27 = getelementptr inbounds i8, i8* %call7, i64 %indvars.iv.i.i26
-  %0 = load i8* %arrayidx.i.i27, align 1
+  %0 = load i8, i8* %arrayidx.i.i27, align 1
   %indvars.iv.next.i.i29 = add nuw nsw i64 %indvars.iv.i.i26, 1
   br label %for.cond.i.i30
 

Modified: llvm/trunk/test/CodeGen/PowerPC/private.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/private.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/private.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/private.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ define i32 @bar() nounwind {
 
 ; LINUX: lis{{.*}}.Lbaz
 ; OSX:  lis{{.*}}l_baz
-	%1 = load i32* @baz, align 4
+	%1 = load i32, i32* @baz, align 4
         ret i32 %1
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/pwr7-gt-nop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/pwr7-gt-nop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/pwr7-gt-nop.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/pwr7-gt-nop.ll Fri Feb 27 15:17:42 2015
@@ -8,11 +8,11 @@ define void @foo(float* nocapture %a, fl
 ; CHECK-LABEL: @foo
 
 entry:
-  %0 = load float* %b, align 4
+  %0 = load float, float* %b, align 4
   store float %0, float* %a, align 4
-  %1 = load float* %c, align 4
+  %1 = load float, float* %c, align 4
   store float %1, float* %b, align 4
-  %2 = load float* %a, align 4
+  %2 = load float, float* %a, align 4
   store float %2, float* %d, align 4
   ret void
 

Modified: llvm/trunk/test/CodeGen/PowerPC/qpx-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/qpx-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/qpx-load.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/qpx-load.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@ target triple = "powerpc64-bgq-linux"
 
 define <4 x double> @foo(<4 x double>* %p) {
 entry:
-  %v = load <4 x double>* %p, align 8
+  %v = load <4 x double>, <4 x double>* %p, align 8
   ret <4 x double> %v
 }
 
@@ -17,7 +17,7 @@ entry:
 
 define <4 x double> @bar(<4 x double>* %p) {
 entry:
-  %v = load <4 x double>* %p, align 32
+  %v = load <4 x double>, <4 x double>* %p, align 32
   ret <4 x double> %v
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/qpx-s-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/qpx-s-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/qpx-s-load.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/qpx-s-load.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@ target triple = "powerpc64-bgq-linux"
 
 define <4 x float> @foo(<4 x float>* %p) {
 entry:
-  %v = load <4 x float>* %p, align 4
+  %v = load <4 x float>, <4 x float>* %p, align 4
   ret <4 x float> %v
 }
 
@@ -17,7 +17,7 @@ entry:
 
 define <4 x float> @bar(<4 x float>* %p) {
 entry:
-  %v = load <4 x float>* %p, align 16
+  %v = load <4 x float>, <4 x float>* %p, align 16
   ret <4 x float> %v
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/qpx-s-sel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/qpx-s-sel.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/qpx-s-sel.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/qpx-s-sel.ll Fri Feb 27 15:17:42 2015
@@ -46,7 +46,7 @@ entry:
 
 define <4 x i1> @test4(<4 x i1> %a) nounwind {
 entry:
-  %q = load <4 x i1>* @Q, align 16
+  %q = load <4 x i1>, <4 x i1>* @Q, align 16
   %v = and <4 x i1> %a, %q
   ret <4 x i1> %v
 

Modified: llvm/trunk/test/CodeGen/PowerPC/qpx-sel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/qpx-sel.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/qpx-sel.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/qpx-sel.ll Fri Feb 27 15:17:42 2015
@@ -50,7 +50,7 @@ entry:
 
 define <4 x i1> @test4(<4 x i1> %a) nounwind {
 entry:
-  %q = load <4 x i1>* @Q, align 16
+  %q = load <4 x i1>, <4 x i1>* @Q, align 16
   %v = and <4 x i1> %a, %q
   ret <4 x i1> %v
 

Modified: llvm/trunk/test/CodeGen/PowerPC/qpx-unalperm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/qpx-unalperm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/qpx-unalperm.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/qpx-unalperm.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ target triple = "powerpc64-bgq-linux"
 
 define <4 x double> @foo(<4 x double>* %a) {
 entry:
-  %r = load <4 x double>* %a, align 32
+  %r = load <4 x double>, <4 x double>* %a, align 32
   ret <4 x double> %r
 ; CHECK: qvlfdx
 ; CHECK: blr
@@ -12,9 +12,9 @@ entry:
 
 define <4 x double> @bar(<4 x double>* %a) {
 entry:
-  %r = load <4 x double>* %a, align 8
+  %r = load <4 x double>, <4 x double>* %a, align 8
   %b = getelementptr <4 x double>, <4 x double>* %a, i32 16
-  %s = load <4 x double>* %b, align 32
+  %s = load <4 x double>, <4 x double>* %b, align 32
   %t = fadd <4 x double> %r, %s
   ret <4 x double> %t
 ; CHECK: qvlpcldx
@@ -25,38 +25,38 @@ entry:
 
 define <4 x double> @bar1(<4 x double>* %a) {
 entry:
-  %r = load <4 x double>* %a, align 8
+  %r = load <4 x double>, <4 x double>* %a, align 8
   %b = getelementptr <4 x double>, <4 x double>* %a, i32 16
-  %s = load <4 x double>* %b, align 8
+  %s = load <4 x double>, <4 x double>* %b, align 8
   %t = fadd <4 x double> %r, %s
   ret <4 x double> %t
 }
 
 define <4 x double> @bar2(<4 x double>* %a) {
 entry:
-  %r = load <4 x double>* %a, align 8
+  %r = load <4 x double>, <4 x double>* %a, align 8
   %b = getelementptr <4 x double>, <4 x double>* %a, i32 1
-  %s = load <4 x double>* %b, align 32
+  %s = load <4 x double>, <4 x double>* %b, align 32
   %t = fadd <4 x double> %r, %s
   ret <4 x double> %t
 }
 
 define <4 x double> @bar3(<4 x double>* %a) {
 entry:
-  %r = load <4 x double>* %a, align 8
+  %r = load <4 x double>, <4 x double>* %a, align 8
   %b = getelementptr <4 x double>, <4 x double>* %a, i32 1
-  %s = load <4 x double>* %b, align 8
+  %s = load <4 x double>, <4 x double>* %b, align 8
   %t = fadd <4 x double> %r, %s
   ret <4 x double> %t
 }
 
 define <4 x double> @bar4(<4 x double>* %a) {
 entry:
-  %r = load <4 x double>* %a, align 8
+  %r = load <4 x double>, <4 x double>* %a, align 8
   %b = getelementptr <4 x double>, <4 x double>* %a, i32 1
-  %s = load <4 x double>* %b, align 8
+  %s = load <4 x double>, <4 x double>* %b, align 8
   %c = getelementptr <4 x double>, <4 x double>* %b, i32 1
-  %t = load <4 x double>* %c, align 8
+  %t = load <4 x double>, <4 x double>* %c, align 8
   %u = fadd <4 x double> %r, %s
   %v = fadd <4 x double> %u, %t
   ret <4 x double> %v

Modified: llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ define i128 @foo() nounwind {
 entry:
   %x = alloca i128, align 16
   store i128 27, i128* %x, align 16
-  %0 = load i128* %x, align 16
+  %0 = load i128, i128* %x, align 16
   ret i128 %0
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/reg-coalesce-simple.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/reg-coalesce-simple.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/reg-coalesce-simple.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/reg-coalesce-simple.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 
 define i32 @test(%struct.foo* %X) nounwind {
         %tmp1 = getelementptr %struct.foo, %struct.foo* %X, i32 0, i32 2, i32 100            ; <i8*> [#uses=1]
-        %tmp = load i8* %tmp1           ; <i8> [#uses=1]
+        %tmp = load i8, i8* %tmp1           ; <i8> [#uses=1]
         %tmp2 = zext i8 %tmp to i32             ; <i32> [#uses=1]
         ret i32 %tmp2
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/reloc-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/reloc-align.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/reloc-align.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/reloc-align.ll Fri Feb 27 15:17:42 2015
@@ -24,7 +24,7 @@ entry:
 define internal fastcc signext i32 @func_90(%struct.S1* byval nocapture %p_91) #0 {
 entry:
   %0 = bitcast %struct.S1* %p_91 to i64*
-  %bf.load = load i64* %0, align 1
+  %bf.load = load i64, i64* %0, align 1
   %bf.shl = shl i64 %bf.load, 26
   %bf.ashr = ashr i64 %bf.shl, 54
   %bf.cast = trunc i64 %bf.ashr to i32

Modified: llvm/trunk/test/CodeGen/PowerPC/resolvefi-basereg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/resolvefi-basereg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/resolvefi-basereg.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/resolvefi-basereg.ll Fri Feb 27 15:17:42 2015
@@ -42,14 +42,14 @@ entry:
   store i64 16, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 8), align 8
   store i64 16, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 9), align 8
   store i64 16, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 10), align 8
-  %0 = load i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 8), align 8
+  %0 = load i64, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 8), align 8
   %sub = sub i64 %0, 1
   %and = and i64 ptrtoint (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 3) to i64), %sub
   %tobool = icmp ne i64 %and, 0
   br i1 %tobool, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %1 = load i32* @fails, align 4
+  %1 = load i32, i32* @fails, align 4
   %inc = add nsw i32 %1, 1
   store i32 %inc, i32* @fails, align 4
   br label %if.end
@@ -57,276 +57,276 @@ if.then:
 if.end:                                           ; preds = %if.then, %entry
   store i32 0, i32* %i, align 4
   store i32 0, i32* %j, align 4
-  %2 = load i32* %i, align 4
+  %2 = load i32, i32* %i, align 4
   %idxprom = sext i32 %2 to i64
   %arrayidx = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom
   store i8* bitcast (i32** getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 0, i64 1) to i8*), i8** %arrayidx, align 8
-  %3 = load i32* %i, align 4
+  %3 = load i32, i32* %i, align 4
   %idxprom1 = sext i32 %3 to i64
   %arrayidx2 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom1
   store i64 8, i64* %arrayidx2, align 8
-  %4 = load i32* %i, align 4
+  %4 = load i32, i32* %i, align 4
   %idxprom3 = sext i32 %4 to i64
   %arrayidx4 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom3
   store i64 8, i64* %arrayidx4, align 8
   store i32* getelementptr inbounds ([256 x i32]* @intarray, i32 0, i64 190), i32** getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 0, i64 1), align 8
   store i32* getelementptr inbounds ([256 x i32]* @intarray, i32 0, i64 241), i32** getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 0, i64 1), align 8
-  %5 = load i32* %i, align 4
+  %5 = load i32, i32* %i, align 4
   %inc5 = add nsw i32 %5, 1
   store i32 %inc5, i32* %i, align 4
-  %6 = load i32* %i, align 4
+  %6 = load i32, i32* %i, align 4
   %idxprom6 = sext i32 %6 to i64
   %arrayidx7 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom6
   store i8* bitcast (i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 1) to i8*), i8** %arrayidx7, align 8
-  %7 = load i32* %i, align 4
+  %7 = load i32, i32* %i, align 4
   %idxprom8 = sext i32 %7 to i64
   %arrayidx9 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom8
   store i64 8, i64* %arrayidx9, align 8
-  %8 = load i32* %i, align 4
+  %8 = load i32, i32* %i, align 4
   %idxprom10 = sext i32 %8 to i64
   %arrayidx11 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom10
   store i64 8, i64* %arrayidx11, align 8
   store i64 -3866974208859106459, i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 1), align 8
   store i64 -185376695371304091, i64* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 1), align 8
-  %9 = load i32* %i, align 4
+  %9 = load i32, i32* %i, align 4
   %inc12 = add nsw i32 %9, 1
   store i32 %inc12, i32* %i, align 4
-  %10 = load i32* %i, align 4
+  %10 = load i32, i32* %i, align 4
   %idxprom13 = sext i32 %10 to i64
   %arrayidx14 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom13
   store i8* bitcast (i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 2) to i8*), i8** %arrayidx14, align 8
-  %11 = load i32* %i, align 4
+  %11 = load i32, i32* %i, align 4
   %idxprom15 = sext i32 %11 to i64
   %arrayidx16 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom15
   store i64 8, i64* %arrayidx16, align 8
-  %12 = load i32* %i, align 4
+  %12 = load i32, i32* %i, align 4
   %idxprom17 = sext i32 %12 to i64
   %arrayidx18 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom17
   store i64 8, i64* %arrayidx18, align 8
   store i64 -963638028680427187, i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 2), align 8
   store i64 7510542175772455554, i64* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 2), align 8
-  %13 = load i32* %i, align 4
+  %13 = load i32, i32* %i, align 4
   %inc19 = add nsw i32 %13, 1
   store i32 %inc19, i32* %i, align 4
-  %14 = load i32* %i, align 4
+  %14 = load i32, i32* %i, align 4
   %idxprom20 = sext i32 %14 to i64
   %arrayidx21 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom20
   store i8* bitcast (double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 3) to i8*), i8** %arrayidx21, align 8
-  %15 = load i32* %i, align 4
+  %15 = load i32, i32* %i, align 4
   %idxprom22 = sext i32 %15 to i64
   %arrayidx23 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom22
   store i64 8, i64* %arrayidx23, align 8
-  %16 = load i32* %i, align 4
+  %16 = load i32, i32* %i, align 4
   %idxprom24 = sext i32 %16 to i64
   %arrayidx25 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom24
   store i64 16, i64* %arrayidx25, align 8
   store double 0xC0F8783300000000, double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 3), align 16
   store double 0xC10DF3CCC0000000, double* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 3), align 16
-  %17 = load i32* %i, align 4
+  %17 = load i32, i32* %i, align 4
   %inc26 = add nsw i32 %17, 1
   store i32 %inc26, i32* %i, align 4
-  %18 = load i32* %i, align 4
+  %18 = load i32, i32* %i, align 4
   %idxprom27 = sext i32 %18 to i64
   %arrayidx28 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom27
   store i8* bitcast (i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 4) to i8*), i8** %arrayidx28, align 8
-  %19 = load i32* %i, align 4
+  %19 = load i32, i32* %i, align 4
   %idxprom29 = sext i32 %19 to i64
   %arrayidx30 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom29
   store i64 2, i64* %arrayidx30, align 8
-  %20 = load i32* %i, align 4
+  %20 = load i32, i32* %i, align 4
   %idxprom31 = sext i32 %20 to i64
   %arrayidx32 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom31
   store i64 2, i64* %arrayidx32, align 8
   store i16 -15897, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 4), align 2
   store i16 30935, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 4), align 2
-  %21 = load i32* %i, align 4
+  %21 = load i32, i32* %i, align 4
   %inc33 = add nsw i32 %21, 1
   store i32 %inc33, i32* %i, align 4
   store i32 -419541644, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 5), align 4
   store i32 2125926812, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 5), align 4
-  %22 = load i32* %j, align 4
+  %22 = load i32, i32* %j, align 4
   %inc34 = add nsw i32 %22, 1
   store i32 %inc34, i32* %j, align 4
-  %23 = load i32* %i, align 4
+  %23 = load i32, i32* %i, align 4
   %idxprom35 = sext i32 %23 to i64
   %arrayidx36 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom35
   store i8* bitcast (double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0) to i8*), i8** %arrayidx36, align 8
-  %24 = load i32* %i, align 4
+  %24 = load i32, i32* %i, align 4
   %idxprom37 = sext i32 %24 to i64
   %arrayidx38 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom37
   store i64 8, i64* %arrayidx38, align 8
-  %25 = load i32* %i, align 4
+  %25 = load i32, i32* %i, align 4
   %idxprom39 = sext i32 %25 to i64
   %arrayidx40 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom39
   store i64 8, i64* %arrayidx40, align 8
   store double 0xC0FC765780000000, double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0), align 8
   store double 0xC1025CD7A0000000, double* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 0, i64 0), align 8
-  %26 = load i32* %i, align 4
+  %26 = load i32, i32* %i, align 4
   %inc41 = add nsw i32 %26, 1
   store i32 %inc41, i32* %i, align 4
-  %bf.load = load i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
+  %bf.load = load i32, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
   %bf.clear = and i32 %bf.load, 7
   %bf.set = or i32 %bf.clear, 16
   store i32 %bf.set, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
-  %bf.load42 = load i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
+  %bf.load42 = load i32, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
   %bf.clear43 = and i32 %bf.load42, 7
   %bf.set44 = or i32 %bf.clear43, 24
   store i32 %bf.set44, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
-  %27 = load i32* %j, align 4
+  %27 = load i32, i32* %j, align 4
   %inc45 = add nsw i32 %27, 1
   store i32 %inc45, i32* %j, align 4
-  %bf.load46 = load i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
+  %bf.load46 = load i16, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
   %bf.clear47 = and i16 %bf.load46, 127
   store i16 %bf.clear47, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
-  %bf.load48 = load i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
+  %bf.load48 = load i16, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
   %bf.clear49 = and i16 %bf.load48, 127
   store i16 %bf.clear49, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
-  %28 = load i32* %j, align 4
+  %28 = load i32, i32* %j, align 4
   %inc50 = add nsw i32 %28, 1
   store i32 %inc50, i32* %j, align 4
-  %bf.load51 = load i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
+  %bf.load51 = load i32, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
   %bf.clear52 = and i32 %bf.load51, 63
   store i32 %bf.clear52, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
-  %bf.load53 = load i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
+  %bf.load53 = load i32, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
   %bf.clear54 = and i32 %bf.load53, 63
   %bf.set55 = or i32 %bf.clear54, 64
   store i32 %bf.set55, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
-  %29 = load i32* %j, align 4
+  %29 = load i32, i32* %j, align 4
   %inc56 = add nsw i32 %29, 1
   store i32 %inc56, i32* %j, align 4
-  %bf.load57 = load i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
+  %bf.load57 = load i24, i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
   %bf.clear58 = and i24 %bf.load57, 63
   store i24 %bf.clear58, i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
-  %bf.load59 = load i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
+  %bf.load59 = load i24, i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
   %bf.clear60 = and i24 %bf.load59, 63
   store i24 %bf.clear60, i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
-  %30 = load i32* %j, align 4
+  %30 = load i32, i32* %j, align 4
   %inc61 = add nsw i32 %30, 1
   store i32 %inc61, i32* %j, align 4
-  %31 = load i32* %i, align 4
+  %31 = load i32, i32* %i, align 4
   %idxprom62 = sext i32 %31 to i64
   %arrayidx63 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom62
   store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), i8** %arrayidx63, align 8
-  %32 = load i32* %i, align 4
+  %32 = load i32, i32* %i, align 4
   %idxprom64 = sext i32 %32 to i64
   %arrayidx65 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom64
   store i64 1, i64* %arrayidx65, align 8
-  %33 = load i32* %i, align 4
+  %33 = load i32, i32* %i, align 4
   %idxprom66 = sext i32 %33 to i64
   %arrayidx67 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom66
   store i64 1, i64* %arrayidx67, align 8
   store i8 -83, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), align 1
   store i8 -67, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 5), align 1
-  %34 = load i32* %i, align 4
+  %34 = load i32, i32* %i, align 4
   %inc68 = add nsw i32 %34, 1
   store i32 %inc68, i32* %i, align 4
-  %35 = load i32* %i, align 4
+  %35 = load i32, i32* %i, align 4
   %idxprom69 = sext i32 %35 to i64
   %arrayidx70 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom69
   store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), i8** %arrayidx70, align 8
-  %36 = load i32* %i, align 4
+  %36 = load i32, i32* %i, align 4
   %idxprom71 = sext i32 %36 to i64
   %arrayidx72 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom71
   store i64 1, i64* %arrayidx72, align 8
-  %37 = load i32* %i, align 4
+  %37 = load i32, i32* %i, align 4
   %idxprom73 = sext i32 %37 to i64
   %arrayidx74 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom73
   store i64 1, i64* %arrayidx74, align 8
   store i8 34, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), align 1
   store i8 64, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 1), align 1
-  %38 = load i32* %i, align 4
+  %38 = load i32, i32* %i, align 4
   %inc75 = add nsw i32 %38, 1
   store i32 %inc75, i32* %i, align 4
-  %39 = load i32* %i, align 4
+  %39 = load i32, i32* %i, align 4
   %idxprom76 = sext i32 %39 to i64
   %arrayidx77 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom76
   store i8* bitcast (i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3) to i8*), i8** %arrayidx77, align 8
-  %40 = load i32* %i, align 4
+  %40 = load i32, i32* %i, align 4
   %idxprom78 = sext i32 %40 to i64
   %arrayidx79 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom78
   store i64 4, i64* %arrayidx79, align 8
-  %41 = load i32* %i, align 4
+  %41 = load i32, i32* %i, align 4
   %idxprom80 = sext i32 %41 to i64
   %arrayidx81 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom80
   store i64 4, i64* %arrayidx81, align 8
   store i32 -3, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3), align 4
   store i32 -3, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 6, i64 3), align 4
-  %42 = load i32* %i, align 4
+  %42 = load i32, i32* %i, align 4
   %inc82 = add nsw i32 %42, 1
   store i32 %inc82, i32* %i, align 4
-  %43 = load i32* %i, align 4
+  %43 = load i32, i32* %i, align 4
   %idxprom83 = sext i32 %43 to i64
   %arrayidx84 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom83
   store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), i8** %arrayidx84, align 8
-  %44 = load i32* %i, align 4
+  %44 = load i32, i32* %i, align 4
   %idxprom85 = sext i32 %44 to i64
   %arrayidx86 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom85
   store i64 1, i64* %arrayidx86, align 8
-  %45 = load i32* %i, align 4
+  %45 = load i32, i32* %i, align 4
   %idxprom87 = sext i32 %45 to i64
   %arrayidx88 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom87
   store i64 1, i64* %arrayidx88, align 8
   store i8 106, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), align 1
   store i8 -102, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 7), align 1
-  %46 = load i32* %i, align 4
+  %46 = load i32, i32* %i, align 4
   %inc89 = add nsw i32 %46, 1
   store i32 %inc89, i32* %i, align 4
-  %47 = load i32* %i, align 4
+  %47 = load i32, i32* %i, align 4
   %idxprom90 = sext i32 %47 to i64
   %arrayidx91 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom90
   store i8* bitcast (i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 7) to i8*), i8** %arrayidx91, align 8
-  %48 = load i32* %i, align 4
+  %48 = load i32, i32* %i, align 4
   %idxprom92 = sext i32 %48 to i64
   %arrayidx93 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom92
   store i64 2, i64* %arrayidx93, align 8
-  %49 = load i32* %i, align 4
+  %49 = load i32, i32* %i, align 4
   %idxprom94 = sext i32 %49 to i64
   %arrayidx95 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom94
   store i64 2, i64* %arrayidx95, align 8
   store i16 29665, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 7), align 2
   store i16 7107, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 7), align 2
-  %50 = load i32* %i, align 4
+  %50 = load i32, i32* %i, align 4
   %inc96 = add nsw i32 %50, 1
   store i32 %inc96, i32* %i, align 4
-  %51 = load i32* %i, align 4
+  %51 = load i32, i32* %i, align 4
   %idxprom97 = sext i32 %51 to i64
   %arrayidx98 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom97
   store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 8), i8** %arrayidx98, align 8
-  %52 = load i32* %i, align 4
+  %52 = load i32, i32* %i, align 4
   %idxprom99 = sext i32 %52 to i64
   %arrayidx100 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom99
   store i64 1, i64* %arrayidx100, align 8
-  %53 = load i32* %i, align 4
+  %53 = load i32, i32* %i, align 4
   %idxprom101 = sext i32 %53 to i64
   %arrayidx102 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom101
   store i64 1, i64* %arrayidx102, align 8
   store i8 52, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 8), align 1
   store i8 -86, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 8), align 1
-  %54 = load i32* %i, align 4
+  %54 = load i32, i32* %i, align 4
   %inc103 = add nsw i32 %54, 1
   store i32 %inc103, i32* %i, align 4
-  %55 = load i32* %i, align 4
+  %55 = load i32, i32* %i, align 4
   %idxprom104 = sext i32 %55 to i64
   %arrayidx105 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom104
   store i8* bitcast (i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 9) to i8*), i8** %arrayidx105, align 8
-  %56 = load i32* %i, align 4
+  %56 = load i32, i32* %i, align 4
   %idxprom106 = sext i32 %56 to i64
   %arrayidx107 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom106
   store i64 4, i64* %arrayidx107, align 8
-  %57 = load i32* %i, align 4
+  %57 = load i32, i32* %i, align 4
   %idxprom108 = sext i32 %57 to i64
   %arrayidx109 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom108
   store i64 4, i64* %arrayidx109, align 8
   store i32 -54118453, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 9), align 4
   store i32 1668755823, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 9), align 4
-  %58 = load i32* %i, align 4
+  %58 = load i32, i32* %i, align 4
   %inc110 = add nsw i32 %58, 1
   store i32 %inc110, i32* %i, align 4
   store i32 %inc110, i32* %tmp
-  %59 = load i32* %tmp
-  %60 = load i32* %i, align 4
+  %59 = load i32, i32* %tmp
+  %60 = load i32, i32* %i, align 4
   store i32 %60, i32* getelementptr inbounds (%struct.Info* @info, i32 0, i32 0), align 4
-  %61 = load i32* %j, align 4
+  %61 = load i32, i32* %j, align 4
   store i32 %61, i32* getelementptr inbounds (%struct.Info* @info, i32 0, i32 1), align 4
   %62 = bitcast %struct.S1998* %agg.tmp111 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %62, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)

Modified: llvm/trunk/test/CodeGen/PowerPC/resolvefi-disp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/resolvefi-disp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/resolvefi-disp.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/resolvefi-disp.ll Fri Feb 27 15:17:42 2015
@@ -43,19 +43,19 @@ entry:
   call void @llvm.memset.p0i8.i64(i8* %8, i8 0, i64 11104, i32 32, i1 false)
   %b = getelementptr inbounds %struct.S2760, %struct.S2760* %arg0, i32 0, i32 1
   %g = getelementptr inbounds %struct.anon, %struct.anon* %b, i32 0, i32 1
-  %9 = load i64* %g, align 8
-  %10 = load i64* getelementptr inbounds (%struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
+  %9 = load i64, i64* %g, align 8
+  %10 = load i64, i64* getelementptr inbounds (%struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
   %cmp = icmp ne i64 %9, %10
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %11 = load i32* @fails, align 4
+  %11 = load i32, i32* @fails, align 4
   %inc = add nsw i32 %11, 1
   store i32 %inc, i32* @fails, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  %12 = load i64* getelementptr inbounds (%struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
+  %12 = load i64, i64* getelementptr inbounds (%struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
   %b3 = getelementptr inbounds %struct.S2760, %struct.S2760* %ret, i32 0, i32 1
   %g4 = getelementptr inbounds %struct.anon, %struct.anon* %b3, i32 0, i32 1
   store i64 %12, i64* %g4, align 8

Modified: llvm/trunk/test/CodeGen/PowerPC/return-val-i128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/return-val-i128.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/return-val-i128.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/return-val-i128.ll Fri Feb 27 15:17:42 2015
@@ -7,29 +7,29 @@ entry:
 	%tmp = alloca i128, align 16		; <i128*> [#uses=3]
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
 	store float %a, float* %a_addr
-	%tmp1 = load float* %a_addr, align 4		; <float> [#uses=1]
+	%tmp1 = load float, float* %a_addr, align 4		; <float> [#uses=1]
 	%tmp2 = fcmp olt float %tmp1, 0.000000e+00		; <i1> [#uses=1]
 	%tmp23 = zext i1 %tmp2 to i8		; <i8> [#uses=1]
 	%toBool = icmp ne i8 %tmp23, 0		; <i1> [#uses=1]
 	br i1 %toBool, label %bb, label %bb8
 bb:		; preds = %entry
-	%tmp4 = load float* %a_addr, align 4		; <float> [#uses=1]
+	%tmp4 = load float, float* %a_addr, align 4		; <float> [#uses=1]
 	%tmp5 = fsub float -0.000000e+00, %tmp4		; <float> [#uses=1]
 	%tmp6 = call i128 @__fixunssfDI( float %tmp5 ) nounwind 		; <i128> [#uses=1]
 	%tmp7 = sub i128 0, %tmp6		; <i128> [#uses=1]
 	store i128 %tmp7, i128* %tmp, align 16
 	br label %bb11
 bb8:		; preds = %entry
-	%tmp9 = load float* %a_addr, align 4		; <float> [#uses=1]
+	%tmp9 = load float, float* %a_addr, align 4		; <float> [#uses=1]
 	%tmp10 = call i128 @__fixunssfDI( float %tmp9 ) nounwind 		; <i128> [#uses=1]
 	store i128 %tmp10, i128* %tmp, align 16
 	br label %bb11
 bb11:		; preds = %bb8, %bb
-	%tmp12 = load i128* %tmp, align 16		; <i128> [#uses=1]
+	%tmp12 = load i128, i128* %tmp, align 16		; <i128> [#uses=1]
 	store i128 %tmp12, i128* %retval, align 16
 	br label %return
 return:		; preds = %bb11
-	%retval13 = load i128* %retval		; <i128> [#uses=1]
+	%retval13 = load i128, i128* %retval		; <i128> [#uses=1]
 	ret i128 %retval13
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/rlwimi-and.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/rlwimi-and.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/rlwimi-and.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/rlwimi-and.ll Fri Feb 27 15:17:42 2015
@@ -16,11 +16,11 @@ codeRepl12:
   unreachable
 
 codeRepl17:                                       ; preds = %codeRepl4
-  %0 = load i8* undef, align 2
+  %0 = load i8, i8* undef, align 2
   %1 = and i8 %0, 1
   %not.tobool.i.i.i = icmp eq i8 %1, 0
   %2 = select i1 %not.tobool.i.i.i, i16 0, i16 256
-  %3 = load i8* undef, align 1
+  %3 = load i8, i8* undef, align 1
   %4 = and i8 %3, 1
   %not.tobool.i.1.i.i = icmp eq i8 %4, 0
   %rvml38.sroa.1.1.insert.ext = select i1 %not.tobool.i.1.i.i, i16 0, i16 1

Modified: llvm/trunk/test/CodeGen/PowerPC/rlwimi-commute.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/rlwimi-commute.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/rlwimi-commute.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/rlwimi-commute.ll Fri Feb 27 15:17:42 2015
@@ -4,8 +4,8 @@
 ; Make sure there is no register-register copies here.
 
 define void @test1(i32* %A, i32* %B, i32* %D, i32* %E) {
-	%A.upgrd.1 = load i32* %A		; <i32> [#uses=2]
-	%B.upgrd.2 = load i32* %B		; <i32> [#uses=1]
+	%A.upgrd.1 = load i32, i32* %A		; <i32> [#uses=2]
+	%B.upgrd.2 = load i32, i32* %B		; <i32> [#uses=1]
 	%X = and i32 %A.upgrd.1, 15		; <i32> [#uses=1]
 	%Y = and i32 %B.upgrd.2, -16		; <i32> [#uses=1]
 	%Z = or i32 %X, %Y		; <i32> [#uses=1]
@@ -15,8 +15,8 @@ define void @test1(i32* %A, i32* %B, i32
 }
 
 define void @test2(i32* %A, i32* %B, i32* %D, i32* %E) {
-	%A.upgrd.3 = load i32* %A		; <i32> [#uses=1]
-	%B.upgrd.4 = load i32* %B		; <i32> [#uses=2]
+	%A.upgrd.3 = load i32, i32* %A		; <i32> [#uses=1]
+	%B.upgrd.4 = load i32, i32* %B		; <i32> [#uses=2]
 	%X = and i32 %A.upgrd.3, 15		; <i32> [#uses=1]
 	%Y = and i32 %B.upgrd.4, -16		; <i32> [#uses=1]
 	%Z = or i32 %X, %Y		; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/PowerPC/rlwimi-dyn-and.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/rlwimi-dyn-and.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/rlwimi-dyn-and.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/rlwimi-dyn-and.ll Fri Feb 27 15:17:42 2015
@@ -4,13 +4,13 @@ target triple = "powerpc64-unknown-linux
 
 define i32 @test1() #0 {
 entry:
-  %conv67.reload = load i32* undef
+  %conv67.reload = load i32, i32* undef
   %const = bitcast i32 65535 to i32
   br label %next
 
 next:
   %shl161 = shl nuw nsw i32 %conv67.reload, 15
-  %0 = load i8* undef, align 1
+  %0 = load i8, i8* undef, align 1
   %conv169 = zext i8 %0 to i32
   %shl170 = shl nuw nsw i32 %conv169, 7
   %const_mat = add i32 %const, -32767
@@ -25,13 +25,13 @@ next:
 
 define i32 @test2() #0 {
 entry:
-  %conv67.reload = load i32* undef
+  %conv67.reload = load i32, i32* undef
   %const = bitcast i32 65535 to i32
   br label %next
 
 next:
   %shl161 = shl nuw nsw i32 %conv67.reload, 15
-  %0 = load i8* undef, align 1
+  %0 = load i8, i8* undef, align 1
   %conv169 = zext i8 %0 to i32
   %shl170 = shl nuw nsw i32 %conv169, 7
   %shl161.masked = and i32 %shl161, 32768

Modified: llvm/trunk/test/CodeGen/PowerPC/rm-zext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/rm-zext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/rm-zext.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/rm-zext.ll Fri Feb 27 15:17:42 2015
@@ -45,7 +45,7 @@ declare i32 @llvm.bswap.i32(i32) #0
 ; Function Attrs: nounwind readonly
 define zeroext i32 @bs32(i32* nocapture readonly %x) #1 {
 entry:
-  %0 = load i32* %x, align 4
+  %0 = load i32, i32* %x, align 4
   %1 = tail call i32 @llvm.bswap.i32(i32 %0)
   ret i32 %1
 
@@ -57,7 +57,7 @@ entry:
 ; Function Attrs: nounwind readonly
 define zeroext i16 @bs16(i16* nocapture readonly %x) #1 {
 entry:
-  %0 = load i16* %x, align 2
+  %0 = load i16, i16* %x, align 2
   %1 = tail call i16 @llvm.bswap.i16(i16 %0)
   ret i16 %1
 

Modified: llvm/trunk/test/CodeGen/PowerPC/rs-undef-use.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/rs-undef-use.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/rs-undef-use.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/rs-undef-use.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ CF82.critedge:
   br label %CF82
 
 CF82:                                             ; preds = %CF82, %CF82.critedge
-  %L17 = load i8* %0
+  %L17 = load i8, i8* %0
   %E18 = extractelement <2 x i64> undef, i32 0
   %PC = bitcast <2 x i1>* %A3 to i64*
   br i1 undef, label %CF82, label %CF84.critedge
@@ -25,13 +25,13 @@ CF84.critedge:
   br label %CF84
 
 CF84:                                             ; preds = %CF84, %CF84.critedge
-  %L40 = load i64* %PC
+  %L40 = load i64, i64* %PC
   store i64 -1, i64* %PC
   %Sl46 = select i1 undef, i1 undef, i1 false
   br i1 %Sl46, label %CF84, label %CF85
 
 CF85:                                             ; preds = %CF84
-  %L47 = load i64* %PC
+  %L47 = load i64, i64* %PC
   store i64 %E18, i64* %PC
   %PC52 = bitcast <8 x i32>* %A2 to ppc_fp128*
   store ppc_fp128 0xM4D436562A0416DE00000000000000000, ppc_fp128* %PC52

Modified: llvm/trunk/test/CodeGen/PowerPC/s000-alias-misched.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/s000-alias-misched.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/s000-alias-misched.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/s000-alias-misched.ll Fri Feb 27 15:17:42 2015
@@ -37,7 +37,7 @@ for.body4:
   %arrayidx = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv
   %arrayidx6 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv
   %0 = bitcast double* %arrayidx to <1 x double>*
-  %1 = load <1 x double>* %0, align 32
+  %1 = load <1 x double>, <1 x double>* %0, align 32
   %add = fadd <1 x double> %1, <double 1.000000e+00>
   %2 = bitcast double* %arrayidx6 to <1 x double>*
   store <1 x double> %add, <1 x double>* %2, align 32
@@ -45,7 +45,7 @@ for.body4:
   %arrayidx.4 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.322
   %arrayidx6.4 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.322
   %3 = bitcast double* %arrayidx.4 to <1 x double>*
-  %4 = load <1 x double>* %3, align 32
+  %4 = load <1 x double>, <1 x double>* %3, align 32
   %add.4 = fadd <1 x double> %4, <double 1.000000e+00>
   %5 = bitcast double* %arrayidx6.4 to <1 x double>*
   store <1 x double> %add.4, <1 x double>* %5, align 32
@@ -53,7 +53,7 @@ for.body4:
   %arrayidx.8 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.726
   %arrayidx6.8 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.726
   %6 = bitcast double* %arrayidx.8 to <1 x double>*
-  %7 = load <1 x double>* %6, align 32
+  %7 = load <1 x double>, <1 x double>* %6, align 32
   %add.8 = fadd <1 x double> %7, <double 1.000000e+00>
   %8 = bitcast double* %arrayidx6.8 to <1 x double>*
   store <1 x double> %add.8, <1 x double>* %8, align 32
@@ -61,7 +61,7 @@ for.body4:
   %arrayidx.12 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1130
   %arrayidx6.12 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1130
   %9 = bitcast double* %arrayidx.12 to <1 x double>*
-  %10 = load <1 x double>* %9, align 32
+  %10 = load <1 x double>, <1 x double>* %9, align 32
   %add.12 = fadd <1 x double> %10, <double 1.000000e+00>
   %11 = bitcast double* %arrayidx6.12 to <1 x double>*
   store <1 x double> %add.12, <1 x double>* %11, align 32

Modified: llvm/trunk/test/CodeGen/PowerPC/sjlj.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/sjlj.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/sjlj.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/sjlj.ll Fri Feb 27 15:17:42 2015
@@ -55,7 +55,7 @@ if.end:
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %3 = load i32* %retval
+  %3 = load i32, i32* %retval
   ret i32 %3
 
 ; FIXME: We should be saving VRSAVE on Darwin, but we're not!
@@ -128,7 +128,7 @@ if.end:
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %3 = load i32* %retval
+  %3 = load i32, i32* %retval
   ret i32 %3
 
 ; CHECK: @main2

Modified: llvm/trunk/test/CodeGen/PowerPC/small-arguments.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/small-arguments.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/small-arguments.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/small-arguments.ll Fri Feb 27 15:17:42 2015
@@ -26,14 +26,14 @@ UnifiedReturnBlock:
 }
 
 define i32 @test4(i16* %P) {
-        %tmp.1 = load i16* %P
+        %tmp.1 = load i16, i16* %P
         %tmp.2 = zext i16 %tmp.1 to i32
         %tmp.3 = and i32 %tmp.2, 255
         ret i32 %tmp.3
 }
 
 define i32 @test5(i16* %P) {
-        %tmp.1 = load i16* %P
+        %tmp.1 = load i16, i16* %P
         %tmp.2 = bitcast i16 %tmp.1 to i16
         %tmp.3 = zext i16 %tmp.2 to i32
         %tmp.4 = and i32 %tmp.3, 255
@@ -41,7 +41,7 @@ define i32 @test5(i16* %P) {
 }
 
 define i32 @test6(i32* %P) {
-        %tmp.1 = load i32* %P
+        %tmp.1 = load i32, i32* %P
         %tmp.2 = and i32 %tmp.1, 255
         ret i32 %tmp.2
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/split-index-tc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/split-index-tc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/split-index-tc.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/split-index-tc.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ define void @_ZN4llvm17ScheduleDAGInstrs
 ; CHECK-NOT: lhzu
 
 entry:
-  %0 = load %"class.llvm::MachineOperand"** undef, align 8
+  %0 = load %"class.llvm::MachineOperand"*, %"class.llvm::MachineOperand"** undef, align 8
   br i1 undef, label %_ZNK4llvm14MachineOperand6getRegEv.exit, label %cond.false.i123
 
 cond.false.i123:                                  ; preds = %_ZN4llvm12MachineInstr10getOperandEj.exit
@@ -22,7 +22,7 @@ cond.false.i123:
 _ZNK4llvm14MachineOperand6getRegEv.exit:          ; preds = %_ZN4llvm12MachineInstr10getOperandEj.exit
   %IsDef.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %0, i64 undef, i32 1
   %1 = bitcast [3 x i8]* %IsDef.i to i24*
-  %bf.load.i = load i24* %1, align 1
+  %bf.load.i = load i24, i24* %1, align 1
   %2 = and i24 %bf.load.i, 128
   br i1 undef, label %for.cond.cleanup, label %for.body.lr.ph
 
@@ -61,7 +61,7 @@ cond.false.i257:
   unreachable
 
 _ZNK4llvm14MachineOperand6isDeadEv.exit262:       ; preds = %if.end55
-  %bf.load.i259 = load i24* %1, align 1
+  %bf.load.i259 = load i24, i24* %1, align 1
   br i1 undef, label %if.then57, label %if.else59
 
 if.then57:                                        ; preds = %_ZNK4llvm14MachineOperand6isDeadEv.exit262

Modified: llvm/trunk/test/CodeGen/PowerPC/stack-protector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/stack-protector.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/stack-protector.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/stack-protector.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ entry:
   %"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
 	store i8* %a, i8** %a_addr
 	%buf1 = bitcast [8 x i8]* %buf to i8*		; <i8*> [#uses=1]
-	%0 = load i8** %a_addr, align 4		; <i8*> [#uses=1]
+	%0 = load i8*, i8** %a_addr, align 4		; <i8*> [#uses=1]
 	%1 = call i8* @strcpy(i8* %buf1, i8* %0) nounwind		; <i8*> [#uses=0]
   %buf2 = bitcast [8 x i8]* %buf to i8*		; <i8*> [#uses=1]
 	%2 = call i32 (i8*, ...)* @printf(i8* getelementptr ([11 x i8]* @"\01LC", i32 0, i32 0), i8* %buf2) nounwind		; <i32> [#uses=0]

Modified: llvm/trunk/test/CodeGen/PowerPC/stack-realign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/stack-realign.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/stack-realign.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/stack-realign.ll Fri Feb 27 15:17:42 2015
@@ -15,12 +15,12 @@ define void @goo(%struct.s* byval nocapt
 entry:
   %x = alloca [2 x i32], align 32
   %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
-  %0 = load i32* %a1, align 4
+  %0 = load i32, i32* %a1, align 4
   %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
   store i32 %0, i32* %arrayidx, align 32
   %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
-  %1 = load i32* %b, align 4
-  %2 = load i32* @barbaz, align 4
+  %1 = load i32, i32* %b, align 4
+  %2 = load i32, i32* @barbaz, align 4
   %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
   store i32 %2, i32* %arrayidx2, align 4
   call void @bar(i32* %arrayidx)
@@ -99,11 +99,11 @@ define void @hoo(%struct.s* byval nocapt
 entry:
   %x = alloca [200000 x i32], align 32
   %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
-  %0 = load i32* %a1, align 4
+  %0 = load i32, i32* %a1, align 4
   %arrayidx = getelementptr inbounds [200000 x i32], [200000 x i32]* %x, i64 0, i64 0
   store i32 %0, i32* %arrayidx, align 32
   %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
-  %1 = load i32* %b, align 4
+  %1 = load i32, i32* %b, align 4
   %arrayidx2 = getelementptr inbounds [200000 x i32], [200000 x i32]* %x, i64 0, i64 1
   store i32 %1, i32* %arrayidx2, align 4
   call void @bar(i32* %arrayidx)
@@ -160,11 +160,11 @@ define void @loo(%struct.s* byval nocapt
 entry:
   %x = alloca [2 x i32], align 32
   %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
-  %0 = load i32* %a1, align 4
+  %0 = load i32, i32* %a1, align 4
   %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
   store i32 %0, i32* %arrayidx, align 32
   %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
-  %1 = load i32* %b, align 4
+  %1 = load i32, i32* %b, align 4
   %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
   store i32 %1, i32* %arrayidx2, align 4
   call void @bar(i32* %arrayidx)

Modified: llvm/trunk/test/CodeGen/PowerPC/std-unal-fi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/std-unal-fi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/std-unal-fi.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/std-unal-fi.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ BB:
   br label %CF
 
 CF:                                               ; preds = %CF80, %CF, %BB
-  %L5 = load i64* undef
+  %L5 = load i64, i64* undef
   store i8 %0, i8* %A4
   %Shuff7 = shufflevector <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> %Shuff, <16 x i32> <i32 28, i32 30, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 undef, i32 20, i32 22, i32 24, i32 26>
   %PC10 = bitcast i8* %A4 to ppc_fp128*
@@ -19,13 +19,13 @@ CF77:
   br i1 undef, label %CF77, label %CF82
 
 CF82:                                             ; preds = %CF82, %CF77
-  %L19 = load i64* undef
+  %L19 = load i64, i64* undef
   store <1 x ppc_fp128> zeroinitializer, <1 x ppc_fp128>* %A
   store i8 -65, i8* %A4
   br i1 undef, label %CF82, label %CF83
 
 CF83:                                             ; preds = %CF82
-  %L34 = load i64* undef
+  %L34 = load i64, i64* undef
   br i1 undef, label %CF77, label %CF81
 
 CF81:                                             ; preds = %CF83
@@ -54,7 +54,7 @@ define void @autogen_SD88042(i8*, i32*,
 BB:
   %A4 = alloca <2 x i1>
   %A = alloca <16 x float>
-  %L = load i8* %0
+  %L = load i8, i8* %0
   %Sl = select i1 false, <16 x float>* %A, <16 x float>* %A
   %PC = bitcast <2 x i1>* %A4 to i64*
   %Sl27 = select i1 false, i8 undef, i8 %L
@@ -66,7 +66,7 @@ CF:
 
 CF77:                                             ; preds = %CF80, %CF77, %CF
   store <16 x float> zeroinitializer, <16 x float>* %Sl
-  %L58 = load i32* %PC33
+  %L58 = load i32, i32* %PC33
   store i8 0, i8* %0
   br i1 undef, label %CF77, label %CF80
 
@@ -90,7 +90,7 @@ BB:
   %A1 = alloca i1
   %I8 = insertelement <1 x i32> <i32 -1>, i32 454855, i32 0
   %Cmp = icmp ult <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, undef
-  %L10 = load i64* %2
+  %L10 = load i64, i64* %2
   %E11 = extractelement <4 x i1> %Cmp, i32 2
   br label %CF72
 
@@ -103,7 +103,7 @@ CF72:
 CF74:                                             ; preds = %CF72
   store i8 0, i8* %0
   %PC = bitcast i1* %A1 to i64*
-  %L31 = load i64* %PC
+  %L31 = load i64, i64* %PC
   store i64 477323, i64* %PC
   %Sl37 = select i1 false, i32* undef, i32* %1
   %Cmp38 = icmp ugt i1 undef, undef

Modified: llvm/trunk/test/CodeGen/PowerPC/store-load-fwd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/store-load-fwd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/store-load-fwd.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/store-load-fwd.ll Fri Feb 27 15:17:42 2015
@@ -2,7 +2,7 @@
 
 define i32 @test(i32* %P) {
         store i32 1, i32* %P
-        %V = load i32* %P               ; <i32> [#uses=1]
+        %V = load i32, i32* %P               ; <i32> [#uses=1]
         ret i32 %V
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/structsinmem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/structsinmem.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/structsinmem.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/structsinmem.ll Fri Feb 27 15:17:42 2015
@@ -89,27 +89,27 @@ entry:
   store i32 %z7, i32* %z7.addr, align 4
   store i32 %z8, i32* %z8.addr, align 4
   %a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0
-  %0 = load i8* %a, align 1
+  %0 = load i8, i8* %a, align 1
   %conv = zext i8 %0 to i32
   %a1 = getelementptr inbounds %struct.s2, %struct.s2* %v2, i32 0, i32 0
-  %1 = load i16* %a1, align 2
+  %1 = load i16, i16* %a1, align 2
   %conv2 = sext i16 %1 to i32
   %add = add nsw i32 %conv, %conv2
   %a3 = getelementptr inbounds %struct.s3, %struct.s3* %v3, i32 0, i32 0
-  %2 = load i16* %a3, align 2
+  %2 = load i16, i16* %a3, align 2
   %conv4 = sext i16 %2 to i32
   %add5 = add nsw i32 %add, %conv4
   %a6 = getelementptr inbounds %struct.s4, %struct.s4* %v4, i32 0, i32 0
-  %3 = load i32* %a6, align 4
+  %3 = load i32, i32* %a6, align 4
   %add7 = add nsw i32 %add5, %3
   %a8 = getelementptr inbounds %struct.s5, %struct.s5* %v5, i32 0, i32 0
-  %4 = load i32* %a8, align 4
+  %4 = load i32, i32* %a8, align 4
   %add9 = add nsw i32 %add7, %4
   %a10 = getelementptr inbounds %struct.s6, %struct.s6* %v6, i32 0, i32 0
-  %5 = load i32* %a10, align 4
+  %5 = load i32, i32* %a10, align 4
   %add11 = add nsw i32 %add9, %5
   %a12 = getelementptr inbounds %struct.s7, %struct.s7* %v7, i32 0, i32 0
-  %6 = load i32* %a12, align 4
+  %6 = load i32, i32* %a12, align 4
   %add13 = add nsw i32 %add11, %6
   ret i32 %add13
 
@@ -181,27 +181,27 @@ entry:
   store i32 %z7, i32* %z7.addr, align 4
   store i32 %z8, i32* %z8.addr, align 4
   %a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0
-  %0 = load i8* %a, align 1
+  %0 = load i8, i8* %a, align 1
   %conv = zext i8 %0 to i32
   %a1 = getelementptr inbounds %struct.t2, %struct.t2* %v2, i32 0, i32 0
-  %1 = load i16* %a1, align 1
+  %1 = load i16, i16* %a1, align 1
   %conv2 = sext i16 %1 to i32
   %add = add nsw i32 %conv, %conv2
   %a3 = getelementptr inbounds %struct.t3, %struct.t3* %v3, i32 0, i32 0
-  %2 = load i16* %a3, align 1
+  %2 = load i16, i16* %a3, align 1
   %conv4 = sext i16 %2 to i32
   %add5 = add nsw i32 %add, %conv4
   %a6 = getelementptr inbounds %struct.t4, %struct.t4* %v4, i32 0, i32 0
-  %3 = load i32* %a6, align 1
+  %3 = load i32, i32* %a6, align 1
   %add7 = add nsw i32 %add5, %3
   %a8 = getelementptr inbounds %struct.t5, %struct.t5* %v5, i32 0, i32 0
-  %4 = load i32* %a8, align 1
+  %4 = load i32, i32* %a8, align 1
   %add9 = add nsw i32 %add7, %4
   %a10 = getelementptr inbounds %struct.t6, %struct.t6* %v6, i32 0, i32 0
-  %5 = load i32* %a10, align 1
+  %5 = load i32, i32* %a10, align 1
   %add11 = add nsw i32 %add9, %5
   %a12 = getelementptr inbounds %struct.t7, %struct.t7* %v7, i32 0, i32 0
-  %6 = load i32* %a12, align 1
+  %6 = load i32, i32* %a12, align 1
   %add13 = add nsw i32 %add11, %6
   ret i32 %add13
 

Modified: llvm/trunk/test/CodeGen/PowerPC/structsinregs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/structsinregs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/structsinregs.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/structsinregs.ll Fri Feb 27 15:17:42 2015
@@ -73,27 +73,27 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(
 define internal i32 @callee1(%struct.s1* byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind {
 entry:
   %a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0
-  %0 = load i8* %a, align 1
+  %0 = load i8, i8* %a, align 1
   %conv = zext i8 %0 to i32
   %a1 = getelementptr inbounds %struct.s2, %struct.s2* %v2, i32 0, i32 0
-  %1 = load i16* %a1, align 2
+  %1 = load i16, i16* %a1, align 2
   %conv2 = sext i16 %1 to i32
   %add = add nsw i32 %conv, %conv2
   %a3 = getelementptr inbounds %struct.s3, %struct.s3* %v3, i32 0, i32 0
-  %2 = load i16* %a3, align 2
+  %2 = load i16, i16* %a3, align 2
   %conv4 = sext i16 %2 to i32
   %add5 = add nsw i32 %add, %conv4
   %a6 = getelementptr inbounds %struct.s4, %struct.s4* %v4, i32 0, i32 0
-  %3 = load i32* %a6, align 4
+  %3 = load i32, i32* %a6, align 4
   %add7 = add nsw i32 %add5, %3
   %a8 = getelementptr inbounds %struct.s5, %struct.s5* %v5, i32 0, i32 0
-  %4 = load i32* %a8, align 4
+  %4 = load i32, i32* %a8, align 4
   %add9 = add nsw i32 %add7, %4
   %a10 = getelementptr inbounds %struct.s6, %struct.s6* %v6, i32 0, i32 0
-  %5 = load i32* %a10, align 4
+  %5 = load i32, i32* %a10, align 4
   %add11 = add nsw i32 %add9, %5
   %a12 = getelementptr inbounds %struct.s7, %struct.s7* %v7, i32 0, i32 0
-  %6 = load i32* %a12, align 4
+  %6 = load i32, i32* %a12, align 4
   %add13 = add nsw i32 %add11, %6
   ret i32 %add13
 
@@ -160,27 +160,27 @@ entry:
 define internal i32 @callee2(%struct.t1* byval %v1, %struct.t2* byval %v2, %struct.t3* byval %v3, %struct.t4* byval %v4, %struct.t5* byval %v5, %struct.t6* byval %v6, %struct.t7* byval %v7) nounwind {
 entry:
   %a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0
-  %0 = load i8* %a, align 1
+  %0 = load i8, i8* %a, align 1
   %conv = zext i8 %0 to i32
   %a1 = getelementptr inbounds %struct.t2, %struct.t2* %v2, i32 0, i32 0
-  %1 = load i16* %a1, align 1
+  %1 = load i16, i16* %a1, align 1
   %conv2 = sext i16 %1 to i32
   %add = add nsw i32 %conv, %conv2
   %a3 = getelementptr inbounds %struct.t3, %struct.t3* %v3, i32 0, i32 0
-  %2 = load i16* %a3, align 1
+  %2 = load i16, i16* %a3, align 1
   %conv4 = sext i16 %2 to i32
   %add5 = add nsw i32 %add, %conv4
   %a6 = getelementptr inbounds %struct.t4, %struct.t4* %v4, i32 0, i32 0
-  %3 = load i32* %a6, align 1
+  %3 = load i32, i32* %a6, align 1
   %add7 = add nsw i32 %add5, %3
   %a8 = getelementptr inbounds %struct.t5, %struct.t5* %v5, i32 0, i32 0
-  %4 = load i32* %a8, align 1
+  %4 = load i32, i32* %a8, align 1
   %add9 = add nsw i32 %add7, %4
   %a10 = getelementptr inbounds %struct.t6, %struct.t6* %v6, i32 0, i32 0
-  %5 = load i32* %a10, align 1
+  %5 = load i32, i32* %a10, align 1
   %add11 = add nsw i32 %add9, %5
   %a12 = getelementptr inbounds %struct.t7, %struct.t7* %v7, i32 0, i32 0
-  %6 = load i32* %a12, align 1
+  %6 = load i32, i32* %a12, align 1
   %add13 = add nsw i32 %add11, %6
   ret i32 %add13
 

Modified: llvm/trunk/test/CodeGen/PowerPC/subreg-postra-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/subreg-postra-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/subreg-postra-2.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/subreg-postra-2.ll Fri Feb 27 15:17:42 2015
@@ -134,7 +134,7 @@ while.body392.lr.ph:
   br label %while.body392
 
 while.body392:                                    ; preds = %wait_on_buffer.exit1319, %while.body392.lr.ph
-  %0 = load i8** undef, align 8
+  %0 = load i8*, i8** undef, align 8
   %add.ptr399 = getelementptr inbounds i8, i8* %0, i64 -72
   %b_state.i.i1314 = bitcast i8* %add.ptr399 to i64*
   %tobool.i1316 = icmp eq i64 undef, 0
@@ -144,7 +144,7 @@ if.then.i1317:
   unreachable
 
 wait_on_buffer.exit1319:                          ; preds = %while.body392
-  %1 = load volatile i64* %b_state.i.i1314, align 8
+  %1 = load volatile i64, i64* %b_state.i.i1314, align 8
   %conv.i.i1322 = and i64 %1, 1
   %lnot404 = icmp eq i64 %conv.i.i1322, 0
   %.err.4 = select i1 %lnot404, i32 -5, i32 undef

Modified: llvm/trunk/test/CodeGen/PowerPC/subreg-postra.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/subreg-postra.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/subreg-postra.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/subreg-postra.ll Fri Feb 27 15:17:42 2015
@@ -120,7 +120,7 @@ while.body392.lr.ph:
   br label %while.body392
 
 while.body392:                                    ; preds = %wait_on_buffer.exit1319, %while.body392.lr.ph
-  %0 = load i8** undef, align 8
+  %0 = load i8*, i8** undef, align 8
   %add.ptr399 = getelementptr inbounds i8, i8* %0, i64 -72
   %b_state.i.i1314 = bitcast i8* %add.ptr399 to i64*
   %tobool.i1316 = icmp eq i64 undef, 0
@@ -130,13 +130,13 @@ if.then.i1317:
   unreachable
 
 wait_on_buffer.exit1319:                          ; preds = %while.body392
-  %1 = load volatile i64* %b_state.i.i1314, align 8
+  %1 = load volatile i64, i64* %b_state.i.i1314, align 8
   %conv.i.i1322 = and i64 %1, 1
   %lnot404 = icmp eq i64 %conv.i.i1322, 0
   %.err.4 = select i1 %lnot404, i32 -5, i32 undef
   %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(i64* %b_state.i.i1314, i64 262144, i64* %b_state.i.i1314, i64* %b_state.i.i1314) #1
   %prev.i.i.i1325 = getelementptr inbounds i8, i8* %0, i64 8
-  %3 = load i32** null, align 8
+  %3 = load i32*, i32** null, align 8
   store i32* %3, i32** undef, align 8
   call void @__brelse(i32* undef) #1
   br i1 undef, label %while.end418, label %while.body392

Modified: llvm/trunk/test/CodeGen/PowerPC/subsumes-pred-regs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/subsumes-pred-regs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/subsumes-pred-regs.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/subsumes-pred-regs.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ if.then:
   br i1 undef, label %return, label %if.end.i24
 
 if.end.i24:                                       ; preds = %if.then
-  %0 = load i32* undef, align 4
+  %0 = load i32, i32* undef, align 4
   %lnot.i.i16.i23 = icmp eq i32 %0, 0
   br i1 %lnot.i.i16.i23, label %if.end7.i37, label %test.exit27.i34
 

Modified: llvm/trunk/test/CodeGen/PowerPC/tls-cse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/tls-cse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/tls-cse.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/tls-cse.ll Fri Feb 27 15:17:42 2015
@@ -25,7 +25,7 @@ define void @_ZN4llvm21PrettyStackTraceE
 entry:
   %0 = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry", %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 0
   store i32 (...)** bitcast (i8** getelementptr inbounds ([5 x i8*]* @_ZTVN4llvm21PrettyStackTraceEntryE, i64 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8
-  %1 = load %"class.llvm::PrettyStackTraceEntry"** @_ZL20PrettyStackTraceHead, align 8
+  %1 = load %"class.llvm::PrettyStackTraceEntry"*, %"class.llvm::PrettyStackTraceEntry"** @_ZL20PrettyStackTraceHead, align 8
   %cmp.i = icmp eq %"class.llvm::PrettyStackTraceEntry"* %1, %this
   br i1 %cmp.i, label %_ZN4llvm21PrettyStackTraceEntryD2Ev.exit, label %cond.false.i
 
@@ -36,7 +36,7 @@ cond.false.i:
 _ZN4llvm21PrettyStackTraceEntryD2Ev.exit:         ; preds = %entry
   %NextEntry.i.i = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry", %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 1
   %2 = bitcast %"class.llvm::PrettyStackTraceEntry"** %NextEntry.i.i to i64*
-  %3 = load i64* %2, align 8
+  %3 = load i64, i64* %2, align 8
   store i64 %3, i64* bitcast (%"class.llvm::PrettyStackTraceEntry"** @_ZL20PrettyStackTraceHead to i64*), align 8
   %4 = bitcast %"class.llvm::PrettyStackTraceEntry"* %this to i8*
   tail call void @_ZdlPv(i8* %4)

Modified: llvm/trunk/test/CodeGen/PowerPC/tls-pic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/tls-pic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/tls-pic.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/tls-pic.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ define signext i32 @main() nounwind {
 entry:
   %retval = alloca i32, align 4
   store i32 0, i32* %retval
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
   ret i32 %0
 }
 
@@ -55,7 +55,7 @@ define signext i32 @main2() nounwind {
 entry:
   %retval = alloca i32, align 4
   store i32 0, i32* %retval
-  %0 = load i32* @a2, align 4
+  %0 = load i32, i32* @a2, align 4
   ret i32 %0
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/tls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/tls.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/tls.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/tls.ll Fri Feb 27 15:17:42 2015
@@ -30,7 +30,7 @@ define signext i32 @main2() nounwind {
 entry:
   %retval = alloca i32, align 4
   store i32 0, i32* %retval
-  %0 = load i32* @a2, align 4
+  %0 = load i32, i32* @a2, align 4
   ret i32 %0
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/toc-load-sched-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/toc-load-sched-bug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/toc-load-sched-bug.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/toc-load-sched-bug.ll Fri Feb 27 15:17:42 2015
@@ -177,48 +177,48 @@ entry:
   %ref.tmp = alloca %"class.llvm::SMDiagnostic", align 8
   %ref.tmp5 = alloca %"class.std::basic_string", align 8
   %_M_p.i.i.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %Filename, i64 0, i32 0, i32 0
-  %0 = load i8** %_M_p.i.i.i, align 8, !tbaa !1
+  %0 = load i8*, i8** %_M_p.i.i.i, align 8, !tbaa !1
   %1 = ptrtoint i8* %0 to i64
   %arrayidx.i.i.i = getelementptr inbounds i8, i8* %0, i64 -24
   %_M_length.i.i = bitcast i8* %arrayidx.i.i.i to i64*
-  %2 = load i64* %_M_length.i.i, align 8, !tbaa !7
+  %2 = load i64, i64* %_M_length.i.i, align 8, !tbaa !7
   %.fca.0.insert18 = insertvalue [2 x i64] undef, i64 %1, 0
   %.fca.1.insert21 = insertvalue [2 x i64] %.fca.0.insert18, i64 %2, 1
   call void @_ZN4llvm12MemoryBuffer14getFileOrSTDINENS_9StringRefEl(%"class.llvm::ErrorOr"* sret %FileOrErr, [2 x i64] %.fca.1.insert21, i64 -1) #3
   %HasError.i24 = getelementptr inbounds %"class.llvm::ErrorOr", %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 1
-  %bf.load.i25 = load i8* %HasError.i24, align 8
+  %bf.load.i25 = load i8, i8* %HasError.i24, align 8
   %3 = and i8 %bf.load.i25, 1
   %bf.cast.i26 = icmp eq i8 %3, 0
   br i1 %bf.cast.i26, label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit, label %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
 
 _ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit: ; preds = %entry
   %retval.sroa.0.0..sroa_cast.i = bitcast %"class.llvm::ErrorOr"* %FileOrErr to i64*
-  %retval.sroa.0.0.copyload.i = load i64* %retval.sroa.0.0..sroa_cast.i, align 8
+  %retval.sroa.0.0.copyload.i = load i64, i64* %retval.sroa.0.0..sroa_cast.i, align 8
   %retval.sroa.3.0..sroa_idx.i = getelementptr inbounds %"class.llvm::ErrorOr", %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 0, i32 0, i32 0, i32 0, i64 8
   %retval.sroa.3.0..sroa_cast.i = bitcast i8* %retval.sroa.3.0..sroa_idx.i to i64*
-  %retval.sroa.3.0.copyload.i = load i64* %retval.sroa.3.0..sroa_cast.i, align 8
+  %retval.sroa.3.0.copyload.i = load i64, i64* %retval.sroa.3.0..sroa_cast.i, align 8
   %phitmp = trunc i64 %retval.sroa.0.0.copyload.i to i32
   %cmp.i = icmp eq i32 %phitmp, 0
   br i1 %cmp.i, label %cond.false.i.i, label %if.then
 
 if.then:                                          ; preds = %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
   %.c = inttoptr i64 %retval.sroa.3.0.copyload.i to %"class.std::error_category"*
-  %4 = load i8** %_M_p.i.i.i, align 8, !tbaa !1
+  %4 = load i8*, i8** %_M_p.i.i.i, align 8, !tbaa !1
   %arrayidx.i.i.i30 = getelementptr inbounds i8, i8* %4, i64 -24
   %_M_length.i.i31 = bitcast i8* %arrayidx.i.i.i30 to i64*
-  %5 = load i64* %_M_length.i.i31, align 8, !tbaa !7
+  %5 = load i64, i64* %_M_length.i.i31, align 8, !tbaa !7
   %6 = inttoptr i64 %retval.sroa.3.0.copyload.i to void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)***
-  %vtable.i = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*** %6, align 8, !tbaa !11
+  %vtable.i = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)**, void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*** %6, align 8, !tbaa !11
   %vfn.i = getelementptr inbounds void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*, void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vtable.i, i64 3
-  %7 = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vfn.i, align 8
+  %7 = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*, void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vfn.i, align 8
   call void %7(%"class.std::basic_string"* sret %ref.tmp5, %"class.std::error_category"* %.c, i32 signext %phitmp) #3
   %call2.i.i = call dereferenceable(8) %"class.std::basic_string"* @_ZNSs6insertEmPKcm(%"class.std::basic_string"* %ref.tmp5, i64 0, i8* getelementptr inbounds ([28 x i8]* @.str, i64 0, i64 0), i64 27) #3
   %_M_p2.i.i.i.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %call2.i.i, i64 0, i32 0, i32 0
-  %8 = load i8** %_M_p2.i.i.i.i, align 8, !tbaa !13
+  %8 = load i8*, i8** %_M_p2.i.i.i.i, align 8, !tbaa !13
   store i8* bitcast (i64* getelementptr inbounds ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p2.i.i.i.i, align 8, !tbaa !1
   %arrayidx.i.i.i36 = getelementptr inbounds i8, i8* %8, i64 -24
   %_M_length.i.i37 = bitcast i8* %arrayidx.i.i.i36 to i64*
-  %9 = load i64* %_M_length.i.i37, align 8, !tbaa !7
+  %9 = load i64, i64* %_M_length.i.i37, align 8, !tbaa !7
   %Filename.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 2
   %10 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i2.i, i64 0, i32 0
   %11 = bitcast %"class.llvm::SMDiagnostic"* %ref.tmp to i8*
@@ -289,21 +289,21 @@ _ZN4llvm12SMDiagnosticC2ENS_9StringRefEN
   call void @_ZNSs4swapERSs(%"class.std::basic_string"* %LineContents.i, %"class.std::basic_string"* dereferenceable(8) %LineContents7.i) #3
   %Ranges.i41 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8
   %_M_start.i7.i.i.i = getelementptr inbounds %"class.std::vector.79", %"class.std::vector.79"* %Ranges.i41, i64 0, i32 0, i32 0, i32 0
-  %18 = load %"struct.std::pair"** %_M_start.i7.i.i.i, align 8, !tbaa !27
+  %18 = load %"struct.std::pair"*, %"struct.std::pair"** %_M_start.i7.i.i.i, align 8, !tbaa !27
   %_M_finish.i9.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 1
   %_M_end_of_storage.i11.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 2
   %_M_start2.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 0
   %19 = bitcast %"class.std::vector.79"* %Ranges.i41 to i8*
   call void @llvm.memset.p0i8.i64(i8* %19, i8 0, i64 16, i32 8, i1 false) #3
-  %20 = load %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27
+  %20 = load %"struct.std::pair"*, %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27
   store %"struct.std::pair"* %20, %"struct.std::pair"** %_M_start.i7.i.i.i, align 8, !tbaa !27
   store %"struct.std::pair"* null, %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27
   %_M_finish3.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 1
-  %21 = load %"struct.std::pair"** %_M_finish3.i.i.i.i, align 8, !tbaa !27
+  %21 = load %"struct.std::pair"*, %"struct.std::pair"** %_M_finish3.i.i.i.i, align 8, !tbaa !27
   store %"struct.std::pair"* %21, %"struct.std::pair"** %_M_finish.i9.i.i.i, align 8, !tbaa !27
   store %"struct.std::pair"* null, %"struct.std::pair"** %_M_finish3.i.i.i.i, align 8, !tbaa !27
   %_M_end_of_storage4.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 2
-  %22 = load %"struct.std::pair"** %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
+  %22 = load %"struct.std::pair"*, %"struct.std::pair"** %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
   store %"struct.std::pair"* %22, %"struct.std::pair"** %_M_end_of_storage.i11.i.i.i, align 8, !tbaa !27
   store %"struct.std::pair"* null, %"struct.std::pair"** %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
   %tobool.i.i.i.i.i.i = icmp eq %"struct.std::pair"* %18, null
@@ -335,12 +335,12 @@ if.then.i.i.i.i:
   call void @llvm.lifetime.start(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
   %29 = atomicrmw volatile add i32* %28, i32 -1 acq_rel
   store i32 %29, i32* %.atomicdst.i.i.i.i.i, align 4
-  %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i = load volatile i32* %.atomicdst.i.i.i.i.i, align 4
+  %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i = load volatile i32, i32* %.atomicdst.i.i.i.i.i, align 4
   call void @llvm.lifetime.end(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
   br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i
 
 if.else.i.i.i.i:                                  ; preds = %if.then.i.i.i45
-  %30 = load i32* %28, align 4, !tbaa !29
+  %30 = load i32, i32* %28, align 4, !tbaa !29
   %add.i.i.i.i.i = add nsw i32 %30, -1
   store i32 %add.i.i.i.i.i, i32* %28, align 4, !tbaa !29
   br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i
@@ -359,7 +359,7 @@ _ZNSsD1Ev.exit:
   %31 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i47, i64 0, i32 0
   call void @llvm.lifetime.start(i64 1, i8* %31) #3
   %_M_p.i.i.i.i48 = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %ref.tmp5, i64 0, i32 0, i32 0
-  %32 = load i8** %_M_p.i.i.i.i48, align 8, !tbaa !1
+  %32 = load i8*, i8** %_M_p.i.i.i.i48, align 8, !tbaa !1
   %arrayidx.i.i.i49 = getelementptr inbounds i8, i8* %32, i64 -24
   %33 = bitcast i8* %arrayidx.i.i.i49 to %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*
   %cmp.i.i.i50 = icmp eq i8* %arrayidx.i.i.i49, bitcast ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE to i8*)
@@ -375,12 +375,12 @@ if.then.i.i.i.i55:
   call void @llvm.lifetime.start(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
   %35 = atomicrmw volatile add i32* %34, i32 -1 acq_rel
   store i32 %35, i32* %.atomicdst.i.i.i.i.i46, align 4
-  %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i54 = load volatile i32* %.atomicdst.i.i.i.i.i46, align 4
+  %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i54 = load volatile i32, i32* %.atomicdst.i.i.i.i.i46, align 4
   call void @llvm.lifetime.end(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
   br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60
 
 if.else.i.i.i.i57:                                ; preds = %if.then.i.i.i52
-  %36 = load i32* %34, align 4, !tbaa !29
+  %36 = load i32, i32* %34, align 4, !tbaa !29
   %add.i.i.i.i.i56 = add nsw i32 %36, -1
   store i32 %add.i.i.i.i.i56, i32* %34, align 4, !tbaa !29
   br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60
@@ -404,28 +404,28 @@ cond.false.i.i:
 
 _ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit: ; preds = %entry
   %_M_head_impl.i.i.i.i.i = bitcast %"class.llvm::ErrorOr"* %FileOrErr to %"class.llvm::MemoryBuffer"**
-  %37 = load %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i, align 8, !tbaa !27
+  %37 = load %"class.llvm::MemoryBuffer"*, %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i, align 8, !tbaa !27
   %call9 = call %"class.llvm::Module"* @_ZN4llvm7ParseIREPNS_12MemoryBufferERNS_12SMDiagnosticERNS_11LLVMContextE(%"class.llvm::MemoryBuffer"* %37, %"class.llvm::SMDiagnostic"* dereferenceable(200) %Err, %"class.llvm::LLVMContext"* dereferenceable(8) %Context)
   br label %cleanup
 
 cleanup:                                          ; preds = %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit, %_ZNSsD1Ev.exit62
   %retval.0 = phi %"class.llvm::Module"* [ null, %_ZNSsD1Ev.exit62 ], [ %call9, %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit ]
-  %bf.load.i = load i8* %HasError.i24, align 8
+  %bf.load.i = load i8, i8* %HasError.i24, align 8
   %38 = and i8 %bf.load.i, 1
   %bf.cast.i = icmp eq i8 %38, 0
   br i1 %bf.cast.i, label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i, label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEED2Ev.exit
 
 _ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i: ; preds = %cleanup
   %_M_head_impl.i.i.i.i.i.i = bitcast %"class.llvm::ErrorOr"* %FileOrErr to %"class.llvm::MemoryBuffer"**
-  %39 = load %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i.i, align 8, !tbaa !27
+  %39 = load %"class.llvm::MemoryBuffer"*, %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i.i, align 8, !tbaa !27
   %cmp.i.i = icmp eq %"class.llvm::MemoryBuffer"* %39, null
   br i1 %cmp.i.i, label %_ZNSt10unique_ptrIN4llvm12MemoryBufferESt14default_deleteIS1_EED2Ev.exit.i, label %_ZNKSt14default_deleteIN4llvm12MemoryBufferEEclEPS1_.exit.i.i
 
 _ZNKSt14default_deleteIN4llvm12MemoryBufferEEclEPS1_.exit.i.i: ; preds = %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i
   %40 = bitcast %"class.llvm::MemoryBuffer"* %39 to void (%"class.llvm::MemoryBuffer"*)***
-  %vtable.i.i.i = load void (%"class.llvm::MemoryBuffer"*)*** %40, align 8, !tbaa !11
+  %vtable.i.i.i = load void (%"class.llvm::MemoryBuffer"*)**, void (%"class.llvm::MemoryBuffer"*)*** %40, align 8, !tbaa !11
   %vfn.i.i.i = getelementptr inbounds void (%"class.llvm::MemoryBuffer"*)*, void (%"class.llvm::MemoryBuffer"*)** %vtable.i.i.i, i64 1
-  %41 = load void (%"class.llvm::MemoryBuffer"*)** %vfn.i.i.i, align 8
+  %41 = load void (%"class.llvm::MemoryBuffer"*)*, void (%"class.llvm::MemoryBuffer"*)** %vfn.i.i.i, align 8
   call void %41(%"class.llvm::MemoryBuffer"* %39) #3
   br label %_ZNSt10unique_ptrIN4llvm12MemoryBufferESt14default_deleteIS1_EED2Ev.exit.i
 

Modified: llvm/trunk/test/CodeGen/PowerPC/trampoline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/trampoline.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/trampoline.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/trampoline.ll Fri Feb 27 15:17:42 2015
@@ -63,7 +63,7 @@ entry:
 	store %struct.objc_selector* %_cmd, %struct.objc_selector** %_cmd_addr
 	store %struct.NSZone* %zone, %struct.NSZone** %zone_addr
 	%3 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0		; <%struct.NSBitmapImageRep**> [#uses=1]
-	%4 = load %struct.NSBitmapImageRep** %self_addr, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
+	%4 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %self_addr, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
 	store %struct.NSBitmapImageRep* %4, %struct.NSBitmapImageRep** %3, align 4
 	%TRAMP.91 = bitcast %struct.__builtin_trampoline* %TRAMP.9 to i8*		; <i8*> [#uses=1]
 	%FRAME.72 = bitcast %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7 to i8*		; <i8*> [#uses=1]
@@ -71,7 +71,7 @@ entry:
         %tramp = call i8* @llvm.adjust.trampoline(i8* %TRAMP.91)
 	store i8* %tramp, i8** %0, align 4
 	%5 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1		; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
-	%6 = load i8** %0, align 4		; <i8*> [#uses=1]
+	%6 = load i8*, i8** %0, align 4		; <i8*> [#uses=1]
 	%7 = bitcast i8* %6 to void (%struct.__block_1*, %struct.CGImage*)*		; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
 	store void (%struct.__block_1*, %struct.CGImage*)* %7, void (%struct.__block_1*, %struct.CGImage*)** %5, align 4
 	store %struct.NSBitmapImageRep* null, %struct.NSBitmapImageRep** %new, align 4
@@ -85,32 +85,32 @@ entry:
 	%13 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %12, i32 0, i32 2		; <i32*> [#uses=1]
 	store i32 24, i32* %13, align 4
 	%14 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1		; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
-	%15 = load void (%struct.__block_1*, %struct.CGImage*)** %14, align 4		; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
+	%15 = load void (%struct.__block_1*, %struct.CGImage*)*, void (%struct.__block_1*, %struct.CGImage*)** %14, align 4		; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
 	store void (%struct.__block_1*, %struct.CGImage*)* %15, void (%struct.__block_1*, %struct.CGImage*)** %1, align 4
 	%16 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0		; <%struct.__invoke_impl*> [#uses=1]
 	%17 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %16, i32 0, i32 3		; <i8**> [#uses=1]
-	%18 = load void (%struct.__block_1*, %struct.CGImage*)** %1, align 4		; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
+	%18 = load void (%struct.__block_1*, %struct.CGImage*)*, void (%struct.__block_1*, %struct.CGImage*)** %1, align 4		; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
 	%19 = bitcast void (%struct.__block_1*, %struct.CGImage*)* %18 to i8*		; <i8*> [#uses=1]
 	store i8* %19, i8** %17, align 4
 	%20 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 1		; <%struct.NSZone**> [#uses=1]
-	%21 = load %struct.NSZone** %zone_addr, align 4		; <%struct.NSZone*> [#uses=1]
+	%21 = load %struct.NSZone*, %struct.NSZone** %zone_addr, align 4		; <%struct.NSZone*> [#uses=1]
 	store %struct.NSZone* %21, %struct.NSZone** %20, align 4
 	%22 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 2		; <%struct.NSBitmapImageRep***> [#uses=1]
 	store %struct.NSBitmapImageRep** %new, %struct.NSBitmapImageRep*** %22, align 4
 	%23 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0		; <%struct.NSBitmapImageRep**> [#uses=1]
-	%24 = load %struct.NSBitmapImageRep** %23, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
+	%24 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %23, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
 	store %struct.NSBitmapImageRep* %24, %struct.NSBitmapImageRep** %2, align 4
-	%25 = load %struct.NSBitmapImageRep** %2, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
+	%25 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %2, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
 	%26 = bitcast %struct.NSBitmapImageRep* %25 to %struct.objc_object*		; <%struct.objc_object*> [#uses=1]
 	store %struct.objc_object* %26, %struct.objc_object** %self.1, align 4
-	%27 = load %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_1", align 4		; <%struct.objc_selector*> [#uses=1]
+	%27 = load %struct.objc_selector*, %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_1", align 4		; <%struct.objc_selector*> [#uses=1]
 	%__block_holder_tmp_1.03 = bitcast %struct.__block_1* %__block_holder_tmp_1.0 to void (%struct.CGImage*)*		; <void (%struct.CGImage*)*> [#uses=1]
-	%28 = load %struct.objc_object** %self.1, align 4		; <%struct.objc_object*> [#uses=1]
+	%28 = load %struct.objc_object*, %struct.objc_object** %self.1, align 4		; <%struct.objc_object*> [#uses=1]
 	%29 = call %struct.objc_object* (%struct.objc_object*, %struct.objc_selector*, ...)* inttoptr (i64 4294901504 to %struct.objc_object* (%struct.objc_object*, %struct.objc_selector*, ...)*)(%struct.objc_object* %28, %struct.objc_selector* %27, void (%struct.CGImage*)* %__block_holder_tmp_1.03) nounwind		; <%struct.objc_object*> [#uses=0]
 	br label %return
 
 return:		; preds = %entry
-	%retval5 = load %struct.objc_object** %retval		; <%struct.objc_object*> [#uses=1]
+	%retval5 = load %struct.objc_object*, %struct.objc_object** %retval		; <%struct.objc_object*> [#uses=1]
 	ret %struct.objc_object* %retval5
 }
 
@@ -131,33 +131,33 @@ entry:
 	store %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %CHAIN.8, %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"** %CHAIN.8_addr
 	store %struct.__block_1* %_self, %struct.__block_1** %_self_addr
 	store %struct.CGImage* %cgImage, %struct.CGImage** %cgImage_addr
-	%1 = load %struct.__block_1** %_self_addr, align 4		; <%struct.__block_1*> [#uses=1]
+	%1 = load %struct.__block_1*, %struct.__block_1** %_self_addr, align 4		; <%struct.__block_1*> [#uses=1]
 	%2 = getelementptr %struct.__block_1, %struct.__block_1* %1, i32 0, i32 2		; <%struct.NSBitmapImageRep***> [#uses=1]
-	%3 = load %struct.NSBitmapImageRep*** %2, align 4		; <%struct.NSBitmapImageRep**> [#uses=1]
+	%3 = load %struct.NSBitmapImageRep**, %struct.NSBitmapImageRep*** %2, align 4		; <%struct.NSBitmapImageRep**> [#uses=1]
 	store %struct.NSBitmapImageRep** %3, %struct.NSBitmapImageRep*** %new, align 4
-	%4 = load %struct.__block_1** %_self_addr, align 4		; <%struct.__block_1*> [#uses=1]
+	%4 = load %struct.__block_1*, %struct.__block_1** %_self_addr, align 4		; <%struct.__block_1*> [#uses=1]
 	%5 = getelementptr %struct.__block_1, %struct.__block_1* %4, i32 0, i32 1		; <%struct.NSZone**> [#uses=1]
-	%6 = load %struct.NSZone** %5, align 4		; <%struct.NSZone*> [#uses=1]
+	%6 = load %struct.NSZone*, %struct.NSZone** %5, align 4		; <%struct.NSZone*> [#uses=1]
 	store %struct.NSZone* %6, %struct.NSZone** %zone, align 4
-	%7 = load %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"** %CHAIN.8_addr, align 4		; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*> [#uses=1]
+	%7 = load %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*, %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"** %CHAIN.8_addr, align 4		; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*> [#uses=1]
 	%8 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %7, i32 0, i32 0		; <%struct.NSBitmapImageRep**> [#uses=1]
-	%9 = load %struct.NSBitmapImageRep** %8, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
+	%9 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %8, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
 	store %struct.NSBitmapImageRep* %9, %struct.NSBitmapImageRep** %0, align 4
-	%10 = load %struct.NSBitmapImageRep** %0, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
+	%10 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %0, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
 	%11 = bitcast %struct.NSBitmapImageRep* %10 to %struct.objc_object*		; <%struct.objc_object*> [#uses=1]
 	%12 = getelementptr %struct._objc_super, %struct._objc_super* %objc_super, i32 0, i32 0		; <%struct.objc_object**> [#uses=1]
 	store %struct.objc_object* %11, %struct.objc_object** %12, align 4
-	%13 = load %struct._objc_class** getelementptr (%struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep", i32 0, i32 1), align 4		; <%struct._objc_class*> [#uses=1]
+	%13 = load %struct._objc_class*, %struct._objc_class** getelementptr (%struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep", i32 0, i32 1), align 4		; <%struct._objc_class*> [#uses=1]
 	%14 = getelementptr %struct._objc_super, %struct._objc_super* %objc_super, i32 0, i32 1		; <%struct._objc_class**> [#uses=1]
 	store %struct._objc_class* %13, %struct._objc_class** %14, align 4
 	%objc_super1 = bitcast %struct._objc_super* %objc_super to %struct.objc_super*		; <%struct.objc_super*> [#uses=1]
 	store %struct.objc_super* %objc_super1, %struct.objc_super** %objc_super.5, align 4
-	%15 = load %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_0", align 4		; <%struct.objc_selector*> [#uses=1]
-	%16 = load %struct.objc_super** %objc_super.5, align 4		; <%struct.objc_super*> [#uses=1]
-	%17 = load %struct.NSZone** %zone, align 4		; <%struct.NSZone*> [#uses=1]
+	%15 = load %struct.objc_selector*, %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_0", align 4		; <%struct.objc_selector*> [#uses=1]
+	%16 = load %struct.objc_super*, %struct.objc_super** %objc_super.5, align 4		; <%struct.objc_super*> [#uses=1]
+	%17 = load %struct.NSZone*, %struct.NSZone** %zone, align 4		; <%struct.NSZone*> [#uses=1]
 	%18 = call %struct.objc_object* (%struct.objc_super*, %struct.objc_selector*, ...)* @objc_msgSendSuper(%struct.objc_super* %16, %struct.objc_selector* %15, %struct.NSZone* %17) nounwind		; <%struct.objc_object*> [#uses=1]
 	%19 = bitcast %struct.objc_object* %18 to %struct.NSBitmapImageRep*		; <%struct.NSBitmapImageRep*> [#uses=1]
-	%20 = load %struct.NSBitmapImageRep*** %new, align 4		; <%struct.NSBitmapImageRep**> [#uses=1]
+	%20 = load %struct.NSBitmapImageRep**, %struct.NSBitmapImageRep*** %new, align 4		; <%struct.NSBitmapImageRep**> [#uses=1]
 	store %struct.NSBitmapImageRep* %19, %struct.NSBitmapImageRep** %20, align 4
 	br label %return
 

Modified: llvm/trunk/test/CodeGen/PowerPC/unal-altivec-wint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/unal-altivec-wint.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/unal-altivec-wint.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/unal-altivec-wint.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ entry:
   %hv = bitcast <4 x i32>* %h1 to i8*
   %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
 
-  %v0 = load <4 x i32>* %h, align 8
+  %v0 = load <4 x i32>, <4 x i32>* %h, align 8
 
   %a = add <4 x i32> %v0, %vl
   ret <4 x i32> %a
@@ -31,7 +31,7 @@ entry:
   %hv = bitcast <4 x i32>* %h1 to i8*
   call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
 
-  %v0 = load <4 x i32>* %h, align 8
+  %v0 = load <4 x i32>, <4 x i32>* %h, align 8
 
   ret <4 x i32> %v0
 

Modified: llvm/trunk/test/CodeGen/PowerPC/unal-altivec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/unal-altivec.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/unal-altivec.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/unal-altivec.ll Fri Feb 27 15:17:42 2015
@@ -10,11 +10,11 @@ vector.body:
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds float, float* %b, i64 %index
   %1 = bitcast float* %0 to <4 x float>*
-  %wide.load = load <4 x float>* %1, align 4
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
   %.sum11 = or i64 %index, 4
   %2 = getelementptr float, float* %b, i64 %.sum11
   %3 = bitcast float* %2 to <4 x float>*
-  %wide.load8 = load <4 x float>* %3, align 4
+  %wide.load8 = load <4 x float>, <4 x float>* %3, align 4
   %4 = fadd <4 x float> %wide.load, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
   %5 = fadd <4 x float> %wide.load8, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
   %6 = getelementptr inbounds float, float* %a, i64 %index

Modified: llvm/trunk/test/CodeGen/PowerPC/unal-altivec2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/unal-altivec2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/unal-altivec2.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/unal-altivec2.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next.15, %vector.body ]
   %0 = getelementptr inbounds float, float* %y, i64 %index
   %1 = bitcast float* %0 to <4 x float>*
-  %wide.load = load <4 x float>* %1, align 4
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
   %2 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load)
   %3 = getelementptr inbounds float, float* %x, i64 %index
   %4 = bitcast float* %3 to <4 x float>*
@@ -22,7 +22,7 @@ vector.body:
   %index.next = add i64 %index, 4
   %5 = getelementptr inbounds float, float* %y, i64 %index.next
   %6 = bitcast float* %5 to <4 x float>*
-  %wide.load.1 = load <4 x float>* %6, align 4
+  %wide.load.1 = load <4 x float>, <4 x float>* %6, align 4
   %7 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.1)
   %8 = getelementptr inbounds float, float* %x, i64 %index.next
   %9 = bitcast float* %8 to <4 x float>*
@@ -30,7 +30,7 @@ vector.body:
   %index.next.1 = add i64 %index.next, 4
   %10 = getelementptr inbounds float, float* %y, i64 %index.next.1
   %11 = bitcast float* %10 to <4 x float>*
-  %wide.load.2 = load <4 x float>* %11, align 4
+  %wide.load.2 = load <4 x float>, <4 x float>* %11, align 4
   %12 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.2)
   %13 = getelementptr inbounds float, float* %x, i64 %index.next.1
   %14 = bitcast float* %13 to <4 x float>*
@@ -38,7 +38,7 @@ vector.body:
   %index.next.2 = add i64 %index.next.1, 4
   %15 = getelementptr inbounds float, float* %y, i64 %index.next.2
   %16 = bitcast float* %15 to <4 x float>*
-  %wide.load.3 = load <4 x float>* %16, align 4
+  %wide.load.3 = load <4 x float>, <4 x float>* %16, align 4
   %17 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.3)
   %18 = getelementptr inbounds float, float* %x, i64 %index.next.2
   %19 = bitcast float* %18 to <4 x float>*
@@ -46,7 +46,7 @@ vector.body:
   %index.next.3 = add i64 %index.next.2, 4
   %20 = getelementptr inbounds float, float* %y, i64 %index.next.3
   %21 = bitcast float* %20 to <4 x float>*
-  %wide.load.4 = load <4 x float>* %21, align 4
+  %wide.load.4 = load <4 x float>, <4 x float>* %21, align 4
   %22 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.4)
   %23 = getelementptr inbounds float, float* %x, i64 %index.next.3
   %24 = bitcast float* %23 to <4 x float>*
@@ -54,7 +54,7 @@ vector.body:
   %index.next.4 = add i64 %index.next.3, 4
   %25 = getelementptr inbounds float, float* %y, i64 %index.next.4
   %26 = bitcast float* %25 to <4 x float>*
-  %wide.load.5 = load <4 x float>* %26, align 4
+  %wide.load.5 = load <4 x float>, <4 x float>* %26, align 4
   %27 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.5)
   %28 = getelementptr inbounds float, float* %x, i64 %index.next.4
   %29 = bitcast float* %28 to <4 x float>*
@@ -62,7 +62,7 @@ vector.body:
   %index.next.5 = add i64 %index.next.4, 4
   %30 = getelementptr inbounds float, float* %y, i64 %index.next.5
   %31 = bitcast float* %30 to <4 x float>*
-  %wide.load.6 = load <4 x float>* %31, align 4
+  %wide.load.6 = load <4 x float>, <4 x float>* %31, align 4
   %32 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.6)
   %33 = getelementptr inbounds float, float* %x, i64 %index.next.5
   %34 = bitcast float* %33 to <4 x float>*
@@ -70,7 +70,7 @@ vector.body:
   %index.next.6 = add i64 %index.next.5, 4
   %35 = getelementptr inbounds float, float* %y, i64 %index.next.6
   %36 = bitcast float* %35 to <4 x float>*
-  %wide.load.7 = load <4 x float>* %36, align 4
+  %wide.load.7 = load <4 x float>, <4 x float>* %36, align 4
   %37 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.7)
   %38 = getelementptr inbounds float, float* %x, i64 %index.next.6
   %39 = bitcast float* %38 to <4 x float>*
@@ -78,7 +78,7 @@ vector.body:
   %index.next.7 = add i64 %index.next.6, 4
   %40 = getelementptr inbounds float, float* %y, i64 %index.next.7
   %41 = bitcast float* %40 to <4 x float>*
-  %wide.load.8 = load <4 x float>* %41, align 4
+  %wide.load.8 = load <4 x float>, <4 x float>* %41, align 4
   %42 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.8)
   %43 = getelementptr inbounds float, float* %x, i64 %index.next.7
   %44 = bitcast float* %43 to <4 x float>*
@@ -86,7 +86,7 @@ vector.body:
   %index.next.8 = add i64 %index.next.7, 4
   %45 = getelementptr inbounds float, float* %y, i64 %index.next.8
   %46 = bitcast float* %45 to <4 x float>*
-  %wide.load.9 = load <4 x float>* %46, align 4
+  %wide.load.9 = load <4 x float>, <4 x float>* %46, align 4
   %47 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.9)
   %48 = getelementptr inbounds float, float* %x, i64 %index.next.8
   %49 = bitcast float* %48 to <4 x float>*
@@ -94,7 +94,7 @@ vector.body:
   %index.next.9 = add i64 %index.next.8, 4
   %50 = getelementptr inbounds float, float* %y, i64 %index.next.9
   %51 = bitcast float* %50 to <4 x float>*
-  %wide.load.10 = load <4 x float>* %51, align 4
+  %wide.load.10 = load <4 x float>, <4 x float>* %51, align 4
   %52 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.10)
   %53 = getelementptr inbounds float, float* %x, i64 %index.next.9
   %54 = bitcast float* %53 to <4 x float>*
@@ -102,7 +102,7 @@ vector.body:
   %index.next.10 = add i64 %index.next.9, 4
   %55 = getelementptr inbounds float, float* %y, i64 %index.next.10
   %56 = bitcast float* %55 to <4 x float>*
-  %wide.load.11 = load <4 x float>* %56, align 4
+  %wide.load.11 = load <4 x float>, <4 x float>* %56, align 4
   %57 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.11)
   %58 = getelementptr inbounds float, float* %x, i64 %index.next.10
   %59 = bitcast float* %58 to <4 x float>*
@@ -110,7 +110,7 @@ vector.body:
   %index.next.11 = add i64 %index.next.10, 4
   %60 = getelementptr inbounds float, float* %y, i64 %index.next.11
   %61 = bitcast float* %60 to <4 x float>*
-  %wide.load.12 = load <4 x float>* %61, align 4
+  %wide.load.12 = load <4 x float>, <4 x float>* %61, align 4
   %62 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.12)
   %63 = getelementptr inbounds float, float* %x, i64 %index.next.11
   %64 = bitcast float* %63 to <4 x float>*
@@ -118,7 +118,7 @@ vector.body:
   %index.next.12 = add i64 %index.next.11, 4
   %65 = getelementptr inbounds float, float* %y, i64 %index.next.12
   %66 = bitcast float* %65 to <4 x float>*
-  %wide.load.13 = load <4 x float>* %66, align 4
+  %wide.load.13 = load <4 x float>, <4 x float>* %66, align 4
   %67 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.13)
   %68 = getelementptr inbounds float, float* %x, i64 %index.next.12
   %69 = bitcast float* %68 to <4 x float>*
@@ -126,7 +126,7 @@ vector.body:
   %index.next.13 = add i64 %index.next.12, 4
   %70 = getelementptr inbounds float, float* %y, i64 %index.next.13
   %71 = bitcast float* %70 to <4 x float>*
-  %wide.load.14 = load <4 x float>* %71, align 4
+  %wide.load.14 = load <4 x float>, <4 x float>* %71, align 4
   %72 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.14)
   %73 = getelementptr inbounds float, float* %x, i64 %index.next.13
   %74 = bitcast float* %73 to <4 x float>*
@@ -134,7 +134,7 @@ vector.body:
   %index.next.14 = add i64 %index.next.13, 4
   %75 = getelementptr inbounds float, float* %y, i64 %index.next.14
   %76 = bitcast float* %75 to <4 x float>*
-  %wide.load.15 = load <4 x float>* %76, align 4
+  %wide.load.15 = load <4 x float>, <4 x float>* %76, align 4
   %77 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.15)
   %78 = getelementptr inbounds float, float* %x, i64 %index.next.14
   %79 = bitcast float* %78 to <4 x float>*
@@ -153,7 +153,7 @@ declare <4 x float> @llvm_cos_v4f32(<4 x
 define <2 x double> @bar(double* %x) {
 entry:
   %p = bitcast double* %x to <2 x double>*
-  %r = load <2 x double>* %p, align 8
+  %r = load <2 x double>, <2 x double>* %p, align 8
 
 ; CHECK-LABEL: @bar
 ; CHECK-NOT: lvsl

Modified: llvm/trunk/test/CodeGen/PowerPC/unaligned.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/unaligned.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/unaligned.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/unaligned.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ target datalayout = "E-p:32:32:32-i1:8:8
 
 define void @foo1(i16* %p, i16* %r) nounwind {
 entry:
-  %v = load i16* %p, align 1
+  %v = load i16, i16* %p, align 1
   store i16 %v, i16* %r, align 1
   ret void
 
@@ -20,7 +20,7 @@ entry:
 
 define void @foo2(i32* %p, i32* %r) nounwind {
 entry:
-  %v = load i32* %p, align 1
+  %v = load i32, i32* %p, align 1
   store i32 %v, i32* %r, align 1
   ret void
 
@@ -35,7 +35,7 @@ entry:
 
 define void @foo3(i64* %p, i64* %r) nounwind {
 entry:
-  %v = load i64* %p, align 1
+  %v = load i64, i64* %p, align 1
   store i64 %v, i64* %r, align 1
   ret void
 
@@ -50,7 +50,7 @@ entry:
 
 define void @foo4(float* %p, float* %r) nounwind {
 entry:
-  %v = load float* %p, align 1
+  %v = load float, float* %p, align 1
   store float %v, float* %r, align 1
   ret void
 
@@ -65,7 +65,7 @@ entry:
 
 define void @foo5(double* %p, double* %r) nounwind {
 entry:
-  %v = load double* %p, align 1
+  %v = load double, double* %p, align 1
   store double %v, double* %r, align 1
   ret void
 
@@ -80,7 +80,7 @@ entry:
 
 define void @foo6(<4 x float>* %p, <4 x float>* %r) nounwind {
 entry:
-  %v = load <4 x float>* %p, align 1
+  %v = load <4 x float>, <4 x float>* %p, align 1
   store <4 x float> %v, <4 x float>* %r, align 1
   ret void
 

Modified: llvm/trunk/test/CodeGen/PowerPC/vaddsplat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vaddsplat.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vaddsplat.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vaddsplat.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ target triple = "powerpc64-unknown-linux
 %v16i8 = type <16 x i8>
 
 define void @test_v4i32_pos_even(%v4i32* %P, %v4i32* %S) {
-       %p = load %v4i32* %P
+       %p = load %v4i32, %v4i32* %P
        %r = add %v4i32 %p, < i32 18, i32 18, i32 18, i32 18 >
        store %v4i32 %r, %v4i32* %S
        ret void
@@ -21,7 +21,7 @@ define void @test_v4i32_pos_even(%v4i32*
 ; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG1]]
 
 define void @test_v4i32_neg_even(%v4i32* %P, %v4i32* %S) {
-       %p = load %v4i32* %P
+       %p = load %v4i32, %v4i32* %P
        %r = add %v4i32 %p, < i32 -28, i32 -28, i32 -28, i32 -28 >
        store %v4i32 %r, %v4i32* %S
        ret void
@@ -32,7 +32,7 @@ define void @test_v4i32_neg_even(%v4i32*
 ; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG1]]
 
 define void @test_v8i16_pos_even(%v8i16* %P, %v8i16* %S) {
-       %p = load %v8i16* %P
+       %p = load %v8i16, %v8i16* %P
        %r = add %v8i16 %p, < i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30 >
        store %v8i16 %r, %v8i16* %S
        ret void
@@ -43,7 +43,7 @@ define void @test_v8i16_pos_even(%v8i16*
 ; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG1]]
 
 define void @test_v8i16_neg_even(%v8i16* %P, %v8i16* %S) {
-       %p = load %v8i16* %P
+       %p = load %v8i16, %v8i16* %P
        %r = add %v8i16 %p, < i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32 >
        store %v8i16 %r, %v8i16* %S
        ret void
@@ -54,7 +54,7 @@ define void @test_v8i16_neg_even(%v8i16*
 ; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG1]]
 
 define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
-       %p = load %v16i8* %P
+       %p = load %v16i8, %v16i8* %P
        %r = add %v16i8 %p, < i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16 >
        store %v16i8 %r, %v16i8* %S
        ret void
@@ -65,7 +65,7 @@ define void @test_v16i8_pos_even(%v16i8*
 ; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG1]]
 
 define void @test_v16i8_neg_even(%v16i8* %P, %v16i8* %S) {
-       %p = load %v16i8* %P
+       %p = load %v16i8, %v16i8* %P
        %r = add %v16i8 %p, < i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18 >
        store %v16i8 %r, %v16i8* %S
        ret void
@@ -76,7 +76,7 @@ define void @test_v16i8_neg_even(%v16i8*
 ; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG1]]
 
 define void @test_v4i32_pos_odd(%v4i32* %P, %v4i32* %S) {
-       %p = load %v4i32* %P
+       %p = load %v4i32, %v4i32* %P
        %r = add %v4i32 %p, < i32 27, i32 27, i32 27, i32 27 >
        store %v4i32 %r, %v4i32* %S
        ret void
@@ -88,7 +88,7 @@ define void @test_v4i32_pos_odd(%v4i32*
 ; CHECK: vsubuwm {{[0-9]+}}, [[REG1]], [[REG2]]
 
 define void @test_v4i32_neg_odd(%v4i32* %P, %v4i32* %S) {
-       %p = load %v4i32* %P
+       %p = load %v4i32, %v4i32* %P
        %r = add %v4i32 %p, < i32 -27, i32 -27, i32 -27, i32 -27 >
        store %v4i32 %r, %v4i32* %S
        ret void
@@ -100,7 +100,7 @@ define void @test_v4i32_neg_odd(%v4i32*
 ; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG2]]
 
 define void @test_v8i16_pos_odd(%v8i16* %P, %v8i16* %S) {
-       %p = load %v8i16* %P
+       %p = load %v8i16, %v8i16* %P
        %r = add %v8i16 %p, < i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31 >
        store %v8i16 %r, %v8i16* %S
        ret void
@@ -112,7 +112,7 @@ define void @test_v8i16_pos_odd(%v8i16*
 ; CHECK: vsubuhm {{[0-9]+}}, [[REG1]], [[REG2]]
 
 define void @test_v8i16_neg_odd(%v8i16* %P, %v8i16* %S) {
-       %p = load %v8i16* %P
+       %p = load %v8i16, %v8i16* %P
        %r = add %v8i16 %p, < i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31 >
        store %v8i16 %r, %v8i16* %S
        ret void
@@ -124,7 +124,7 @@ define void @test_v8i16_neg_odd(%v8i16*
 ; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG2]]
 
 define void @test_v16i8_pos_odd(%v16i8* %P, %v16i8* %S) {
-       %p = load %v16i8* %P
+       %p = load %v16i8, %v16i8* %P
        %r = add %v16i8 %p, < i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17 >
        store %v16i8 %r, %v16i8* %S
        ret void
@@ -136,7 +136,7 @@ define void @test_v16i8_pos_odd(%v16i8*
 ; CHECK: vsububm {{[0-9]+}}, [[REG1]], [[REG2]]
 
 define void @test_v16i8_neg_odd(%v16i8* %P, %v16i8* %S) {
-       %p = load %v16i8* %P
+       %p = load %v16i8, %v16i8* %P
        %r = add %v16i8 %p, < i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17 >
        store %v16i8 %r, %v16i8* %S
        ret void

Modified: llvm/trunk/test/CodeGen/PowerPC/varargs-struct-float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/varargs-struct-float.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/varargs-struct-float.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/varargs-struct-float.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ entry:
   %coerce.dive = getelementptr %struct.Sf1, %struct.Sf1* %s, i32 0, i32 0
   store float %s.coerce, float* %coerce.dive, align 1
   %coerce.dive1 = getelementptr %struct.Sf1, %struct.Sf1* %s, i32 0, i32 0
-  %0 = load float* %coerce.dive1, align 1
+  %0 = load float, float* %coerce.dive1, align 1
   call void (i32, ...)* @testvaSf1(i32 1, float inreg %0)
   ret void
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/vcmp-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vcmp-fold.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vcmp-fold.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vcmp-fold.ll Fri Feb 27 15:17:42 2015
@@ -5,11 +5,11 @@
 
 define void @test(<4 x float>* %x, <4 x float>* %y, i32* %P) {
 entry:
-	%tmp = load <4 x float>* %x		; <<4 x float>> [#uses=1]
-	%tmp2 = load <4 x float>* %y		; <<4 x float>> [#uses=1]
+	%tmp = load <4 x float>, <4 x float>* %x		; <<4 x float>> [#uses=1]
+	%tmp2 = load <4 x float>, <4 x float>* %y		; <<4 x float>> [#uses=1]
 	%tmp.upgrd.1 = call i32 @llvm.ppc.altivec.vcmpbfp.p( i32 1, <4 x float> %tmp, <4 x float> %tmp2 )		; <i32> [#uses=1]
-	%tmp4 = load <4 x float>* %x		; <<4 x float>> [#uses=1]
-	%tmp6 = load <4 x float>* %y		; <<4 x float>> [#uses=1]
+	%tmp4 = load <4 x float>, <4 x float>* %x		; <<4 x float>> [#uses=1]
+	%tmp6 = load <4 x float>, <4 x float>* %y		; <<4 x float>> [#uses=1]
 	%tmp.upgrd.2 = call <4 x i32> @llvm.ppc.altivec.vcmpbfp( <4 x float> %tmp4, <4 x float> %tmp6 )		; <<4 x i32>> [#uses=1]
 	%tmp7 = bitcast <4 x i32> %tmp.upgrd.2 to <4 x float>		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp7, <4 x float>* %x

Modified: llvm/trunk/test/CodeGen/PowerPC/vec-abi-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec-abi-align.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec-abi-align.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec-abi-align.ll Fri Feb 27 15:17:42 2015
@@ -27,10 +27,10 @@ entry:
 define void @test2(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, %struct.s2* byval nocapture readonly %vs) #0 {
 entry:
   %m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0
-  %0 = load i64* %m, align 8
+  %0 = load i64, i64* %m, align 8
   store i64 %0, i64* @n, align 8
   %v = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 1
-  %1 = load <4 x float>* %v, align 16
+  %1 = load <4 x float>, <4 x float>* %v, align 16
   store <4 x float> %1, <4 x float>* @ve, align 16
   ret void
 
@@ -53,10 +53,10 @@ entry:
 define void @test3(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, i64 %d9, %struct.s2* byval nocapture readonly %vs) #0 {
 entry:
   %m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0
-  %0 = load i64* %m, align 8
+  %0 = load i64, i64* %m, align 8
   store i64 %0, i64* @n, align 8
   %v = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 1
-  %1 = load <4 x float>* %v, align 16
+  %1 = load <4 x float>, <4 x float>* %v, align 16
   store <4 x float> %1, <4 x float>* @ve, align 16
   ret void
 

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_auto_constant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_auto_constant.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_auto_constant.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_auto_constant.ll Fri Feb 27 15:17:42 2015
@@ -25,8 +25,8 @@ entry:
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
   store <16 x i8> %x, <16 x i8>* %x_addr
   store <16 x i8> <i8 22, i8 21, i8 20, i8 3, i8 25, i8 24, i8 23, i8 3, i8 28, i8 27, i8 26, i8 3, i8 31, i8 30, i8 29, i8 3>, <16 x i8>* %temp, align 16
-  %0 = load <16 x i8>* %x_addr, align 16          ; <<16 x i8>> [#uses=1]
-  %1 = load <16 x i8>* %temp, align 16            ; <<16 x i8>> [#uses=1]
+  %0 = load <16 x i8>, <16 x i8>* %x_addr, align 16          ; <<16 x i8>> [#uses=1]
+  %1 = load <16 x i8>, <16 x i8>* %temp, align 16            ; <<16 x i8>> [#uses=1]
   %tmp = add <16 x i8> %0, %1                     ; <<16 x i8>> [#uses=1]
   store <16 x i8> %tmp, <16 x i8>* @baz, align 16
   br label %return

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_br_cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_br_cmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_br_cmp.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_br_cmp.ll Fri Feb 27 15:17:42 2015
@@ -5,8 +5,8 @@
 ; A predicate compare used immediately by a branch should not generate an mfcr.
 
 define void @test(<4 x float>* %A, <4 x float>* %B) {
-	%tmp = load <4 x float>* %A		; <<4 x float>> [#uses=1]
-	%tmp3 = load <4 x float>* %B		; <<4 x float>> [#uses=1]
+	%tmp = load <4 x float>, <4 x float>* %A		; <<4 x float>> [#uses=1]
+	%tmp3 = load <4 x float>, <4 x float>* %B		; <<4 x float>> [#uses=1]
 	%tmp.upgrd.1 = tail call i32 @llvm.ppc.altivec.vcmpeqfp.p( i32 1, <4 x float> %tmp, <4 x float> %tmp3 )		; <i32> [#uses=1]
 	%tmp.upgrd.2 = icmp eq i32 %tmp.upgrd.1, 0		; <i1> [#uses=1]
 	br i1 %tmp.upgrd.2, label %cond_true, label %UnifiedReturnBlock

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ define void @foo() nounwind ssp {
 ; CHECK: _foo:
 ; CHECK-NOT: stw
 entry:
-    %tmp0 = load <16 x i8>* @a, align 16
+    %tmp0 = load <16 x i8>, <16 x i8>* @a, align 16
   %tmp180.i = extractelement <16 x i8> %tmp0, i32 0 ; <i8> [#uses=1]
   %tmp181.i = insertelement <16 x i8> <i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp180.i, i32 2 ; <<16 x i8>> [#uses=1]
   %tmp182.i = extractelement <16 x i8> %tmp0, i32 1 ; <i8> [#uses=1]

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_constants.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_constants.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_constants.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_constants.ll Fri Feb 27 15:17:42 2015
@@ -4,13 +4,13 @@ target datalayout = "E-p:64:64:64-i1:8:8
 target triple = "powerpc64-unknown-linux-gnu"
 
 define void @test1(<4 x i32>* %P1, <4 x i32>* %P2, <4 x float>* %P3) nounwind {
-	%tmp = load <4 x i32>* %P1		; <<4 x i32>> [#uses=1]
+	%tmp = load <4 x i32>, <4 x i32>* %P1		; <<4 x i32>> [#uses=1]
 	%tmp4 = and <4 x i32> %tmp, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 >		; <<4 x i32>> [#uses=1]
 	store <4 x i32> %tmp4, <4 x i32>* %P1
-	%tmp7 = load <4 x i32>* %P2		; <<4 x i32>> [#uses=1]
+	%tmp7 = load <4 x i32>, <4 x i32>* %P2		; <<4 x i32>> [#uses=1]
 	%tmp9 = and <4 x i32> %tmp7, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 >		; <<4 x i32>> [#uses=1]
 	store <4 x i32> %tmp9, <4 x i32>* %P2
-	%tmp.upgrd.1 = load <4 x float>* %P3		; <<4 x float>> [#uses=1]
+	%tmp.upgrd.1 = load <4 x float>, <4 x float>* %P3		; <<4 x float>> [#uses=1]
 	%tmp11 = bitcast <4 x float> %tmp.upgrd.1 to <4 x i32>		; <<4 x i32>> [#uses=1]
 	%tmp12 = and <4 x i32> %tmp11, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 >		; <<4 x i32>> [#uses=1]
 	%tmp13 = bitcast <4 x i32> %tmp12 to <4 x float>		; <<4 x float>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_conv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_conv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_conv.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_conv.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux
 
 define void @v4f32_to_v4i32(<4 x float> %x, <4 x i32>* nocapture %y) nounwind {
 entry:
-  %0 = load <4 x float>* @cte_float, align 16
+  %0 = load <4 x float>, <4 x float>* @cte_float, align 16
   %mul = fmul <4 x float> %0, %x
   %1 = fptosi <4 x float> %mul to <4 x i32>
   store <4 x i32> %1, <4 x i32>* %y, align 16
@@ -23,7 +23,7 @@ entry:
 
 define void @v4f32_to_v4u32(<4 x float> %x, <4 x i32>* nocapture %y) nounwind {
 entry:
-  %0 = load <4 x float>* @cte_float, align 16
+  %0 = load <4 x float>, <4 x float>* @cte_float, align 16
   %mul = fmul <4 x float> %0, %x
   %1 = fptoui <4 x float> %mul to <4 x i32>
   store <4 x i32> %1, <4 x i32>* %y, align 16
@@ -35,7 +35,7 @@ entry:
 
 define void @v4i32_to_v4f32(<4 x i32> %x, <4 x float>* nocapture %y) nounwind {
 entry:
-  %0 = load <4 x i32>* @cte_int, align 16
+  %0 = load <4 x i32>, <4 x i32>* @cte_int, align 16
   %mul = mul <4 x i32> %0, %x
   %1 = sitofp <4 x i32> %mul to <4 x float>
   store <4 x float> %1, <4 x float>* %y, align 16
@@ -47,7 +47,7 @@ entry:
 
 define void @v4u32_to_v4f32(<4 x i32> %x, <4 x float>* nocapture %y) nounwind {
 entry:
-  %0 = load <4 x i32>* @cte_int, align 16
+  %0 = load <4 x i32>, <4 x i32>* @cte_int, align 16
   %mul = mul <4 x i32> %0, %x
   %1 = uitofp <4 x i32> %mul to <4 x float>
   store <4 x float> %1, <4 x float>* %y, align 16

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_fneg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_fneg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_fneg.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_fneg.ll Fri Feb 27 15:17:42 2015
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vsubfp
 
 define void @t(<4 x float>* %A) {
-	%tmp2 = load <4 x float>* %A
+	%tmp2 = load <4 x float>, <4 x float>* %A
 	%tmp3 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp2
 	store <4 x float> %tmp3, <4 x float>* %A
 	ret void

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_misaligned.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_misaligned.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_misaligned.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_misaligned.ll Fri Feb 27 15:17:42 2015
@@ -19,18 +19,18 @@ entry:
 	store i32 %x, i32* %x_addr
 	%ap1 = bitcast i8** %ap to i8*		; <i8*> [#uses=1]
 	call void @llvm.va_start( i8* %ap1 )
-	%tmp = load i8** %ap, align 4		; <i8*> [#uses=1]
+	%tmp = load i8*, i8** %ap, align 4		; <i8*> [#uses=1]
 	store i8* %tmp, i8** %ap.0, align 4
-	%tmp2 = load i8** %ap.0, align 4		; <i8*> [#uses=1]
+	%tmp2 = load i8*, i8** %ap.0, align 4		; <i8*> [#uses=1]
 	%tmp3 = getelementptr i8, i8* %tmp2, i64 16		; <i8*> [#uses=1]
 	store i8* %tmp3, i8** %ap, align 4
-	%tmp4 = load i8** %ap.0, align 4		; <i8*> [#uses=1]
+	%tmp4 = load i8*, i8** %ap.0, align 4		; <i8*> [#uses=1]
 	%tmp45 = bitcast i8* %tmp4 to %struct.S2203*		; <%struct.S2203*> [#uses=1]
 	%tmp6 = getelementptr %struct.S2203, %struct.S2203* @s, i32 0, i32 0		; <%struct.u16qi*> [#uses=1]
 	%tmp7 = getelementptr %struct.S2203, %struct.S2203* %tmp45, i32 0, i32 0		; <%struct.u16qi*> [#uses=1]
 	%tmp8 = getelementptr %struct.u16qi, %struct.u16qi* %tmp6, i32 0, i32 0		; <<16 x i8>*> [#uses=1]
 	%tmp9 = getelementptr %struct.u16qi, %struct.u16qi* %tmp7, i32 0, i32 0		; <<16 x i8>*> [#uses=1]
-	%tmp10 = load <16 x i8>* %tmp9, align 4		; <<16 x i8>> [#uses=1]
+	%tmp10 = load <16 x i8>, <16 x i8>* %tmp9, align 4		; <<16 x i8>> [#uses=1]
 ; CHECK: lvsl
 ; CHECK: vperm
 ; CHECK-LE: lvsr

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_mul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_mul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_mul.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_mul.ll Fri Feb 27 15:17:42 2015
@@ -5,8 +5,8 @@
 ; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=+vsx -mcpu=pwr8 | FileCheck %s -check-prefix=CHECK-LE-VSX
 
 define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
-	%tmp = load <4 x i32>* %X		; <<4 x i32>> [#uses=1]
-	%tmp2 = load <4 x i32>* %Y		; <<4 x i32>> [#uses=1]
+	%tmp = load <4 x i32>, <4 x i32>* %X		; <<4 x i32>> [#uses=1]
+	%tmp2 = load <4 x i32>, <4 x i32>* %Y		; <<4 x i32>> [#uses=1]
 	%tmp3 = mul <4 x i32> %tmp, %tmp2		; <<4 x i32>> [#uses=1]
 	ret <4 x i32> %tmp3
 }
@@ -24,8 +24,8 @@ define <4 x i32> @test_v4i32(<4 x i32>*
 ; CHECK-LE-VSX-NOT: mullw
 
 define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
-	%tmp = load <8 x i16>* %X		; <<8 x i16>> [#uses=1]
-	%tmp2 = load <8 x i16>* %Y		; <<8 x i16>> [#uses=1]
+	%tmp = load <8 x i16>, <8 x i16>* %X		; <<8 x i16>> [#uses=1]
+	%tmp2 = load <8 x i16>, <8 x i16>* %Y		; <<8 x i16>> [#uses=1]
 	%tmp3 = mul <8 x i16> %tmp, %tmp2		; <<8 x i16>> [#uses=1]
 	ret <8 x i16> %tmp3
 }
@@ -43,8 +43,8 @@ define <8 x i16> @test_v8i16(<8 x i16>*
 ; CHECK-LE-VSX-NOT: mullw
 
 define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
-	%tmp = load <16 x i8>* %X		; <<16 x i8>> [#uses=1]
-	%tmp2 = load <16 x i8>* %Y		; <<16 x i8>> [#uses=1]
+	%tmp = load <16 x i8>, <16 x i8>* %X		; <<16 x i8>> [#uses=1]
+	%tmp2 = load <16 x i8>, <16 x i8>* %Y		; <<16 x i8>> [#uses=1]
 	%tmp3 = mul <16 x i8> %tmp, %tmp2		; <<16 x i8>> [#uses=1]
 	ret <16 x i8> %tmp3
 }
@@ -68,8 +68,8 @@ define <16 x i8> @test_v16i8(<16 x i8>*
 ; CHECK-LE-VSX-NOT: mullw
 
 define <4 x float> @test_float(<4 x float>* %X, <4 x float>* %Y) {
-	%tmp = load <4 x float>* %X
-	%tmp2 = load <4 x float>* %Y
+	%tmp = load <4 x float>, <4 x float>* %X
+	%tmp2 = load <4 x float>, <4 x float>* %Y
 	%tmp3 = fmul <4 x float> %tmp, %tmp2
 	ret <4 x float> %tmp3
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_perf_shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_perf_shuffle.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_perf_shuffle.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_perf_shuffle.ll Fri Feb 27 15:17:42 2015
@@ -1,36 +1,36 @@
 ; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep vperm
 
 define <4 x float> @test_uu72(<4 x float>* %P1, <4 x float>* %P2) {
-	%V1 = load <4 x float>* %P1		; <<4 x float>> [#uses=1]
-	%V2 = load <4 x float>* %P2		; <<4 x float>> [#uses=1]
+	%V1 = load <4 x float>, <4 x float>* %P1		; <<4 x float>> [#uses=1]
+	%V2 = load <4 x float>, <4 x float>* %P2		; <<4 x float>> [#uses=1]
 	%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 undef, i32 undef, i32 7, i32 2 >		; <<4 x float>> [#uses=1]
 	ret <4 x float> %V3
 }
 
 define <4 x float> @test_30u5(<4 x float>* %P1, <4 x float>* %P2) {
-	%V1 = load <4 x float>* %P1		; <<4 x float>> [#uses=1]
-	%V2 = load <4 x float>* %P2		; <<4 x float>> [#uses=1]
+	%V1 = load <4 x float>, <4 x float>* %P1		; <<4 x float>> [#uses=1]
+	%V2 = load <4 x float>, <4 x float>* %P2		; <<4 x float>> [#uses=1]
 	%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 0, i32 undef, i32 5 >		; <<4 x float>> [#uses=1]
 	ret <4 x float> %V3
 }
 
 define <4 x float> @test_3u73(<4 x float>* %P1, <4 x float>* %P2) {
-	%V1 = load <4 x float>* %P1		; <<4 x float>> [#uses=1]
-	%V2 = load <4 x float>* %P2		; <<4 x float>> [#uses=1]
+	%V1 = load <4 x float>, <4 x float>* %P1		; <<4 x float>> [#uses=1]
+	%V2 = load <4 x float>, <4 x float>* %P2		; <<4 x float>> [#uses=1]
 	%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 undef, i32 7, i32 3 >		; <<4 x float>> [#uses=1]
 	ret <4 x float> %V3
 }
 
 define <4 x float> @test_3774(<4 x float>* %P1, <4 x float>* %P2) {
-	%V1 = load <4 x float>* %P1		; <<4 x float>> [#uses=1]
-	%V2 = load <4 x float>* %P2		; <<4 x float>> [#uses=1]
+	%V1 = load <4 x float>, <4 x float>* %P1		; <<4 x float>> [#uses=1]
+	%V2 = load <4 x float>, <4 x float>* %P2		; <<4 x float>> [#uses=1]
 	%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 7, i32 7, i32 4 >		; <<4 x float>> [#uses=1]
 	ret <4 x float> %V3
 }
 
 define <4 x float> @test_4450(<4 x float>* %P1, <4 x float>* %P2) {
-	%V1 = load <4 x float>* %P1		; <<4 x float>> [#uses=1]
-	%V2 = load <4 x float>* %P2		; <<4 x float>> [#uses=1]
+	%V1 = load <4 x float>, <4 x float>* %P1		; <<4 x float>> [#uses=1]
+	%V2 = load <4 x float>, <4 x float>* %P2		; <<4 x float>> [#uses=1]
 	%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 4, i32 4, i32 5, i32 0 >		; <<4 x float>> [#uses=1]
 	ret <4 x float> %V3
 }

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_shuffle.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_shuffle.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_shuffle.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@
 
 define void @VSLDOI_xy(<8 x i16>* %A, <8 x i16>* %B) {
 entry:
-	%tmp = load <8 x i16>* %A		; <<8 x i16>> [#uses=1]
-	%tmp2 = load <8 x i16>* %B		; <<8 x i16>> [#uses=1]
+	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=1]
+	%tmp2 = load <8 x i16>, <8 x i16>* %B		; <<8 x i16>> [#uses=1]
 	%tmp.upgrd.1 = bitcast <8 x i16> %tmp to <16 x i8>		; <<16 x i8>> [#uses=11]
 	%tmp2.upgrd.2 = bitcast <8 x i16> %tmp2 to <16 x i8>		; <<16 x i8>> [#uses=5]
 	%tmp.upgrd.3 = extractelement <16 x i8> %tmp.upgrd.1, i32 5		; <i8> [#uses=1]
@@ -51,8 +51,8 @@ entry:
 }
 
 define void @VSLDOI_xx(<8 x i16>* %A, <8 x i16>* %B) {
-	%tmp = load <8 x i16>* %A		; <<8 x i16>> [#uses=1]
-	%tmp2 = load <8 x i16>* %A		; <<8 x i16>> [#uses=1]
+	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=1]
+	%tmp2 = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=1]
 	%tmp.upgrd.5 = bitcast <8 x i16> %tmp to <16 x i8>		; <<16 x i8>> [#uses=11]
 	%tmp2.upgrd.6 = bitcast <8 x i16> %tmp2 to <16 x i8>		; <<16 x i8>> [#uses=5]
 	%tmp.upgrd.7 = extractelement <16 x i8> %tmp.upgrd.5, i32 5		; <i8> [#uses=1]
@@ -94,9 +94,9 @@ define void @VSLDOI_xx(<8 x i16>* %A, <8
 
 define void @VPERM_promote(<8 x i16>* %A, <8 x i16>* %B) {
 entry:
-	%tmp = load <8 x i16>* %A		; <<8 x i16>> [#uses=1]
+	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=1]
 	%tmp.upgrd.9 = bitcast <8 x i16> %tmp to <4 x i32>		; <<4 x i32>> [#uses=1]
-	%tmp2 = load <8 x i16>* %B		; <<8 x i16>> [#uses=1]
+	%tmp2 = load <8 x i16>, <8 x i16>* %B		; <<8 x i16>> [#uses=1]
 	%tmp2.upgrd.10 = bitcast <8 x i16> %tmp2 to <4 x i32>		; <<4 x i32>> [#uses=1]
 	%tmp3 = call <4 x i32> @llvm.ppc.altivec.vperm( <4 x i32> %tmp.upgrd.9, <4 x i32> %tmp2.upgrd.10, <16 x i8> < i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14 > )		; <<4 x i32>> [#uses=1]
 	%tmp3.upgrd.11 = bitcast <4 x i32> %tmp3 to <8 x i16>		; <<8 x i16>> [#uses=1]
@@ -108,8 +108,8 @@ declare <4 x i32> @llvm.ppc.altivec.vper
 
 define void @tb_l(<16 x i8>* %A, <16 x i8>* %B) {
 entry:
-	%tmp = load <16 x i8>* %A		; <<16 x i8>> [#uses=8]
-	%tmp2 = load <16 x i8>* %B		; <<16 x i8>> [#uses=8]
+	%tmp = load <16 x i8>, <16 x i8>* %A		; <<16 x i8>> [#uses=8]
+	%tmp2 = load <16 x i8>, <16 x i8>* %B		; <<16 x i8>> [#uses=8]
 	%tmp.upgrd.12 = extractelement <16 x i8> %tmp, i32 8		; <i8> [#uses=1]
 	%tmp3 = extractelement <16 x i8> %tmp2, i32 8		; <i8> [#uses=1]
 	%tmp4 = extractelement <16 x i8> %tmp, i32 9		; <i8> [#uses=1]
@@ -148,8 +148,8 @@ entry:
 
 define void @th_l(<8 x i16>* %A, <8 x i16>* %B) {
 entry:
-	%tmp = load <8 x i16>* %A		; <<8 x i16>> [#uses=4]
-	%tmp2 = load <8 x i16>* %B		; <<8 x i16>> [#uses=4]
+	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=4]
+	%tmp2 = load <8 x i16>, <8 x i16>* %B		; <<8 x i16>> [#uses=4]
 	%tmp.upgrd.13 = extractelement <8 x i16> %tmp, i32 4		; <i16> [#uses=1]
 	%tmp3 = extractelement <8 x i16> %tmp2, i32 4		; <i16> [#uses=1]
 	%tmp4 = extractelement <8 x i16> %tmp, i32 5		; <i16> [#uses=1]
@@ -172,8 +172,8 @@ entry:
 
 define void @tw_l(<4 x i32>* %A, <4 x i32>* %B) {
 entry:
-	%tmp = load <4 x i32>* %A		; <<4 x i32>> [#uses=2]
-	%tmp2 = load <4 x i32>* %B		; <<4 x i32>> [#uses=2]
+	%tmp = load <4 x i32>, <4 x i32>* %A		; <<4 x i32>> [#uses=2]
+	%tmp2 = load <4 x i32>, <4 x i32>* %B		; <<4 x i32>> [#uses=2]
 	%tmp.upgrd.14 = extractelement <4 x i32> %tmp, i32 2		; <i32> [#uses=1]
 	%tmp3 = extractelement <4 x i32> %tmp2, i32 2		; <i32> [#uses=1]
 	%tmp4 = extractelement <4 x i32> %tmp, i32 3		; <i32> [#uses=1]
@@ -188,8 +188,8 @@ entry:
 
 define void @tb_h(<16 x i8>* %A, <16 x i8>* %B) {
 entry:
-	%tmp = load <16 x i8>* %A		; <<16 x i8>> [#uses=8]
-	%tmp2 = load <16 x i8>* %B		; <<16 x i8>> [#uses=8]
+	%tmp = load <16 x i8>, <16 x i8>* %A		; <<16 x i8>> [#uses=8]
+	%tmp2 = load <16 x i8>, <16 x i8>* %B		; <<16 x i8>> [#uses=8]
 	%tmp.upgrd.15 = extractelement <16 x i8> %tmp, i32 0		; <i8> [#uses=1]
 	%tmp3 = extractelement <16 x i8> %tmp2, i32 0		; <i8> [#uses=1]
 	%tmp4 = extractelement <16 x i8> %tmp, i32 1		; <i8> [#uses=1]
@@ -228,8 +228,8 @@ entry:
 
 define void @th_h(<8 x i16>* %A, <8 x i16>* %B) {
 entry:
-	%tmp = load <8 x i16>* %A		; <<8 x i16>> [#uses=4]
-	%tmp2 = load <8 x i16>* %B		; <<8 x i16>> [#uses=4]
+	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=4]
+	%tmp2 = load <8 x i16>, <8 x i16>* %B		; <<8 x i16>> [#uses=4]
 	%tmp.upgrd.16 = extractelement <8 x i16> %tmp, i32 0		; <i16> [#uses=1]
 	%tmp3 = extractelement <8 x i16> %tmp2, i32 0		; <i16> [#uses=1]
 	%tmp4 = extractelement <8 x i16> %tmp, i32 1		; <i16> [#uses=1]
@@ -252,8 +252,8 @@ entry:
 
 define void @tw_h(<4 x i32>* %A, <4 x i32>* %B) {
 entry:
-	%tmp = load <4 x i32>* %A		; <<4 x i32>> [#uses=2]
-	%tmp2 = load <4 x i32>* %B		; <<4 x i32>> [#uses=2]
+	%tmp = load <4 x i32>, <4 x i32>* %A		; <<4 x i32>> [#uses=2]
+	%tmp2 = load <4 x i32>, <4 x i32>* %B		; <<4 x i32>> [#uses=2]
 	%tmp.upgrd.17 = extractelement <4 x i32> %tmp2, i32 0		; <i32> [#uses=1]
 	%tmp3 = extractelement <4 x i32> %tmp, i32 0		; <i32> [#uses=1]
 	%tmp4 = extractelement <4 x i32> %tmp2, i32 1		; <i32> [#uses=1]
@@ -267,8 +267,8 @@ entry:
 }
 
 define void @tw_h_flop(<4 x i32>* %A, <4 x i32>* %B) {
-	%tmp = load <4 x i32>* %A		; <<4 x i32>> [#uses=2]
-	%tmp2 = load <4 x i32>* %B		; <<4 x i32>> [#uses=2]
+	%tmp = load <4 x i32>, <4 x i32>* %A		; <<4 x i32>> [#uses=2]
+	%tmp2 = load <4 x i32>, <4 x i32>* %B		; <<4 x i32>> [#uses=2]
 	%tmp.upgrd.18 = extractelement <4 x i32> %tmp, i32 0		; <i32> [#uses=1]
 	%tmp3 = extractelement <4 x i32> %tmp2, i32 0		; <i32> [#uses=1]
 	%tmp4 = extractelement <4 x i32> %tmp, i32 1		; <i32> [#uses=1]
@@ -283,7 +283,7 @@ define void @tw_h_flop(<4 x i32>* %A, <4
 
 define void @VMRG_UNARY_tb_l(<16 x i8>* %A, <16 x i8>* %B) {
 entry:
-	%tmp = load <16 x i8>* %A		; <<16 x i8>> [#uses=16]
+	%tmp = load <16 x i8>, <16 x i8>* %A		; <<16 x i8>> [#uses=16]
 	%tmp.upgrd.19 = extractelement <16 x i8> %tmp, i32 8		; <i8> [#uses=1]
 	%tmp3 = extractelement <16 x i8> %tmp, i32 8		; <i8> [#uses=1]
 	%tmp4 = extractelement <16 x i8> %tmp, i32 9		; <i8> [#uses=1]
@@ -322,7 +322,7 @@ entry:
 
 define void @VMRG_UNARY_th_l(<8 x i16>* %A, <8 x i16>* %B) {
 entry:
-	%tmp = load <8 x i16>* %A		; <<8 x i16>> [#uses=8]
+	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=8]
 	%tmp.upgrd.20 = extractelement <8 x i16> %tmp, i32 4		; <i16> [#uses=1]
 	%tmp3 = extractelement <8 x i16> %tmp, i32 4		; <i16> [#uses=1]
 	%tmp4 = extractelement <8 x i16> %tmp, i32 5		; <i16> [#uses=1]
@@ -345,7 +345,7 @@ entry:
 
 define void @VMRG_UNARY_tw_l(<4 x i32>* %A, <4 x i32>* %B) {
 entry:
-	%tmp = load <4 x i32>* %A		; <<4 x i32>> [#uses=4]
+	%tmp = load <4 x i32>, <4 x i32>* %A		; <<4 x i32>> [#uses=4]
 	%tmp.upgrd.21 = extractelement <4 x i32> %tmp, i32 2		; <i32> [#uses=1]
 	%tmp3 = extractelement <4 x i32> %tmp, i32 2		; <i32> [#uses=1]
 	%tmp4 = extractelement <4 x i32> %tmp, i32 3		; <i32> [#uses=1]
@@ -360,7 +360,7 @@ entry:
 
 define void @VMRG_UNARY_tb_h(<16 x i8>* %A, <16 x i8>* %B) {
 entry:
-	%tmp = load <16 x i8>* %A		; <<16 x i8>> [#uses=16]
+	%tmp = load <16 x i8>, <16 x i8>* %A		; <<16 x i8>> [#uses=16]
 	%tmp.upgrd.22 = extractelement <16 x i8> %tmp, i32 0		; <i8> [#uses=1]
 	%tmp3 = extractelement <16 x i8> %tmp, i32 0		; <i8> [#uses=1]
 	%tmp4 = extractelement <16 x i8> %tmp, i32 1		; <i8> [#uses=1]
@@ -399,7 +399,7 @@ entry:
 
 define void @VMRG_UNARY_th_h(<8 x i16>* %A, <8 x i16>* %B) {
 entry:
-	%tmp = load <8 x i16>* %A		; <<8 x i16>> [#uses=8]
+	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=8]
 	%tmp.upgrd.23 = extractelement <8 x i16> %tmp, i32 0		; <i16> [#uses=1]
 	%tmp3 = extractelement <8 x i16> %tmp, i32 0		; <i16> [#uses=1]
 	%tmp4 = extractelement <8 x i16> %tmp, i32 1		; <i16> [#uses=1]
@@ -422,7 +422,7 @@ entry:
 
 define void @VMRG_UNARY_tw_h(<4 x i32>* %A, <4 x i32>* %B) {
 entry:
-	%tmp = load <4 x i32>* %A		; <<4 x i32>> [#uses=4]
+	%tmp = load <4 x i32>, <4 x i32>* %A		; <<4 x i32>> [#uses=4]
 	%tmp.upgrd.24 = extractelement <4 x i32> %tmp, i32 0		; <i32> [#uses=1]
 	%tmp3 = extractelement <4 x i32> %tmp, i32 0		; <i32> [#uses=1]
 	%tmp4 = extractelement <4 x i32> %tmp, i32 1		; <i32> [#uses=1]
@@ -437,7 +437,7 @@ entry:
 
 define void @VPCKUHUM_unary(<8 x i16>* %A, <8 x i16>* %B) {
 entry:
-	%tmp = load <8 x i16>* %A		; <<8 x i16>> [#uses=2]
+	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=2]
 	%tmp.upgrd.25 = bitcast <8 x i16> %tmp to <16 x i8>		; <<16 x i8>> [#uses=8]
 	%tmp3 = bitcast <8 x i16> %tmp to <16 x i8>		; <<16 x i8>> [#uses=8]
 	%tmp.upgrd.26 = extractelement <16 x i8> %tmp.upgrd.25, i32 1		; <i8> [#uses=1]
@@ -479,7 +479,7 @@ entry:
 
 define void @VPCKUWUM_unary(<4 x i32>* %A, <4 x i32>* %B) {
 entry:
-	%tmp = load <4 x i32>* %A		; <<4 x i32>> [#uses=2]
+	%tmp = load <4 x i32>, <4 x i32>* %A		; <<4 x i32>> [#uses=2]
 	%tmp.upgrd.28 = bitcast <4 x i32> %tmp to <8 x i16>		; <<8 x i16>> [#uses=4]
 	%tmp3 = bitcast <4 x i32> %tmp to <8 x i16>		; <<8 x i16>> [#uses=4]
 	%tmp.upgrd.29 = extractelement <8 x i16> %tmp.upgrd.28, i32 1		; <i16> [#uses=1]

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_shuffle_le.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_shuffle_le.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_shuffle_le.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_shuffle_le.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 define void @VPKUHUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
 entry:
 ; CHECK: VPKUHUM_xy:
-        %tmp = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
 ; CHECK: lvx [[REG1:[0-9]+]]
 ; CHECK: lvx [[REG2:[0-9]+]]
@@ -16,7 +16,7 @@ entry:
 define void @VPKUHUM_xx(<16 x i8>* %A) {
 entry:
 ; CHECK: VPKUHUM_xx:
-        %tmp = load <16 x i8>* %A
+        %tmp = load <16 x i8>, <16 x i8>* %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
 ; CHECK: vpkuhum
         store <16 x i8> %tmp2, <16 x i8>* %A
@@ -26,8 +26,8 @@ entry:
 define void @VPKUWUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
 entry:
 ; CHECK: VPKUWUM_xy:
-        %tmp = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29>
 ; CHECK: lvx [[REG1:[0-9]+]]
 ; CHECK: lvx [[REG2:[0-9]+]]
@@ -39,7 +39,7 @@ entry:
 define void @VPKUWUM_xx(<16 x i8>* %A) {
 entry:
 ; CHECK: VPKUWUM_xx:
-        %tmp = load <16 x i8>* %A
+        %tmp = load <16 x i8>, <16 x i8>* %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
 ; CHECK: vpkuwum
         store <16 x i8> %tmp2, <16 x i8>* %A
@@ -49,8 +49,8 @@ entry:
 define void @VMRGLB_xy(<16 x i8>* %A, <16 x i8>* %B) {
 entry:
 ; CHECK: VMRGLB_xy:
-        %tmp = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
 ; CHECK: lvx [[REG1:[0-9]+]]
 ; CHECK: lvx [[REG2:[0-9]+]]
@@ -62,7 +62,7 @@ entry:
 define void @VMRGLB_xx(<16 x i8>* %A) {
 entry:
 ; CHECK: VMRGLB_xx:
-        %tmp = load <16 x i8>* %A
+        %tmp = load <16 x i8>, <16 x i8>* %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
 ; CHECK: vmrglb
         store <16 x i8> %tmp2, <16 x i8>* %A
@@ -72,8 +72,8 @@ entry:
 define void @VMRGHB_xy(<16 x i8>* %A, <16 x i8>* %B) {
 entry:
 ; CHECK: VMRGHB_xy:
-        %tmp = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
 ; CHECK: lvx [[REG1:[0-9]+]]
 ; CHECK: lvx [[REG2:[0-9]+]]
@@ -85,7 +85,7 @@ entry:
 define void @VMRGHB_xx(<16 x i8>* %A) {
 entry:
 ; CHECK: VMRGHB_xx:
-        %tmp = load <16 x i8>* %A
+        %tmp = load <16 x i8>, <16 x i8>* %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11, i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
 ; CHECK: vmrghb
         store <16 x i8> %tmp2, <16 x i8>* %A
@@ -95,8 +95,8 @@ entry:
 define void @VMRGLH_xy(<16 x i8>* %A, <16 x i8>* %B) {
 entry:
 ; CHECK: VMRGLH_xy:
-        %tmp = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 16, i32 17, i32 2, i32 3, i32 18, i32 19, i32 4, i32 5, i32 20, i32 21, i32 6, i32 7, i32 22, i32 23>
 ; CHECK: lvx [[REG1:[0-9]+]]
 ; CHECK: lvx [[REG2:[0-9]+]]
@@ -108,7 +108,7 @@ entry:
 define void @VMRGLH_xx(<16 x i8>* %A) {
 entry:
 ; CHECK: VMRGLH_xx:
-        %tmp = load <16 x i8>* %A
+        %tmp = load <16 x i8>, <16 x i8>* %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 4, i32 5, i32 4, i32 5, i32 6, i32 7, i32 6, i32 7>
 ; CHECK: vmrglh
         store <16 x i8> %tmp2, <16 x i8>* %A
@@ -118,8 +118,8 @@ entry:
 define void @VMRGHH_xy(<16 x i8>* %A, <16 x i8>* %B) {
 entry:
 ; CHECK: VMRGHH_xy:
-        %tmp = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 24, i32 25, i32 10, i32 11, i32 26, i32 27, i32 12, i32 13, i32 28, i32 29, i32 14, i32 15, i32 30, i32 31>
 ; CHECK: lvx [[REG1:[0-9]+]]
 ; CHECK: lvx [[REG2:[0-9]+]]
@@ -131,7 +131,7 @@ entry:
 define void @VMRGHH_xx(<16 x i8>* %A) {
 entry:
 ; CHECK: VMRGHH_xx:
-        %tmp = load <16 x i8>* %A
+        %tmp = load <16 x i8>, <16 x i8>* %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 8, i32 9, i32 10, i32 11, i32 10, i32 11, i32 12, i32 13, i32 12, i32 13, i32 14, i32 15, i32 14, i32 15>
 ; CHECK: vmrghh
         store <16 x i8> %tmp2, <16 x i8>* %A
@@ -141,8 +141,8 @@ entry:
 define void @VMRGLW_xy(<16 x i8>* %A, <16 x i8>* %B) {
 entry:
 ; CHECK: VMRGLW_xy:
-        %tmp = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23>
 ; CHECK: lvx [[REG1:[0-9]+]]
 ; CHECK: lvx [[REG2:[0-9]+]]
@@ -154,7 +154,7 @@ entry:
 define void @VMRGLW_xx(<16 x i8>* %A) {
 entry:
 ; CHECK: VMRGLW_xx:
-        %tmp = load <16 x i8>* %A
+        %tmp = load <16 x i8>, <16 x i8>* %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
 ; CHECK: vmrglw
         store <16 x i8> %tmp2, <16 x i8>* %A
@@ -164,8 +164,8 @@ entry:
 define void @VMRGHW_xy(<16 x i8>* %A, <16 x i8>* %B) {
 entry:
 ; CHECK: VMRGHW_xy:
-        %tmp = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31>
 ; CHECK: lvx [[REG1:[0-9]+]]
 ; CHECK: lvx [[REG2:[0-9]+]]
@@ -177,7 +177,7 @@ entry:
 define void @VMRGHW_xx(<16 x i8>* %A) {
 entry:
 ; CHECK: VMRGHW_xx:
-        %tmp = load <16 x i8>* %A
+        %tmp = load <16 x i8>, <16 x i8>* %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15>
 ; CHECK: vmrghw
         store <16 x i8> %tmp2, <16 x i8>* %A
@@ -187,8 +187,8 @@ entry:
 define void @VSLDOI_xy(<16 x i8>* %A, <16 x i8>* %B) {
 entry:
 ; CHECK: VSLDOI_xy:
-        %tmp = load <16 x i8>* %A
-        %tmp2 = load <16 x i8>* %B
+        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp2 = load <16 x i8>, <16 x i8>* %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27>
 ; CHECK: lvx [[REG1:[0-9]+]]
 ; CHECK: lvx [[REG2:[0-9]+]]
@@ -200,7 +200,7 @@ entry:
 define void @VSLDOI_xx(<16 x i8>* %A) {
 entry:
 ; CHECK: VSLDOI_xx:
-        %tmp = load <16 x i8>* %A
+        %tmp = load <16 x i8>, <16 x i8>* %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
 ; CHECK: vsldoi
         store <16 x i8> %tmp2, <16 x i8>* %A

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_splat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_splat.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_splat.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_splat.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ define void @splat(%f4* %P, %f4* %Q, flo
         %tmp2 = insertelement %f4 %tmp, float %X, i32 1         ; <%f4> [#uses=1]
         %tmp4 = insertelement %f4 %tmp2, float %X, i32 2                ; <%f4> [#uses=1]
         %tmp6 = insertelement %f4 %tmp4, float %X, i32 3                ; <%f4> [#uses=1]
-        %q = load %f4* %Q               ; <%f4> [#uses=1]
+        %q = load %f4, %f4* %Q               ; <%f4> [#uses=1]
         %R = fadd %f4 %q, %tmp6          ; <%f4> [#uses=1]
         store %f4 %R, %f4* %P
         ret void
@@ -25,21 +25,21 @@ define void @splat_i4(%i4* %P, %i4* %Q,
         %tmp2 = insertelement %i4 %tmp, i32 %X, i32 1           ; <%i4> [#uses=1]
         %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2          ; <%i4> [#uses=1]
         %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3          ; <%i4> [#uses=1]
-        %q = load %i4* %Q               ; <%i4> [#uses=1]
+        %q = load %i4, %i4* %Q               ; <%i4> [#uses=1]
         %R = add %i4 %q, %tmp6          ; <%i4> [#uses=1]
         store %i4 %R, %i4* %P
         ret void
 }
 
 define void @splat_imm_i32(%i4* %P, %i4* %Q, i32 %X) nounwind {
-        %q = load %i4* %Q               ; <%i4> [#uses=1]
+        %q = load %i4, %i4* %Q               ; <%i4> [#uses=1]
         %R = add %i4 %q, < i32 -1, i32 -1, i32 -1, i32 -1 >             ; <%i4> [#uses=1]
         store %i4 %R, %i4* %P
         ret void
 }
 
 define void @splat_imm_i16(%i4* %P, %i4* %Q, i32 %X) nounwind {
-        %q = load %i4* %Q               ; <%i4> [#uses=1]
+        %q = load %i4, %i4* %Q               ; <%i4> [#uses=1]
         %R = add %i4 %q, < i32 65537, i32 65537, i32 65537, i32 65537 >         ; <%i4> [#uses=1]
         store %i4 %R, %i4* %P
         ret void
@@ -60,7 +60,7 @@ define void @splat_h(i16 %tmp, <16 x i8>
 }
 
 define void @spltish(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-        %tmp = load <16 x i8>* %B               ; <<16 x i8>> [#uses=1]
+        %tmp = load <16 x i8>, <16 x i8>* %B               ; <<16 x i8>> [#uses=1]
         %tmp.s = bitcast <16 x i8> %tmp to <16 x i8>            ; <<16 x i8>> [#uses=1]
         %tmp4 = sub <16 x i8> %tmp.s, bitcast (<8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16
  15, i16 15, i16 15 > to <16 x i8>)             ; <<16 x i8>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_splat_constant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_splat_constant.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_splat_constant.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_splat_constant.ll Fri Feb 27 15:17:42 2015
@@ -12,8 +12,8 @@ entry:
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
   store <16 x i8> %x, <16 x i8>* %x_addr
   store <16 x i8> <i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14>, <16 x i8>* %temp, align 16
-  %0 = load <16 x i8>* %x_addr, align 16          ; <<16 x i8>> [#uses=1]
-  %1 = load <16 x i8>* %temp, align 16            ; <<16 x i8>> [#uses=1]
+  %0 = load <16 x i8>, <16 x i8>* %x_addr, align 16          ; <<16 x i8>> [#uses=1]
+  %1 = load <16 x i8>, <16 x i8>* %temp, align 16            ; <<16 x i8>> [#uses=1]
   %tmp = add <16 x i8> %0, %1                     ; <<16 x i8>> [#uses=1]
   store <16 x i8> %tmp, <16 x i8>* @baz, align 16
   br label %return

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_zero.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_zero.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_zero.ll Fri Feb 27 15:17:42 2015
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vxor
 
 define void @foo(<4 x float>* %P) {
-        %T = load <4 x float>* %P               ; <<4 x float>> [#uses=1]
+        %T = load <4 x float>, <4 x float>* %P               ; <<4 x float>> [#uses=1]
         %S = fadd <4 x float> zeroinitializer, %T                ; <<4 x float>> [#uses=1]
         store <4 x float> %S, <4 x float>* %P
         ret void

Modified: llvm/trunk/test/CodeGen/PowerPC/vector-identity-shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vector-identity-shuffle.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vector-identity-shuffle.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vector-identity-shuffle.ll Fri Feb 27 15:17:42 2015
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep vperm
 
 define void @test(<4 x float>* %tmp2.i) {
-        %tmp2.i.upgrd.1 = load <4 x float>* %tmp2.i             ; <<4 x float>> [#uses=4]
+        %tmp2.i.upgrd.1 = load <4 x float>, <4 x float>* %tmp2.i             ; <<4 x float>> [#uses=4]
         %xFloat0.48 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 0      ; <float> [#uses=1]
         %inFloat0.49 = insertelement <4 x float> undef, float %xFloat0.48, i32 0              ; <<4 x float>> [#uses=1]
         %xFloat1.50 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 1      ; <float> [#uses=1]

Modified: llvm/trunk/test/CodeGen/PowerPC/vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vector.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vector.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vector.ll Fri Feb 27 15:17:42 2015
@@ -12,56 +12,56 @@
 ;;; TEST HANDLING OF VARIOUS VECTOR SIZES
 
 define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
-        %p = load %f1* %P               ; <%f1> [#uses=1]
-        %q = load %f1* %Q               ; <%f1> [#uses=1]
+        %p = load %f1, %f1* %P               ; <%f1> [#uses=1]
+        %q = load %f1, %f1* %Q               ; <%f1> [#uses=1]
         %R = fadd %f1 %p, %q             ; <%f1> [#uses=1]
         store %f1 %R, %f1* %S
         ret void
 }
 
 define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
-        %p = load %f2* %P               ; <%f2> [#uses=1]
-        %q = load %f2* %Q               ; <%f2> [#uses=1]
+        %p = load %f2, %f2* %P               ; <%f2> [#uses=1]
+        %q = load %f2, %f2* %Q               ; <%f2> [#uses=1]
         %R = fadd %f2 %p, %q             ; <%f2> [#uses=1]
         store %f2 %R, %f2* %S
         ret void
 }
 
 define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
-        %p = load %f4* %P               ; <%f4> [#uses=1]
-        %q = load %f4* %Q               ; <%f4> [#uses=1]
+        %p = load %f4, %f4* %P               ; <%f4> [#uses=1]
+        %q = load %f4, %f4* %Q               ; <%f4> [#uses=1]
         %R = fadd %f4 %p, %q             ; <%f4> [#uses=1]
         store %f4 %R, %f4* %S
         ret void
 }
 
 define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
-        %p = load %f8* %P               ; <%f8> [#uses=1]
-        %q = load %f8* %Q               ; <%f8> [#uses=1]
+        %p = load %f8, %f8* %P               ; <%f8> [#uses=1]
+        %q = load %f8, %f8* %Q               ; <%f8> [#uses=1]
         %R = fadd %f8 %p, %q             ; <%f8> [#uses=1]
         store %f8 %R, %f8* %S
         ret void
 }
 
 define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) {
-        %p = load %f8* %P               ; <%f8> [#uses=1]
-        %q = load %f8* %Q               ; <%f8> [#uses=1]
+        %p = load %f8, %f8* %P               ; <%f8> [#uses=1]
+        %q = load %f8, %f8* %Q               ; <%f8> [#uses=1]
         %R = fmul %f8 %p, %q             ; <%f8> [#uses=1]
         store %f8 %R, %f8* %S
         ret void
 }
 
 define void @test_div(%f8* %P, %f8* %Q, %f8* %S) {
-        %p = load %f8* %P               ; <%f8> [#uses=1]
-        %q = load %f8* %Q               ; <%f8> [#uses=1]
+        %p = load %f8, %f8* %P               ; <%f8> [#uses=1]
+        %q = load %f8, %f8* %Q               ; <%f8> [#uses=1]
         %R = fdiv %f8 %p, %q            ; <%f8> [#uses=1]
         store %f8 %R, %f8* %S
         ret void
 }
 
 define void @test_rem(%f8* %P, %f8* %Q, %f8* %S) {
-        %p = load %f8* %P               ; <%f8> [#uses=1]
-        %q = load %f8* %Q               ; <%f8> [#uses=1]
+        %p = load %f8, %f8* %P               ; <%f8> [#uses=1]
+        %q = load %f8, %f8* %Q               ; <%f8> [#uses=1]
         %R = frem %f8 %p, %q            ; <%f8> [#uses=1]
         store %f8 %R, %f8* %S
         ret void
@@ -70,7 +70,7 @@ define void @test_rem(%f8* %P, %f8* %Q,
 ;;; TEST VECTOR CONSTRUCTS
 
 define void @test_cst(%f4* %P, %f4* %S) {
-        %p = load %f4* %P               ; <%f4> [#uses=1]
+        %p = load %f4, %f4* %P               ; <%f4> [#uses=1]
         %R = fadd %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float
  2.000000e+00, float 4.500000e+00 >             ; <%f4> [#uses=1]
         store %f4 %R, %f4* %S
@@ -78,14 +78,14 @@ define void @test_cst(%f4* %P, %f4* %S)
 }
 
 define void @test_zero(%f4* %P, %f4* %S) {
-        %p = load %f4* %P               ; <%f4> [#uses=1]
+        %p = load %f4, %f4* %P               ; <%f4> [#uses=1]
         %R = fadd %f4 %p, zeroinitializer                ; <%f4> [#uses=1]
         store %f4 %R, %f4* %S
         ret void
 }
 
 define void @test_undef(%f4* %P, %f4* %S) {
-        %p = load %f4* %P               ; <%f4> [#uses=1]
+        %p = load %f4, %f4* %P               ; <%f4> [#uses=1]
         %R = fadd %f4 %p, undef          ; <%f4> [#uses=1]
         store %f4 %R, %f4* %S
         ret void
@@ -111,19 +111,19 @@ define void @test_scalar_to_vector(float
 }
 
 define float @test_extract_elt(%f8* %P) {
-        %p = load %f8* %P               ; <%f8> [#uses=1]
+        %p = load %f8, %f8* %P               ; <%f8> [#uses=1]
         %R = extractelement %f8 %p, i32 3               ; <float> [#uses=1]
         ret float %R
 }
 
 define double @test_extract_elt2(%d8* %P) {
-        %p = load %d8* %P               ; <%d8> [#uses=1]
+        %p = load %d8, %d8* %P               ; <%d8> [#uses=1]
         %R = extractelement %d8 %p, i32 3               ; <double> [#uses=1]
         ret double %R
 }
 
 define void @test_cast_1(%f4* %b, %i4* %a) {
-        %tmp = load %f4* %b             ; <%f4> [#uses=1]
+        %tmp = load %f4, %f4* %b             ; <%f4> [#uses=1]
         %tmp2 = fadd %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float
 3.000000e+00, float 4.000000e+00 >              ; <%f4> [#uses=1]
         %tmp3 = bitcast %f4 %tmp2 to %i4                ; <%i4> [#uses=1]
@@ -133,7 +133,7 @@ define void @test_cast_1(%f4* %b, %i4* %
 }
 
 define void @test_cast_2(%f8* %a, <8 x i32>* %b) {
-        %T = load %f8* %a               ; <%f8> [#uses=1]
+        %T = load %f8, %f8* %a               ; <%f8> [#uses=1]
         %T2 = bitcast %f8 %T to <8 x i32>               
         store <8 x i32> %T2, <8 x i32>* %b
         ret void
@@ -147,7 +147,7 @@ define void @splat(%f4* %P, %f4* %Q, flo
         %tmp2 = insertelement %f4 %tmp, float %X, i32 1       
         %tmp4 = insertelement %f4 %tmp2, float %X, i32 2    
         %tmp6 = insertelement %f4 %tmp4, float %X, i32 3   
-        %q = load %f4* %Q               ; <%f4> [#uses=1]
+        %q = load %f4, %f4* %Q               ; <%f4> [#uses=1]
         %R = fadd %f4 %q, %tmp6          ; <%f4> [#uses=1]
         store %f4 %R, %f4* %P
         ret void
@@ -158,7 +158,7 @@ define void @splat_i4(%i4* %P, %i4* %Q,
         %tmp2 = insertelement %i4 %tmp, i32 %X, i32 1         
         %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2       
         %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3     
-        %q = load %i4* %Q               ; <%i4> [#uses=1]
+        %q = load %i4, %i4* %Q               ; <%i4> [#uses=1]
         %R = add %i4 %q, %tmp6          ; <%i4> [#uses=1]
         store %i4 %R, %i4* %P
         ret void

Modified: llvm/trunk/test/CodeGen/PowerPC/vsx-div.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vsx-div.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vsx-div.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vsx-div.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 
 define void @test1() {
 entry:
-  %0 = load <4 x float>* @vf, align 16
+  %0 = load <4 x float>, <4 x float>* @vf, align 16
   %1 = tail call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %0)
   store <4 x float> %1, <4 x float>* @vf_res, align 16
   ret void
@@ -17,7 +17,7 @@ entry:
 
 define void @test2() {
 entry:
-  %0 = load <2 x double>* @vd, align 16
+  %0 = load <2 x double>, <2 x double>* @vd, align 16
   %1 = tail call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %0)
   store <2 x double> %1, <2 x double>* @vd_res, align 16
   ret void

Modified: llvm/trunk/test/CodeGen/PowerPC/vsx-infl-copy1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vsx-infl-copy1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vsx-infl-copy1.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vsx-infl-copy1.ll Fri Feb 27 15:17:42 2015
@@ -26,69 +26,69 @@ vector.body:
   %vec.phi28 = phi <4 x i32> [ zeroinitializer, %entry ], [ %51, %vector.body ]
   %vec.phi29 = phi <4 x i32> [ zeroinitializer, %entry ], [ %52, %vector.body ]
   %vec.phi30 = phi <4 x i32> [ zeroinitializer, %entry ], [ %53, %vector.body ]
-  %wide.load32 = load <4 x i32>* null, align 4
+  %wide.load32 = load <4 x i32>, <4 x i32>* null, align 4
   %.sum82 = add i64 %index, 24
   %0 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum82
   %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load36 = load <4 x i32>* %1, align 4
-  %wide.load37 = load <4 x i32>* undef, align 4
+  %wide.load36 = load <4 x i32>, <4 x i32>* %1, align 4
+  %wide.load37 = load <4 x i32>, <4 x i32>* undef, align 4
   %.sum84 = add i64 %index, 32
   %2 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum84
   %3 = bitcast i32* %2 to <4 x i32>*
-  %wide.load38 = load <4 x i32>* %3, align 4
+  %wide.load38 = load <4 x i32>, <4 x i32>* %3, align 4
   %.sum85 = add i64 %index, 36
   %4 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum85
   %5 = bitcast i32* %4 to <4 x i32>*
-  %wide.load39 = load <4 x i32>* %5, align 4
+  %wide.load39 = load <4 x i32>, <4 x i32>* %5, align 4
   %6 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 undef
   %7 = bitcast i32* %6 to <4 x i32>*
-  %wide.load40 = load <4 x i32>* %7, align 4
+  %wide.load40 = load <4 x i32>, <4 x i32>* %7, align 4
   %.sum87 = add i64 %index, 44
   %8 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum87
   %9 = bitcast i32* %8 to <4 x i32>*
-  %wide.load41 = load <4 x i32>* %9, align 4
+  %wide.load41 = load <4 x i32>, <4 x i32>* %9, align 4
   %10 = getelementptr inbounds [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %index
   %11 = bitcast i32* %10 to <4 x i32>*
-  %wide.load42 = load <4 x i32>* %11, align 4
+  %wide.load42 = load <4 x i32>, <4 x i32>* %11, align 4
   %.sum8889 = or i64 %index, 4
   %12 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum8889
   %13 = bitcast i32* %12 to <4 x i32>*
-  %wide.load43 = load <4 x i32>* %13, align 4
+  %wide.load43 = load <4 x i32>, <4 x i32>* %13, align 4
   %.sum9091 = or i64 %index, 8
   %14 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum9091
   %15 = bitcast i32* %14 to <4 x i32>*
-  %wide.load44 = load <4 x i32>* %15, align 4
+  %wide.load44 = load <4 x i32>, <4 x i32>* %15, align 4
   %.sum94 = add i64 %index, 16
   %16 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum94
   %17 = bitcast i32* %16 to <4 x i32>*
-  %wide.load46 = load <4 x i32>* %17, align 4
+  %wide.load46 = load <4 x i32>, <4 x i32>* %17, align 4
   %.sum95 = add i64 %index, 20
   %18 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum95
   %19 = bitcast i32* %18 to <4 x i32>*
-  %wide.load47 = load <4 x i32>* %19, align 4
+  %wide.load47 = load <4 x i32>, <4 x i32>* %19, align 4
   %20 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 undef
   %21 = bitcast i32* %20 to <4 x i32>*
-  %wide.load48 = load <4 x i32>* %21, align 4
+  %wide.load48 = load <4 x i32>, <4 x i32>* %21, align 4
   %.sum97 = add i64 %index, 28
   %22 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum97
   %23 = bitcast i32* %22 to <4 x i32>*
-  %wide.load49 = load <4 x i32>* %23, align 4
+  %wide.load49 = load <4 x i32>, <4 x i32>* %23, align 4
   %.sum98 = add i64 %index, 32
   %24 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum98
   %25 = bitcast i32* %24 to <4 x i32>*
-  %wide.load50 = load <4 x i32>* %25, align 4
+  %wide.load50 = load <4 x i32>, <4 x i32>* %25, align 4
   %.sum99 = add i64 %index, 36
   %26 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum99
   %27 = bitcast i32* %26 to <4 x i32>*
-  %wide.load51 = load <4 x i32>* %27, align 4
+  %wide.load51 = load <4 x i32>, <4 x i32>* %27, align 4
   %.sum100 = add i64 %index, 40
   %28 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum100
   %29 = bitcast i32* %28 to <4 x i32>*
-  %wide.load52 = load <4 x i32>* %29, align 4
+  %wide.load52 = load <4 x i32>, <4 x i32>* %29, align 4
   %.sum101 = add i64 %index, 44
   %30 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum101
   %31 = bitcast i32* %30 to <4 x i32>*
-  %wide.load53 = load <4 x i32>* %31, align 4
+  %wide.load53 = load <4 x i32>, <4 x i32>* %31, align 4
   %32 = add <4 x i32> zeroinitializer, %vec.phi
   %33 = add <4 x i32> zeroinitializer, %vec.phi20
   %34 = add <4 x i32> %wide.load32, %vec.phi21

Modified: llvm/trunk/test/CodeGen/PowerPC/vsx-infl-copy2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vsx-infl-copy2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vsx-infl-copy2.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vsx-infl-copy2.ll Fri Feb 27 15:17:42 2015
@@ -29,39 +29,39 @@ vector.body:
   %vec.phi70 = phi <4 x i32> [ %41, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
   %vec.phi71 = phi <4 x i32> [ %42, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
   %.sum = add i64 0, 4
-  %wide.load72 = load <4 x i32>* null, align 4
+  %wide.load72 = load <4 x i32>, <4 x i32>* null, align 4
   %.sum109 = add i64 0, 8
   %0 = getelementptr i32, i32* %first, i64 %.sum109
   %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load73 = load <4 x i32>* %1, align 4
+  %wide.load73 = load <4 x i32>, <4 x i32>* %1, align 4
   %.sum110 = add i64 0, 12
   %2 = getelementptr i32, i32* %first, i64 %.sum110
   %3 = bitcast i32* %2 to <4 x i32>*
-  %wide.load74 = load <4 x i32>* %3, align 4
+  %wide.load74 = load <4 x i32>, <4 x i32>* %3, align 4
   %.sum112 = add i64 0, 20
   %4 = getelementptr i32, i32* %first, i64 %.sum112
   %5 = bitcast i32* %4 to <4 x i32>*
-  %wide.load76 = load <4 x i32>* %5, align 4
+  %wide.load76 = load <4 x i32>, <4 x i32>* %5, align 4
   %.sum114 = add i64 0, 28
   %6 = getelementptr i32, i32* %first, i64 %.sum114
   %7 = bitcast i32* %6 to <4 x i32>*
-  %wide.load78 = load <4 x i32>* %7, align 4
+  %wide.load78 = load <4 x i32>, <4 x i32>* %7, align 4
   %.sum115 = add i64 0, 32
   %8 = getelementptr i32, i32* %first, i64 %.sum115
   %9 = bitcast i32* %8 to <4 x i32>*
-  %wide.load79 = load <4 x i32>* %9, align 4
+  %wide.load79 = load <4 x i32>, <4 x i32>* %9, align 4
   %.sum116 = add i64 0, 36
   %10 = getelementptr i32, i32* %first, i64 %.sum116
   %11 = bitcast i32* %10 to <4 x i32>*
-  %wide.load80 = load <4 x i32>* %11, align 4
+  %wide.load80 = load <4 x i32>, <4 x i32>* %11, align 4
   %.sum117 = add i64 0, 40
   %12 = getelementptr i32, i32* %first, i64 %.sum117
   %13 = bitcast i32* %12 to <4 x i32>*
-  %wide.load81 = load <4 x i32>* %13, align 4
+  %wide.load81 = load <4 x i32>, <4 x i32>* %13, align 4
   %.sum118 = add i64 0, 44
   %14 = getelementptr i32, i32* %first, i64 %.sum118
   %15 = bitcast i32* %14 to <4 x i32>*
-  %wide.load82 = load <4 x i32>* %15, align 4
+  %wide.load82 = load <4 x i32>, <4 x i32>* %15, align 4
   %16 = mul <4 x i32> %wide.load72, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
   %17 = mul <4 x i32> %wide.load73, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
   %18 = mul <4 x i32> %wide.load74, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>

Modified: llvm/trunk/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll Fri Feb 27 15:17:42 2015
@@ -51,24 +51,24 @@ entry:
   %__b.addr.i = alloca <4 x i32>*, align 8
   store i32 0, i32* %__a.addr.i, align 4
   store <4 x i32>* @vsi, <4 x i32>** %__b.addr.i, align 8
-  %0 = load i32* %__a.addr.i, align 4
-  %1 = load <4 x i32>** %__b.addr.i, align 8
+  %0 = load i32, i32* %__a.addr.i, align 4
+  %1 = load <4 x i32>*, <4 x i32>** %__b.addr.i, align 8
   %2 = bitcast <4 x i32>* %1 to i8*
   %3 = getelementptr i8, i8* %2, i32 %0
   %4 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %3)
   store <4 x i32> %4, <4 x i32>* @res_vsi, align 16
   store i32 0, i32* %__a.addr.i31, align 4
   store <4 x i32>* @vui, <4 x i32>** %__b.addr.i32, align 8
-  %5 = load i32* %__a.addr.i31, align 4
-  %6 = load <4 x i32>** %__b.addr.i32, align 8
+  %5 = load i32, i32* %__a.addr.i31, align 4
+  %6 = load <4 x i32>*, <4 x i32>** %__b.addr.i32, align 8
   %7 = bitcast <4 x i32>* %6 to i8*
   %8 = getelementptr i8, i8* %7, i32 %5
   %9 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %8)
   store <4 x i32> %9, <4 x i32>* @res_vui, align 16
   store i32 0, i32* %__a.addr.i29, align 4
   store <4 x float>* @vf, <4 x float>** %__b.addr.i30, align 8
-  %10 = load i32* %__a.addr.i29, align 4
-  %11 = load <4 x float>** %__b.addr.i30, align 8
+  %10 = load i32, i32* %__a.addr.i29, align 4
+  %11 = load <4 x float>*, <4 x float>** %__b.addr.i30, align 8
   %12 = bitcast <4 x float>* %11 to i8*
   %13 = getelementptr i8, i8* %12, i32 %10
   %14 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %13)
@@ -76,8 +76,8 @@ entry:
   store <4 x float> %15, <4 x float>* @res_vf, align 16
   store i32 0, i32* %__a.addr.i27, align 4
   store <2 x i64>* @vsll, <2 x i64>** %__b.addr.i28, align 8
-  %16 = load i32* %__a.addr.i27, align 4
-  %17 = load <2 x i64>** %__b.addr.i28, align 8
+  %16 = load i32, i32* %__a.addr.i27, align 4
+  %17 = load <2 x i64>*, <2 x i64>** %__b.addr.i28, align 8
   %18 = bitcast <2 x i64>* %17 to i8*
   %19 = getelementptr i8, i8* %18, i32 %16
   %20 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %19)
@@ -85,8 +85,8 @@ entry:
   store <2 x i64> %21, <2 x i64>* @res_vsll, align 16
   store i32 0, i32* %__a.addr.i25, align 4
   store <2 x i64>* @vull, <2 x i64>** %__b.addr.i26, align 8
-  %22 = load i32* %__a.addr.i25, align 4
-  %23 = load <2 x i64>** %__b.addr.i26, align 8
+  %22 = load i32, i32* %__a.addr.i25, align 4
+  %23 = load <2 x i64>*, <2 x i64>** %__b.addr.i26, align 8
   %24 = bitcast <2 x i64>* %23 to i8*
   %25 = getelementptr i8, i8* %24, i32 %22
   %26 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %25)
@@ -94,72 +94,72 @@ entry:
   store <2 x i64> %27, <2 x i64>* @res_vull, align 16
   store i32 0, i32* %__a.addr.i23, align 4
   store <2 x double>* @vd, <2 x double>** %__b.addr.i24, align 8
-  %28 = load i32* %__a.addr.i23, align 4
-  %29 = load <2 x double>** %__b.addr.i24, align 8
+  %28 = load i32, i32* %__a.addr.i23, align 4
+  %29 = load <2 x double>*, <2 x double>** %__b.addr.i24, align 8
   %30 = bitcast <2 x double>* %29 to i8*
   %31 = getelementptr i8, i8* %30, i32 %28
   %32 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %31)
   store <2 x double> %32, <2 x double>* @res_vd, align 16
-  %33 = load <4 x i32>* @vsi, align 16
+  %33 = load <4 x i32>, <4 x i32>* @vsi, align 16
   store <4 x i32> %33, <4 x i32>* %__a.addr.i20, align 16
   store i32 0, i32* %__b.addr.i21, align 4
   store <4 x i32>* @res_vsi, <4 x i32>** %__c.addr.i22, align 8
-  %34 = load <4 x i32>* %__a.addr.i20, align 16
-  %35 = load i32* %__b.addr.i21, align 4
-  %36 = load <4 x i32>** %__c.addr.i22, align 8
+  %34 = load <4 x i32>, <4 x i32>* %__a.addr.i20, align 16
+  %35 = load i32, i32* %__b.addr.i21, align 4
+  %36 = load <4 x i32>*, <4 x i32>** %__c.addr.i22, align 8
   %37 = bitcast <4 x i32>* %36 to i8*
   %38 = getelementptr i8, i8* %37, i32 %35
   call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %34, i8* %38)
-  %39 = load <4 x i32>* @vui, align 16
+  %39 = load <4 x i32>, <4 x i32>* @vui, align 16
   store <4 x i32> %39, <4 x i32>* %__a.addr.i17, align 16
   store i32 0, i32* %__b.addr.i18, align 4
   store <4 x i32>* @res_vui, <4 x i32>** %__c.addr.i19, align 8
-  %40 = load <4 x i32>* %__a.addr.i17, align 16
-  %41 = load i32* %__b.addr.i18, align 4
-  %42 = load <4 x i32>** %__c.addr.i19, align 8
+  %40 = load <4 x i32>, <4 x i32>* %__a.addr.i17, align 16
+  %41 = load i32, i32* %__b.addr.i18, align 4
+  %42 = load <4 x i32>*, <4 x i32>** %__c.addr.i19, align 8
   %43 = bitcast <4 x i32>* %42 to i8*
   %44 = getelementptr i8, i8* %43, i32 %41
   call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %40, i8* %44)
-  %45 = load <4 x float>* @vf, align 16
+  %45 = load <4 x float>, <4 x float>* @vf, align 16
   store <4 x float> %45, <4 x float>* %__a.addr.i14, align 16
   store i32 0, i32* %__b.addr.i15, align 4
   store <4 x float>* @res_vf, <4 x float>** %__c.addr.i16, align 8
-  %46 = load <4 x float>* %__a.addr.i14, align 16
+  %46 = load <4 x float>, <4 x float>* %__a.addr.i14, align 16
   %47 = bitcast <4 x float> %46 to <4 x i32>
-  %48 = load i32* %__b.addr.i15, align 4
-  %49 = load <4 x float>** %__c.addr.i16, align 8
+  %48 = load i32, i32* %__b.addr.i15, align 4
+  %49 = load <4 x float>*, <4 x float>** %__c.addr.i16, align 8
   %50 = bitcast <4 x float>* %49 to i8*
   %51 = getelementptr i8, i8* %50, i32 %48
   call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %47, i8* %51) #1
-  %52 = load <2 x i64>* @vsll, align 16
+  %52 = load <2 x i64>, <2 x i64>* @vsll, align 16
   store <2 x i64> %52, <2 x i64>* %__a.addr.i11, align 16
   store i32 0, i32* %__b.addr.i12, align 4
   store <2 x i64>* @res_vsll, <2 x i64>** %__c.addr.i13, align 8
-  %53 = load <2 x i64>* %__a.addr.i11, align 16
+  %53 = load <2 x i64>, <2 x i64>* %__a.addr.i11, align 16
   %54 = bitcast <2 x i64> %53 to <2 x double>
-  %55 = load i32* %__b.addr.i12, align 4
-  %56 = load <2 x i64>** %__c.addr.i13, align 8
+  %55 = load i32, i32* %__b.addr.i12, align 4
+  %56 = load <2 x i64>*, <2 x i64>** %__c.addr.i13, align 8
   %57 = bitcast <2 x i64>* %56 to i8*
   %58 = getelementptr i8, i8* %57, i32 %55
   call void @llvm.ppc.vsx.stxvd2x(<2 x double> %54, i8* %58)
-  %59 = load <2 x i64>* @vull, align 16
+  %59 = load <2 x i64>, <2 x i64>* @vull, align 16
   store <2 x i64> %59, <2 x i64>* %__a.addr.i8, align 16
   store i32 0, i32* %__b.addr.i9, align 4
   store <2 x i64>* @res_vull, <2 x i64>** %__c.addr.i10, align 8
-  %60 = load <2 x i64>* %__a.addr.i8, align 16
+  %60 = load <2 x i64>, <2 x i64>* %__a.addr.i8, align 16
   %61 = bitcast <2 x i64> %60 to <2 x double>
-  %62 = load i32* %__b.addr.i9, align 4
-  %63 = load <2 x i64>** %__c.addr.i10, align 8
+  %62 = load i32, i32* %__b.addr.i9, align 4
+  %63 = load <2 x i64>*, <2 x i64>** %__c.addr.i10, align 8
   %64 = bitcast <2 x i64>* %63 to i8*
   %65 = getelementptr i8, i8* %64, i32 %62
   call void @llvm.ppc.vsx.stxvd2x(<2 x double> %61, i8* %65)
-  %66 = load <2 x double>* @vd, align 16
+  %66 = load <2 x double>, <2 x double>* @vd, align 16
   store <2 x double> %66, <2 x double>* %__a.addr.i6, align 16
   store i32 0, i32* %__b.addr.i7, align 4
   store <2 x double>* @res_vd, <2 x double>** %__c.addr.i, align 8
-  %67 = load <2 x double>* %__a.addr.i6, align 16
-  %68 = load i32* %__b.addr.i7, align 4
-  %69 = load <2 x double>** %__c.addr.i, align 8
+  %67 = load <2 x double>, <2 x double>* %__a.addr.i6, align 16
+  %68 = load i32, i32* %__b.addr.i7, align 4
+  %69 = load <2 x double>*, <2 x double>** %__c.addr.i, align 8
   %70 = bitcast <2 x double>* %69 to i8*
   %71 = getelementptr i8, i8* %70, i32 %68
   call void @llvm.ppc.vsx.stxvd2x(<2 x double> %67, i8* %71)

Modified: llvm/trunk/test/CodeGen/PowerPC/vsx-ldst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vsx-ldst.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vsx-ldst.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vsx-ldst.ll Fri Feb 27 15:17:42 2015
@@ -30,12 +30,12 @@
 ; Function Attrs: nounwind
 define void @test1() {
 entry:
-  %0 = load <4 x i32>* @vsi, align 16
-  %1 = load <4 x i32>* @vui, align 16
-  %2 = load <4 x i32>* bitcast (<4 x float>* @vf to <4 x i32>*), align 16
-  %3 = load <2 x double>* bitcast (<2 x i64>* @vsll to <2 x double>*), align 16
-  %4 = load <2 x double>* bitcast (<2 x i64>* @vull to <2 x double>*), align 16
-  %5 = load <2 x double>* @vd, align 16
+  %0 = load <4 x i32>, <4 x i32>* @vsi, align 16
+  %1 = load <4 x i32>, <4 x i32>* @vui, align 16
+  %2 = load <4 x i32>, <4 x i32>* bitcast (<4 x float>* @vf to <4 x i32>*), align 16
+  %3 = load <2 x double>, <2 x double>* bitcast (<2 x i64>* @vsll to <2 x double>*), align 16
+  %4 = load <2 x double>, <2 x double>* bitcast (<2 x i64>* @vull to <2 x double>*), align 16
+  %5 = load <2 x double>, <2 x double>* @vd, align 16
   store <4 x i32> %0, <4 x i32>* @res_vsi, align 16
   store <4 x i32> %1, <4 x i32>* @res_vui, align 16
   store <4 x i32> %2, <4 x i32>* bitcast (<4 x float>* @res_vf to <4 x i32>*), align 16

Modified: llvm/trunk/test/CodeGen/PowerPC/vsx-minmax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vsx-minmax.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vsx-minmax.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vsx-minmax.ll Fri Feb 27 15:17:42 2015
@@ -18,35 +18,35 @@ target triple = "powerpc64-unknown-linux
 define void @test1() #0 {
 ; CHECK-LABEL: @test1
 entry:
-  %0 = load volatile <4 x float>* @vf, align 16
-  %1 = load volatile <4 x float>* @vf, align 16
+  %0 = load volatile <4 x float>, <4 x float>* @vf, align 16
+  %1 = load volatile <4 x float>, <4 x float>* @vf, align 16
   %2 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %0, <4 x float> %1)
 ; CHECK: xvmaxsp
   store <4 x float> %2, <4 x float>* @vf1, align 16
-  %3 = load <2 x double>* @vd, align 16
+  %3 = load <2 x double>, <2 x double>* @vd, align 16
   %4 = tail call <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %3, <2 x double> %3)
 ; CHECK: xvmaxdp
   store <2 x double> %4, <2 x double>* @vd1, align 16
-  %5 = load volatile <4 x float>* @vf, align 16
-  %6 = load volatile <4 x float>* @vf, align 16
+  %5 = load volatile <4 x float>, <4 x float>* @vf, align 16
+  %6 = load volatile <4 x float>, <4 x float>* @vf, align 16
   %7 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %5, <4 x float> %6)
 ; CHECK: xvmaxsp
   store <4 x float> %7, <4 x float>* @vf2, align 16
-  %8 = load volatile <4 x float>* @vf, align 16
-  %9 = load volatile <4 x float>* @vf, align 16
+  %8 = load volatile <4 x float>, <4 x float>* @vf, align 16
+  %9 = load volatile <4 x float>, <4 x float>* @vf, align 16
   %10 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %8, <4 x float> %9)
 ; CHECK: xvminsp
   store <4 x float> %10, <4 x float>* @vf3, align 16
-  %11 = load <2 x double>* @vd, align 16
+  %11 = load <2 x double>, <2 x double>* @vd, align 16
   %12 = tail call <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %11, <2 x double> %11)
 ; CHECK: xvmindp
   store <2 x double> %12, <2 x double>* @vd2, align 16
-  %13 = load volatile <4 x float>* @vf, align 16
-  %14 = load volatile <4 x float>* @vf, align 16
+  %13 = load volatile <4 x float>, <4 x float>* @vf, align 16
+  %14 = load volatile <4 x float>, <4 x float>* @vf, align 16
   %15 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %13, <4 x float> %14)
 ; CHECK: xvminsp
   store <4 x float> %15, <4 x float>* @vf4, align 16
-  %16 = load double* @d, align 8
+  %16 = load double, double* @d, align 8
   %17 = tail call double @llvm.ppc.vsx.xsmaxdp(double %16, double %16)
 ; CHECK: xsmaxdp
   store double %17, double* @d1, align 8

Modified: llvm/trunk/test/CodeGen/PowerPC/vsx-p8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vsx-p8.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vsx-p8.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vsx-p8.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ target triple = "powerpc64-unknown-linux
 ; Unaligned loads/stores on P8 and later should use VSX where possible.
 
 define <2 x double> @test28u(<2 x double>* %a) {
-  %v = load <2 x double>* %a, align 8
+  %v = load <2 x double>, <2 x double>* %a, align 8
   ret <2 x double> %v
 
 ; CHECK-LABEL: @test28u
@@ -26,7 +26,7 @@ define void @test29u(<2 x double>* %a, <
 }
 
 define <4 x float> @test32u(<4 x float>* %a) {
-  %v = load <4 x float>* %a, align 8
+  %v = load <4 x float>, <4 x float>* %a, align 8
   ret <4 x float> %v
 
 ; CHECK-REG-LABEL: @test32u

Modified: llvm/trunk/test/CodeGen/PowerPC/vsx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vsx.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vsx.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vsx.ll Fri Feb 27 15:17:42 2015
@@ -501,7 +501,7 @@ define <2 x i64> @test27(<2 x i64> %a, <
 }
 
 define <2 x double> @test28(<2 x double>* %a) {
-  %v = load <2 x double>* %a, align 16
+  %v = load <2 x double>, <2 x double>* %a, align 16
   ret <2 x double> %v
 
 ; CHECK-LABEL: @test28
@@ -519,7 +519,7 @@ define void @test29(<2 x double>* %a, <2
 }
 
 define <2 x double> @test28u(<2 x double>* %a) {
-  %v = load <2 x double>* %a, align 8
+  %v = load <2 x double>, <2 x double>* %a, align 8
   ret <2 x double> %v
 
 ; CHECK-LABEL: @test28u
@@ -537,7 +537,7 @@ define void @test29u(<2 x double>* %a, <
 }
 
 define <2 x i64> @test30(<2 x i64>* %a) {
-  %v = load <2 x i64>* %a, align 16
+  %v = load <2 x i64>, <2 x i64>* %a, align 16
   ret <2 x i64> %v
 
 ; CHECK-REG-LABEL: @test30
@@ -562,7 +562,7 @@ define void @test31(<2 x i64>* %a, <2 x
 }
 
 define <4 x float> @test32(<4 x float>* %a) {
-  %v = load <4 x float>* %a, align 16
+  %v = load <4 x float>, <4 x float>* %a, align 16
   ret <4 x float> %v
 
 ; CHECK-REG-LABEL: @test32
@@ -590,7 +590,7 @@ define void @test33(<4 x float>* %a, <4
 }
 
 define <4 x float> @test32u(<4 x float>* %a) {
-  %v = load <4 x float>* %a, align 8
+  %v = load <4 x float>, <4 x float>* %a, align 8
   ret <4 x float> %v
 
 ; CHECK-LABEL: @test32u
@@ -616,7 +616,7 @@ define void @test33u(<4 x float>* %a, <4
 }
 
 define <4 x i32> @test34(<4 x i32>* %a) {
-  %v = load <4 x i32>* %a, align 16
+  %v = load <4 x i32>, <4 x i32>* %a, align 16
   ret <4 x i32> %v
 
 ; CHECK-REG-LABEL: @test34
@@ -718,7 +718,7 @@ define <2 x i64> @test47(<2 x float> %a)
 }
 
 define <2 x double> @test50(double* %a) {
-  %v = load double* %a, align 8
+  %v = load double, double* %a, align 8
   %w = insertelement <2 x double> undef, double %v, i32 0
   %x = insertelement <2 x double> %w, double %v, i32 1
   ret <2 x double> %x

Modified: llvm/trunk/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vsx_insert_extract_le.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vsx_insert_extract_le.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vsx_insert_extract_le.ll Fri Feb 27 15:17:42 2015
@@ -1,8 +1,8 @@
 ; RUN: llc -mcpu=pwr8 -mattr=+vsx -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
 
 define <2 x double> @testi0(<2 x double>* %p1, double* %p2) {
-  %v = load <2 x double>* %p1
-  %s = load double* %p2
+  %v = load <2 x double>, <2 x double>* %p1
+  %s = load double, double* %p2
   %r = insertelement <2 x double> %v, double %s, i32 0
   ret <2 x double> %r
 
@@ -15,8 +15,8 @@ define <2 x double> @testi0(<2 x double>
 }
 
 define <2 x double> @testi1(<2 x double>* %p1, double* %p2) {
-  %v = load <2 x double>* %p1
-  %s = load double* %p2
+  %v = load <2 x double>, <2 x double>* %p1
+  %s = load double, double* %p2
   %r = insertelement <2 x double> %v, double %s, i32 1
   ret <2 x double> %r
 
@@ -29,7 +29,7 @@ define <2 x double> @testi1(<2 x double>
 }
 
 define double @teste0(<2 x double>* %p1) {
-  %v = load <2 x double>* %p1
+  %v = load <2 x double>, <2 x double>* %p1
   %r = extractelement <2 x double> %v, i32 0
   ret double %r
 
@@ -42,7 +42,7 @@ define double @teste0(<2 x double>* %p1)
 }
 
 define double @teste1(<2 x double>* %p1) {
-  %v = load <2 x double>* %p1
+  %v = load <2 x double>, <2 x double>* %p1
   %r = extractelement <2 x double> %v, i32 1
   ret double %r
 

Modified: llvm/trunk/test/CodeGen/PowerPC/vsx_shuffle_le.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vsx_shuffle_le.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vsx_shuffle_le.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vsx_shuffle_le.ll Fri Feb 27 15:17:42 2015
@@ -1,8 +1,8 @@
 ; RUN: llc -mcpu=pwr8 -mattr=+vsx -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
 
 define <2 x double> @test00(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 0>
   ret <2 x double> %v3
 
@@ -13,8 +13,8 @@ define <2 x double> @test00(<2 x double>
 }
 
 define <2 x double> @test01(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 1>
   ret <2 x double> %v3
 
@@ -24,8 +24,8 @@ define <2 x double> @test01(<2 x double>
 }
 
 define <2 x double> @test02(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 2>
   ret <2 x double> %v3
 
@@ -38,8 +38,8 @@ define <2 x double> @test02(<2 x double>
 }
 
 define <2 x double> @test03(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 3>
   ret <2 x double> %v3
 
@@ -52,8 +52,8 @@ define <2 x double> @test03(<2 x double>
 }
 
 define <2 x double> @test10(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 0>
   ret <2 x double> %v3
 
@@ -64,8 +64,8 @@ define <2 x double> @test10(<2 x double>
 }
 
 define <2 x double> @test11(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 1>
   ret <2 x double> %v3
 
@@ -76,8 +76,8 @@ define <2 x double> @test11(<2 x double>
 }
 
 define <2 x double> @test12(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 2>
   ret <2 x double> %v3
 
@@ -90,8 +90,8 @@ define <2 x double> @test12(<2 x double>
 }
 
 define <2 x double> @test13(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 3>
   ret <2 x double> %v3
 
@@ -104,8 +104,8 @@ define <2 x double> @test13(<2 x double>
 }
 
 define <2 x double> @test20(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 0>
   ret <2 x double> %v3
 
@@ -118,8 +118,8 @@ define <2 x double> @test20(<2 x double>
 }
 
 define <2 x double> @test21(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 1>
   ret <2 x double> %v3
 
@@ -132,8 +132,8 @@ define <2 x double> @test21(<2 x double>
 }
 
 define <2 x double> @test22(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 2>
   ret <2 x double> %v3
 
@@ -144,8 +144,8 @@ define <2 x double> @test22(<2 x double>
 }
 
 define <2 x double> @test23(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 3>
   ret <2 x double> %v3
 
@@ -155,8 +155,8 @@ define <2 x double> @test23(<2 x double>
 }
 
 define <2 x double> @test30(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 0>
   ret <2 x double> %v3
 
@@ -169,8 +169,8 @@ define <2 x double> @test30(<2 x double>
 }
 
 define <2 x double> @test31(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 1>
   ret <2 x double> %v3
 
@@ -183,8 +183,8 @@ define <2 x double> @test31(<2 x double>
 }
 
 define <2 x double> @test32(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 2>
   ret <2 x double> %v3
 
@@ -195,8 +195,8 @@ define <2 x double> @test32(<2 x double>
 }
 
 define <2 x double> @test33(<2 x double>* %p1, <2 x double>* %p2) {
-  %v1 = load <2 x double>* %p1
-  %v2 = load <2 x double>* %p2
+  %v1 = load <2 x double>, <2 x double>* %p1
+  %v2 = load <2 x double>, <2 x double>* %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 3>
   ret <2 x double> %v3
 

Modified: llvm/trunk/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@
 ; CHECK-D89: .weak_definition _v1
 
 define i32 @f1() {
-  %x = load i32 * @v1
+  %x = load i32 , i32 * @v1
   ret i32 %x
 }
 
@@ -45,6 +45,6 @@ define i32* @f3() {
 ; CHECK-D89: .weak_definition _v4
 
 define i32 @f4() {
-  %x = load i32 * @v4
+  %x = load i32 , i32 * @v4
   ret i32 %x
 }
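
(Note the spacing in the two hunks above: the original lines were written
as "load i32 * @v1", with a space before the "*", and the updated lines
read "load i32 , i32 * @v1". The rewrite inserts the new type parameter
while carrying the old operand's whitespace over verbatim; the IR parser
is whitespace-insensitive here, so both spellings are accepted.)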

Modified: llvm/trunk/test/CodeGen/PowerPC/zero-not-run.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/zero-not-run.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/zero-not-run.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/zero-not-run.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ entry:
   br i1 undef, label %for.body, label %for.end731
 
 for.body:                                         ; preds = %entry
-  %0 = load i32* undef, align 4
+  %0 = load i32, i32* undef, align 4
   %or31 = or i32 %0, 319143828
   store i32 %or31, i32* undef, align 4
   %cmp32 = icmp eq i32 319143828, %or31

Modified: llvm/trunk/test/CodeGen/PowerPC/zext-free.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/zext-free.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/zext-free.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/zext-free.ll Fri Feb 27 15:17:42 2015
@@ -5,16 +5,16 @@ target triple = "powerpc64-unknown-linux
 ; Function Attrs: noreturn nounwind
 define signext i32 @_Z1fRPc(i8** nocapture dereferenceable(8) %p) #0 {
 entry:
-  %.pre = load i8** %p, align 8
+  %.pre = load i8*, i8** %p, align 8
   br label %loop
 
 loop:                                             ; preds = %loop.backedge, %entry
   %0 = phi i8* [ %.pre, %entry ], [ %.be, %loop.backedge ]
-  %1 = load i8* %0, align 1
+  %1 = load i8, i8* %0, align 1
   %tobool = icmp eq i8 %1, 0
   %incdec.ptr = getelementptr inbounds i8, i8* %0, i64 1
   store i8* %incdec.ptr, i8** %p, align 8
-  %2 = load i8* %incdec.ptr, align 1
+  %2 = load i8, i8* %incdec.ptr, align 1
   %tobool2 = icmp ne i8 %2, 0
   %or.cond = and i1 %tobool, %tobool2
   br i1 %or.cond, label %if.then3, label %loop.backedge

Modified: llvm/trunk/test/CodeGen/R600/32-bit-local-address-space.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/32-bit-local-address-space.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/32-bit-local-address-space.ll (original)
+++ llvm/trunk/test/CodeGen/R600/32-bit-local-address-space.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@
 ; SI: ds_read_b32 v{{[0-9]+}}, [[PTR]]
 define void @local_address_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
 entry:
-  %0 = load i32 addrspace(3)* %in
+  %0 = load i32, i32 addrspace(3)* %in
   store i32 %0, i32 addrspace(1)* %out
   ret void
 }
@@ -27,7 +27,7 @@ entry:
 define void @local_address_gep(i32 addrspace(1)* %out, i32 addrspace(3)* %in, i32 %offset) {
 entry:
   %0 = getelementptr i32, i32 addrspace(3)* %in, i32 %offset
-  %1 = load i32 addrspace(3)* %0
+  %1 = load i32, i32 addrspace(3)* %0
   store i32 %1, i32 addrspace(1)* %out
   ret void
 }
@@ -38,7 +38,7 @@ entry:
 define void @local_address_gep_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
 entry:
   %0 = getelementptr i32, i32 addrspace(3)* %in, i32 1
-  %1 = load i32 addrspace(3)* %0
+  %1 = load i32, i32 addrspace(3)* %0
   store i32 %1, i32 addrspace(1)* %out
   ret void
 }
@@ -51,7 +51,7 @@ entry:
 define void @local_address_gep_large_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
 entry:
   %0 = getelementptr i32, i32 addrspace(3)* %in, i32 16385
-  %1 = load i32 addrspace(3)* %0
+  %1 = load i32, i32 addrspace(3)* %0
   store i32 %1, i32 addrspace(1)* %out
   ret void
 }
@@ -73,7 +73,7 @@ define void @null_32bit_lds_ptr(i32 addr
 ; SI: ds_read_b32
 define void @mul_32bit_ptr(float addrspace(1)* %out, [3 x float] addrspace(3)* %lds, i32 %tid) {
   %ptr = getelementptr [3 x float], [3 x float] addrspace(3)* %lds, i32 %tid, i32 0
-  %val = load float addrspace(3)* %ptr
+  %val = load float, float addrspace(3)* %ptr
   store float %val, float addrspace(1)* %out
   ret void
 }
@@ -84,7 +84,7 @@ define void @mul_32bit_ptr(float addrspa
 ; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0
 ; SI: ds_read_b32 v{{[0-9]+}}, [[REG]]
 define void @infer_ptr_alignment_global_offset(float addrspace(1)* %out, i32 %tid) {
-  %val = load float addrspace(3)* @g_lds
+  %val = load float, float addrspace(3)* @g_lds
   store float %val, float addrspace(1)* %out
   ret void
 }
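
(Address-space-qualified loads, as in the R600 hunks above, follow the
same pattern: the explicit first parameter is the bare value type, and
the addrspace(N) qualifier stays on the pointer operand's type. A sketch,
assuming %in has type i32 addrspace(3)* and %out has type
i32 addrspace(1)*:

  %0 = load i32, i32 addrspace(3)* %in
  store i32 %0, i32 addrspace(1)* %out

The store syntax is unchanged throughout, since a store's value operand
already carries an explicit type.)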

Modified: llvm/trunk/test/CodeGen/R600/add-debug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/add-debug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/add-debug.ll (original)
+++ llvm/trunk/test/CodeGen/R600/add-debug.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ entry:
   br i1 %0, label %if, label %else
 
 if:
-  %1 = load i64 addrspace(1)* %in
+  %1 = load i64, i64 addrspace(1)* %in
   br label %endif
 
 else:

Modified: llvm/trunk/test/CodeGen/R600/add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/add.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/add.ll (original)
+++ llvm/trunk/test/CodeGen/R600/add.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@
 ;SI: buffer_store_dword [[REG]],
 define void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %a = load i32 addrspace(1)* %in
-  %b = load i32 addrspace(1)* %b_ptr
+  %a = load i32, i32 addrspace(1)* %in
+  %b = load i32, i32 addrspace(1)* %b_ptr
   %result = add i32 %a, %b
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -26,8 +26,8 @@ define void @test1(i32 addrspace(1)* %ou
 
 define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
-  %a = load <2 x i32> addrspace(1)* %in
-  %b = load <2 x i32> addrspace(1)* %b_ptr
+  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
+  %b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
   %result = add <2 x i32> %a, %b
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
@@ -46,8 +46,8 @@ define void @test2(<2 x i32> addrspace(1
 
 define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %a = load <4 x i32> addrspace(1)* %in
-  %b = load <4 x i32> addrspace(1)* %b_ptr
+  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
+  %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
   %result = add <4 x i32> %a, %b
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
@@ -136,7 +136,7 @@ entry:
 ; SI-NOT: v_addc_u32_e32 s
 define void @add64_sgpr_vgpr(i64 addrspace(1)* %out, i64 %a, i64 addrspace(1)* %in) {
 entry:
-  %0 = load i64 addrspace(1)* %in
+  %0 = load i64, i64 addrspace(1)* %in
   %1 = add i64 %a, %0
   store i64 %1, i64 addrspace(1)* %out
   ret void
@@ -152,7 +152,7 @@ entry:
   br i1 %0, label %if, label %else
 
 if:
-  %1 = load i64 addrspace(1)* %in
+  %1 = load i64, i64 addrspace(1)* %in
   br label %endif
 
 else:

Modified: llvm/trunk/test/CodeGen/R600/add_i64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/add_i64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/add_i64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/add_i64.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ define void @test_i64_vreg(i64 addrspace
   %tid = call i32 @llvm.r600.read.tidig.x() readnone
   %a_ptr = getelementptr i64, i64 addrspace(1)* %inA, i32 %tid
   %b_ptr = getelementptr i64, i64 addrspace(1)* %inB, i32 %tid
-  %a = load i64 addrspace(1)* %a_ptr
-  %b = load i64 addrspace(1)* %b_ptr
+  %a = load i64, i64 addrspace(1)* %a_ptr
+  %b = load i64, i64 addrspace(1)* %b_ptr
   %result = add i64 %a, %b
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -22,7 +22,7 @@ define void @test_i64_vreg(i64 addrspace
 ; SI: v_add_i32
 ; SI: v_addc_u32
 define void @sgpr_operand(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 addrspace(1)* noalias %in_bar, i64 %a) {
-  %foo = load i64 addrspace(1)* %in, align 8
+  %foo = load i64, i64 addrspace(1)* %in, align 8
   %result = add i64 %foo, %a
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -35,7 +35,7 @@ define void @sgpr_operand(i64 addrspace(
 ; SI: v_add_i32
 ; SI: v_addc_u32
 define void @sgpr_operand_reversed(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 %a) {
-  %foo = load i64 addrspace(1)* %in, align 8
+  %foo = load i64, i64 addrspace(1)* %in, align 8
   %result = add i64 %a, %foo
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -62,8 +62,8 @@ define void @test_v2i64_vreg(<2 x i64> a
   %tid = call i32 @llvm.r600.read.tidig.x() readnone
   %a_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inA, i32 %tid
   %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inB, i32 %tid
-  %a = load <2 x i64> addrspace(1)* %a_ptr
-  %b = load <2 x i64> addrspace(1)* %b_ptr
+  %a = load <2 x i64>, <2 x i64> addrspace(1)* %a_ptr
+  %b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
   %result = add <2 x i64> %a, %b
   store <2 x i64> %result, <2 x i64> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/address-space.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/address-space.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/address-space.ll (original)
+++ llvm/trunk/test/CodeGen/R600/address-space.ll Fri Feb 27 15:17:42 2015
@@ -21,8 +21,8 @@ entry:
   br label %bb32
 
 bb32:
-  %a = load float addrspace(3)* %x, align 4
-  %b = load float addrspace(3)* %y, align 4
+  %a = load float, float addrspace(3)* %x, align 4
+  %b = load float, float addrspace(3)* %y, align 4
   %cmp = fcmp one float %a, %b
   br i1 %cmp, label %bb34, label %bb33
 

Modified: llvm/trunk/test/CodeGen/R600/and.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/and.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/and.ll (original)
+++ llvm/trunk/test/CodeGen/R600/and.ll Fri Feb 27 15:17:42 2015
@@ -11,8 +11,8 @@
 
 define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
-  %a = load <2 x i32> addrspace(1) * %in
-  %b = load <2 x i32> addrspace(1) * %b_ptr
+  %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
+  %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
   %result = and <2 x i32> %a, %b
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
@@ -31,8 +31,8 @@ define void @test2(<2 x i32> addrspace(1
 
 define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %a = load <4 x i32> addrspace(1) * %in
-  %b = load <4 x i32> addrspace(1) * %b_ptr
+  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
+  %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
   %result = and <4 x i32> %a, %b
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
@@ -57,8 +57,8 @@ define void @s_and_constant_i32(i32 addr
 ; FUNC-LABEL: {{^}}v_and_i32:
 ; SI: v_and_b32
 define void @v_and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
-  %a = load i32 addrspace(1)* %aptr, align 4
-  %b = load i32 addrspace(1)* %bptr, align 4
+  %a = load i32, i32 addrspace(1)* %aptr, align 4
+  %b = load i32, i32 addrspace(1)* %bptr, align 4
   %and = and i32 %a, %b
   store i32 %and, i32 addrspace(1)* %out, align 4
   ret void
@@ -67,7 +67,7 @@ define void @v_and_i32(i32 addrspace(1)*
 ; FUNC-LABEL: {{^}}v_and_constant_i32
 ; SI: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, v{{[0-9]+}}
 define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
-  %a = load i32 addrspace(1)* %aptr, align 4
+  %a = load i32, i32 addrspace(1)* %aptr, align 4
   %and = and i32 %a, 1234567
   store i32 %and, i32 addrspace(1)* %out, align 4
   ret void
@@ -76,7 +76,7 @@ define void @v_and_constant_i32(i32 addr
 ; FUNC-LABEL: {{^}}v_and_inline_imm_64_i32
 ; SI: v_and_b32_e32 v{{[0-9]+}}, 64, v{{[0-9]+}}
 define void @v_and_inline_imm_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
-  %a = load i32 addrspace(1)* %aptr, align 4
+  %a = load i32, i32 addrspace(1)* %aptr, align 4
   %and = and i32 %a, 64
   store i32 %and, i32 addrspace(1)* %out, align 4
   ret void
@@ -85,7 +85,7 @@ define void @v_and_inline_imm_64_i32(i32
 ; FUNC-LABEL: {{^}}v_and_inline_imm_neg_16_i32
 ; SI: v_and_b32_e32 v{{[0-9]+}}, -16, v{{[0-9]+}}
 define void @v_and_inline_imm_neg_16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
-  %a = load i32 addrspace(1)* %aptr, align 4
+  %a = load i32, i32 addrspace(1)* %aptr, align 4
   %and = and i32 %a, -16
   store i32 %and, i32 addrspace(1)* %out, align 4
   ret void
@@ -120,8 +120,8 @@ define void @s_and_constant_i64(i64 addr
 ; SI: v_and_b32
 ; SI: v_and_b32
 define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
-  %a = load i64 addrspace(1)* %aptr, align 8
-  %b = load i64 addrspace(1)* %bptr, align 8
+  %a = load i64, i64 addrspace(1)* %aptr, align 8
+  %b = load i64, i64 addrspace(1)* %bptr, align 8
   %and = and i64 %a, %b
   store i64 %and, i64 addrspace(1)* %out, align 8
   ret void
@@ -136,8 +136,8 @@ entry:
   br i1 %tmp0, label %if, label %endif
 
 if:
-  %a = load i64 addrspace(1)* %aptr, align 8
-  %b = load i64 addrspace(1)* %bptr, align 8
+  %a = load i64, i64 addrspace(1)* %aptr, align 8
+  %b = load i64, i64 addrspace(1)* %bptr, align 8
   %and = and i64 %a, %b
   br label %endif
 
@@ -151,7 +151,7 @@ endif:
 ; SI: v_and_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
 ; SI: v_and_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
 define void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
-  %a = load i64 addrspace(1)* %aptr, align 8
+  %a = load i64, i64 addrspace(1)* %aptr, align 8
   %and = and i64 %a, 1234567
   store i64 %and, i64 addrspace(1)* %out, align 8
   ret void
@@ -162,7 +162,7 @@ define void @v_and_constant_i64(i64 addr
 ; SI: v_and_b32_e32 {{v[0-9]+}}, 64, {{v[0-9]+}}
 ; SI: v_and_b32_e32 {{v[0-9]+}}, 0, {{v[0-9]+}}
 define void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
-  %a = load i64 addrspace(1)* %aptr, align 8
+  %a = load i64, i64 addrspace(1)* %aptr, align 8
   %and = and i64 %a, 64
   store i64 %and, i64 addrspace(1)* %out, align 8
   ret void

Modified: llvm/trunk/test/CodeGen/R600/array-ptr-calc-i32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/array-ptr-calc-i32.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/array-ptr-calc-i32.ll (original)
+++ llvm/trunk/test/CodeGen/R600/array-ptr-calc-i32.ll Fri Feb 27 15:17:42 2015
@@ -29,14 +29,14 @@ define void @test_private_array_ptr_calc
   %tid = call i32 @llvm.SI.tid() readnone
   %a_ptr = getelementptr i32, i32 addrspace(1)* %inA, i32 %tid
   %b_ptr = getelementptr i32, i32 addrspace(1)* %inB, i32 %tid
-  %a = load i32 addrspace(1)* %a_ptr
-  %b = load i32 addrspace(1)* %b_ptr
+  %a = load i32, i32 addrspace(1)* %a_ptr
+  %b = load i32, i32 addrspace(1)* %b_ptr
   %result = add i32 %a, %b
   %alloca_ptr = getelementptr [4 x i32], [4 x i32]* %alloca, i32 1, i32 %b
   store i32 %result, i32* %alloca_ptr, align 4
   ; Dummy call
   call void @llvm.AMDGPU.barrier.local() nounwind noduplicate
-  %reload = load i32* %alloca_ptr, align 4
+  %reload = load i32, i32* %alloca_ptr, align 4
   %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
   store i32 %reload, i32 addrspace(1)* %out_ptr, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/R600/array-ptr-calc-i64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/array-ptr-calc-i64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/array-ptr-calc-i64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/array-ptr-calc-i64.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@ define void @test_array_ptr_calc(i32 add
   %tid = call i32 @llvm.SI.tid() readnone
   %a_ptr = getelementptr [1025 x i32], [1025 x i32] addrspace(1)* %inA, i32 %tid, i32 0
   %b_ptr = getelementptr i32, i32 addrspace(1)* %inB, i32 %tid
-  %a = load i32 addrspace(1)* %a_ptr
-  %b = load i32 addrspace(1)* %b_ptr
+  %a = load i32, i32 addrspace(1)* %a_ptr
+  %b = load i32, i32 addrspace(1)* %b_ptr
   %result = add i32 %a, %b
   store i32 %result, i32 addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/big_alu.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/big_alu.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/big_alu.ll (original)
+++ llvm/trunk/test/CodeGen/R600/big_alu.ll Fri Feb 27 15:17:42 2015
@@ -51,29 +51,29 @@ main_body:
   %43 = extractelement <4 x float> %reg7, i32 1
   %44 = extractelement <4 x float> %reg7, i32 2
   %45 = extractelement <4 x float> %reg7, i32 3
-  %46 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 11)
+  %46 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 11)
   %47 = extractelement <4 x float> %46, i32 0
-  %48 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 11)
+  %48 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 11)
   %49 = extractelement <4 x float> %48, i32 1
-  %50 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 11)
+  %50 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 11)
   %51 = extractelement <4 x float> %50, i32 2
-  %52 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 12)
+  %52 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 12)
   %53 = extractelement <4 x float> %52, i32 0
-  %54 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 14)
+  %54 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 14)
   %55 = extractelement <4 x float> %54, i32 0
-  %56 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 14)
+  %56 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 14)
   %57 = extractelement <4 x float> %56, i32 1
-  %58 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 14)
+  %58 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 14)
   %59 = extractelement <4 x float> %58, i32 2
-  %60 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 14)
+  %60 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 14)
   %61 = extractelement <4 x float> %60, i32 3
-  %62 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 16)
+  %62 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 16)
   %63 = extractelement <4 x float> %62, i32 0
-  %64 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 16)
+  %64 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 16)
   %65 = extractelement <4 x float> %64, i32 1
-  %66 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 16)
+  %66 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 16)
   %67 = extractelement <4 x float> %66, i32 2
-  %68 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+  %68 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
   %69 = extractelement <4 x float> %68, i32 0
   %70 = fcmp oge float %69, 3.500000e+00
   %71 = sext i1 %70 to i32
@@ -81,7 +81,7 @@ main_body:
   %73 = bitcast float %72 to i32
   %74 = icmp ne i32 %73, 0
   %. = select i1 %74, float 0.000000e+00, float 0.000000e+00
-  %75 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+  %75 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
   %76 = extractelement <4 x float> %75, i32 0
   %77 = fcmp oge float %76, 2.000000e+00
   %78 = sext i1 %77 to i32
@@ -135,7 +135,7 @@ IF137:
   %123 = insertelement <4 x float> %122, float 0.000000e+00, i32 3
   %124 = call float @llvm.AMDGPU.dp4(<4 x float> %119, <4 x float> %123)
   %125 = fdiv float 1.000000e+00, %124
-  %126 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+  %126 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
   %127 = extractelement <4 x float> %126, i32 0
   %128 = fmul float %127, %125
   %129 = fmul float %103, %128
@@ -347,15 +347,15 @@ ENDIF136:
   %329 = fmul float %314, %328
   %330 = fmul float %316, %328
   %331 = fmul float %318, %328
-  %332 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
+  %332 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
   %333 = extractelement <4 x float> %332, i32 0
   %334 = fsub float -0.000000e+00, %333
   %335 = fadd float 1.000000e+00, %334
-  %336 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+  %336 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
   %337 = extractelement <4 x float> %336, i32 0
   %338 = fsub float -0.000000e+00, %337
   %339 = fadd float 1.000000e+00, %338
-  %340 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
+  %340 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
   %341 = extractelement <4 x float> %340, i32 0
   %342 = fsub float -0.000000e+00, %341
   %343 = fadd float 1.000000e+00, %342
@@ -1018,7 +1018,7 @@ ENDIF175:
   %temp92.11 = phi float [ %877, %IF176 ], [ %temp92.10, %ENDIF172 ]
   %temp93.5 = phi float [ %878, %IF176 ], [ %temp93.4, %ENDIF172 ]
   %temp94.5 = phi float [ %879, %IF176 ], [ %temp94.4, %ENDIF172 ]
-  %880 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
+  %880 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
   %881 = extractelement <4 x float> %880, i32 0
   %882 = fcmp olt float %881, %179
   %883 = sext i1 %882 to i32
@@ -1114,12 +1114,12 @@ ENDIF178:
   %960 = fmul float %temp87.6, %956
   %961 = fmul float %2, -2.000000e+00
   %962 = fadd float %961, 1.000000e+00
-  %963 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 23)
+  %963 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 23)
   %964 = extractelement <4 x float> %963, i32 2
   %965 = fsub float -0.000000e+00, %964
   %966 = fadd float %962, %965
   %967 = fdiv float 1.000000e+00, %966
-  %968 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 24)
+  %968 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 24)
   %969 = extractelement <4 x float> %968, i32 2
   %970 = fmul float %969, %967
   %971 = fsub float -0.000000e+00, %53

Modified: llvm/trunk/test/CodeGen/R600/bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/bitcast.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/R600/bitcast.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ declare void @llvm.SI.export(i32, i32, i
 ; SI: s_endpgm
 define void @v32i8_to_v8i32(<32 x i8> addrspace(2)* inreg) #0 {
 entry:
-  %1 = load <32 x i8> addrspace(2)* %0
+  %1 = load <32 x i8>, <32 x i8> addrspace(2)* %0
   %2 = bitcast <32 x i8> %1 to <8 x i32>
   %3 = extractelement <8 x i32> %2, i32 1
   %4 = icmp ne i32 %3, 0
@@ -23,34 +23,34 @@ entry:
 define void @i8ptr_v16i8ptr(<16 x i8> addrspace(1)* %out, i8 addrspace(1)* %in) {
 entry:
   %0 = bitcast i8 addrspace(1)* %in to <16 x i8> addrspace(1)*
-  %1 = load <16 x i8> addrspace(1)* %0
+  %1 = load <16 x i8>, <16 x i8> addrspace(1)* %0
   store <16 x i8> %1, <16 x i8> addrspace(1)* %out
   ret void
 }
 
 define void @f32_to_v2i16(<2 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
-  %load = load float addrspace(1)* %in, align 4
+  %load = load float, float addrspace(1)* %in, align 4
   %bc = bitcast float %load to <2 x i16>
   store <2 x i16> %bc, <2 x i16> addrspace(1)* %out, align 4
   ret void
 }
 
 define void @v2i16_to_f32(float addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind {
-  %load = load <2 x i16> addrspace(1)* %in, align 4
+  %load = load <2 x i16>, <2 x i16> addrspace(1)* %in, align 4
   %bc = bitcast <2 x i16> %load to float
   store float %bc, float addrspace(1)* %out, align 4
   ret void
 }
 
 define void @v4i8_to_i32(i32 addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
-  %load = load <4 x i8> addrspace(1)* %in, align 4
+  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
   %bc = bitcast <4 x i8> %load to i32
   store i32 %bc, i32 addrspace(1)* %out, align 4
   ret void
 }
 
 define void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %load = load i32 addrspace(1)* %in, align 4
+  %load = load i32, i32 addrspace(1)* %in, align 4
   %bc = bitcast i32 %load to <4 x i8>
   store <4 x i8> %bc, <4 x i8> addrspace(1)* %out, align 4
   ret void
@@ -59,7 +59,7 @@ define void @i32_to_v4i8(<4 x i8> addrsp
 ; FUNC-LABEL: {{^}}bitcast_v2i32_to_f64:
 ; SI: s_endpgm
 define void @bitcast_v2i32_to_f64(double addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
-  %val = load <2 x i32> addrspace(1)* %in, align 8
+  %val = load <2 x i32>, <2 x i32> addrspace(1)* %in, align 8
   %add = add <2 x i32> %val, <i32 4, i32 9>
   %bc = bitcast <2 x i32> %add to double
   store double %bc, double addrspace(1)* %out, align 8
@@ -69,7 +69,7 @@ define void @bitcast_v2i32_to_f64(double
 ; FUNC-LABEL: {{^}}bitcast_f64_to_v2i32:
 ; SI: s_endpgm
 define void @bitcast_f64_to_v2i32(<2 x i32> addrspace(1)* %out, double addrspace(1)* %in) {
-  %val = load double addrspace(1)* %in, align 8
+  %val = load double, double addrspace(1)* %in, align 8
   %add = fadd double %val, 4.0
   %bc = bitcast double %add to <2 x i32>
   store <2 x i32> %bc, <2 x i32> addrspace(1)* %out, align 8

Modified: llvm/trunk/test/CodeGen/R600/bswap.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/bswap.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/bswap.ll (original)
+++ llvm/trunk/test/CodeGen/R600/bswap.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ declare <4 x i64> @llvm.bswap.v4i64(<4 x
 ; SI: buffer_store_dword [[RESULT]]
 ; SI: s_endpgm
 define void @test_bswap_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %bswap = call i32 @llvm.bswap.i32(i32 %val) nounwind readnone
   store i32 %bswap, i32 addrspace(1)* %out, align 4
   ret void
@@ -33,7 +33,7 @@ define void @test_bswap_i32(i32 addrspac
 ; SI-DAG: v_bfi_b32
 ; SI: s_endpgm
 define void @test_bswap_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) nounwind {
-  %val = load <2 x i32> addrspace(1)* %in, align 8
+  %val = load <2 x i32>, <2 x i32> addrspace(1)* %in, align 8
   %bswap = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %val) nounwind readnone
   store <2 x i32> %bswap, <2 x i32> addrspace(1)* %out, align 8
   ret void
@@ -54,7 +54,7 @@ define void @test_bswap_v2i32(<2 x i32>
 ; SI-DAG: v_bfi_b32
 ; SI: s_endpgm
 define void @test_bswap_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) nounwind {
-  %val = load <4 x i32> addrspace(1)* %in, align 16
+  %val = load <4 x i32>, <4 x i32> addrspace(1)* %in, align 16
   %bswap = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %val) nounwind readnone
   store <4 x i32> %bswap, <4 x i32> addrspace(1)* %out, align 16
   ret void
@@ -87,28 +87,28 @@ define void @test_bswap_v4i32(<4 x i32>
 ; SI-DAG: v_bfi_b32
 ; SI: s_endpgm
 define void @test_bswap_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(1)* %in) nounwind {
-  %val = load <8 x i32> addrspace(1)* %in, align 32
+  %val = load <8 x i32>, <8 x i32> addrspace(1)* %in, align 32
   %bswap = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> %val) nounwind readnone
   store <8 x i32> %bswap, <8 x i32> addrspace(1)* %out, align 32
   ret void
 }
 
 define void @test_bswap_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
-  %val = load i64 addrspace(1)* %in, align 8
+  %val = load i64, i64 addrspace(1)* %in, align 8
   %bswap = call i64 @llvm.bswap.i64(i64 %val) nounwind readnone
   store i64 %bswap, i64 addrspace(1)* %out, align 8
   ret void
 }
 
 define void @test_bswap_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) nounwind {
-  %val = load <2 x i64> addrspace(1)* %in, align 16
+  %val = load <2 x i64>, <2 x i64> addrspace(1)* %in, align 16
   %bswap = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %val) nounwind readnone
   store <2 x i64> %bswap, <2 x i64> addrspace(1)* %out, align 16
   ret void
 }
 
 define void @test_bswap_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) nounwind {
-  %val = load <4 x i64> addrspace(1)* %in, align 32
+  %val = load <4 x i64>, <4 x i64> addrspace(1)* %in, align 32
   %bswap = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %val) nounwind readnone
   store <4 x i64> %bswap, <4 x i64> addrspace(1)* %out, align 32
   ret void

Modified: llvm/trunk/test/CodeGen/R600/call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/call.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/call.ll (original)
+++ llvm/trunk/test/CodeGen/R600/call.ll Fri Feb 27 15:17:42 2015
@@ -14,8 +14,8 @@ define i32 @defined_function(i32 %x) nou
 
 define void @test_call(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %a = load i32 addrspace(1)* %in
-  %b = load i32 addrspace(1)* %b_ptr
+  %a = load i32, i32 addrspace(1)* %in
+  %b = load i32, i32 addrspace(1)* %b_ptr
   %c = call i32 @defined_function(i32 %b) nounwind
   %result = add i32 %a, %c
   store i32 %result, i32 addrspace(1)* %out
@@ -24,8 +24,8 @@ define void @test_call(i32 addrspace(1)*
 
 define void @test_call_external(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %a = load i32 addrspace(1)* %in
-  %b = load i32 addrspace(1)* %b_ptr
+  %a = load i32, i32 addrspace(1)* %in
+  %b = load i32, i32 addrspace(1)* %b_ptr
   %c = call i32 @external_function(i32 %b) nounwind
   %result = add i32 %a, %c
   store i32 %result, i32 addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/R600/combine_vloads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/combine_vloads.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/combine_vloads.ll (original)
+++ llvm/trunk/test/CodeGen/R600/combine_vloads.ll Fri Feb 27 15:17:42 2015
@@ -23,7 +23,7 @@ for.body:
   %i.01 = phi i32 [ 0, %entry ], [ %tmp19, %for.body ]
   %arrayidx_v4 = bitcast <8 x i8> addrspace(1)* %src to <32 x i8> addrspace(1)*
   %0 = bitcast <32 x i8> addrspace(1)* %arrayidx_v4 to <8 x i32> addrspace(1)*
-  %vecload2 = load <8 x i32> addrspace(1)* %0, align 32
+  %vecload2 = load <8 x i32>, <8 x i32> addrspace(1)* %0, align 32
   %1 = bitcast <8 x i32> %vecload2 to <32 x i8>
   %tmp5 = shufflevector <32 x i8> %1, <32 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %tmp8 = shufflevector <32 x i8> %1, <32 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>

Modified: llvm/trunk/test/CodeGen/R600/commute_modifiers.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/commute_modifiers.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/commute_modifiers.ll (original)
+++ llvm/trunk/test/CodeGen/R600/commute_modifiers.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ declare float @llvm.fma.f32(float, float
 define void @commute_add_imm_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
-  %x = load float addrspace(1)* %gep.0
+  %x = load float, float addrspace(1)* %gep.0
   %x.fabs = call float @llvm.fabs.f32(float %x) #1
   %z = fadd float 2.0, %x.fabs
   store float %z, float addrspace(1)* %out
@@ -25,7 +25,7 @@ define void @commute_add_imm_fabs_f32(fl
 define void @commute_mul_imm_fneg_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
-  %x = load float addrspace(1)* %gep.0
+  %x = load float, float addrspace(1)* %gep.0
   %x.fabs = call float @llvm.fabs.f32(float %x) #1
   %x.fneg.fabs = fsub float -0.000000e+00, %x.fabs
   %z = fmul float 4.0, %x.fneg.fabs
@@ -40,7 +40,7 @@ define void @commute_mul_imm_fneg_fabs_f
 define void @commute_mul_imm_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
-  %x = load float addrspace(1)* %gep.0
+  %x = load float, float addrspace(1)* %gep.0
   %x.fneg = fsub float -0.000000e+00, %x
   %z = fmul float 4.0, %x.fneg
   store float %z, float addrspace(1)* %out
@@ -56,7 +56,7 @@ define void @commute_mul_imm_fneg_f32(fl
 define void @commute_add_lit_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
-  %x = load float addrspace(1)* %gep.0
+  %x = load float, float addrspace(1)* %gep.0
   %x.fabs = call float @llvm.fabs.f32(float %x) #1
   %z = fadd float 1024.0, %x.fabs
   store float %z, float addrspace(1)* %out
@@ -72,8 +72,8 @@ define void @commute_add_fabs_f32(float
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
-  %x = load float addrspace(1)* %gep.0
-  %y = load float addrspace(1)* %gep.1
+  %x = load float, float addrspace(1)* %gep.0
+  %y = load float, float addrspace(1)* %gep.1
   %y.fabs = call float @llvm.fabs.f32(float %y) #1
   %z = fadd float %x, %y.fabs
   store float %z, float addrspace(1)* %out
@@ -89,8 +89,8 @@ define void @commute_mul_fneg_f32(float
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
-  %x = load float addrspace(1)* %gep.0
-  %y = load float addrspace(1)* %gep.1
+  %x = load float, float addrspace(1)* %gep.0
+  %y = load float, float addrspace(1)* %gep.1
   %y.fneg = fsub float -0.000000e+00, %y
   %z = fmul float %x, %y.fneg
   store float %z, float addrspace(1)* %out
@@ -106,8 +106,8 @@ define void @commute_mul_fabs_fneg_f32(f
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
-  %x = load float addrspace(1)* %gep.0
-  %y = load float addrspace(1)* %gep.1
+  %x = load float, float addrspace(1)* %gep.0
+  %y = load float, float addrspace(1)* %gep.1
   %y.fabs = call float @llvm.fabs.f32(float %y) #1
   %y.fabs.fneg = fsub float -0.000000e+00, %y.fabs
   %z = fmul float %x, %y.fabs.fneg
@@ -125,8 +125,8 @@ define void @commute_mul_fabs_x_fabs_y_f
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
-  %x = load float addrspace(1)* %gep.0
-  %y = load float addrspace(1)* %gep.1
+  %x = load float, float addrspace(1)* %gep.0
+  %y = load float, float addrspace(1)* %gep.1
   %x.fabs = call float @llvm.fabs.f32(float %x) #1
   %y.fabs = call float @llvm.fabs.f32(float %y) #1
   %z = fmul float %x.fabs, %y.fabs
@@ -143,8 +143,8 @@ define void @commute_mul_fabs_x_fneg_fab
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
-  %x = load float addrspace(1)* %gep.0
-  %y = load float addrspace(1)* %gep.1
+  %x = load float, float addrspace(1)* %gep.0
+  %y = load float, float addrspace(1)* %gep.1
   %x.fabs = call float @llvm.fabs.f32(float %x) #1
   %y.fabs = call float @llvm.fabs.f32(float %y) #1
   %y.fabs.fneg = fsub float -0.000000e+00, %y.fabs
@@ -167,8 +167,8 @@ define void @fma_a_2.0_neg_b_f32(float a
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %r1 = load float addrspace(1)* %gep.0
-  %r2 = load float addrspace(1)* %gep.1
+  %r1 = load float, float addrspace(1)* %gep.0
+  %r2 = load float, float addrspace(1)* %gep.1
 
   %r2.fabs = call float @llvm.fabs.f32(float %r2)
 

Modified: llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll (original)
+++ llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 ; SI: buffer_store_dword [[REG]]
 ; SI: s_endpgm
 define void @test_copy_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
-  %val = load <4 x i8> addrspace(1)* %in, align 4
+  %val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
   store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
   ret void
 }
@@ -17,7 +17,7 @@ define void @test_copy_v4i8(<4 x i8> add
 ; SI: buffer_store_dword [[REG]]
 ; SI: s_endpgm
 define void @test_copy_v4i8_x2(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
-  %val = load <4 x i8> addrspace(1)* %in, align 4
+  %val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
   store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
   store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
   ret void
@@ -30,7 +30,7 @@ define void @test_copy_v4i8_x2(<4 x i8>
 ; SI: buffer_store_dword [[REG]]
 ; SI: s_endpgm
 define void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
-  %val = load <4 x i8> addrspace(1)* %in, align 4
+  %val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
   store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
   store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
   store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
@@ -45,7 +45,7 @@ define void @test_copy_v4i8_x3(<4 x i8>
 ; SI: buffer_store_dword [[REG]]
 ; SI: s_endpgm
 define void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %out3, <4 x i8> addrspace(1)* %in) nounwind {
-  %val = load <4 x i8> addrspace(1)* %in, align 4
+  %val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
   store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
   store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
   store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
@@ -82,7 +82,7 @@ define void @test_copy_v4i8_x4(<4 x i8>
 
 ; SI: s_endpgm
 define void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
-  %val = load <4 x i8> addrspace(1)* %in, align 4
+  %val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
   %add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
   store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
   store <4 x i8> %add, <4 x i8> addrspace(1)* %out1, align 4
@@ -120,7 +120,7 @@ define void @test_copy_v4i8_extra_use(<4
 
 ; SI: s_endpgm
 define void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
-  %val = load <4 x i8> addrspace(1)* %in, align 4
+  %val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
   %add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
   store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
   store <4 x i8> %add, <4 x i8> addrspace(1)* %out1, align 4
@@ -133,7 +133,7 @@ define void @test_copy_v4i8_x2_extra_use
 ; SI-NOT: bfi
 ; SI: s_endpgm
 define void @test_copy_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
-  %val = load <3 x i8> addrspace(1)* %in, align 4
+  %val = load <3 x i8>, <3 x i8> addrspace(1)* %in, align 4
   store <3 x i8> %val, <3 x i8> addrspace(1)* %out, align 4
   ret void
 }
@@ -145,7 +145,7 @@ define void @test_copy_v3i8(<3 x i8> add
 ; SI: buffer_load_ubyte
 ; SI: s_endpgm
 define void @test_copy_v4i8_volatile_load(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
-  %val = load volatile <4 x i8> addrspace(1)* %in, align 4
+  %val = load volatile <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
   store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
   ret void
 }
@@ -161,7 +161,7 @@ define void @test_copy_v4i8_volatile_loa
 ; SI: buffer_store_byte
 ; SI: s_endpgm
 define void @test_copy_v4i8_volatile_store(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
-  %val = load <4 x i8> addrspace(1)* %in, align 4
+  %val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
   store volatile <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
   ret void
 }
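
Every hunk in these test updates applies the same mechanical rewrite: the load instruction now takes the loaded type as an explicit first operand, with the pointer operand (and its type) following as before. As a minimal sketch of the before/after forms — the function and value names here are illustrative only, not taken from the patch:

  ; Old (typed-pointer) syntax:
  ;   %v = load i32* %p, align 4
  ; New syntax with the explicit result type:
  define i32 @load_example(i32* %p) {
    %v = load i32, i32* %p, align 4
    ret i32 %v
  }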

Modified: llvm/trunk/test/CodeGen/R600/copy-to-reg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/copy-to-reg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/copy-to-reg.ll (original)
+++ llvm/trunk/test/CodeGen/R600/copy-to-reg.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ loop:
 
 done:
   %tmp0 = getelementptr [16 x i32], [16 x i32]* %alloca, i32 0, i32 0
-  %tmp1 = load i32* %tmp0
+  %tmp1 = load i32, i32* %tmp0
   store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/ctlz_zero_undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/ctlz_zero_undef.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/ctlz_zero_undef.ll (original)
+++ llvm/trunk/test/CodeGen/R600/ctlz_zero_undef.ll Fri Feb 27 15:17:42 2015
@@ -28,7 +28,7 @@ define void @s_ctlz_zero_undef_i32(i32 a
 ; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
 ; EG: FFBH_UINT {{\*? *}}[[RESULT]]
 define void @v_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
-  %val = load i32 addrspace(1)* %valptr, align 4
+  %val = load i32, i32 addrspace(1)* %valptr, align 4
   %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
   store i32 %ctlz, i32 addrspace(1)* %out, align 4
   ret void
@@ -44,7 +44,7 @@ define void @v_ctlz_zero_undef_i32(i32 a
 ; EG: FFBH_UINT {{\*? *}}[[RESULT]]
 ; EG: FFBH_UINT {{\*? *}}[[RESULT]]
 define void @v_ctlz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
-  %val = load <2 x i32> addrspace(1)* %valptr, align 8
+  %val = load <2 x i32>, <2 x i32> addrspace(1)* %valptr, align 8
   %ctlz = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %val, i1 true) nounwind readnone
   store <2 x i32> %ctlz, <2 x i32> addrspace(1)* %out, align 8
   ret void
@@ -64,7 +64,7 @@ define void @v_ctlz_zero_undef_v2i32(<2
 ; EG: FFBH_UINT {{\*? *}}[[RESULT]]
 ; EG: FFBH_UINT {{\*? *}}[[RESULT]]
 define void @v_ctlz_zero_undef_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
-  %val = load <4 x i32> addrspace(1)* %valptr, align 16
+  %val = load <4 x i32>, <4 x i32> addrspace(1)* %valptr, align 16
   %ctlz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %val, i1 true) nounwind readnone
   store <4 x i32> %ctlz, <4 x i32> addrspace(1)* %out, align 16
   ret void

Modified: llvm/trunk/test/CodeGen/R600/ctpop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/ctpop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/ctpop.ll (original)
+++ llvm/trunk/test/CodeGen/R600/ctpop.ll Fri Feb 27 15:17:42 2015
@@ -31,7 +31,7 @@ define void @s_ctpop_i32(i32 addrspace(1
 
 ; EG: BCNT_INT
 define void @v_ctpop_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
   store i32 %ctpop, i32 addrspace(1)* %out, align 4
   ret void
@@ -49,8 +49,8 @@ define void @v_ctpop_i32(i32 addrspace(1
 ; EG: BCNT_INT
 ; EG: BCNT_INT
 define void @v_ctpop_add_chain_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in0, i32 addrspace(1)* noalias %in1) nounwind {
-  %val0 = load i32 addrspace(1)* %in0, align 4
-  %val1 = load i32 addrspace(1)* %in1, align 4
+  %val0 = load i32, i32 addrspace(1)* %in0, align 4
+  %val1 = load i32, i32 addrspace(1)* %in1, align 4
   %ctpop0 = call i32 @llvm.ctpop.i32(i32 %val0) nounwind readnone
   %ctpop1 = call i32 @llvm.ctpop.i32(i32 %val1) nounwind readnone
   %add = add i32 %ctpop0, %ctpop1
@@ -65,7 +65,7 @@ define void @v_ctpop_add_chain_i32(i32 a
 ; GCN-NEXT: buffer_store_dword [[RESULT]],
 ; GCN: s_endpgm
 define void @v_ctpop_add_sgpr_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in0, i32 addrspace(1)* noalias %in1, i32 %sval) nounwind {
-  %val0 = load i32 addrspace(1)* %in0, align 4
+  %val0 = load i32, i32 addrspace(1)* %in0, align 4
   %ctpop0 = call i32 @llvm.ctpop.i32(i32 %val0) nounwind readnone
   %add = add i32 %ctpop0, %sval
   store i32 %add, i32 addrspace(1)* %out, align 4
@@ -80,7 +80,7 @@ define void @v_ctpop_add_sgpr_i32(i32 ad
 ; EG: BCNT_INT
 ; EG: BCNT_INT
 define void @v_ctpop_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %in) nounwind {
-  %val = load <2 x i32> addrspace(1)* %in, align 8
+  %val = load <2 x i32>, <2 x i32> addrspace(1)* %in, align 8
   %ctpop = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %val) nounwind readnone
   store <2 x i32> %ctpop, <2 x i32> addrspace(1)* %out, align 8
   ret void
@@ -98,7 +98,7 @@ define void @v_ctpop_v2i32(<2 x i32> add
 ; EG: BCNT_INT
 ; EG: BCNT_INT
 define void @v_ctpop_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %in) nounwind {
-  %val = load <4 x i32> addrspace(1)* %in, align 16
+  %val = load <4 x i32>, <4 x i32> addrspace(1)* %in, align 16
   %ctpop = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %val) nounwind readnone
   store <4 x i32> %ctpop, <4 x i32> addrspace(1)* %out, align 16
   ret void
@@ -124,7 +124,7 @@ define void @v_ctpop_v4i32(<4 x i32> add
 ; EG: BCNT_INT
 ; EG: BCNT_INT
 define void @v_ctpop_v8i32(<8 x i32> addrspace(1)* noalias %out, <8 x i32> addrspace(1)* noalias %in) nounwind {
-  %val = load <8 x i32> addrspace(1)* %in, align 32
+  %val = load <8 x i32>, <8 x i32> addrspace(1)* %in, align 32
   %ctpop = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %val) nounwind readnone
   store <8 x i32> %ctpop, <8 x i32> addrspace(1)* %out, align 32
   ret void
@@ -166,7 +166,7 @@ define void @v_ctpop_v8i32(<8 x i32> add
 ; EG: BCNT_INT
 ; EG: BCNT_INT
 define void @v_ctpop_v16i32(<16 x i32> addrspace(1)* noalias %out, <16 x i32> addrspace(1)* noalias %in) nounwind {
-  %val = load <16 x i32> addrspace(1)* %in, align 32
+  %val = load <16 x i32>, <16 x i32> addrspace(1)* %in, align 32
   %ctpop = call <16 x i32> @llvm.ctpop.v16i32(<16 x i32> %val) nounwind readnone
   store <16 x i32> %ctpop, <16 x i32> addrspace(1)* %out, align 32
   ret void
@@ -180,7 +180,7 @@ define void @v_ctpop_v16i32(<16 x i32> a
 
 ; EG: BCNT_INT
 define void @v_ctpop_i32_add_inline_constant(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
   %add = add i32 %ctpop, 4
   store i32 %add, i32 addrspace(1)* %out, align 4
@@ -195,7 +195,7 @@ define void @v_ctpop_i32_add_inline_cons
 
 ; EG: BCNT_INT
 define void @v_ctpop_i32_add_inline_constant_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
   %add = add i32 4, %ctpop
   store i32 %add, i32 addrspace(1)* %out, align 4
@@ -210,7 +210,7 @@ define void @v_ctpop_i32_add_inline_cons
 ; GCN: buffer_store_dword [[RESULT]],
 ; GCN: s_endpgm
 define void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
   %add = add i32 %ctpop, 99999
   store i32 %add, i32 addrspace(1)* %out, align 4
@@ -226,7 +226,7 @@ define void @v_ctpop_i32_add_literal(i32
 
 ; EG: BCNT_INT
 define void @v_ctpop_i32_add_var(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
   %add = add i32 %ctpop, %const
   store i32 %add, i32 addrspace(1)* %out, align 4
@@ -242,7 +242,7 @@ define void @v_ctpop_i32_add_var(i32 add
 
 ; EG: BCNT_INT
 define void @v_ctpop_i32_add_var_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
   %add = add i32 %const, %ctpop
   store i32 %add, i32 addrspace(1)* %out, align 4
@@ -259,10 +259,10 @@ define void @v_ctpop_i32_add_var_inv(i32
 
 ; EG: BCNT_INT
 define void @v_ctpop_i32_add_vvar_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 addrspace(1)* noalias %constptr) nounwind {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
   %gep = getelementptr i32, i32 addrspace(1)* %constptr, i32 4
-  %const = load i32 addrspace(1)* %gep, align 4
+  %const = load i32, i32 addrspace(1)* %gep, align 4
   %add = add i32 %const, %ctpop
   store i32 %add, i32 addrspace(1)* %out, align 4
   ret void
@@ -290,7 +290,7 @@ if:
 
 else:
   %tmp3 = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %tmp4 = load i32 addrspace(1)* %tmp3
+  %tmp4 = load i32, i32 addrspace(1)* %tmp3
   br label %endif
 
 endif:

Modified: llvm/trunk/test/CodeGen/R600/ctpop64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/ctpop64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/ctpop64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/ctpop64.ll Fri Feb 27 15:17:42 2015
@@ -29,7 +29,7 @@ define void @s_ctpop_i64(i32 addrspace(1
 ; GCN: buffer_store_dword [[RESULT]],
 ; GCN: s_endpgm
 define void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
-  %val = load i64 addrspace(1)* %in, align 8
+  %val = load i64, i64 addrspace(1)* %in, align 8
   %ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
   %truncctpop = trunc i64 %ctpop to i32
   store i32 %truncctpop, i32 addrspace(1)* %out, align 4
@@ -67,7 +67,7 @@ define void @s_ctpop_v4i64(<4 x i32> add
 ; GCN: v_bcnt_u32_b32
 ; GCN: s_endpgm
 define void @v_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in) nounwind {
-  %val = load <2 x i64> addrspace(1)* %in, align 16
+  %val = load <2 x i64>, <2 x i64> addrspace(1)* %in, align 16
   %ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
   %truncctpop = trunc <2 x i64> %ctpop to <2 x i32>
   store <2 x i32> %truncctpop, <2 x i32> addrspace(1)* %out, align 8
@@ -85,7 +85,7 @@ define void @v_ctpop_v2i64(<2 x i32> add
 ; GCN: v_bcnt_u32_b32
 ; GCN: s_endpgm
 define void @v_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> addrspace(1)* noalias %in) nounwind {
-  %val = load <4 x i64> addrspace(1)* %in, align 32
+  %val = load <4 x i64>, <4 x i64> addrspace(1)* %in, align 32
   %ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %val) nounwind readnone
   %truncctpop = trunc <4 x i64> %ctpop to <4 x i32>
   store <4 x i32> %truncctpop, <4 x i32> addrspace(1)* %out, align 16
@@ -114,7 +114,7 @@ if:
 
 else:
   %tmp3 = getelementptr i64, i64 addrspace(1)* %in, i32 1
-  %tmp4 = load i64 addrspace(1)* %tmp3
+  %tmp4 = load i64, i64 addrspace(1)* %tmp3
   br label %endif
 
 endif:

Modified: llvm/trunk/test/CodeGen/R600/cttz_zero_undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/cttz_zero_undef.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/cttz_zero_undef.ll (original)
+++ llvm/trunk/test/CodeGen/R600/cttz_zero_undef.ll Fri Feb 27 15:17:42 2015
@@ -28,7 +28,7 @@ define void @s_cttz_zero_undef_i32(i32 a
 ; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
 ; EG: FFBL_INT {{\*? *}}[[RESULT]]
 define void @v_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
-  %val = load i32 addrspace(1)* %valptr, align 4
+  %val = load i32, i32 addrspace(1)* %valptr, align 4
   %cttz = call i32 @llvm.cttz.i32(i32 %val, i1 true) nounwind readnone
   store i32 %cttz, i32 addrspace(1)* %out, align 4
   ret void
@@ -44,7 +44,7 @@ define void @v_cttz_zero_undef_i32(i32 a
 ; EG: FFBL_INT {{\*? *}}[[RESULT]]
 ; EG: FFBL_INT {{\*? *}}[[RESULT]]
 define void @v_cttz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
-  %val = load <2 x i32> addrspace(1)* %valptr, align 8
+  %val = load <2 x i32>, <2 x i32> addrspace(1)* %valptr, align 8
   %cttz = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %val, i1 true) nounwind readnone
   store <2 x i32> %cttz, <2 x i32> addrspace(1)* %out, align 8
   ret void
@@ -64,7 +64,7 @@ define void @v_cttz_zero_undef_v2i32(<2
 ; EG: FFBL_INT {{\*? *}}[[RESULT]]
 ; EG: FFBL_INT {{\*? *}}[[RESULT]]
 define void @v_cttz_zero_undef_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
-  %val = load <4 x i32> addrspace(1)* %valptr, align 16
+  %val = load <4 x i32>, <4 x i32> addrspace(1)* %valptr, align 16
   %cttz = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %val, i1 true) nounwind readnone
   store <4 x i32> %cttz, <4 x i32> addrspace(1)* %out, align 16
   ret void

Modified: llvm/trunk/test/CodeGen/R600/cvt_f32_ubyte.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/cvt_f32_ubyte.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/cvt_f32_ubyte.ll (original)
+++ llvm/trunk/test/CodeGen/R600/cvt_f32_ubyte.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
 ; SI: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[LOADREG]]
 ; SI: buffer_store_dword [[CONV]],
 define void @load_i8_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
-  %load = load i8 addrspace(1)* %in, align 1
+  %load = load i8, i8 addrspace(1)* %in, align 1
   %cvt = uitofp i8 %load to float
   store float %cvt, float addrspace(1)* %out, align 4
   ret void
@@ -23,7 +23,7 @@ define void @load_i8_to_f32(float addrsp
 ; SI-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
 ; SI: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
 define void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8> addrspace(1)* noalias %in) nounwind {
-  %load = load <2 x i8> addrspace(1)* %in, align 2
+  %load = load <2 x i8>, <2 x i8> addrspace(1)* %in, align 2
   %cvt = uitofp <2 x i8> %load to <2 x float>
   store <2 x float> %cvt, <2 x float> addrspace(1)* %out, align 16
   ret void
@@ -37,7 +37,7 @@ define void @load_v2i8_to_v2f32(<2 x flo
 ; SI-DAG: v_cvt_f32_ubyte0_e32
 ; SI: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
 define void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8> addrspace(1)* noalias %in) nounwind {
-  %load = load <3 x i8> addrspace(1)* %in, align 4
+  %load = load <3 x i8>, <3 x i8> addrspace(1)* %in, align 4
   %cvt = uitofp <3 x i8> %load to <3 x float>
   store <3 x float> %cvt, <3 x float> addrspace(1)* %out, align 16
   ret void
@@ -53,7 +53,7 @@ define void @load_v3i8_to_v3f32(<3 x flo
 ; SI-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
 ; SI: buffer_store_dwordx4 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
 define void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
-  %load = load <4 x i8> addrspace(1)* %in, align 4
+  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
   %cvt = uitofp <4 x i8> %load to <4 x float>
   store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
   ret void
@@ -77,7 +77,7 @@ define void @load_v4i8_to_v4f32(<4 x flo
 
 ; SI: buffer_store_dwordx4 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
 define void @load_v4i8_to_v4f32_unaligned(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
-  %load = load <4 x i8> addrspace(1)* %in, align 1
+  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 1
   %cvt = uitofp <4 x i8> %load to <4 x float>
   store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
   ret void
@@ -105,7 +105,7 @@ define void @load_v4i8_to_v4f32_unaligne
 ; XSI: v_cvt_f32_u32_e32
 ; SI: s_endpgm
 define void @load_v4i8_to_v4f32_2_uses(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %out2, <4 x i8> addrspace(1)* noalias %in) nounwind {
-  %load = load <4 x i8> addrspace(1)* %in, align 4
+  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
   %cvt = uitofp <4 x i8> %load to <4 x float>
   store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
   %add = add <4 x i8> %load, <i8 9, i8 9, i8 9, i8 9> ; Second use of %load
@@ -117,7 +117,7 @@ define void @load_v4i8_to_v4f32_2_uses(<
 ; SI-LABEL: {{^}}load_v7i8_to_v7f32:
 ; SI: s_endpgm
 define void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8> addrspace(1)* noalias %in) nounwind {
-  %load = load <7 x i8> addrspace(1)* %in, align 1
+  %load = load <7 x i8>, <7 x i8> addrspace(1)* %in, align 1
   %cvt = uitofp <7 x i8> %load to <7 x float>
   store <7 x float> %cvt, <7 x float> addrspace(1)* %out, align 16
   ret void
@@ -146,7 +146,7 @@ define void @load_v7i8_to_v7f32(<7 x flo
 ; SI: buffer_store_dword
 ; SI: buffer_store_dword
 define void @load_v8i8_to_v8f32(<8 x float> addrspace(1)* noalias %out, <8 x i8> addrspace(1)* noalias %in) nounwind {
-  %load = load <8 x i8> addrspace(1)* %in, align 8
+  %load = load <8 x i8>, <8 x i8> addrspace(1)* %in, align 8
   %cvt = uitofp <8 x i8> %load to <8 x float>
   store <8 x float> %cvt, <8 x float> addrspace(1)* %out, align 16
   ret void
@@ -158,7 +158,7 @@ define void @load_v8i8_to_v8f32(<8 x flo
 ; SI-NEXT: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[ADD]]
 ; SI: buffer_store_dword [[CONV]],
 define void @i8_zext_inreg_i32_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
-  %load = load i32 addrspace(1)* %in, align 4
+  %load = load i32, i32 addrspace(1)* %in, align 4
   %add = add i32 %load, 2
   %inreg = and i32 %add, 255
   %cvt = uitofp i32 %inreg to float
@@ -168,7 +168,7 @@ define void @i8_zext_inreg_i32_to_f32(fl
 
 ; SI-LABEL: {{^}}i8_zext_inreg_hi1_to_f32:
 define void @i8_zext_inreg_hi1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
-  %load = load i32 addrspace(1)* %in, align 4
+  %load = load i32, i32 addrspace(1)* %in, align 4
   %inreg = and i32 %load, 65280
   %shr = lshr i32 %inreg, 8
   %cvt = uitofp i32 %shr to float
@@ -180,7 +180,7 @@ define void @i8_zext_inreg_hi1_to_f32(fl
 ; We don't get these ones because of the zext, but instcombine removes
 ; them so it shouldn't really matter.
 define void @i8_zext_i32_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
-  %load = load i8 addrspace(1)* %in, align 1
+  %load = load i8, i8 addrspace(1)* %in, align 1
   %ext = zext i8 %load to i32
   %cvt = uitofp i32 %ext to float
   store float %cvt, float addrspace(1)* %out, align 4
@@ -188,7 +188,7 @@ define void @i8_zext_i32_to_f32(float ad
 }
 
 define void @v4i8_zext_v4i32_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
-  %load = load <4 x i8> addrspace(1)* %in, align 1
+  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 1
   %ext = zext <4 x i8> %load to <4 x i32>
   %cvt = uitofp <4 x i32> %ext to <4 x float>
   store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16

Modified: llvm/trunk/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll (original)
+++ llvm/trunk/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@
 define void @sint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %sint = load i32 addrspace(1) * %in
+  %sint = load i32, i32 addrspace(1) * %in
   %conv = sitofp i32 %sint to float
   %0 = insertelement <4 x float> undef, float %conv, i32 0
   %splat = shufflevector <4 x float> %0, <4 x float> undef, <4 x i32> zeroinitializer
@@ -27,7 +27,7 @@ entry:
 define void @uint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %uint = load i32 addrspace(1) * %in
+  %uint = load i32, i32 addrspace(1) * %in
   %conv = uitofp i32 %uint to float
   %0 = insertelement <4 x float> undef, float %conv, i32 0
   %splat = shufflevector <4 x float> %0, <4 x float> undef, <4 x i32> zeroinitializer

Modified: llvm/trunk/test/CodeGen/R600/dot4-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/dot4-folding.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/dot4-folding.ll (original)
+++ llvm/trunk/test/CodeGen/R600/dot4-folding.ll Fri Feb 27 15:17:42 2015
@@ -14,8 +14,8 @@
 
 define void @main(float addrspace(1)* %out) {
 main_body:
-  %0 = load <4 x float> addrspace(8)* null
-  %1 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %0 = load <4 x float>, <4 x float> addrspace(8)* null
+  %1 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %2 = call float @llvm.AMDGPU.dp4(<4 x float> %0,<4 x float> %1)
   %3 = insertelement <4 x float> undef, float %2, i32 0
   call void @llvm.R600.store.swizzle(<4 x float> %3, i32 0, i32 0)

Modified: llvm/trunk/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll (original)
+++ llvm/trunk/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll Fri Feb 27 15:17:42 2015
@@ -34,19 +34,19 @@ for.body:
   %k.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   tail call void @llvm.AMDGPU.barrier.local() #1
   %arrayidx = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %offset.02
-  %tmp = load float addrspace(3)* %arrayidx, align 4
+  %tmp = load float, float addrspace(3)* %arrayidx, align 4
   %add1 = add nsw i32 %offset.02, 1
   %arrayidx2 = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %add1
-  %tmp1 = load float addrspace(3)* %arrayidx2, align 4
+  %tmp1 = load float, float addrspace(3)* %arrayidx2, align 4
   %add3 = add nsw i32 %offset.02, 32
   %arrayidx4 = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %add3
-  %tmp2 = load float addrspace(3)* %arrayidx4, align 4
+  %tmp2 = load float, float addrspace(3)* %arrayidx4, align 4
   %add5 = add nsw i32 %offset.02, 33
   %arrayidx6 = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %add5
-  %tmp3 = load float addrspace(3)* %arrayidx6, align 4
+  %tmp3 = load float, float addrspace(3)* %arrayidx6, align 4
   %add7 = add nsw i32 %offset.02, 64
   %arrayidx8 = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %add7
-  %tmp4 = load float addrspace(3)* %arrayidx8, align 4
+  %tmp4 = load float, float addrspace(3)* %arrayidx8, align 4
   %add9 = fadd float %tmp, %tmp1
   %add10 = fadd float %add9, %tmp2
   %add11 = fadd float %add10, %tmp3

Modified: llvm/trunk/test/CodeGen/R600/ds_read2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/ds_read2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/ds_read2.ll (original)
+++ llvm/trunk/test/CodeGen/R600/ds_read2.ll Fri Feb 27 15:17:42 2015
@@ -15,10 +15,10 @@
 define void @simple_read2_f32(float addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 8
   %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
@@ -34,10 +34,10 @@ define void @simple_read2_f32(float addr
 define void @simple_read2_f32_max_offset(float addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 255
   %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
@@ -52,10 +52,10 @@ define void @simple_read2_f32_max_offset
 define void @simple_read2_f32_too_far(float addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 257
   %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
@@ -70,20 +70,20 @@ define void @simple_read2_f32_x2(float a
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 0
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
 
   %idx.1 = add nsw i32 %tid.x, 8
   %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum.0 = fadd float %val0, %val1
 
   %idx.2 = add nsw i32 %tid.x, 11
   %arrayidx2 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2
-  %val2 = load float addrspace(3)* %arrayidx2, align 4
+  %val2 = load float, float addrspace(3)* %arrayidx2, align 4
 
   %idx.3 = add nsw i32 %tid.x, 27
   %arrayidx3 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3
-  %val3 = load float addrspace(3)* %arrayidx3, align 4
+  %val3 = load float, float addrspace(3)* %arrayidx3, align 4
   %sum.1 = fadd float %val2, %val3
 
   %sum = fadd float %sum.0, %sum.1
@@ -102,22 +102,22 @@ define void @simple_read2_f32_x2_barrier
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 0
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
 
   %idx.1 = add nsw i32 %tid.x, 8
   %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum.0 = fadd float %val0, %val1
 
   call void @llvm.AMDGPU.barrier.local() #2
 
   %idx.2 = add nsw i32 %tid.x, 11
   %arrayidx2 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2
-  %val2 = load float addrspace(3)* %arrayidx2, align 4
+  %val2 = load float, float addrspace(3)* %arrayidx2, align 4
 
   %idx.3 = add nsw i32 %tid.x, 27
   %arrayidx3 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3
-  %val3 = load float addrspace(3)* %arrayidx3, align 4
+  %val3 = load float, float addrspace(3)* %arrayidx3, align 4
   %sum.1 = fadd float %val2, %val3
 
   %sum = fadd float %sum.0, %sum.1
@@ -137,20 +137,20 @@ define void @simple_read2_f32_x2_nonzero
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
 
   %idx.1 = add nsw i32 %tid.x, 8
   %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum.0 = fadd float %val0, %val1
 
   %idx.2 = add nsw i32 %tid.x, 11
   %arrayidx2 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2
-  %val2 = load float addrspace(3)* %arrayidx2, align 4
+  %val2 = load float, float addrspace(3)* %arrayidx2, align 4
 
   %idx.3 = add nsw i32 %tid.x, 27
   %arrayidx3 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3
-  %val3 = load float addrspace(3)* %arrayidx3, align 4
+  %val3 = load float, float addrspace(3)* %arrayidx3, align 4
   %sum.1 = fadd float %val2, %val3
 
   %sum = fadd float %sum.0, %sum.1
@@ -177,8 +177,8 @@ define void @read2_ptr_is_subreg_arg_f32
   %gep = getelementptr inbounds float, <2 x float addrspace(3)*> %lds.ptr, <2 x i32> %index.1
   %gep.0 = extractelement <2 x float addrspace(3)*> %gep, i32 0
   %gep.1 = extractelement <2 x float addrspace(3)*> %gep, i32 1
-  %val0 = load float addrspace(3)* %gep.0, align 4
-  %val1 = load float addrspace(3)* %gep.1, align 4
+  %val0 = load float, float addrspace(3)* %gep.0, align 4
+  %val1 = load float, float addrspace(3)* %gep.1, align 4
   %add.x = add nsw i32 %x.i, 8
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
@@ -207,8 +207,8 @@ define void @read2_ptr_is_subreg_arg_off
   ; Apply an additional offset after the vector that will be more obviously folded.
   %gep.1.offset = getelementptr float, float addrspace(3)* %gep.1, i32 8
 
-  %val0 = load float addrspace(3)* %gep.0, align 4
-  %val1 = load float addrspace(3)* %gep.1.offset, align 4
+  %val0 = load float, float addrspace(3)* %gep.0, align 4
+  %val1 = load float, float addrspace(3)* %gep.1.offset, align 4
   %add.x = add nsw i32 %x.i, 8
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
@@ -231,8 +231,8 @@ define void @read2_ptr_is_subreg_f32(flo
   %gep = getelementptr inbounds [512 x float], <2 x [512 x float] addrspace(3)*> %ptr.1, <2 x i32> <i32 0, i32 0>, <2 x i32> %idx
   %gep.0 = extractelement <2 x float addrspace(3)*> %gep, i32 0
   %gep.1 = extractelement <2 x float addrspace(3)*> %gep, i32 1
-  %val0 = load float addrspace(3)* %gep.0, align 4
-  %val1 = load float addrspace(3)* %gep.1, align 4
+  %val0 = load float, float addrspace(3)* %gep.0, align 4
+  %val1 = load float, float addrspace(3)* %gep.1, align 4
   %add.x = add nsw i32 %x.i, 8
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
@@ -248,10 +248,10 @@ define void @read2_ptr_is_subreg_f32(flo
 define void @simple_read2_f32_volatile_0(float addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
-  %val0 = load volatile float addrspace(3)* %arrayidx0, align 4
+  %val0 = load volatile float, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 8
   %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
@@ -266,10 +266,10 @@ define void @simple_read2_f32_volatile_0
 define void @simple_read2_f32_volatile_1(float addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 8
   %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
-  %val1 = load volatile float addrspace(3)* %arrayidx1, align 4
+  %val1 = load volatile float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
@@ -285,10 +285,10 @@ define void @simple_read2_f32_volatile_1
 define void @unaligned_read2_f32(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %x.i
-  %val0 = load float addrspace(3)* %arrayidx0, align 1
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 1
   %add.x = add nsw i32 %x.i, 8
   %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x
-  %val1 = load float addrspace(3)* %arrayidx1, align 1
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 1
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
@@ -301,10 +301,10 @@ define void @unaligned_read2_f32(float a
 define void @misaligned_2_simple_read2_f32(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %x.i
-  %val0 = load float addrspace(3)* %arrayidx0, align 2
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 2
   %add.x = add nsw i32 %x.i, 8
   %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x
-  %val1 = load float addrspace(3)* %arrayidx1, align 2
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 2
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
@@ -320,10 +320,10 @@ define void @misaligned_2_simple_read2_f
 define void @simple_read2_f64(double addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x = add nsw i32 %x.i, 8
   %arrayidx1 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
   %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 8
@@ -336,10 +336,10 @@ define void @simple_read2_f64(double add
 define void @simple_read2_f64_max_offset(double addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x = add nsw i32 %x.i, 255
   %arrayidx1 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
   %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 8
@@ -354,10 +354,10 @@ define void @simple_read2_f64_max_offset
 define void @simple_read2_f64_too_far(double addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x = add nsw i32 %x.i, 257
   %arrayidx1 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
   %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 8
@@ -372,10 +372,10 @@ define void @simple_read2_f64_too_far(do
 define void @misaligned_read2_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
-  %val0 = load double addrspace(3)* %arrayidx0, align 4
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 7
   %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x
-  %val1 = load double addrspace(3)* %arrayidx1, align 4
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 4
   %sum = fadd double %val0, %val1
   %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 4
@@ -388,8 +388,8 @@ define void @misaligned_read2_f64(double
 ; SI: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
 ; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[ZERO]] offset0:0 offset1:1
 define void @load_constant_adjacent_offsets(i32 addrspace(1)* %out) {
-  %val0 = load i32 addrspace(3)* getelementptr inbounds ([4 x i32] addrspace(3)* @foo, i32 0, i32 0), align 4
-  %val1 = load i32 addrspace(3)* getelementptr inbounds ([4 x i32] addrspace(3)* @foo, i32 0, i32 1), align 4
+  %val0 = load i32, i32 addrspace(3)* getelementptr inbounds ([4 x i32] addrspace(3)* @foo, i32 0, i32 0), align 4
+  %val1 = load i32, i32 addrspace(3)* getelementptr inbounds ([4 x i32] addrspace(3)* @foo, i32 0, i32 1), align 4
   %sum = add i32 %val0, %val1
   store i32 %sum, i32 addrspace(1)* %out, align 4
   ret void
@@ -399,8 +399,8 @@ define void @load_constant_adjacent_offs
 ; SI: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
 ; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[ZERO]] offset0:0 offset1:2
 define void @load_constant_disjoint_offsets(i32 addrspace(1)* %out) {
-  %val0 = load i32 addrspace(3)* getelementptr inbounds ([4 x i32] addrspace(3)* @foo, i32 0, i32 0), align 4
-  %val1 = load i32 addrspace(3)* getelementptr inbounds ([4 x i32] addrspace(3)* @foo, i32 0, i32 2), align 4
+  %val0 = load i32, i32 addrspace(3)* getelementptr inbounds ([4 x i32] addrspace(3)* @foo, i32 0, i32 0), align 4
+  %val1 = load i32, i32 addrspace(3)* getelementptr inbounds ([4 x i32] addrspace(3)* @foo, i32 0, i32 2), align 4
   %sum = add i32 %val0, %val1
   store i32 %sum, i32 addrspace(1)* %out, align 4
   ret void
@@ -413,8 +413,8 @@ define void @load_constant_disjoint_offs
 ; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[ZERO]] offset0:0 offset1:1
 ; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[ZERO]] offset0:2 offset1:3
 define void @load_misaligned64_constant_offsets(i64 addrspace(1)* %out) {
-  %val0 = load i64 addrspace(3)* getelementptr inbounds ([4 x i64] addrspace(3)* @bar, i32 0, i32 0), align 4
-  %val1 = load i64 addrspace(3)* getelementptr inbounds ([4 x i64] addrspace(3)* @bar, i32 0, i32 1), align 4
+  %val0 = load i64, i64 addrspace(3)* getelementptr inbounds ([4 x i64] addrspace(3)* @bar, i32 0, i32 0), align 4
+  %val1 = load i64, i64 addrspace(3)* getelementptr inbounds ([4 x i64] addrspace(3)* @bar, i32 0, i32 1), align 4
   %sum = add i64 %val0, %val1
   store i64 %sum, i64 addrspace(1)* %out, align 8
   ret void
@@ -429,8 +429,8 @@ define void @load_misaligned64_constant_
 ; SI-DAG: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[BASE1]] offset0:0 offset1:1
 ; SI: s_endpgm
 define void @load_misaligned64_constant_large_offsets(i64 addrspace(1)* %out) {
-  %val0 = load i64 addrspace(3)* getelementptr inbounds ([4096 x i64] addrspace(3)* @bar.large, i32 0, i32 2048), align 4
-  %val1 = load i64 addrspace(3)* getelementptr inbounds ([4096 x i64] addrspace(3)* @bar.large, i32 0, i32 4095), align 4
+  %val0 = load i64, i64 addrspace(3)* getelementptr inbounds ([4096 x i64] addrspace(3)* @bar.large, i32 0, i32 2048), align 4
+  %val1 = load i64, i64 addrspace(3)* getelementptr inbounds ([4096 x i64] addrspace(3)* @bar.large, i32 0, i32 4095), align 4
   %sum = add i64 %val0, %val1
   store i64 %sum, i64 addrspace(1)* %out, align 8
   ret void
@@ -443,33 +443,33 @@ define void @sgemm_inner_loop_read2_sequ
   %x.i = tail call i32 @llvm.r600.read.tgid.x() #1
   %y.i = tail call i32 @llvm.r600.read.tidig.y() #1
   %arrayidx44 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %x.i
-  %tmp16 = load float addrspace(3)* %arrayidx44, align 4
+  %tmp16 = load float, float addrspace(3)* %arrayidx44, align 4
   %add47 = add nsw i32 %x.i, 1
   %arrayidx48 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add47
-  %tmp17 = load float addrspace(3)* %arrayidx48, align 4
+  %tmp17 = load float, float addrspace(3)* %arrayidx48, align 4
   %add51 = add nsw i32 %x.i, 16
   %arrayidx52 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add51
-  %tmp18 = load float addrspace(3)* %arrayidx52, align 4
+  %tmp18 = load float, float addrspace(3)* %arrayidx52, align 4
   %add55 = add nsw i32 %x.i, 17
   %arrayidx56 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add55
-  %tmp19 = load float addrspace(3)* %arrayidx56, align 4
+  %tmp19 = load float, float addrspace(3)* %arrayidx56, align 4
   %arrayidx60 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %y.i
-  %tmp20 = load float addrspace(3)* %arrayidx60, align 4
+  %tmp20 = load float, float addrspace(3)* %arrayidx60, align 4
   %add63 = add nsw i32 %y.i, 1
   %arrayidx64 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add63
-  %tmp21 = load float addrspace(3)* %arrayidx64, align 4
+  %tmp21 = load float, float addrspace(3)* %arrayidx64, align 4
   %add67 = add nsw i32 %y.i, 32
   %arrayidx68 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add67
-  %tmp22 = load float addrspace(3)* %arrayidx68, align 4
+  %tmp22 = load float, float addrspace(3)* %arrayidx68, align 4
   %add71 = add nsw i32 %y.i, 33
   %arrayidx72 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add71
-  %tmp23 = load float addrspace(3)* %arrayidx72, align 4
+  %tmp23 = load float, float addrspace(3)* %arrayidx72, align 4
   %add75 = add nsw i32 %y.i, 64
   %arrayidx76 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add75
-  %tmp24 = load float addrspace(3)* %arrayidx76, align 4
+  %tmp24 = load float, float addrspace(3)* %arrayidx76, align 4
   %add79 = add nsw i32 %y.i, 65
   %arrayidx80 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add79
-  %tmp25 = load float addrspace(3)* %arrayidx80, align 4
+  %tmp25 = load float, float addrspace(3)* %arrayidx80, align 4
   %sum.0 = fadd float %tmp16, %tmp17
   %sum.1 = fadd float %sum.0, %tmp18
   %sum.2 = fadd float %sum.1, %tmp19
@@ -484,13 +484,13 @@ define void @sgemm_inner_loop_read2_sequ
 }
 
 define void @misaligned_read2_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(3)* %in) #0 {
-  %load = load <2 x i32> addrspace(3)* %in, align 4
+  %load = load <2 x i32>, <2 x i32> addrspace(3)* %in, align 4
   store <2 x i32> %load, <2 x i32> addrspace(1)* %out, align 8
   ret void
 }
 
 define void @misaligned_read2_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %in) #0 {
-  %load = load i64 addrspace(3)* %in, align 4
+  %load = load i64, i64 addrspace(3)* %in, align 4
   store i64 %load, i64 addrspace(1)* %out, align 8
   ret void
 }
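
As the volatile cases above show, qualifier placement is unchanged by the migration: `volatile` still precedes the new type operand, and address-space annotations stay attached to the pointer type. A small illustrative function under the same assumption (hypothetical names, not from the patch):

  define void @volatile_addrspace_example(float addrspace(3)* %p, float addrspace(1)* %out) {
    %v = load volatile float, float addrspace(3)* %p, align 4
    store float %v, float addrspace(1)* %out, align 4
    ret void
  }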

Modified: llvm/trunk/test/CodeGen/R600/ds_read2_offset_order.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/ds_read2_offset_order.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/ds_read2_offset_order.ll (original)
+++ llvm/trunk/test/CodeGen/R600/ds_read2_offset_order.ll Fri Feb 27 15:17:42 2015
@@ -15,30 +15,30 @@
 define void @offset_order(float addrspace(1)* %out) {
 entry:
   %ptr0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 0
-  %val0 = load float addrspace(3)* %ptr0
+  %val0 = load float, float addrspace(3)* %ptr0
 
   %ptr1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 256
-  %val1 = load float addrspace(3)* %ptr1
+  %val1 = load float, float addrspace(3)* %ptr1
   %add1 = fadd float %val0, %val1
 
   %ptr2 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 3
-  %val2 = load float addrspace(3)* %ptr2
+  %val2 = load float, float addrspace(3)* %ptr2
   %add2 = fadd float %add1, %val2
 
   %ptr3 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 2
-  %val3 = load float addrspace(3)* %ptr3
+  %val3 = load float, float addrspace(3)* %ptr3
   %add3 = fadd float %add2, %val3
 
   %ptr4 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 12
-  %val4 = load float addrspace(3)* %ptr4
+  %val4 = load float, float addrspace(3)* %ptr4
   %add4 = fadd float %add3, %val4
 
   %ptr5 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 14
-  %val5 = load float addrspace(3)* %ptr5
+  %val5 = load float, float addrspace(3)* %ptr5
   %add5 = fadd float %add4, %val5
 
   %ptr6 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 11
-  %val6 = load float addrspace(3)* %ptr6
+  %val6 = load float, float addrspace(3)* %ptr6
   %add6 = fadd float %add5, %val6
   store float %add6, float addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/ds_read2st64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/ds_read2st64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/ds_read2st64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/ds_read2st64.ll Fri Feb 27 15:17:42 2015
@@ -13,10 +13,10 @@
 define void @simple_read2st64_f32_0_1(float addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 64
   %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
@@ -33,10 +33,10 @@ define void @simple_read2st64_f32_1_2(fl
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
   %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x.1 = add nsw i32 %x.i, 128
   %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
@@ -53,10 +53,10 @@ define void @simple_read2st64_f32_max_of
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
   %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x.1 = add nsw i32 %x.i, 16320
   %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
@@ -73,10 +73,10 @@ define void @simple_read2st64_f32_over_m
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
   %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x.1 = add nsw i32 %x.i, 16384
   %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
@@ -89,10 +89,10 @@ define void @simple_read2st64_f32_over_m
 define void @odd_invalid_read2st64_f32_0(float addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 63
   %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
@@ -106,10 +106,10 @@ define void @odd_invalid_read2st64_f32_1
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %add.x.1 = add nsw i32 %x.i, 127
   %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.1
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
   store float %sum, float addrspace(1)* %out.gep, align 4
@@ -125,10 +125,10 @@ define void @odd_invalid_read2st64_f32_1
 define void @simple_read2st64_f64_0_1(double addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x = add nsw i32 %x.i, 64
   %arrayidx1 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
   %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 8
@@ -145,10 +145,10 @@ define void @simple_read2st64_f64_1_2(do
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
   %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x.1 = add nsw i32 %x.i, 128
   %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
   %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 8
@@ -164,10 +164,10 @@ define void @simple_read2st64_f64_1_2(do
 define void @misaligned_read2st64_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
-  %val0 = load double addrspace(3)* %arrayidx0, align 4
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 64
   %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x
-  %val1 = load double addrspace(3)* %arrayidx1, align 4
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 4
   %sum = fadd double %val0, %val1
   %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 4
@@ -185,10 +185,10 @@ define void @simple_read2st64_f64_max_of
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 256
   %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x.1 = add nsw i32 %x.i, 8128
   %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
   %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 8
@@ -205,10 +205,10 @@ define void @simple_read2st64_f64_over_m
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
   %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x.1 = add nsw i32 %x.i, 8192
   %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
   %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 8
@@ -222,10 +222,10 @@ define void @invalid_read2st64_f64_odd_o
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %add.x.0 = add nsw i32 %x.i, 64
   %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x.1 = add nsw i32 %x.i, 8129
   %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.1
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
   %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 8
@@ -242,10 +242,10 @@ define void @invalid_read2st64_f64_odd_o
 define void @byte_size_only_divisible_64_read2_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
-  %val0 = load double addrspace(3)* %arrayidx0, align 8
+  %val0 = load double, double addrspace(3)* %arrayidx0, align 8
   %add.x = add nsw i32 %x.i, 8
   %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x
-  %val1 = load double addrspace(3)* %arrayidx1, align 8
+  %val1 = load double, double addrspace(3)* %arrayidx1, align 8
   %sum = fadd double %val0, %val1
   %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
   store double %sum, double addrspace(1)* %out.gep, align 4

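Every hunk in these test updates applies the same mechanical rewrite: the loaded value's type, previously implied by the pointer operand's pointee type, is now spelled as an explicit first operand of the load instruction. A minimal standalone sketch of the before/after syntax (a hypothetical function for illustration, not taken from the patch):

  define float @example(float addrspace(1)* %p) {
    ; Old syntax (pre-r230794), type implied by the pointer operand:
    ;   %v = load float addrspace(1)* %p, align 4
    ; New syntax, with the result type stated explicitly:
    %v = load float, float addrspace(1)* %p, align 4
    ret float %v
  }

The semantics are unchanged; only the textual IR grammar differs, which is why each hunk below touches a single line per load.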
Modified: llvm/trunk/test/CodeGen/R600/ds_write2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/ds_write2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/ds_write2.ll (original)
+++ llvm/trunk/test/CodeGen/R600/ds_write2.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@
 define void @simple_write2_one_val_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep = getelementptr float, float addrspace(1)* %in, i32 %x.i
-  %val = load float addrspace(1)* %in.gep, align 4
+  %val = load float, float addrspace(1)* %in.gep, align 4
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
   store float %val, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 8
@@ -31,8 +31,8 @@ define void @simple_write2_two_val_f32(f
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %x.i
   %in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
-  %val0 = load float addrspace(1)* %in.gep.0, align 4
-  %val1 = load float addrspace(1)* %in.gep.1, align 4
+  %val0 = load float, float addrspace(1)* %in.gep.0, align 4
+  %val1 = load float, float addrspace(1)* %in.gep.1, align 4
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
   store float %val0, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 8
@@ -50,8 +50,8 @@ define void @simple_write2_two_val_f32_v
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %x.i
   %in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %x.i
-  %val0 = load float addrspace(1)* %in0.gep, align 4
-  %val1 = load float addrspace(1)* %in1.gep, align 4
+  %val0 = load float, float addrspace(1)* %in0.gep, align 4
+  %val1 = load float, float addrspace(1)* %in1.gep, align 4
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
   store volatile float %val0, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 8
@@ -69,8 +69,8 @@ define void @simple_write2_two_val_f32_v
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %x.i
   %in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %x.i
-  %val0 = load float addrspace(1)* %in0.gep, align 4
-  %val1 = load float addrspace(1)* %in1.gep, align 4
+  %val0 = load float, float addrspace(1)* %in0.gep, align 4
+  %val1 = load float, float addrspace(1)* %in1.gep, align 4
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
   store float %val0, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 8
@@ -90,8 +90,8 @@ define void @simple_write2_two_val_subre
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep.0 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in, i32 %x.i
   %in.gep.1 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in.gep.0, i32 1
-  %val0 = load <2 x float> addrspace(1)* %in.gep.0, align 8
-  %val1 = load <2 x float> addrspace(1)* %in.gep.1, align 8
+  %val0 = load <2 x float>, <2 x float> addrspace(1)* %in.gep.0, align 8
+  %val1 = load <2 x float>, <2 x float> addrspace(1)* %in.gep.1, align 8
   %val0.0 = extractelement <2 x float> %val0, i32 0
   %val1.1 = extractelement <2 x float> %val1, i32 1
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
@@ -110,7 +110,7 @@ define void @simple_write2_two_val_subre
 define void @simple_write2_two_val_subreg2_f32(float addrspace(1)* %C, <2 x float> addrspace(1)* %in) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep = getelementptr <2 x float>, <2 x float> addrspace(1)* %in, i32 %x.i
-  %val = load <2 x float> addrspace(1)* %in.gep, align 8
+  %val = load <2 x float>, <2 x float> addrspace(1)* %in.gep, align 8
   %val0 = extractelement <2 x float> %val, i32 0
   %val1 = extractelement <2 x float> %val, i32 1
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
@@ -129,7 +129,7 @@ define void @simple_write2_two_val_subre
 define void @simple_write2_two_val_subreg4_f32(float addrspace(1)* %C, <4 x float> addrspace(1)* %in) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 %x.i
-  %val = load <4 x float> addrspace(1)* %in.gep, align 16
+  %val = load <4 x float>, <4 x float> addrspace(1)* %in.gep, align 16
   %val0 = extractelement <4 x float> %val, i32 0
   %val1 = extractelement <4 x float> %val, i32 3
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
@@ -150,8 +150,8 @@ define void @simple_write2_two_val_max_o
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %x.i
   %in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
-  %val0 = load float addrspace(1)* %in.gep.0, align 4
-  %val1 = load float addrspace(1)* %in.gep.1, align 4
+  %val0 = load float, float addrspace(1)* %in.gep.0, align 4
+  %val1 = load float, float addrspace(1)* %in.gep.1, align 4
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
   store float %val0, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 255
@@ -168,8 +168,8 @@ define void @simple_write2_two_val_too_f
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %x.i
   %in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %x.i
-  %val0 = load float addrspace(1)* %in0.gep, align 4
-  %val1 = load float addrspace(1)* %in1.gep, align 4
+  %val0 = load float, float addrspace(1)* %in0.gep, align 4
+  %val1 = load float, float addrspace(1)* %in1.gep, align 4
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
   store float %val0, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 257
@@ -186,8 +186,8 @@ define void @simple_write2_two_val_f32_x
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %tid.x
   %in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %tid.x
-  %val0 = load float addrspace(1)* %in0.gep, align 4
-  %val1 = load float addrspace(1)* %in1.gep, align 4
+  %val0 = load float, float addrspace(1)* %in0.gep, align 4
+  %val1 = load float, float addrspace(1)* %in1.gep, align 4
 
   %idx.0 = add nsw i32 %tid.x, 0
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
@@ -216,8 +216,8 @@ define void @simple_write2_two_val_f32_x
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %tid.x
   %in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %tid.x
-  %val0 = load float addrspace(1)* %in0.gep, align 4
-  %val1 = load float addrspace(1)* %in1.gep, align 4
+  %val0 = load float, float addrspace(1)* %in0.gep, align 4
+  %val1 = load float, float addrspace(1)* %in1.gep, align 4
 
   %idx.0 = add nsw i32 %tid.x, 3
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
@@ -247,8 +247,8 @@ define void @write2_ptr_subreg_arg_two_v
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %x.i
   %in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %x.i
-  %val0 = load float addrspace(1)* %in0.gep, align 4
-  %val1 = load float addrspace(1)* %in1.gep, align 4
+  %val0 = load float, float addrspace(1)* %in0.gep, align 4
+  %val1 = load float, float addrspace(1)* %in1.gep, align 4
 
   %index.0 = insertelement <2 x i32> undef, i32 %x.i, i32 0
   %index.1 = insertelement <2 x i32> %index.0, i32 8, i32 0
@@ -273,7 +273,7 @@ define void @write2_ptr_subreg_arg_two_v
 define void @simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace(1)* %in) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep = getelementptr double, double addrspace(1)* %in, i32 %x.i
-  %val = load double addrspace(1)* %in.gep, align 8
+  %val = load double, double addrspace(1)* %in.gep, align 8
   %arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
   store double %val, double addrspace(3)* %arrayidx0, align 8
   %add.x = add nsw i32 %x.i, 8
@@ -291,7 +291,7 @@ define void @simple_write2_one_val_f64(d
 define void @misaligned_simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep = getelementptr double, double addrspace(1)* %in, i32 %x.i
-  %val = load double addrspace(1)* %in.gep, align 8
+  %val = load double, double addrspace(1)* %in.gep, align 8
   %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
   store double %val, double addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 7
@@ -310,8 +310,8 @@ define void @simple_write2_two_val_f64(d
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep.0 = getelementptr double, double addrspace(1)* %in, i32 %x.i
   %in.gep.1 = getelementptr double, double addrspace(1)* %in.gep.0, i32 1
-  %val0 = load double addrspace(1)* %in.gep.0, align 8
-  %val1 = load double addrspace(1)* %in.gep.1, align 8
+  %val0 = load double, double addrspace(1)* %in.gep.0, align 8
+  %val1 = load double, double addrspace(1)* %in.gep.1, align 8
   %arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
   store double %val0, double addrspace(3)* %arrayidx0, align 8
   %add.x = add nsw i32 %x.i, 8
@@ -373,7 +373,7 @@ define void @store_misaligned64_constant
 define void @write2_sgemm_sequence(float addrspace(1)* %C, i32 %lda, i32 %ldb, float addrspace(1)* %in) #0 {
   %x.i = tail call i32 @llvm.r600.read.tgid.x() #1
   %y.i = tail call i32 @llvm.r600.read.tidig.y() #1
-  %val = load float addrspace(1)* %in
+  %val = load float, float addrspace(1)* %in
   %arrayidx44 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %x.i
   store float %val, float addrspace(3)* %arrayidx44, align 4
   %add47 = add nsw i32 %x.i, 1

Modified: llvm/trunk/test/CodeGen/R600/ds_write2st64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/ds_write2st64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/ds_write2st64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/ds_write2st64.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@
 define void @simple_write2st64_one_val_f32_0_1(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep = getelementptr float, float addrspace(1)* %in, i32 %x.i
-  %val = load float addrspace(1)* %in.gep, align 4
+  %val = load float, float addrspace(1)* %in.gep, align 4
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
   store float %val, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 64
@@ -31,8 +31,8 @@ define void @simple_write2st64_two_val_f
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %x.i
   %in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
-  %val0 = load float addrspace(1)* %in.gep.0, align 4
-  %val1 = load float addrspace(1)* %in.gep.1, align 4
+  %val0 = load float, float addrspace(1)* %in.gep.0, align 4
+  %val1 = load float, float addrspace(1)* %in.gep.1, align 4
   %add.x.0 = add nsw i32 %x.i, 128
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.0
   store float %val0, float addrspace(3)* %arrayidx0, align 4
@@ -52,8 +52,8 @@ define void @simple_write2st64_two_val_m
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %x.i
   %in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
-  %val0 = load float addrspace(1)* %in.gep.0, align 4
-  %val1 = load float addrspace(1)* %in.gep.1, align 4
+  %val0 = load float, float addrspace(1)* %in.gep.0, align 4
+  %val1 = load float, float addrspace(1)* %in.gep.1, align 4
   %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %x.i
   store float %val0, float addrspace(3)* %arrayidx0, align 4
   %add.x = add nsw i32 %x.i, 16320
@@ -72,8 +72,8 @@ define void @simple_write2st64_two_val_m
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep.0 = getelementptr double, double addrspace(1)* %in, i32 %x.i
   %in.gep.1 = getelementptr double, double addrspace(1)* %in.gep.0, i32 1
-  %val0 = load double addrspace(1)* %in.gep.0, align 8
-  %val1 = load double addrspace(1)* %in.gep.1, align 8
+  %val0 = load double, double addrspace(1)* %in.gep.0, align 8
+  %val1 = load double, double addrspace(1)* %in.gep.1, align 8
   %add.x.0 = add nsw i32 %x.i, 256
   %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
   store double %val0, double addrspace(3)* %arrayidx0, align 8
@@ -90,7 +90,7 @@ define void @simple_write2st64_two_val_m
 define void @byte_size_only_divisible_64_write2st64_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
   %in.gep = getelementptr double, double addrspace(1)* %in, i32 %x.i
-  %val = load double addrspace(1)* %in.gep, align 8
+  %val = load double, double addrspace(1)* %in.gep, align 8
   %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
   store double %val, double addrspace(3)* %arrayidx0, align 8
   %add.x = add nsw i32 %x.i, 8

Modified: llvm/trunk/test/CodeGen/R600/extload-private.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/extload-private.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/extload-private.ll (original)
+++ llvm/trunk/test/CodeGen/R600/extload-private.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 define void @load_i8_sext_private(i32 addrspace(1)* %out) {
 entry:
   %tmp0 = alloca i8
-  %tmp1 = load i8* %tmp0
+  %tmp1 = load i8, i8* %tmp0
   %tmp2 = sext i8 %tmp1 to i32
   store i32 %tmp2, i32 addrspace(1)* %out
   ret void
@@ -17,7 +17,7 @@ entry:
 define void @load_i8_zext_private(i32 addrspace(1)* %out) {
 entry:
   %tmp0 = alloca i8
-  %tmp1 = load i8* %tmp0
+  %tmp1 = load i8, i8* %tmp0
   %tmp2 = zext i8 %tmp1 to i32
   store i32 %tmp2, i32 addrspace(1)* %out
   ret void
@@ -28,7 +28,7 @@ entry:
 define void @load_i16_sext_private(i32 addrspace(1)* %out) {
 entry:
   %tmp0 = alloca i16
-  %tmp1 = load i16* %tmp0
+  %tmp1 = load i16, i16* %tmp0
   %tmp2 = sext i16 %tmp1 to i32
   store i32 %tmp2, i32 addrspace(1)* %out
   ret void
@@ -39,7 +39,7 @@ entry:
 define void @load_i16_zext_private(i32 addrspace(1)* %out) {
 entry:
   %tmp0 = alloca i16
-  %tmp1 = load i16* %tmp0
+  %tmp1 = load i16, i16* %tmp0
   %tmp2 = zext i16 %tmp1 to i32
   store i32 %tmp2, i32 addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/extload.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/extload.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/extload.ll (original)
+++ llvm/trunk/test/CodeGen/R600/extload.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
 
 define void @anyext_load_i8(i8 addrspace(1)* nocapture noalias %out, i8 addrspace(1)* nocapture noalias %src) nounwind {
   %cast = bitcast i8 addrspace(1)* %src to i32 addrspace(1)*
-  %load = load i32 addrspace(1)* %cast, align 1
+  %load = load i32, i32 addrspace(1)* %cast, align 1
   %x = bitcast i32 %load to <4 x i8>
   %castOut = bitcast i8 addrspace(1)* %out to <4 x i8> addrspace(1)*
   store <4 x i8> %x, <4 x i8> addrspace(1)* %castOut, align 1
@@ -21,7 +21,7 @@ define void @anyext_load_i8(i8 addrspace
 
 define void @anyext_load_i16(i16 addrspace(1)* nocapture noalias %out, i16 addrspace(1)* nocapture noalias %src) nounwind {
   %cast = bitcast i16 addrspace(1)* %src to i32 addrspace(1)*
-  %load = load i32 addrspace(1)* %cast, align 1
+  %load = load i32, i32 addrspace(1)* %cast, align 1
   %x = bitcast i32 %load to <2 x i16>
   %castOut = bitcast i16 addrspace(1)* %out to <2 x i16> addrspace(1)*
   store <2 x i16> %x, <2 x i16> addrspace(1)* %castOut, align 1
@@ -33,7 +33,7 @@ define void @anyext_load_i16(i16 addrspa
 ; EG: LDS_WRITE * [[VAL]]
 define void @anyext_load_lds_i8(i8 addrspace(3)* nocapture noalias %out, i8 addrspace(3)* nocapture noalias %src) nounwind {
   %cast = bitcast i8 addrspace(3)* %src to i32 addrspace(3)*
-  %load = load i32 addrspace(3)* %cast, align 1
+  %load = load i32, i32 addrspace(3)* %cast, align 1
   %x = bitcast i32 %load to <4 x i8>
   %castOut = bitcast i8 addrspace(3)* %out to <4 x i8> addrspace(3)*
   store <4 x i8> %x, <4 x i8> addrspace(3)* %castOut, align 1
@@ -45,7 +45,7 @@ define void @anyext_load_lds_i8(i8 addrs
 ; EG: LDS_WRITE * [[VAL]]
 define void @anyext_load_lds_i16(i16 addrspace(3)* nocapture noalias %out, i16 addrspace(3)* nocapture noalias %src) nounwind {
   %cast = bitcast i16 addrspace(3)* %src to i32 addrspace(3)*
-  %load = load i32 addrspace(3)* %cast, align 1
+  %load = load i32, i32 addrspace(3)* %cast, align 1
   %x = bitcast i32 %load to <2 x i16>
   %castOut = bitcast i16 addrspace(3)* %out to <2 x i16> addrspace(3)*
   store <2 x i16> %x, <2 x i16> addrspace(3)* %castOut, align 1

Modified: llvm/trunk/test/CodeGen/R600/fabs.f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fabs.f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fabs.f64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fabs.f64.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ define void @v_fabs_f64(double addrspace
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %tidext = sext i32 %tid to i64
   %gep = getelementptr double, double addrspace(1)* %in, i64 %tidext
-  %val = load double addrspace(1)* %gep, align 8
+  %val = load double, double addrspace(1)* %gep, align 8
   %fabs = call double @llvm.fabs.f64(double %val)
   store double %fabs, double addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/fadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fadd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fadd.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fadd.ll Fri Feb 27 15:17:42 2015
@@ -33,8 +33,8 @@ define void @fadd_v2f32(<2 x float> addr
 ; SI: v_add_f32
 define void @fadd_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
-  %a = load <4 x float> addrspace(1)* %in, align 16
-  %b = load <4 x float> addrspace(1)* %b_ptr, align 16
+  %a = load <4 x float>, <4 x float> addrspace(1)* %in, align 16
+  %b = load <4 x float>, <4 x float> addrspace(1)* %b_ptr, align 16
   %result = fadd <4 x float> %a, %b
   store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
   ret void

Modified: llvm/trunk/test/CodeGen/R600/fadd64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fadd64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fadd64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fadd64.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@
 
 define void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                       double addrspace(1)* %in2) {
-   %r0 = load double addrspace(1)* %in1
-   %r1 = load double addrspace(1)* %in2
+   %r0 = load double, double addrspace(1)* %in1
+   %r1 = load double, double addrspace(1)* %in2
    %r2 = fadd double %r0, %r1
    store double %r2, double addrspace(1)* %out
    ret void

Modified: llvm/trunk/test/CodeGen/R600/fcmp-cnd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fcmp-cnd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fcmp-cnd.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fcmp-cnd.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 
 define void @test(i32 addrspace(1)* %out, float addrspace(1)* %in) {
 entry:
-  %0 = load float addrspace(1)* %in
+  %0 = load float, float addrspace(1)* %in
   %cmp = fcmp oeq float %0, 0.000000e+00
   %value = select i1 %cmp, i32 2, i32 3 
   store i32 %value, i32 addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/R600/fcmp-cnde-int-args.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fcmp-cnde-int-args.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fcmp-cnde-int-args.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fcmp-cnde-int-args.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
 
 define void @test(i32 addrspace(1)* %out, float addrspace(1)* %in) {
 entry:
-  %0 = load float addrspace(1)* %in
+  %0 = load float, float addrspace(1)* %in
   %cmp = fcmp oeq float %0, 0.000000e+00
   %value = select i1 %cmp, i32 -1, i32 0
   store i32 %value, i32 addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/R600/fcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fcmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fcmp.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fcmp.ll Fri Feb 27 15:17:42 2015
@@ -5,9 +5,9 @@
 
 define void @fcmp_sext(i32 addrspace(1)* %out, float addrspace(1)* %in) {
 entry:
-  %0 = load float addrspace(1)* %in
+  %0 = load float, float addrspace(1)* %in
   %arrayidx1 = getelementptr inbounds float, float addrspace(1)* %in, i32 1
-  %1 = load float addrspace(1)* %arrayidx1
+  %1 = load float, float addrspace(1)* %arrayidx1
   %cmp = fcmp oeq float %0, %1
   %sext = sext i1 %cmp to i32
   store i32 %sext, i32 addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/R600/fcmp64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fcmp64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fcmp64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fcmp64.ll Fri Feb 27 15:17:42 2015
@@ -5,8 +5,8 @@
 ; CHECK: v_cmp_nge_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
 define void @flt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
                      double addrspace(1)* %in2) {
-   %r0 = load double addrspace(1)* %in1
-   %r1 = load double addrspace(1)* %in2
+   %r0 = load double, double addrspace(1)* %in1
+   %r1 = load double, double addrspace(1)* %in2
    %r2 = fcmp ult double %r0, %r1
    %r3 = zext i1 %r2 to i32
    store i32 %r3, i32 addrspace(1)* %out
@@ -17,8 +17,8 @@ define void @flt_f64(i32 addrspace(1)* %
 ; CHECK: v_cmp_ngt_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
 define void @fle_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
                      double addrspace(1)* %in2) {
-   %r0 = load double addrspace(1)* %in1
-   %r1 = load double addrspace(1)* %in2
+   %r0 = load double, double addrspace(1)* %in1
+   %r1 = load double, double addrspace(1)* %in2
    %r2 = fcmp ule double %r0, %r1
    %r3 = zext i1 %r2 to i32
    store i32 %r3, i32 addrspace(1)* %out
@@ -29,8 +29,8 @@ define void @fle_f64(i32 addrspace(1)* %
 ; CHECK: v_cmp_nle_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
 define void @fgt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
                      double addrspace(1)* %in2) {
-   %r0 = load double addrspace(1)* %in1
-   %r1 = load double addrspace(1)* %in2
+   %r0 = load double, double addrspace(1)* %in1
+   %r1 = load double, double addrspace(1)* %in2
    %r2 = fcmp ugt double %r0, %r1
    %r3 = zext i1 %r2 to i32
    store i32 %r3, i32 addrspace(1)* %out
@@ -41,8 +41,8 @@ define void @fgt_f64(i32 addrspace(1)* %
 ; CHECK: v_cmp_nlt_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
 define void @fge_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
                      double addrspace(1)* %in2) {
-   %r0 = load double addrspace(1)* %in1
-   %r1 = load double addrspace(1)* %in2
+   %r0 = load double, double addrspace(1)* %in1
+   %r1 = load double, double addrspace(1)* %in2
    %r2 = fcmp uge double %r0, %r1
    %r3 = zext i1 %r2 to i32
    store i32 %r3, i32 addrspace(1)* %out
@@ -53,8 +53,8 @@ define void @fge_f64(i32 addrspace(1)* %
 ; CHECK: v_cmp_neq_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
 define void @fne_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                      double addrspace(1)* %in2) {
-   %r0 = load double addrspace(1)* %in1
-   %r1 = load double addrspace(1)* %in2
+   %r0 = load double, double addrspace(1)* %in1
+   %r1 = load double, double addrspace(1)* %in2
    %r2 = fcmp une double %r0, %r1
    %r3 = select i1 %r2, double %r0, double %r1
    store double %r3, double addrspace(1)* %out
@@ -65,8 +65,8 @@ define void @fne_f64(double addrspace(1)
 ; CHECK: v_cmp_nlg_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
 define void @feq_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                      double addrspace(1)* %in2) {
-   %r0 = load double addrspace(1)* %in1
-   %r1 = load double addrspace(1)* %in2
+   %r0 = load double, double addrspace(1)* %in1
+   %r1 = load double, double addrspace(1)* %in2
    %r2 = fcmp ueq double %r0, %r1
    %r3 = select i1 %r2, double %r0, double %r1
    store double %r3, double addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/R600/fconst64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fconst64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fconst64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fconst64.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 ; CHECK-DAG: s_mov_b32 {{s[0-9]+}}, 0
 
 define void @fconst_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
-   %r1 = load double addrspace(1)* %in
+   %r1 = load double, double addrspace(1)* %in
    %r2 = fadd double %r1, 5.000000e+00
    store double %r2, double addrspace(1)* %out
    ret void

Modified: llvm/trunk/test/CodeGen/R600/fdiv.f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fdiv.f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fdiv.f64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fdiv.f64.ll Fri Feb 27 15:17:42 2015
@@ -31,8 +31,8 @@
 ; COMMON: s_endpgm
 define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %in) nounwind {
   %gep.1 = getelementptr double, double addrspace(1)* %in, i32 1
-  %num = load double addrspace(1)* %in
-  %den = load double addrspace(1)* %gep.1
+  %num = load double, double addrspace(1)* %in
+  %den = load double, double addrspace(1)* %gep.1
   %result = fdiv double %num, %den
   store double %result, double addrspace(1)* %out
   ret void
@@ -40,7 +40,7 @@ define void @fdiv_f64(double addrspace(1
 
 ; COMMON-LABEL: {{^}}fdiv_f64_s_v:
 define void @fdiv_f64_s_v(double addrspace(1)* %out, double addrspace(1)* %in, double %num) nounwind {
-  %den = load double addrspace(1)* %in
+  %den = load double, double addrspace(1)* %in
   %result = fdiv double %num, %den
   store double %result, double addrspace(1)* %out
   ret void
@@ -48,7 +48,7 @@ define void @fdiv_f64_s_v(double addrspa
 
 ; COMMON-LABEL: {{^}}fdiv_f64_v_s:
 define void @fdiv_f64_v_s(double addrspace(1)* %out, double addrspace(1)* %in, double %den) nounwind {
-  %num = load double addrspace(1)* %in
+  %num = load double, double addrspace(1)* %in
   %result = fdiv double %num, %den
   store double %result, double addrspace(1)* %out
   ret void
@@ -64,8 +64,8 @@ define void @fdiv_f64_s_s(double addrspa
 ; COMMON-LABEL: {{^}}v_fdiv_v2f64:
 define void @v_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in) nounwind {
   %gep.1 = getelementptr <2 x double>, <2 x double> addrspace(1)* %in, i32 1
-  %num = load <2 x double> addrspace(1)* %in
-  %den = load <2 x double> addrspace(1)* %gep.1
+  %num = load <2 x double>, <2 x double> addrspace(1)* %in
+  %den = load <2 x double>, <2 x double> addrspace(1)* %gep.1
   %result = fdiv <2 x double> %num, %den
   store <2 x double> %result, <2 x double> addrspace(1)* %out
   ret void
@@ -81,8 +81,8 @@ define void @s_fdiv_v2f64(<2 x double> a
 ; COMMON-LABEL: {{^}}v_fdiv_v4f64:
 define void @v_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) nounwind {
   %gep.1 = getelementptr <4 x double>, <4 x double> addrspace(1)* %in, i32 1
-  %num = load <4 x double> addrspace(1)* %in
-  %den = load <4 x double> addrspace(1)* %gep.1
+  %num = load <4 x double>, <4 x double> addrspace(1)* %in
+  %den = load <4 x double>, <4 x double> addrspace(1)* %gep.1
   %result = fdiv <4 x double> %num, %den
   store <4 x double> %result, <4 x double> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/fdiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fdiv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fdiv.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fdiv.ll Fri Feb 27 15:17:42 2015
@@ -60,8 +60,8 @@ entry:
 ; SI-DAG: v_mul_f32
 define void @fdiv_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
-  %a = load <4 x float> addrspace(1) * %in
-  %b = load <4 x float> addrspace(1) * %b_ptr
+  %a = load <4 x float>, <4 x float> addrspace(1) * %in
+  %b = load <4 x float>, <4 x float> addrspace(1) * %b_ptr
   %result = fdiv <4 x float> %a, %b
   store <4 x float> %result, <4 x float> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/fetch-limits.r600.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fetch-limits.r600.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fetch-limits.r600.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fetch-limits.r600.ll Fri Feb 27 15:17:42 2015
@@ -9,15 +9,15 @@
 
 define void @fetch_limits_r600() #0 {
 entry:
-  %0 = load <4 x float> addrspace(8)* null
-  %1 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
-  %2 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
-  %3 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
-  %4 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
-  %5 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
-  %6 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
-  %7 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
-  %8 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
+  %0 = load <4 x float>, <4 x float> addrspace(8)* null
+  %1 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %2 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %3 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+  %4 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+  %5 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+  %6 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
+  %7 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+  %8 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
   %res0 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %0, i32 0, i32 0, i32 1)
   %res1 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %1, i32 0, i32 0, i32 1)
   %res2 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %2, i32 0, i32 0, i32 1)

Modified: llvm/trunk/test/CodeGen/R600/fetch-limits.r700+.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fetch-limits.r700%2B.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fetch-limits.r700+.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fetch-limits.r700+.ll Fri Feb 27 15:17:42 2015
@@ -18,23 +18,23 @@
 
 define void @fetch_limits_r700() #0 {
 entry:
-  %0 = load <4 x float> addrspace(8)* null
-  %1 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
-  %2 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
-  %3 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
-  %4 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
-  %5 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
-  %6 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
-  %7 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
-  %8 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
-  %9 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
-  %10 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
-  %11 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 11)
-  %12 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 12)
-  %13 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 13)
-  %14 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 14)
-  %15 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 15)
-  %16 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 16)
+  %0 = load <4 x float>, <4 x float> addrspace(8)* null
+  %1 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %2 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %3 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+  %4 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+  %5 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+  %6 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
+  %7 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+  %8 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
+  %9 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+  %10 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
+  %11 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 11)
+  %12 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 12)
+  %13 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 13)
+  %14 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 14)
+  %15 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 15)
+  %16 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 16)
   %res0 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %0, i32 0, i32 0, i32 1)
   %res1 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %1, i32 0, i32 0, i32 1)
   %res2 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %2, i32 0, i32 0, i32 1)

Modified: llvm/trunk/test/CodeGen/R600/flat-address-space.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/flat-address-space.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/flat-address-space.ll (original)
+++ llvm/trunk/test/CodeGen/R600/flat-address-space.ll Fri Feb 27 15:17:42 2015
@@ -26,7 +26,7 @@ global:
 end:
   %fptr = phi i32 addrspace(4)* [ %flat_local, %local ], [ %flat_global, %global ]
   store i32 %x, i32 addrspace(4)* %fptr, align 4
-;  %val = load i32 addrspace(4)* %fptr, align 4
+;  %val = load i32, i32 addrspace(4)* %fptr, align 4
 ;  store i32 %val, i32 addrspace(1)* %out, align 4
   ret void
 }
@@ -87,7 +87,7 @@ define void @store_flat_trunc_i8(i8 addr
 ; CHECK: flat_load_dword
 define void @load_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %gptr) #0 {
   %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
-  %fload = load i32 addrspace(4)* %fptr, align 4
+  %fload = load i32, i32 addrspace(4)* %fptr, align 4
   store i32 %fload, i32 addrspace(1)* %out, align 4
   ret void
 }
@@ -96,7 +96,7 @@ define void @load_flat_i32(i32 addrspace
 ; CHECK: flat_load_dwordx2
 define void @load_flat_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %gptr) #0 {
   %fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
-  %fload = load i64 addrspace(4)* %fptr, align 4
+  %fload = load i64, i64 addrspace(4)* %fptr, align 4
   store i64 %fload, i64 addrspace(1)* %out, align 8
   ret void
 }
@@ -105,7 +105,7 @@ define void @load_flat_i64(i64 addrspace
 ; CHECK: flat_load_dwordx4
 define void @load_flat_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %gptr) #0 {
   %fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
-  %fload = load <4 x i32> addrspace(4)* %fptr, align 4
+  %fload = load <4 x i32>, <4 x i32> addrspace(4)* %fptr, align 4
   store <4 x i32> %fload, <4 x i32> addrspace(1)* %out, align 8
   ret void
 }
@@ -114,7 +114,7 @@ define void @load_flat_v4i32(<4 x i32> a
 ; CHECK: flat_load_sbyte
 define void @sextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
   %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
-  %fload = load i8 addrspace(4)* %fptr, align 4
+  %fload = load i8, i8 addrspace(4)* %fptr, align 4
   %ext = sext i8 %fload to i32
   store i32 %ext, i32 addrspace(1)* %out, align 4
   ret void
@@ -124,7 +124,7 @@ define void @sextload_flat_i8(i32 addrsp
 ; CHECK: flat_load_ubyte
 define void @zextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
   %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
-  %fload = load i8 addrspace(4)* %fptr, align 4
+  %fload = load i8, i8 addrspace(4)* %fptr, align 4
   %ext = zext i8 %fload to i32
   store i32 %ext, i32 addrspace(1)* %out, align 4
   ret void
@@ -134,7 +134,7 @@ define void @zextload_flat_i8(i32 addrsp
 ; CHECK: flat_load_sshort
 define void @sextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
   %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
-  %fload = load i16 addrspace(4)* %fptr, align 4
+  %fload = load i16, i16 addrspace(4)* %fptr, align 4
   %ext = sext i16 %fload to i32
   store i32 %ext, i32 addrspace(1)* %out, align 4
   ret void
@@ -144,7 +144,7 @@ define void @sextload_flat_i16(i32 addrs
 ; CHECK: flat_load_ushort
 define void @zextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
   %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
-  %fload = load i16 addrspace(4)* %fptr, align 4
+  %fload = load i16, i16 addrspace(4)* %fptr, align 4
   %ext = zext i16 %fload to i32
   store i32 %ext, i32 addrspace(1)* %out, align 4
   ret void
@@ -171,7 +171,7 @@ define void @store_flat_scratch(i32 addr
   store i32 %x, i32 addrspace(4)* %fptr
   ; Dummy call
   call void @llvm.AMDGPU.barrier.local() #1
-  %reload = load i32 addrspace(4)* %fptr, align 4
+  %reload = load i32, i32 addrspace(4)* %fptr, align 4
   store i32 %reload, i32 addrspace(1)* %out, align 4
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/fma-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fma-combine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fma-combine.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fma-combine.ll Fri Feb 27 15:17:42 2015
@@ -20,9 +20,9 @@ define void @combine_to_fma_f64_0(double
   %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
   %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
 
-  %a = load double addrspace(1)* %gep.0
-  %b = load double addrspace(1)* %gep.1
-  %c = load double addrspace(1)* %gep.2
+  %a = load double, double addrspace(1)* %gep.0
+  %b = load double, double addrspace(1)* %gep.1
+  %c = load double, double addrspace(1)* %gep.2
 
   %mul = fmul double %a, %b
   %fma = fadd double %mul, %c
@@ -50,10 +50,10 @@ define void @combine_to_fma_f64_0_2use(d
   %gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
   %gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0
-  %b = load double addrspace(1)* %gep.1
-  %c = load double addrspace(1)* %gep.2
-  %d = load double addrspace(1)* %gep.3
+  %a = load double, double addrspace(1)* %gep.0
+  %b = load double, double addrspace(1)* %gep.1
+  %c = load double, double addrspace(1)* %gep.2
+  %d = load double, double addrspace(1)* %gep.3
 
   %mul = fmul double %a, %b
   %fma0 = fadd double %mul, %c
@@ -77,9 +77,9 @@ define void @combine_to_fma_f64_1(double
   %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
   %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
 
-  %a = load double addrspace(1)* %gep.0
-  %b = load double addrspace(1)* %gep.1
-  %c = load double addrspace(1)* %gep.2
+  %a = load double, double addrspace(1)* %gep.0
+  %b = load double, double addrspace(1)* %gep.1
+  %c = load double, double addrspace(1)* %gep.2
 
   %mul = fmul double %a, %b
   %fma = fadd double %c, %mul
@@ -101,9 +101,9 @@ define void @combine_to_fma_fsub_0_f64(d
   %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
   %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
 
-  %a = load double addrspace(1)* %gep.0
-  %b = load double addrspace(1)* %gep.1
-  %c = load double addrspace(1)* %gep.2
+  %a = load double, double addrspace(1)* %gep.0
+  %b = load double, double addrspace(1)* %gep.1
+  %c = load double, double addrspace(1)* %gep.2
 
   %mul = fmul double %a, %b
   %fma = fsub double %mul, %c
@@ -131,10 +131,10 @@ define void @combine_to_fma_fsub_f64_0_2
   %gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
   %gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0
-  %b = load double addrspace(1)* %gep.1
-  %c = load double addrspace(1)* %gep.2
-  %d = load double addrspace(1)* %gep.3
+  %a = load double, double addrspace(1)* %gep.0
+  %b = load double, double addrspace(1)* %gep.1
+  %c = load double, double addrspace(1)* %gep.2
+  %d = load double, double addrspace(1)* %gep.3
 
   %mul = fmul double %a, %b
   %fma0 = fsub double %mul, %c
@@ -158,9 +158,9 @@ define void @combine_to_fma_fsub_1_f64(d
   %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
   %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
 
-  %a = load double addrspace(1)* %gep.0
-  %b = load double addrspace(1)* %gep.1
-  %c = load double addrspace(1)* %gep.2
+  %a = load double, double addrspace(1)* %gep.0
+  %b = load double, double addrspace(1)* %gep.1
+  %c = load double, double addrspace(1)* %gep.2
 
   %mul = fmul double %a, %b
   %fma = fsub double %c, %mul
@@ -188,10 +188,10 @@ define void @combine_to_fma_fsub_1_f64_2
   %gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
   %gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0
-  %b = load double addrspace(1)* %gep.1
-  %c = load double addrspace(1)* %gep.2
-  %d = load double addrspace(1)* %gep.3
+  %a = load double, double addrspace(1)* %gep.0
+  %b = load double, double addrspace(1)* %gep.1
+  %c = load double, double addrspace(1)* %gep.2
+  %d = load double, double addrspace(1)* %gep.3
 
   %mul = fmul double %a, %b
   %fma0 = fsub double %c, %mul
@@ -215,9 +215,9 @@ define void @combine_to_fma_fsub_2_f64(d
   %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
   %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
 
-  %a = load double addrspace(1)* %gep.0
-  %b = load double addrspace(1)* %gep.1
-  %c = load double addrspace(1)* %gep.2
+  %a = load double, double addrspace(1)* %gep.0
+  %b = load double, double addrspace(1)* %gep.1
+  %c = load double, double addrspace(1)* %gep.2
 
   %mul = fmul double %a, %b
   %mul.neg = fsub double -0.0, %mul
@@ -246,10 +246,10 @@ define void @combine_to_fma_fsub_2_f64_2
   %gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
   %gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0
-  %b = load double addrspace(1)* %gep.1
-  %c = load double addrspace(1)* %gep.2
-  %d = load double addrspace(1)* %gep.3
+  %a = load double, double addrspace(1)* %gep.0
+  %b = load double, double addrspace(1)* %gep.1
+  %c = load double, double addrspace(1)* %gep.2
+  %d = load double, double addrspace(1)* %gep.3
 
   %mul = fmul double %a, %b
   %mul.neg = fsub double -0.0, %mul
@@ -280,10 +280,10 @@ define void @combine_to_fma_fsub_2_f64_2
   %gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
   %gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0
-  %b = load double addrspace(1)* %gep.1
-  %c = load double addrspace(1)* %gep.2
-  %d = load double addrspace(1)* %gep.3
+  %a = load double, double addrspace(1)* %gep.0
+  %b = load double, double addrspace(1)* %gep.1
+  %c = load double, double addrspace(1)* %gep.2
+  %d = load double, double addrspace(1)* %gep.3
 
   %mul = fmul double %a, %b
   %mul.neg = fsub double -0.0, %mul
@@ -315,11 +315,11 @@ define void @aggressive_combine_to_fma_f
   %gep.4 = getelementptr double, double addrspace(1)* %gep.0, i32 4
   %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
 
-  %x = load double addrspace(1)* %gep.0
-  %y = load double addrspace(1)* %gep.1
-  %z = load double addrspace(1)* %gep.2
-  %u = load double addrspace(1)* %gep.3
-  %v = load double addrspace(1)* %gep.4
+  %x = load double, double addrspace(1)* %gep.0
+  %y = load double, double addrspace(1)* %gep.1
+  %z = load double, double addrspace(1)* %gep.2
+  %u = load double, double addrspace(1)* %gep.3
+  %v = load double, double addrspace(1)* %gep.4
 
   %tmp0 = fmul double %u, %v
   %tmp1 = call double @llvm.fma.f64(double %x, double %y, double %tmp0) #0
@@ -350,11 +350,11 @@ define void @aggressive_combine_to_fma_f
   %gep.4 = getelementptr double, double addrspace(1)* %gep.0, i32 4
   %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
 
-  %x = load double addrspace(1)* %gep.0
-  %y = load double addrspace(1)* %gep.1
-  %z = load double addrspace(1)* %gep.2
-  %u = load double addrspace(1)* %gep.3
-  %v = load double addrspace(1)* %gep.4
+  %x = load double, double addrspace(1)* %gep.0
+  %y = load double, double addrspace(1)* %gep.1
+  %z = load double, double addrspace(1)* %gep.2
+  %u = load double, double addrspace(1)* %gep.3
+  %v = load double, double addrspace(1)* %gep.4
 
   %tmp0 = fmul double %u, %v
   %tmp1 = call double @llvm.fma.f64(double %y, double %z, double %tmp0) #0

Modified: llvm/trunk/test/CodeGen/R600/fma.f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fma.f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fma.f64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fma.f64.ll Fri Feb 27 15:17:42 2015
@@ -10,9 +10,9 @@ declare <4 x double> @llvm.fma.v4f64(<4
 ; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
 define void @fma_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                      double addrspace(1)* %in2, double addrspace(1)* %in3) {
-   %r0 = load double addrspace(1)* %in1
-   %r1 = load double addrspace(1)* %in2
-   %r2 = load double addrspace(1)* %in3
+   %r0 = load double, double addrspace(1)* %in1
+   %r1 = load double, double addrspace(1)* %in2
+   %r2 = load double, double addrspace(1)* %in3
    %r3 = tail call double @llvm.fma.f64(double %r0, double %r1, double %r2)
    store double %r3, double addrspace(1)* %out
    ret void
@@ -23,9 +23,9 @@ define void @fma_f64(double addrspace(1)
 ; SI: v_fma_f64
 define void @fma_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
                        <2 x double> addrspace(1)* %in2, <2 x double> addrspace(1)* %in3) {
-   %r0 = load <2 x double> addrspace(1)* %in1
-   %r1 = load <2 x double> addrspace(1)* %in2
-   %r2 = load <2 x double> addrspace(1)* %in3
+   %r0 = load <2 x double>, <2 x double> addrspace(1)* %in1
+   %r1 = load <2 x double>, <2 x double> addrspace(1)* %in2
+   %r2 = load <2 x double>, <2 x double> addrspace(1)* %in3
    %r3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %r0, <2 x double> %r1, <2 x double> %r2)
    store <2 x double> %r3, <2 x double> addrspace(1)* %out
    ret void
@@ -38,9 +38,9 @@ define void @fma_v2f64(<2 x double> addr
 ; SI: v_fma_f64
 define void @fma_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in1,
                        <4 x double> addrspace(1)* %in2, <4 x double> addrspace(1)* %in3) {
-   %r0 = load <4 x double> addrspace(1)* %in1
-   %r1 = load <4 x double> addrspace(1)* %in2
-   %r2 = load <4 x double> addrspace(1)* %in3
+   %r0 = load <4 x double>, <4 x double> addrspace(1)* %in1
+   %r1 = load <4 x double>, <4 x double> addrspace(1)* %in2
+   %r2 = load <4 x double>, <4 x double> addrspace(1)* %in3
    %r3 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %r0, <4 x double> %r1, <4 x double> %r2)
    store <4 x double> %r3, <4 x double> addrspace(1)* %out
    ret void

Modified: llvm/trunk/test/CodeGen/R600/fma.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fma.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fma.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fma.ll Fri Feb 27 15:17:42 2015
@@ -14,9 +14,9 @@ declare i32 @llvm.r600.read.tidig.x() no
 ; EG: FMA {{\*? *}}[[RES]]
 define void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
                      float addrspace(1)* %in2, float addrspace(1)* %in3) {
-  %r0 = load float addrspace(1)* %in1
-  %r1 = load float addrspace(1)* %in2
-  %r2 = load float addrspace(1)* %in3
+  %r0 = load float, float addrspace(1)* %in1
+  %r1 = load float, float addrspace(1)* %in2
+  %r2 = load float, float addrspace(1)* %in3
   %r3 = tail call float @llvm.fma.f32(float %r0, float %r1, float %r2)
   store float %r3, float addrspace(1)* %out
   ret void
@@ -31,9 +31,9 @@ define void @fma_f32(float addrspace(1)*
 ; EG-DAG: FMA {{\*? *}}[[RES]].[[CHHI]]
 define void @fma_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
                        <2 x float> addrspace(1)* %in2, <2 x float> addrspace(1)* %in3) {
-  %r0 = load <2 x float> addrspace(1)* %in1
-  %r1 = load <2 x float> addrspace(1)* %in2
-  %r2 = load <2 x float> addrspace(1)* %in3
+  %r0 = load <2 x float>, <2 x float> addrspace(1)* %in1
+  %r1 = load <2 x float>, <2 x float> addrspace(1)* %in2
+  %r2 = load <2 x float>, <2 x float> addrspace(1)* %in3
   %r3 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %r0, <2 x float> %r1, <2 x float> %r2)
   store <2 x float> %r3, <2 x float> addrspace(1)* %out
   ret void
@@ -52,9 +52,9 @@ define void @fma_v2f32(<2 x float> addrs
 ; EG-DAG: FMA {{\*? *}}[[RES]].W
 define void @fma_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
                        <4 x float> addrspace(1)* %in2, <4 x float> addrspace(1)* %in3) {
-  %r0 = load <4 x float> addrspace(1)* %in1
-  %r1 = load <4 x float> addrspace(1)* %in2
-  %r2 = load <4 x float> addrspace(1)* %in3
+  %r0 = load <4 x float>, <4 x float> addrspace(1)* %in1
+  %r1 = load <4 x float>, <4 x float> addrspace(1)* %in2
+  %r2 = load <4 x float>, <4 x float> addrspace(1)* %in3
   %r3 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %r0, <4 x float> %r1, <4 x float> %r2)
   store <4 x float> %r3, <4 x float> addrspace(1)* %out
   ret void
@@ -68,8 +68,8 @@ define void @fma_commute_mul_inline_imm_
   %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %in.a.gep, align 4
-  %b = load float addrspace(1)* %in.b.gep, align 4
+  %a = load float, float addrspace(1)* %in.a.gep, align 4
+  %b = load float, float addrspace(1)* %in.b.gep, align 4
 
   %fma = call float @llvm.fma.f32(float %a, float 2.0, float %b)
   store float %fma, float addrspace(1)* %out.gep, align 4
@@ -83,8 +83,8 @@ define void @fma_commute_mul_s_f32(float
   %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %in.a.gep, align 4
-  %c = load float addrspace(1)* %in.b.gep, align 4
+  %a = load float, float addrspace(1)* %in.a.gep, align 4
+  %c = load float, float addrspace(1)* %in.b.gep, align 4
 
   %fma = call float @llvm.fma.f32(float %a, float %b, float %c)
   store float %fma, float addrspace(1)* %out.gep, align 4

Modified: llvm/trunk/test/CodeGen/R600/fmax3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fmax3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fmax3.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fmax3.ll Fri Feb 27 15:17:42 2015
@@ -11,9 +11,9 @@ declare float @llvm.maxnum.f32(float, fl
 ; SI: buffer_store_dword [[RESULT]],
 ; SI: s_endpgm
 define void @test_fmax3_olt_0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) nounwind {
-  %a = load float addrspace(1)* %aptr, align 4
-  %b = load float addrspace(1)* %bptr, align 4
-  %c = load float addrspace(1)* %cptr, align 4
+  %a = load float, float addrspace(1)* %aptr, align 4
+  %b = load float, float addrspace(1)* %bptr, align 4
+  %c = load float, float addrspace(1)* %cptr, align 4
   %f0 = call float @llvm.maxnum.f32(float %a, float %b) nounwind readnone
   %f1 = call float @llvm.maxnum.f32(float %f0, float %c) nounwind readnone
   store float %f1, float addrspace(1)* %out, align 4
@@ -29,9 +29,9 @@ define void @test_fmax3_olt_0(float addr
 ; SI: buffer_store_dword [[RESULT]],
 ; SI: s_endpgm
 define void @test_fmax3_olt_1(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) nounwind {
-  %a = load float addrspace(1)* %aptr, align 4
-  %b = load float addrspace(1)* %bptr, align 4
-  %c = load float addrspace(1)* %cptr, align 4
+  %a = load float, float addrspace(1)* %aptr, align 4
+  %b = load float, float addrspace(1)* %bptr, align 4
+  %c = load float, float addrspace(1)* %cptr, align 4
   %f0 = call float @llvm.maxnum.f32(float %a, float %b) nounwind readnone
   %f1 = call float @llvm.maxnum.f32(float %c, float %f0) nounwind readnone
   store float %f1, float addrspace(1)* %out, align 4

Modified: llvm/trunk/test/CodeGen/R600/fmax_legacy.f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fmax_legacy.f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fmax_legacy.f64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fmax_legacy.f64.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@ define void @test_fmax_legacy_uge_f64(do
   %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0, align 8
-  %b = load double addrspace(1)* %gep.1, align 8
+  %a = load double, double addrspace(1)* %gep.0, align 8
+  %b = load double, double addrspace(1)* %gep.1, align 8
 
   %cmp = fcmp uge double %a, %b
   %val = select i1 %cmp, double %a, double %b
@@ -24,8 +24,8 @@ define void @test_fmax_legacy_oge_f64(do
   %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0, align 8
-  %b = load double addrspace(1)* %gep.1, align 8
+  %a = load double, double addrspace(1)* %gep.0, align 8
+  %b = load double, double addrspace(1)* %gep.1, align 8
 
   %cmp = fcmp oge double %a, %b
   %val = select i1 %cmp, double %a, double %b
@@ -39,8 +39,8 @@ define void @test_fmax_legacy_ugt_f64(do
   %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0, align 8
-  %b = load double addrspace(1)* %gep.1, align 8
+  %a = load double, double addrspace(1)* %gep.0, align 8
+  %b = load double, double addrspace(1)* %gep.1, align 8
 
   %cmp = fcmp ugt double %a, %b
   %val = select i1 %cmp, double %a, double %b
@@ -54,8 +54,8 @@ define void @test_fmax_legacy_ogt_f64(do
   %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0, align 8
-  %b = load double addrspace(1)* %gep.1, align 8
+  %a = load double, double addrspace(1)* %gep.0, align 8
+  %b = load double, double addrspace(1)* %gep.1, align 8
 
   %cmp = fcmp ogt double %a, %b
   %val = select i1 %cmp, double %a, double %b

Modified: llvm/trunk/test/CodeGen/R600/fmax_legacy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fmax_legacy.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fmax_legacy.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fmax_legacy.ll Fri Feb 27 15:17:42 2015
@@ -18,8 +18,8 @@ define void @test_fmax_legacy_uge_f32(fl
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %cmp = fcmp uge float %a, %b
   %val = select i1 %cmp, float %a, float %b
@@ -38,8 +38,8 @@ define void @test_fmax_legacy_oge_f32(fl
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %cmp = fcmp oge float %a, %b
   %val = select i1 %cmp, float %a, float %b
@@ -58,8 +58,8 @@ define void @test_fmax_legacy_ugt_f32(fl
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %cmp = fcmp ugt float %a, %b
   %val = select i1 %cmp, float %a, float %b
@@ -78,8 +78,8 @@ define void @test_fmax_legacy_ogt_f32(fl
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %cmp = fcmp ogt float %a, %b
   %val = select i1 %cmp, float %a, float %b
@@ -102,8 +102,8 @@ define void @test_fmax_legacy_ogt_f32_mu
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %cmp = fcmp ogt float %a, %b
   %val = select i1 %cmp, float %a, float %b

Modified: llvm/trunk/test/CodeGen/R600/fmin3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fmin3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fmin3.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fmin3.ll Fri Feb 27 15:17:42 2015
@@ -12,9 +12,9 @@ declare float @llvm.minnum.f32(float, fl
 ; SI: buffer_store_dword [[RESULT]],
 ; SI: s_endpgm
 define void @test_fmin3_olt_0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) nounwind {
-  %a = load float addrspace(1)* %aptr, align 4
-  %b = load float addrspace(1)* %bptr, align 4
-  %c = load float addrspace(1)* %cptr, align 4
+  %a = load float, float addrspace(1)* %aptr, align 4
+  %b = load float, float addrspace(1)* %bptr, align 4
+  %c = load float, float addrspace(1)* %cptr, align 4
   %f0 = call float @llvm.minnum.f32(float %a, float %b) nounwind readnone
   %f1 = call float @llvm.minnum.f32(float %f0, float %c) nounwind readnone
   store float %f1, float addrspace(1)* %out, align 4
@@ -30,9 +30,9 @@ define void @test_fmin3_olt_0(float addr
 ; SI: buffer_store_dword [[RESULT]],
 ; SI: s_endpgm
 define void @test_fmin3_olt_1(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) nounwind {
-  %a = load float addrspace(1)* %aptr, align 4
-  %b = load float addrspace(1)* %bptr, align 4
-  %c = load float addrspace(1)* %cptr, align 4
+  %a = load float, float addrspace(1)* %aptr, align 4
+  %b = load float, float addrspace(1)* %bptr, align 4
+  %c = load float, float addrspace(1)* %cptr, align 4
   %f0 = call float @llvm.minnum.f32(float %a, float %b) nounwind readnone
   %f1 = call float @llvm.minnum.f32(float %c, float %f0) nounwind readnone
   store float %f1, float addrspace(1)* %out, align 4

Modified: llvm/trunk/test/CodeGen/R600/fmin_legacy.f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fmin_legacy.f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fmin_legacy.f64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fmin_legacy.f64.ll Fri Feb 27 15:17:42 2015
@@ -19,8 +19,8 @@ define void @test_fmin_legacy_ule_f64(do
   %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0, align 8
-  %b = load double addrspace(1)* %gep.1, align 8
+  %a = load double, double addrspace(1)* %gep.0, align 8
+  %b = load double, double addrspace(1)* %gep.1, align 8
 
   %cmp = fcmp ule double %a, %b
   %val = select i1 %cmp, double %a, double %b
@@ -34,8 +34,8 @@ define void @test_fmin_legacy_ole_f64(do
   %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0, align 8
-  %b = load double addrspace(1)* %gep.1, align 8
+  %a = load double, double addrspace(1)* %gep.0, align 8
+  %b = load double, double addrspace(1)* %gep.1, align 8
 
   %cmp = fcmp ole double %a, %b
   %val = select i1 %cmp, double %a, double %b
@@ -49,8 +49,8 @@ define void @test_fmin_legacy_olt_f64(do
   %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0, align 8
-  %b = load double addrspace(1)* %gep.1, align 8
+  %a = load double, double addrspace(1)* %gep.0, align 8
+  %b = load double, double addrspace(1)* %gep.1, align 8
 
   %cmp = fcmp olt double %a, %b
   %val = select i1 %cmp, double %a, double %b
@@ -64,8 +64,8 @@ define void @test_fmin_legacy_ult_f64(do
   %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0, align 8
-  %b = load double addrspace(1)* %gep.1, align 8
+  %a = load double, double addrspace(1)* %gep.0, align 8
+  %b = load double, double addrspace(1)* %gep.1, align 8
 
   %cmp = fcmp ult double %a, %b
   %val = select i1 %cmp, double %a, double %b

Modified: llvm/trunk/test/CodeGen/R600/fmin_legacy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fmin_legacy.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fmin_legacy.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fmin_legacy.ll Fri Feb 27 15:17:42 2015
@@ -30,8 +30,8 @@ define void @test_fmin_legacy_ule_f32(fl
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %cmp = fcmp ule float %a, %b
   %val = select i1 %cmp, float %a, float %b
@@ -49,8 +49,8 @@ define void @test_fmin_legacy_ole_f32(fl
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %cmp = fcmp ole float %a, %b
   %val = select i1 %cmp, float %a, float %b
@@ -68,8 +68,8 @@ define void @test_fmin_legacy_olt_f32(fl
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %cmp = fcmp olt float %a, %b
   %val = select i1 %cmp, float %a, float %b
@@ -87,8 +87,8 @@ define void @test_fmin_legacy_ult_f32(fl
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %cmp = fcmp ult float %a, %b
   %val = select i1 %cmp, float %a, float %b
@@ -109,8 +109,8 @@ define void @test_fmin_legacy_ole_f32_mu
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %cmp = fcmp ole float %a, %b
   %val0 = select i1 %cmp, float %a, float %b

Modified: llvm/trunk/test/CodeGen/R600/fmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fmul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fmul.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fmul.ll Fri Feb 27 15:17:42 2015
@@ -43,8 +43,8 @@ entry:
 ; SI: v_mul_f32
 define void @fmul_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
-  %a = load <4 x float> addrspace(1) * %in
-  %b = load <4 x float> addrspace(1) * %b_ptr
+  %a = load <4 x float>, <4 x float> addrspace(1) * %in
+  %b = load <4 x float>, <4 x float> addrspace(1) * %b_ptr
   %result = fmul <4 x float> %a, %b
   store <4 x float> %result, <4 x float> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/fmul64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fmul64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fmul64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fmul64.ll Fri Feb 27 15:17:42 2015
@@ -5,8 +5,8 @@
 ; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
 define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                       double addrspace(1)* %in2) {
-   %r0 = load double addrspace(1)* %in1
-   %r1 = load double addrspace(1)* %in2
+   %r0 = load double, double addrspace(1)* %in1
+   %r1 = load double, double addrspace(1)* %in2
    %r2 = fmul double %r0, %r1
    store double %r2, double addrspace(1)* %out
    ret void
@@ -17,8 +17,8 @@ define void @fmul_f64(double addrspace(1
 ; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
 define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
                         <2 x double> addrspace(1)* %in2) {
-   %r0 = load <2 x double> addrspace(1)* %in1
-   %r1 = load <2 x double> addrspace(1)* %in2
+   %r0 = load <2 x double>, <2 x double> addrspace(1)* %in1
+   %r1 = load <2 x double>, <2 x double> addrspace(1)* %in2
    %r2 = fmul <2 x double> %r0, %r1
    store <2 x double> %r2, <2 x double> addrspace(1)* %out
    ret void
@@ -31,8 +31,8 @@ define void @fmul_v2f64(<2 x double> add
 ; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
 define void @fmul_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in1,
                         <4 x double> addrspace(1)* %in2) {
-   %r0 = load <4 x double> addrspace(1)* %in1
-   %r1 = load <4 x double> addrspace(1)* %in2
+   %r0 = load <4 x double>, <4 x double> addrspace(1)* %in1
+   %r1 = load <4 x double>, <4 x double> addrspace(1)* %in2
    %r2 = fmul <4 x double> %r0, %r1
    store <4 x double> %r2, <4 x double> addrspace(1)* %out
    ret void

Modified: llvm/trunk/test/CodeGen/R600/fmuladd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fmuladd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fmuladd.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fmuladd.ll Fri Feb 27 15:17:42 2015
@@ -10,9 +10,9 @@ declare float @llvm.fabs.f32(float) noun
 
 define void @fmuladd_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
                          float addrspace(1)* %in2, float addrspace(1)* %in3) {
-   %r0 = load float addrspace(1)* %in1
-   %r1 = load float addrspace(1)* %in2
-   %r2 = load float addrspace(1)* %in3
+   %r0 = load float, float addrspace(1)* %in1
+   %r1 = load float, float addrspace(1)* %in2
+   %r2 = load float, float addrspace(1)* %in3
    %r3 = tail call float @llvm.fmuladd.f32(float %r0, float %r1, float %r2)
    store float %r3, float addrspace(1)* %out
    ret void
@@ -23,9 +23,9 @@ define void @fmuladd_f32(float addrspace
 
 define void @fmuladd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                          double addrspace(1)* %in2, double addrspace(1)* %in3) {
-   %r0 = load double addrspace(1)* %in1
-   %r1 = load double addrspace(1)* %in2
-   %r2 = load double addrspace(1)* %in3
+   %r0 = load double, double addrspace(1)* %in1
+   %r1 = load double, double addrspace(1)* %in2
+   %r2 = load double, double addrspace(1)* %in3
    %r3 = tail call double @llvm.fmuladd.f64(double %r0, double %r1, double %r2)
    store double %r3, double addrspace(1)* %out
    ret void
@@ -42,8 +42,8 @@ define void @fmuladd_2.0_a_b_f32(float a
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %r1 = load float addrspace(1)* %gep.0
-  %r2 = load float addrspace(1)* %gep.1
+  %r1 = load float, float addrspace(1)* %gep.0
+  %r2 = load float, float addrspace(1)* %gep.1
 
   %r3 = tail call float @llvm.fmuladd.f32(float 2.0, float %r1, float %r2)
   store float %r3, float addrspace(1)* %gep.out
@@ -61,8 +61,8 @@ define void @fmuladd_a_2.0_b_f32(float a
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %r1 = load float addrspace(1)* %gep.0
-  %r2 = load float addrspace(1)* %gep.1
+  %r1 = load float, float addrspace(1)* %gep.0
+  %r2 = load float, float addrspace(1)* %gep.1
 
   %r3 = tail call float @llvm.fmuladd.f32(float %r1, float 2.0, float %r2)
   store float %r3, float addrspace(1)* %gep.out
@@ -82,8 +82,8 @@ define void @fadd_a_a_b_f32(float addrsp
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %r0 = load float addrspace(1)* %gep.0
-  %r1 = load float addrspace(1)* %gep.1
+  %r0 = load float, float addrspace(1)* %gep.0
+  %r1 = load float, float addrspace(1)* %gep.1
 
   %add.0 = fadd float %r0, %r0
   %add.1 = fadd float %add.0, %r1
@@ -104,8 +104,8 @@ define void @fadd_b_a_a_f32(float addrsp
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %r0 = load float addrspace(1)* %gep.0
-  %r1 = load float addrspace(1)* %gep.1
+  %r0 = load float, float addrspace(1)* %gep.0
+  %r1 = load float, float addrspace(1)* %gep.1
 
   %add.0 = fadd float %r0, %r0
   %add.1 = fadd float %r1, %add.0
@@ -124,8 +124,8 @@ define void @fmuladd_neg_2.0_a_b_f32(flo
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %r1 = load float addrspace(1)* %gep.0
-  %r2 = load float addrspace(1)* %gep.1
+  %r1 = load float, float addrspace(1)* %gep.0
+  %r2 = load float, float addrspace(1)* %gep.1
 
   %r3 = tail call float @llvm.fmuladd.f32(float -2.0, float %r1, float %r2)
   store float %r3, float addrspace(1)* %gep.out
@@ -144,8 +144,8 @@ define void @fmuladd_neg_2.0_neg_a_b_f32
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %r1 = load float addrspace(1)* %gep.0
-  %r2 = load float addrspace(1)* %gep.1
+  %r1 = load float, float addrspace(1)* %gep.0
+  %r2 = load float, float addrspace(1)* %gep.1
 
   %r1.fneg = fsub float -0.000000e+00, %r1
 
@@ -166,8 +166,8 @@ define void @fmuladd_2.0_neg_a_b_f32(flo
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %r1 = load float addrspace(1)* %gep.0
-  %r2 = load float addrspace(1)* %gep.1
+  %r1 = load float, float addrspace(1)* %gep.0
+  %r2 = load float, float addrspace(1)* %gep.1
 
   %r1.fneg = fsub float -0.000000e+00, %r1
 
@@ -188,8 +188,8 @@ define void @fmuladd_2.0_a_neg_b_f32(flo
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %r1 = load float addrspace(1)* %gep.0
-  %r2 = load float addrspace(1)* %gep.1
+  %r1 = load float, float addrspace(1)* %gep.0
+  %r2 = load float, float addrspace(1)* %gep.1
 
   %r2.fneg = fsub float -0.000000e+00, %r2
 

Modified: llvm/trunk/test/CodeGen/R600/fneg-fabs.f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fneg-fabs.f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fneg-fabs.f64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fneg-fabs.f64.ll Fri Feb 27 15:17:42 2015
@@ -15,8 +15,8 @@ define void @fneg_fabs_fadd_f64(double a
 }
 
 define void @v_fneg_fabs_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %xptr, double addrspace(1)* %yptr) {
-  %x = load double addrspace(1)* %xptr, align 8
-  %y = load double addrspace(1)* %xptr, align 8
+  %x = load double, double addrspace(1)* %xptr, align 8
+  %y = load double, double addrspace(1)* %xptr, align 8
   %fabs = call double @llvm.fabs.f64(double %x)
   %fsub = fsub double -0.000000e+00, %fabs
   %fadd = fadd double %y, %fsub

Modified: llvm/trunk/test/CodeGen/R600/fneg-fabs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fneg-fabs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fneg-fabs.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fneg-fabs.ll Fri Feb 27 15:17:42 2015
@@ -72,7 +72,7 @@ define void @fneg_fabs_f32(float addrspa
 ; FUNC-LABEL: {{^}}v_fneg_fabs_f32:
 ; SI: v_or_b32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
 define void @v_fneg_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
-  %val = load float addrspace(1)* %in, align 4
+  %val = load float, float addrspace(1)* %in, align 4
   %fabs = call float @llvm.fabs.f32(float %val)
   %fsub = fsub float -0.000000e+00, %fabs
   store float %fsub, float addrspace(1)* %out, align 4

Modified: llvm/trunk/test/CodeGen/R600/fp16_to_fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fp16_to_fp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fp16_to_fp.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fp16_to_fp.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ declare double @llvm.convert.from.fp16.f
 ; SI: v_cvt_f32_f16_e32 [[RESULT:v[0-9]+]], [[VAL]]
 ; SI: buffer_store_dword [[RESULT]]
 define void @test_convert_fp16_to_fp32(float addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
-  %val = load i16 addrspace(1)* %in, align 2
+  %val = load i16, i16 addrspace(1)* %in, align 2
   %cvt = call float @llvm.convert.from.fp16.f32(i16 %val) nounwind readnone
   store float %cvt, float addrspace(1)* %out, align 4
   ret void
@@ -22,7 +22,7 @@ define void @test_convert_fp16_to_fp32(f
 ; SI: v_cvt_f64_f32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[RESULT32]]
 ; SI: buffer_store_dwordx2 [[RESULT]]
 define void @test_convert_fp16_to_fp64(double addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
-  %val = load i16 addrspace(1)* %in, align 2
+  %val = load i16, i16 addrspace(1)* %in, align 2
   %cvt = call double @llvm.convert.from.fp16.f64(i16 %val) nounwind readnone
   store double %cvt, double addrspace(1)* %out, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/R600/fp32_to_fp16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fp32_to_fp16.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fp32_to_fp16.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fp32_to_fp16.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ declare i16 @llvm.convert.to.fp16.f32(fl
 ; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[VAL]]
 ; SI: buffer_store_short [[RESULT]]
 define void @test_convert_fp32_to_fp16(i16 addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
-  %val = load float addrspace(1)* %in, align 4
+  %val = load float, float addrspace(1)* %in, align 4
   %cvt = call i16 @llvm.convert.to.fp16.f32(float %val) nounwind readnone
   store i16 %cvt, i16 addrspace(1)* %out, align 2
   ret void

Modified: llvm/trunk/test/CodeGen/R600/fp_to_sint.f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fp_to_sint.f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fp_to_sint.f64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fp_to_sint.f64.ll Fri Feb 27 15:17:42 2015
@@ -49,7 +49,7 @@ define void @fp_to_sint_v4f64_v4i32(<4 x
 define void @fp_to_sint_i64_f64(i64 addrspace(1)* %out, double addrspace(1)* %in) {
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
-  %val = load double addrspace(1)* %gep, align 8
+  %val = load double, double addrspace(1)* %gep, align 8
   %cast = fptosi double %val to i64
   store i64 %cast, i64 addrspace(1)* %out, align 8
   ret void

Modified: llvm/trunk/test/CodeGen/R600/fp_to_sint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fp_to_sint.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fp_to_sint.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fp_to_sint.ll Fri Feb 27 15:17:42 2015
@@ -44,7 +44,7 @@ define void @fp_to_sint_v2i32(<2 x i32>
 ; SI: v_cvt_i32_f32_e32
 ; SI: v_cvt_i32_f32_e32
 define void @fp_to_sint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
-  %value = load <4 x float> addrspace(1) * %in
+  %value = load <4 x float>, <4 x float> addrspace(1) * %in
   %result = fptosi <4 x float> %value to <4 x i32>
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/fp_to_uint.f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fp_to_uint.f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fp_to_uint.f64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fp_to_uint.f64.ll Fri Feb 27 15:17:42 2015
@@ -49,7 +49,7 @@ define void @fp_to_uint_v4i32_v4f64(<4 x
 define void @fp_to_uint_i64_f64(i64 addrspace(1)* %out, double addrspace(1)* %in) {
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
-  %val = load double addrspace(1)* %gep, align 8
+  %val = load double, double addrspace(1)* %gep, align 8
   %cast = fptoui double %val to i64
   store i64 %cast, i64 addrspace(1)* %out, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/R600/fp_to_uint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fp_to_uint.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fp_to_uint.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fp_to_uint.ll Fri Feb 27 15:17:42 2015
@@ -36,7 +36,7 @@ define void @fp_to_uint_v2f32_to_v2i32(<
 ; SI: v_cvt_u32_f32_e32
 
 define void @fp_to_uint_v4f32_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
-  %value = load <4 x float> addrspace(1) * %in
+  %value = load <4 x float>, <4 x float> addrspace(1) * %in
   %result = fptoui <4 x float> %value to <4 x i32>
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/frem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/frem.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/frem.ll (original)
+++ llvm/trunk/test/CodeGen/R600/frem.ll Fri Feb 27 15:17:42 2015
@@ -16,8 +16,8 @@
 define void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
                       float addrspace(1)* %in2) #0 {
    %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
-   %r0 = load float addrspace(1)* %in1, align 4
-   %r1 = load float addrspace(1)* %gep2, align 4
+   %r0 = load float, float addrspace(1)* %in1, align 4
+   %r1 = load float, float addrspace(1)* %gep2, align 4
    %r2 = frem float %r0, %r1
    store float %r2, float addrspace(1)* %out, align 4
    ret void
@@ -35,8 +35,8 @@ define void @frem_f32(float addrspace(1)
 define void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
                              float addrspace(1)* %in2) #1 {
    %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
-   %r0 = load float addrspace(1)* %in1, align 4
-   %r1 = load float addrspace(1)* %gep2, align 4
+   %r0 = load float, float addrspace(1)* %in1, align 4
+   %r1 = load float, float addrspace(1)* %gep2, align 4
    %r2 = frem float %r0, %r1
    store float %r2, float addrspace(1)* %out, align 4
    ret void
@@ -55,8 +55,8 @@ define void @unsafe_frem_f32(float addrs
 ; GCN: s_endpgm
 define void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                       double addrspace(1)* %in2) #0 {
-   %r0 = load double addrspace(1)* %in1, align 8
-   %r1 = load double addrspace(1)* %in2, align 8
+   %r0 = load double, double addrspace(1)* %in1, align 8
+   %r1 = load double, double addrspace(1)* %in2, align 8
    %r2 = frem double %r0, %r1
    store double %r2, double addrspace(1)* %out, align 8
    ret void
@@ -71,8 +71,8 @@ define void @frem_f64(double addrspace(1
 ; GCN: s_endpgm
 define void @unsafe_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                              double addrspace(1)* %in2) #1 {
-   %r0 = load double addrspace(1)* %in1, align 8
-   %r1 = load double addrspace(1)* %in2, align 8
+   %r0 = load double, double addrspace(1)* %in1, align 8
+   %r1 = load double, double addrspace(1)* %in2, align 8
    %r2 = frem double %r0, %r1
    store double %r2, double addrspace(1)* %out, align 8
    ret void
@@ -81,8 +81,8 @@ define void @unsafe_frem_f64(double addr
 define void @frem_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
                         <2 x float> addrspace(1)* %in2) #0 {
    %gep2 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in2, i32 4
-   %r0 = load <2 x float> addrspace(1)* %in1, align 8
-   %r1 = load <2 x float> addrspace(1)* %gep2, align 8
+   %r0 = load <2 x float>, <2 x float> addrspace(1)* %in1, align 8
+   %r1 = load <2 x float>, <2 x float> addrspace(1)* %gep2, align 8
    %r2 = frem <2 x float> %r0, %r1
    store <2 x float> %r2, <2 x float> addrspace(1)* %out, align 8
    ret void
@@ -91,8 +91,8 @@ define void @frem_v2f32(<2 x float> addr
 define void @frem_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
                         <4 x float> addrspace(1)* %in2) #0 {
    %gep2 = getelementptr <4 x float>, <4 x float> addrspace(1)* %in2, i32 4
-   %r0 = load <4 x float> addrspace(1)* %in1, align 16
-   %r1 = load <4 x float> addrspace(1)* %gep2, align 16
+   %r0 = load <4 x float>, <4 x float> addrspace(1)* %in1, align 16
+   %r1 = load <4 x float>, <4 x float> addrspace(1)* %gep2, align 16
    %r2 = frem <4 x float> %r0, %r1
    store <4 x float> %r2, <4 x float> addrspace(1)* %out, align 16
    ret void
@@ -101,8 +101,8 @@ define void @frem_v4f32(<4 x float> addr
 define void @frem_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
                         <2 x double> addrspace(1)* %in2) #0 {
    %gep2 = getelementptr <2 x double>, <2 x double> addrspace(1)* %in2, i32 4
-   %r0 = load <2 x double> addrspace(1)* %in1, align 16
-   %r1 = load <2 x double> addrspace(1)* %gep2, align 16
+   %r0 = load <2 x double>, <2 x double> addrspace(1)* %in1, align 16
+   %r1 = load <2 x double>, <2 x double> addrspace(1)* %gep2, align 16
    %r2 = frem <2 x double> %r0, %r1
    store <2 x double> %r2, <2 x double> addrspace(1)* %out, align 16
    ret void

Modified: llvm/trunk/test/CodeGen/R600/fsqrt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fsqrt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fsqrt.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fsqrt.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@
 ; CHECK: v_sqrt_f32_e32 {{v[0-9]+, v[0-9]+}}
 
 define void @fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
-   %r0 = load float addrspace(1)* %in
+   %r0 = load float, float addrspace(1)* %in
    %r1 = call float @llvm.sqrt.f32(float %r0)
    store float %r1, float addrspace(1)* %out
    ret void
@@ -19,7 +19,7 @@ define void @fsqrt_f32(float addrspace(1
 ; CHECK: v_sqrt_f64_e32 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
 
 define void @fsqrt_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
-   %r0 = load double addrspace(1)* %in
+   %r0 = load double, double addrspace(1)* %in
    %r1 = call double @llvm.sqrt.f64(double %r0)
    store double %r1, double addrspace(1)* %out
    ret void

Modified: llvm/trunk/test/CodeGen/R600/fsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fsub.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fsub.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fsub.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@
 ; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
 define void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
   %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
-  %a = load float addrspace(1)* %in, align 4
-  %b = load float addrspace(1)* %b_ptr, align 4
+  %a = load float, float addrspace(1)* %in, align 4
+  %b = load float, float addrspace(1)* %b_ptr, align 4
   %result = fsub float %a, %b
   store float %result, float addrspace(1)* %out, align 4
   ret void
@@ -53,8 +53,8 @@ define void @fsub_v2f32(<2 x float> addr
 ; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
 define void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
-  %a = load <4 x float> addrspace(1)* %in, align 16
-  %b = load <4 x float> addrspace(1)* %b_ptr, align 16
+  %a = load <4 x float>, <4 x float> addrspace(1)* %in, align 16
+  %b = load <4 x float>, <4 x float> addrspace(1)* %b_ptr, align 16
   %result = fsub <4 x float> %a, %b
   store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
   ret void

Modified: llvm/trunk/test/CodeGen/R600/fsub64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/fsub64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/fsub64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/fsub64.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@ declare double @llvm.fabs.f64(double) #0
 ; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
 define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                       double addrspace(1)* %in2) {
-  %r0 = load double addrspace(1)* %in1
-  %r1 = load double addrspace(1)* %in2
+  %r0 = load double, double addrspace(1)* %in1
+  %r1 = load double, double addrspace(1)* %in2
   %r2 = fsub double %r0, %r1
   store double %r2, double addrspace(1)* %out
   ret void
@@ -18,8 +18,8 @@ define void @fsub_f64(double addrspace(1
 ; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -\|v\[[0-9]+:[0-9]+\]\|}}
 define void @fsub_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                            double addrspace(1)* %in2) {
-  %r0 = load double addrspace(1)* %in1
-  %r1 = load double addrspace(1)* %in2
+  %r0 = load double, double addrspace(1)* %in1
+  %r1 = load double, double addrspace(1)* %in2
   %r1.fabs = call double @llvm.fabs.f64(double %r1) #0
   %r2 = fsub double %r0, %r1.fabs
   store double %r2, double addrspace(1)* %out
@@ -30,8 +30,8 @@ define void @fsub_fabs_f64(double addrsp
 ; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], |v\[[0-9]+:[0-9]+\]|, -v\[[0-9]+:[0-9]+\]}}
 define void @fsub_fabs_inv_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                                double addrspace(1)* %in2) {
-  %r0 = load double addrspace(1)* %in1
-  %r1 = load double addrspace(1)* %in2
+  %r0 = load double, double addrspace(1)* %in1
+  %r1 = load double, double addrspace(1)* %in2
   %r0.fabs = call double @llvm.fabs.f64(double %r0) #0
   %r2 = fsub double %r0.fabs, %r1
   store double %r2, double addrspace(1)* %out
@@ -86,8 +86,8 @@ define void @fsub_v2f64(<2 x double> add
 ; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
 define void @fsub_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x double>, <4 x double> addrspace(1)* %in, i32 1
-  %a = load <4 x double> addrspace(1)* %in
-  %b = load <4 x double> addrspace(1)* %b_ptr
+  %a = load <4 x double>, <4 x double> addrspace(1)* %in
+  %b = load <4 x double>, <4 x double> addrspace(1)* %b_ptr
   %result = fsub <4 x double> %a, %b
   store <4 x double> %result, <4 x double> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/ftrunc.f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/ftrunc.f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/ftrunc.f64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/ftrunc.f64.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ declare <16 x double> @llvm.trunc.v16f64
 ; SI: v_bfe_u32 {{v[0-9]+}}, {{v[0-9]+}}, 20, 11
 ; SI: s_endpgm
 define void @v_ftrunc_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
-  %x = load double addrspace(1)* %in, align 8
+  %x = load double, double addrspace(1)* %in, align 8
   %y = call double @llvm.trunc.f64(double %x) nounwind readnone
   store double %y, double addrspace(1)* %out, align 8
   ret void

Modified: llvm/trunk/test/CodeGen/R600/global-directive.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/global-directive.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/global-directive.ll (original)
+++ llvm/trunk/test/CodeGen/R600/global-directive.ll Fri Feb 27 15:17:42 2015
@@ -7,8 +7,8 @@
 ; SI: {{^}}foo:
 define void @foo(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %a = load i32 addrspace(1)* %in
-  %b = load i32 addrspace(1)* %b_ptr
+  %a = load i32, i32 addrspace(1)* %in
+  %b = load i32, i32 addrspace(1)* %b_ptr
   %result = add i32 %a, %b
   store i32 %result, i32 addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/global-extload-i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/global-extload-i1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/global-extload-i1.ll (original)
+++ llvm/trunk/test/CodeGen/R600/global-extload-i1.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
 ; SI: buffer_store_dword
 ; SI: s_endpgm
 define void @zextload_global_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %a = load i1 addrspace(1)* %in
+  %a = load i1, i1 addrspace(1)* %in
   %ext = zext i1 %a to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void
@@ -20,7 +20,7 @@ define void @zextload_global_i1_to_i32(i
 ; SI: buffer_store_dword
 ; SI: s_endpgm
 define void @sextload_global_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %a = load i1 addrspace(1)* %in
+  %a = load i1, i1 addrspace(1)* %in
   %ext = sext i1 %a to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void
@@ -29,7 +29,7 @@ define void @sextload_global_i1_to_i32(i
 ; FUNC-LABEL: {{^}}zextload_global_v1i1_to_v1i32:
 ; SI: s_endpgm
 define void @zextload_global_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i1> addrspace(1)* %in
+  %load = load <1 x i1>, <1 x i1> addrspace(1)* %in
   %ext = zext <1 x i1> %load to <1 x i32>
   store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
   ret void
@@ -38,7 +38,7 @@ define void @zextload_global_v1i1_to_v1i
 ; FUNC-LABEL: {{^}}sextload_global_v1i1_to_v1i32:
 ; SI: s_endpgm
 define void @sextload_global_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i1> addrspace(1)* %in
+  %load = load <1 x i1>, <1 x i1> addrspace(1)* %in
   %ext = sext <1 x i1> %load to <1 x i32>
   store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
   ret void
@@ -47,7 +47,7 @@ define void @sextload_global_v1i1_to_v1i
 ; FUNC-LABEL: {{^}}zextload_global_v2i1_to_v2i32:
 ; SI: s_endpgm
 define void @zextload_global_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i1> addrspace(1)* %in
+  %load = load <2 x i1>, <2 x i1> addrspace(1)* %in
   %ext = zext <2 x i1> %load to <2 x i32>
   store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
   ret void
@@ -56,7 +56,7 @@ define void @zextload_global_v2i1_to_v2i
 ; FUNC-LABEL: {{^}}sextload_global_v2i1_to_v2i32:
 ; SI: s_endpgm
 define void @sextload_global_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i1> addrspace(1)* %in
+  %load = load <2 x i1>, <2 x i1> addrspace(1)* %in
   %ext = sext <2 x i1> %load to <2 x i32>
   store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
   ret void
@@ -65,7 +65,7 @@ define void @sextload_global_v2i1_to_v2i
 ; FUNC-LABEL: {{^}}zextload_global_v4i1_to_v4i32:
 ; SI: s_endpgm
 define void @zextload_global_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i1> addrspace(1)* %in
+  %load = load <4 x i1>, <4 x i1> addrspace(1)* %in
   %ext = zext <4 x i1> %load to <4 x i32>
   store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
   ret void
@@ -74,7 +74,7 @@ define void @zextload_global_v4i1_to_v4i
 ; FUNC-LABEL: {{^}}sextload_global_v4i1_to_v4i32:
 ; SI: s_endpgm
 define void @sextload_global_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i1> addrspace(1)* %in
+  %load = load <4 x i1>, <4 x i1> addrspace(1)* %in
   %ext = sext <4 x i1> %load to <4 x i32>
   store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
   ret void
@@ -83,7 +83,7 @@ define void @sextload_global_v4i1_to_v4i
 ; FUNC-LABEL: {{^}}zextload_global_v8i1_to_v8i32:
 ; SI: s_endpgm
 define void @zextload_global_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i1> addrspace(1)* %in
+  %load = load <8 x i1>, <8 x i1> addrspace(1)* %in
   %ext = zext <8 x i1> %load to <8 x i32>
   store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
   ret void
@@ -92,7 +92,7 @@ define void @zextload_global_v8i1_to_v8i
 ; FUNC-LABEL: {{^}}sextload_global_v8i1_to_v8i32:
 ; SI: s_endpgm
 define void @sextload_global_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i1> addrspace(1)* %in
+  %load = load <8 x i1>, <8 x i1> addrspace(1)* %in
   %ext = sext <8 x i1> %load to <8 x i32>
   store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
   ret void
@@ -101,7 +101,7 @@ define void @sextload_global_v8i1_to_v8i
 ; FUNC-LABEL: {{^}}zextload_global_v16i1_to_v16i32:
 ; SI: s_endpgm
 define void @zextload_global_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i1> addrspace(1)* %in
+  %load = load <16 x i1>, <16 x i1> addrspace(1)* %in
   %ext = zext <16 x i1> %load to <16 x i32>
   store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
   ret void
@@ -110,7 +110,7 @@ define void @zextload_global_v16i1_to_v1
 ; FUNC-LABEL: {{^}}sextload_global_v16i1_to_v16i32:
 ; SI: s_endpgm
 define void @sextload_global_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i1> addrspace(1)* %in
+  %load = load <16 x i1>, <16 x i1> addrspace(1)* %in
   %ext = sext <16 x i1> %load to <16 x i32>
   store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
   ret void
@@ -119,7 +119,7 @@ define void @sextload_global_v16i1_to_v1
 ; XFUNC-LABEL: {{^}}zextload_global_v32i1_to_v32i32:
 ; XSI: s_endpgm
 ; define void @zextload_global_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i1> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <32 x i1> addrspace(1)* %in
+;   %load = load <32 x i1>, <32 x i1> addrspace(1)* %in
 ;   %ext = zext <32 x i1> %load to <32 x i32>
 ;   store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
 ;   ret void
@@ -128,7 +128,7 @@ define void @sextload_global_v16i1_to_v1
 ; XFUNC-LABEL: {{^}}sextload_global_v32i1_to_v32i32:
 ; XSI: s_endpgm
 ; define void @sextload_global_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i1> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <32 x i1> addrspace(1)* %in
+;   %load = load <32 x i1>, <32 x i1> addrspace(1)* %in
 ;   %ext = sext <32 x i1> %load to <32 x i32>
 ;   store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
 ;   ret void
@@ -137,7 +137,7 @@ define void @sextload_global_v16i1_to_v1
 ; XFUNC-LABEL: {{^}}zextload_global_v64i1_to_v64i32:
 ; XSI: s_endpgm
 ; define void @zextload_global_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i1> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <64 x i1> addrspace(1)* %in
+;   %load = load <64 x i1>, <64 x i1> addrspace(1)* %in
 ;   %ext = zext <64 x i1> %load to <64 x i32>
 ;   store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
 ;   ret void
@@ -146,7 +146,7 @@ define void @sextload_global_v16i1_to_v1
 ; XFUNC-LABEL: {{^}}sextload_global_v64i1_to_v64i32:
 ; XSI: s_endpgm
 ; define void @sextload_global_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i1> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <64 x i1> addrspace(1)* %in
+;   %load = load <64 x i1>, <64 x i1> addrspace(1)* %in
 ;   %ext = sext <64 x i1> %load to <64 x i32>
 ;   store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
 ;   ret void
@@ -157,7 +157,7 @@ define void @sextload_global_v16i1_to_v1
 ; SI: v_mov_b32_e32 {{v[0-9]+}}, 0{{$}}
 ; SI: buffer_store_dwordx2
 define void @zextload_global_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %a = load i1 addrspace(1)* %in
+  %a = load i1, i1 addrspace(1)* %in
   %ext = zext i1 %a to i64
   store i64 %ext, i64 addrspace(1)* %out
   ret void
@@ -169,7 +169,7 @@ define void @zextload_global_i1_to_i64(i
 ; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[BFE]]
 ; SI: buffer_store_dwordx2
 define void @sextload_global_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %a = load i1 addrspace(1)* %in
+  %a = load i1, i1 addrspace(1)* %in
   %ext = sext i1 %a to i64
   store i64 %ext, i64 addrspace(1)* %out
   ret void
@@ -178,7 +178,7 @@ define void @sextload_global_i1_to_i64(i
 ; FUNC-LABEL: {{^}}zextload_global_v1i1_to_v1i64:
 ; SI: s_endpgm
 define void @zextload_global_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i1> addrspace(1)* %in
+  %load = load <1 x i1>, <1 x i1> addrspace(1)* %in
   %ext = zext <1 x i1> %load to <1 x i64>
   store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
   ret void
@@ -187,7 +187,7 @@ define void @zextload_global_v1i1_to_v1i
 ; FUNC-LABEL: {{^}}sextload_global_v1i1_to_v1i64:
 ; SI: s_endpgm
 define void @sextload_global_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i1> addrspace(1)* %in
+  %load = load <1 x i1>, <1 x i1> addrspace(1)* %in
   %ext = sext <1 x i1> %load to <1 x i64>
   store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
   ret void
@@ -196,7 +196,7 @@ define void @sextload_global_v1i1_to_v1i
 ; FUNC-LABEL: {{^}}zextload_global_v2i1_to_v2i64:
 ; SI: s_endpgm
 define void @zextload_global_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i1> addrspace(1)* %in
+  %load = load <2 x i1>, <2 x i1> addrspace(1)* %in
   %ext = zext <2 x i1> %load to <2 x i64>
   store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
   ret void
@@ -205,7 +205,7 @@ define void @zextload_global_v2i1_to_v2i
 ; FUNC-LABEL: {{^}}sextload_global_v2i1_to_v2i64:
 ; SI: s_endpgm
 define void @sextload_global_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i1> addrspace(1)* %in
+  %load = load <2 x i1>, <2 x i1> addrspace(1)* %in
   %ext = sext <2 x i1> %load to <2 x i64>
   store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
   ret void
@@ -214,7 +214,7 @@ define void @sextload_global_v2i1_to_v2i
 ; FUNC-LABEL: {{^}}zextload_global_v4i1_to_v4i64:
 ; SI: s_endpgm
 define void @zextload_global_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i1> addrspace(1)* %in
+  %load = load <4 x i1>, <4 x i1> addrspace(1)* %in
   %ext = zext <4 x i1> %load to <4 x i64>
   store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
   ret void
@@ -223,7 +223,7 @@ define void @zextload_global_v4i1_to_v4i
 ; FUNC-LABEL: {{^}}sextload_global_v4i1_to_v4i64:
 ; SI: s_endpgm
 define void @sextload_global_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i1> addrspace(1)* %in
+  %load = load <4 x i1>, <4 x i1> addrspace(1)* %in
   %ext = sext <4 x i1> %load to <4 x i64>
   store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
   ret void
@@ -232,7 +232,7 @@ define void @sextload_global_v4i1_to_v4i
 ; FUNC-LABEL: {{^}}zextload_global_v8i1_to_v8i64:
 ; SI: s_endpgm
 define void @zextload_global_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i1> addrspace(1)* %in
+  %load = load <8 x i1>, <8 x i1> addrspace(1)* %in
   %ext = zext <8 x i1> %load to <8 x i64>
   store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
   ret void
@@ -241,7 +241,7 @@ define void @zextload_global_v8i1_to_v8i
 ; FUNC-LABEL: {{^}}sextload_global_v8i1_to_v8i64:
 ; SI: s_endpgm
 define void @sextload_global_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i1> addrspace(1)* %in
+  %load = load <8 x i1>, <8 x i1> addrspace(1)* %in
   %ext = sext <8 x i1> %load to <8 x i64>
   store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
   ret void
@@ -250,7 +250,7 @@ define void @sextload_global_v8i1_to_v8i
 ; FUNC-LABEL: {{^}}zextload_global_v16i1_to_v16i64:
 ; SI: s_endpgm
 define void @zextload_global_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i1> addrspace(1)* %in
+  %load = load <16 x i1>, <16 x i1> addrspace(1)* %in
   %ext = zext <16 x i1> %load to <16 x i64>
   store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
   ret void
@@ -259,7 +259,7 @@ define void @zextload_global_v16i1_to_v1
 ; FUNC-LABEL: {{^}}sextload_global_v16i1_to_v16i64:
 ; SI: s_endpgm
 define void @sextload_global_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i1> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i1> addrspace(1)* %in
+  %load = load <16 x i1>, <16 x i1> addrspace(1)* %in
   %ext = sext <16 x i1> %load to <16 x i64>
   store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
   ret void
@@ -268,7 +268,7 @@ define void @sextload_global_v16i1_to_v1
 ; XFUNC-LABEL: {{^}}zextload_global_v32i1_to_v32i64:
 ; XSI: s_endpgm
 ; define void @zextload_global_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i1> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <32 x i1> addrspace(1)* %in
+;   %load = load <32 x i1>, <32 x i1> addrspace(1)* %in
 ;   %ext = zext <32 x i1> %load to <32 x i64>
 ;   store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
 ;   ret void
@@ -277,7 +277,7 @@ define void @sextload_global_v16i1_to_v1
 ; XFUNC-LABEL: {{^}}sextload_global_v32i1_to_v32i64:
 ; XSI: s_endpgm
 ; define void @sextload_global_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i1> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <32 x i1> addrspace(1)* %in
+;   %load = load <32 x i1>, <32 x i1> addrspace(1)* %in
 ;   %ext = sext <32 x i1> %load to <32 x i64>
 ;   store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
 ;   ret void
@@ -286,7 +286,7 @@ define void @sextload_global_v16i1_to_v1
 ; XFUNC-LABEL: {{^}}zextload_global_v64i1_to_v64i64:
 ; XSI: s_endpgm
 ; define void @zextload_global_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i1> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <64 x i1> addrspace(1)* %in
+;   %load = load <64 x i1>, <64 x i1> addrspace(1)* %in
 ;   %ext = zext <64 x i1> %load to <64 x i64>
 ;   store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
 ;   ret void
@@ -295,7 +295,7 @@ define void @sextload_global_v16i1_to_v1
 ; XFUNC-LABEL: {{^}}sextload_global_v64i1_to_v64i64:
 ; XSI: s_endpgm
 ; define void @sextload_global_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i1> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <64 x i1> addrspace(1)* %in
+;   %load = load <64 x i1>, <64 x i1> addrspace(1)* %in
 ;   %ext = sext <64 x i1> %load to <64 x i64>
 ;   store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
 ;   ret void
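
Every hunk in these test updates applies the same textual change: `load` now names its result (pointee) type explicitly before the pointer operand instead of inferring it from the pointer type. A minimal before/after sketch, with function and value names chosen for illustration rather than taken from any one test above:

; Old form -- result type inferred from the pointer operand:
;   %v = load <4 x i1> addrspace(1)* %p
; New form -- pointee type spelled out, then a comma, then the pointer:
define <4 x i32> @load_sext_sketch(<4 x i1> addrspace(1)* %p) nounwind {
  %v = load <4 x i1>, <4 x i1> addrspace(1)* %p
  %e = sext <4 x i1> %v to <4 x i32>
  ret <4 x i32> %e
}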

Modified: llvm/trunk/test/CodeGen/R600/global-extload-i16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/global-extload-i16.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/global-extload-i16.ll (original)
+++ llvm/trunk/test/CodeGen/R600/global-extload-i16.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
 ; SI: buffer_store_dword
 ; SI: s_endpgm
 define void @zextload_global_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
-  %a = load i16 addrspace(1)* %in
+  %a = load i16, i16 addrspace(1)* %in
   %ext = zext i16 %a to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void
@@ -19,7 +19,7 @@ define void @zextload_global_i16_to_i32(
 ; SI: buffer_store_dword
 ; SI: s_endpgm
 define void @sextload_global_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
-  %a = load i16 addrspace(1)* %in
+  %a = load i16, i16 addrspace(1)* %in
   %ext = sext i16 %a to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void
@@ -29,7 +29,7 @@ define void @sextload_global_i16_to_i32(
 ; SI: buffer_load_ushort
 ; SI: s_endpgm
 define void @zextload_global_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i16> addrspace(1)* %in
+  %load = load <1 x i16>, <1 x i16> addrspace(1)* %in
   %ext = zext <1 x i16> %load to <1 x i32>
   store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
   ret void
@@ -39,7 +39,7 @@ define void @zextload_global_v1i16_to_v1
 ; SI: buffer_load_sshort
 ; SI: s_endpgm
 define void @sextload_global_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i16> addrspace(1)* %in
+  %load = load <1 x i16>, <1 x i16> addrspace(1)* %in
   %ext = sext <1 x i16> %load to <1 x i32>
   store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
   ret void
@@ -48,7 +48,7 @@ define void @sextload_global_v1i16_to_v1
 ; FUNC-LABEL: {{^}}zextload_global_v2i16_to_v2i32:
 ; SI: s_endpgm
 define void @zextload_global_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i16> addrspace(1)* %in
+  %load = load <2 x i16>, <2 x i16> addrspace(1)* %in
   %ext = zext <2 x i16> %load to <2 x i32>
   store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
   ret void
@@ -57,7 +57,7 @@ define void @zextload_global_v2i16_to_v2
 ; FUNC-LABEL: {{^}}sextload_global_v2i16_to_v2i32:
 ; SI: s_endpgm
 define void @sextload_global_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i16> addrspace(1)* %in
+  %load = load <2 x i16>, <2 x i16> addrspace(1)* %in
   %ext = sext <2 x i16> %load to <2 x i32>
   store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
   ret void
@@ -66,7 +66,7 @@ define void @sextload_global_v2i16_to_v2
 ; FUNC-LABEL: {{^}}zextload_global_v4i16_to_v4i32:
 ; SI: s_endpgm
 define void @zextload_global_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i16> addrspace(1)* %in
+  %load = load <4 x i16>, <4 x i16> addrspace(1)* %in
   %ext = zext <4 x i16> %load to <4 x i32>
   store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
   ret void
@@ -75,7 +75,7 @@ define void @zextload_global_v4i16_to_v4
 ; FUNC-LABEL: {{^}}sextload_global_v4i16_to_v4i32:
 ; SI: s_endpgm
 define void @sextload_global_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i16> addrspace(1)* %in
+  %load = load <4 x i16>, <4 x i16> addrspace(1)* %in
   %ext = sext <4 x i16> %load to <4 x i32>
   store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
   ret void
@@ -84,7 +84,7 @@ define void @sextload_global_v4i16_to_v4
 ; FUNC-LABEL: {{^}}zextload_global_v8i16_to_v8i32:
 ; SI: s_endpgm
 define void @zextload_global_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i16> addrspace(1)* %in
+  %load = load <8 x i16>, <8 x i16> addrspace(1)* %in
   %ext = zext <8 x i16> %load to <8 x i32>
   store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
   ret void
@@ -93,7 +93,7 @@ define void @zextload_global_v8i16_to_v8
 ; FUNC-LABEL: {{^}}sextload_global_v8i16_to_v8i32:
 ; SI: s_endpgm
 define void @sextload_global_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i16> addrspace(1)* %in
+  %load = load <8 x i16>, <8 x i16> addrspace(1)* %in
   %ext = sext <8 x i16> %load to <8 x i32>
   store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
   ret void
@@ -102,7 +102,7 @@ define void @sextload_global_v8i16_to_v8
 ; FUNC-LABEL: {{^}}zextload_global_v16i16_to_v16i32:
 ; SI: s_endpgm
 define void @zextload_global_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i16> addrspace(1)* %in
+  %load = load <16 x i16>, <16 x i16> addrspace(1)* %in
   %ext = zext <16 x i16> %load to <16 x i32>
   store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
   ret void
@@ -111,7 +111,7 @@ define void @zextload_global_v16i16_to_v
 ; FUNC-LABEL: {{^}}sextload_global_v16i16_to_v16i32:
 ; SI: s_endpgm
 define void @sextload_global_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i16> addrspace(1)* %in
+  %load = load <16 x i16>, <16 x i16> addrspace(1)* %in
   %ext = sext <16 x i16> %load to <16 x i32>
   store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
   ret void
@@ -120,7 +120,7 @@ define void @sextload_global_v16i16_to_v
 ; FUNC-LABEL: {{^}}zextload_global_v32i16_to_v32i32:
 ; SI: s_endpgm
 define void @zextload_global_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <32 x i16> addrspace(1)* %in
+  %load = load <32 x i16>, <32 x i16> addrspace(1)* %in
   %ext = zext <32 x i16> %load to <32 x i32>
   store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
   ret void
@@ -129,7 +129,7 @@ define void @zextload_global_v32i16_to_v
 ; FUNC-LABEL: {{^}}sextload_global_v32i16_to_v32i32:
 ; SI: s_endpgm
 define void @sextload_global_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <32 x i16> addrspace(1)* %in
+  %load = load <32 x i16>, <32 x i16> addrspace(1)* %in
   %ext = sext <32 x i16> %load to <32 x i32>
   store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
   ret void
@@ -138,7 +138,7 @@ define void @sextload_global_v32i16_to_v
 ; FUNC-LABEL: {{^}}zextload_global_v64i16_to_v64i32:
 ; SI: s_endpgm
 define void @zextload_global_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <64 x i16> addrspace(1)* %in
+  %load = load <64 x i16>, <64 x i16> addrspace(1)* %in
   %ext = zext <64 x i16> %load to <64 x i32>
   store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
   ret void
@@ -147,7 +147,7 @@ define void @zextload_global_v64i16_to_v
 ; FUNC-LABEL: {{^}}sextload_global_v64i16_to_v64i32:
 ; SI: s_endpgm
 define void @sextload_global_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <64 x i16> addrspace(1)* %in
+  %load = load <64 x i16>, <64 x i16> addrspace(1)* %in
   %ext = sext <64 x i16> %load to <64 x i32>
   store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
   ret void
@@ -158,7 +158,7 @@ define void @sextload_global_v64i16_to_v
 ; SI: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
 ; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]]
 define void @zextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
-  %a = load i16 addrspace(1)* %in
+  %a = load i16, i16 addrspace(1)* %in
   %ext = zext i16 %a to i64
   store i64 %ext, i64 addrspace(1)* %out
   ret void
@@ -169,7 +169,7 @@ define void @zextload_global_i16_to_i64(
 ; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[LOAD]]
 ; SI: buffer_store_dwordx2
 define void @sextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
-  %a = load i16 addrspace(1)* %in
+  %a = load i16, i16 addrspace(1)* %in
   %ext = sext i16 %a to i64
   store i64 %ext, i64 addrspace(1)* %out
   ret void
@@ -178,7 +178,7 @@ define void @sextload_global_i16_to_i64(
 ; FUNC-LABEL: {{^}}zextload_global_v1i16_to_v1i64:
 ; SI: s_endpgm
 define void @zextload_global_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i16> addrspace(1)* %in
+  %load = load <1 x i16>, <1 x i16> addrspace(1)* %in
   %ext = zext <1 x i16> %load to <1 x i64>
   store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
   ret void
@@ -187,7 +187,7 @@ define void @zextload_global_v1i16_to_v1
 ; FUNC-LABEL: {{^}}sextload_global_v1i16_to_v1i64:
 ; SI: s_endpgm
 define void @sextload_global_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i16> addrspace(1)* %in
+  %load = load <1 x i16>, <1 x i16> addrspace(1)* %in
   %ext = sext <1 x i16> %load to <1 x i64>
   store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
   ret void
@@ -196,7 +196,7 @@ define void @sextload_global_v1i16_to_v1
 ; FUNC-LABEL: {{^}}zextload_global_v2i16_to_v2i64:
 ; SI: s_endpgm
 define void @zextload_global_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i16> addrspace(1)* %in
+  %load = load <2 x i16>, <2 x i16> addrspace(1)* %in
   %ext = zext <2 x i16> %load to <2 x i64>
   store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
   ret void
@@ -205,7 +205,7 @@ define void @zextload_global_v2i16_to_v2
 ; FUNC-LABEL: {{^}}sextload_global_v2i16_to_v2i64:
 ; SI: s_endpgm
 define void @sextload_global_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i16> addrspace(1)* %in
+  %load = load <2 x i16>, <2 x i16> addrspace(1)* %in
   %ext = sext <2 x i16> %load to <2 x i64>
   store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
   ret void
@@ -214,7 +214,7 @@ define void @sextload_global_v2i16_to_v2
 ; FUNC-LABEL: {{^}}zextload_global_v4i16_to_v4i64:
 ; SI: s_endpgm
 define void @zextload_global_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i16> addrspace(1)* %in
+  %load = load <4 x i16>, <4 x i16> addrspace(1)* %in
   %ext = zext <4 x i16> %load to <4 x i64>
   store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
   ret void
@@ -223,7 +223,7 @@ define void @zextload_global_v4i16_to_v4
 ; FUNC-LABEL: {{^}}sextload_global_v4i16_to_v4i64:
 ; SI: s_endpgm
 define void @sextload_global_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i16> addrspace(1)* %in
+  %load = load <4 x i16>, <4 x i16> addrspace(1)* %in
   %ext = sext <4 x i16> %load to <4 x i64>
   store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
   ret void
@@ -232,7 +232,7 @@ define void @sextload_global_v4i16_to_v4
 ; FUNC-LABEL: {{^}}zextload_global_v8i16_to_v8i64:
 ; SI: s_endpgm
 define void @zextload_global_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i16> addrspace(1)* %in
+  %load = load <8 x i16>, <8 x i16> addrspace(1)* %in
   %ext = zext <8 x i16> %load to <8 x i64>
   store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
   ret void
@@ -241,7 +241,7 @@ define void @zextload_global_v8i16_to_v8
 ; FUNC-LABEL: {{^}}sextload_global_v8i16_to_v8i64:
 ; SI: s_endpgm
 define void @sextload_global_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i16> addrspace(1)* %in
+  %load = load <8 x i16>, <8 x i16> addrspace(1)* %in
   %ext = sext <8 x i16> %load to <8 x i64>
   store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
   ret void
@@ -250,7 +250,7 @@ define void @sextload_global_v8i16_to_v8
 ; FUNC-LABEL: {{^}}zextload_global_v16i16_to_v16i64:
 ; SI: s_endpgm
 define void @zextload_global_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i16> addrspace(1)* %in
+  %load = load <16 x i16>, <16 x i16> addrspace(1)* %in
   %ext = zext <16 x i16> %load to <16 x i64>
   store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
   ret void
@@ -259,7 +259,7 @@ define void @zextload_global_v16i16_to_v
 ; FUNC-LABEL: {{^}}sextload_global_v16i16_to_v16i64:
 ; SI: s_endpgm
 define void @sextload_global_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i16> addrspace(1)* %in
+  %load = load <16 x i16>, <16 x i16> addrspace(1)* %in
   %ext = sext <16 x i16> %load to <16 x i64>
   store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
   ret void
@@ -268,7 +268,7 @@ define void @sextload_global_v16i16_to_v
 ; FUNC-LABEL: {{^}}zextload_global_v32i16_to_v32i64:
 ; SI: s_endpgm
 define void @zextload_global_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <32 x i16> addrspace(1)* %in
+  %load = load <32 x i16>, <32 x i16> addrspace(1)* %in
   %ext = zext <32 x i16> %load to <32 x i64>
   store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
   ret void
@@ -277,7 +277,7 @@ define void @zextload_global_v32i16_to_v
 ; FUNC-LABEL: {{^}}sextload_global_v32i16_to_v32i64:
 ; SI: s_endpgm
 define void @sextload_global_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <32 x i16> addrspace(1)* %in
+  %load = load <32 x i16>, <32 x i16> addrspace(1)* %in
   %ext = sext <32 x i16> %load to <32 x i64>
   store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
   ret void
@@ -286,7 +286,7 @@ define void @sextload_global_v32i16_to_v
 ; FUNC-LABEL: {{^}}zextload_global_v64i16_to_v64i64:
 ; SI: s_endpgm
 define void @zextload_global_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <64 x i16> addrspace(1)* %in
+  %load = load <64 x i16>, <64 x i16> addrspace(1)* %in
   %ext = zext <64 x i16> %load to <64 x i64>
   store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
   ret void
@@ -295,7 +295,7 @@ define void @zextload_global_v64i16_to_v
 ; FUNC-LABEL: {{^}}sextload_global_v64i16_to_v64i64:
 ; SI: s_endpgm
 define void @sextload_global_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
-  %load = load <64 x i16> addrspace(1)* %in
+  %load = load <64 x i16>, <64 x i16> addrspace(1)* %in
   %ext = sext <64 x i16> %load to <64 x i64>
   store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
   ret void
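
Trailing operands such as `align` are untouched by the rewrite; only the type list ahead of the pointer changes. For instance, a hypothetical aligned variant of the scalar tests above would read:

define void @zextload_aligned_sketch(i32 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
  ; The explicit i16 must match the pointee of the i16 addrspace(1)* operand.
  %a = load i16, i16 addrspace(1)* %in, align 2
  %ext = zext i16 %a to i32
  store i32 %ext, i32 addrspace(1)* %out
  ret void
}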

Modified: llvm/trunk/test/CodeGen/R600/global-extload-i32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/global-extload-i32.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/global-extload-i32.ll (original)
+++ llvm/trunk/test/CodeGen/R600/global-extload-i32.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 ; SI: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
 ; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]]
 define void @zextload_global_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %a = load i32 addrspace(1)* %in
+  %a = load i32, i32 addrspace(1)* %in
   %ext = zext i32 %a to i64
   store i64 %ext, i64 addrspace(1)* %out
   ret void
@@ -18,7 +18,7 @@ define void @zextload_global_i32_to_i64(
 ; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[LOAD]]
 ; SI: buffer_store_dwordx2
 define void @sextload_global_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %a = load i32 addrspace(1)* %in
+  %a = load i32, i32 addrspace(1)* %in
   %ext = sext i32 %a to i64
   store i64 %ext, i64 addrspace(1)* %out
   ret void
@@ -29,7 +29,7 @@ define void @sextload_global_i32_to_i64(
 ; SI: buffer_store_dwordx2
 ; SI: s_endpgm
 define void @zextload_global_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i32> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i32> addrspace(1)* %in
+  %load = load <1 x i32>, <1 x i32> addrspace(1)* %in
   %ext = zext <1 x i32> %load to <1 x i64>
   store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
   ret void
@@ -41,7 +41,7 @@ define void @zextload_global_v1i32_to_v1
 ; SI: buffer_store_dwordx2
 ; SI: s_endpgm
 define void @sextload_global_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i32> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i32> addrspace(1)* %in
+  %load = load <1 x i32>, <1 x i32> addrspace(1)* %in
   %ext = sext <1 x i32> %load to <1 x i64>
   store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
   ret void
@@ -53,7 +53,7 @@ define void @sextload_global_v1i32_to_v1
 ; SI: buffer_store_dwordx2
 ; SI: s_endpgm
 define void @zextload_global_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i32> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i32> addrspace(1)* %in
+  %load = load <2 x i32>, <2 x i32> addrspace(1)* %in
   %ext = zext <2 x i32> %load to <2 x i64>
   store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
   ret void
@@ -67,7 +67,7 @@ define void @zextload_global_v2i32_to_v2
 ; SI-DAG: buffer_store_dwordx2
 ; SI: s_endpgm
 define void @sextload_global_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i32> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i32> addrspace(1)* %in
+  %load = load <2 x i32>, <2 x i32> addrspace(1)* %in
   %ext = sext <2 x i32> %load to <2 x i64>
   store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
   ret void
@@ -81,7 +81,7 @@ define void @sextload_global_v2i32_to_v2
 ; SI: buffer_store_dwordx2
 ; SI: s_endpgm
 define void @zextload_global_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i32> addrspace(1)* %in
+  %load = load <4 x i32>, <4 x i32> addrspace(1)* %in
   %ext = zext <4 x i32> %load to <4 x i64>
   store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
   ret void
@@ -99,7 +99,7 @@ define void @zextload_global_v4i32_to_v4
 ; SI-DAG: buffer_store_dwordx2
 ; SI: s_endpgm
 define void @sextload_global_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i32> addrspace(1)* %in
+  %load = load <4 x i32>, <4 x i32> addrspace(1)* %in
   %ext = sext <4 x i32> %load to <4 x i64>
   store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
   ret void
@@ -124,7 +124,7 @@ define void @sextload_global_v4i32_to_v4
 ; SI-DAG: buffer_store_dwordx2
 ; SI: s_endpgm
 define void @zextload_global_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i32> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i32> addrspace(1)* %in
+  %load = load <8 x i32>, <8 x i32> addrspace(1)* %in
   %ext = zext <8 x i32> %load to <8 x i64>
   store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
   ret void
@@ -159,7 +159,7 @@ define void @zextload_global_v8i32_to_v8
 
 ; SI: s_endpgm
 define void @sextload_global_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i32> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i32> addrspace(1)* %in
+  %load = load <8 x i32>, <8 x i32> addrspace(1)* %in
   %ext = sext <8 x i32> %load to <8 x i64>
   store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
   ret void
@@ -212,7 +212,7 @@ define void @sextload_global_v8i32_to_v8
 ; SI-DAG: buffer_store_dwordx2
 ; SI: s_endpgm
 define void @sextload_global_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i32> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i32> addrspace(1)* %in
+  %load = load <16 x i32>, <16 x i32> addrspace(1)* %in
   %ext = sext <16 x i32> %load to <16 x i64>
   store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
   ret void
@@ -255,7 +255,7 @@ define void @sextload_global_v16i32_to_v
 
 ; SI: s_endpgm
 define void @zextload_global_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i32> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i32> addrspace(1)* %in
+  %load = load <16 x i32>, <16 x i32> addrspace(1)* %in
   %ext = zext <16 x i32> %load to <16 x i64>
   store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
   ret void
@@ -369,7 +369,7 @@ define void @zextload_global_v16i32_to_v
 
 ; SI: s_endpgm
 define void @sextload_global_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i32> addrspace(1)* nocapture %in) nounwind {
-  %load = load <32 x i32> addrspace(1)* %in
+  %load = load <32 x i32>, <32 x i32> addrspace(1)* %in
   %ext = sext <32 x i32> %load to <32 x i64>
   store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
   ret void
@@ -450,7 +450,7 @@ define void @sextload_global_v32i32_to_v
 
 ; SI: s_endpgm
 define void @zextload_global_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i32> addrspace(1)* nocapture %in) nounwind {
-  %load = load <32 x i32> addrspace(1)* %in
+  %load = load <32 x i32>, <32 x i32> addrspace(1)* %in
   %ext = zext <32 x i32> %load to <32 x i64>
   store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/global-extload-i8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/global-extload-i8.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/global-extload-i8.ll (original)
+++ llvm/trunk/test/CodeGen/R600/global-extload-i8.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 ; SI: buffer_store_dword
 ; SI: s_endpgm
 define void @zextload_global_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
-  %a = load i8 addrspace(1)* %in
+  %a = load i8, i8 addrspace(1)* %in
   %ext = zext i8 %a to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void
@@ -18,7 +18,7 @@ define void @zextload_global_i8_to_i32(i
 ; SI: buffer_store_dword
 ; SI: s_endpgm
 define void @sextload_global_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
-  %a = load i8 addrspace(1)* %in
+  %a = load i8, i8 addrspace(1)* %in
   %ext = sext i8 %a to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void
@@ -27,7 +27,7 @@ define void @sextload_global_i8_to_i32(i
 ; FUNC-LABEL: {{^}}zextload_global_v1i8_to_v1i32:
 ; SI: s_endpgm
 define void @zextload_global_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i8> addrspace(1)* %in
+  %load = load <1 x i8>, <1 x i8> addrspace(1)* %in
   %ext = zext <1 x i8> %load to <1 x i32>
   store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
   ret void
@@ -36,7 +36,7 @@ define void @zextload_global_v1i8_to_v1i
 ; FUNC-LABEL: {{^}}sextload_global_v1i8_to_v1i32:
 ; SI: s_endpgm
 define void @sextload_global_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i8> addrspace(1)* %in
+  %load = load <1 x i8>, <1 x i8> addrspace(1)* %in
   %ext = sext <1 x i8> %load to <1 x i32>
   store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
   ret void
@@ -45,7 +45,7 @@ define void @sextload_global_v1i8_to_v1i
 ; FUNC-LABEL: {{^}}zextload_global_v2i8_to_v2i32:
 ; SI: s_endpgm
 define void @zextload_global_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i8> addrspace(1)* %in
+  %load = load <2 x i8>, <2 x i8> addrspace(1)* %in
   %ext = zext <2 x i8> %load to <2 x i32>
   store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
   ret void
@@ -54,7 +54,7 @@ define void @zextload_global_v2i8_to_v2i
 ; FUNC-LABEL: {{^}}sextload_global_v2i8_to_v2i32:
 ; SI: s_endpgm
 define void @sextload_global_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i8> addrspace(1)* %in
+  %load = load <2 x i8>, <2 x i8> addrspace(1)* %in
   %ext = sext <2 x i8> %load to <2 x i32>
   store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
   ret void
@@ -63,7 +63,7 @@ define void @sextload_global_v2i8_to_v2i
 ; FUNC-LABEL: {{^}}zextload_global_v4i8_to_v4i32:
 ; SI: s_endpgm
 define void @zextload_global_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i8> addrspace(1)* %in
+  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in
   %ext = zext <4 x i8> %load to <4 x i32>
   store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
   ret void
@@ -72,7 +72,7 @@ define void @zextload_global_v4i8_to_v4i
 ; FUNC-LABEL: {{^}}sextload_global_v4i8_to_v4i32:
 ; SI: s_endpgm
 define void @sextload_global_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i8> addrspace(1)* %in
+  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in
   %ext = sext <4 x i8> %load to <4 x i32>
   store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
   ret void
@@ -81,7 +81,7 @@ define void @sextload_global_v4i8_to_v4i
 ; FUNC-LABEL: {{^}}zextload_global_v8i8_to_v8i32:
 ; SI: s_endpgm
 define void @zextload_global_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i8> addrspace(1)* %in
+  %load = load <8 x i8>, <8 x i8> addrspace(1)* %in
   %ext = zext <8 x i8> %load to <8 x i32>
   store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
   ret void
@@ -90,7 +90,7 @@ define void @zextload_global_v8i8_to_v8i
 ; FUNC-LABEL: {{^}}sextload_global_v8i8_to_v8i32:
 ; SI: s_endpgm
 define void @sextload_global_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i8> addrspace(1)* %in
+  %load = load <8 x i8>, <8 x i8> addrspace(1)* %in
   %ext = sext <8 x i8> %load to <8 x i32>
   store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
   ret void
@@ -99,7 +99,7 @@ define void @sextload_global_v8i8_to_v8i
 ; FUNC-LABEL: {{^}}zextload_global_v16i8_to_v16i32:
 ; SI: s_endpgm
 define void @zextload_global_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i8> addrspace(1)* %in
+  %load = load <16 x i8>, <16 x i8> addrspace(1)* %in
   %ext = zext <16 x i8> %load to <16 x i32>
   store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
   ret void
@@ -108,7 +108,7 @@ define void @zextload_global_v16i8_to_v1
 ; FUNC-LABEL: {{^}}sextload_global_v16i8_to_v16i32:
 ; SI: s_endpgm
 define void @sextload_global_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i8> addrspace(1)* %in
+  %load = load <16 x i8>, <16 x i8> addrspace(1)* %in
   %ext = sext <16 x i8> %load to <16 x i32>
   store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
   ret void
@@ -117,7 +117,7 @@ define void @sextload_global_v16i8_to_v1
 ; XFUNC-LABEL: {{^}}zextload_global_v32i8_to_v32i32:
 ; XSI: s_endpgm
 ; define void @zextload_global_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i8> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <32 x i8> addrspace(1)* %in
+;   %load = load <32 x i8>, <32 x i8> addrspace(1)* %in
 ;   %ext = zext <32 x i8> %load to <32 x i32>
 ;   store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
 ;   ret void
@@ -126,7 +126,7 @@ define void @sextload_global_v16i8_to_v1
 ; XFUNC-LABEL: {{^}}sextload_global_v32i8_to_v32i32:
 ; XSI: s_endpgm
 ; define void @sextload_global_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i8> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <32 x i8> addrspace(1)* %in
+;   %load = load <32 x i8>, <32 x i8> addrspace(1)* %in
 ;   %ext = sext <32 x i8> %load to <32 x i32>
 ;   store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
 ;   ret void
@@ -135,7 +135,7 @@ define void @sextload_global_v16i8_to_v1
 ; XFUNC-LABEL: {{^}}zextload_global_v64i8_to_v64i32:
 ; XSI: s_endpgm
 ; define void @zextload_global_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i8> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <64 x i8> addrspace(1)* %in
+;   %load = load <64 x i8>, <64 x i8> addrspace(1)* %in
 ;   %ext = zext <64 x i8> %load to <64 x i32>
 ;   store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
 ;   ret void
@@ -144,7 +144,7 @@ define void @sextload_global_v16i8_to_v1
 ; XFUNC-LABEL: {{^}}sextload_global_v64i8_to_v64i32:
 ; XSI: s_endpgm
 ; define void @sextload_global_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i8> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <64 x i8> addrspace(1)* %in
+;   %load = load <64 x i8>, <64 x i8> addrspace(1)* %in
 ;   %ext = sext <64 x i8> %load to <64 x i32>
 ;   store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
 ;   ret void
@@ -155,7 +155,7 @@ define void @sextload_global_v16i8_to_v1
 ; SI: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
 ; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]]
 define void @zextload_global_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
-  %a = load i8 addrspace(1)* %in
+  %a = load i8, i8 addrspace(1)* %in
   %ext = zext i8 %a to i64
   store i64 %ext, i64 addrspace(1)* %out
   ret void
@@ -166,7 +166,7 @@ define void @zextload_global_i8_to_i64(i
 ; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[LOAD]]
 ; SI: buffer_store_dwordx2
 define void @sextload_global_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
-  %a = load i8 addrspace(1)* %in
+  %a = load i8, i8 addrspace(1)* %in
   %ext = sext i8 %a to i64
   store i64 %ext, i64 addrspace(1)* %out
   ret void
@@ -175,7 +175,7 @@ define void @sextload_global_i8_to_i64(i
 ; FUNC-LABEL: {{^}}zextload_global_v1i8_to_v1i64:
 ; SI: s_endpgm
 define void @zextload_global_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i8> addrspace(1)* %in
+  %load = load <1 x i8>, <1 x i8> addrspace(1)* %in
   %ext = zext <1 x i8> %load to <1 x i64>
   store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
   ret void
@@ -184,7 +184,7 @@ define void @zextload_global_v1i8_to_v1i
 ; FUNC-LABEL: {{^}}sextload_global_v1i8_to_v1i64:
 ; SI: s_endpgm
 define void @sextload_global_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <1 x i8> addrspace(1)* %in
+  %load = load <1 x i8>, <1 x i8> addrspace(1)* %in
   %ext = sext <1 x i8> %load to <1 x i64>
   store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
   ret void
@@ -193,7 +193,7 @@ define void @sextload_global_v1i8_to_v1i
 ; FUNC-LABEL: {{^}}zextload_global_v2i8_to_v2i64:
 ; SI: s_endpgm
 define void @zextload_global_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i8> addrspace(1)* %in
+  %load = load <2 x i8>, <2 x i8> addrspace(1)* %in
   %ext = zext <2 x i8> %load to <2 x i64>
   store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
   ret void
@@ -202,7 +202,7 @@ define void @zextload_global_v2i8_to_v2i
 ; FUNC-LABEL: {{^}}sextload_global_v2i8_to_v2i64:
 ; SI: s_endpgm
 define void @sextload_global_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <2 x i8> addrspace(1)* %in
+  %load = load <2 x i8>, <2 x i8> addrspace(1)* %in
   %ext = sext <2 x i8> %load to <2 x i64>
   store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
   ret void
@@ -211,7 +211,7 @@ define void @sextload_global_v2i8_to_v2i
 ; FUNC-LABEL: {{^}}zextload_global_v4i8_to_v4i64:
 ; SI: s_endpgm
 define void @zextload_global_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i8> addrspace(1)* %in
+  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in
   %ext = zext <4 x i8> %load to <4 x i64>
   store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
   ret void
@@ -220,7 +220,7 @@ define void @zextload_global_v4i8_to_v4i
 ; FUNC-LABEL: {{^}}sextload_global_v4i8_to_v4i64:
 ; SI: s_endpgm
 define void @sextload_global_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <4 x i8> addrspace(1)* %in
+  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in
   %ext = sext <4 x i8> %load to <4 x i64>
   store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
   ret void
@@ -229,7 +229,7 @@ define void @sextload_global_v4i8_to_v4i
 ; FUNC-LABEL: {{^}}zextload_global_v8i8_to_v8i64:
 ; SI: s_endpgm
 define void @zextload_global_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i8> addrspace(1)* %in
+  %load = load <8 x i8>, <8 x i8> addrspace(1)* %in
   %ext = zext <8 x i8> %load to <8 x i64>
   store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
   ret void
@@ -238,7 +238,7 @@ define void @zextload_global_v8i8_to_v8i
 ; FUNC-LABEL: {{^}}sextload_global_v8i8_to_v8i64:
 ; SI: s_endpgm
 define void @sextload_global_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <8 x i8> addrspace(1)* %in
+  %load = load <8 x i8>, <8 x i8> addrspace(1)* %in
   %ext = sext <8 x i8> %load to <8 x i64>
   store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
   ret void
@@ -247,7 +247,7 @@ define void @sextload_global_v8i8_to_v8i
 ; FUNC-LABEL: {{^}}zextload_global_v16i8_to_v16i64:
 ; SI: s_endpgm
 define void @zextload_global_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i8> addrspace(1)* %in
+  %load = load <16 x i8>, <16 x i8> addrspace(1)* %in
   %ext = zext <16 x i8> %load to <16 x i64>
   store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
   ret void
@@ -256,7 +256,7 @@ define void @zextload_global_v16i8_to_v1
 ; FUNC-LABEL: {{^}}sextload_global_v16i8_to_v16i64:
 ; SI: s_endpgm
 define void @sextload_global_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i8> addrspace(1)* nocapture %in) nounwind {
-  %load = load <16 x i8> addrspace(1)* %in
+  %load = load <16 x i8>, <16 x i8> addrspace(1)* %in
   %ext = sext <16 x i8> %load to <16 x i64>
   store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
   ret void
@@ -265,7 +265,7 @@ define void @sextload_global_v16i8_to_v1
 ; XFUNC-LABEL: {{^}}zextload_global_v32i8_to_v32i64:
 ; XSI: s_endpgm
 ; define void @zextload_global_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i8> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <32 x i8> addrspace(1)* %in
+;   %load = load <32 x i8>, <32 x i8> addrspace(1)* %in
 ;   %ext = zext <32 x i8> %load to <32 x i64>
 ;   store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
 ;   ret void
@@ -274,7 +274,7 @@ define void @sextload_global_v16i8_to_v1
 ; XFUNC-LABEL: {{^}}sextload_global_v32i8_to_v32i64:
 ; XSI: s_endpgm
 ; define void @sextload_global_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i8> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <32 x i8> addrspace(1)* %in
+;   %load = load <32 x i8>, <32 x i8> addrspace(1)* %in
 ;   %ext = sext <32 x i8> %load to <32 x i64>
 ;   store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
 ;   ret void
@@ -283,7 +283,7 @@ define void @sextload_global_v16i8_to_v1
 ; XFUNC-LABEL: {{^}}zextload_global_v64i8_to_v64i64:
 ; XSI: s_endpgm
 ; define void @zextload_global_v64i8_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i8> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <64 x i8> addrspace(1)* %in
+;   %load = load <64 x i8>, <64 x i8> addrspace(1)* %in
 ;   %ext = zext <64 x i8> %load to <64 x i64>
 ;   store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
 ;   ret void
@@ -292,7 +292,7 @@ define void @sextload_global_v16i8_to_v1
 ; XFUNC-LABEL: {{^}}sextload_global_v64i8_to_v64i64:
 ; XSI: s_endpgm
 ; define void @sextload_global_v64i8_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i8> addrspace(1)* nocapture %in) nounwind {
-;   %load = load <64 x i8> addrspace(1)* %in
+;   %load = load <64 x i8>, <64 x i8> addrspace(1)* %in
 ;   %ext = sext <64 x i8> %load to <64 x i64>
 ;   store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
 ;   ret void
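
The commented-out XFUNC/XSI stubs are rewritten along with the live tests, so they will still assemble if re-enabled later. Note that the new first operand is not free-form: the parser rejects a load whose explicit type disagrees with the pointer operand's pointee type. A small sketch of that constraint, with illustrative names:

define i32 @load_type_match_sketch(i32 addrspace(1)* %p) {
  ; OK: the explicit type matches the pointee type of %p.
  %v = load i32, i32 addrspace(1)* %p
  ; Rejected by the parser (left commented out): i16 does not match i32.
  ;   %bad = load i16, i32 addrspace(1)* %p
  ret i32 %v
}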

Modified: llvm/trunk/test/CodeGen/R600/global-zero-initializer.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/global-zero-initializer.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/global-zero-initializer.ll (original)
+++ llvm/trunk/test/CodeGen/R600/global-zero-initializer.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 
 define void @load_init_global_global(i32 addrspace(1)* %out, i1 %p) {
  %gep = getelementptr [256 x i32], [256 x i32] addrspace(1)* @lds, i32 0, i32 10
-  %ld = load i32 addrspace(1)* %gep
+  %ld = load i32, i32 addrspace(1)* %gep
   store i32 %ld, i32 addrspace(1)* %out
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/gv-const-addrspace-fail.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/gv-const-addrspace-fail.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/gv-const-addrspace-fail.ll (original)
+++ llvm/trunk/test/CodeGen/R600/gv-const-addrspace-fail.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
 ; SI: s_endpgm
 define void @test_i8( i32 %s, i8 addrspace(1)* %out) #3 {
   %arrayidx = getelementptr inbounds [1 x i8], [1 x i8] addrspace(2)* @a, i32 0, i32 %s
-  %1 = load i8 addrspace(2)* %arrayidx, align 1
+  %1 = load i8, i8 addrspace(2)* %arrayidx, align 1
   store i8 %1, i8 addrspace(1)* %out
   ret void
 }
@@ -23,7 +23,7 @@ define void @test_i8( i32 %s, i8 addrspa
 ; SI: s_endpgm
 define void @test_i16( i32 %s, i16 addrspace(1)* %out) #3 {
   %arrayidx = getelementptr inbounds [1 x i16], [1 x i16] addrspace(2)* @b, i32 0, i32 %s
-  %1 = load i16 addrspace(2)* %arrayidx, align 2
+  %1 = load i16, i16 addrspace(2)* %arrayidx, align 2
   store i16 %1, i16 addrspace(1)* %out
   ret void
 }
@@ -36,7 +36,7 @@ define void @test_i16( i32 %s, i16 addrs
 ; FUNC-LABEL: {{^}}struct_bar_gv_load:
 define void @struct_bar_gv_load(i8 addrspace(1)* %out, i32 %index) {
   %gep = getelementptr inbounds [1 x %struct.bar], [1 x %struct.bar] addrspace(2)* @struct_bar_gv, i32 0, i32 0, i32 1, i32 %index
-  %load = load i8 addrspace(2)* %gep, align 1
+  %load = load i8, i8 addrspace(2)* %gep, align 1
   store i8 %load, i8 addrspace(1)* %out, align 1
   ret void
 }
@@ -51,7 +51,7 @@ define void @struct_bar_gv_load(i8 addrs
 ; FUNC-LABEL: {{^}}array_vector_gv_load:
 define void @array_vector_gv_load(<4 x i32> addrspace(1)* %out, i32 %index) {
   %gep = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>] addrspace(2)* @array_vector_gv, i32 0, i32 %index
-  %load = load <4 x i32> addrspace(2)* %gep, align 16
+  %load = load <4 x i32>, <4 x i32> addrspace(2)* %gep, align 16
   store <4 x i32> %load, <4 x i32> addrspace(1)* %out, align 16
   ret void
 }
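
The same spelling applies in every address space. A load through the constant address space, fed by a `getelementptr` instruction that already appears in its explicit-type form in these tests, would look like the following sketch (the global and function names are illustrative):

@table = addrspace(2) constant [4 x i32] [i32 0, i32 1, i32 2, i32 3]

define void @const_space_load_sketch(i32 addrspace(1)* %out, i32 %index) {
  %gep = getelementptr inbounds [4 x i32], [4 x i32] addrspace(2)* @table, i32 0, i32 %index
  %val = load i32, i32 addrspace(2)* %gep, align 4
  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}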

Modified: llvm/trunk/test/CodeGen/R600/gv-const-addrspace.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/gv-const-addrspace.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/gv-const-addrspace.ll (original)
+++ llvm/trunk/test/CodeGen/R600/gv-const-addrspace.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@
 define void @float(float addrspace(1)* %out, i32 %index) {
 entry:
   %0 = getelementptr inbounds [5 x float], [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
-  %1 = load float addrspace(2)* %0
+  %1 = load float, float addrspace(2)* %0
   store float %1, float addrspace(1)* %out
   ret void
 }
@@ -45,7 +45,7 @@ entry:
 define void @i32(i32 addrspace(1)* %out, i32 %index) {
 entry:
   %0 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(2)* @i32_gv, i32 0, i32 %index
-  %1 = load i32 addrspace(2)* %0
+  %1 = load i32, i32 addrspace(2)* %0
   store i32 %1, i32 addrspace(1)* %out
   ret void
 }
@@ -60,7 +60,7 @@ entry:
 
 define void @struct_foo_gv_load(i32 addrspace(1)* %out, i32 %index) {
   %gep = getelementptr inbounds [1 x %struct.foo], [1 x %struct.foo] addrspace(2)* @struct_foo_gv, i32 0, i32 0, i32 1, i32 %index
-  %load = load i32 addrspace(2)* %gep, align 4
+  %load = load i32, i32 addrspace(2)* %gep, align 4
   store i32 %load, i32 addrspace(1)* %out, align 4
   ret void
 }
@@ -76,7 +76,7 @@ define void @struct_foo_gv_load(i32 addr
 ; VI: s_load_dword
 define void @array_v1_gv_load(<1 x i32> addrspace(1)* %out, i32 %index) {
   %gep = getelementptr inbounds [4 x <1 x i32>], [4 x <1 x i32>] addrspace(2)* @array_v1_gv, i32 0, i32 %index
-  %load = load <1 x i32> addrspace(2)* %gep, align 4
+  %load = load <1 x i32>, <1 x i32> addrspace(2)* %gep, align 4
   store <1 x i32> %load, <1 x i32> addrspace(1)* %out, align 4
   ret void
 }
@@ -88,7 +88,7 @@ entry:
 
 if:
   %1 = getelementptr inbounds [5 x float], [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
-  %2 = load float addrspace(2)* %1
+  %2 = load float, float addrspace(2)* %1
   store float %2, float addrspace(1)* %out
   br label %endif
 

Modified: llvm/trunk/test/CodeGen/R600/half.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/half.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/half.ll (original)
+++ llvm/trunk/test/CodeGen/R600/half.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ define void @test_load_store(half addrsp
 ; CHECK-LABEL: {{^}}test_load_store:
 ; CHECK: buffer_load_ushort [[TMP:v[0-9]+]]
 ; CHECK: buffer_store_short [[TMP]]
-  %val = load half addrspace(1)* %in
+  %val = load half, half addrspace(1)* %in
   store half %val, half addrspace(1) * %out
   ret void
 }
@@ -14,7 +14,7 @@ define void @test_bitcast_from_half(half
 ; CHECK-LABEL: {{^}}test_bitcast_from_half:
 ; CHECK: buffer_load_ushort [[TMP:v[0-9]+]]
 ; CHECK: buffer_store_short [[TMP]]
-  %val = load half addrspace(1) * %in
+  %val = load half, half addrspace(1) * %in
   %val_int = bitcast half %val to i16
   store i16 %val_int, i16 addrspace(1)* %out
   ret void
@@ -24,7 +24,7 @@ define void @test_bitcast_to_half(half a
 ; CHECK-LABEL: {{^}}test_bitcast_to_half:
 ; CHECK: buffer_load_ushort [[TMP:v[0-9]+]]
 ; CHECK: buffer_store_short [[TMP]]
-  %val = load i16 addrspace(1)* %in
+  %val = load i16, i16 addrspace(1)* %in
   %val_fp = bitcast i16 %val to half
   store half %val_fp, half addrspace(1)* %out
   ret void
@@ -34,7 +34,7 @@ define void @test_extend32(half addrspac
 ; CHECK-LABEL: {{^}}test_extend32:
 ; CHECK: v_cvt_f32_f16_e32
 
-  %val16 = load half addrspace(1)* %in
+  %val16 = load half, half addrspace(1)* %in
   %val32 = fpext half %val16 to float
   store float %val32, float addrspace(1)* %out
   ret void
@@ -45,7 +45,7 @@ define void @test_extend64(half addrspac
 ; CHECK: v_cvt_f32_f16_e32
 ; CHECK: v_cvt_f64_f32_e32
 
-  %val16 = load half addrspace(1)* %in
+  %val16 = load half, half addrspace(1)* %in
   %val64 = fpext half %val16 to double
   store double %val64, double addrspace(1)* %out
   ret void
@@ -55,7 +55,7 @@ define void @test_trunc32(float addrspac
 ; CHECK-LABEL: {{^}}test_trunc32:
 ; CHECK: v_cvt_f16_f32_e32
 
-  %val32 = load float addrspace(1)* %in
+  %val32 = load float, float addrspace(1)* %in
   %val16 = fptrunc float %val32 to half
   store half %val16, half addrspace(1)* %out
   ret void
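
The pointee type can be any first-class type, so the `half` tests follow the identical pattern, pairing the new load spelling with an unchanged bitcast. A compact sketch under the same assumptions (names illustrative):

define void @half_roundtrip_sketch(i16 addrspace(1)* %out, half addrspace(1)* %in) {
  %val = load half, half addrspace(1)* %in
  %bits = bitcast half %val to i16
  store i16 %bits, i16 addrspace(1)* %out
  ret void
}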

Modified: llvm/trunk/test/CodeGen/R600/i8-to-double-to-float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/i8-to-double-to-float.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/i8-to-double-to-float.ll (original)
+++ llvm/trunk/test/CodeGen/R600/i8-to-double-to-float.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 ;CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 
 define void @test(float addrspace(1)* %out, i8 addrspace(1)* %in) {
-  %1 = load i8 addrspace(1)* %in
+  %1 = load i8, i8 addrspace(1)* %in
   %2 = uitofp i8 %1 to double
   %3 = fptrunc double %2 to float
   store float %3, float addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/R600/icmp-select-sete-reverse-args.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/icmp-select-sete-reverse-args.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/icmp-select-sete-reverse-args.ll (original)
+++ llvm/trunk/test/CodeGen/R600/icmp-select-sete-reverse-args.ll Fri Feb 27 15:17:42 2015
@@ -8,9 +8,9 @@
 
 define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
 entry:
-  %0 = load i32 addrspace(1)* %in
+  %0 = load i32, i32 addrspace(1)* %in
   %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
-  %1 = load i32 addrspace(1)* %arrayidx1
+  %1 = load i32, i32 addrspace(1)* %arrayidx1
   %cmp = icmp eq i32 %0, %1
   %value = select i1 %cmp, i32 0, i32 -1
   store i32 %value, i32 addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/R600/imm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/imm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/imm.ll (original)
+++ llvm/trunk/test/CodeGen/R600/imm.ll Fri Feb 27 15:17:42 2015
@@ -225,7 +225,7 @@ define void @add_inline_imm_neg_4.0_f32(
 ; CHECK: v_add_f32_e32 [[REG:v[0-9]+]], 0.5, [[VAL]]
 ; CHECK: buffer_store_dword [[REG]]
 define void @commute_add_inline_imm_0.5_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
-  %x = load float addrspace(1)* %in
+  %x = load float, float addrspace(1)* %in
   %y = fadd float %x, 0.5
   store float %y, float addrspace(1)* %out
   ret void
@@ -236,7 +236,7 @@ define void @commute_add_inline_imm_0.5_
 ; CHECK: v_add_f32_e32 [[REG:v[0-9]+]], 0x44800000, [[VAL]]
 ; CHECK: buffer_store_dword [[REG]]
 define void @commute_add_literal_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
-  %x = load float addrspace(1)* %in
+  %x = load float, float addrspace(1)* %in
   %y = fadd float %x, 1024.0
   store float %y, float addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/indirect-private-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/indirect-private-64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/indirect-private-64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/indirect-private-64.ll Fri Feb 27 15:17:42 2015
@@ -14,12 +14,12 @@ declare void @llvm.AMDGPU.barrier.local(
 ; SI-PROMOTE: ds_write_b64
 ; SI-PROMOTE: ds_read_b64
 define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in, i32 %b) nounwind {
-  %val = load double addrspace(1)* %in, align 8
+  %val = load double, double addrspace(1)* %in, align 8
   %array = alloca double, i32 16, align 8
   %ptr = getelementptr double, double* %array, i32 %b
   store double %val, double* %ptr, align 8
   call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
-  %result = load double* %ptr, align 8
+  %result = load double, double* %ptr, align 8
   store double %result, double addrspace(1)* %out, align 8
   ret void
 }
@@ -38,12 +38,12 @@ define void @private_access_f64_alloca(d
 ; SI-PROMOTE: ds_read_b32
 ; SI-PROMOTE: ds_read_b32
 define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) nounwind {
-  %val = load <2 x double> addrspace(1)* %in, align 16
+  %val = load <2 x double>, <2 x double> addrspace(1)* %in, align 16
   %array = alloca <2 x double>, i32 16, align 16
   %ptr = getelementptr <2 x double>, <2 x double>* %array, i32 %b
   store <2 x double> %val, <2 x double>* %ptr, align 16
   call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
-  %result = load <2 x double>* %ptr, align 16
+  %result = load <2 x double>, <2 x double>* %ptr, align 16
   store <2 x double> %result, <2 x double> addrspace(1)* %out, align 16
   ret void
 }
@@ -56,12 +56,12 @@ define void @private_access_v2f64_alloca
 ; SI-PROMOTE: ds_write_b64
 ; SI-PROMOTE: ds_read_b64
 define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i32 %b) nounwind {
-  %val = load i64 addrspace(1)* %in, align 8
+  %val = load i64, i64 addrspace(1)* %in, align 8
   %array = alloca i64, i32 16, align 8
   %ptr = getelementptr i64, i64* %array, i32 %b
   store i64 %val, i64* %ptr, align 8
   call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
-  %result = load i64* %ptr, align 8
+  %result = load i64, i64* %ptr, align 8
   store i64 %result, i64 addrspace(1)* %out, align 8
   ret void
 }
@@ -80,12 +80,12 @@ define void @private_access_i64_alloca(i
 ; SI-PROMOTE: ds_read_b32
 ; SI-PROMOTE: ds_read_b32
 define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) nounwind {
-  %val = load <2 x i64> addrspace(1)* %in, align 16
+  %val = load <2 x i64>, <2 x i64> addrspace(1)* %in, align 16
   %array = alloca <2 x i64>, i32 16, align 16
   %ptr = getelementptr <2 x i64>, <2 x i64>* %array, i32 %b
   store <2 x i64> %val, <2 x i64>* %ptr, align 16
   call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
-  %result = load <2 x i64>* %ptr, align 16
+  %result = load <2 x i64>, <2 x i64>* %ptr, align 16
   store <2 x i64> %result, <2 x i64> addrspace(1)* %out, align 16
   ret void
 }
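
Loads through ordinary address-space-0 pointers, such as the alloca-backed private accesses above, are rewritten the same way as the global-memory loads. A reduced sketch of that round trip, modeled on the tests but with the barrier call dropped and names chosen for illustration:

define void @alloca_roundtrip_sketch(double addrspace(1)* %out, double addrspace(1)* %in, i32 %b) nounwind {
  %val = load double, double addrspace(1)* %in, align 8
  %array = alloca double, i32 16, align 8
  %ptr = getelementptr double, double* %array, i32 %b
  store double %val, double* %ptr, align 8
  ; The private (address-space-0) load takes the same explicit pointee type.
  %result = load double, double* %ptr, align 8
  store double %result, double addrspace(1)* %out, align 8
  ret void
}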

Modified: llvm/trunk/test/CodeGen/R600/insert_vector_elt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/insert_vector_elt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/insert_vector_elt.ll (original)
+++ llvm/trunk/test/CodeGen/R600/insert_vector_elt.ll Fri Feb 27 15:17:42 2015
@@ -185,13 +185,13 @@ entry:
   br i1 %1, label %if, label %else
 
 if:
-  %2 = load i32 addrspace(1)* %in
+  %2 = load i32, i32 addrspace(1)* %in
   %3 = insertelement <2 x i32> %0, i32 %2, i32 1
   br label %endif
 
 else:
   %4 = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %5 = load i32 addrspace(1)* %4
+  %5 = load i32, i32 addrspace(1)* %4
   %6 = insertelement <2 x i32> %0, i32 %5, i32 1
   br label %endif
 

Modified: llvm/trunk/test/CodeGen/R600/jump-address.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/jump-address.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/jump-address.ll (original)
+++ llvm/trunk/test/CodeGen/R600/jump-address.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 
 define void @main() #0 {
 main_body:
-  %0 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %0 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %1 = extractelement <4 x float> %0, i32 0
   %2 = bitcast float %1 to i32
   %3 = icmp eq i32 %2, 0
@@ -17,7 +17,7 @@ main_body:
   br i1 %7, label %ENDIF, label %ELSE
 
 ELSE:                                             ; preds = %main_body
-  %8 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %8 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %9 = extractelement <4 x float> %8, i32 0
   %10 = bitcast float %9 to i32
   %11 = icmp eq i32 %10, 1
@@ -40,7 +40,7 @@ ENDIF:
   ret void
 
 IF13:                                             ; preds = %ELSE
-  %20 = load <4 x float> addrspace(8)* null
+  %20 = load <4 x float>, <4 x float> addrspace(8)* null
   %21 = extractelement <4 x float> %20, i32 0
   %22 = fsub float -0.000000e+00, %21
   %23 = fadd float 0xFFF8000000000000, %22

Modified: llvm/trunk/test/CodeGen/R600/kcache-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/kcache-fold.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/kcache-fold.ll (original)
+++ llvm/trunk/test/CodeGen/R600/kcache-fold.ll Fri Feb 27 15:17:42 2015
@@ -4,35 +4,35 @@
 ; CHECK: MOV * T{{[0-9]+\.[XYZW], KC0}}
 define void @main1() {
 main_body:
-  %0 = load <4 x float> addrspace(8)* null
+  %0 = load <4 x float>, <4 x float> addrspace(8)* null
   %1 = extractelement <4 x float> %0, i32 0
-  %2 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %2 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %3 = extractelement <4 x float> %2, i32 0
-  %4 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %4 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %5 = extractelement <4 x float> %4, i32 0
   %6 = fcmp ogt float %1, 0.000000e+00
   %7 = select i1 %6, float %3, float %5
-  %8 = load <4 x float> addrspace(8)* null
+  %8 = load <4 x float>, <4 x float> addrspace(8)* null
   %9 = extractelement <4 x float> %8, i32 1
-  %10 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %10 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %11 = extractelement <4 x float> %10, i32 1
-  %12 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %12 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %13 = extractelement <4 x float> %12, i32 1
   %14 = fcmp ogt float %9, 0.000000e+00
   %15 = select i1 %14, float %11, float %13
-  %16 = load <4 x float> addrspace(8)* null
+  %16 = load <4 x float>, <4 x float> addrspace(8)* null
   %17 = extractelement <4 x float> %16, i32 2
-  %18 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %18 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %19 = extractelement <4 x float> %18, i32 2
-  %20 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %20 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %21 = extractelement <4 x float> %20, i32 2
   %22 = fcmp ogt float %17, 0.000000e+00
   %23 = select i1 %22, float %19, float %21
-  %24 = load <4 x float> addrspace(8)* null
+  %24 = load <4 x float>, <4 x float> addrspace(8)* null
   %25 = extractelement <4 x float> %24, i32 3
-  %26 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %26 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %27 = extractelement <4 x float> %26, i32 3
-  %28 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %28 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %29 = extractelement <4 x float> %28, i32 3
   %30 = fcmp ogt float %25, 0.000000e+00
   %31 = select i1 %30, float %27, float %29
@@ -52,35 +52,35 @@ main_body:
 ; CHECK-NOT: MOV
 define void @main2() {
 main_body:
-  %0 = load <4 x float> addrspace(8)* null
+  %0 = load <4 x float>, <4 x float> addrspace(8)* null
   %1 = extractelement <4 x float> %0, i32 0
-  %2 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %2 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %3 = extractelement <4 x float> %2, i32 0
-  %4 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %4 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %5 = extractelement <4 x float> %4, i32 1
   %6 = fcmp ogt float %1, 0.000000e+00
   %7 = select i1 %6, float %3, float %5
-  %8 = load <4 x float> addrspace(8)* null
+  %8 = load <4 x float>, <4 x float> addrspace(8)* null
   %9 = extractelement <4 x float> %8, i32 1
-  %10 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %10 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %11 = extractelement <4 x float> %10, i32 0
-  %12 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %12 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %13 = extractelement <4 x float> %12, i32 1
   %14 = fcmp ogt float %9, 0.000000e+00
   %15 = select i1 %14, float %11, float %13
-  %16 = load <4 x float> addrspace(8)* null
+  %16 = load <4 x float>, <4 x float> addrspace(8)* null
   %17 = extractelement <4 x float> %16, i32 2
-  %18 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %18 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %19 = extractelement <4 x float> %18, i32 3
-  %20 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %20 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %21 = extractelement <4 x float> %20, i32 2
   %22 = fcmp ogt float %17, 0.000000e+00
   %23 = select i1 %22, float %19, float %21
-  %24 = load <4 x float> addrspace(8)* null
+  %24 = load <4 x float>, <4 x float> addrspace(8)* null
   %25 = extractelement <4 x float> %24, i32 3
-  %26 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %26 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %27 = extractelement <4 x float> %26, i32 3
-  %28 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %28 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %29 = extractelement <4 x float> %28, i32 2
   %30 = fcmp ogt float %25, 0.000000e+00
   %31 = select i1 %30, float %27, float %29

Modified: llvm/trunk/test/CodeGen/R600/large-alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/large-alloca.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/large-alloca.ll (original)
+++ llvm/trunk/test/CodeGen/R600/large-alloca.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ define void @large_alloca(i32 addrspace(
   %gep = getelementptr [8192 x i32], [8192 x i32]* %large, i32 0, i32 8191
   store i32 %x, i32* %gep
   %gep1 = getelementptr [8192 x i32], [8192 x i32]* %large, i32 0, i32 %y
-  %0 = load i32* %gep1
+  %0 = load i32, i32* %gep1
   store i32 %0, i32 addrspace(1)* %out
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/large-constant-initializer.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/large-constant-initializer.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/large-constant-initializer.ll (original)
+++ llvm/trunk/test/CodeGen/R600/large-constant-initializer.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 @gv = external unnamed_addr addrspace(2) constant [239 x i32], align 4
 
 define void @opencv_cvtfloat_crash(i32 addrspace(1)* %out, i32 %x) nounwind {
-  %val = load i32 addrspace(2)* getelementptr ([239 x i32] addrspace(2)* @gv, i64 0, i64 239), align 4
+  %val = load i32, i32 addrspace(2)* getelementptr ([239 x i32] addrspace(2)* @gv, i64 0, i64 239), align 4
   %mul12 = mul nsw i32 %val, 7
   br i1 undef, label %exit, label %bb
 

Modified: llvm/trunk/test/CodeGen/R600/lds-initializer.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/lds-initializer.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/lds-initializer.ll (original)
+++ llvm/trunk/test/CodeGen/R600/lds-initializer.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 
 define void @load_init_lds_global(i32 addrspace(1)* %out, i1 %p) {
  %gep = getelementptr [8 x i32], [8 x i32] addrspace(3)* @lds, i32 0, i32 10
-  %ld = load i32 addrspace(3)* %gep
+  %ld = load i32, i32 addrspace(3)* %gep
   store i32 %ld, i32 addrspace(1)* %out
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/lds-oqap-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/lds-oqap-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/lds-oqap-crash.ll (original)
+++ llvm/trunk/test/CodeGen/R600/lds-oqap-crash.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@
 ; CHECK: {{^}}lds_crash:
 define void @lds_crash(i32 addrspace(1)* %out, i32 addrspace(3)* %in, i32 %a, i32 %b, i32 %c) {
 entry:
-  %0 = load i32 addrspace(3)* %in
+  %0 = load i32, i32 addrspace(3)* %in
   ; This block needs to be > 115 ISA instructions to hit the bug,
   ; so we'll use udiv instructions.
   %div0 = udiv i32 %0, %b

Modified: llvm/trunk/test/CodeGen/R600/lds-output-queue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/lds-output-queue.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/lds-output-queue.ll (original)
+++ llvm/trunk/test/CodeGen/R600/lds-output-queue.ll Fri Feb 27 15:17:42 2015
@@ -13,11 +13,11 @@
 define void @lds_input_queue(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %index) {
 entry:
   %0 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(3)* @local_mem, i32 0, i32 %index
-  %1 = load i32 addrspace(3)* %0
+  %1 = load i32, i32 addrspace(3)* %0
   call void @llvm.AMDGPU.barrier.local()
 
   ; This will start a new clause for the vertex fetch
-  %2 = load i32 addrspace(1)* %in
+  %2 = load i32, i32 addrspace(1)* %in
   %3 = add i32 %1, %2
   store i32 %3, i32 addrspace(1)* %out
   ret void
@@ -41,8 +41,8 @@ declare void @llvm.AMDGPU.barrier.local(
 ; has been declared in the local memory space:
 ;
 ;  %0 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(3)* @local_mem, i32 0, i32 %index
-;  %1 = load i32 addrspace(3)* %0
-;  %2 = load i32 addrspace(1)* %in
+;  %1 = load i32, i32 addrspace(3)* %0
+;  %2 = load i32, i32 addrspace(1)* %in
 ;
 ; The instruction selection phase will generate ISA that looks like this:
 ; %OQAP = LDS_READ_RET
@@ -91,8 +91,8 @@ declare void @llvm.AMDGPU.barrier.local(
 define void @local_global_alias(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
 entry:
   %0 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(3)* @local_mem, i32 0, i32 0
-  %1 = load i32 addrspace(3)* %0
-  %2 = load i32 addrspace(1)* %in
+  %1 = load i32, i32 addrspace(3)* %0
+  %2 = load i32, i32 addrspace(1)* %in
   %3 = add i32 %2, %1
   store i32 %3, i32 addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/lds-zero-initializer.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/lds-zero-initializer.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/lds-zero-initializer.ll (original)
+++ llvm/trunk/test/CodeGen/R600/lds-zero-initializer.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 
 define void @load_zeroinit_lds_global(i32 addrspace(1)* %out, i1 %p) {
  %gep = getelementptr [256 x i32], [256 x i32] addrspace(3)* @lds, i32 0, i32 10
-  %ld = load i32 addrspace(3)* %gep
+  %ld = load i32, i32 addrspace(3)* %gep
   store i32 %ld, i32 addrspace(1)* %out
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.abs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.abs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.abs.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.abs.ll Fri Feb 27 15:17:42 2015
@@ -28,7 +28,7 @@ define void @s_abs_i32(i32 addrspace(1)*
 ; EG: SUB_INT
 ; EG: MAX_INT
 define void @v_abs_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind {
-  %val = load i32 addrspace(1)* %src, align 4
+  %val = load i32, i32 addrspace(1)* %src, align 4
   %abs = call i32 @llvm.AMDGPU.abs(i32 %val) nounwind readnone
   store i32 %abs, i32 addrspace(1)* %out, align 4
   ret void
@@ -42,7 +42,7 @@ define void @v_abs_i32(i32 addrspace(1)*
 ; EG: SUB_INT
 ; EG: MAX_INT
 define void @abs_i32_legacy_amdil(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind {
-  %val = load i32 addrspace(1)* %src, align 4
+  %val = load i32, i32 addrspace(1)* %src, align 4
   %abs = call i32 @llvm.AMDIL.abs.i32(i32 %val) nounwind readnone
   store i32 %abs, i32 addrspace(1)* %out, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ entry:
   %3 = sub i32 %2, 1
   %4 = sub i32 %3, %0
   %5 = getelementptr i32, i32 addrspace(1)* %out, i32 %4
-  %6 = load i32 addrspace(1)* %5
+  %6 = load i32, i32 addrspace(1)* %5
   store i32 %6, i32 addrspace(1)* %1
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ entry:
   %3 = sub i32 %2, 1
   %4 = sub i32 %3, %0
   %5 = getelementptr i32, i32 addrspace(1)* %out, i32 %4
-  %6 = load i32 addrspace(1)* %5
+  %6 = load i32, i32 addrspace(1)* %5
   store i32 %6, i32 addrspace(1)* %1
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll Fri Feb 27 15:17:42 2015
@@ -44,7 +44,7 @@ define void @bfe_i32_imm_arg_arg(i32 add
 ; FUNC-LABEL: {{^}}v_bfe_print_arg:
 ; SI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 2, 8
 define void @v_bfe_print_arg(i32 addrspace(1)* %out, i32 addrspace(1)* %src0) nounwind {
-  %load = load i32 addrspace(1)* %src0, align 4
+  %load = load i32, i32 addrspace(1)* %src0, align 4
   %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 2, i32 8) nounwind readnone
   store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
   ret void
@@ -75,7 +75,7 @@ define void @bfe_i32_arg_0_width_imm_off
 ; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
 ; SI: s_endpgm
 define void @bfe_i32_test_6(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = shl i32 %x, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 1, i32 31)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
@@ -89,7 +89,7 @@ define void @bfe_i32_test_6(i32 addrspac
 ; SI: buffer_store_dword [[VREG]],
 ; SI: s_endpgm
 define void @bfe_i32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = shl i32 %x, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 0, i32 31)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
@@ -102,7 +102,7 @@ define void @bfe_i32_test_7(i32 addrspac
 ; SI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 1
 ; SI: s_endpgm
 define void @bfe_i32_test_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = shl i32 %x, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 31, i32 1)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
@@ -115,7 +115,7 @@ define void @bfe_i32_test_8(i32 addrspac
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_i32_test_9(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 31, i32 1)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
   ret void
@@ -127,7 +127,7 @@ define void @bfe_i32_test_9(i32 addrspac
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_i32_test_10(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 1, i32 31)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
   ret void
@@ -139,7 +139,7 @@ define void @bfe_i32_test_10(i32 addrspa
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_i32_test_11(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 8, i32 24)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
   ret void
@@ -151,7 +151,7 @@ define void @bfe_i32_test_11(i32 addrspa
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_i32_test_12(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 24, i32 8)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
   ret void
@@ -162,7 +162,7 @@ define void @bfe_i32_test_12(i32 addrspa
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_i32_test_13(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = ashr i32 %x, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 31, i32 1)
   store i32 %bfe, i32 addrspace(1)* %out, align 4 ret void
@@ -173,7 +173,7 @@ define void @bfe_i32_test_13(i32 addrspa
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_i32_test_14(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = lshr i32 %x, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 31, i32 1)
   store i32 %bfe, i32 addrspace(1)* %out, align 4 ret void
@@ -418,7 +418,7 @@ define void @bfe_i32_constant_fold_test_
 ; XSI-NOT: SHR
 ; XSI: buffer_store_dword [[BFE]],
 define void @bfe_sext_in_reg_i24(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 0, i32 24)
   %shl = shl i32 %bfe, 8
   %ashr = ashr i32 %shl, 8
@@ -434,7 +434,7 @@ define void @bfe_sext_in_reg_i24(i32 add
 ; SI: v_ashrrev_i32_e32 [[TMP2:v[0-9]+]], 1, [[TMP1]]
 ; SI: buffer_store_dword [[TMP2]]
 define void @simplify_demanded_bfe_sdiv(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %src = load i32 addrspace(1)* %in, align 4
+  %src = load i32, i32 addrspace(1)* %in, align 4
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %src, i32 1, i32 16) nounwind readnone
   %div = sdiv i32 %bfe, 2
   store i32 %div, i32 addrspace(1)* %out, align 4

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll Fri Feb 27 15:17:42 2015
@@ -65,7 +65,7 @@ define void @bfe_u32_arg_0_width_imm_off
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_u32_zextload_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
-  %load = load i8 addrspace(1)* %in
+  %load = load i8, i8 addrspace(1)* %in
   %ext = zext i8 %load to i32
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 0, i32 8)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
@@ -79,7 +79,7 @@ define void @bfe_u32_zextload_i8(i32 add
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_u32_zext_in_reg_i8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %load = load i32 addrspace(1)* %in, align 4
+  %load = load i32, i32 addrspace(1)* %in, align 4
   %add = add i32 %load, 1
   %ext = and i32 %add, 255
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 0, i32 8)
@@ -94,7 +94,7 @@ define void @bfe_u32_zext_in_reg_i8(i32
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_u32_zext_in_reg_i16(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %load = load i32 addrspace(1)* %in, align 4
+  %load = load i32, i32 addrspace(1)* %in, align 4
   %add = add i32 %load, 1
   %ext = and i32 %add, 65535
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 0, i32 16)
@@ -108,7 +108,7 @@ define void @bfe_u32_zext_in_reg_i16(i32
 ; SI: bfe
 ; SI: s_endpgm
 define void @bfe_u32_zext_in_reg_i8_offset_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %load = load i32 addrspace(1)* %in, align 4
+  %load = load i32, i32 addrspace(1)* %in, align 4
   %add = add i32 %load, 1
   %ext = and i32 %add, 255
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 1, i32 8)
@@ -123,7 +123,7 @@ define void @bfe_u32_zext_in_reg_i8_offs
 ; SI-NEXT: bfe
 ; SI: s_endpgm
 define void @bfe_u32_zext_in_reg_i8_offset_3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %load = load i32 addrspace(1)* %in, align 4
+  %load = load i32, i32 addrspace(1)* %in, align 4
   %add = add i32 %load, 1
   %ext = and i32 %add, 255
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 3, i32 8)
@@ -138,7 +138,7 @@ define void @bfe_u32_zext_in_reg_i8_offs
 ; SI-NEXT: bfe
 ; SI: s_endpgm
 define void @bfe_u32_zext_in_reg_i8_offset_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %load = load i32 addrspace(1)* %in, align 4
+  %load = load i32, i32 addrspace(1)* %in, align 4
   %add = add i32 %load, 1
   %ext = and i32 %add, 255
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 7, i32 8)
@@ -152,7 +152,7 @@ define void @bfe_u32_zext_in_reg_i8_offs
 ; SI-NEXT: bfe
 ; SI: s_endpgm
 define void @bfe_u32_zext_in_reg_i16_offset_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %load = load i32 addrspace(1)* %in, align 4
+  %load = load i32, i32 addrspace(1)* %in, align 4
   %add = add i32 %load, 1
   %ext = and i32 %add, 65535
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 8, i32 8)
@@ -166,14 +166,14 @@ define void @bfe_u32_zext_in_reg_i16_off
 ; SI: s_endpgm
 ; EG: AND_INT T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, 1,
 define void @bfe_u32_test_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 0, i32 1)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
   ret void
 }
 
 define void @bfe_u32_test_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = shl i32 %x, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 0, i32 8)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
@@ -181,7 +181,7 @@ define void @bfe_u32_test_2(i32 addrspac
 }
 
 define void @bfe_u32_test_3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = shl i32 %x, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 0, i32 1)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
@@ -196,7 +196,7 @@ define void @bfe_u32_test_3(i32 addrspac
 ; SI: buffer_store_dword [[VREG]],
 ; SI: s_endpgm
 define void @bfe_u32_test_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = shl i32 %x, 31
   %shr = lshr i32 %shl, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shr, i32 31, i32 1)
@@ -211,7 +211,7 @@ define void @bfe_u32_test_4(i32 addrspac
 ; SI: v_bfe_i32 {{v[0-9]+}}, {{v[0-9]+}}, 0, 1
 ; SI: s_endpgm
 define void @bfe_u32_test_5(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = shl i32 %x, 31
   %shr = ashr i32 %shl, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shr, i32 0, i32 1)
@@ -224,7 +224,7 @@ define void @bfe_u32_test_5(i32 addrspac
 ; SI: v_lshrrev_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
 ; SI: s_endpgm
 define void @bfe_u32_test_6(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = shl i32 %x, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 1, i32 31)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
@@ -236,7 +236,7 @@ define void @bfe_u32_test_6(i32 addrspac
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_u32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = shl i32 %x, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 0, i32 31)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
@@ -249,7 +249,7 @@ define void @bfe_u32_test_7(i32 addrspac
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_u32_test_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = shl i32 %x, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 31, i32 1)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
@@ -262,7 +262,7 @@ define void @bfe_u32_test_8(i32 addrspac
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_u32_test_9(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 31, i32 1)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
   ret void
@@ -274,7 +274,7 @@ define void @bfe_u32_test_9(i32 addrspac
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_u32_test_10(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 1, i32 31)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
   ret void
@@ -286,7 +286,7 @@ define void @bfe_u32_test_10(i32 addrspa
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_u32_test_11(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 8, i32 24)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
   ret void
@@ -298,7 +298,7 @@ define void @bfe_u32_test_11(i32 addrspa
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_u32_test_12(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 24, i32 8)
   store i32 %bfe, i32 addrspace(1)* %out, align 4
   ret void
@@ -309,7 +309,7 @@ define void @bfe_u32_test_12(i32 addrspa
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_u32_test_13(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = ashr i32 %x, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 31, i32 1)
   store i32 %bfe, i32 addrspace(1)* %out, align 4 ret void
@@ -320,7 +320,7 @@ define void @bfe_u32_test_13(i32 addrspa
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_u32_test_14(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = lshr i32 %x, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 31, i32 1)
   store i32 %bfe, i32 addrspace(1)* %out, align 4 ret void
@@ -568,7 +568,7 @@ define void @bfe_u32_constant_fold_test_
 define void @simplify_bfe_u32_multi_use_arg(i32 addrspace(1)* %out0,
                                             i32 addrspace(1)* %out1,
                                             i32 addrspace(1)* %in) nounwind {
-  %src = load i32 addrspace(1)* %in, align 4
+  %src = load i32, i32 addrspace(1)* %in, align 4
   %and = and i32 %src, 63
   %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %and, i32 2, i32 2) nounwind readnone
   store i32 %bfe_u32, i32 addrspace(1)* %out0, align 4

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.brev.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.brev.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.brev.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.brev.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ define void @s_brev_i32(i32 addrspace(1)
 ; SI: buffer_store_dword [[RESULT]],
 ; SI: s_endpgm
 define void @v_brev_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
-  %val = load i32 addrspace(1)* %valptr, align 4
+  %val = load i32, i32 addrspace(1)* %valptr, align 4
   %ctlz = call i32 @llvm.AMDGPU.brev(i32 %val) nounwind readnone
   store i32 %ctlz, i32 addrspace(1)* %out, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.class.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.class.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.class.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.class.ll Fri Feb 27 15:17:42 2015
@@ -136,7 +136,7 @@ define void @v_test_class_full_mask_f32(
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load float addrspace(1)* %gep.in
+  %a = load float, float addrspace(1)* %gep.in
 
   %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 511) #1
   %sext = sext i1 %result to i32
@@ -154,7 +154,7 @@ define void @test_class_inline_imm_const
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %b = load i32 addrspace(1)* %gep.in
+  %b = load i32, i32 addrspace(1)* %gep.in
 
   %result = call i1 @llvm.AMDGPU.class.f32(float 1.0, i32 %b) #1
   %sext = sext i1 %result to i32
@@ -174,7 +174,7 @@ define void @test_class_lit_constant_dyn
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %b = load i32 addrspace(1)* %gep.in
+  %b = load i32, i32 addrspace(1)* %gep.in
 
   %result = call i1 @llvm.AMDGPU.class.f32(float 1024.0, i32 %b) #1
   %sext = sext i1 %result to i32
@@ -292,7 +292,7 @@ define void @v_test_class_full_mask_f64(
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load double addrspace(1)* %in
+  %a = load double, double addrspace(1)* %in
 
   %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 511) #1
   %sext = sext i1 %result to i32
@@ -308,7 +308,7 @@ define void @test_class_inline_imm_const
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %b = load i32 addrspace(1)* %gep.in
+  %b = load i32, i32 addrspace(1)* %gep.in
 
   %result = call i1 @llvm.AMDGPU.class.f64(double 1.0, i32 %b) #1
   %sext = sext i1 %result to i32
@@ -323,7 +323,7 @@ define void @test_class_lit_constant_dyn
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %b = load i32 addrspace(1)* %gep.in
+  %b = load i32, i32 addrspace(1)* %gep.in
 
   %result = call i1 @llvm.AMDGPU.class.f64(double 1024.0, i32 %b) #1
   %sext = sext i1 %result to i32
@@ -340,7 +340,7 @@ define void @test_fold_or_class_f32_0(i3
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load float addrspace(1)* %gep.in
+  %a = load float, float addrspace(1)* %gep.in
 
   %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
   %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 3) #1
@@ -360,7 +360,7 @@ define void @test_fold_or3_class_f32_0(i
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load float addrspace(1)* %gep.in
+  %a = load float, float addrspace(1)* %gep.in
 
   %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
   %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 2) #1
@@ -383,7 +383,7 @@ define void @test_fold_or_all_tests_clas
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load float addrspace(1)* %gep.in
+  %a = load float, float addrspace(1)* %gep.in
 
   %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
   %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 2) #1
@@ -418,7 +418,7 @@ define void @test_fold_or_class_f32_1(i3
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load float addrspace(1)* %gep.in
+  %a = load float, float addrspace(1)* %gep.in
 
   %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
   %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 8) #1
@@ -438,7 +438,7 @@ define void @test_fold_or_class_f32_2(i3
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load float addrspace(1)* %gep.in
+  %a = load float, float addrspace(1)* %gep.in
 
   %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 7) #1
   %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 7) #1
@@ -458,7 +458,7 @@ define void @test_no_fold_or_class_f32_0
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load float addrspace(1)* %gep.in
+  %a = load float, float addrspace(1)* %gep.in
 
   %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
   %class1 = call i1 @llvm.AMDGPU.class.f32(float %b, i32 8) #1

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cube.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cube.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cube.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cube.ll Fri Feb 27 15:17:42 2015
@@ -8,15 +8,15 @@
 ; CHECK: CUBE * T{{[0-9]}}.W
 define void @cube() #0 {
 main_body:
-  %0 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+  %0 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
   %1 = extractelement <4 x float> %0, i32 3
-  %2 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+  %2 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
   %3 = extractelement <4 x float> %2, i32 0
   %4 = fdiv float %3, %1
-  %5 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+  %5 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
   %6 = extractelement <4 x float> %5, i32 1
   %7 = fdiv float %6, %1
-  %8 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+  %8 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
   %9 = extractelement <4 x float> %8, i32 2
   %10 = fdiv float %9, %1
   %11 = insertelement <4 x float> undef, float %4, i32 0

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ declare float @llvm.AMDGPU.cvt.f32.ubyte
 ; SI-LABEL: {{^}}test_unpack_byte0_to_float:
 ; SI: v_cvt_f32_ubyte0
 define void @test_unpack_byte0_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %cvt = call float @llvm.AMDGPU.cvt.f32.ubyte0(i32 %val) nounwind readnone
   store float %cvt, float addrspace(1)* %out, align 4
   ret void
@@ -18,7 +18,7 @@ define void @test_unpack_byte0_to_float(
 ; SI-LABEL: {{^}}test_unpack_byte1_to_float:
 ; SI: v_cvt_f32_ubyte1
 define void @test_unpack_byte1_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %cvt = call float @llvm.AMDGPU.cvt.f32.ubyte1(i32 %val) nounwind readnone
   store float %cvt, float addrspace(1)* %out, align 4
   ret void
@@ -27,7 +27,7 @@ define void @test_unpack_byte1_to_float(
 ; SI-LABEL: {{^}}test_unpack_byte2_to_float:
 ; SI: v_cvt_f32_ubyte2
 define void @test_unpack_byte2_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %cvt = call float @llvm.AMDGPU.cvt.f32.ubyte2(i32 %val) nounwind readnone
   store float %cvt, float addrspace(1)* %out, align 4
   ret void
@@ -36,7 +36,7 @@ define void @test_unpack_byte2_to_float(
 ; SI-LABEL: {{^}}test_unpack_byte3_to_float:
 ; SI: v_cvt_f32_ubyte3
 define void @test_unpack_byte3_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %cvt = call float @llvm.AMDGPU.cvt.f32.ubyte3(i32 %val) nounwind readnone
   store float %cvt, float addrspace(1)* %out, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll Fri Feb 27 15:17:42 2015
@@ -122,9 +122,9 @@ define void @test_div_fmas_f32_logical_c
   %gep.c = getelementptr float, float addrspace(1)* %gep.a, i32 2
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 2
 
-  %a = load float addrspace(1)* %gep.a
-  %b = load float addrspace(1)* %gep.b
-  %c = load float addrspace(1)* %gep.c
+  %a = load float, float addrspace(1)* %gep.a
+  %b = load float, float addrspace(1)* %gep.b
+  %c = load float, float addrspace(1)* %gep.c
 
   %cmp0 = icmp eq i32 %tid, 0
   %cmp1 = icmp ne i32 %d, 0
@@ -159,15 +159,15 @@ entry:
   %gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1
   %gep.c = getelementptr float, float addrspace(1)* %gep.a, i32 2
 
-  %a = load float addrspace(1)* %gep.a
-  %b = load float addrspace(1)* %gep.b
-  %c = load float addrspace(1)* %gep.c
+  %a = load float, float addrspace(1)* %gep.a
+  %b = load float, float addrspace(1)* %gep.b
+  %c = load float, float addrspace(1)* %gep.c
 
   %cmp0 = icmp eq i32 %tid, 0
   br i1 %cmp0, label %bb, label %exit
 
 bb:
-  %val = load i32 addrspace(1)* %dummy
+  %val = load i32, i32 addrspace(1)* %dummy
   %cmp1 = icmp ne i32 %val, 0
   br label %exit
 

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll Fri Feb 27 15:17:42 2015
@@ -16,8 +16,8 @@ define void @test_div_scale_f32_1(float
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
   %result0 = extractvalue { float, i1 } %result, 0
@@ -36,8 +36,8 @@ define void @test_div_scale_f32_2(float
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 true) nounwind readnone
   %result0 = extractvalue { float, i1 } %result, 0
@@ -56,8 +56,8 @@ define void @test_div_scale_f64_1(double
   %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0, align 8
-  %b = load double addrspace(1)* %gep.1, align 8
+  %a = load double, double addrspace(1)* %gep.0, align 8
+  %b = load double, double addrspace(1)* %gep.1, align 8
 
   %result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 false) nounwind readnone
   %result0 = extractvalue { double, i1 } %result, 0
@@ -76,8 +76,8 @@ define void @test_div_scale_f64_2(double
   %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
 
-  %a = load double addrspace(1)* %gep.0, align 8
-  %b = load double addrspace(1)* %gep.1, align 8
+  %a = load double, double addrspace(1)* %gep.0, align 8
+  %b = load double, double addrspace(1)* %gep.1, align 8
 
   %result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
   %result0 = extractvalue { double, i1 } %result, 0
@@ -95,7 +95,7 @@ define void @test_div_scale_f32_scalar_n
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
 
-  %b = load float addrspace(1)* %gep, align 4
+  %b = load float, float addrspace(1)* %gep, align 4
 
   %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
   %result0 = extractvalue { float, i1 } %result, 0
@@ -113,7 +113,7 @@ define void @test_div_scale_f32_scalar_n
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
 
-  %b = load float addrspace(1)* %gep, align 4
+  %b = load float, float addrspace(1)* %gep, align 4
 
   %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 true) nounwind readnone
   %result0 = extractvalue { float, i1 } %result, 0
@@ -131,7 +131,7 @@ define void @test_div_scale_f32_scalar_d
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
 
-  %a = load float addrspace(1)* %gep, align 4
+  %a = load float, float addrspace(1)* %gep, align 4
 
   %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
   %result0 = extractvalue { float, i1 } %result, 0
@@ -149,7 +149,7 @@ define void @test_div_scale_f32_scalar_d
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
 
-  %a = load float addrspace(1)* %gep, align 4
+  %a = load float, float addrspace(1)* %gep, align 4
 
   %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 true) nounwind readnone
   %result0 = extractvalue { float, i1 } %result, 0
@@ -167,7 +167,7 @@ define void @test_div_scale_f64_scalar_n
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
 
-  %b = load double addrspace(1)* %gep, align 8
+  %b = load double, double addrspace(1)* %gep, align 8
 
   %result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 false) nounwind readnone
   %result0 = extractvalue { double, i1 } %result, 0
@@ -185,7 +185,7 @@ define void @test_div_scale_f64_scalar_n
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
 
-  %b = load double addrspace(1)* %gep, align 8
+  %b = load double, double addrspace(1)* %gep, align 8
 
   %result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
   %result0 = extractvalue { double, i1 } %result, 0
@@ -203,7 +203,7 @@ define void @test_div_scale_f64_scalar_d
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
 
-  %a = load double addrspace(1)* %gep, align 8
+  %a = load double, double addrspace(1)* %gep, align 8
 
   %result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 false) nounwind readnone
   %result0 = extractvalue { double, i1 } %result, 0
@@ -221,7 +221,7 @@ define void @test_div_scale_f64_scalar_d
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
 
-  %a = load double addrspace(1)* %gep, align 8
+  %a = load double, double addrspace(1)* %gep, align 8
 
   %result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
   %result0 = extractvalue { double, i1 } %result, 0
@@ -295,7 +295,7 @@ define void @test_div_scale_f64_all_scal
 define void @test_div_scale_f32_inline_imm_num(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
-  %a = load float addrspace(1)* %gep.0, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
 
   %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float 1.0, float %a, i1 false) nounwind readnone
   %result0 = extractvalue { float, i1 } %result, 0
@@ -311,7 +311,7 @@ define void @test_div_scale_f32_inline_i
 define void @test_div_scale_f32_inline_imm_den(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
-  %a = load float addrspace(1)* %gep.0, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
 
   %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float 2.0, i1 false) nounwind readnone
   %result0 = extractvalue { float, i1 } %result, 0
@@ -330,8 +330,8 @@ define void @test_div_scale_f32_fabs_num
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %a.fabs = call float @llvm.fabs.f32(float %a) nounwind readnone
 
@@ -352,8 +352,8 @@ define void @test_div_scale_f32_fabs_den
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %b.fabs = call float @llvm.fabs.f32(float %b) nounwind readnone
 

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.fract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.fract.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.fract.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.fract.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ declare float @llvm.AMDIL.fraction.f32(f
 ; SI: v_fract_f32
 ; EG: FRACT
 define void @fract_f32(float addrspace(1)* %out, float addrspace(1)* %src) nounwind {
-  %val = load float addrspace(1)* %src, align 4
+  %val = load float, float addrspace(1)* %src, align 4
   %fract = call float @llvm.AMDGPU.fract.f32(float %val) nounwind readnone
   store float %fract, float addrspace(1)* %out, align 4
   ret void
@@ -21,7 +21,7 @@ define void @fract_f32(float addrspace(1
 ; SI: v_fract_f32
 ; EG: FRACT
 define void @fract_f32_legacy_amdil(float addrspace(1)* %out, float addrspace(1)* %src) nounwind {
-  %val = load float addrspace(1)* %src, align 4
+  %val = load float, float addrspace(1)* %src, align 4
   %fract = call float @llvm.AMDIL.fraction.f32(float %val) nounwind readnone
   store float %fract, float addrspace(1)* %out, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imax.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imax.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imax.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 ; SI: v_max_i32_e32
 define void @vector_imax(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
 main_body:
-  %load = load i32 addrspace(1)* %in, align 4
+  %load = load i32, i32 addrspace(1)* %in, align 4
   %max = call i32 @llvm.AMDGPU.imax(i32 %p0, i32 %load)
   %bc = bitcast i32 %max to float
   call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imin.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imin.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imin.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imin.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 ; SI: v_min_i32_e32
 define void @vector_imin(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
 main_body:
-  %load = load i32 addrspace(1)* %in, align 4
+  %load = load i32, i32 addrspace(1)* %in, align 4
   %min = call i32 @llvm.AMDGPU.imin(i32 %p0, i32 %load)
   %bc = bitcast i32 %min to float
   call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.tex.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.tex.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.tex.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.tex.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@
 ;CHECK: TEX_SAMPLE T{{[0-9]+\.XYZW, T[0-9]+\.XYZW}} RID:0 SID:0 CT:NNUN
 
 define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
-   %addr = load <4 x float> addrspace(1)* %in
+   %addr = load <4 x float>, <4 x float> addrspace(1)* %in
    %res1 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %addr, i32 0, i32 0, i32 1)
    %res2 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %res1, i32 0, i32 0, i32 2)
    %res3 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %res2, i32 0, i32 0, i32 3)

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ declare double @llvm.AMDGPU.trig.preop.f
 ; SI: buffer_store_dwordx2 [[RESULT]],
 ; SI: s_endpgm
 define void @test_trig_preop_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
-  %a = load double addrspace(1)* %aptr, align 8
-  %b = load i32 addrspace(1)* %bptr, align 4
+  %a = load double, double addrspace(1)* %aptr, align 8
+  %b = load i32, i32 addrspace(1)* %bptr, align 4
   %result = call double @llvm.AMDGPU.trig.preop.f64(double %a, i32 %b) nounwind readnone
   store double %result, double addrspace(1)* %out, align 8
   ret void
@@ -23,7 +23,7 @@ define void @test_trig_preop_f64(double
 ; SI: buffer_store_dwordx2 [[RESULT]],
 ; SI: s_endpgm
 define void @test_trig_preop_f64_imm_segment(double addrspace(1)* %out, double addrspace(1)* %aptr) nounwind {
-  %a = load double addrspace(1)* %aptr, align 8
+  %a = load double, double addrspace(1)* %aptr, align 8
   %result = call double @llvm.AMDGPU.trig.preop.f64(double %a, i32 7) nounwind readnone
   store double %result, double addrspace(1)* %out, align 8
   ret void

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umad24.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umad24.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umad24.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umad24.ll Fri Feb 27 15:17:42 2015
@@ -29,8 +29,8 @@ define void @commute_umad24(i32 addrspac
   %src0.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
   %src2.gep = getelementptr i32, i32 addrspace(1)* %src0.gep, i32 1
 
-  %src0 = load i32 addrspace(1)* %src0.gep, align 4
-  %src2 = load i32 addrspace(1)* %src2.gep, align 4
+  %src0 = load i32, i32 addrspace(1)* %src0.gep, align 4
+  %src2 = load i32, i32 addrspace(1)* %src2.gep, align 4
   %mad = call i32 @llvm.AMDGPU.umad24(i32 %src0, i32 4, i32 %src2) nounwind readnone
   store i32 %mad, i32 addrspace(1)* %out.gep, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umax.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umax.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umax.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 ; SI: v_max_u32_e32
 define void @vector_umax(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
 main_body:
-  %load = load i32 addrspace(1)* %in, align 4
+  %load = load i32, i32 addrspace(1)* %in, align 4
   %max = call i32 @llvm.AMDGPU.umax(i32 %p0, i32 %load)
   %bc = bitcast i32 %max to float
   call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
@@ -28,7 +28,7 @@ entry:
 ; SI-NOT: and
 ; SI: buffer_store_short [[RESULT]],
 define void @trunc_zext_umax(i16 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %src) nounwind {
-  %tmp5 = load i8 addrspace(1)* %src, align 1
+  %tmp5 = load i8, i8 addrspace(1)* %src, align 1
   %tmp2 = zext i8 %tmp5 to i32
   %tmp3 = tail call i32 @llvm.AMDGPU.umax(i32 %tmp2, i32 0) nounwind readnone
   %tmp4 = trunc i32 %tmp3 to i8

Modified: llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umin.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umin.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umin.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umin.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
 ; SI: v_min_u32_e32
 define void @vector_umin(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
 main_body:
-  %load = load i32 addrspace(1)* %in, align 4
+  %load = load i32, i32 addrspace(1)* %in, align 4
   %min = call i32 @llvm.AMDGPU.umin(i32 %p0, i32 %load)
   %bc = bitcast i32 %min to float
   call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
@@ -28,7 +28,7 @@ entry:
 ; SI-NOT: and
 ; SI: buffer_store_short [[RESULT]],
 define void @trunc_zext_umin(i16 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %src) nounwind {
-  %tmp5 = load i8 addrspace(1)* %src, align 1
+  %tmp5 = load i8, i8 addrspace(1)* %src, align 1
   %tmp2 = zext i8 %tmp5 to i32
   %tmp3 = tail call i32 @llvm.AMDGPU.umin(i32 %tmp2, i32 0) nounwind readnone
   %tmp4 = trunc i32 %tmp3 to i8

Modified: llvm/trunk/test/CodeGen/R600/llvm.SI.imageload.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.SI.imageload.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.SI.imageload.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.SI.imageload.ll Fri Feb 27 15:17:42 2015
@@ -89,15 +89,15 @@ define void @test(i32 %a1, i32 %a2, i32
 define void @vgpr_coords(float addrspace(2)* addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
 main_body:
   %20 = getelementptr float addrspace(2)*, float addrspace(2)* addrspace(2)* %0, i32 0
-  %21 = load float addrspace(2)* addrspace(2)* %20, !tbaa !2
+  %21 = load float addrspace(2)*, float addrspace(2)* addrspace(2)* %20, !tbaa !2
   %22 = getelementptr float, float addrspace(2)* %21, i32 0
-  %23 = load float addrspace(2)* %22, !tbaa !2, !invariant.load !1
+  %23 = load float, float addrspace(2)* %22, !tbaa !2, !invariant.load !1
   %24 = getelementptr float, float addrspace(2)* %21, i32 1
-  %25 = load float addrspace(2)* %24, !tbaa !2, !invariant.load !1
+  %25 = load float, float addrspace(2)* %24, !tbaa !2, !invariant.load !1
   %26 = getelementptr float, float addrspace(2)* %21, i32 4
-  %27 = load float addrspace(2)* %26, !tbaa !2, !invariant.load !1
+  %27 = load float, float addrspace(2)* %26, !tbaa !2, !invariant.load !1
   %28 = getelementptr <32 x i8>, <32 x i8> addrspace(2)* %2, i32 0
-  %29 = load <32 x i8> addrspace(2)* %28, !tbaa !2
+  %29 = load <32 x i8>, <32 x i8> addrspace(2)* %28, !tbaa !2
   %30 = bitcast float %27 to i32
   %31 = bitcast float %23 to i32
   %32 = bitcast float %25 to i32

Modified: llvm/trunk/test/CodeGen/R600/llvm.SI.load.dword.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.SI.load.dword.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.SI.load.dword.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.SI.load.dword.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@
 define void @main([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <32 x i8>] addrspace(2)* byval %arg2, [2 x <16 x i8>] addrspace(2)* byval %arg3, [17 x <16 x i8>] addrspace(2)* inreg %arg4, [17 x <16 x i8>] addrspace(2)* inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9) #0 {
 main_body:
   %tmp = getelementptr [2 x <16 x i8>], [2 x <16 x i8>] addrspace(2)* %arg3, i64 0, i32 1
-  %tmp10 = load <16 x i8> addrspace(2)* %tmp, !tbaa !0
+  %tmp10 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
   %tmp11 = shl i32 %arg6, 2
   %tmp12 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0)
   %tmp13 = bitcast i32 %tmp12 to float

Modified: llvm/trunk/test/CodeGen/R600/llvm.amdgpu.dp4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.amdgpu.dp4.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.amdgpu.dp4.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.amdgpu.dp4.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
 declare float @llvm.AMDGPU.dp4(<4 x float>, <4 x float>) nounwind readnone
 
 define void @test_dp4(float addrspace(1)* %out, <4 x float> addrspace(1)* %a, <4 x float> addrspace(1)* %b) nounwind {
-  %src0 = load <4 x float> addrspace(1)* %a, align 16
-  %src1 = load <4 x float> addrspace(1)* %b, align 16
+  %src0 = load <4 x float>, <4 x float> addrspace(1)* %a, align 16
+  %src1 = load <4 x float>, <4 x float> addrspace(1)* %b, align 16
   %dp4 = call float @llvm.AMDGPU.dp4(<4 x float> %src0, <4 x float> %src1) nounwind readnone
   store float %dp4, float addrspace(1)* %out, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/R600/llvm.round.f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.round.f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.round.f64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.round.f64.ll Fri Feb 27 15:17:42 2015
@@ -33,7 +33,7 @@ define void @v_round_f64(double addrspac
   %tid = call i32 @llvm.r600.read.tidig.x() #1
   %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr double, double addrspace(1)* %out, i32 %tid
-  %x = load double addrspace(1)* %gep
+  %x = load double, double addrspace(1)* %gep
   %result = call double @llvm.round.f64(double %x) #1
   store double %result, double addrspace(1)* %out.gep
   ret void

Modified: llvm/trunk/test/CodeGen/R600/load-i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/load-i1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/load-i1.ll (original)
+++ llvm/trunk/test/CodeGen/R600/load-i1.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@
 ; EG: VTX_READ_8
 ; EG: AND_INT
 define void @global_copy_i1_to_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   store i1 %load, i1 addrspace(1)* %out, align 1
   ret void
 }
@@ -26,7 +26,7 @@ define void @global_copy_i1_to_i1(i1 add
 ; EG: AND_INT
 ; EG: LDS_BYTE_WRITE
 define void @local_copy_i1_to_i1(i1 addrspace(3)* %out, i1 addrspace(3)* %in) nounwind {
-  %load = load i1 addrspace(3)* %in
+  %load = load i1, i1 addrspace(3)* %in
   store i1 %load, i1 addrspace(3)* %out, align 1
   ret void
 }
@@ -40,7 +40,7 @@ define void @local_copy_i1_to_i1(i1 addr
 ; EG: VTX_READ_8
 ; EG: AND_INT
 define void @constant_copy_i1_to_i1(i1 addrspace(1)* %out, i1 addrspace(2)* %in) nounwind {
-  %load = load i1 addrspace(2)* %in
+  %load = load i1, i1 addrspace(2)* %in
   store i1 %load, i1 addrspace(1)* %out, align 1
   ret void
 }
@@ -54,7 +54,7 @@ define void @constant_copy_i1_to_i1(i1 a
 ; EG: VTX_READ_8
 ; EG: BFE_INT
 define void @global_sextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = sext i1 %load to i32
   store i32 %ext, i32 addrspace(1)* %out, align 4
   ret void
@@ -66,7 +66,7 @@ define void @global_sextload_i1_to_i32(i
 ; SI: s_endpgm
 
 define void @global_zextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = zext i1 %load to i32
   store i32 %ext, i32 addrspace(1)* %out, align 4
   ret void
@@ -78,7 +78,7 @@ define void @global_zextload_i1_to_i32(i
 ; SI: buffer_store_dwordx2
 ; SI: s_endpgm
 define void @global_sextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = sext i1 %load to i64
   store i64 %ext, i64 addrspace(1)* %out, align 4
   ret void
@@ -90,7 +90,7 @@ define void @global_sextload_i1_to_i64(i
 ; SI: buffer_store_dwordx2
 ; SI: s_endpgm
 define void @global_zextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = zext i1 %load to i64
   store i64 %ext, i64 addrspace(1)* %out, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/R600/load-input-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/load-input-fold.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/load-input-fold.ll (original)
+++ llvm/trunk/test/CodeGen/R600/load-input-fold.ll Fri Feb 27 15:17:42 2015
@@ -14,71 +14,71 @@ main_body:
   %9 = extractelement <4 x float> %reg3, i32 1
   %10 = extractelement <4 x float> %reg3, i32 2
   %11 = extractelement <4 x float> %reg3, i32 3
-  %12 = load <4 x float> addrspace(8)* null
+  %12 = load <4 x float>, <4 x float> addrspace(8)* null
   %13 = extractelement <4 x float> %12, i32 0
   %14 = fmul float %0, %13
-  %15 = load <4 x float> addrspace(8)* null
+  %15 = load <4 x float>, <4 x float> addrspace(8)* null
   %16 = extractelement <4 x float> %15, i32 1
   %17 = fmul float %0, %16
-  %18 = load <4 x float> addrspace(8)* null
+  %18 = load <4 x float>, <4 x float> addrspace(8)* null
   %19 = extractelement <4 x float> %18, i32 2
   %20 = fmul float %0, %19
-  %21 = load <4 x float> addrspace(8)* null
+  %21 = load <4 x float>, <4 x float> addrspace(8)* null
   %22 = extractelement <4 x float> %21, i32 3
   %23 = fmul float %0, %22
-  %24 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %24 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %25 = extractelement <4 x float> %24, i32 0
   %26 = fmul float %1, %25
   %27 = fadd float %26, %14
-  %28 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %28 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %29 = extractelement <4 x float> %28, i32 1
   %30 = fmul float %1, %29
   %31 = fadd float %30, %17
-  %32 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %32 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %33 = extractelement <4 x float> %32, i32 2
   %34 = fmul float %1, %33
   %35 = fadd float %34, %20
-  %36 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %36 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %37 = extractelement <4 x float> %36, i32 3
   %38 = fmul float %1, %37
   %39 = fadd float %38, %23
-  %40 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %40 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %41 = extractelement <4 x float> %40, i32 0
   %42 = fmul float %2, %41
   %43 = fadd float %42, %27
-  %44 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %44 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %45 = extractelement <4 x float> %44, i32 1
   %46 = fmul float %2, %45
   %47 = fadd float %46, %31
-  %48 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %48 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %49 = extractelement <4 x float> %48, i32 2
   %50 = fmul float %2, %49
   %51 = fadd float %50, %35
-  %52 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %52 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %53 = extractelement <4 x float> %52, i32 3
   %54 = fmul float %2, %53
   %55 = fadd float %54, %39
-  %56 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+  %56 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
   %57 = extractelement <4 x float> %56, i32 0
   %58 = fmul float %3, %57
   %59 = fadd float %58, %43
-  %60 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+  %60 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
   %61 = extractelement <4 x float> %60, i32 1
   %62 = fmul float %3, %61
   %63 = fadd float %62, %47
-  %64 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+  %64 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
   %65 = extractelement <4 x float> %64, i32 2
   %66 = fmul float %3, %65
   %67 = fadd float %66, %51
-  %68 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+  %68 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
   %69 = extractelement <4 x float> %68, i32 3
   %70 = fmul float %3, %69
   %71 = fadd float %70, %55
-  %72 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+  %72 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
   %73 = extractelement <4 x float> %72, i32 0
-  %74 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+  %74 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
   %75 = extractelement <4 x float> %74, i32 1
-  %76 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+  %76 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
   %77 = extractelement <4 x float> %76, i32 2
   %78 = insertelement <4 x float> undef, float %4, i32 0
   %79 = insertelement <4 x float> %78, float %5, i32 1

Modified: llvm/trunk/test/CodeGen/R600/load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/load.ll (original)
+++ llvm/trunk/test/CodeGen/R600/load.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@
 
 ; SI: buffer_load_ubyte v{{[0-9]+}},
 define void @load_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
-  %1 = load i8 addrspace(1)* %in
+  %1 = load i8, i8 addrspace(1)* %in
   %2 = zext i8 %1 to i32
   store i32 %2, i32 addrspace(1)* %out
   ret void
@@ -28,7 +28,7 @@ define void @load_i8(i32 addrspace(1)* %
 ; SI: buffer_load_sbyte
 define void @load_i8_sext(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
 entry:
-  %0 = load i8 addrspace(1)* %in
+  %0 = load i8, i8 addrspace(1)* %in
   %1 = sext i8 %0 to i32
   store i32 %1, i32 addrspace(1)* %out
   ret void
@@ -41,7 +41,7 @@ entry:
 ; SI: buffer_load_ubyte
 define void @load_v2i8(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) {
 entry:
-  %0 = load <2 x i8> addrspace(1)* %in
+  %0 = load <2 x i8>, <2 x i8> addrspace(1)* %in
   %1 = zext <2 x i8> %0 to <2 x i32>
   store <2 x i32> %1, <2 x i32> addrspace(1)* %out
   ret void
@@ -62,7 +62,7 @@ entry:
 ; SI: buffer_load_sbyte
 define void @load_v2i8_sext(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) {
 entry:
-  %0 = load <2 x i8> addrspace(1)* %in
+  %0 = load <2 x i8>, <2 x i8> addrspace(1)* %in
   %1 = sext <2 x i8> %0 to <2 x i32>
   store <2 x i32> %1, <2 x i32> addrspace(1)* %out
   ret void
@@ -79,7 +79,7 @@ entry:
 ; SI: buffer_load_ubyte
 define void @load_v4i8(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) {
 entry:
-  %0 = load <4 x i8> addrspace(1)* %in
+  %0 = load <4 x i8>, <4 x i8> addrspace(1)* %in
   %1 = zext <4 x i8> %0 to <4 x i32>
   store <4 x i32> %1, <4 x i32> addrspace(1)* %out
   ret void
@@ -112,7 +112,7 @@ entry:
 ; SI: buffer_load_sbyte
 define void @load_v4i8_sext(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) {
 entry:
-  %0 = load <4 x i8> addrspace(1)* %in
+  %0 = load <4 x i8>, <4 x i8> addrspace(1)* %in
   %1 = sext <4 x i8> %0 to <4 x i32>
   store <4 x i32> %1, <4 x i32> addrspace(1)* %out
   ret void
@@ -124,7 +124,7 @@ entry:
 ; SI: buffer_load_ushort
 define void @load_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
 entry:
-  %0 = load i16	 addrspace(1)* %in
+  %0 = load i16	, i16	 addrspace(1)* %in
   %1 = zext i16 %0 to i32
   store i32 %1, i32 addrspace(1)* %out
   ret void
@@ -139,7 +139,7 @@ entry:
 ; SI: buffer_load_sshort
 define void @load_i16_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
 entry:
-  %0 = load i16 addrspace(1)* %in
+  %0 = load i16, i16 addrspace(1)* %in
   %1 = sext i16 %0 to i32
   store i32 %1, i32 addrspace(1)* %out
   ret void
@@ -152,7 +152,7 @@ entry:
 ; SI: buffer_load_ushort
 define void @load_v2i16(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
 entry:
-  %0 = load <2 x i16> addrspace(1)* %in
+  %0 = load <2 x i16>, <2 x i16> addrspace(1)* %in
   %1 = zext <2 x i16> %0 to <2 x i32>
   store <2 x i32> %1, <2 x i32> addrspace(1)* %out
   ret void
@@ -173,7 +173,7 @@ entry:
 ; SI: buffer_load_sshort
 define void @load_v2i16_sext(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
 entry:
-  %0 = load <2 x i16> addrspace(1)* %in
+  %0 = load <2 x i16>, <2 x i16> addrspace(1)* %in
   %1 = sext <2 x i16> %0 to <2 x i32>
   store <2 x i32> %1, <2 x i32> addrspace(1)* %out
   ret void
@@ -190,7 +190,7 @@ entry:
 ; SI: buffer_load_ushort
 define void @load_v4i16(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
 entry:
-  %0 = load <4 x i16> addrspace(1)* %in
+  %0 = load <4 x i16>, <4 x i16> addrspace(1)* %in
   %1 = zext <4 x i16> %0 to <4 x i32>
   store <4 x i32> %1, <4 x i32> addrspace(1)* %out
   ret void
@@ -223,7 +223,7 @@ entry:
 ; SI: buffer_load_sshort
 define void @load_v4i16_sext(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
 entry:
-  %0 = load <4 x i16> addrspace(1)* %in
+  %0 = load <4 x i16>, <4 x i16> addrspace(1)* %in
   %1 = sext <4 x i16> %0 to <4 x i32>
   store <4 x i32> %1, <4 x i32> addrspace(1)* %out
   ret void
@@ -236,7 +236,7 @@ entry:
 ; SI: buffer_load_dword v{{[0-9]+}}
 define void @load_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
 entry:
-  %0 = load i32 addrspace(1)* %in
+  %0 = load i32, i32 addrspace(1)* %in
   store i32 %0, i32 addrspace(1)* %out
   ret void
 }
@@ -248,7 +248,7 @@ entry:
 ; SI: buffer_load_dword v{{[0-9]+}}
 define void @load_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
 entry:
-  %0 = load float addrspace(1)* %in
+  %0 = load float, float addrspace(1)* %in
   store float %0, float addrspace(1)* %out
   ret void
 }
@@ -260,7 +260,7 @@ entry:
 ; SI: buffer_load_dwordx2
 define void @load_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) {
 entry:
-  %0 = load <2 x float> addrspace(1)* %in
+  %0 = load <2 x float>, <2 x float> addrspace(1)* %in
   store <2 x float> %0, <2 x float> addrspace(1)* %out
   ret void
 }
@@ -270,7 +270,7 @@ entry:
 ; SI: buffer_load_dwordx2
 define void @load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
 entry:
-  %0 = load i64 addrspace(1)* %in
+  %0 = load i64, i64 addrspace(1)* %in
   store i64 %0, i64 addrspace(1)* %out
   ret void
 }
@@ -284,7 +284,7 @@ entry:
 
 define void @load_i64_sext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
 entry:
-  %0 = load i32 addrspace(1)* %in
+  %0 = load i32, i32 addrspace(1)* %in
   %1 = sext i32 %0 to i64
   store i64 %1, i64 addrspace(1)* %out
   ret void
@@ -295,7 +295,7 @@ entry:
 ; R600: MEM_RAT
 define void @load_i64_zext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
 entry:
-  %0 = load i32 addrspace(1)* %in
+  %0 = load i32, i32 addrspace(1)* %in
   %1 = zext i32 %0 to i64
   store i64 %1, i64 addrspace(1)* %out
   ret void
@@ -315,7 +315,7 @@ entry:
 ; SI: buffer_load_dword
 define void @load_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(1)* %in) {
 entry:
-  %0 = load <8 x i32> addrspace(1)* %in
+  %0 = load <8 x i32>, <8 x i32> addrspace(1)* %in
   store <8 x i32> %0, <8 x i32> addrspace(1)* %out
   ret void
 }
@@ -344,7 +344,7 @@ entry:
 ; SI: buffer_load_dword
 define void @load_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(1)* %in) {
 entry:
-  %0 = load <16 x i32> addrspace(1)* %in
+  %0 = load <16 x i32>, <16 x i32> addrspace(1)* %in
   store <16 x i32> %0, <16 x i32> addrspace(1)* %out
   ret void
 }
@@ -363,7 +363,7 @@ entry:
 ; SI: buffer_load_sbyte v{{[0-9]+}},
 define void @load_const_i8_sext(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
 entry:
-  %0 = load i8 addrspace(2)* %in
+  %0 = load i8, i8 addrspace(2)* %in
   %1 = sext i8 %0 to i32
   store i32 %1, i32 addrspace(1)* %out
   ret void
@@ -375,7 +375,7 @@ entry:
 ; SI: buffer_load_ubyte v{{[0-9]+}},
 define void @load_const_i8_aligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
 entry:
-  %0 = load i8 addrspace(2)* %in
+  %0 = load i8, i8 addrspace(2)* %in
   %1 = zext i8 %0 to i32
   store i32 %1, i32 addrspace(1)* %out
   ret void
@@ -388,7 +388,7 @@ entry:
 define void @load_const_i8_unaligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
 entry:
   %0 = getelementptr i8, i8 addrspace(2)* %in, i32 1
-  %1 = load i8 addrspace(2)* %0
+  %1 = load i8, i8 addrspace(2)* %0
   %2 = zext i8 %1 to i32
   store i32 %2, i32 addrspace(1)* %out
   ret void
@@ -404,7 +404,7 @@ entry:
 ; SI: buffer_load_sshort
 define void @load_const_i16_sext(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
 entry:
-  %0 = load i16 addrspace(2)* %in
+  %0 = load i16, i16 addrspace(2)* %in
   %1 = sext i16 %0 to i32
   store i32 %1, i32 addrspace(1)* %out
   ret void
@@ -416,7 +416,7 @@ entry:
 ; SI: buffer_load_ushort
 define void @load_const_i16_aligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
 entry:
-  %0 = load i16 addrspace(2)* %in
+  %0 = load i16, i16 addrspace(2)* %in
   %1 = zext i16 %0 to i32
   store i32 %1, i32 addrspace(1)* %out
   ret void
@@ -429,7 +429,7 @@ entry:
 define void @load_const_i16_unaligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
 entry:
   %0 = getelementptr i16, i16 addrspace(2)* %in, i32 1
-  %1 = load i16 addrspace(2)* %0
+  %1 = load i16, i16 addrspace(2)* %0
   %2 = zext i16 %1 to i32
   store i32 %2, i32 addrspace(1)* %out
   ret void
@@ -442,7 +442,7 @@ entry:
 ; SI: s_load_dword s{{[0-9]+}}
 define void @load_const_addrspace_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
 entry:
-  %0 = load i32 addrspace(2)* %in
+  %0 = load i32, i32 addrspace(2)* %in
   store i32 %0, i32 addrspace(1)* %out
   ret void
 }
@@ -453,7 +453,7 @@ entry:
 
 ; SI: s_load_dword s{{[0-9]+}}
 define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(2)* %in) {
-  %1 = load float addrspace(2)* %in
+  %1 = load float, float addrspace(2)* %in
   store float %1, float addrspace(1)* %out
   ret void
 }
@@ -469,7 +469,7 @@ define void @load_const_addrspace_f32(fl
 ; SI: s_mov_b32 m0
 ; SI: ds_read_u8
 define void @load_i8_local(i32 addrspace(1)* %out, i8 addrspace(3)* %in) {
-  %1 = load i8 addrspace(3)* %in
+  %1 = load i8, i8 addrspace(3)* %in
   %2 = zext i8 %1 to i32
   store i32 %2, i32 addrspace(1)* %out
   ret void
@@ -483,7 +483,7 @@ define void @load_i8_local(i32 addrspace
 ; SI: ds_read_i8
 define void @load_i8_sext_local(i32 addrspace(1)* %out, i8 addrspace(3)* %in) {
 entry:
-  %0 = load i8 addrspace(3)* %in
+  %0 = load i8, i8 addrspace(3)* %in
   %1 = sext i8 %0 to i32
   store i32 %1, i32 addrspace(1)* %out
   ret void
@@ -498,7 +498,7 @@ entry:
 ; SI: ds_read_u8
 define void @load_v2i8_local(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(3)* %in) {
 entry:
-  %0 = load <2 x i8> addrspace(3)* %in
+  %0 = load <2 x i8>, <2 x i8> addrspace(3)* %in
   %1 = zext <2 x i8> %0 to <2 x i32>
   store <2 x i32> %1, <2 x i32> addrspace(1)* %out
   ret void
@@ -515,7 +515,7 @@ entry:
 ; SI: ds_read_i8
 define void @load_v2i8_sext_local(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(3)* %in) {
 entry:
-  %0 = load <2 x i8> addrspace(3)* %in
+  %0 = load <2 x i8>, <2 x i8> addrspace(3)* %in
   %1 = sext <2 x i8> %0 to <2 x i32>
   store <2 x i32> %1, <2 x i32> addrspace(1)* %out
   ret void
@@ -534,7 +534,7 @@ entry:
 ; SI: ds_read_u8
 define void @load_v4i8_local(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(3)* %in) {
 entry:
-  %0 = load <4 x i8> addrspace(3)* %in
+  %0 = load <4 x i8>, <4 x i8> addrspace(3)* %in
   %1 = zext <4 x i8> %0 to <4 x i32>
   store <4 x i32> %1, <4 x i32> addrspace(1)* %out
   ret void
@@ -557,7 +557,7 @@ entry:
 ; SI: ds_read_i8
 define void @load_v4i8_sext_local(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(3)* %in) {
 entry:
-  %0 = load <4 x i8> addrspace(3)* %in
+  %0 = load <4 x i8>, <4 x i8> addrspace(3)* %in
   %1 = sext <4 x i8> %0 to <4 x i32>
   store <4 x i32> %1, <4 x i32> addrspace(1)* %out
   ret void
@@ -571,7 +571,7 @@ entry:
 ; SI: ds_read_u16
 define void @load_i16_local(i32 addrspace(1)* %out, i16 addrspace(3)* %in) {
 entry:
-  %0 = load i16	 addrspace(3)* %in
+  %0 = load i16	, i16	 addrspace(3)* %in
   %1 = zext i16 %0 to i32
   store i32 %1, i32 addrspace(1)* %out
   ret void
@@ -585,7 +585,7 @@ entry:
 ; SI: ds_read_i16
 define void @load_i16_sext_local(i32 addrspace(1)* %out, i16 addrspace(3)* %in) {
 entry:
-  %0 = load i16 addrspace(3)* %in
+  %0 = load i16, i16 addrspace(3)* %in
   %1 = sext i16 %0 to i32
   store i32 %1, i32 addrspace(1)* %out
   ret void
@@ -600,7 +600,7 @@ entry:
 ; SI: ds_read_u16
 define void @load_v2i16_local(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(3)* %in) {
 entry:
-  %0 = load <2 x i16> addrspace(3)* %in
+  %0 = load <2 x i16>, <2 x i16> addrspace(3)* %in
   %1 = zext <2 x i16> %0 to <2 x i32>
   store <2 x i32> %1, <2 x i32> addrspace(1)* %out
   ret void
@@ -617,7 +617,7 @@ entry:
 ; SI: ds_read_i16
 define void @load_v2i16_sext_local(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(3)* %in) {
 entry:
-  %0 = load <2 x i16> addrspace(3)* %in
+  %0 = load <2 x i16>, <2 x i16> addrspace(3)* %in
   %1 = sext <2 x i16> %0 to <2 x i32>
   store <2 x i32> %1, <2 x i32> addrspace(1)* %out
   ret void
@@ -636,7 +636,7 @@ entry:
 ; SI: ds_read_u16
 define void @load_v4i16_local(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(3)* %in) {
 entry:
-  %0 = load <4 x i16> addrspace(3)* %in
+  %0 = load <4 x i16>, <4 x i16> addrspace(3)* %in
   %1 = zext <4 x i16> %0 to <4 x i32>
   store <4 x i32> %1, <4 x i32> addrspace(1)* %out
   ret void
@@ -659,7 +659,7 @@ entry:
 ; SI: ds_read_i16
 define void @load_v4i16_sext_local(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(3)* %in) {
 entry:
-  %0 = load <4 x i16> addrspace(3)* %in
+  %0 = load <4 x i16>, <4 x i16> addrspace(3)* %in
   %1 = sext <4 x i16> %0 to <4 x i32>
   store <4 x i32> %1, <4 x i32> addrspace(1)* %out
   ret void
@@ -673,7 +673,7 @@ entry:
 ; SI: ds_read_b32
 define void @load_i32_local(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
 entry:
-  %0 = load i32 addrspace(3)* %in
+  %0 = load i32, i32 addrspace(3)* %in
   store i32 %0, i32 addrspace(1)* %out
   ret void
 }
@@ -685,7 +685,7 @@ entry:
 ; SI: ds_read_b32
 define void @load_f32_local(float addrspace(1)* %out, float addrspace(3)* %in) {
 entry:
-  %0 = load float addrspace(3)* %in
+  %0 = load float, float addrspace(3)* %in
   store float %0, float addrspace(1)* %out
   ret void
 }
@@ -698,7 +698,7 @@ entry:
 ; SI: ds_read_b64
 define void @load_v2f32_local(<2 x float> addrspace(1)* %out, <2 x float> addrspace(3)* %in) {
 entry:
-  %0 = load <2 x float> addrspace(3)* %in
+  %0 = load <2 x float>, <2 x float> addrspace(3)* %in
   store <2 x float> %0, <2 x float> addrspace(1)* %out
   ret void
 }
@@ -711,10 +711,10 @@ entry:
 ; SI-DAG: ds_read_b32
 ; SI-DAG: ds_read2_b32
 define void @load_i32_v2i32_local(<2 x i32> addrspace(1)* %out, i32 addrspace(3)* %in) {
-  %scalar = load i32 addrspace(3)* %in
+  %scalar = load i32, i32 addrspace(3)* %in
   %tmp0 = bitcast i32 addrspace(3)* %in to <2 x i32> addrspace(3)*
   %vec_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(3)* %tmp0, i32 2
-  %vec0 = load <2 x i32> addrspace(3)* %vec_ptr, align 4
+  %vec0 = load <2 x i32>, <2 x i32> addrspace(3)* %vec_ptr, align 4
   %vec1 = insertelement <2 x i32> <i32 0, i32 0>, i32 %scalar, i32 0
   %vec = add <2 x i32> %vec0, %vec1
   store <2 x i32> %vec, <2 x i32> addrspace(1)* %out
@@ -733,7 +733,7 @@ define void @load_i32_v2i32_local(<2 x i
 define void @load_i32_local_const_ptr(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
 entry:
   %tmp0 = getelementptr [512 x i32], [512 x i32] addrspace(3)* @lds, i32 0, i32 1
-  %tmp1 = load i32 addrspace(3)* %tmp0
+  %tmp1 = load i32, i32 addrspace(3)* %tmp0
   %tmp2 = getelementptr i32, i32 addrspace(1)* %out, i32 1
   store i32 %tmp1, i32 addrspace(1)* %tmp2
   ret void

Modified: llvm/trunk/test/CodeGen/R600/load.vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/load.vec.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/load.vec.ll (original)
+++ llvm/trunk/test/CodeGen/R600/load.vec.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
 ; SI: {{^}}load_v2i32:
 ; SI: buffer_load_dwordx2 v[{{[0-9]+:[0-9]+}}]
 define void @load_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
-  %a = load <2 x i32> addrspace(1) * %in
+  %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
   store <2 x i32> %a, <2 x i32> addrspace(1)* %out
   ret void
 }
@@ -19,7 +19,7 @@ define void @load_v2i32(<2 x i32> addrsp
 ; SI: {{^}}load_v4i32:
 ; SI: buffer_load_dwordx4 v[{{[0-9]+:[0-9]+}}]
 define void @load_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
-  %a = load <4 x i32> addrspace(1) * %in
+  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
   store <4 x i32> %a, <4 x i32> addrspace(1)* %out
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/load64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/load64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/load64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/load64.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 ; CHECK: buffer_load_dwordx2 v[{{[0-9]+:[0-9]+}}]
 ; CHECK: buffer_store_dwordx2 v[{{[0-9]+:[0-9]+}}]
 define void @load_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
-  %1 = load double addrspace(1)* %in
+  %1 = load double, double addrspace(1)* %in
   store double %1, double addrspace(1)* %out
   ret void
 }
@@ -15,7 +15,7 @@ define void @load_f64(double addrspace(1
 ; CHECK: buffer_load_dwordx2 v[{{[0-9]+:[0-9]+}}]
 ; CHECK: buffer_store_dwordx2 v[{{[0-9]+:[0-9]+}}]
 define void @load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
-  %tmp = load i64 addrspace(1)* %in
+  %tmp = load i64, i64 addrspace(1)* %in
   store i64 %tmp, i64 addrspace(1)* %out, align 8
   ret void
 }
@@ -25,7 +25,7 @@ define void @load_i64(i64 addrspace(1)*
 ; CHECK: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}]
 ; CHECK: buffer_store_dwordx2 v[{{[0-9]+:[0-9]+}}]
 define void @load_const_addrspace_f64(double addrspace(1)* %out, double addrspace(2)* %in) {
-  %1 = load double addrspace(2)* %in
+  %1 = load double, double addrspace(2)* %in
   store double %1, double addrspace(1)* %out
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/local-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/local-64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/local-64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/local-64.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 ; BOTH: buffer_store_dword [[REG]],
 define void @local_i32_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
   %gep = getelementptr i32, i32 addrspace(3)* %in, i32 7
-  %val = load i32 addrspace(3)* %gep, align 4
+  %val = load i32, i32 addrspace(3)* %gep, align 4
   store i32 %val, i32 addrspace(1)* %out, align 4
   ret void
 }
@@ -16,7 +16,7 @@ define void @local_i32_load(i32 addrspac
 ; BOTH: ds_read_b32 [[REG:v[0-9]+]], v{{[0-9]+}}
 ; BOTH: buffer_store_dword [[REG]],
 define void @local_i32_load_0_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
-  %val = load i32 addrspace(3)* %in, align 4
+  %val = load i32, i32 addrspace(3)* %in, align 4
   store i32 %val, i32 addrspace(1)* %out, align 4
   ret void
 }
@@ -27,7 +27,7 @@ define void @local_i32_load_0_offset(i32
 ; BOTH: buffer_store_byte [[REG]],
 define void @local_i8_load_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
   %gep = getelementptr i8, i8 addrspace(3)* %in, i32 65535
-  %val = load i8 addrspace(3)* %gep, align 4
+  %val = load i8, i8 addrspace(3)* %gep, align 4
   store i8 %val, i8 addrspace(1)* %out, align 4
   ret void
 }
@@ -42,7 +42,7 @@ define void @local_i8_load_i16_max_offse
 ; BOTH: buffer_store_byte [[REG]],
 define void @local_i8_load_over_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
   %gep = getelementptr i8, i8 addrspace(3)* %in, i32 65536
-  %val = load i8 addrspace(3)* %gep, align 4
+  %val = load i8, i8 addrspace(3)* %gep, align 4
   store i8 %val, i8 addrspace(1)* %out, align 4
   ret void
 }
@@ -53,7 +53,7 @@ define void @local_i8_load_over_i16_max_
 ; BOTH: buffer_store_dwordx2 [[REG]],
 define void @local_i64_load(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
   %gep = getelementptr i64, i64 addrspace(3)* %in, i32 7
-  %val = load i64 addrspace(3)* %gep, align 8
+  %val = load i64, i64 addrspace(3)* %gep, align 8
   store i64 %val, i64 addrspace(1)* %out, align 8
   ret void
 }
@@ -62,7 +62,7 @@ define void @local_i64_load(i64 addrspac
 ; BOTH: ds_read_b64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}
 ; BOTH: buffer_store_dwordx2 [[REG]],
 define void @local_i64_load_0_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
-  %val = load i64 addrspace(3)* %in, align 8
+  %val = load i64, i64 addrspace(3)* %in, align 8
   store i64 %val, i64 addrspace(1)* %out, align 8
   ret void
 }
@@ -73,7 +73,7 @@ define void @local_i64_load_0_offset(i64
 ; BOTH: buffer_store_dwordx2 [[REG]],
 define void @local_f64_load(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
   %gep = getelementptr double, double addrspace(3)* %in, i32 7
-  %val = load double addrspace(3)* %gep, align 8
+  %val = load double, double addrspace(3)* %gep, align 8
   store double %val, double addrspace(1)* %out, align 8
   ret void
 }
@@ -82,7 +82,7 @@ define void @local_f64_load(double addrs
 ; BOTH: ds_read_b64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}
 ; BOTH: buffer_store_dwordx2 [[REG]],
 define void @local_f64_load_0_offset(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
-  %val = load double addrspace(3)* %in, align 8
+  %val = load double, double addrspace(3)* %in, align 8
   store double %val, double addrspace(1)* %out, align 8
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/local-memory-two-objects.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/local-memory-two-objects.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/local-memory-two-objects.ll (original)
+++ llvm/trunk/test/CodeGen/R600/local-memory-two-objects.ll Fri Feb 27 15:17:42 2015
@@ -45,11 +45,11 @@ entry:
   %sub = sub nsw i32 3, %x.i
   call void @llvm.AMDGPU.barrier.local()
   %arrayidx2 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem0, i32 0, i32 %sub
-  %0 = load i32 addrspace(3)* %arrayidx2, align 4
+  %0 = load i32, i32 addrspace(3)* %arrayidx2, align 4
   %arrayidx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %x.i
   store i32 %0, i32 addrspace(1)* %arrayidx3, align 4
   %arrayidx4 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem1, i32 0, i32 %sub
-  %1 = load i32 addrspace(3)* %arrayidx4, align 4
+  %1 = load i32, i32 addrspace(3)* %arrayidx4, align 4
   %add = add nsw i32 %x.i, 4
   %arrayidx5 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %add
   store i32 %1, i32 addrspace(1)* %arrayidx5, align 4

Modified: llvm/trunk/test/CodeGen/R600/local-memory.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/local-memory.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/local-memory.ll (original)
+++ llvm/trunk/test/CodeGen/R600/local-memory.ll Fri Feb 27 15:17:42 2015
@@ -36,7 +36,7 @@ entry:
   %.add = select i1 %cmp, i32 0, i32 %add
   call void @llvm.AMDGPU.barrier.local()
   %arrayidx1 = getelementptr inbounds [128 x i32], [128 x i32] addrspace(3)* @local_memory.local_mem, i32 0, i32 %.add
-  %0 = load i32 addrspace(3)* %arrayidx1, align 4
+  %0 = load i32, i32 addrspace(3)* %arrayidx1, align 4
   %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %y.i
   store i32 %0, i32 addrspace(1)* %arrayidx2, align 4
   ret void

Modified: llvm/trunk/test/CodeGen/R600/loop-idiom.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/loop-idiom.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/loop-idiom.ll (original)
+++ llvm/trunk/test/CodeGen/R600/loop-idiom.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ for.body:
   %0 = phi i32 [0, %entry], [%4, %for.body]
   %1 = getelementptr i8, i8 addrspace(3)* %in, i32 %0
   %2 = getelementptr i8, i8* %dest, i32 %0
-  %3 = load i8 addrspace(3)* %1
+  %3 = load i8, i8 addrspace(3)* %1
   store i8 %3, i8* %2
   %4 = add i32 %0, 1
   %5 = icmp eq i32 %4, %size

Modified: llvm/trunk/test/CodeGen/R600/m0-spill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/m0-spill.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/m0-spill.ll (original)
+++ llvm/trunk/test/CodeGen/R600/m0-spill.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ main_body:
 
 if:
   %lds_ptr = getelementptr [64 x float], [64 x float] addrspace(3)* @lds, i32 0, i32 0
-  %lds_data = load float addrspace(3)* %lds_ptr
+  %lds_data = load float, float addrspace(3)* %lds_ptr
   br label %endif
 
 else:

Modified: llvm/trunk/test/CodeGen/R600/mad-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/mad-combine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/mad-combine.ll (original)
+++ llvm/trunk/test/CodeGen/R600/mad-combine.ll Fri Feb 27 15:17:42 2015
@@ -37,9 +37,9 @@ define void @combine_to_mad_f32_0(float
   %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %gep.0
-  %b = load float addrspace(1)* %gep.1
-  %c = load float addrspace(1)* %gep.2
+  %a = load float, float addrspace(1)* %gep.0
+  %b = load float, float addrspace(1)* %gep.1
+  %c = load float, float addrspace(1)* %gep.2
 
   %mul = fmul float %a, %b
   %fma = fadd float %mul, %c
@@ -76,10 +76,10 @@ define void @combine_to_mad_f32_0_2use(f
   %gep.out.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
   %gep.out.1 = getelementptr float, float addrspace(1)* %gep.out.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0
-  %b = load float addrspace(1)* %gep.1
-  %c = load float addrspace(1)* %gep.2
-  %d = load float addrspace(1)* %gep.3
+  %a = load float, float addrspace(1)* %gep.0
+  %b = load float, float addrspace(1)* %gep.1
+  %c = load float, float addrspace(1)* %gep.2
+  %d = load float, float addrspace(1)* %gep.3
 
   %mul = fmul float %a, %b
   %fma0 = fadd float %mul, %c
@@ -110,9 +110,9 @@ define void @combine_to_mad_f32_1(float
   %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %gep.0
-  %b = load float addrspace(1)* %gep.1
-  %c = load float addrspace(1)* %gep.2
+  %a = load float, float addrspace(1)* %gep.0
+  %b = load float, float addrspace(1)* %gep.1
+  %c = load float, float addrspace(1)* %gep.2
 
   %mul = fmul float %a, %b
   %fma = fadd float %c, %mul
@@ -140,9 +140,9 @@ define void @combine_to_mad_fsub_0_f32(f
   %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %gep.0
-  %b = load float addrspace(1)* %gep.1
-  %c = load float addrspace(1)* %gep.2
+  %a = load float, float addrspace(1)* %gep.0
+  %b = load float, float addrspace(1)* %gep.1
+  %c = load float, float addrspace(1)* %gep.2
 
   %mul = fmul float %a, %b
   %fma = fsub float %mul, %c
@@ -179,10 +179,10 @@ define void @combine_to_mad_fsub_0_f32_2
   %gep.out.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
   %gep.out.1 = getelementptr float, float addrspace(1)* %gep.out.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0
-  %b = load float addrspace(1)* %gep.1
-  %c = load float addrspace(1)* %gep.2
-  %d = load float addrspace(1)* %gep.3
+  %a = load float, float addrspace(1)* %gep.0
+  %b = load float, float addrspace(1)* %gep.1
+  %c = load float, float addrspace(1)* %gep.2
+  %d = load float, float addrspace(1)* %gep.3
 
   %mul = fmul float %a, %b
   %fma0 = fsub float %mul, %c
@@ -212,9 +212,9 @@ define void @combine_to_mad_fsub_1_f32(f
   %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %gep.0
-  %b = load float addrspace(1)* %gep.1
-  %c = load float addrspace(1)* %gep.2
+  %a = load float, float addrspace(1)* %gep.0
+  %b = load float, float addrspace(1)* %gep.1
+  %c = load float, float addrspace(1)* %gep.2
 
   %mul = fmul float %a, %b
   %fma = fsub float %c, %mul
@@ -250,10 +250,10 @@ define void @combine_to_mad_fsub_1_f32_2
   %gep.out.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
   %gep.out.1 = getelementptr float, float addrspace(1)* %gep.out.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0
-  %b = load float addrspace(1)* %gep.1
-  %c = load float addrspace(1)* %gep.2
-  %d = load float addrspace(1)* %gep.3
+  %a = load float, float addrspace(1)* %gep.0
+  %b = load float, float addrspace(1)* %gep.1
+  %c = load float, float addrspace(1)* %gep.2
+  %d = load float, float addrspace(1)* %gep.3
 
   %mul = fmul float %a, %b
   %fma0 = fsub float %c, %mul
@@ -284,9 +284,9 @@ define void @combine_to_mad_fsub_2_f32(f
   %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %gep.0
-  %b = load float addrspace(1)* %gep.1
-  %c = load float addrspace(1)* %gep.2
+  %a = load float, float addrspace(1)* %gep.0
+  %b = load float, float addrspace(1)* %gep.1
+  %c = load float, float addrspace(1)* %gep.2
 
   %mul = fmul float %a, %b
   %mul.neg = fsub float -0.0, %mul
@@ -324,10 +324,10 @@ define void @combine_to_mad_fsub_2_f32_2
   %gep.out.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
   %gep.out.1 = getelementptr float, float addrspace(1)* %gep.out.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0
-  %b = load float addrspace(1)* %gep.1
-  %c = load float addrspace(1)* %gep.2
-  %d = load float addrspace(1)* %gep.3
+  %a = load float, float addrspace(1)* %gep.0
+  %b = load float, float addrspace(1)* %gep.1
+  %c = load float, float addrspace(1)* %gep.2
+  %d = load float, float addrspace(1)* %gep.3
 
   %mul = fmul float %a, %b
   %mul.neg = fsub float -0.0, %mul
@@ -367,10 +367,10 @@ define void @combine_to_mad_fsub_2_f32_2
   %gep.out.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
   %gep.out.1 = getelementptr float, float addrspace(1)* %gep.out.0, i32 1
 
-  %a = load float addrspace(1)* %gep.0
-  %b = load float addrspace(1)* %gep.1
-  %c = load float addrspace(1)* %gep.2
-  %d = load float addrspace(1)* %gep.3
+  %a = load float, float addrspace(1)* %gep.0
+  %b = load float, float addrspace(1)* %gep.1
+  %c = load float, float addrspace(1)* %gep.2
+  %d = load float, float addrspace(1)* %gep.3
 
   %mul = fmul float %a, %b
   %mul.neg = fsub float -0.0, %mul
@@ -412,11 +412,11 @@ define void @aggressive_combine_to_mad_f
   %gep.4 = getelementptr float, float addrspace(1)* %gep.0, i32 4
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %x = load float addrspace(1)* %gep.0
-  %y = load float addrspace(1)* %gep.1
-  %z = load float addrspace(1)* %gep.2
-  %u = load float addrspace(1)* %gep.3
-  %v = load float addrspace(1)* %gep.4
+  %x = load float, float addrspace(1)* %gep.0
+  %y = load float, float addrspace(1)* %gep.1
+  %z = load float, float addrspace(1)* %gep.2
+  %u = load float, float addrspace(1)* %gep.3
+  %v = load float, float addrspace(1)* %gep.4
 
   %tmp0 = fmul float %u, %v
   %tmp1 = call float @llvm.fma.f32(float %x, float %y, float %tmp0) #0
@@ -458,11 +458,11 @@ define void @aggressive_combine_to_mad_f
   %gep.4 = getelementptr float, float addrspace(1)* %gep.0, i32 4
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %x = load float addrspace(1)* %gep.0
-  %y = load float addrspace(1)* %gep.1
-  %z = load float addrspace(1)* %gep.2
-  %u = load float addrspace(1)* %gep.3
-  %v = load float addrspace(1)* %gep.4
+  %x = load float, float addrspace(1)* %gep.0
+  %y = load float, float addrspace(1)* %gep.1
+  %z = load float, float addrspace(1)* %gep.2
+  %u = load float, float addrspace(1)* %gep.3
+  %v = load float, float addrspace(1)* %gep.4
 
   %tmp0 = fmul float %u, %v
   %tmp1 = call float @llvm.fma.f32(float %y, float %z, float %tmp0) #0
@@ -503,11 +503,11 @@ define void @aggressive_combine_to_mad_f
   %gep.4 = getelementptr float, float addrspace(1)* %gep.0, i32 4
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %x = load float addrspace(1)* %gep.0
-  %y = load float addrspace(1)* %gep.1
-  %z = load float addrspace(1)* %gep.2
-  %u = load float addrspace(1)* %gep.3
-  %v = load float addrspace(1)* %gep.4
+  %x = load float, float addrspace(1)* %gep.0
+  %y = load float, float addrspace(1)* %gep.1
+  %z = load float, float addrspace(1)* %gep.2
+  %u = load float, float addrspace(1)* %gep.3
+  %v = load float, float addrspace(1)* %gep.4
 
   %tmp0 = fmul float %u, %v
   %tmp1 = call float @llvm.fmuladd.f32(float %x, float %y, float %tmp0) #0
@@ -549,11 +549,11 @@ define void @aggressive_combine_to_mad_f
   %gep.4 = getelementptr float, float addrspace(1)* %gep.0, i32 4
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %x = load float addrspace(1)* %gep.0
-  %y = load float addrspace(1)* %gep.1
-  %z = load float addrspace(1)* %gep.2
-  %u = load float addrspace(1)* %gep.3
-  %v = load float addrspace(1)* %gep.4
+  %x = load float, float addrspace(1)* %gep.0
+  %y = load float, float addrspace(1)* %gep.1
+  %z = load float, float addrspace(1)* %gep.2
+  %u = load float, float addrspace(1)* %gep.3
+  %v = load float, float addrspace(1)* %gep.4
 
   %tmp0 = fmul float %u, %v
   %tmp1 = call float @llvm.fmuladd.f32(float %y, float %z, float %tmp0) #0

Modified: llvm/trunk/test/CodeGen/R600/mad-sub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/mad-sub.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/mad-sub.ll (original)
+++ llvm/trunk/test/CodeGen/R600/mad-sub.ll Fri Feb 27 15:17:42 2015
@@ -18,9 +18,9 @@ define void @mad_sub_f32(float addrspace
   %add2 = add i64 %tid.ext, 2
   %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
   %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext
-  %a = load float addrspace(1)* %gep0, align 4
-  %b = load float addrspace(1)* %gep1, align 4
-  %c = load float addrspace(1)* %gep2, align 4
+  %a = load float, float addrspace(1)* %gep0, align 4
+  %b = load float, float addrspace(1)* %gep1, align 4
+  %c = load float, float addrspace(1)* %gep2, align 4
   %mul = fmul float %a, %b
   %sub = fsub float %mul, %c
   store float %sub, float addrspace(1)* %outgep, align 4
@@ -42,9 +42,9 @@ define void @mad_sub_inv_f32(float addrs
   %add2 = add i64 %tid.ext, 2
   %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
   %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext
-  %a = load float addrspace(1)* %gep0, align 4
-  %b = load float addrspace(1)* %gep1, align 4
-  %c = load float addrspace(1)* %gep2, align 4
+  %a = load float, float addrspace(1)* %gep0, align 4
+  %b = load float, float addrspace(1)* %gep1, align 4
+  %c = load float, float addrspace(1)* %gep2, align 4
   %mul = fmul float %a, %b
   %sub = fsub float %c, %mul
   store float %sub, float addrspace(1)* %outgep, align 4
@@ -63,9 +63,9 @@ define void @mad_sub_f64(double addrspac
   %add2 = add i64 %tid.ext, 2
   %gep2 = getelementptr double, double addrspace(1)* %ptr, i64 %add2
   %outgep = getelementptr double, double addrspace(1)* %out, i64 %tid.ext
-  %a = load double addrspace(1)* %gep0, align 8
-  %b = load double addrspace(1)* %gep1, align 8
-  %c = load double addrspace(1)* %gep2, align 8
+  %a = load double, double addrspace(1)* %gep0, align 8
+  %b = load double, double addrspace(1)* %gep1, align 8
+  %c = load double, double addrspace(1)* %gep2, align 8
   %mul = fmul double %a, %b
   %sub = fsub double %mul, %c
   store double %sub, double addrspace(1)* %outgep, align 8
@@ -87,9 +87,9 @@ define void @mad_sub_fabs_f32(float addr
   %add2 = add i64 %tid.ext, 2
   %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
   %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext
-  %a = load float addrspace(1)* %gep0, align 4
-  %b = load float addrspace(1)* %gep1, align 4
-  %c = load float addrspace(1)* %gep2, align 4
+  %a = load float, float addrspace(1)* %gep0, align 4
+  %b = load float, float addrspace(1)* %gep1, align 4
+  %c = load float, float addrspace(1)* %gep2, align 4
   %c.abs = call float @llvm.fabs.f32(float %c) #0
   %mul = fmul float %a, %b
   %sub = fsub float %mul, %c.abs
@@ -112,9 +112,9 @@ define void @mad_sub_fabs_inv_f32(float
   %add2 = add i64 %tid.ext, 2
   %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
   %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext
-  %a = load float addrspace(1)* %gep0, align 4
-  %b = load float addrspace(1)* %gep1, align 4
-  %c = load float addrspace(1)* %gep2, align 4
+  %a = load float, float addrspace(1)* %gep0, align 4
+  %b = load float, float addrspace(1)* %gep1, align 4
+  %c = load float, float addrspace(1)* %gep2, align 4
   %c.abs = call float @llvm.fabs.f32(float %c) #0
   %mul = fmul float %a, %b
   %sub = fsub float %c.abs, %mul
@@ -133,9 +133,9 @@ define void @neg_neg_mad_f32(float addrs
   %add2 = add i64 %tid.ext, 2
   %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
   %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext
-  %a = load float addrspace(1)* %gep0, align 4
-  %b = load float addrspace(1)* %gep1, align 4
-  %c = load float addrspace(1)* %gep2, align 4
+  %a = load float, float addrspace(1)* %gep0, align 4
+  %b = load float, float addrspace(1)* %gep1, align 4
+  %c = load float, float addrspace(1)* %gep2, align 4
   %nega = fsub float -0.000000e+00, %a
   %negb = fsub float -0.000000e+00, %b
   %mul = fmul float %nega, %negb
@@ -159,9 +159,9 @@ define void @mad_fabs_sub_f32(float addr
   %add2 = add i64 %tid.ext, 2
   %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
   %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext
-  %a = load float addrspace(1)* %gep0, align 4
-  %b = load float addrspace(1)* %gep1, align 4
-  %c = load float addrspace(1)* %gep2, align 4
+  %a = load float, float addrspace(1)* %gep0, align 4
+  %b = load float, float addrspace(1)* %gep1, align 4
+  %c = load float, float addrspace(1)* %gep2, align 4
   %b.abs = call float @llvm.fabs.f32(float %b) #0
   %mul = fmul float %a, %b.abs
   %sub = fsub float %mul, %c
@@ -180,8 +180,8 @@ define void @fsub_c_fadd_a_a(float addrs
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %r1 = load float addrspace(1)* %gep.0
-  %r2 = load float addrspace(1)* %gep.1
+  %r1 = load float, float addrspace(1)* %gep.0
+  %r2 = load float, float addrspace(1)* %gep.1
 
   %add = fadd float %r1, %r1
   %r3 = fsub float %r2, %add
@@ -201,8 +201,8 @@ define void @fsub_fadd_a_a_c(float addrs
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %r1 = load float addrspace(1)* %gep.0
-  %r2 = load float addrspace(1)* %gep.1
+  %r1 = load float, float addrspace(1)* %gep.0
+  %r2 = load float, float addrspace(1)* %gep.1
 
   %add = fadd float %r1, %r1
   %r3 = fsub float %add, %r2

Modified: llvm/trunk/test/CodeGen/R600/madak.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/madak.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/madak.ll (original)
+++ llvm/trunk/test/CodeGen/R600/madak.ll Fri Feb 27 15:17:42 2015
@@ -16,8 +16,8 @@ define void @madak_f32(float addrspace(1
   %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %in.a.gep, align 4
-  %b = load float addrspace(1)* %in.b.gep, align 4
+  %a = load float, float addrspace(1)* %in.a.gep, align 4
+  %b = load float, float addrspace(1)* %in.b.gep, align 4
 
   %mul = fmul float %a, %b
   %madak = fadd float %mul, 10.0
@@ -47,9 +47,9 @@ define void @madak_2_use_f32(float addrs
   %out.gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
   %out.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
 
-  %a = load float addrspace(1)* %in.gep.0, align 4
-  %b = load float addrspace(1)* %in.gep.1, align 4
-  %c = load float addrspace(1)* %in.gep.2, align 4
+  %a = load float, float addrspace(1)* %in.gep.0, align 4
+  %b = load float, float addrspace(1)* %in.gep.1, align 4
+  %c = load float, float addrspace(1)* %in.gep.2, align 4
 
   %mul0 = fmul float %a, %b
   %mul1 = fmul float %a, %c
@@ -69,7 +69,7 @@ define void @madak_m_inline_imm_f32(floa
   %in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %in.a.gep, align 4
+  %a = load float, float addrspace(1)* %in.a.gep, align 4
 
   %mul = fmul float 4.0, %a
   %madak = fadd float %mul, 10.0
@@ -90,8 +90,8 @@ define void @madak_inline_imm_f32(float
   %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %in.a.gep, align 4
-  %b = load float addrspace(1)* %in.b.gep, align 4
+  %a = load float, float addrspace(1)* %in.a.gep, align 4
+  %b = load float, float addrspace(1)* %in.b.gep, align 4
 
   %mul = fmul float %a, %b
   %madak = fadd float %mul, 4.0
@@ -111,7 +111,7 @@ define void @s_v_madak_f32(float addrspa
   %in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %in.a.gep, align 4
+  %a = load float, float addrspace(1)* %in.a.gep, align 4
 
   %mul = fmul float %a, %b
   %madak = fadd float %mul, 10.0
@@ -130,7 +130,7 @@ define void @v_s_madak_f32(float addrspa
   %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %b = load float addrspace(1)* %in.b.gep, align 4
+  %b = load float, float addrspace(1)* %in.b.gep, align 4
 
   %mul = fmul float %a, %b
   %madak = fadd float %mul, 10.0
@@ -159,8 +159,8 @@ define void @no_madak_src0_modifier_f32(
   %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %in.a.gep, align 4
-  %b = load float addrspace(1)* %in.b.gep, align 4
+  %a = load float, float addrspace(1)* %in.a.gep, align 4
+  %b = load float, float addrspace(1)* %in.b.gep, align 4
 
   %a.fabs = call float @llvm.fabs.f32(float %a) nounwind readnone
 
@@ -181,8 +181,8 @@ define void @no_madak_src1_modifier_f32(
   %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %in.a.gep, align 4
-  %b = load float addrspace(1)* %in.b.gep, align 4
+  %a = load float, float addrspace(1)* %in.a.gep, align 4
+  %b = load float, float addrspace(1)* %in.b.gep, align 4
 
   %b.fabs = call float @llvm.fabs.f32(float %b) nounwind readnone
 

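The hunks above all make the same mechanical change: the textual form of load now names the loaded value's type as an explicit first operand, instead of leaving it implied by the pointer operand. A minimal before/after sketch (illustrative only; %p is a hypothetical pointer, not taken from any test above):

  ; old form: result type implied by the pointer operand
  %v = load float addrspace(1)* %p, align 4
  ; new form: result type spelled out, then the pointer operand
  %v = load float, float addrspace(1)* %p, align 4
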
Modified: llvm/trunk/test/CodeGen/R600/madmk.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/madmk.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/madmk.ll (original)
+++ llvm/trunk/test/CodeGen/R600/madmk.ll Fri Feb 27 15:17:42 2015
@@ -14,8 +14,8 @@ define void @madmk_f32(float addrspace(1
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %mul = fmul float %a, 10.0
   %madmk = fadd float %mul, %b
@@ -41,9 +41,9 @@ define void @madmk_2_use_f32(float addrs
   %out.gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
   %out.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
 
-  %a = load float addrspace(1)* %in.gep.0, align 4
-  %b = load float addrspace(1)* %in.gep.1, align 4
-  %c = load float addrspace(1)* %in.gep.2, align 4
+  %a = load float, float addrspace(1)* %in.gep.0, align 4
+  %b = load float, float addrspace(1)* %in.gep.1, align 4
+  %c = load float, float addrspace(1)* %in.gep.2, align 4
 
   %mul0 = fmul float %a, 10.0
   %mul1 = fmul float %a, 10.0
@@ -66,8 +66,8 @@ define void @madmk_inline_imm_f32(float
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %mul = fmul float %a, 4.0
   %madmk = fadd float %mul, %b
@@ -97,7 +97,7 @@ define void @v_s_madmk_f32(float addrspa
   %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
-  %a = load float addrspace(1)* %gep.0, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
 
   %mul = fmul float %a, 10.0
   %madmk = fadd float %mul, %b
@@ -113,7 +113,7 @@ define void @scalar_vector_madmk_f32(flo
   %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
-  %b = load float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.0, align 4
 
   %mul = fmul float %a, 10.0
   %madmk = fadd float %mul, %b
@@ -131,8 +131,8 @@ define void @no_madmk_src0_modifier_f32(
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %a.fabs = call float @llvm.fabs.f32(float %a) nounwind readnone
 
@@ -152,8 +152,8 @@ define void @no_madmk_src2_modifier_f32(
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %gep.0, align 4
-  %b = load float addrspace(1)* %gep.1, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
+  %b = load float, float addrspace(1)* %gep.1, align 4
 
   %b.fabs = call float @llvm.fabs.f32(float %b) nounwind readnone
 
@@ -172,7 +172,7 @@ define void @madmk_add_inline_imm_f32(fl
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
 
-  %a = load float addrspace(1)* %gep.0, align 4
+  %a = load float, float addrspace(1)* %gep.0, align 4
 
   %mul = fmul float %a, 10.0
   %madmk = fadd float %mul, 2.0

Modified: llvm/trunk/test/CodeGen/R600/max.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/max.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/max.ll (original)
+++ llvm/trunk/test/CodeGen/R600/max.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@ define void @v_test_imax_sge_i32(i32 add
   %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
   %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
   %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
   %cmp = icmp sge i32 %a, %b
   %val = select i1 %cmp, i32 %a, i32 %b
   store i32 %val, i32 addrspace(1)* %outgep, align 4
@@ -33,8 +33,8 @@ define void @v_test_imax_sgt_i32(i32 add
   %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
   %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
   %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
   %cmp = icmp sgt i32 %a, %b
   %val = select i1 %cmp, i32 %a, i32 %b
   store i32 %val, i32 addrspace(1)* %outgep, align 4
@@ -57,8 +57,8 @@ define void @v_test_umax_uge_i32(i32 add
   %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
   %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
   %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
   %cmp = icmp uge i32 %a, %b
   %val = select i1 %cmp, i32 %a, i32 %b
   store i32 %val, i32 addrspace(1)* %outgep, align 4
@@ -81,8 +81,8 @@ define void @v_test_umax_ugt_i32(i32 add
   %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
   %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
   %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
   %cmp = icmp ugt i32 %a, %b
   %val = select i1 %cmp, i32 %a, i32 %b
   store i32 %val, i32 addrspace(1)* %outgep, align 4

Modified: llvm/trunk/test/CodeGen/R600/max3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/max3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/max3.ll (original)
+++ llvm/trunk/test/CodeGen/R600/max3.ll Fri Feb 27 15:17:42 2015
@@ -10,9 +10,9 @@ define void @v_test_imax3_sgt_i32(i32 ad
   %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
   %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
   %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
-  %c = load i32 addrspace(1)* %gep2, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
+  %c = load i32, i32 addrspace(1)* %gep2, align 4
   %icmp0 = icmp sgt i32 %a, %b
   %i0 = select i1 %icmp0, i32 %a, i32 %b
   %icmp1 = icmp sgt i32 %i0, %c
@@ -29,9 +29,9 @@ define void @v_test_umax3_ugt_i32(i32 ad
   %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
   %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
   %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
-  %c = load i32 addrspace(1)* %gep2, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
+  %c = load i32, i32 addrspace(1)* %gep2, align 4
   %icmp0 = icmp ugt i32 %a, %b
   %i0 = select i1 %icmp0, i32 %a, i32 %b
   %icmp1 = icmp ugt i32 %i0, %c

Modified: llvm/trunk/test/CodeGen/R600/min.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/min.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/min.ll (original)
+++ llvm/trunk/test/CodeGen/R600/min.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@ define void @v_test_imin_sle_i32(i32 add
   %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
   %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
   %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
   %cmp = icmp sle i32 %a, %b
   %val = select i1 %cmp, i32 %a, i32 %b
   store i32 %val, i32 addrspace(1)* %outgep, align 4
@@ -33,8 +33,8 @@ define void @v_test_imin_slt_i32(i32 add
   %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
   %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
   %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
   %cmp = icmp slt i32 %a, %b
   %val = select i1 %cmp, i32 %a, i32 %b
   store i32 %val, i32 addrspace(1)* %outgep, align 4
@@ -57,8 +57,8 @@ define void @v_test_umin_ule_i32(i32 add
   %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
   %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
   %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
   %cmp = icmp ule i32 %a, %b
   %val = select i1 %cmp, i32 %a, i32 %b
   store i32 %val, i32 addrspace(1)* %outgep, align 4
@@ -81,8 +81,8 @@ define void @v_test_umin_ult_i32(i32 add
   %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
   %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
   %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
   %cmp = icmp ult i32 %a, %b
   %val = select i1 %cmp, i32 %a, i32 %b
   store i32 %val, i32 addrspace(1)* %outgep, align 4
@@ -110,8 +110,8 @@ define void @v_test_umin_ult_i32_multi_u
   %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
   %outgep0 = getelementptr i32, i32 addrspace(1)* %out0, i32 %tid
   %outgep1 = getelementptr i1, i1 addrspace(1)* %out1, i32 %tid
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
   %cmp = icmp ult i32 %a, %b
   %val = select i1 %cmp, i32 %a, i32 %b
   store i32 %val, i32 addrspace(1)* %outgep0, align 4

Modified: llvm/trunk/test/CodeGen/R600/min3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/min3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/min3.ll (original)
+++ llvm/trunk/test/CodeGen/R600/min3.ll Fri Feb 27 15:17:42 2015
@@ -10,9 +10,9 @@ define void @v_test_imin3_slt_i32(i32 ad
   %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
   %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
   %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
-  %c = load i32 addrspace(1)* %gep2, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
+  %c = load i32, i32 addrspace(1)* %gep2, align 4
   %icmp0 = icmp slt i32 %a, %b
   %i0 = select i1 %icmp0, i32 %a, i32 %b
   %icmp1 = icmp slt i32 %i0, %c
@@ -29,9 +29,9 @@ define void @v_test_umin3_ult_i32(i32 ad
   %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
   %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
   %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
-  %c = load i32 addrspace(1)* %gep2, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
+  %c = load i32, i32 addrspace(1)* %gep2, align 4
   %icmp0 = icmp ult i32 %a, %b
   %i0 = select i1 %icmp0, i32 %a, i32 %b
   %icmp1 = icmp ult i32 %i0, %c
@@ -57,10 +57,10 @@ define void @v_test_umin_umin_umin(i32 a
   %outgep0 = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
   %outgep1 = getelementptr i32, i32 addrspace(1)* %out, i32 %tid2
 
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
-  %c = load i32 addrspace(1)* %gep2, align 4
-  %d = load i32 addrspace(1)* %gep3, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
+  %c = load i32, i32 addrspace(1)* %gep2, align 4
+  %d = load i32, i32 addrspace(1)* %gep3, align 4
 
   %icmp0 = icmp slt i32 %a, %b
   %i0 = select i1 %icmp0, i32 %a, i32 %b
@@ -91,10 +91,10 @@ define void @v_test_umin3_2_uses(i32 add
   %outgep0 = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
   %outgep1 = getelementptr i32, i32 addrspace(1)* %out, i32 %tid2
 
-  %a = load i32 addrspace(1)* %gep0, align 4
-  %b = load i32 addrspace(1)* %gep1, align 4
-  %c = load i32 addrspace(1)* %gep2, align 4
-  %d = load i32 addrspace(1)* %gep3, align 4
+  %a = load i32, i32 addrspace(1)* %gep0, align 4
+  %b = load i32, i32 addrspace(1)* %gep1, align 4
+  %c = load i32, i32 addrspace(1)* %gep2, align 4
+  %d = load i32, i32 addrspace(1)* %gep3, align 4
 
   %icmp0 = icmp slt i32 %a, %b
   %i0 = select i1 %icmp0, i32 %a, i32 %b

Modified: llvm/trunk/test/CodeGen/R600/missing-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/missing-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/missing-store.ll (original)
+++ llvm/trunk/test/CodeGen/R600/missing-store.ll Fri Feb 27 15:17:42 2015
@@ -12,11 +12,11 @@
 ; SI: buffer_store_dword
 ; SI: s_endpgm
 define void @missing_store_reduced(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
-  %ptr0 = load i32 addrspace(2)* addrspace(3)* @ptr_load, align 8
+  %ptr0 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(3)* @ptr_load, align 8
   %ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2
 
   store i32 99, i32 addrspace(1)* %gptr, align 4
-  %tmp2 = load i32 addrspace(2)* %ptr2, align 4
+  %tmp2 = load i32, i32 addrspace(2)* %ptr2, align 4
 
   store i32 %tmp2, i32 addrspace(1)* %out, align 4
   ret void

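Loads that produce a pointer, like the @ptr_load case in missing-store.ll above, show the explicit type most clearly: the first operand is the loaded pointer type, and the second is that same type with one more level of indirection, including its address space. A sketch with a hypothetical global @p mirroring the test:

  ; load an i32 addrspace(2)* value out of an addrspace(3) slot
  %q = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(3)* @p, align 8
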
Modified: llvm/trunk/test/CodeGen/R600/mubuf.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/mubuf.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/mubuf.ll (original)
+++ llvm/trunk/test/CodeGen/R600/mubuf.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ declare i32 @llvm.r600.read.tidig.x() re
 define void @mubuf_load0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
 entry:
   %0 = getelementptr i32, i32 addrspace(1)* %in, i64 1
-  %1 = load i32 addrspace(1)* %0
+  %1 = load i32, i32 addrspace(1)* %0
   store i32 %1, i32 addrspace(1)* %out
   ret void
 }
@@ -23,7 +23,7 @@ entry:
 define void @mubuf_load1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
 entry:
   %0 = getelementptr i8, i8 addrspace(1)* %in, i64 4095
-  %1 = load i8 addrspace(1)* %0
+  %1 = load i8, i8 addrspace(1)* %0
   store i8 %1, i8 addrspace(1)* %out
   ret void
 }
@@ -35,7 +35,7 @@ entry:
 define void @mubuf_load2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
 entry:
   %0 = getelementptr i32, i32 addrspace(1)* %in, i64 1024
-  %1 = load i32 addrspace(1)* %0
+  %1 = load i32, i32 addrspace(1)* %0
   store i32 %1, i32 addrspace(1)* %out
   ret void
 }
@@ -48,7 +48,7 @@ define void @mubuf_load3(i32 addrspace(1
 entry:
   %0 = getelementptr i32, i32 addrspace(1)* %in, i64 %offset
   %1 = getelementptr i32, i32 addrspace(1)* %0, i64 1
-  %2 = load i32 addrspace(1)* %1
+  %2 = load i32, i32 addrspace(1)* %1
   store i32 %2, i32 addrspace(1)* %out
   ret void
 }
@@ -58,7 +58,7 @@ entry:
 define void @soffset_max_imm([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) #1 {
 main_body:
   %tmp0 = getelementptr [6 x <16 x i8>], [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0
-  %tmp1 = load <16 x i8> addrspace(2)* %tmp0
+  %tmp1 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp0
   %tmp2 = shl i32 %6, 2
   %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp1, i32 %tmp2, i32 64, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
   %tmp4 = add i32 %6, 16
@@ -77,7 +77,7 @@ main_body:
 define void @soffset_no_fold([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) #1 {
 main_body:
   %tmp0 = getelementptr [6 x <16 x i8>], [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0
-  %tmp1 = load <16 x i8> addrspace(2)* %tmp0
+  %tmp1 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp0
   %tmp2 = shl i32 %6, 2
   %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp1, i32 %tmp2, i32 65, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
   %tmp4 = add i32 %6, 16

Modified: llvm/trunk/test/CodeGen/R600/mul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/mul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/mul.ll (original)
+++ llvm/trunk/test/CodeGen/R600/mul.ll Fri Feb 27 15:17:42 2015
@@ -13,8 +13,8 @@
 
 define void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
-  %a = load <2 x i32> addrspace(1) * %in
-  %b = load <2 x i32> addrspace(1) * %b_ptr
+  %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
+  %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
   %result = mul <2 x i32> %a, %b
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
@@ -33,8 +33,8 @@ define void @test_mul_v2i32(<2 x i32> ad
 
 define void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %a = load <4 x i32> addrspace(1) * %in
-  %b = load <4 x i32> addrspace(1) * %b_ptr
+  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
+  %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
   %result = mul <4 x i32> %a, %b
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
@@ -58,8 +58,8 @@ define void @s_trunc_i64_mul_to_i32(i32
 ; SI: v_mul_lo_i32
 ; SI: buffer_store_dword
 define void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
-  %a = load i64 addrspace(1)* %aptr, align 8
-  %b = load i64 addrspace(1)* %bptr, align 8
+  %a = load i64, i64 addrspace(1)* %aptr, align 8
+  %b = load i64, i64 addrspace(1)* %bptr, align 8
   %mul = mul i64 %b, %a
   %trunc = trunc i64 %mul to i32
   store i32 %trunc, i32 addrspace(1)* %out, align 8
@@ -88,7 +88,7 @@ entry:
 ; SI-DAG: v_mul_hi_i32
 ; SI: s_endpgm
 define void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %ext = sext i32 %val to i64
   %mul = mul i64 %ext, 80
   store i64 %mul, i64 addrspace(1)* %out, align 8
@@ -100,7 +100,7 @@ define void @v_mul64_sext_c(i64 addrspac
 ; SI-DAG: v_mul_hi_i32 v{{[0-9]+}}, 9, v{{[0-9]+}}
 ; SI: s_endpgm
 define void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %ext = sext i32 %val to i64
   %mul = mul i64 %ext, 9
   store i64 %mul, i64 addrspace(1)* %out, align 8
@@ -124,8 +124,8 @@ define void @s_mul_i32(i32 addrspace(1)*
 ; SI: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %a = load i32 addrspace(1)* %in
-  %b = load i32 addrspace(1)* %b_ptr
+  %a = load i32, i32 addrspace(1)* %in
+  %b = load i32, i32 addrspace(1)* %b_ptr
   %result = mul i32 %a, %b
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -148,8 +148,8 @@ define void @s_mul_i64(i64 addrspace(1)*
 ; FUNC-LABEL: {{^}}v_mul_i64:
 ; SI: v_mul_lo_i32
 define void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
-  %a = load i64 addrspace(1)* %aptr, align 8
-  %b = load i64 addrspace(1)* %bptr, align 8
+  %a = load i64, i64 addrspace(1)* %aptr, align 8
+  %b = load i64, i64 addrspace(1)* %bptr, align 8
   %mul = mul i64 %a, %b
   store i64 %mul, i64 addrspace(1)* %out, align 8
   ret void
@@ -163,7 +163,7 @@ entry:
   br i1 %0, label %if, label %else
 
 if:
-  %1 = load i32 addrspace(1)* %in
+  %1 = load i32, i32 addrspace(1)* %in
   br label %endif
 
 else:
@@ -186,7 +186,7 @@ entry:
   br i1 %0, label %if, label %else
 
 if:
-  %1 = load i64 addrspace(1)* %in
+  %1 = load i64, i64 addrspace(1)* %in
   br label %endif
 
 else:

Modified: llvm/trunk/test/CodeGen/R600/no-initializer-constant-addrspace.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/no-initializer-constant-addrspace.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/no-initializer-constant-addrspace.ll (original)
+++ llvm/trunk/test/CodeGen/R600/no-initializer-constant-addrspace.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
 
 ; FUNC-LABEL: {{^}}load_extern_const_init:
 define void @load_extern_const_init(i32 addrspace(1)* %out) nounwind {
-  %val = load i32 addrspace(2)* getelementptr ([5 x i32] addrspace(2)* @extern_const_addrspace, i64 0, i64 3), align 4
+  %val = load i32, i32 addrspace(2)* getelementptr ([5 x i32] addrspace(2)* @extern_const_addrspace, i64 0, i64 3), align 4
   store i32 %val, i32 addrspace(1)* %out, align 4
   ret void
 }
@@ -15,7 +15,7 @@ define void @load_extern_const_init(i32
 
 ; FUNC-LABEL: {{^}}load_undef_const_init:
 define void @load_undef_const_init(i32 addrspace(1)* %out) nounwind {
-  %val = load i32 addrspace(2)* getelementptr ([5 x i32] addrspace(2)* @undef_const_addrspace, i64 0, i64 3), align 4
+  %val = load i32, i32 addrspace(2)* getelementptr ([5 x i32] addrspace(2)* @undef_const_addrspace, i64 0, i64 3), align 4
   store i32 %val, i32 addrspace(1)* %out, align 4
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/no-shrink-extloads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/no-shrink-extloads.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/no-shrink-extloads.ll (original)
+++ llvm/trunk/test/CodeGen/R600/no-shrink-extloads.ll Fri Feb 27 15:17:42 2015
@@ -25,7 +25,7 @@ define void @truncate_buffer_load_i32_to
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i16, i16 addrspace(1)* %out, i32 %tid
-  %load = load i32 addrspace(1)* %gep.in
+  %load = load i32, i32 addrspace(1)* %gep.in
   %trunc = trunc i32 %load to i16
   store i16 %trunc, i16 addrspace(1)* %gep.out
   ret void
@@ -47,7 +47,7 @@ define void @truncate_buffer_load_i32_to
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
-  %load = load i32 addrspace(1)* %gep.in
+  %load = load i32, i32 addrspace(1)* %gep.in
   %trunc = trunc i32 %load to i8
   store i8 %trunc, i8 addrspace(1)* %gep.out
   ret void
@@ -69,7 +69,7 @@ define void @truncate_buffer_load_i32_to
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i1, i1 addrspace(1)* %out, i32 %tid
-  %load = load i32 addrspace(1)* %gep.in
+  %load = load i32, i32 addrspace(1)* %gep.in
   %trunc = trunc i32 %load to i1
   store i1 %trunc, i1 addrspace(1)* %gep.out
   ret void
@@ -91,7 +91,7 @@ define void @truncate_buffer_load_i64_to
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %load = load i64 addrspace(1)* %gep.in
+  %load = load i64, i64 addrspace(1)* %gep.in
   %trunc = trunc i64 %load to i32
   store i32 %trunc, i32 addrspace(1)* %gep.out
   ret void
@@ -114,7 +114,7 @@ define void @srl_buffer_load_i64_to_i32(
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %load = load i64 addrspace(1)* %gep.in
+  %load = load i64, i64 addrspace(1)* %gep.in
   %srl = lshr i64 %load, 32
   %trunc = trunc i64 %srl to i32
   store i32 %trunc, i32 addrspace(1)* %gep.out
@@ -138,7 +138,7 @@ define void @truncate_buffer_load_i16_to
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.in = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
-  %load = load i16 addrspace(1)* %gep.in
+  %load = load i16, i16 addrspace(1)* %gep.in
   %trunc = trunc i16 %load to i8
   store i8 %trunc, i8 addrspace(1)* %gep.out
   ret void
@@ -161,7 +161,7 @@ define void @srl_buffer_load_i64_to_i8(i
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
-  %load = load i64 addrspace(1)* %gep.in
+  %load = load i64, i64 addrspace(1)* %gep.in
   %srl = lshr i64 %load, 32
   %trunc = trunc i64 %srl to i8
   store i8 %trunc, i8 addrspace(1)* %gep.out
@@ -184,7 +184,7 @@ define void @truncate_buffer_load_i64_to
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
-  %load = load i64 addrspace(1)* %gep.in
+  %load = load i64, i64 addrspace(1)* %gep.in
   %trunc = trunc i64 %load to i8
   store i8 %trunc, i8 addrspace(1)* %gep.out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/or.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/or.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/or.ll (original)
+++ llvm/trunk/test/CodeGen/R600/or.ll Fri Feb 27 15:17:42 2015
@@ -11,8 +11,8 @@
 ; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
 define void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
-  %a = load <2 x i32> addrspace(1) * %in
-  %b = load <2 x i32> addrspace(1) * %b_ptr
+  %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
+  %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
   %result = or <2 x i32> %a, %b
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
@@ -30,8 +30,8 @@ define void @or_v2i32(<2 x i32> addrspac
 ; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
 define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %a = load <4 x i32> addrspace(1) * %in
-  %b = load <4 x i32> addrspace(1) * %b_ptr
+  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
+  %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
   %result = or <4 x i32> %a, %b
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
@@ -48,7 +48,7 @@ define void @scalar_or_i32(i32 addrspace
 ; FUNC-LABEL: {{^}}vector_or_i32:
 ; SI: v_or_b32_e32 v{{[0-9]}}
 define void @vector_or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 %b) {
-  %loada = load i32 addrspace(1)* %a
+  %loada = load i32, i32 addrspace(1)* %a
   %or = or i32 %loada, %b
   store i32 %or, i32 addrspace(1)* %out
   ret void
@@ -65,7 +65,7 @@ define void @scalar_or_literal_i32(i32 a
 ; FUNC-LABEL: {{^}}vector_or_literal_i32:
 ; SI: v_or_b32_e32 v{{[0-9]+}}, 0xffff, v{{[0-9]+}}
 define void @vector_or_literal_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
-  %loada = load i32 addrspace(1)* %a, align 4
+  %loada = load i32, i32 addrspace(1)* %a, align 4
   %or = or i32 %loada, 65535
   store i32 %or, i32 addrspace(1)* %out, align 4
   ret void
@@ -74,7 +74,7 @@ define void @vector_or_literal_i32(i32 a
 ; FUNC-LABEL: {{^}}vector_or_inline_immediate_i32:
 ; SI: v_or_b32_e32 v{{[0-9]+}}, 4, v{{[0-9]+}}
 define void @vector_or_inline_immediate_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
-  %loada = load i32 addrspace(1)* %a, align 4
+  %loada = load i32, i32 addrspace(1)* %a, align 4
   %or = or i32 %loada, 4
   store i32 %or, i32 addrspace(1)* %out, align 4
   ret void
@@ -95,8 +95,8 @@ define void @scalar_or_i64(i64 addrspace
 ; SI: v_or_b32_e32 v{{[0-9]}}
 ; SI: v_or_b32_e32 v{{[0-9]}}
 define void @vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 8
-  %loadb = load i64 addrspace(1)* %a, align 8
+  %loada = load i64, i64 addrspace(1)* %a, align 8
+  %loadb = load i64, i64 addrspace(1)* %a, align 8
   %or = or i64 %loada, %loadb
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -106,7 +106,7 @@ define void @vector_or_i64(i64 addrspace
 ; SI: v_or_b32_e32 v{{[0-9]}}
 ; SI: v_or_b32_e32 v{{[0-9]}}
 define void @scalar_vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 %b) {
-  %loada = load i64 addrspace(1)* %a
+  %loada = load i64, i64 addrspace(1)* %a
   %or = or i64 %loada, %b
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -120,7 +120,7 @@ define void @scalar_vector_or_i64(i64 ad
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @vector_or_i64_loadimm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 8
+  %loada = load i64, i64 addrspace(1)* %a, align 8
   %or = or i64 %loada, 22470723082367
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -133,7 +133,7 @@ define void @vector_or_i64_loadimm(i64 a
 ; SI: v_or_b32_e32 {{v[0-9]+}}, 0, {{.*}}
 ; SI: s_endpgm
 define void @vector_or_i64_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 8
+  %loada = load i64, i64 addrspace(1)* %a, align 8
   %or = or i64 %loada, 8
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -157,8 +157,8 @@ define void @trunc_i64_or_to_i32(i32 add
 
 ; SI: s_or_b64 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
 define void @or_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
-  %a = load float addrspace(1)* %in0
-  %b = load float addrspace(1)* %in1
+  %a = load float, float addrspace(1)* %in0
+  %b = load float, float addrspace(1)* %in1
   %acmp = fcmp oge float %a, 0.000000e+00
   %bcmp = fcmp oge float %b, 0.000000e+00
   %or = or i1 %acmp, %bcmp

Modified: llvm/trunk/test/CodeGen/R600/parallelandifcollapse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/parallelandifcollapse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/parallelandifcollapse.ll (original)
+++ llvm/trunk/test/CodeGen/R600/parallelandifcollapse.ll Fri Feb 27 15:17:42 2015
@@ -23,14 +23,14 @@ entry:
   %c1 = alloca i32, align 4
   %d1 = alloca i32, align 4
   %data = alloca i32, align 4
-  %0 = load i32* %a0, align 4
-  %1 = load i32* %b0, align 4
+  %0 = load i32, i32* %a0, align 4
+  %1 = load i32, i32* %b0, align 4
   %cmp = icmp ne i32 %0, %1
   br i1 %cmp, label %land.lhs.true, label %if.end
 
 land.lhs.true:                                    ; preds = %entry
-  %2 = load i32* %c0, align 4
-  %3 = load i32* %d0, align 4
+  %2 = load i32, i32* %c0, align 4
+  %3 = load i32, i32* %d0, align 4
   %cmp1 = icmp ne i32 %2, %3
   br i1 %cmp1, label %if.then, label %if.end
 
@@ -39,14 +39,14 @@ if.then:
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %land.lhs.true, %entry
-  %4 = load i32* %a1, align 4
-  %5 = load i32* %b1, align 4
+  %4 = load i32, i32* %a1, align 4
+  %5 = load i32, i32* %b1, align 4
   %cmp2 = icmp ne i32 %4, %5
   br i1 %cmp2, label %land.lhs.true3, label %if.end6
 
 land.lhs.true3:                                   ; preds = %if.end
-  %6 = load i32* %c1, align 4
-  %7 = load i32* %d1, align 4
+  %6 = load i32, i32* %c1, align 4
+  %7 = load i32, i32* %d1, align 4
   %cmp4 = icmp ne i32 %6, %7
   br i1 %cmp4, label %if.then5, label %if.end6
 

Modified: llvm/trunk/test/CodeGen/R600/parallelorifcollapse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/parallelorifcollapse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/parallelorifcollapse.ll (original)
+++ llvm/trunk/test/CodeGen/R600/parallelorifcollapse.ll Fri Feb 27 15:17:42 2015
@@ -23,14 +23,14 @@ entry:
   %c1 = alloca i32, align 4
   %d1 = alloca i32, align 4
   %data = alloca i32, align 4
-  %0 = load i32* %a0, align 4
-  %1 = load i32* %b0, align 4
+  %0 = load i32, i32* %a0, align 4
+  %1 = load i32, i32* %b0, align 4
   %cmp = icmp ne i32 %0, %1
   br i1 %cmp, label %land.lhs.true, label %if.else
 
 land.lhs.true:                                    ; preds = %entry
-  %2 = load i32* %c0, align 4
-  %3 = load i32* %d0, align 4
+  %2 = load i32, i32* %c0, align 4
+  %3 = load i32, i32* %d0, align 4
   %cmp1 = icmp ne i32 %2, %3
   br i1 %cmp1, label %if.then, label %if.else
 
@@ -42,14 +42,14 @@ if.else:
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
-  %4 = load i32* %a1, align 4
-  %5 = load i32* %b1, align 4
+  %4 = load i32, i32* %a1, align 4
+  %5 = load i32, i32* %b1, align 4
   %cmp2 = icmp ne i32 %4, %5
   br i1 %cmp2, label %land.lhs.true3, label %if.else6
 
 land.lhs.true3:                                   ; preds = %if.end
-  %6 = load i32* %c1, align 4
-  %7 = load i32* %d1, align 4
+  %6 = load i32, i32* %c1, align 4
+  %7 = load i32, i32* %d1, align 4
   %cmp4 = icmp ne i32 %6, %7
   br i1 %cmp4, label %if.then5, label %if.else6
 

Modified: llvm/trunk/test/CodeGen/R600/private-memory.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/private-memory.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/private-memory.ll (original)
+++ llvm/trunk/test/CodeGen/R600/private-memory.ll Fri Feb 27 15:17:42 2015
@@ -23,18 +23,18 @@ declare i32 @llvm.r600.read.tidig.x() no
 define void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
 entry:
   %stack = alloca [5 x i32], align 4
-  %0 = load i32 addrspace(1)* %in, align 4
+  %0 = load i32, i32 addrspace(1)* %in, align 4
   %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %0
   store i32 4, i32* %arrayidx1, align 4
   %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
-  %1 = load i32 addrspace(1)* %arrayidx2, align 4
+  %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
   %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %1
   store i32 5, i32* %arrayidx3, align 4
   %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 0
-  %2 = load i32* %arrayidx10, align 4
+  %2 = load i32, i32* %arrayidx10, align 4
   store i32 %2, i32 addrspace(1)* %out, align 4
   %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 1
-  %3 = load i32* %arrayidx12
+  %3 = load i32, i32* %arrayidx12
   %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
   store i32 %3, i32 addrspace(1)* %arrayidx13
   ret void
@@ -67,8 +67,8 @@ entry:
   store i32 3, i32* %b.y.ptr
   %a.indirect.ptr = getelementptr %struct.point, %struct.point* %a, i32 0, i32 0
   %b.indirect.ptr = getelementptr %struct.point, %struct.point* %b, i32 0, i32 0
-  %a.indirect = load i32* %a.indirect.ptr
-  %b.indirect = load i32* %b.indirect.ptr
+  %a.indirect = load i32, i32* %a.indirect.ptr
+  %b.indirect = load i32, i32* %b.indirect.ptr
   %0 = add i32 %a.indirect, %b.indirect
   store i32 %0, i32 addrspace(1)* %out
   ret void
@@ -86,9 +86,9 @@ define void @direct_loop(i32 addrspace(1
 entry:
   %prv_array_const = alloca [2 x i32]
   %prv_array = alloca [2 x i32]
-  %a = load i32 addrspace(1)* %in
+  %a = load i32, i32 addrspace(1)* %in
   %b_src_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %b = load i32 addrspace(1)* %b_src_ptr
+  %b = load i32, i32 addrspace(1)* %b_src_ptr
   %a_dst_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 0
   store i32 %a, i32* %a_dst_ptr
   %b_dst_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 1
@@ -98,9 +98,9 @@ entry:
 for.body:
   %inc = phi i32 [0, %entry], [%count, %for.body]
   %x_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 0
-  %x = load i32* %x_ptr
+  %x = load i32, i32* %x_ptr
   %y_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array, i32 0, i32 0
-  %y = load i32* %y_ptr
+  %y = load i32, i32* %y_ptr
   %xy = add i32 %x, %y
   store i32 %xy, i32* %y_ptr
   %count = add i32 %inc, 1
@@ -109,7 +109,7 @@ for.body:
 
 for.end:
   %value_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array, i32 0, i32 0
-  %value = load i32* %value_ptr
+  %value = load i32, i32* %value_ptr
   store i32 %value, i32 addrspace(1)* %out
   ret void
 }
@@ -129,7 +129,7 @@ entry:
   store i16 0, i16* %1
   store i16 1, i16* %2
   %3 = getelementptr [2 x i16], [2 x i16]* %0, i32 0, i32 %index
-  %4 = load i16* %3
+  %4 = load i16, i16* %3
   %5 = sext i16 %4 to i32
   store i32 %5, i32 addrspace(1)* %out
   ret void
@@ -149,7 +149,7 @@ entry:
   store i8 0, i8* %1
   store i8 1, i8* %2
   %3 = getelementptr [2 x i8], [2 x i8]* %0, i32 0, i32 %index
-  %4 = load i8* %3
+  %4 = load i8, i8* %3
   %5 = sext i8 %4 to i32
   store i32 %5, i32 addrspace(1)* %out
   ret void
@@ -172,7 +172,7 @@ entry:
   store i32 0, i32* %1
   store i32 1, i32* %2
   %3 = getelementptr [2 x i32], [2 x i32]* %0, i32 0, i32 %in
-  %4 = load i32* %3
+  %4 = load i32, i32* %3
   %5 = call i32 @llvm.r600.read.tidig.x()
   %6 = add i32 %4, %5
   store i32 %6, i32 addrspace(1)* %out
@@ -202,8 +202,8 @@ entry:
   store i8 0, i8* %6
   %7 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 %in
   %8 = getelementptr [2 x i8], [2 x i8]* %1, i32 0, i32 %in
-  %9 = load i8* %7
-  %10 = load i8* %8
+  %9 = load i8, i8* %7
+  %10 = load i8, i8* %8
   %11 = add i8 %9, %10
   %12 = sext i8 %11 to i32
   store i32 %12, i32 addrspace(1)* %out
@@ -218,7 +218,7 @@ entry:
   store i8 0, i8* %gep0
   store i8 1, i8* %gep1
   %gep2 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 %index
-  %load = load i8* %gep2
+  %load = load i8, i8* %gep2
   %sext = sext i8 %load to i32
   store i32 %sext, i32 addrspace(1)* %out
   ret void
@@ -232,7 +232,7 @@ entry:
   store i32 0, i32* %gep0
   store i32 1, i32* %gep1
   %gep2 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 %index
-  %load = load i32* %gep2
+  %load = load i32, i32* %gep2
   store i32 %load, i32 addrspace(1)* %out
   ret void
 }
@@ -245,7 +245,7 @@ entry:
   store i64 0, i64* %gep0
   store i64 1, i64* %gep1
   %gep2 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 %index
-  %load = load i64* %gep2
+  %load = load i64, i64* %gep2
   store i64 %load, i64 addrspace(1)* %out
   ret void
 }
@@ -260,7 +260,7 @@ entry:
   store i32 0, i32* %gep0
   store i32 1, i32* %gep1
   %gep2 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 %index, i32 0
-  %load = load i32* %gep2
+  %load = load i32, i32* %gep2
   store i32 %load, i32 addrspace(1)* %out
   ret void
 }
@@ -273,7 +273,7 @@ entry:
   store i32 0, i32* %gep0
   store i32 1, i32* %gep1
   %gep2 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %alloca, i32 0, i32 %index, i32 0
-  %load = load i32* %gep2
+  %load = load i32, i32* %gep2
   store i32 %load, i32 addrspace(1)* %out
   ret void
 }
@@ -287,7 +287,7 @@ entry:
   store i32 1, i32* %tmp2
   %cmp = icmp eq i32 %in, 0
   %sel = select i1 %cmp, i32* %tmp1, i32* %tmp2
-  %load = load i32* %sel
+  %load = load i32, i32* %sel
   store i32 %load, i32 addrspace(1)* %out
   ret void
 }
@@ -307,7 +307,7 @@ define void @ptrtoint(i32 addrspace(1)*
   %tmp2 = add i32 %tmp1, 5
   %tmp3 = inttoptr i32 %tmp2 to i32*
   %tmp4 = getelementptr i32, i32* %tmp3, i32 %b
-  %tmp5 = load i32* %tmp4
+  %tmp5 = load i32, i32* %tmp4
   store i32 %tmp5, i32 addrspace(1)* %out
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/pv-packing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/pv-packing.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/pv-packing.ll (original)
+++ llvm/trunk/test/CodeGen/R600/pv-packing.ll Fri Feb 27 15:17:42 2015
@@ -14,8 +14,8 @@ main_body:
   %6 = extractelement <4 x float> %reg3, i32 0
   %7 = extractelement <4 x float> %reg3, i32 1
   %8 = extractelement <4 x float> %reg3, i32 2
-  %9 = load <4 x float> addrspace(8)* null
-  %10 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %9 = load <4 x float>, <4 x float> addrspace(8)* null
+  %10 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %11 = call float @llvm.AMDGPU.dp4(<4 x float> %9, <4 x float> %9)
   %12 = fmul float %0, %3
   %13 = fadd float %12, %6

Modified: llvm/trunk/test/CodeGen/R600/pv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/pv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/pv.ll (original)
+++ llvm/trunk/test/CodeGen/R600/pv.ll Fri Feb 27 15:17:42 2015
@@ -33,63 +33,63 @@ main_body:
   %25 = extractelement <4 x float> %reg7, i32 1
   %26 = extractelement <4 x float> %reg7, i32 2
   %27 = extractelement <4 x float> %reg7, i32 3
-  %28 = load <4 x float> addrspace(8)* null
+  %28 = load <4 x float>, <4 x float> addrspace(8)* null
   %29 = extractelement <4 x float> %28, i32 0
   %30 = fmul float %0, %29
-  %31 = load <4 x float> addrspace(8)* null
+  %31 = load <4 x float>, <4 x float> addrspace(8)* null
   %32 = extractelement <4 x float> %31, i32 1
   %33 = fmul float %0, %32
-  %34 = load <4 x float> addrspace(8)* null
+  %34 = load <4 x float>, <4 x float> addrspace(8)* null
   %35 = extractelement <4 x float> %34, i32 2
   %36 = fmul float %0, %35
-  %37 = load <4 x float> addrspace(8)* null
+  %37 = load <4 x float>, <4 x float> addrspace(8)* null
   %38 = extractelement <4 x float> %37, i32 3
   %39 = fmul float %0, %38
-  %40 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %40 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %41 = extractelement <4 x float> %40, i32 0
   %42 = fmul float %1, %41
   %43 = fadd float %42, %30
-  %44 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %44 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %45 = extractelement <4 x float> %44, i32 1
   %46 = fmul float %1, %45
   %47 = fadd float %46, %33
-  %48 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %48 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %49 = extractelement <4 x float> %48, i32 2
   %50 = fmul float %1, %49
   %51 = fadd float %50, %36
-  %52 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %52 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %53 = extractelement <4 x float> %52, i32 3
   %54 = fmul float %1, %53
   %55 = fadd float %54, %39
-  %56 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %56 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %57 = extractelement <4 x float> %56, i32 0
   %58 = fmul float %2, %57
   %59 = fadd float %58, %43
-  %60 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %60 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %61 = extractelement <4 x float> %60, i32 1
   %62 = fmul float %2, %61
   %63 = fadd float %62, %47
-  %64 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %64 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %65 = extractelement <4 x float> %64, i32 2
   %66 = fmul float %2, %65
   %67 = fadd float %66, %51
-  %68 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %68 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %69 = extractelement <4 x float> %68, i32 3
   %70 = fmul float %2, %69
   %71 = fadd float %70, %55
-  %72 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+  %72 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
   %73 = extractelement <4 x float> %72, i32 0
   %74 = fmul float %3, %73
   %75 = fadd float %74, %59
-  %76 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+  %76 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
   %77 = extractelement <4 x float> %76, i32 1
   %78 = fmul float %3, %77
   %79 = fadd float %78, %63
-  %80 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+  %80 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
   %81 = extractelement <4 x float> %80, i32 2
   %82 = fmul float %3, %81
   %83 = fadd float %82, %67
-  %84 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+  %84 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
   %85 = extractelement <4 x float> %84, i32 3
   %86 = fmul float %3, %85
   %87 = fadd float %86, %71
@@ -107,15 +107,15 @@ main_body:
   %99 = fmul float %4, %98
   %100 = fmul float %5, %98
   %101 = fmul float %6, %98
-  %102 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+  %102 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
   %103 = extractelement <4 x float> %102, i32 0
   %104 = fmul float %103, %8
   %105 = fadd float %104, %20
-  %106 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+  %106 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
   %107 = extractelement <4 x float> %106, i32 1
   %108 = fmul float %107, %9
   %109 = fadd float %108, %21
-  %110 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+  %110 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
   %111 = extractelement <4 x float> %110, i32 2
   %112 = fmul float %111, %10
   %113 = fadd float %112, %22
@@ -123,11 +123,11 @@ main_body:
   %115 = call float @llvm.AMDIL.clamp.(float %109, float 0.000000e+00, float 1.000000e+00)
   %116 = call float @llvm.AMDIL.clamp.(float %113, float 0.000000e+00, float 1.000000e+00)
   %117 = call float @llvm.AMDIL.clamp.(float %15, float 0.000000e+00, float 1.000000e+00)
-  %118 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+  %118 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
   %119 = extractelement <4 x float> %118, i32 0
-  %120 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+  %120 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
   %121 = extractelement <4 x float> %120, i32 1
-  %122 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+  %122 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
   %123 = extractelement <4 x float> %122, i32 2
   %124 = insertelement <4 x float> undef, float %99, i32 0
   %125 = insertelement <4 x float> %124, float %100, i32 1
@@ -138,11 +138,11 @@ main_body:
   %130 = insertelement <4 x float> %129, float %123, i32 2
   %131 = insertelement <4 x float> %130, float 0.000000e+00, i32 3
   %132 = call float @llvm.AMDGPU.dp4(<4 x float> %127, <4 x float> %131)
-  %133 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+  %133 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
   %134 = extractelement <4 x float> %133, i32 0
-  %135 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+  %135 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
   %136 = extractelement <4 x float> %135, i32 1
-  %137 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+  %137 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
   %138 = extractelement <4 x float> %137, i32 2
   %139 = insertelement <4 x float> undef, float %99, i32 0
   %140 = insertelement <4 x float> %139, float %100, i32 1
@@ -153,31 +153,31 @@ main_body:
   %145 = insertelement <4 x float> %144, float %138, i32 2
   %146 = insertelement <4 x float> %145, float 0.000000e+00, i32 3
   %147 = call float @llvm.AMDGPU.dp4(<4 x float> %142, <4 x float> %146)
-  %148 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
+  %148 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
   %149 = extractelement <4 x float> %148, i32 0
   %150 = fmul float %149, %8
-  %151 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
+  %151 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
   %152 = extractelement <4 x float> %151, i32 1
   %153 = fmul float %152, %9
-  %154 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
+  %154 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
   %155 = extractelement <4 x float> %154, i32 2
   %156 = fmul float %155, %10
-  %157 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+  %157 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
   %158 = extractelement <4 x float> %157, i32 0
   %159 = fmul float %158, %12
-  %160 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+  %160 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
   %161 = extractelement <4 x float> %160, i32 1
   %162 = fmul float %161, %13
-  %163 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+  %163 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
   %164 = extractelement <4 x float> %163, i32 2
   %165 = fmul float %164, %14
-  %166 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
+  %166 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
   %167 = extractelement <4 x float> %166, i32 0
   %168 = fmul float %167, %16
-  %169 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
+  %169 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
   %170 = extractelement <4 x float> %169, i32 1
   %171 = fmul float %170, %17
-  %172 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
+  %172 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
   %173 = extractelement <4 x float> %172, i32 2
   %174 = fmul float %173, %18
   %175 = fcmp uge float %132, 0.000000e+00

Modified: llvm/trunk/test/CodeGen/R600/r600-export-fix.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/r600-export-fix.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/r600-export-fix.ll (original)
+++ llvm/trunk/test/CodeGen/R600/r600-export-fix.ll Fri Feb 27 15:17:42 2015
@@ -16,83 +16,83 @@ main_body:
   %1 = extractelement <4 x float> %reg1, i32 1
   %2 = extractelement <4 x float> %reg1, i32 2
   %3 = extractelement <4 x float> %reg1, i32 3
-  %4 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+  %4 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
   %5 = extractelement <4 x float> %4, i32 0
   %6 = fmul float %5, %0
-  %7 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+  %7 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
   %8 = extractelement <4 x float> %7, i32 1
   %9 = fmul float %8, %0
-  %10 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+  %10 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
   %11 = extractelement <4 x float> %10, i32 2
   %12 = fmul float %11, %0
-  %13 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+  %13 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
   %14 = extractelement <4 x float> %13, i32 3
   %15 = fmul float %14, %0
-  %16 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+  %16 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
   %17 = extractelement <4 x float> %16, i32 0
   %18 = fmul float %17, %1
   %19 = fadd float %18, %6
-  %20 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+  %20 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
   %21 = extractelement <4 x float> %20, i32 1
   %22 = fmul float %21, %1
   %23 = fadd float %22, %9
-  %24 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+  %24 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
   %25 = extractelement <4 x float> %24, i32 2
   %26 = fmul float %25, %1
   %27 = fadd float %26, %12
-  %28 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+  %28 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
   %29 = extractelement <4 x float> %28, i32 3
   %30 = fmul float %29, %1
   %31 = fadd float %30, %15
-  %32 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
+  %32 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
   %33 = extractelement <4 x float> %32, i32 0
   %34 = fmul float %33, %2
   %35 = fadd float %34, %19
-  %36 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
+  %36 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
   %37 = extractelement <4 x float> %36, i32 1
   %38 = fmul float %37, %2
   %39 = fadd float %38, %23
-  %40 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
+  %40 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
   %41 = extractelement <4 x float> %40, i32 2
   %42 = fmul float %41, %2
   %43 = fadd float %42, %27
-  %44 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
+  %44 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 6)
   %45 = extractelement <4 x float> %44, i32 3
   %46 = fmul float %45, %2
   %47 = fadd float %46, %31
-  %48 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+  %48 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
   %49 = extractelement <4 x float> %48, i32 0
   %50 = fmul float %49, %3
   %51 = fadd float %50, %35
-  %52 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+  %52 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
   %53 = extractelement <4 x float> %52, i32 1
   %54 = fmul float %53, %3
   %55 = fadd float %54, %39
-  %56 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+  %56 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
   %57 = extractelement <4 x float> %56, i32 2
   %58 = fmul float %57, %3
   %59 = fadd float %58, %43
-  %60 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+  %60 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
   %61 = extractelement <4 x float> %60, i32 3
   %62 = fmul float %61, %3
   %63 = fadd float %62, %47
-  %64 = load <4 x float> addrspace(8)* null
+  %64 = load <4 x float>, <4 x float> addrspace(8)* null
   %65 = extractelement <4 x float> %64, i32 0
-  %66 = load <4 x float> addrspace(8)* null
+  %66 = load <4 x float>, <4 x float> addrspace(8)* null
   %67 = extractelement <4 x float> %66, i32 1
-  %68 = load <4 x float> addrspace(8)* null
+  %68 = load <4 x float>, <4 x float> addrspace(8)* null
   %69 = extractelement <4 x float> %68, i32 2
-  %70 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %70 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %71 = extractelement <4 x float> %70, i32 0
-  %72 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %72 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %73 = extractelement <4 x float> %72, i32 1
-  %74 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %74 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %75 = extractelement <4 x float> %74, i32 2
-  %76 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+  %76 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
   %77 = extractelement <4 x float> %76, i32 0
-  %78 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+  %78 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
   %79 = extractelement <4 x float> %78, i32 1
-  %80 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+  %80 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
   %81 = extractelement <4 x float> %80, i32 2
   %82 = insertelement <4 x float> undef, float %51, i32 0
   %83 = insertelement <4 x float> %82, float %55, i32 1

Modified: llvm/trunk/test/CodeGen/R600/r600cfg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/r600cfg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/r600cfg.ll (original)
+++ llvm/trunk/test/CodeGen/R600/r600cfg.ll Fri Feb 27 15:17:42 2015
@@ -83,7 +83,7 @@ ELSE45:
 ENDIF43:                                          ; preds = %ELSE45, %IF44
   %.sink = phi i32 [ %49, %IF44 ], [ %51, %ELSE45 ]
   %52 = bitcast i32 %.sink to float
-  %53 = load <4 x float> addrspace(8)* null
+  %53 = load <4 x float>, <4 x float> addrspace(8)* null
   %54 = extractelement <4 x float> %53, i32 0
   %55 = bitcast float %54 to i32
   br label %LOOP47

Modified: llvm/trunk/test/CodeGen/R600/register-count-comments.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/register-count-comments.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/register-count-comments.ll (original)
+++ llvm/trunk/test/CodeGen/R600/register-count-comments.ll Fri Feb 27 15:17:42 2015
@@ -12,8 +12,8 @@ define void @foo(i32 addrspace(1)* noali
   %aptr = getelementptr i32, i32 addrspace(1)* %abase, i32 %tid
   %bptr = getelementptr i32, i32 addrspace(1)* %bbase, i32 %tid
   %outptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %a = load i32 addrspace(1)* %aptr, align 4
-  %b = load i32 addrspace(1)* %bptr, align 4
+  %a = load i32, i32 addrspace(1)* %aptr, align 4
+  %b = load i32, i32 addrspace(1)* %bptr, align 4
   %result = add i32 %a, %b
   store i32 %result, i32 addrspace(1)* %outptr, align 4
   ret void

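For reference, a minimal standalone sketch of the new load syntax these tests now use (the function and value names here are illustrative, not taken from the commit): the result type is written explicitly before the pointer operand, so the two no longer have to be inferred from one another.

  define i32 @example(i32 addrspace(1)* %p) {
    ; new form: result type first, then the pointer type and operand
    %v = load i32, i32 addrspace(1)* %p, align 4
    ret i32 %v
  }

The old form, visible on the '-' lines throughout, spelled only the pointer type ("load i32 addrspace(1)* %p") and left the result type implicit.
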
Modified: llvm/trunk/test/CodeGen/R600/reorder-stores.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/reorder-stores.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/reorder-stores.ll (original)
+++ llvm/trunk/test/CodeGen/R600/reorder-stores.ll Fri Feb 27 15:17:42 2015
@@ -12,8 +12,8 @@
 ; SI: buffer_store_dwordx2
 ; SI: s_endpgm
 define void @no_reorder_v2f64_global_load_store(<2 x double> addrspace(1)* nocapture %x, <2 x double> addrspace(1)* nocapture %y) nounwind {
-  %tmp1 = load <2 x double> addrspace(1)* %x, align 16
-  %tmp4 = load <2 x double> addrspace(1)* %y, align 16
+  %tmp1 = load <2 x double>, <2 x double> addrspace(1)* %x, align 16
+  %tmp4 = load <2 x double>, <2 x double> addrspace(1)* %y, align 16
   store <2 x double> %tmp4, <2 x double> addrspace(1)* %x, align 16
   store <2 x double> %tmp1, <2 x double> addrspace(1)* %y, align 16
   ret void
@@ -26,8 +26,8 @@ define void @no_reorder_v2f64_global_loa
 ; SI: ds_write_b64
 ; SI: s_endpgm
 define void @no_reorder_scalarized_v2f64_local_load_store(<2 x double> addrspace(3)* nocapture %x, <2 x double> addrspace(3)* nocapture %y) nounwind {
-  %tmp1 = load <2 x double> addrspace(3)* %x, align 16
-  %tmp4 = load <2 x double> addrspace(3)* %y, align 16
+  %tmp1 = load <2 x double>, <2 x double> addrspace(3)* %x, align 16
+  %tmp4 = load <2 x double>, <2 x double> addrspace(3)* %y, align 16
   store <2 x double> %tmp4, <2 x double> addrspace(3)* %x, align 16
   store <2 x double> %tmp1, <2 x double> addrspace(3)* %y, align 16
   ret void
@@ -76,8 +76,8 @@ define void @no_reorder_scalarized_v2f64
 ; SI: buffer_store_dword
 ; SI: s_endpgm
 define void @no_reorder_split_v8i32_global_load_store(<8 x i32> addrspace(1)* nocapture %x, <8 x i32> addrspace(1)* nocapture %y) nounwind {
-  %tmp1 = load <8 x i32> addrspace(1)* %x, align 32
-  %tmp4 = load <8 x i32> addrspace(1)* %y, align 32
+  %tmp1 = load <8 x i32>, <8 x i32> addrspace(1)* %x, align 32
+  %tmp4 = load <8 x i32>, <8 x i32> addrspace(1)* %y, align 32
   store <8 x i32> %tmp4, <8 x i32> addrspace(1)* %x, align 32
   store <8 x i32> %tmp1, <8 x i32> addrspace(1)* %y, align 32
   ret void
@@ -91,8 +91,8 @@ define void @no_reorder_split_v8i32_glob
 ; SI: ds_write_b64
 ; SI: s_endpgm
 define void @no_reorder_extload_64(<2 x i32> addrspace(3)* nocapture %x, <2 x i32> addrspace(3)* nocapture %y) nounwind {
-  %tmp1 = load <2 x i32> addrspace(3)* %x, align 8
-  %tmp4 = load <2 x i32> addrspace(3)* %y, align 8
+  %tmp1 = load <2 x i32>, <2 x i32> addrspace(3)* %x, align 8
+  %tmp4 = load <2 x i32>, <2 x i32> addrspace(3)* %y, align 8
   %tmp1ext = zext <2 x i32> %tmp1 to <2 x i64>
   %tmp4ext = zext <2 x i32> %tmp4 to <2 x i64>
   %tmp7 = add <2 x i64> %tmp1ext, <i64 1, i64 1>

Modified: llvm/trunk/test/CodeGen/R600/rotl.i64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/rotl.i64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/rotl.i64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/rotl.i64.ll Fri Feb 27 15:17:42 2015
@@ -28,8 +28,8 @@ entry:
 ; BOTH: s_endpgm
 define void @v_rotl_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
 entry:
-  %x = load i64 addrspace(1)* %xptr, align 8
-  %y = load i64 addrspace(1)* %yptr, align 8
+  %x = load i64, i64 addrspace(1)* %xptr, align 8
+  %y = load i64, i64 addrspace(1)* %yptr, align 8
   %tmp0 = shl i64 %x, %y
   %tmp1 = sub i64 64, %y
   %tmp2 = lshr i64 %x, %tmp1

Modified: llvm/trunk/test/CodeGen/R600/rotr.i64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/rotr.i64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/rotr.i64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/rotr.i64.ll Fri Feb 27 15:17:42 2015
@@ -26,8 +26,8 @@ entry:
 ; BOTH: v_or_b32
 define void @v_rotr_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
 entry:
-  %x = load i64 addrspace(1)* %xptr, align 8
-  %y = load i64 addrspace(1)* %yptr, align 8
+  %x = load i64, i64 addrspace(1)* %xptr, align 8
+  %y = load i64, i64 addrspace(1)* %yptr, align 8
   %tmp0 = sub i64 64, %y
   %tmp1 = shl i64 %x, %tmp0
   %tmp2 = lshr i64 %x, %y
@@ -50,8 +50,8 @@ entry:
 ; BOTH-LABEL: {{^}}v_rotr_v2i64:
 define void @v_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> addrspace(1)* %xptr, <2 x i64> addrspace(1)* %yptr) {
 entry:
-  %x = load <2 x i64> addrspace(1)* %xptr, align 8
-  %y = load <2 x i64> addrspace(1)* %yptr, align 8
+  %x = load <2 x i64>, <2 x i64> addrspace(1)* %xptr, align 8
+  %y = load <2 x i64>, <2 x i64> addrspace(1)* %yptr, align 8
   %tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
   %tmp1 = shl <2 x i64> %x, %tmp0
   %tmp2 = lshr <2 x i64> %x, %y

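As an aside on the rotl.i64.ll/rotr.i64.ll tests above: they express a rotate as two shifts combined with an or. A commented sketch of the left-rotate identity they rely on (illustrative only; it assumes %y is nonzero, since an i64 shift by 64 or more is undefined):

  define i64 @rotl64(i64 %x, i64 %y) {
    %hi  = shl i64 %x, %y     ; bits shifted out of the top...
    %amt = sub i64 64, %y
    %lo  = lshr i64 %x, %amt  ; ...reappear at the bottom
    %r   = or i64 %hi, %lo
    ret i64 %r
  }
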
Modified: llvm/trunk/test/CodeGen/R600/rsq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/rsq.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/rsq.ll (original)
+++ llvm/trunk/test/CodeGen/R600/rsq.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ declare double @llvm.sqrt.f64(double) no
 ; SI: v_rsq_f32_e32
 ; SI: s_endpgm
 define void @rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
-  %val = load float addrspace(1)* %in, align 4
+  %val = load float, float addrspace(1)* %in, align 4
   %sqrt = call float @llvm.sqrt.f32(float %val) nounwind readnone
   %div = fdiv float 1.0, %sqrt
   store float %div, float addrspace(1)* %out, align 4
@@ -21,7 +21,7 @@ define void @rsq_f32(float addrspace(1)*
 ; SI-SAFE: v_sqrt_f64_e32
 ; SI: s_endpgm
 define void @rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
-  %val = load double addrspace(1)* %in, align 4
+  %val = load double, double addrspace(1)* %in, align 4
   %sqrt = call double @llvm.sqrt.f64(double %val) nounwind readnone
   %div = fdiv double 1.0, %sqrt
   store double %div, double addrspace(1)* %out, align 4
@@ -62,9 +62,9 @@ define void @rsqrt_fmul(float addrspace(
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
   %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
 
-  %a = load float addrspace(1)* %gep.0
-  %b = load float addrspace(1)* %gep.1
-  %c = load float addrspace(1)* %gep.2
+  %a = load float, float addrspace(1)* %gep.0
+  %b = load float, float addrspace(1)* %gep.1
+  %c = load float, float addrspace(1)* %gep.2
 
   %x = call float @llvm.sqrt.f32(float %a)
   %y = fmul float %x, %b

Modified: llvm/trunk/test/CodeGen/R600/s_movk_i32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/s_movk_i32.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/s_movk_i32.ll (original)
+++ llvm/trunk/test/CodeGen/R600/s_movk_i32.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @s_movk_i32_k0(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 4
+  %loada = load i64, i64 addrspace(1)* %a, align 4
   %or = or i64 %loada, 4295032831 ; ((1 << 16) - 1) | (1 << 32)
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -23,7 +23,7 @@ define void @s_movk_i32_k0(i64 addrspace
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @s_movk_i32_k1(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 4
+  %loada = load i64, i64 addrspace(1)* %a, align 4
   %or = or i64 %loada, 4295000063 ; ((1 << 15) - 1) | (1 << 32)
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -37,7 +37,7 @@ define void @s_movk_i32_k1(i64 addrspace
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @s_movk_i32_k2(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 4
+  %loada = load i64, i64 addrspace(1)* %a, align 4
   %or = or i64 %loada, 274877939711 ; ((1 << 15) - 1) | (64 << 32)
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -51,7 +51,7 @@ define void @s_movk_i32_k2(i64 addrspace
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @s_movk_i32_k3(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 4
+  %loada = load i64, i64 addrspace(1)* %a, align 4
   %or = or i64 %loada, 4295000064 ; (1 << 15) | (1 << 32)
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -65,7 +65,7 @@ define void @s_movk_i32_k3(i64 addrspace
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @s_movk_i32_k4(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 4
+  %loada = load i64, i64 addrspace(1)* %a, align 4
   %or = or i64 %loada, 4295098368 ; (1 << 17) | (1 << 32)
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -79,7 +79,7 @@ define void @s_movk_i32_k4(i64 addrspace
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @s_movk_i32_k5(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 4
+  %loada = load i64, i64 addrspace(1)* %a, align 4
   %or = or i64 %loada, 18374967954648334319 ; -17 & 0xff00ffffffffffff
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -93,7 +93,7 @@ define void @s_movk_i32_k5(i64 addrspace
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @s_movk_i32_k6(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 4
+  %loada = load i64, i64 addrspace(1)* %a, align 4
   %or = or i64 %loada, 270582939713 ; 65 | (63 << 32)
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -107,7 +107,7 @@ define void @s_movk_i32_k6(i64 addrspace
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @s_movk_i32_k7(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 4
+  %loada = load i64, i64 addrspace(1)* %a, align 4
   %or = or i64 %loada, 70368744185856; ((1 << 13)) | ((1 << 14) << 32)
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -122,7 +122,7 @@ define void @s_movk_i32_k7(i64 addrspace
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @s_movk_i32_k8(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 4
+  %loada = load i64, i64 addrspace(1)* %a, align 4
   %or = or i64 %loada, 1229782942255906816 ; 0x11111111ffff8000
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -136,7 +136,7 @@ define void @s_movk_i32_k8(i64 addrspace
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @s_movk_i32_k9(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 4
+  %loada = load i64, i64 addrspace(1)* %a, align 4
   %or = or i64 %loada, 1229782942255906817 ; 0x11111111ffff8001
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -150,7 +150,7 @@ define void @s_movk_i32_k9(i64 addrspace
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @s_movk_i32_k10(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 4
+  %loada = load i64, i64 addrspace(1)* %a, align 4
   %or = or i64 %loada, 1229782942255909000 ; 0x11111111ffff8888
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -164,7 +164,7 @@ define void @s_movk_i32_k10(i64 addrspac
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @s_movk_i32_k11(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 4
+  %loada = load i64, i64 addrspace(1)* %a, align 4
   %or = or i64 %loada, 1229782942255910911 ; 0x11111111ffff8fff
   store i64 %or, i64 addrspace(1)* %out
   ret void
@@ -178,7 +178,7 @@ define void @s_movk_i32_k11(i64 addrspac
 ; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: s_endpgm
 define void @s_movk_i32_k12(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
-  %loada = load i64 addrspace(1)* %a, align 4
+  %loada = load i64, i64 addrspace(1)* %a, align 4
   %or = or i64 %loada, 1229782942255902721 ; 0x11111111ffff7001
   store i64 %or, i64 addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/saddo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/saddo.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/saddo.ll (original)
+++ llvm/trunk/test/CodeGen/R600/saddo.ll Fri Feb 27 15:17:42 2015
@@ -28,8 +28,8 @@ define void @s_saddo_i32(i32 addrspace(1
 
 ; FUNC-LABEL: {{^}}v_saddo_i32:
 define void @v_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
-  %a = load i32 addrspace(1)* %aptr, align 4
-  %b = load i32 addrspace(1)* %bptr, align 4
+  %a = load i32, i32 addrspace(1)* %aptr, align 4
+  %b = load i32, i32 addrspace(1)* %bptr, align 4
   %sadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) nounwind
   %val = extractvalue { i32, i1 } %sadd, 0
   %carry = extractvalue { i32, i1 } %sadd, 1
@@ -52,8 +52,8 @@ define void @s_saddo_i64(i64 addrspace(1
 ; SI: v_add_i32
 ; SI: v_addc_u32
 define void @v_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
-  %a = load i64 addrspace(1)* %aptr, align 4
-  %b = load i64 addrspace(1)* %bptr, align 4
+  %a = load i64, i64 addrspace(1)* %aptr, align 4
+  %b = load i64, i64 addrspace(1)* %bptr, align 4
   %sadd = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) nounwind
   %val = extractvalue { i64, i1 } %sadd, 0
   %carry = extractvalue { i64, i1 } %sadd, 1

Modified: llvm/trunk/test/CodeGen/R600/salu-to-valu.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/salu-to-valu.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/salu-to-valu.ll (original)
+++ llvm/trunk/test/CodeGen/R600/salu-to-valu.ll Fri Feb 27 15:17:42 2015
@@ -28,10 +28,10 @@ loop:
   %4 = phi i64 [0, %entry], [%5, %loop]
   %5 = add i64 %2, %4
   %6 = getelementptr i8, i8 addrspace(1)* %in, i64 %5
-  %7 = load i8 addrspace(1)* %6, align 1
+  %7 = load i8, i8 addrspace(1)* %6, align 1
   %8 = or i64 %5, 1
   %9 = getelementptr i8, i8 addrspace(1)* %in, i64 %8
-  %10 = load i8 addrspace(1)* %9, align 1
+  %10 = load i8, i8 addrspace(1)* %9, align 1
   %11 = add i8 %7, %10
   %12 = sext i8 %11 to i32
   store i32 %12, i32 addrspace(1)* %out
@@ -59,18 +59,18 @@ entry:
   br i1 %0, label %if, label %else
 
 if:
-  %1 = load i32 addrspace(2)* addrspace(1)* %in
+  %1 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %in
   br label %endif
 
 else:
   %2 = getelementptr i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %in
-  %3 = load i32 addrspace(2)* addrspace(1)* %2
+  %3 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %2
   br label %endif
 
 endif:
   %4 = phi i32 addrspace(2)*  [%1, %if], [%3, %else]
   %5 = getelementptr i32, i32 addrspace(2)* %4, i32 3000
-  %6 = load i32 addrspace(2)* %5
+  %6 = load i32, i32 addrspace(2)* %5
   store i32 %6, i32 addrspace(1)* %out
   ret void
 }
@@ -84,7 +84,7 @@ entry:
   %0 = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %1 = add i32 %0, 4
   %2 = getelementptr [8 x i32], [8 x i32] addrspace(2)* %in, i32 %0, i32 4
-  %3 = load i32 addrspace(2)* %2
+  %3 = load i32, i32 addrspace(2)* %2
   store i32 %3, i32 addrspace(1)* %out
   ret void
 }
@@ -97,7 +97,7 @@ entry:
   %tmp0 = tail call i32 @llvm.r600.read.tidig.x() #1
   %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
   %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <8 x i32> addrspace(2)*
-  %tmp3 = load <8 x i32> addrspace(2)* %tmp2, align 4
+  %tmp3 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp2, align 4
   store <8 x i32> %tmp3, <8 x i32> addrspace(1)* %out, align 32
   ret void
 }
@@ -112,7 +112,7 @@ entry:
   %tmp0 = tail call i32 @llvm.r600.read.tidig.x() #1
   %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
   %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <16 x i32> addrspace(2)*
-  %tmp3 = load <16 x i32> addrspace(2)* %tmp2, align 4
+  %tmp3 = load <16 x i32>, <16 x i32> addrspace(2)* %tmp2, align 4
   store <16 x i32> %tmp3, <16 x i32> addrspace(1)* %out, align 32
   ret void
 }

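Where the loaded value is itself a pointer, as in the salu-to-valu.ll hunks above, the explicit result type also makes the level of indirection unambiguous. A minimal sketch (names illustrative, not from the commit):

  define i32 addrspace(2)* @load_ptr(i32 addrspace(2)* addrspace(1)* %in) {
    ; load an i32 addrspace(2)* out of addrspace(1) memory
    %p = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %in
    ret i32 addrspace(2)* %p
  }
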
Modified: llvm/trunk/test/CodeGen/R600/scalar_to_vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/scalar_to_vector.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/scalar_to_vector.ll (original)
+++ llvm/trunk/test/CodeGen/R600/scalar_to_vector.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@
 ; SI: buffer_store_short [[RESULT]]
 ; SI: s_endpgm
 define void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %tmp1 = load i32 addrspace(1)* %in, align 4
+  %tmp1 = load i32, i32 addrspace(1)* %in, align 4
   %bc = bitcast i32 %tmp1 to <2 x i16>
   %tmp2 = shufflevector <2 x i16> %bc, <2 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   store <4 x i16> %tmp2, <4 x i16> addrspace(1)* %out, align 8
@@ -27,7 +27,7 @@ define void @scalar_to_vector_v2i32(<4 x
 ; SI: buffer_store_short [[RESULT]]
 ; SI: s_endpgm
 define void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
-  %tmp1 = load float addrspace(1)* %in, align 4
+  %tmp1 = load float, float addrspace(1)* %in, align 4
   %bc = bitcast float %tmp1 to <2 x i16>
   %tmp2 = shufflevector <2 x i16> %bc, <2 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   store <4 x i16> %tmp2, <4 x i16> addrspace(1)* %out, align 8
@@ -39,7 +39,7 @@ define void @scalar_to_vector_v2f32(<4 x
 
 
 ; define void @scalar_to_vector_test2(<8 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-;   %tmp1 = load i32 addrspace(1)* %in, align 4
+;   %tmp1 = load i32, i32 addrspace(1)* %in, align 4
 ;   %bc = bitcast i32 %tmp1 to <4 x i8>
 
 ;   %tmp2 = shufflevector <4 x i8> %bc, <4 x i8> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>

Modified: llvm/trunk/test/CodeGen/R600/schedule-fs-loop-nested.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/schedule-fs-loop-nested.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/schedule-fs-loop-nested.ll (original)
+++ llvm/trunk/test/CodeGen/R600/schedule-fs-loop-nested.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 
 define void @main() {
 main_body:
-  %0 = load <4 x float> addrspace(9)* null
+  %0 = load <4 x float>, <4 x float> addrspace(9)* null
   %1 = extractelement <4 x float> %0, i32 3
   %2 = fptosi float %1 to i32
   %3 = bitcast i32 %2 to float
@@ -20,11 +20,11 @@ main_body:
   %14 = bitcast float %12 to i32
   %15 = add i32 %13, %14
   %16 = bitcast i32 %15 to float
-  %17 = load <4 x float> addrspace(9)* null
+  %17 = load <4 x float>, <4 x float> addrspace(9)* null
   %18 = extractelement <4 x float> %17, i32 0
-  %19 = load <4 x float> addrspace(9)* null
+  %19 = load <4 x float>, <4 x float> addrspace(9)* null
   %20 = extractelement <4 x float> %19, i32 1
-  %21 = load <4 x float> addrspace(9)* null
+  %21 = load <4 x float>, <4 x float> addrspace(9)* null
   %22 = extractelement <4 x float> %21, i32 2
   br label %LOOP
 

Modified: llvm/trunk/test/CodeGen/R600/schedule-fs-loop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/schedule-fs-loop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/schedule-fs-loop.ll (original)
+++ llvm/trunk/test/CodeGen/R600/schedule-fs-loop.ll Fri Feb 27 15:17:42 2015
@@ -3,15 +3,15 @@
 
 define void @main() {
 main_body:
-  %0 = load <4 x float> addrspace(9)* null
+  %0 = load <4 x float>, <4 x float> addrspace(9)* null
   %1 = extractelement <4 x float> %0, i32 3
   %2 = fptosi float %1 to i32
   %3 = bitcast i32 %2 to float
-  %4 = load <4 x float> addrspace(9)* null
+  %4 = load <4 x float>, <4 x float> addrspace(9)* null
   %5 = extractelement <4 x float> %4, i32 0
-  %6 = load <4 x float> addrspace(9)* null
+  %6 = load <4 x float>, <4 x float> addrspace(9)* null
   %7 = extractelement <4 x float> %6, i32 1
-  %8 = load <4 x float> addrspace(9)* null
+  %8 = load <4 x float>, <4 x float> addrspace(9)* null
   %9 = extractelement <4 x float> %8, i32 2
   br label %LOOP
 

Modified: llvm/trunk/test/CodeGen/R600/schedule-global-loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/schedule-global-loads.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/schedule-global-loads.ll (original)
+++ llvm/trunk/test/CodeGen/R600/schedule-global-loads.ll Fri Feb 27 15:17:42 2015
@@ -14,9 +14,9 @@ declare i32 @llvm.r600.read.tidig.x() #1
 ; SI: buffer_store_dword [[REG0]]
 ; SI: buffer_store_dword [[REG1]]
 define void @cluster_global_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %ptr) #0 {
-  %load0 = load i32 addrspace(1)* %ptr, align 4
+  %load0 = load i32, i32 addrspace(1)* %ptr, align 4
   %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 1
-  %load1 = load i32 addrspace(1)* %gep, align 4
+  %load1 = load i32, i32 addrspace(1)* %gep, align 4
   store i32 %load0, i32 addrspace(1)* %out0, align 4
   store i32 %load1, i32 addrspace(1)* %out1, align 4
   ret void
@@ -30,8 +30,8 @@ define void @cluster_global_arg_loads(i3
 define void @same_base_ptr_crash(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %offset) {
 entry:
   %out1 = getelementptr i32, i32 addrspace(1)* %out, i32 %offset
-  %tmp0 = load i32 addrspace(1)* %out
-  %tmp1 = load i32 addrspace(1)* %out1
+  %tmp0 = load i32, i32 addrspace(1)* %out
+  %tmp1 = load i32, i32 addrspace(1)* %out1
   %tmp2 = add i32 %tmp0, %tmp1
   store i32 %tmp2, i32 addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/schedule-if-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/schedule-if-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/schedule-if-2.ll (original)
+++ llvm/trunk/test/CodeGen/R600/schedule-if-2.ll Fri Feb 27 15:17:42 2015
@@ -3,10 +3,10 @@
 
 define void @main() {
 main_body:
-  %0 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+  %0 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
   %1 = extractelement <4 x float> %0, i32 0
   %2 = fadd float 1.000000e+03, %1
-  %3 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %3 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %4 = extractelement <4 x float> %3, i32 0
   %5 = bitcast float %4 to i32
   %6 = icmp eq i32 %5, 0
@@ -47,7 +47,7 @@ IF:
   br label %ENDIF
 
 ELSE:                                             ; preds = %main_body
-  %36 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %36 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %37 = extractelement <4 x float> %36, i32 0
   %38 = bitcast float %37 to i32
   %39 = icmp eq i32 %38, 1
@@ -80,7 +80,7 @@ IF23:
   %.28 = select i1 %54, float 0x36A0000000000000, float 0.000000e+00
   %55 = bitcast float %.28 to i32
   %56 = sitofp i32 %55 to float
-  %57 = load <4 x float> addrspace(8)* null
+  %57 = load <4 x float>, <4 x float> addrspace(8)* null
   %58 = extractelement <4 x float> %57, i32 0
   %59 = fsub float -0.000000e+00, %58
   %60 = fadd float %2, %59

Modified: llvm/trunk/test/CodeGen/R600/schedule-if.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/schedule-if.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/schedule-if.ll (original)
+++ llvm/trunk/test/CodeGen/R600/schedule-if.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
 
 define void @main() {
 main_body:
-  %0 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %0 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %1 = extractelement <4 x float> %0, i32 0
   %2 = bitcast float %1 to i32
   %3 = icmp eq i32 %2, 0
@@ -14,7 +14,7 @@ main_body:
   br i1 %7, label %ENDIF, label %ELSE
 
 ELSE:                                             ; preds = %main_body
-  %8 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %8 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %9 = extractelement <4 x float> %8, i32 0
   %10 = bitcast float %9 to i32
   %11 = icmp eq i32 %10, 1
@@ -36,7 +36,7 @@ ENDIF:
   ret void
 
 IF13:                                             ; preds = %ELSE
-  %20 = load <4 x float> addrspace(8)* null
+  %20 = load <4 x float>, <4 x float> addrspace(8)* null
   %21 = extractelement <4 x float> %20, i32 0
   %22 = fsub float -0.000000e+00, %21
   %23 = fadd float 1.000000e+03, %22

Modified: llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll (original)
+++ llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll Fri Feb 27 15:17:42 2015
@@ -39,63 +39,63 @@ ENDIF:
   %temp3.0 = phi float [ 0.000000e+00, %main_body ], [ %101, %Flow2 ]
   %15 = extractelement <4 x float> %reg1, i32 1
   %16 = extractelement <4 x float> %reg1, i32 3
-  %17 = load <4 x float> addrspace(9)* null
+  %17 = load <4 x float>, <4 x float> addrspace(9)* null
   %18 = extractelement <4 x float> %17, i32 0
   %19 = fmul float %18, %0
-  %20 = load <4 x float> addrspace(9)* null
+  %20 = load <4 x float>, <4 x float> addrspace(9)* null
   %21 = extractelement <4 x float> %20, i32 1
   %22 = fmul float %21, %0
-  %23 = load <4 x float> addrspace(9)* null
+  %23 = load <4 x float>, <4 x float> addrspace(9)* null
   %24 = extractelement <4 x float> %23, i32 2
   %25 = fmul float %24, %0
-  %26 = load <4 x float> addrspace(9)* null
+  %26 = load <4 x float>, <4 x float> addrspace(9)* null
   %27 = extractelement <4 x float> %26, i32 3
   %28 = fmul float %27, %0
-  %29 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+  %29 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
   %30 = extractelement <4 x float> %29, i32 0
   %31 = fmul float %30, %15
   %32 = fadd float %31, %19
-  %33 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+  %33 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
   %34 = extractelement <4 x float> %33, i32 1
   %35 = fmul float %34, %15
   %36 = fadd float %35, %22
-  %37 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+  %37 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
   %38 = extractelement <4 x float> %37, i32 2
   %39 = fmul float %38, %15
   %40 = fadd float %39, %25
-  %41 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+  %41 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
   %42 = extractelement <4 x float> %41, i32 3
   %43 = fmul float %42, %15
   %44 = fadd float %43, %28
-  %45 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+  %45 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
   %46 = extractelement <4 x float> %45, i32 0
   %47 = fmul float %46, %1
   %48 = fadd float %47, %32
-  %49 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+  %49 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
   %50 = extractelement <4 x float> %49, i32 1
   %51 = fmul float %50, %1
   %52 = fadd float %51, %36
-  %53 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+  %53 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
   %54 = extractelement <4 x float> %53, i32 2
   %55 = fmul float %54, %1
   %56 = fadd float %55, %40
-  %57 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+  %57 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
   %58 = extractelement <4 x float> %57, i32 3
   %59 = fmul float %58, %1
   %60 = fadd float %59, %44
-  %61 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+  %61 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
   %62 = extractelement <4 x float> %61, i32 0
   %63 = fmul float %62, %16
   %64 = fadd float %63, %48
-  %65 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+  %65 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
   %66 = extractelement <4 x float> %65, i32 1
   %67 = fmul float %66, %16
   %68 = fadd float %67, %52
-  %69 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+  %69 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
   %70 = extractelement <4 x float> %69, i32 2
   %71 = fmul float %70, %16
   %72 = fadd float %71, %56
-  %73 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+  %73 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
   %74 = extractelement <4 x float> %73, i32 3
   %75 = fmul float %74, %16
   %76 = fadd float %75, %60

Modified: llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop.ll (original)
+++ llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop.ll Fri Feb 27 15:17:42 2015
@@ -21,63 +21,63 @@ ENDIF:
   %temp1.0 = phi float [ 1.000000e+00, %main_body ], [ %temp1.1, %LOOP ], [ %temp1.1, %ENDIF16 ]
   %temp2.0 = phi float [ 0.000000e+00, %main_body ], [ %temp2.1, %LOOP ], [ %temp2.1, %ENDIF16 ]
   %temp3.0 = phi float [ 0.000000e+00, %main_body ], [ %temp3.1, %LOOP ], [ %temp3.1, %ENDIF16 ]
-  %11 = load <4 x float> addrspace(9)* null
+  %11 = load <4 x float>, <4 x float> addrspace(9)* null
   %12 = extractelement <4 x float> %11, i32 0
   %13 = fmul float %12, %0
-  %14 = load <4 x float> addrspace(9)* null
+  %14 = load <4 x float>, <4 x float> addrspace(9)* null
   %15 = extractelement <4 x float> %14, i32 1
   %16 = fmul float %15, %0
-  %17 = load <4 x float> addrspace(9)* null
+  %17 = load <4 x float>, <4 x float> addrspace(9)* null
   %18 = extractelement <4 x float> %17, i32 2
   %19 = fmul float %18, %0
-  %20 = load <4 x float> addrspace(9)* null
+  %20 = load <4 x float>, <4 x float> addrspace(9)* null
   %21 = extractelement <4 x float> %20, i32 3
   %22 = fmul float %21, %0
-  %23 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+  %23 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
   %24 = extractelement <4 x float> %23, i32 0
   %25 = fmul float %24, %1
   %26 = fadd float %25, %13
-  %27 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+  %27 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
   %28 = extractelement <4 x float> %27, i32 1
   %29 = fmul float %28, %1
   %30 = fadd float %29, %16
-  %31 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+  %31 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
   %32 = extractelement <4 x float> %31, i32 2
   %33 = fmul float %32, %1
   %34 = fadd float %33, %19
-  %35 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+  %35 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
   %36 = extractelement <4 x float> %35, i32 3
   %37 = fmul float %36, %1
   %38 = fadd float %37, %22
-  %39 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+  %39 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
   %40 = extractelement <4 x float> %39, i32 0
   %41 = fmul float %40, %2
   %42 = fadd float %41, %26
-  %43 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+  %43 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
   %44 = extractelement <4 x float> %43, i32 1
   %45 = fmul float %44, %2
   %46 = fadd float %45, %30
-  %47 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+  %47 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
   %48 = extractelement <4 x float> %47, i32 2
   %49 = fmul float %48, %2
   %50 = fadd float %49, %34
-  %51 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+  %51 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
   %52 = extractelement <4 x float> %51, i32 3
   %53 = fmul float %52, %2
   %54 = fadd float %53, %38
-  %55 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+  %55 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
   %56 = extractelement <4 x float> %55, i32 0
   %57 = fmul float %56, %3
   %58 = fadd float %57, %42
-  %59 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+  %59 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
   %60 = extractelement <4 x float> %59, i32 1
   %61 = fmul float %60, %3
   %62 = fadd float %61, %46
-  %63 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+  %63 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
   %64 = extractelement <4 x float> %63, i32 2
   %65 = fmul float %64, %3
   %66 = fadd float %65, %50
-  %67 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+  %67 = load <4 x float>, <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
   %68 = extractelement <4 x float> %67, i32 3
   %69 = fmul float %68, %3
   %70 = fadd float %69, %54

Modified: llvm/trunk/test/CodeGen/R600/scratch-buffer.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/scratch-buffer.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/scratch-buffer.ll (original)
+++ llvm/trunk/test/CodeGen/R600/scratch-buffer.ll Fri Feb 27 15:17:42 2015
@@ -30,12 +30,12 @@ entry:
 
 if:
   %if_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch0, i32 0, i32 %if_offset
-  %if_value = load i32* %if_ptr
+  %if_value = load i32, i32* %if_ptr
   br label %done
 
 else:
   %else_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch1, i32 0, i32 %else_offset
-  %else_value = load i32* %else_ptr
+  %else_value = load i32, i32* %else_ptr
   br label %done
 
 done:
@@ -57,12 +57,12 @@ entry:
   %scratch0 = alloca [8192 x i32]
   %scratch1 = alloca [8192 x i32]
 
-  %offset0 = load i32 addrspace(1)* %offsets
+  %offset0 = load i32, i32 addrspace(1)* %offsets
   %scratchptr0 = getelementptr [8192 x i32], [8192 x i32]* %scratch0, i32 0, i32 %offset0
   store i32 %offset0, i32* %scratchptr0
 
   %offsetptr1 = getelementptr i32, i32 addrspace(1)* %offsets, i32 1
-  %offset1 = load i32 addrspace(1)* %offsetptr1
+  %offset1 = load i32, i32 addrspace(1)* %offsetptr1
   %scratchptr1 = getelementptr [8192 x i32], [8192 x i32]* %scratch1, i32 0, i32 %offset1
   store i32 %offset1, i32* %scratchptr1
 
@@ -71,12 +71,12 @@ entry:
 
 if:
   %if_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch0, i32 0, i32 %if_offset
-  %if_value = load i32* %if_ptr
+  %if_value = load i32, i32* %if_ptr
   br label %done
 
 else:
   %else_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch1, i32 0, i32 %else_offset
-  %else_value = load i32* %else_ptr
+  %else_value = load i32, i32* %else_ptr
   br label %done
 
 done:

Modified: llvm/trunk/test/CodeGen/R600/sdiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/sdiv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/sdiv.ll (original)
+++ llvm/trunk/test/CodeGen/R600/sdiv.ll Fri Feb 27 15:17:42 2015
@@ -15,8 +15,8 @@
 ; EG: CF_END
 define void @sdiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in
-  %den = load i32 addrspace(1) * %den_ptr
+  %num = load i32, i32 addrspace(1) * %in
+  %den = load i32, i32 addrspace(1) * %den_ptr
   %result = sdiv i32 %num, %den
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -24,7 +24,7 @@ define void @sdiv_i32(i32 addrspace(1)*
 
 ; FUNC-LABEL: {{^}}sdiv_i32_4:
 define void @sdiv_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
-  %num = load i32 addrspace(1) * %in
+  %num = load i32, i32 addrspace(1) * %in
   %result = sdiv i32 %num, 4
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -44,7 +44,7 @@ define void @sdiv_i32_4(i32 addrspace(1)
 ; SI: buffer_store_dword
 ; SI: s_endpgm
 define void @slow_sdiv_i32_3435(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
-  %num = load i32 addrspace(1) * %in
+  %num = load i32, i32 addrspace(1) * %in
   %result = sdiv i32 %num, 3435
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -52,15 +52,15 @@ define void @slow_sdiv_i32_3435(i32 addr
 
 define void @sdiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %den_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
-  %num = load <2 x i32> addrspace(1) * %in
-  %den = load <2 x i32> addrspace(1) * %den_ptr
+  %num = load <2 x i32>, <2 x i32> addrspace(1) * %in
+  %den = load <2 x i32>, <2 x i32> addrspace(1) * %den_ptr
   %result = sdiv <2 x i32> %num, %den
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
 }
 
 define void @sdiv_v2i32_4(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
-  %num = load <2 x i32> addrspace(1) * %in
+  %num = load <2 x i32>, <2 x i32> addrspace(1) * %in
   %result = sdiv <2 x i32> %num, <i32 4, i32 4>
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
@@ -68,15 +68,15 @@ define void @sdiv_v2i32_4(<2 x i32> addr
 
 define void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %den_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %num = load <4 x i32> addrspace(1) * %in
-  %den = load <4 x i32> addrspace(1) * %den_ptr
+  %num = load <4 x i32>, <4 x i32> addrspace(1) * %in
+  %den = load <4 x i32>, <4 x i32> addrspace(1) * %den_ptr
   %result = sdiv <4 x i32> %num, %den
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
 }
 
 define void @sdiv_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
-  %num = load <4 x i32> addrspace(1) * %in
+  %num = load <4 x i32>, <4 x i32> addrspace(1) * %in
   %result = sdiv <4 x i32> %num, <i32 4, i32 4, i32 4, i32 4>
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/sdivrem24.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/sdivrem24.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/sdivrem24.ll (original)
+++ llvm/trunk/test/CodeGen/R600/sdivrem24.ll Fri Feb 27 15:17:42 2015
@@ -14,8 +14,8 @@
 ; EG: FLT_TO_INT
 define void @sdiv24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
   %den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
-  %num = load i8 addrspace(1) * %in
-  %den = load i8 addrspace(1) * %den_ptr
+  %num = load i8, i8 addrspace(1) * %in
+  %den = load i8, i8 addrspace(1) * %den_ptr
   %result = sdiv i8 %num, %den
   store i8 %result, i8 addrspace(1)* %out
   ret void
@@ -33,8 +33,8 @@ define void @sdiv24_i8(i8 addrspace(1)*
 ; EG: FLT_TO_INT
 define void @sdiv24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
   %den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
-  %num = load i16 addrspace(1) * %in, align 2
-  %den = load i16 addrspace(1) * %den_ptr, align 2
+  %num = load i16, i16 addrspace(1) * %in, align 2
+  %den = load i16, i16 addrspace(1) * %den_ptr, align 2
   %result = sdiv i16 %num, %den
   store i16 %result, i16 addrspace(1)* %out, align 2
   ret void
@@ -52,8 +52,8 @@ define void @sdiv24_i16(i16 addrspace(1)
 ; EG: FLT_TO_INT
 define void @sdiv24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 8
   %den.i24.0 = shl i32 %den, 8
   %num.i24 = ashr i32 %num.i24.0, 8
@@ -71,8 +71,8 @@ define void @sdiv24_i32(i32 addrspace(1)
 ; EG-NOT: RECIP_IEEE
 define void @sdiv25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 7
   %den.i24.0 = shl i32 %den, 7
   %num.i24 = ashr i32 %num.i24.0, 7
@@ -90,8 +90,8 @@ define void @sdiv25_i32(i32 addrspace(1)
 ; EG-NOT: RECIP_IEEE
 define void @test_no_sdiv24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 8
   %den.i24.0 = shl i32 %den, 7
   %num.i24 = ashr i32 %num.i24.0, 8
@@ -109,8 +109,8 @@ define void @test_no_sdiv24_i32_1(i32 ad
 ; EG-NOT: RECIP_IEEE
 define void @test_no_sdiv24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 7
   %den.i24.0 = shl i32 %den, 8
   %num.i24 = ashr i32 %num.i24.0, 7
@@ -132,8 +132,8 @@ define void @test_no_sdiv24_i32_2(i32 ad
 ; EG: FLT_TO_INT
 define void @srem24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
   %den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
-  %num = load i8 addrspace(1) * %in
-  %den = load i8 addrspace(1) * %den_ptr
+  %num = load i8, i8 addrspace(1) * %in
+  %den = load i8, i8 addrspace(1) * %den_ptr
   %result = srem i8 %num, %den
   store i8 %result, i8 addrspace(1)* %out
   ret void
@@ -151,8 +151,8 @@ define void @srem24_i8(i8 addrspace(1)*
 ; EG: FLT_TO_INT
 define void @srem24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
   %den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
-  %num = load i16 addrspace(1) * %in, align 2
-  %den = load i16 addrspace(1) * %den_ptr, align 2
+  %num = load i16, i16 addrspace(1) * %in, align 2
+  %den = load i16, i16 addrspace(1) * %den_ptr, align 2
   %result = srem i16 %num, %den
   store i16 %result, i16 addrspace(1)* %out, align 2
   ret void
@@ -170,8 +170,8 @@ define void @srem24_i16(i16 addrspace(1)
 ; EG: FLT_TO_INT
 define void @srem24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 8
   %den.i24.0 = shl i32 %den, 8
   %num.i24 = ashr i32 %num.i24.0, 8
@@ -189,8 +189,8 @@ define void @srem24_i32(i32 addrspace(1)
 ; EG-NOT: RECIP_IEEE
 define void @srem25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 7
   %den.i24.0 = shl i32 %den, 7
   %num.i24 = ashr i32 %num.i24.0, 7
@@ -208,8 +208,8 @@ define void @srem25_i32(i32 addrspace(1)
 ; EG-NOT: RECIP_IEEE
 define void @test_no_srem24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 8
   %den.i24.0 = shl i32 %den, 7
   %num.i24 = ashr i32 %num.i24.0, 8
@@ -227,8 +227,8 @@ define void @test_no_srem24_i32_1(i32 ad
 ; EG-NOT: RECIP_IEEE
 define void @test_no_srem24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 7
   %den.i24.0 = shl i32 %den, 8
   %num.i24 = ashr i32 %num.i24.0, 7

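The rewrite applied throughout these hunks is purely mechanical: load now takes
the loaded type as an explicit first operand, followed by the fully-typed
pointer operand; alignment, address spaces, and metadata are untouched. As a
minimal before/after sketch (using a hypothetical %ptr, not taken from any of
the tests above):

  ; old form: the result type was implied by the pointer's pointee type
  %val = load i32* %ptr, align 4

  ; new form: the loaded type is spelled explicitly, then the pointer
  %val = load i32, i32* %ptr, align 4
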
Modified: llvm/trunk/test/CodeGen/R600/select64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/select64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/select64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/select64.ll Fri Feb 27 15:17:42 2015
@@ -42,8 +42,8 @@ define void @select_trunc_i64_2(i32 addr
 ; CHECK-NOT: v_cndmask_b32
 define void @v_select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
   %cmp = icmp ugt i32 %cond, 5
-  %a = load i64 addrspace(1)* %aptr, align 8
-  %b = load i64 addrspace(1)* %bptr, align 8
+  %a = load i64, i64 addrspace(1)* %aptr, align 8
+  %b = load i64, i64 addrspace(1)* %bptr, align 8
   %sel = select i1 %cmp, i64 %a, i64 %b
   %trunc = trunc i64 %sel to i32
   store i32 %trunc, i32 addrspace(1)* %out, align 4
@@ -60,8 +60,8 @@ define void @v_select_trunc_i64_2(i32 ad
 ; CHECK: s_endpgm
 define void @v_select_i64_split_imm(i64 addrspace(1)* %out, i32 %cond, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
   %cmp = icmp ugt i32 %cond, 5
-  %a = load i64 addrspace(1)* %aptr, align 8
-  %b = load i64 addrspace(1)* %bptr, align 8
+  %a = load i64, i64 addrspace(1)* %aptr, align 8
+  %b = load i64, i64 addrspace(1)* %bptr, align 8
   %sel = select i1 %cmp, i64 %a, i64 270582939648 ; 63 << 32
   store i64 %sel, i64 addrspace(1)* %out, align 8
   ret void

Modified: llvm/trunk/test/CodeGen/R600/selectcc-cnd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/selectcc-cnd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/selectcc-cnd.ll (original)
+++ llvm/trunk/test/CodeGen/R600/selectcc-cnd.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 ;CHECK: CNDE {{\*?}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1.0, literal.x,
 ;CHECK: 1073741824
 define void @test(float addrspace(1)* %out, float addrspace(1)* %in) {
-  %1 = load float addrspace(1)* %in
+  %1 = load float, float addrspace(1)* %in
   %2 = fcmp oeq float %1, 0.0
   %3 = select i1 %2, float 1.0, float 2.0
   store float %3, float addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/R600/selectcc-cnde-int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/selectcc-cnde-int.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/selectcc-cnde-int.ll (original)
+++ llvm/trunk/test/CodeGen/R600/selectcc-cnde-int.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
 ;CHECK: CNDE_INT {{\*?}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, literal.x,
 ;CHECK-NEXT: 2
 define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
-  %1 = load i32 addrspace(1)* %in
+  %1 = load i32, i32 addrspace(1)* %in
   %2 = icmp eq i32 %1, 0
   %3 = select i1 %2, i32 1, i32 2
   store i32 %3, i32 addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/R600/selectcc-icmp-select-float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/selectcc-icmp-select-float.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/selectcc-icmp-select-float.ll (original)
+++ llvm/trunk/test/CodeGen/R600/selectcc-icmp-select-float.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
 
 define void @test(float addrspace(1)* %out, i32 addrspace(1)* %in) {
 entry:
-  %0 = load i32 addrspace(1)* %in
+  %0 = load i32, i32 addrspace(1)* %in
   %1 = icmp sge i32 %0, 0
   %2 = select i1 %1, float 1.0, float 0.0
   store float %2, float addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/R600/setcc-opt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/setcc-opt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/setcc-opt.ll (original)
+++ llvm/trunk/test/CodeGen/R600/setcc-opt.ll Fri Feb 27 15:17:42 2015
@@ -162,7 +162,7 @@ define void @cmp_zext_k_i8max(i1 addrspa
 ; GCN-NEXT: buffer_store_byte [[RESULT]]
 ; GCN: s_endpgm
 define void @cmp_sext_k_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %b.ptr) nounwind {
-  %b = load i8 addrspace(1)* %b.ptr
+  %b = load i8, i8 addrspace(1)* %b.ptr
   %b.ext = sext i8 %b to i32
   %icmp0 = icmp ne i32 %b.ext, -1
   store i1 %icmp0, i1 addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/R600/setcc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/setcc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/setcc.ll (original)
+++ llvm/trunk/test/CodeGen/R600/setcc.ll Fri Feb 27 15:17:42 2015
@@ -22,8 +22,8 @@ define void @setcc_v2i32(<2 x i32> addrs
 
 define void @setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %a = load <4 x i32> addrspace(1) * %in
-  %b = load <4 x i32> addrspace(1) * %b_ptr
+  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
+  %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
   %result = icmp eq <4 x i32> %a, %b
   %sext = sext <4 x i1> %result to <4 x i32>
   store <4 x i32> %sext, <4 x i32> addrspace(1)* %out
@@ -347,8 +347,8 @@ define void @v3i32_eq(<3 x i32> addrspac
   %gep.a = getelementptr <3 x i32>, <3 x i32> addrspace(1)* %ptra, i32 %tid
   %gep.b = getelementptr <3 x i32>, <3 x i32> addrspace(1)* %ptrb, i32 %tid
   %gep.out = getelementptr <3 x i32>, <3 x i32> addrspace(1)* %out, i32 %tid
-  %a = load <3 x i32> addrspace(1)* %gep.a
-  %b = load <3 x i32> addrspace(1)* %gep.b
+  %a = load <3 x i32>, <3 x i32> addrspace(1)* %gep.a
+  %b = load <3 x i32>, <3 x i32> addrspace(1)* %gep.b
   %cmp = icmp eq <3 x i32> %a, %b
   %ext = sext <3 x i1> %cmp to <3 x i32>
   store <3 x i32> %ext, <3 x i32> addrspace(1)* %gep.out
@@ -368,8 +368,8 @@ define void @v3i8_eq(<3 x i8> addrspace(
   %gep.a = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %ptra, i32 %tid
   %gep.b = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %ptrb, i32 %tid
   %gep.out = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %out, i32 %tid
-  %a = load <3 x i8> addrspace(1)* %gep.a
-  %b = load <3 x i8> addrspace(1)* %gep.b
+  %a = load <3 x i8>, <3 x i8> addrspace(1)* %gep.a
+  %b = load <3 x i8>, <3 x i8> addrspace(1)* %gep.b
   %cmp = icmp eq <3 x i8> %a, %b
   %ext = sext <3 x i1> %cmp to <3 x i8>
   store <3 x i8> %ext, <3 x i8> addrspace(1)* %gep.out

Modified: llvm/trunk/test/CodeGen/R600/sext-in-reg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/sext-in-reg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/sext-in-reg.ll (original)
+++ llvm/trunk/test/CodeGen/R600/sext-in-reg.ll Fri Feb 27 15:17:42 2015
@@ -190,8 +190,8 @@ define void @v_sext_in_reg_i1_to_i64(i64
   %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
   %b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
   %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
-  %a = load i64 addrspace(1)* %a.gep, align 8
-  %b = load i64 addrspace(1)* %b.gep, align 8
+  %a = load i64, i64 addrspace(1)* %a.gep, align 8
+  %b = load i64, i64 addrspace(1)* %b.gep, align 8
 
   %c = shl i64 %a, %b
   %shl = shl i64 %c, 63
@@ -211,8 +211,8 @@ define void @v_sext_in_reg_i8_to_i64(i64
   %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
   %b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
   %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
-  %a = load i64 addrspace(1)* %a.gep, align 8
-  %b = load i64 addrspace(1)* %b.gep, align 8
+  %a = load i64, i64 addrspace(1)* %a.gep, align 8
+  %b = load i64, i64 addrspace(1)* %b.gep, align 8
 
   %c = shl i64 %a, %b
   %shl = shl i64 %c, 56
@@ -232,8 +232,8 @@ define void @v_sext_in_reg_i16_to_i64(i6
   %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
   %b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
   %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
-  %a = load i64 addrspace(1)* %a.gep, align 8
-  %b = load i64 addrspace(1)* %b.gep, align 8
+  %a = load i64, i64 addrspace(1)* %a.gep, align 8
+  %b = load i64, i64 addrspace(1)* %b.gep, align 8
 
   %c = shl i64 %a, %b
   %shl = shl i64 %c, 48
@@ -252,8 +252,8 @@ define void @v_sext_in_reg_i32_to_i64(i6
   %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
   %b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
   %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
-  %a = load i64 addrspace(1)* %a.gep, align 8
-  %b = load i64 addrspace(1)* %b.gep, align 8
+  %a = load i64, i64 addrspace(1)* %a.gep, align 8
+  %b = load i64, i64 addrspace(1)* %b.gep, align 8
 
   %c = shl i64 %a, %b
   %shl = shl i64 %c, 32
@@ -428,8 +428,8 @@ define void @testcase_3(i8 addrspace(1)*
 ; SI: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
 ; SI: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
 define void @vgpr_sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %a, <4 x i32> addrspace(1)* %b) nounwind {
-  %loada = load <4 x i32> addrspace(1)* %a, align 16
-  %loadb = load <4 x i32> addrspace(1)* %b, align 16
+  %loada = load <4 x i32>, <4 x i32> addrspace(1)* %a, align 16
+  %loadb = load <4 x i32>, <4 x i32> addrspace(1)* %b, align 16
   %c = add <4 x i32> %loada, %loadb ; add to prevent folding into extload
   %shl = shl <4 x i32> %c, <i32 24, i32 24, i32 24, i32 24>
   %ashr = ashr <4 x i32> %shl, <i32 24, i32 24, i32 24, i32 24>
@@ -441,8 +441,8 @@ define void @vgpr_sext_in_reg_v4i8_to_v4
 ; SI: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 16
 ; SI: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 16
 define void @vgpr_sext_in_reg_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %a, <4 x i32> addrspace(1)* %b) nounwind {
-  %loada = load <4 x i32> addrspace(1)* %a, align 16
-  %loadb = load <4 x i32> addrspace(1)* %b, align 16
+  %loada = load <4 x i32>, <4 x i32> addrspace(1)* %a, align 16
+  %loadb = load <4 x i32>, <4 x i32> addrspace(1)* %b, align 16
   %c = add <4 x i32> %loada, %loadb ; add to prevent folding into extload
   %shl = shl <4 x i32> %c, <i32 16, i32 16, i32 16, i32 16>
   %ashr = ashr <4 x i32> %shl, <i32 16, i32 16, i32 16, i32 16>
@@ -459,7 +459,7 @@ define void @vgpr_sext_in_reg_v4i16_to_v
 ; SI: v_bfe_i32
 ; SI: buffer_store_short
 define void @sext_in_reg_to_illegal_type(i16 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %src) nounwind {
-  %tmp5 = load i8 addrspace(1)* %src, align 1
+  %tmp5 = load i8, i8 addrspace(1)* %src, align 1
   %tmp2 = sext i8 %tmp5 to i32
   %tmp3 = tail call i32 @llvm.AMDGPU.imax(i32 %tmp2, i32 0) nounwind readnone
   %tmp4 = trunc i32 %tmp3 to i8
@@ -474,7 +474,7 @@ declare i32 @llvm.AMDGPU.bfe.i32(i32, i3
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_0_width(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwind {
-  %load = load i32 addrspace(1)* %ptr, align 4
+  %load = load i32, i32 addrspace(1)* %ptr, align 4
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 8, i32 0) nounwind readnone
   store i32 %bfe, i32 addrspace(1)* %out, align 4
   ret void
@@ -485,7 +485,7 @@ define void @bfe_0_width(i32 addrspace(1
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_8_bfe_8(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwind {
-  %load = load i32 addrspace(1)* %ptr, align 4
+  %load = load i32, i32 addrspace(1)* %ptr, align 4
   %bfe0 = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 0, i32 8) nounwind readnone
   %bfe1 = call i32 @llvm.AMDGPU.bfe.i32(i32 %bfe0, i32 0, i32 8) nounwind readnone
   store i32 %bfe1, i32 addrspace(1)* %out, align 4
@@ -496,7 +496,7 @@ define void @bfe_8_bfe_8(i32 addrspace(1
 ; SI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 8
 ; SI: s_endpgm
 define void @bfe_8_bfe_16(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwind {
-  %load = load i32 addrspace(1)* %ptr, align 4
+  %load = load i32, i32 addrspace(1)* %ptr, align 4
   %bfe0 = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 0, i32 8) nounwind readnone
   %bfe1 = call i32 @llvm.AMDGPU.bfe.i32(i32 %bfe0, i32 0, i32 16) nounwind readnone
   store i32 %bfe1, i32 addrspace(1)* %out, align 4
@@ -509,7 +509,7 @@ define void @bfe_8_bfe_16(i32 addrspace(
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @bfe_16_bfe_8(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwind {
-  %load = load i32 addrspace(1)* %ptr, align 4
+  %load = load i32, i32 addrspace(1)* %ptr, align 4
   %bfe0 = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 0, i32 16) nounwind readnone
   %bfe1 = call i32 @llvm.AMDGPU.bfe.i32(i32 %bfe0, i32 0, i32 8) nounwind readnone
   store i32 %bfe1, i32 addrspace(1)* %out, align 4
@@ -545,7 +545,7 @@ define void @sext_in_reg_i8_to_i32_bfe_w
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @sextload_i8_to_i32_bfe(i32 addrspace(1)* %out, i8 addrspace(1)* %ptr) nounwind {
-  %load = load i8 addrspace(1)* %ptr, align 1
+  %load = load i8, i8 addrspace(1)* %ptr, align 1
   %sext = sext i8 %load to i32
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %sext, i32 0, i32 8) nounwind readnone
   %shl = shl i32 %bfe, 24
@@ -559,7 +559,7 @@ define void @sextload_i8_to_i32_bfe(i32
 ; SI-NOT: {{[^@]}}bfe
 ; SI: s_endpgm
 define void @sextload_i8_to_i32_bfe_0(i32 addrspace(1)* %out, i8 addrspace(1)* %ptr) nounwind {
-  %load = load i8 addrspace(1)* %ptr, align 1
+  %load = load i8, i8 addrspace(1)* %ptr, align 1
   %sext = sext i8 %load to i32
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %sext, i32 8, i32 0) nounwind readnone
   %shl = shl i32 %bfe, 24
@@ -574,7 +574,7 @@ define void @sextload_i8_to_i32_bfe_0(i3
 ; SI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 1
 ; SI: s_endpgm
 define void @sext_in_reg_i1_bfe_offset_0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = shl i32 %x, 31
   %shr = ashr i32 %shl, 31
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shr, i32 0, i32 1)
@@ -589,7 +589,7 @@ define void @sext_in_reg_i1_bfe_offset_0
 ; SI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 1, 1
 ; SI: s_endpgm
 define void @sext_in_reg_i1_bfe_offset_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = shl i32 %x, 30
   %shr = ashr i32 %shl, 30
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shr, i32 1, i32 1)
@@ -604,7 +604,7 @@ define void @sext_in_reg_i1_bfe_offset_1
 ; SI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 1, 2
 ; SI: s_endpgm
 define void @sext_in_reg_i2_bfe_offset_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %x = load i32 addrspace(1)* %in, align 4
+  %x = load i32, i32 addrspace(1)* %in, align 4
   %shl = shl i32 %x, 30
   %shr = ashr i32 %shl, 30
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shr, i32 1, i32 2)

Modified: llvm/trunk/test/CodeGen/R600/sgpr-control-flow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/sgpr-control-flow.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/sgpr-control-flow.ll (original)
+++ llvm/trunk/test/CodeGen/R600/sgpr-control-flow.ll Fri Feb 27 15:17:42 2015
@@ -83,13 +83,13 @@ entry:
 
 if:
   %gep.if = getelementptr i32, i32 addrspace(1)* %a, i32 %tid
-  %a.val = load i32 addrspace(1)* %gep.if
+  %a.val = load i32, i32 addrspace(1)* %gep.if
   %cmp.if = icmp eq i32 %a.val, 0
   br label %endif
 
 else:
   %gep.else = getelementptr i32, i32 addrspace(1)* %b, i32 %tid
-  %b.val = load i32 addrspace(1)* %gep.else
+  %b.val = load i32, i32 addrspace(1)* %gep.else
   %cmp.else = icmp slt i32 %b.val, 0
   br label %endif
 

Modified: llvm/trunk/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll (original)
+++ llvm/trunk/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 ; SI-LABEL: {{^}}test_dup_operands:
 ; SI: v_add_i32_e32
 define void @test_dup_operands(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %in) {
-  %a = load <2 x i32> addrspace(1)* %in
+  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
   %lo = extractelement <2 x i32> %a, i32 0
   %hi = extractelement <2 x i32> %a, i32 1
   %add = add i32 %lo, %lo

Modified: llvm/trunk/test/CodeGen/R600/sgpr-copy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/sgpr-copy.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/sgpr-copy.ll (original)
+++ llvm/trunk/test/CodeGen/R600/sgpr-copy.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
 define void @phi1(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
 main_body:
   %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
-  %21 = load <16 x i8> addrspace(2)* %20, !tbaa !1
+  %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20, !tbaa !1
   %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 0)
   %23 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
   %24 = call float @llvm.SI.load.const(<16 x i8> %21, i32 32)
@@ -34,7 +34,7 @@ ENDIF:
 define void @phi2(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
 main_body:
   %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
-  %21 = load <16 x i8> addrspace(2)* %20, !tbaa !1
+  %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20, !tbaa !1
   %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
   %23 = call float @llvm.SI.load.const(<16 x i8> %21, i32 32)
   %24 = call float @llvm.SI.load.const(<16 x i8> %21, i32 36)
@@ -51,9 +51,9 @@ main_body:
   %35 = call float @llvm.SI.load.const(<16 x i8> %21, i32 88)
   %36 = call float @llvm.SI.load.const(<16 x i8> %21, i32 92)
   %37 = getelementptr <32 x i8>, <32 x i8> addrspace(2)* %2, i32 0
-  %38 = load <32 x i8> addrspace(2)* %37, !tbaa !1
+  %38 = load <32 x i8>, <32 x i8> addrspace(2)* %37, !tbaa !1
   %39 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %1, i32 0
-  %40 = load <16 x i8> addrspace(2)* %39, !tbaa !1
+  %40 = load <16 x i8>, <16 x i8> addrspace(2)* %39, !tbaa !1
   %41 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %3, <2 x i32> %5)
   %42 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %3, <2 x i32> %5)
   %43 = call float @llvm.SI.fs.interp(i32 0, i32 1, i32 %3, <2 x i32> %5)
@@ -155,7 +155,7 @@ ENDIF24:
 define void @loop(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
 main_body:
   %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
-  %21 = load <16 x i8> addrspace(2)* %20, !tbaa !1
+  %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20, !tbaa !1
   %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 0)
   %23 = call float @llvm.SI.load.const(<16 x i8> %21, i32 4)
   %24 = call float @llvm.SI.load.const(<16 x i8> %21, i32 8)
@@ -237,12 +237,12 @@ define void @sample_v3([17 x <16 x i8>]
 
 entry:
   %21 = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %0, i64 0, i32 0
-  %22 = load <16 x i8> addrspace(2)* %21, !tbaa !2
+  %22 = load <16 x i8>, <16 x i8> addrspace(2)* %21, !tbaa !2
   %23 = call float @llvm.SI.load.const(<16 x i8> %22, i32 16)
   %24 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 0
-  %25 = load <32 x i8> addrspace(2)* %24, !tbaa !2
+  %25 = load <32 x i8>, <32 x i8> addrspace(2)* %24, !tbaa !2
   %26 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 0
-  %27 = load <16 x i8> addrspace(2)* %26, !tbaa !2
+  %27 = load <16 x i8>, <16 x i8> addrspace(2)* %26, !tbaa !2
   %28 = fcmp oeq float %23, 0.0
   br i1 %28, label %if, label %else
 
@@ -276,7 +276,7 @@ endif:
 ; CHECK: s_endpgm
 define void @copy1(float addrspace(1)* %out, float addrspace(1)* %in0) {
 entry:
-  %0 = load float addrspace(1)* %in0
+  %0 = load float, float addrspace(1)* %in0
   %1 = fcmp oeq float %0, 0.0
   br i1 %1, label %if0, label %endif
 
@@ -335,12 +335,12 @@ attributes #0 = { "ShaderType"="0" }
 define void @sample_rsrc([6 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <4 x i32>] addrspace(2)* byval %arg2, [32 x <8 x i32>] addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 {
 bb:
   %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg1, i32 0, i32 0
-  %tmp22 = load <16 x i8> addrspace(2)* %tmp, !tbaa !0
+  %tmp22 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
   %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp22, i32 16)
   %tmp25 = getelementptr [32 x <8 x i32>], [32 x <8 x i32>] addrspace(2)* %arg3, i32 0, i32 0
-  %tmp26 = load <8 x i32> addrspace(2)* %tmp25, !tbaa !0
+  %tmp26 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp25, !tbaa !0
   %tmp27 = getelementptr [16 x <4 x i32>], [16 x <4 x i32>] addrspace(2)* %arg2, i32 0, i32 0
-  %tmp28 = load <4 x i32> addrspace(2)* %tmp27, !tbaa !0
+  %tmp28 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp27, !tbaa !0
   %tmp29 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %arg5, <2 x i32> %arg7)
   %tmp30 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %arg5, <2 x i32> %arg7)
   %tmp31 = bitcast float %tmp23 to i32

Modified: llvm/trunk/test/CodeGen/R600/shl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/shl.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/shl.ll (original)
+++ llvm/trunk/test/CodeGen/R600/shl.ll Fri Feb 27 15:17:42 2015
@@ -16,8 +16,8 @@
 
 define void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
-  %a = load <2 x i32> addrspace(1) * %in
-  %b = load <2 x i32> addrspace(1) * %b_ptr
+  %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
+  %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
   %result = shl <2 x i32> %a, %b
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
@@ -43,8 +43,8 @@ define void @shl_v2i32(<2 x i32> addrspa
 
 define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %a = load <4 x i32> addrspace(1) * %in
-  %b = load <4 x i32> addrspace(1) * %b_ptr
+  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
+  %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
   %result = shl <4 x i32> %a, %b
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
@@ -70,8 +70,8 @@ define void @shl_v4i32(<4 x i32> addrspa
 
 define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
   %b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
-  %a = load i64 addrspace(1) * %in
-  %b = load i64 addrspace(1) * %b_ptr
+  %a = load i64, i64 addrspace(1) * %in
+  %b = load i64, i64 addrspace(1) * %b_ptr
   %result = shl i64 %a, %b
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -109,8 +109,8 @@ define void @shl_i64(i64 addrspace(1)* %
 
 define void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
-  %a = load <2 x i64> addrspace(1) * %in
-  %b = load <2 x i64> addrspace(1) * %b_ptr
+  %a = load <2 x i64>, <2 x i64> addrspace(1) * %in
+  %b = load <2 x i64>, <2 x i64> addrspace(1) * %b_ptr
   %result = shl <2 x i64> %a, %b
   store <2 x i64> %result, <2 x i64> addrspace(1)* %out
   ret void
@@ -172,8 +172,8 @@ define void @shl_v2i64(<2 x i64> addrspa
 
 define void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
-  %a = load <4 x i64> addrspace(1) * %in
-  %b = load <4 x i64> addrspace(1) * %b_ptr
+  %a = load <4 x i64>, <4 x i64> addrspace(1) * %in
+  %b = load <4 x i64>, <4 x i64> addrspace(1) * %b_ptr
   %result = shl <4 x i64> %a, %b
   store <4 x i64> %result, <4 x i64> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/shl_add_constant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/shl_add_constant.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/shl_add_constant.ll (original)
+++ llvm/trunk/test/CodeGen/R600/shl_add_constant.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ declare i32 @llvm.r600.read.tidig.x() #1
 define void @shl_2_add_9_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %ptr = getelementptr i32, i32 addrspace(1)* %in, i32 %tid.x
-  %val = load i32 addrspace(1)* %ptr, align 4
+  %val = load i32, i32 addrspace(1)* %ptr, align 4
   %add = add i32 %val, 9
   %result = shl i32 %add, 2
   store i32 %result, i32 addrspace(1)* %out, align 4
@@ -28,7 +28,7 @@ define void @shl_2_add_9_i32(i32 addrspa
 define void @shl_2_add_9_i32_2_add_uses(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %ptr = getelementptr i32, i32 addrspace(1)* %in, i32 %tid.x
-  %val = load i32 addrspace(1)* %ptr, align 4
+  %val = load i32, i32 addrspace(1)* %ptr, align 4
   %add = add i32 %val, 9
   %result = shl i32 %add, 2
   store i32 %result, i32 addrspace(1)* %out0, align 4
@@ -46,7 +46,7 @@ define void @shl_2_add_9_i32_2_add_uses(
 define void @shl_2_add_999_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %ptr = getelementptr i32, i32 addrspace(1)* %in, i32 %tid.x
-  %val = load i32 addrspace(1)* %ptr, align 4
+  %val = load i32, i32 addrspace(1)* %ptr, align 4
   %shl = add i32 %val, 999
   %result = shl i32 %shl, 2
   store i32 %result, i32 addrspace(1)* %out, align 4

Modified: llvm/trunk/test/CodeGen/R600/shl_add_ptr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/shl_add_ptr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/shl_add_ptr.ll (original)
+++ llvm/trunk/test/CodeGen/R600/shl_add_ptr.ll Fri Feb 27 15:17:42 2015
@@ -23,7 +23,7 @@ define void @load_shl_base_lds_0(float a
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
   store float %val0, float addrspace(1)* %out
   ret void
@@ -43,7 +43,7 @@ define void @load_shl_base_lds_1(float a
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %shl_add_use = shl i32 %idx.0, 2
   store i32 %shl_add_use, i32 addrspace(1)* %add_use, align 4
   store float %val0, float addrspace(1)* %out
@@ -59,7 +59,7 @@ define void @load_shl_base_lds_max_offse
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 65535
   %arrayidx0 = getelementptr inbounds [65536 x i8], [65536 x i8] addrspace(3)* @maxlds, i32 0, i32 %idx.0
-  %val0 = load i8 addrspace(3)* %arrayidx0
+  %val0 = load i8, i8 addrspace(3)* %arrayidx0
   store i32 %idx.0, i32 addrspace(1)* %add_use
   store i8 %val0, i8 addrspace(1)* %out
   ret void
@@ -77,9 +77,9 @@ define void @load_shl_base_lds_2(float a
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 64
   %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
-  %val0 = load float addrspace(3)* %arrayidx0, align 4
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds1, i32 0, i32 %idx.0
-  %val1 = load float addrspace(3)* %arrayidx1, align 4
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
   %sum = fadd float %val0, %val1
   store float %sum, float addrspace(1)* %out, align 4
   ret void
@@ -108,7 +108,7 @@ define void @store_shl_base_lds_0(float
 ;   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
 ;   %idx.0 = add nsw i32 %tid.x, 2
 ;   %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
-;   %val = load atomic i32 addrspace(3)* %arrayidx0 seq_cst, align 4
+;   %val = load atomic i32, i32 addrspace(3)* %arrayidx0 seq_cst, align 4
 ;   store i32 %val, i32 addrspace(1)* %out, align 4
 ;   store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
 ;   ret void

Modified: llvm/trunk/test/CodeGen/R600/si-lod-bias.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/si-lod-bias.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/si-lod-bias.ll (original)
+++ llvm/trunk/test/CodeGen/R600/si-lod-bias.ll Fri Feb 27 15:17:42 2015
@@ -10,12 +10,12 @@
 define void @main(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
 main_body:
   %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
-  %21 = load <16 x i8> addrspace(2)* %20, !tbaa !1
+  %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20, !tbaa !1
   %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
   %23 = getelementptr <32 x i8>, <32 x i8> addrspace(2)* %2, i32 0
-  %24 = load <32 x i8> addrspace(2)* %23, !tbaa !1
+  %24 = load <32 x i8>, <32 x i8> addrspace(2)* %23, !tbaa !1
   %25 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %1, i32 0
-  %26 = load <16 x i8> addrspace(2)* %25, !tbaa !1
+  %26 = load <16 x i8>, <16 x i8> addrspace(2)* %25, !tbaa !1
   %27 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %3, <2 x i32> %5)
   %28 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %3, <2 x i32> %5)
   %29 = bitcast float %22 to i32

Modified: llvm/trunk/test/CodeGen/R600/si-sgpr-spill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/si-sgpr-spill.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/si-sgpr-spill.ll (original)
+++ llvm/trunk/test/CodeGen/R600/si-sgpr-spill.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@
 define void @main([17 x <16 x i8>] addrspace(2)* byval, [32 x <16 x i8>] addrspace(2)* byval, [16 x <32 x i8>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
 main_body:
   %21 = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %0, i64 0, i32 0
-  %22 = load <16 x i8> addrspace(2)* %21, !tbaa !0
+  %22 = load <16 x i8>, <16 x i8> addrspace(2)* %21, !tbaa !0
   %23 = call float @llvm.SI.load.const(<16 x i8> %22, i32 96)
   %24 = call float @llvm.SI.load.const(<16 x i8> %22, i32 100)
   %25 = call float @llvm.SI.load.const(<16 x i8> %22, i32 104)
@@ -54,37 +54,37 @@ main_body:
   %59 = call float @llvm.SI.load.const(<16 x i8> %22, i32 376)
   %60 = call float @llvm.SI.load.const(<16 x i8> %22, i32 384)
   %61 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 0
-  %62 = load <32 x i8> addrspace(2)* %61, !tbaa !0
+  %62 = load <32 x i8>, <32 x i8> addrspace(2)* %61, !tbaa !0
   %63 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 0
-  %64 = load <16 x i8> addrspace(2)* %63, !tbaa !0
+  %64 = load <16 x i8>, <16 x i8> addrspace(2)* %63, !tbaa !0
   %65 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 1
-  %66 = load <32 x i8> addrspace(2)* %65, !tbaa !0
+  %66 = load <32 x i8>, <32 x i8> addrspace(2)* %65, !tbaa !0
   %67 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 1
-  %68 = load <16 x i8> addrspace(2)* %67, !tbaa !0
+  %68 = load <16 x i8>, <16 x i8> addrspace(2)* %67, !tbaa !0
   %69 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 2
-  %70 = load <32 x i8> addrspace(2)* %69, !tbaa !0
+  %70 = load <32 x i8>, <32 x i8> addrspace(2)* %69, !tbaa !0
   %71 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 2
-  %72 = load <16 x i8> addrspace(2)* %71, !tbaa !0
+  %72 = load <16 x i8>, <16 x i8> addrspace(2)* %71, !tbaa !0
   %73 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 3
-  %74 = load <32 x i8> addrspace(2)* %73, !tbaa !0
+  %74 = load <32 x i8>, <32 x i8> addrspace(2)* %73, !tbaa !0
   %75 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 3
-  %76 = load <16 x i8> addrspace(2)* %75, !tbaa !0
+  %76 = load <16 x i8>, <16 x i8> addrspace(2)* %75, !tbaa !0
   %77 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 4
-  %78 = load <32 x i8> addrspace(2)* %77, !tbaa !0
+  %78 = load <32 x i8>, <32 x i8> addrspace(2)* %77, !tbaa !0
   %79 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 4
-  %80 = load <16 x i8> addrspace(2)* %79, !tbaa !0
+  %80 = load <16 x i8>, <16 x i8> addrspace(2)* %79, !tbaa !0
   %81 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 5
-  %82 = load <32 x i8> addrspace(2)* %81, !tbaa !0
+  %82 = load <32 x i8>, <32 x i8> addrspace(2)* %81, !tbaa !0
   %83 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 5
-  %84 = load <16 x i8> addrspace(2)* %83, !tbaa !0
+  %84 = load <16 x i8>, <16 x i8> addrspace(2)* %83, !tbaa !0
   %85 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 6
-  %86 = load <32 x i8> addrspace(2)* %85, !tbaa !0
+  %86 = load <32 x i8>, <32 x i8> addrspace(2)* %85, !tbaa !0
   %87 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 6
-  %88 = load <16 x i8> addrspace(2)* %87, !tbaa !0
+  %88 = load <16 x i8>, <16 x i8> addrspace(2)* %87, !tbaa !0
   %89 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 7
-  %90 = load <32 x i8> addrspace(2)* %89, !tbaa !0
+  %90 = load <32 x i8>, <32 x i8> addrspace(2)* %89, !tbaa !0
   %91 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 7
-  %92 = load <16 x i8> addrspace(2)* %91, !tbaa !0
+  %92 = load <16 x i8>, <16 x i8> addrspace(2)* %91, !tbaa !0
   %93 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %4, <2 x i32> %6)
   %94 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %4, <2 x i32> %6)
   %95 = call float @llvm.SI.fs.interp(i32 0, i32 1, i32 %4, <2 x i32> %6)
@@ -116,16 +116,16 @@ main_body:
   %119 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %118
   %120 = bitcast float %93 to i32
   store i32 %120, i32 addrspace(3)* %115
-  %121 = load i32 addrspace(3)* %117
+  %121 = load i32, i32 addrspace(3)* %117
   %122 = bitcast i32 %121 to float
-  %123 = load i32 addrspace(3)* %119
+  %123 = load i32, i32 addrspace(3)* %119
   %124 = bitcast i32 %123 to float
   %125 = fsub float %124, %122
   %126 = bitcast float %94 to i32
   store i32 %126, i32 addrspace(3)* %115
-  %127 = load i32 addrspace(3)* %117
+  %127 = load i32, i32 addrspace(3)* %117
   %128 = bitcast i32 %127 to float
-  %129 = load i32 addrspace(3)* %119
+  %129 = load i32, i32 addrspace(3)* %119
   %130 = bitcast i32 %129 to float
   %131 = fsub float %130, %128
   %132 = insertelement <4 x float> undef, float %125, i32 0
@@ -156,30 +156,30 @@ main_body:
   %153 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %152
   %154 = bitcast float %138 to i32
   store i32 %154, i32 addrspace(3)* %149
-  %155 = load i32 addrspace(3)* %151
+  %155 = load i32, i32 addrspace(3)* %151
   %156 = bitcast i32 %155 to float
-  %157 = load i32 addrspace(3)* %153
+  %157 = load i32, i32 addrspace(3)* %153
   %158 = bitcast i32 %157 to float
   %159 = fsub float %158, %156
   %160 = bitcast float %139 to i32
   store i32 %160, i32 addrspace(3)* %149
-  %161 = load i32 addrspace(3)* %151
+  %161 = load i32, i32 addrspace(3)* %151
   %162 = bitcast i32 %161 to float
-  %163 = load i32 addrspace(3)* %153
+  %163 = load i32, i32 addrspace(3)* %153
   %164 = bitcast i32 %163 to float
   %165 = fsub float %164, %162
   %166 = bitcast float %140 to i32
   store i32 %166, i32 addrspace(3)* %149
-  %167 = load i32 addrspace(3)* %151
+  %167 = load i32, i32 addrspace(3)* %151
   %168 = bitcast i32 %167 to float
-  %169 = load i32 addrspace(3)* %153
+  %169 = load i32, i32 addrspace(3)* %153
   %170 = bitcast i32 %169 to float
   %171 = fsub float %170, %168
   %172 = bitcast float %141 to i32
   store i32 %172, i32 addrspace(3)* %149
-  %173 = load i32 addrspace(3)* %151
+  %173 = load i32, i32 addrspace(3)* %151
   %174 = bitcast i32 %173 to float
-  %175 = load i32 addrspace(3)* %153
+  %175 = load i32, i32 addrspace(3)* %153
   %176 = bitcast i32 %175 to float
   %177 = fsub float %176, %174
   %178 = insertelement <4 x float> undef, float %159, i32 0
@@ -695,7 +695,7 @@ attributes #4 = { nounwind readonly }
 define void @main1([17 x <16 x i8>] addrspace(2)* byval, [32 x <16 x i8>] addrspace(2)* byval, [16 x <32 x i8>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
 main_body:
   %21 = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %0, i64 0, i32 0
-  %22 = load <16 x i8> addrspace(2)* %21, !tbaa !0
+  %22 = load <16 x i8>, <16 x i8> addrspace(2)* %21, !tbaa !0
   %23 = call float @llvm.SI.load.const(<16 x i8> %22, i32 0)
   %24 = call float @llvm.SI.load.const(<16 x i8> %22, i32 4)
   %25 = call float @llvm.SI.load.const(<16 x i8> %22, i32 8)
@@ -800,41 +800,41 @@ main_body:
   %124 = call float @llvm.SI.load.const(<16 x i8> %22, i32 864)
   %125 = call float @llvm.SI.load.const(<16 x i8> %22, i32 868)
   %126 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 0
-  %127 = load <32 x i8> addrspace(2)* %126, !tbaa !0
+  %127 = load <32 x i8>, <32 x i8> addrspace(2)* %126, !tbaa !0
   %128 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 0
-  %129 = load <16 x i8> addrspace(2)* %128, !tbaa !0
+  %129 = load <16 x i8>, <16 x i8> addrspace(2)* %128, !tbaa !0
   %130 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 1
-  %131 = load <32 x i8> addrspace(2)* %130, !tbaa !0
+  %131 = load <32 x i8>, <32 x i8> addrspace(2)* %130, !tbaa !0
   %132 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 1
-  %133 = load <16 x i8> addrspace(2)* %132, !tbaa !0
+  %133 = load <16 x i8>, <16 x i8> addrspace(2)* %132, !tbaa !0
   %134 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 2
-  %135 = load <32 x i8> addrspace(2)* %134, !tbaa !0
+  %135 = load <32 x i8>, <32 x i8> addrspace(2)* %134, !tbaa !0
   %136 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 2
-  %137 = load <16 x i8> addrspace(2)* %136, !tbaa !0
+  %137 = load <16 x i8>, <16 x i8> addrspace(2)* %136, !tbaa !0
   %138 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 3
-  %139 = load <32 x i8> addrspace(2)* %138, !tbaa !0
+  %139 = load <32 x i8>, <32 x i8> addrspace(2)* %138, !tbaa !0
   %140 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 3
-  %141 = load <16 x i8> addrspace(2)* %140, !tbaa !0
+  %141 = load <16 x i8>, <16 x i8> addrspace(2)* %140, !tbaa !0
   %142 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 4
-  %143 = load <32 x i8> addrspace(2)* %142, !tbaa !0
+  %143 = load <32 x i8>, <32 x i8> addrspace(2)* %142, !tbaa !0
   %144 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 4
-  %145 = load <16 x i8> addrspace(2)* %144, !tbaa !0
+  %145 = load <16 x i8>, <16 x i8> addrspace(2)* %144, !tbaa !0
   %146 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 5
-  %147 = load <32 x i8> addrspace(2)* %146, !tbaa !0
+  %147 = load <32 x i8>, <32 x i8> addrspace(2)* %146, !tbaa !0
   %148 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 5
-  %149 = load <16 x i8> addrspace(2)* %148, !tbaa !0
+  %149 = load <16 x i8>, <16 x i8> addrspace(2)* %148, !tbaa !0
   %150 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 6
-  %151 = load <32 x i8> addrspace(2)* %150, !tbaa !0
+  %151 = load <32 x i8>, <32 x i8> addrspace(2)* %150, !tbaa !0
   %152 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 6
-  %153 = load <16 x i8> addrspace(2)* %152, !tbaa !0
+  %153 = load <16 x i8>, <16 x i8> addrspace(2)* %152, !tbaa !0
   %154 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 7
-  %155 = load <32 x i8> addrspace(2)* %154, !tbaa !0
+  %155 = load <32 x i8>, <32 x i8> addrspace(2)* %154, !tbaa !0
   %156 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 7
-  %157 = load <16 x i8> addrspace(2)* %156, !tbaa !0
+  %157 = load <16 x i8>, <16 x i8> addrspace(2)* %156, !tbaa !0
   %158 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 8
-  %159 = load <32 x i8> addrspace(2)* %158, !tbaa !0
+  %159 = load <32 x i8>, <32 x i8> addrspace(2)* %158, !tbaa !0
   %160 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 8
-  %161 = load <16 x i8> addrspace(2)* %160, !tbaa !0
+  %161 = load <16 x i8>, <16 x i8> addrspace(2)* %160, !tbaa !0
   %162 = fcmp ugt float %17, 0.000000e+00
   %163 = select i1 %162, float 1.000000e+00, float 0.000000e+00
   %164 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %4, <2 x i32> %6)

Modified: llvm/trunk/test/CodeGen/R600/si-triv-disjoint-mem-access.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/si-triv-disjoint-mem-access.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/si-triv-disjoint-mem-access.ll (original)
+++ llvm/trunk/test/CodeGen/R600/si-triv-disjoint-mem-access.ll Fri Feb 27 15:17:42 2015
@@ -14,14 +14,14 @@ declare void @llvm.AMDGPU.barrier.local(
 ; CI-NEXT: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:8
 ; CI: buffer_store_dword
 define void @reorder_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
-  %ptr0 = load i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
+  %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
 
   %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
   %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2
 
-  %tmp1 = load i32 addrspace(3)* %ptr1, align 4
+  %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
   store i32 99, i32 addrspace(1)* %gptr, align 4
-  %tmp2 = load i32 addrspace(3)* %ptr2, align 4
+  %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4
 
   %add = add nsw i32 %tmp1, %tmp2
 
@@ -34,14 +34,14 @@ define void @reorder_local_load_global_s
 ; CI: buffer_store_dword
 ; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:8
 define void @no_reorder_local_load_volatile_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
-  %ptr0 = load i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
+  %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
 
   %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
   %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2
 
-  %tmp1 = load i32 addrspace(3)* %ptr1, align 4
+  %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
   store volatile i32 99, i32 addrspace(1)* %gptr, align 4
-  %tmp2 = load i32 addrspace(3)* %ptr2, align 4
+  %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4
 
   %add = add nsw i32 %tmp1, %tmp2
 
@@ -54,15 +54,15 @@ define void @no_reorder_local_load_volat
 ; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:8
 ; CI: buffer_store_dword
 define void @no_reorder_barrier_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
-  %ptr0 = load i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
+  %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
 
   %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
   %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2
 
-  %tmp1 = load i32 addrspace(3)* %ptr1, align 4
+  %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
   store i32 99, i32 addrspace(1)* %gptr, align 4
   call void @llvm.AMDGPU.barrier.local() #2
-  %tmp2 = load i32 addrspace(3)* %ptr2, align 4
+  %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4
 
   %add = add nsw i32 %tmp1, %tmp2
 
@@ -79,14 +79,14 @@ define void @no_reorder_barrier_local_lo
 ; CI: buffer_load_dword
 ; CI: buffer_store_dword
 define void @no_reorder_constant_load_global_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
-  %ptr0 = load i32 addrspace(2)* addrspace(3)* @stored_constant_ptr, align 8
+  %ptr0 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(3)* @stored_constant_ptr, align 8
 
   %ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
   %ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2
 
-  %tmp1 = load i32 addrspace(2)* %ptr1, align 4
+  %tmp1 = load i32, i32 addrspace(2)* %ptr1, align 4
   store i32 99, i32 addrspace(1)* %gptr, align 4
-  %tmp2 = load i32 addrspace(2)* %ptr2, align 4
+  %tmp2 = load i32, i32 addrspace(2)* %ptr2, align 4
 
   %add = add nsw i32 %tmp1, %tmp2
 
@@ -100,14 +100,14 @@ define void @no_reorder_constant_load_gl
 ; CI: ds_write_b32
 ; CI: buffer_store_dword
 define void @reorder_constant_load_local_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr) #0 {
-  %ptr0 = load i32 addrspace(2)* addrspace(3)* @stored_constant_ptr, align 8
+  %ptr0 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(3)* @stored_constant_ptr, align 8
 
   %ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
   %ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2
 
-  %tmp1 = load i32 addrspace(2)* %ptr1, align 4
+  %tmp1 = load i32, i32 addrspace(2)* %ptr1, align 4
   store i32 99, i32 addrspace(3)* %lptr, align 4
-  %tmp2 = load i32 addrspace(2)* %ptr2, align 4
+  %tmp2 = load i32, i32 addrspace(2)* %ptr2, align 4
 
   %add = add nsw i32 %tmp1, %tmp2
 
@@ -125,9 +125,9 @@ define void @reorder_smrd_load_local_sto
   %ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
   %ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2
 
-  %tmp1 = load i32 addrspace(2)* %ptr1, align 4
+  %tmp1 = load i32, i32 addrspace(2)* %ptr1, align 4
   store i32 99, i32 addrspace(3)* %lptr, align 4
-  %tmp2 = load i32 addrspace(2)* %ptr2, align 4
+  %tmp2 = load i32, i32 addrspace(2)* %ptr2, align 4
 
   %add = add nsw i32 %tmp1, %tmp2
 
@@ -144,9 +144,9 @@ define void @reorder_global_load_local_s
   %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 1
   %ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 2
 
-  %tmp1 = load i32 addrspace(1)* %ptr1, align 4
+  %tmp1 = load i32, i32 addrspace(1)* %ptr1, align 4
   store i32 99, i32 addrspace(3)* %lptr, align 4
-  %tmp2 = load i32 addrspace(1)* %ptr2, align 4
+  %tmp2 = load i32, i32 addrspace(1)* %ptr2, align 4
 
   %add = add nsw i32 %tmp1, %tmp2
 
@@ -168,10 +168,10 @@ define void @reorder_local_offsets(i32 a
   %ptr3 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 101
 
   store i32 123, i32 addrspace(3)* %ptr1, align 4
-  %tmp1 = load i32 addrspace(3)* %ptr2, align 4
-  %tmp2 = load i32 addrspace(3)* %ptr3, align 4
+  %tmp1 = load i32, i32 addrspace(3)* %ptr2, align 4
+  %tmp2 = load i32, i32 addrspace(3)* %ptr3, align 4
   store i32 123, i32 addrspace(3)* %ptr2, align 4
-  %tmp3 = load i32 addrspace(3)* %ptr1, align 4
+  %tmp3 = load i32, i32 addrspace(3)* %ptr1, align 4
   store i32 789, i32 addrspace(3)* %ptr3, align 4
 
   %add.0 = add nsw i32 %tmp2, %tmp1
@@ -194,10 +194,10 @@ define void @reorder_global_offsets(i32
   %ptr3 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 101
 
   store i32 123, i32 addrspace(1)* %ptr1, align 4
-  %tmp1 = load i32 addrspace(1)* %ptr2, align 4
-  %tmp2 = load i32 addrspace(1)* %ptr3, align 4
+  %tmp1 = load i32, i32 addrspace(1)* %ptr2, align 4
+  %tmp2 = load i32, i32 addrspace(1)* %ptr3, align 4
   store i32 123, i32 addrspace(1)* %ptr2, align 4
-  %tmp3 = load i32 addrspace(1)* %ptr1, align 4
+  %tmp3 = load i32, i32 addrspace(1)* %ptr1, align 4
   store i32 789, i32 addrspace(1)* %ptr3, align 4
 
   %add.0 = add nsw i32 %tmp2, %tmp1
@@ -211,19 +211,19 @@ define void @reorder_global_offsets(i32
 ; XCI: TBUFFER_STORE_FORMAT
 ; XCI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}}, 0x8
 ; define void @reorder_local_load_tbuffer_store_local_load(i32 addrspace(1)* %out, i32 %a1, i32 %vaddr) #1 {
-;   %ptr0 = load i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
+;   %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
 
 ;   %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
 ;   %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2
 
-;   %tmp1 = load i32 addrspace(3)* %ptr1, align 4
+;   %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
 
 ;   %vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
 ;   call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
 ;         i32 4, i32 %vaddr, i32 0, i32 32, i32 14, i32 4, i32 1, i32 0, i32 1,
 ;         i32 1, i32 0)
 
-;   %tmp2 = load i32 addrspace(3)* %ptr2, align 4
+;   %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4
 
 ;   %add = add nsw i32 %tmp1, %tmp2
 

Modified: llvm/trunk/test/CodeGen/R600/si-vector-hang.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/si-vector-hang.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/si-vector-hang.ll (original)
+++ llvm/trunk/test/CodeGen/R600/si-vector-hang.ll Fri Feb 27 15:17:42 2015
@@ -17,52 +17,52 @@ target triple = "r600--"
 ; Function Attrs: nounwind
 define void @test_8_min_char(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture readonly %in0, i8 addrspace(1)* nocapture readonly %in1) #0 {
 entry:
-  %0 = load i8 addrspace(1)* %in0, align 1
+  %0 = load i8, i8 addrspace(1)* %in0, align 1
   %1 = insertelement <8 x i8> undef, i8 %0, i32 0
   %arrayidx2.i.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 1
-  %2 = load i8 addrspace(1)* %arrayidx2.i.i, align 1
+  %2 = load i8, i8 addrspace(1)* %arrayidx2.i.i, align 1
   %3 = insertelement <8 x i8> %1, i8 %2, i32 1
   %arrayidx6.i.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 2
-  %4 = load i8 addrspace(1)* %arrayidx6.i.i, align 1
+  %4 = load i8, i8 addrspace(1)* %arrayidx6.i.i, align 1
   %5 = insertelement <8 x i8> %3, i8 %4, i32 2
   %arrayidx10.i.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 3
-  %6 = load i8 addrspace(1)* %arrayidx10.i.i, align 1
+  %6 = load i8, i8 addrspace(1)* %arrayidx10.i.i, align 1
   %7 = insertelement <8 x i8> %5, i8 %6, i32 3
   %arrayidx.i.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 4
-  %8 = load i8 addrspace(1)* %arrayidx.i.i, align 1
+  %8 = load i8, i8 addrspace(1)* %arrayidx.i.i, align 1
   %9 = insertelement <8 x i8> undef, i8 %8, i32 0
   %arrayidx2.i9.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 5
-  %10 = load i8 addrspace(1)* %arrayidx2.i9.i, align 1
+  %10 = load i8, i8 addrspace(1)* %arrayidx2.i9.i, align 1
   %11 = insertelement <8 x i8> %9, i8 %10, i32 1
   %arrayidx6.i11.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 6
-  %12 = load i8 addrspace(1)* %arrayidx6.i11.i, align 1
+  %12 = load i8, i8 addrspace(1)* %arrayidx6.i11.i, align 1
   %13 = insertelement <8 x i8> %11, i8 %12, i32 2
   %arrayidx10.i13.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 7
-  %14 = load i8 addrspace(1)* %arrayidx10.i13.i, align 1
+  %14 = load i8, i8 addrspace(1)* %arrayidx10.i13.i, align 1
   %15 = insertelement <8 x i8> %13, i8 %14, i32 3
   %vecinit5.i = shufflevector <8 x i8> %7, <8 x i8> %15, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
-  %16 = load i8 addrspace(1)* %in1, align 1
+  %16 = load i8, i8 addrspace(1)* %in1, align 1
   %17 = insertelement <8 x i8> undef, i8 %16, i32 0
   %arrayidx2.i.i4 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 1
-  %18 = load i8 addrspace(1)* %arrayidx2.i.i4, align 1
+  %18 = load i8, i8 addrspace(1)* %arrayidx2.i.i4, align 1
   %19 = insertelement <8 x i8> %17, i8 %18, i32 1
   %arrayidx6.i.i5 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 2
-  %20 = load i8 addrspace(1)* %arrayidx6.i.i5, align 1
+  %20 = load i8, i8 addrspace(1)* %arrayidx6.i.i5, align 1
   %21 = insertelement <8 x i8> %19, i8 %20, i32 2
   %arrayidx10.i.i6 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 3
-  %22 = load i8 addrspace(1)* %arrayidx10.i.i6, align 1
+  %22 = load i8, i8 addrspace(1)* %arrayidx10.i.i6, align 1
   %23 = insertelement <8 x i8> %21, i8 %22, i32 3
   %arrayidx.i.i7 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 4
-  %24 = load i8 addrspace(1)* %arrayidx.i.i7, align 1
+  %24 = load i8, i8 addrspace(1)* %arrayidx.i.i7, align 1
   %25 = insertelement <8 x i8> undef, i8 %24, i32 0
   %arrayidx2.i9.i8 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 5
-  %26 = load i8 addrspace(1)* %arrayidx2.i9.i8, align 1
+  %26 = load i8, i8 addrspace(1)* %arrayidx2.i9.i8, align 1
   %27 = insertelement <8 x i8> %25, i8 %26, i32 1
   %arrayidx6.i11.i9 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 6
-  %28 = load i8 addrspace(1)* %arrayidx6.i11.i9, align 1
+  %28 = load i8, i8 addrspace(1)* %arrayidx6.i11.i9, align 1
   %29 = insertelement <8 x i8> %27, i8 %28, i32 2
   %arrayidx10.i13.i10 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 7
-  %30 = load i8 addrspace(1)* %arrayidx10.i13.i10, align 1
+  %30 = load i8, i8 addrspace(1)* %arrayidx10.i13.i10, align 1
   %31 = insertelement <8 x i8> %29, i8 %30, i32 3
   %vecinit5.i11 = shufflevector <8 x i8> %23, <8 x i8> %31, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
   %cmp.i = icmp slt <8 x i8> %vecinit5.i, %vecinit5.i11

Modified: llvm/trunk/test/CodeGen/R600/sign_extend.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/sign_extend.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/sign_extend.ll (original)
+++ llvm/trunk/test/CodeGen/R600/sign_extend.ll Fri Feb 27 15:17:42 2015
@@ -48,7 +48,7 @@ define void @s_sext_i32_to_i64(i64 addrs
 ; SI: v_ashr
 ; SI: s_endpgm
 define void @v_sext_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %val = load i32 addrspace(1)* %in, align 4
+  %val = load i32, i32 addrspace(1)* %in, align 4
   %sext = sext i32 %val to i64
   store i64 %sext, i64 addrspace(1)* %out, align 8
   ret void

Modified: llvm/trunk/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll (original)
+++ llvm/trunk/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll Fri Feb 27 15:17:42 2015
@@ -31,7 +31,7 @@ define void @trunc_load_alloca_i64(i64 a
   store i64 3935, i64* %gep2, align 8
   store i64 9342, i64* %gep3, align 8
   %gep = getelementptr i64, i64* %alloca, i32 %idx
-  %load = load i64* %gep, align 8
+  %load = load i64, i64* %gep, align 8
   %mask = and i64 %load, 4294967296
   %add = add i64 %mask, -1
   store i64 %add, i64 addrspace(1)* %out, align 4

Modified: llvm/trunk/test/CodeGen/R600/sint_to_fp.f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/sint_to_fp.f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/sint_to_fp.f64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/sint_to_fp.f64.ll Fri Feb 27 15:17:42 2015
@@ -54,7 +54,7 @@ define void @s_sint_to_fp_i64_to_f64(dou
 define void @v_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
-  %val = load i64 addrspace(1)* %gep, align 8
+  %val = load i64, i64 addrspace(1)* %gep, align 8
   %result = sitofp i64 %val to double
   store double %result, double addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/sint_to_fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/sint_to_fp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/sint_to_fp.ll (original)
+++ llvm/trunk/test/CodeGen/R600/sint_to_fp.ll Fri Feb 27 15:17:42 2015
@@ -35,7 +35,7 @@ define void @sint_to_fp_v2i32(<2 x float
 ; SI: v_cvt_f32_i32_e32
 ; SI: v_cvt_f32_i32_e32
 define void @sint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
-  %value = load <4 x i32> addrspace(1) * %in
+  %value = load <4 x i32>, <4 x i32> addrspace(1) * %in
   %result = sitofp <4 x i32> %value to <4 x float>
   store <4 x float> %result, <4 x float> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/smrd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/smrd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/smrd.ll (original)
+++ llvm/trunk/test/CodeGen/R600/smrd.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
 define void @smrd0(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
 entry:
   %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 1
-  %1 = load i32 addrspace(2)* %0
+  %1 = load i32, i32 addrspace(2)* %0
   store i32 %1, i32 addrspace(1)* %out
   ret void
 }
@@ -20,7 +20,7 @@ entry:
 define void @smrd1(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
 entry:
   %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 255
-  %1 = load i32 addrspace(2)* %0
+  %1 = load i32, i32 addrspace(2)* %0
   store i32 %1, i32 addrspace(1)* %out
   ret void
 }
@@ -34,7 +34,7 @@ entry:
 define void @smrd2(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
 entry:
   %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 256
-  %1 = load i32 addrspace(2)* %0
+  %1 = load i32, i32 addrspace(2)* %0
   store i32 %1, i32 addrspace(1)* %out
   ret void
 }
@@ -55,7 +55,7 @@ entry:
 define void @smrd3(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
 entry:
   %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 4294967296 ; 2 ^ 32
-  %1 = load i32 addrspace(2)* %0
+  %1 = load i32, i32 addrspace(2)* %0
   store i32 %1, i32 addrspace(1)* %out
   ret void
 }
@@ -67,7 +67,7 @@ entry:
 define void @smrd_load_const0(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
 main_body:
   %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
-  %21 = load <16 x i8> addrspace(2)* %20
+  %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20
   %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
   call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
   ret void
@@ -81,7 +81,7 @@ main_body:
 define void @smrd_load_const1(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
 main_body:
   %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
-  %21 = load <16 x i8> addrspace(2)* %20
+  %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20
   %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1020)
   call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
   ret void
@@ -96,7 +96,7 @@ main_body:
 define void @smrd_load_const2(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
 main_body:
   %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
-  %21 = load <16 x i8> addrspace(2)* %20
+  %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20
   %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1024)
   call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
   ret void

Modified: llvm/trunk/test/CodeGen/R600/split-scalar-i64-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/split-scalar-i64-add.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/split-scalar-i64-add.ll (original)
+++ llvm/trunk/test/CodeGen/R600/split-scalar-i64-add.ll Fri Feb 27 15:17:42 2015
@@ -38,7 +38,7 @@ define void @imp_def_vcc_split_i64_add_1
 define void @imp_def_vcc_split_i64_add_2(i64 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %val0, i64 %val1) {
   %tid = call i32 @llvm.r600.read.tidig.x() readnone
   %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
-  %load = load i32 addrspace(1)* %gep
+  %load = load i32, i32 addrspace(1)* %gep
   %vec.0 = insertelement <2 x i32> undef, i32 %val0, i32 0
   %vec.1 = insertelement <2 x i32> %vec.0, i32 %load, i32 1
   %bc = bitcast <2 x i32> %vec.1 to i64

Modified: llvm/trunk/test/CodeGen/R600/sra.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/sra.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/sra.ll (original)
+++ llvm/trunk/test/CodeGen/R600/sra.ll Fri Feb 27 15:17:42 2015
@@ -16,8 +16,8 @@
 
 define void @ashr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
-  %a = load <2 x i32> addrspace(1) * %in
-  %b = load <2 x i32> addrspace(1) * %b_ptr
+  %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
+  %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
   %result = ashr <2 x i32> %a, %b
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
@@ -43,8 +43,8 @@ define void @ashr_v2i32(<2 x i32> addrsp
 
 define void @ashr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %a = load <4 x i32> addrspace(1) * %in
-  %b = load <4 x i32> addrspace(1) * %b_ptr
+  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
+  %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
   %result = ashr <4 x i32> %a, %b
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
@@ -90,8 +90,8 @@ entry:
 define void @ashr_i64_2(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
 entry:
   %b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
-  %a = load i64 addrspace(1) * %in
-  %b = load i64 addrspace(1) * %b_ptr
+  %a = load i64, i64 addrspace(1) * %in
+  %b = load i64, i64 addrspace(1) * %b_ptr
   %result = ashr i64 %a, %b
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -133,8 +133,8 @@ entry:
 
 define void @ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
-  %a = load <2 x i64> addrspace(1) * %in
-  %b = load <2 x i64> addrspace(1) * %b_ptr
+  %a = load <2 x i64>, <2 x i64> addrspace(1) * %in
+  %b = load <2 x i64>, <2 x i64> addrspace(1) * %b_ptr
   %result = ashr <2 x i64> %a, %b
   store <2 x i64> %result, <2 x i64> addrspace(1)* %out
   ret void
@@ -204,8 +204,8 @@ define void @ashr_v2i64(<2 x i64> addrsp
 
 define void @ashr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
-  %a = load <4 x i64> addrspace(1) * %in
-  %b = load <4 x i64> addrspace(1) * %b_ptr
+  %a = load <4 x i64>, <4 x i64> addrspace(1) * %in
+  %b = load <4 x i64>, <4 x i64> addrspace(1) * %b_ptr
   %result = ashr <4 x i64> %a, %b
   store <4 x i64> %result, <4 x i64> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/srem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/srem.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/srem.ll (original)
+++ llvm/trunk/test/CodeGen/R600/srem.ll Fri Feb 27 15:17:42 2015
@@ -4,15 +4,15 @@
 
 define void @srem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in
-  %den = load i32 addrspace(1) * %den_ptr
+  %num = load i32, i32 addrspace(1) * %in
+  %den = load i32, i32 addrspace(1) * %den_ptr
   %result = srem i32 %num, %den
   store i32 %result, i32 addrspace(1)* %out
   ret void
 }
 
 define void @srem_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
-  %num = load i32 addrspace(1) * %in
+  %num = load i32, i32 addrspace(1) * %in
   %result = srem i32 %num, 4
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -25,7 +25,7 @@ define void @srem_i32_4(i32 addrspace(1)
 ; SI: v_sub_i32
 ; SI: s_endpgm
 define void @srem_i32_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
-  %num = load i32 addrspace(1) * %in
+  %num = load i32, i32 addrspace(1) * %in
   %result = srem i32 %num, 7
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -33,15 +33,15 @@ define void @srem_i32_7(i32 addrspace(1)
 
 define void @srem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %den_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
-  %num = load <2 x i32> addrspace(1) * %in
-  %den = load <2 x i32> addrspace(1) * %den_ptr
+  %num = load <2 x i32>, <2 x i32> addrspace(1) * %in
+  %den = load <2 x i32>, <2 x i32> addrspace(1) * %den_ptr
   %result = srem <2 x i32> %num, %den
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
 }
 
 define void @srem_v2i32_4(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
-  %num = load <2 x i32> addrspace(1) * %in
+  %num = load <2 x i32>, <2 x i32> addrspace(1) * %in
   %result = srem <2 x i32> %num, <i32 4, i32 4>
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
@@ -49,15 +49,15 @@ define void @srem_v2i32_4(<2 x i32> addr
 
 define void @srem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %den_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %num = load <4 x i32> addrspace(1) * %in
-  %den = load <4 x i32> addrspace(1) * %den_ptr
+  %num = load <4 x i32>, <4 x i32> addrspace(1) * %in
+  %den = load <4 x i32>, <4 x i32> addrspace(1) * %den_ptr
   %result = srem <4 x i32> %num, %den
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
 }
 
 define void @srem_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
-  %num = load <4 x i32> addrspace(1) * %in
+  %num = load <4 x i32>, <4 x i32> addrspace(1) * %in
   %result = srem <4 x i32> %num, <i32 4, i32 4, i32 4, i32 4>
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
@@ -65,15 +65,15 @@ define void @srem_v4i32_4(<4 x i32> addr
 
 define void @srem_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
   %den_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
-  %num = load i64 addrspace(1) * %in
-  %den = load i64 addrspace(1) * %den_ptr
+  %num = load i64, i64 addrspace(1) * %in
+  %den = load i64, i64 addrspace(1) * %den_ptr
   %result = srem i64 %num, %den
   store i64 %result, i64 addrspace(1)* %out
   ret void
 }
 
 define void @srem_i64_4(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
-  %num = load i64 addrspace(1) * %in
+  %num = load i64, i64 addrspace(1) * %in
   %result = srem i64 %num, 4
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -81,15 +81,15 @@ define void @srem_i64_4(i64 addrspace(1)
 
 define void @srem_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
   %den_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
-  %num = load <2 x i64> addrspace(1) * %in
-  %den = load <2 x i64> addrspace(1) * %den_ptr
+  %num = load <2 x i64>, <2 x i64> addrspace(1) * %in
+  %den = load <2 x i64>, <2 x i64> addrspace(1) * %den_ptr
   %result = srem <2 x i64> %num, %den
   store <2 x i64> %result, <2 x i64> addrspace(1)* %out
   ret void
 }
 
 define void @srem_v2i64_4(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
-  %num = load <2 x i64> addrspace(1) * %in
+  %num = load <2 x i64>, <2 x i64> addrspace(1) * %in
   %result = srem <2 x i64> %num, <i64 4, i64 4>
   store <2 x i64> %result, <2 x i64> addrspace(1)* %out
   ret void
@@ -97,15 +97,15 @@ define void @srem_v2i64_4(<2 x i64> addr
 
 define void @srem_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
   %den_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
-  %num = load <4 x i64> addrspace(1) * %in
-  %den = load <4 x i64> addrspace(1) * %den_ptr
+  %num = load <4 x i64>, <4 x i64> addrspace(1) * %in
+  %den = load <4 x i64>, <4 x i64> addrspace(1) * %den_ptr
   %result = srem <4 x i64> %num, %den
   store <4 x i64> %result, <4 x i64> addrspace(1)* %out
   ret void
 }
 
 define void @srem_v4i64_4(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
-  %num = load <4 x i64> addrspace(1) * %in
+  %num = load <4 x i64>, <4 x i64> addrspace(1) * %in
   %result = srem <4 x i64> %num, <i64 4, i64 4, i64 4, i64 4>
   store <4 x i64> %result, <4 x i64> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/srl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/srl.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/srl.ll (original)
+++ llvm/trunk/test/CodeGen/R600/srl.ll Fri Feb 27 15:17:42 2015
@@ -8,8 +8,8 @@
 ; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 define void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %a = load i32 addrspace(1)* %in
-  %b = load i32 addrspace(1)* %b_ptr
+  %a = load i32, i32 addrspace(1)* %in
+  %b = load i32, i32 addrspace(1)* %b_ptr
   %result = lshr i32 %a, %b
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -26,8 +26,8 @@ define void @lshr_i32(i32 addrspace(1)*
 ; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 define void @lshr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
-  %a = load <2 x i32> addrspace(1)* %in
-  %b = load <2 x i32> addrspace(1)* %b_ptr
+  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
+  %b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
   %result = lshr <2 x i32> %a, %b
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
@@ -50,8 +50,8 @@ define void @lshr_v2i32(<2 x i32> addrsp
 ; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 define void @lshr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %a = load <4 x i32> addrspace(1)* %in
-  %b = load <4 x i32> addrspace(1)* %b_ptr
+  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
+  %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
   %result = lshr <4 x i32> %a, %b
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
@@ -74,8 +74,8 @@ define void @lshr_v4i32(<4 x i32> addrsp
 ; EG-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
 define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
   %b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
-  %a = load i64 addrspace(1)* %in
-  %b = load i64 addrspace(1)* %b_ptr
+  %a = load i64, i64 addrspace(1)* %in
+  %b = load i64, i64 addrspace(1)* %b_ptr
   %result = lshr i64 %a, %b
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -112,8 +112,8 @@ define void @lshr_i64(i64 addrspace(1)*
 ; EG-DAG: CNDE_INT
 define void @lshr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
-  %a = load <2 x i64> addrspace(1)* %in
-  %b = load <2 x i64> addrspace(1)* %b_ptr
+  %a = load <2 x i64>, <2 x i64> addrspace(1)* %in
+  %b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
   %result = lshr <2 x i64> %a, %b
   store <2 x i64> %result, <2 x i64> addrspace(1)* %out
   ret void
@@ -178,8 +178,8 @@ define void @lshr_v2i64(<2 x i64> addrsp
 ; EG-DAG: CNDE_INT
 define void @lshr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
-  %a = load <4 x i64> addrspace(1)* %in
-  %b = load <4 x i64> addrspace(1)* %b_ptr
+  %a = load <4 x i64>, <4 x i64> addrspace(1)* %in
+  %b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
   %result = lshr <4 x i64> %a, %b
   store <4 x i64> %result, <4 x i64> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/ssubo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/ssubo.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/ssubo.ll (original)
+++ llvm/trunk/test/CodeGen/R600/ssubo.ll Fri Feb 27 15:17:42 2015
@@ -28,8 +28,8 @@ define void @s_ssubo_i32(i32 addrspace(1
 
 ; FUNC-LABEL: {{^}}v_ssubo_i32:
 define void @v_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
-  %a = load i32 addrspace(1)* %aptr, align 4
-  %b = load i32 addrspace(1)* %bptr, align 4
+  %a = load i32, i32 addrspace(1)* %aptr, align 4
+  %b = load i32, i32 addrspace(1)* %bptr, align 4
   %ssub = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) nounwind
   %val = extractvalue { i32, i1 } %ssub, 0
   %carry = extractvalue { i32, i1 } %ssub, 1
@@ -54,8 +54,8 @@ define void @s_ssubo_i64(i64 addrspace(1
 ; SI: v_sub_i32_e32
 ; SI: v_subb_u32_e32
 define void @v_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
-  %a = load i64 addrspace(1)* %aptr, align 4
-  %b = load i64 addrspace(1)* %bptr, align 4
+  %a = load i64, i64 addrspace(1)* %aptr, align 4
+  %b = load i64, i64 addrspace(1)* %bptr, align 4
   %ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
   %val = extractvalue { i64, i1 } %ssub, 0
   %carry = extractvalue { i64, i1 } %ssub, 1

Modified: llvm/trunk/test/CodeGen/R600/store-barrier.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/store-barrier.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/store-barrier.ll (original)
+++ llvm/trunk/test/CodeGen/R600/store-barrier.ll Fri Feb 27 15:17:42 2015
@@ -15,22 +15,22 @@
 define void @test(<2 x i8> addrspace(3)* nocapture %arg, <2 x i8> addrspace(1)* nocapture readonly %arg1, i32 addrspace(1)* nocapture readonly %arg2, <2 x i8> addrspace(1)* nocapture %arg3, i32 %arg4, i64 %tmp9) {
 bb:
   %tmp10 = getelementptr inbounds i32, i32 addrspace(1)* %arg2, i64 %tmp9
-  %tmp13 = load i32 addrspace(1)* %tmp10, align 2
+  %tmp13 = load i32, i32 addrspace(1)* %tmp10, align 2
   %tmp14 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %arg, i32 %tmp13
-  %tmp15 = load <2 x i8> addrspace(3)* %tmp14, align 2
+  %tmp15 = load <2 x i8>, <2 x i8> addrspace(3)* %tmp14, align 2
   %tmp16 = add i32 %tmp13, 1
   %tmp17 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %arg, i32 %tmp16
   store <2 x i8> %tmp15, <2 x i8> addrspace(3)* %tmp17, align 2
   tail call void @llvm.AMDGPU.barrier.local() #2
-  %tmp25 = load i32 addrspace(1)* %tmp10, align 4
+  %tmp25 = load i32, i32 addrspace(1)* %tmp10, align 4
   %tmp26 = sext i32 %tmp25 to i64
   %tmp27 = sext i32 %arg4 to i64
   %tmp28 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %arg, i32 %tmp25, i32 %arg4
-  %tmp29 = load i8 addrspace(3)* %tmp28, align 1
+  %tmp29 = load i8, i8 addrspace(3)* %tmp28, align 1
   %tmp30 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %arg3, i64 %tmp26, i64 %tmp27
   store i8 %tmp29, i8 addrspace(1)* %tmp30, align 1
   %tmp32 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %arg, i32 %tmp25, i32 0
-  %tmp33 = load i8 addrspace(3)* %tmp32, align 1
+  %tmp33 = load i8, i8 addrspace(3)* %tmp32, align 1
   %tmp35 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %arg3, i64 %tmp26, i64 0
   store i8 %tmp33, i8 addrspace(1)* %tmp35, align 1
   ret void

Modified: llvm/trunk/test/CodeGen/R600/store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/store.ll (original)
+++ llvm/trunk/test/CodeGen/R600/store.ll Fri Feb 27 15:17:42 2015
@@ -334,9 +334,9 @@ entry:
 ; SI: buffer_store_dwordx2
 define void @vecload2(i32 addrspace(1)* nocapture %out, i32 addrspace(2)* nocapture %mem) #0 {
 entry:
-  %0 = load i32 addrspace(2)* %mem, align 4
+  %0 = load i32, i32 addrspace(2)* %mem, align 4
   %arrayidx1.i = getelementptr inbounds i32, i32 addrspace(2)* %mem, i64 1
-  %1 = load i32 addrspace(2)* %arrayidx1.i, align 4
+  %1 = load i32, i32 addrspace(2)* %arrayidx1.i, align 4
   store i32 %0, i32 addrspace(1)* %out, align 4
   %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
   store i32 %1, i32 addrspace(1)* %arrayidx1, align 4

Modified: llvm/trunk/test/CodeGen/R600/store.r600.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/store.r600.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/store.r600.ll (original)
+++ llvm/trunk/test/CodeGen/R600/store.r600.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
 ; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
 
 define void @store_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
-  %1 = load <4 x i32> addrspace(1) * %in
+  %1 = load <4 x i32>, <4 x i32> addrspace(1) * %in
   store <4 x i32> %1, <4 x i32> addrspace(1)* %out
   ret void
 }
@@ -16,7 +16,7 @@ define void @store_v4i32(<4 x i32> addrs
 ; EG: {{^}}store_v4f32:
 ; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
 define void @store_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
-  %1 = load <4 x float> addrspace(1) * %in
+  %1 = load <4 x float>, <4 x float> addrspace(1) * %in
   store <4 x float> %1, <4 x float> addrspace(1)* %out
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/sub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/sub.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/sub.ll (original)
+++ llvm/trunk/test/CodeGen/R600/sub.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ declare i32 @llvm.r600.read.tidig.x() re
 ; SI: v_subrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
 define void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %a = load i32 addrspace(1)* %in
-  %b = load i32 addrspace(1)* %b_ptr
+  %a = load i32, i32 addrspace(1)* %in
+  %b = load i32, i32 addrspace(1)* %b_ptr
   %result = sub i32 %a, %b
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -27,8 +27,8 @@ define void @test_sub_i32(i32 addrspace(
 
 define void @test_sub_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
-  %a = load <2 x i32> addrspace(1) * %in
-  %b = load <2 x i32> addrspace(1) * %b_ptr
+  %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
+  %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
   %result = sub <2 x i32> %a, %b
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
@@ -47,8 +47,8 @@ define void @test_sub_v2i32(<2 x i32> ad
 
 define void @test_sub_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %a = load <4 x i32> addrspace(1) * %in
-  %b = load <4 x i32> addrspace(1) * %b_ptr
+  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
+  %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
   %result = sub <4 x i32> %a, %b
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
@@ -82,8 +82,8 @@ define void @v_sub_i64(i64 addrspace(1)*
   %tid = call i32 @llvm.r600.read.tidig.x() readnone
   %a_ptr = getelementptr i64, i64 addrspace(1)* %inA, i32 %tid
   %b_ptr = getelementptr i64, i64 addrspace(1)* %inB, i32 %tid
-  %a = load i64 addrspace(1)* %a_ptr
-  %b = load i64 addrspace(1)* %b_ptr
+  %a = load i64, i64 addrspace(1)* %a_ptr
+  %b = load i64, i64 addrspace(1)* %b_ptr
   %result = sub i64 %a, %b
   store i64 %result, i64 addrspace(1)* %out, align 8
   ret void
@@ -98,8 +98,8 @@ define void @v_test_sub_v2i64(<2 x i64>
   %tid = call i32 @llvm.r600.read.tidig.x() readnone
   %a_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inA, i32 %tid
   %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inB, i32 %tid
-  %a = load <2 x i64> addrspace(1)* %a_ptr
-  %b = load <2 x i64> addrspace(1)* %b_ptr
+  %a = load <2 x i64>, <2 x i64> addrspace(1)* %a_ptr
+  %b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
   %result = sub <2 x i64> %a, %b
   store <2 x i64> %result, <2 x i64> addrspace(1)* %out
   ret void
@@ -118,8 +118,8 @@ define void @v_test_sub_v4i64(<4 x i64>
   %tid = call i32 @llvm.r600.read.tidig.x() readnone
   %a_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inA, i32 %tid
   %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inB, i32 %tid
-  %a = load <4 x i64> addrspace(1)* %a_ptr
-  %b = load <4 x i64> addrspace(1)* %b_ptr
+  %a = load <4 x i64>, <4 x i64> addrspace(1)* %a_ptr
+  %b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
   %result = sub <4 x i64> %a, %b
   store <4 x i64> %result, <4 x i64> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/swizzle-export.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/swizzle-export.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/swizzle-export.ll (original)
+++ llvm/trunk/test/CodeGen/R600/swizzle-export.ll Fri Feb 27 15:17:42 2015
@@ -12,56 +12,56 @@ main_body:
   %1 = extractelement <4 x float> %reg1, i32 1
   %2 = extractelement <4 x float> %reg1, i32 2
   %3 = extractelement <4 x float> %reg1, i32 3
-  %4 = load <4 x float> addrspace(8)* null
+  %4 = load <4 x float>, <4 x float> addrspace(8)* null
   %5 = extractelement <4 x float> %4, i32 1
-  %6 = load <4 x float> addrspace(8)* null
+  %6 = load <4 x float>, <4 x float> addrspace(8)* null
   %7 = extractelement <4 x float> %6, i32 2
-  %8 = load <4 x float> addrspace(8)* null
+  %8 = load <4 x float>, <4 x float> addrspace(8)* null
   %9 = extractelement <4 x float> %8, i32 0
   %10 = fmul float 0.000000e+00, %9
-  %11 = load <4 x float> addrspace(8)* null
+  %11 = load <4 x float>, <4 x float> addrspace(8)* null
   %12 = extractelement <4 x float> %11, i32 0
   %13 = fmul float %5, %12
-  %14 = load <4 x float> addrspace(8)* null
+  %14 = load <4 x float>, <4 x float> addrspace(8)* null
   %15 = extractelement <4 x float> %14, i32 0
   %16 = fmul float 0.000000e+00, %15
-  %17 = load <4 x float> addrspace(8)* null
+  %17 = load <4 x float>, <4 x float> addrspace(8)* null
   %18 = extractelement <4 x float> %17, i32 0
   %19 = fmul float 0.000000e+00, %18
-  %20 = load <4 x float> addrspace(8)* null
+  %20 = load <4 x float>, <4 x float> addrspace(8)* null
   %21 = extractelement <4 x float> %20, i32 0
   %22 = fmul float %7, %21
-  %23 = load <4 x float> addrspace(8)* null
+  %23 = load <4 x float>, <4 x float> addrspace(8)* null
   %24 = extractelement <4 x float> %23, i32 0
   %25 = fmul float 0.000000e+00, %24
-  %26 = load <4 x float> addrspace(8)* null
+  %26 = load <4 x float>, <4 x float> addrspace(8)* null
   %27 = extractelement <4 x float> %26, i32 0
   %28 = fmul float 0.000000e+00, %27
-  %29 = load <4 x float> addrspace(8)* null
+  %29 = load <4 x float>, <4 x float> addrspace(8)* null
   %30 = extractelement <4 x float> %29, i32 0
   %31 = fmul float 0.000000e+00, %30
-  %32 = load <4 x float> addrspace(8)* null
+  %32 = load <4 x float>, <4 x float> addrspace(8)* null
   %33 = extractelement <4 x float> %32, i32 0
   %34 = fmul float 0.000000e+00, %33
-  %35 = load <4 x float> addrspace(8)* null
+  %35 = load <4 x float>, <4 x float> addrspace(8)* null
   %36 = extractelement <4 x float> %35, i32 0
   %37 = fmul float 0.000000e+00, %36
-  %38 = load <4 x float> addrspace(8)* null
+  %38 = load <4 x float>, <4 x float> addrspace(8)* null
   %39 = extractelement <4 x float> %38, i32 0
   %40 = fmul float 1.000000e+00, %39
-  %41 = load <4 x float> addrspace(8)* null
+  %41 = load <4 x float>, <4 x float> addrspace(8)* null
   %42 = extractelement <4 x float> %41, i32 0
   %43 = fmul float 0.000000e+00, %42
-  %44 = load <4 x float> addrspace(8)* null
+  %44 = load <4 x float>, <4 x float> addrspace(8)* null
   %45 = extractelement <4 x float> %44, i32 0
   %46 = fmul float 0.000000e+00, %45
-  %47 = load <4 x float> addrspace(8)* null
+  %47 = load <4 x float>, <4 x float> addrspace(8)* null
   %48 = extractelement <4 x float> %47, i32 0
   %49 = fmul float 0.000000e+00, %48
-  %50 = load <4 x float> addrspace(8)* null
+  %50 = load <4 x float>, <4 x float> addrspace(8)* null
   %51 = extractelement <4 x float> %50, i32 0
   %52 = fmul float 0.000000e+00, %51
-  %53 = load <4 x float> addrspace(8)* null
+  %53 = load <4 x float>, <4 x float> addrspace(8)* null
   %54 = extractelement <4 x float> %53, i32 0
   %55 = fmul float 1.000000e+00, %54
   %56 = insertelement <4 x float> undef, float %0, i32 0
@@ -102,12 +102,12 @@ main_body:
   %1 = extractelement <4 x float> %reg1, i32 1
   %2 = fadd float %0, 2.5
   %3 = fmul float %1, 3.5
-  %4 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+  %4 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
   %5 = extractelement <4 x float> %4, i32 0
   %6 = call float @llvm.cos.f32(float %5)
-  %7 = load <4 x float> addrspace(8)* null
+  %7 = load <4 x float>, <4 x float> addrspace(8)* null
   %8 = extractelement <4 x float> %7, i32 0
-  %9 = load <4 x float> addrspace(8)* null
+  %9 = load <4 x float>, <4 x float> addrspace(8)* null
   %10 = extractelement <4 x float> %9, i32 1
   %11 = insertelement <4 x float> undef, float %2, i32 0
   %12 = insertelement <4 x float> %11, float %3, i32 1

Modified: llvm/trunk/test/CodeGen/R600/trunc-cmp-constant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/trunc-cmp-constant.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/trunc-cmp-constant.ll (original)
+++ llvm/trunk/test/CodeGen/R600/trunc-cmp-constant.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@
 ; SI: v_cndmask_b32_e64
 ; SI: buffer_store_byte
 define void @sextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = sext i1 %load to i32
   %cmp = icmp eq i32 %ext, 0
   store i1 %cmp, i1 addrspace(1)* %out
@@ -25,7 +25,7 @@ define void @sextload_i1_to_i32_trunc_cm
 ; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[NEG]]
 ; SI-NEXT: buffer_store_byte [[RESULT]]
 define void @zextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = zext i1 %load to i32
   %cmp = icmp eq i32 %ext, 0
   store i1 %cmp, i1 addrspace(1)* %out
@@ -36,7 +36,7 @@ define void @zextload_i1_to_i32_trunc_cm
 ; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
 ; SI: buffer_store_byte [[RESULT]]
 define void @sextload_i1_to_i32_trunc_cmp_eq_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = sext i1 %load to i32
   %cmp = icmp eq i32 %ext, 1
   store i1 %cmp, i1 addrspace(1)* %out
@@ -48,7 +48,7 @@ define void @sextload_i1_to_i32_trunc_cm
 ; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[LOAD]]
 ; SI-NEXT: buffer_store_byte [[RESULT]]
 define void @zextload_i1_to_i32_trunc_cmp_eq_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = zext i1 %load to i32
   %cmp = icmp eq i32 %ext, 1
   store i1 %cmp, i1 addrspace(1)* %out
@@ -60,7 +60,7 @@ define void @zextload_i1_to_i32_trunc_cm
 ; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[LOAD]]
 ; SI-NEXT: buffer_store_byte [[RESULT]]
 define void @sextload_i1_to_i32_trunc_cmp_eq_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = sext i1 %load to i32
   %cmp = icmp eq i32 %ext, -1
   store i1 %cmp, i1 addrspace(1)* %out
@@ -71,7 +71,7 @@ define void @sextload_i1_to_i32_trunc_cm
 ; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
 ; SI: buffer_store_byte [[RESULT]]
 define void @zextload_i1_to_i32_trunc_cmp_eq_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = zext i1 %load to i32
   %cmp = icmp eq i32 %ext, -1
   store i1 %cmp, i1 addrspace(1)* %out
@@ -84,7 +84,7 @@ define void @zextload_i1_to_i32_trunc_cm
 ; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
 ; SI-NEXT: buffer_store_byte [[RESULT]]
 define void @sextload_i1_to_i32_trunc_cmp_ne_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = sext i1 %load to i32
   %cmp = icmp ne i32 %ext, 0
   store i1 %cmp, i1 addrspace(1)* %out
@@ -96,7 +96,7 @@ define void @sextload_i1_to_i32_trunc_cm
 ; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
 ; SI-NEXT: buffer_store_byte [[RESULT]]
 define void @zextload_i1_to_i32_trunc_cmp_ne_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = zext i1 %load to i32
   %cmp = icmp ne i32 %ext, 0
   store i1 %cmp, i1 addrspace(1)* %out
@@ -107,7 +107,7 @@ define void @zextload_i1_to_i32_trunc_cm
 ; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 1{{$}}
 ; SI: buffer_store_byte [[RESULT]]
 define void @sextload_i1_to_i32_trunc_cmp_ne_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = sext i1 %load to i32
   %cmp = icmp ne i32 %ext, 1
   store i1 %cmp, i1 addrspace(1)* %out
@@ -122,7 +122,7 @@ define void @sextload_i1_to_i32_trunc_cm
 ; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[NEG]]
 ; SI-NEXT: buffer_store_byte [[RESULT]]
 define void @zextload_i1_to_i32_trunc_cmp_ne_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = zext i1 %load to i32
   %cmp = icmp ne i32 %ext, 1
   store i1 %cmp, i1 addrspace(1)* %out
@@ -137,7 +137,7 @@ define void @zextload_i1_to_i32_trunc_cm
 ; XSI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[CMP0]]
 ; XSI-NEXT: buffer_store_byte [[RESULT]]
 define void @sextload_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = sext i1 %load to i32
   %cmp = icmp ne i32 %ext, -1
   store i1 %cmp, i1 addrspace(1)* %out
@@ -148,7 +148,7 @@ define void @sextload_i1_to_i32_trunc_cm
 ; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 1{{$}}
 ; SI: buffer_store_byte [[RESULT]]
 define void @zextload_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
-  %load = load i1 addrspace(1)* %in
+  %load = load i1, i1 addrspace(1)* %in
   %ext = zext i1 %load to i32
   %cmp = icmp ne i32 %ext, -1
   store i1 %cmp, i1 addrspace(1)* %out
@@ -161,7 +161,7 @@ define void @zextload_i1_to_i32_trunc_cm
 ; SI-NEXT: v_cndmask_b32_e64
 ; SI-NEXT: buffer_store_byte
 define void @masked_load_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
-  %load = load i8 addrspace(1)* %in
+  %load = load i8, i8 addrspace(1)* %in
   %masked = and i8 %load, 255
   %ext = sext i8 %masked to i32
   %cmp = icmp ne i32 %ext, -1

Modified: llvm/trunk/test/CodeGen/R600/trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/trunc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/trunc.ll (original)
+++ llvm/trunk/test/CodeGen/R600/trunc.ll Fri Feb 27 15:17:42 2015
@@ -53,7 +53,7 @@ define void @trunc_shl_i64(i64 addrspace
 ; SI: v_and_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
 ; SI: v_cmp_eq_i32
 define void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) {
-  %a = load i32 addrspace(1)* %ptr, align 4
+  %a = load i32, i32 addrspace(1)* %ptr, align 4
   %trunc = trunc i32 %a to i1
   %result = select i1 %trunc, i32 1, i32 0
   store i32 %result, i32 addrspace(1)* %out, align 4
@@ -91,7 +91,7 @@ define void @v_trunc_i64_to_i1(i32 addrs
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  %x = load i64 addrspace(1)* %gep
+  %x = load i64, i64 addrspace(1)* %gep
 
   %trunc = trunc i64 %x to i1
   %sel = select i1 %trunc, i32 63, i32 -12

Modified: llvm/trunk/test/CodeGen/R600/uaddo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/uaddo.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/uaddo.ll (original)
+++ llvm/trunk/test/CodeGen/R600/uaddo.ll Fri Feb 27 15:17:42 2015
@@ -33,8 +33,8 @@ define void @s_uaddo_i32(i32 addrspace(1
 ; FUNC-LABEL: {{^}}v_uaddo_i32:
 ; SI: v_add_i32
 define void @v_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
-  %a = load i32 addrspace(1)* %aptr, align 4
-  %b = load i32 addrspace(1)* %bptr, align 4
+  %a = load i32, i32 addrspace(1)* %aptr, align 4
+  %b = load i32, i32 addrspace(1)* %bptr, align 4
   %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) nounwind
   %val = extractvalue { i32, i1 } %uadd, 0
   %carry = extractvalue { i32, i1 } %uadd, 1
@@ -59,8 +59,8 @@ define void @s_uaddo_i64(i64 addrspace(1
 ; SI: v_add_i32
 ; SI: v_addc_u32
 define void @v_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
-  %a = load i64 addrspace(1)* %aptr, align 4
-  %b = load i64 addrspace(1)* %bptr, align 4
+  %a = load i64, i64 addrspace(1)* %aptr, align 4
+  %b = load i64, i64 addrspace(1)* %bptr, align 4
   %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) nounwind
   %val = extractvalue { i64, i1 } %uadd, 0
   %carry = extractvalue { i64, i1 } %uadd, 1

Modified: llvm/trunk/test/CodeGen/R600/udiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/udiv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/udiv.ll (original)
+++ llvm/trunk/test/CodeGen/R600/udiv.ll Fri Feb 27 15:17:42 2015
@@ -8,8 +8,8 @@
 
 define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %a = load i32 addrspace(1) * %in
-  %b = load i32 addrspace(1) * %b_ptr
+  %a = load i32, i32 addrspace(1) * %in
+  %b = load i32, i32 addrspace(1) * %b_ptr
   %result = udiv i32 %a, %b
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -26,8 +26,8 @@ define void @test(i32 addrspace(1)* %out
 
 define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
-  %a = load <2 x i32> addrspace(1) * %in
-  %b = load <2 x i32> addrspace(1) * %b_ptr
+  %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
+  %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
   %result = udiv <2 x i32> %a, %b
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
@@ -40,8 +40,8 @@ define void @test2(<2 x i32> addrspace(1
 
 define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %a = load <4 x i32> addrspace(1) * %in
-  %b = load <4 x i32> addrspace(1) * %b_ptr
+  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
+  %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
   %result = udiv <4 x i32> %a, %b
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/udivrem24.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/udivrem24.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/udivrem24.ll (original)
+++ llvm/trunk/test/CodeGen/R600/udivrem24.ll Fri Feb 27 15:17:42 2015
@@ -14,8 +14,8 @@
 ; EG: FLT_TO_UINT
 define void @udiv24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
   %den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
-  %num = load i8 addrspace(1) * %in
-  %den = load i8 addrspace(1) * %den_ptr
+  %num = load i8, i8 addrspace(1) * %in
+  %den = load i8, i8 addrspace(1) * %den_ptr
   %result = udiv i8 %num, %den
   store i8 %result, i8 addrspace(1)* %out
   ret void
@@ -33,8 +33,8 @@ define void @udiv24_i8(i8 addrspace(1)*
 ; EG: FLT_TO_UINT
 define void @udiv24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
   %den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
-  %num = load i16 addrspace(1) * %in, align 2
-  %den = load i16 addrspace(1) * %den_ptr, align 2
+  %num = load i16, i16 addrspace(1) * %in, align 2
+  %den = load i16, i16 addrspace(1) * %den_ptr, align 2
   %result = udiv i16 %num, %den
   store i16 %result, i16 addrspace(1)* %out, align 2
   ret void
@@ -52,8 +52,8 @@ define void @udiv24_i16(i16 addrspace(1)
 ; EG: FLT_TO_UINT
 define void @udiv24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 8
   %den.i24.0 = shl i32 %den, 8
   %num.i24 = lshr i32 %num.i24.0, 8
@@ -72,8 +72,8 @@ define void @udiv24_i32(i32 addrspace(1)
 ; EG-NOT: RECIP_IEEE
 define void @udiv25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 7
   %den.i24.0 = shl i32 %den, 7
   %num.i24 = lshr i32 %num.i24.0, 7
@@ -92,8 +92,8 @@ define void @udiv25_i32(i32 addrspace(1)
 ; EG-NOT: RECIP_IEEE
 define void @test_no_udiv24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 8
   %den.i24.0 = shl i32 %den, 7
   %num.i24 = lshr i32 %num.i24.0, 8
@@ -112,8 +112,8 @@ define void @test_no_udiv24_i32_1(i32 ad
 ; EG-NOT: RECIP_IEEE
 define void @test_no_udiv24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 7
   %den.i24.0 = shl i32 %den, 8
   %num.i24 = lshr i32 %num.i24.0, 7
@@ -135,8 +135,8 @@ define void @test_no_udiv24_i32_2(i32 ad
 ; EG: FLT_TO_UINT
 define void @urem24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
   %den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
-  %num = load i8 addrspace(1) * %in
-  %den = load i8 addrspace(1) * %den_ptr
+  %num = load i8, i8 addrspace(1) * %in
+  %den = load i8, i8 addrspace(1) * %den_ptr
   %result = urem i8 %num, %den
   store i8 %result, i8 addrspace(1)* %out
   ret void
@@ -154,8 +154,8 @@ define void @urem24_i8(i8 addrspace(1)*
 ; EG: FLT_TO_UINT
 define void @urem24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
   %den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
-  %num = load i16 addrspace(1) * %in, align 2
-  %den = load i16 addrspace(1) * %den_ptr, align 2
+  %num = load i16, i16 addrspace(1) * %in, align 2
+  %den = load i16, i16 addrspace(1) * %den_ptr, align 2
   %result = urem i16 %num, %den
   store i16 %result, i16 addrspace(1)* %out, align 2
   ret void
@@ -173,8 +173,8 @@ define void @urem24_i16(i16 addrspace(1)
 ; EG: FLT_TO_UINT
 define void @urem24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 8
   %den.i24.0 = shl i32 %den, 8
   %num.i24 = lshr i32 %num.i24.0, 8
@@ -193,8 +193,8 @@ define void @urem24_i32(i32 addrspace(1)
 ; EG-NOT: RECIP_IEEE
 define void @urem25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 7
   %den.i24.0 = shl i32 %den, 7
   %num.i24 = lshr i32 %num.i24.0, 7
@@ -213,8 +213,8 @@ define void @urem25_i32(i32 addrspace(1)
 ; EG-NOT: RECIP_IEEE
 define void @test_no_urem24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 8
   %den.i24.0 = shl i32 %den, 7
   %num.i24 = lshr i32 %num.i24.0, 8
@@ -233,8 +233,8 @@ define void @test_no_urem24_i32_1(i32 ad
 ; EG-NOT: RECIP_IEEE
 define void @test_no_urem24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %num = load i32 addrspace(1) * %in, align 4
-  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num = load i32, i32 addrspace(1) * %in, align 4
+  %den = load i32, i32 addrspace(1) * %den_ptr, align 4
   %num.i24.0 = shl i32 %num, 7
   %den.i24.0 = shl i32 %den, 8
   %num.i24 = lshr i32 %num.i24.0, 7

Modified: llvm/trunk/test/CodeGen/R600/uint_to_fp.f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/uint_to_fp.f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/uint_to_fp.f64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/uint_to_fp.f64.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ declare i32 @llvm.r600.read.tidig.x() no
 define void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
-  %val = load i64 addrspace(1)* %gep, align 8
+  %val = load i64, i64 addrspace(1)* %gep, align 8
   %result = uitofp i64 %val to double
   store double %result, double addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/uint_to_fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/uint_to_fp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/uint_to_fp.ll (original)
+++ llvm/trunk/test/CodeGen/R600/uint_to_fp.ll Fri Feb 27 15:17:42 2015
@@ -38,7 +38,7 @@ define void @uint_to_fp_v2i32_to_v2f32(<
 ; SI: v_cvt_f32_u32_e32
 ; SI: s_endpgm
 define void @uint_to_fp_v4i32_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
-  %value = load <4 x i32> addrspace(1) * %in
+  %value = load <4 x i32>, <4 x i32> addrspace(1) * %in
   %result = uitofp <4 x i32> %value to <4 x float>
   store <4 x float> %result, <4 x float> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/unaligned-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/unaligned-load-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/unaligned-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/R600/unaligned-load-store.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
 ; SI: ds_write_b8
 ; SI: s_endpgm
 define void @unaligned_load_store_i16_local(i16 addrspace(3)* %p, i16 addrspace(3)* %r) nounwind {
-  %v = load i16 addrspace(3)* %p, align 1
+  %v = load i16, i16 addrspace(3)* %p, align 1
   store i16 %v, i16 addrspace(3)* %r, align 1
   ret void
 }
@@ -20,7 +20,7 @@ define void @unaligned_load_store_i16_lo
 ; SI: buffer_store_byte
 ; SI: s_endpgm
 define void @unaligned_load_store_i16_global(i16 addrspace(1)* %p, i16 addrspace(1)* %r) nounwind {
-  %v = load i16 addrspace(1)* %p, align 1
+  %v = load i16, i16 addrspace(1)* %p, align 1
   store i16 %v, i16 addrspace(1)* %r, align 1
   ret void
 }
@@ -36,7 +36,7 @@ define void @unaligned_load_store_i16_gl
 ; SI: ds_write_b8
 ; SI: s_endpgm
 define void @unaligned_load_store_i32_local(i32 addrspace(3)* %p, i32 addrspace(3)* %r) nounwind {
-  %v = load i32 addrspace(3)* %p, align 1
+  %v = load i32, i32 addrspace(3)* %p, align 1
   store i32 %v, i32 addrspace(3)* %r, align 1
   ret void
 }
@@ -51,7 +51,7 @@ define void @unaligned_load_store_i32_lo
 ; SI: buffer_store_byte
 ; SI: buffer_store_byte
 define void @unaligned_load_store_i32_global(i32 addrspace(1)* %p, i32 addrspace(1)* %r) nounwind {
-  %v = load i32 addrspace(1)* %p, align 1
+  %v = load i32, i32 addrspace(1)* %p, align 1
   store i32 %v, i32 addrspace(1)* %r, align 1
   ret void
 }
@@ -75,7 +75,7 @@ define void @unaligned_load_store_i32_gl
 ; SI: ds_write_b8
 ; SI: s_endpgm
 define void @unaligned_load_store_i64_local(i64 addrspace(3)* %p, i64 addrspace(3)* %r) {
-  %v = load i64 addrspace(3)* %p, align 1
+  %v = load i64, i64 addrspace(3)* %p, align 1
   store i64 %v, i64 addrspace(3)* %r, align 1
   ret void
 }
@@ -98,7 +98,7 @@ define void @unaligned_load_store_i64_lo
 ; SI: buffer_store_byte
 ; SI: buffer_store_byte
 define void @unaligned_load_store_i64_global(i64 addrspace(1)* %p, i64 addrspace(1)* %r) {
-  %v = load i64 addrspace(1)* %p, align 1
+  %v = load i64, i64 addrspace(1)* %p, align 1
   store i64 %v, i64 addrspace(1)* %r, align 1
   ret void
 }
@@ -145,7 +145,7 @@ define void @unaligned_load_store_i64_gl
 ; SI: ds_write_b8
 ; SI: s_endpgm
 define void @unaligned_load_store_v4i32_local(<4 x i32> addrspace(3)* %p, <4 x i32> addrspace(3)* %r) nounwind {
-  %v = load <4 x i32> addrspace(3)* %p, align 1
+  %v = load <4 x i32>, <4 x i32> addrspace(3)* %p, align 1
   store <4 x i32> %v, <4 x i32> addrspace(3)* %r, align 1
   ret void
 }
@@ -169,7 +169,7 @@ define void @unaligned_load_store_v4i32_
 ; FIXME-SI: buffer_load_ubyte
 ; FIXME-SI: buffer_load_ubyte
 define void @unaligned_load_store_v4i32_global(<4 x i32> addrspace(1)* %p, <4 x i32> addrspace(1)* %r) nounwind {
-  %v = load <4 x i32> addrspace(1)* %p, align 1
+  %v = load <4 x i32>, <4 x i32> addrspace(1)* %p, align 1
   store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 1
   ret void
 }
@@ -178,7 +178,7 @@ define void @unaligned_load_store_v4i32_
 ; SI: ds_read2_b32
 ; SI: s_endpgm
 define void @load_lds_i64_align_4(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
-  %val = load i64 addrspace(3)* %in, align 4
+  %val = load i64, i64 addrspace(3)* %in, align 4
   store i64 %val, i64 addrspace(1)* %out, align 8
   ret void
 }
@@ -188,7 +188,7 @@ define void @load_lds_i64_align_4(i64 ad
 ; SI: s_endpgm
 define void @load_lds_i64_align_4_with_offset(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
   %ptr = getelementptr i64, i64 addrspace(3)* %in, i32 4
-  %val = load i64 addrspace(3)* %ptr, align 4
+  %val = load i64, i64 addrspace(3)* %ptr, align 4
   store i64 %val, i64 addrspace(1)* %out, align 8
   ret void
 }
@@ -201,7 +201,7 @@ define void @load_lds_i64_align_4_with_s
   %ptr = bitcast i64 addrspace(3)* %in to i32 addrspace(3)*
   %ptr255 = getelementptr i32, i32 addrspace(3)* %ptr, i32 255
   %ptri64 = bitcast i32 addrspace(3)* %ptr255 to i64 addrspace(3)*
-  %val = load i64 addrspace(3)* %ptri64, align 4
+  %val = load i64, i64 addrspace(3)* %ptri64, align 4
   store i64 %val, i64 addrspace(1)* %out, align 8
   ret void
 }
@@ -219,7 +219,7 @@ define void @load_lds_i64_align_4_with_s
 ; SI: s_endpgm
 
 define void @load_lds_i64_align_1(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
-  %val = load i64 addrspace(3)* %in, align 1
+  %val = load i64, i64 addrspace(3)* %in, align 1
   store i64 %val, i64 addrspace(1)* %out, align 8
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/unhandled-loop-condition-assertion.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/unhandled-loop-condition-assertion.ll (original)
+++ llvm/trunk/test/CodeGen/R600/unhandled-loop-condition-assertion.ll Fri Feb 27 15:17:42 2015
@@ -20,19 +20,19 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body, %for.body.lr.ph
   %main.addr.011 = phi i8 addrspace(1)* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
   %0 = bitcast i8 addrspace(1)* %main.addr.011 to i32 addrspace(1)*
-  %1 = load i32 addrspace(1)* %0, align 4
+  %1 = load i32, i32 addrspace(1)* %0, align 4
   %add.ptr = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %main_stride
   %2 = bitcast i8 addrspace(1)* %add.ptr to i32 addrspace(1)*
-  %3 = load i32 addrspace(1)* %2, align 4
+  %3 = load i32, i32 addrspace(1)* %2, align 4
   %add.ptr1 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
   %4 = bitcast i8 addrspace(1)* %add.ptr1 to i32 addrspace(1)*
-  %5 = load i32 addrspace(1)* %4, align 4
+  %5 = load i32, i32 addrspace(1)* %4, align 4
   %add.ptr2 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
   %6 = bitcast i8 addrspace(1)* %add.ptr2 to i32 addrspace(1)*
-  %7 = load i32 addrspace(1)* %6, align 4
+  %7 = load i32, i32 addrspace(1)* %6, align 4
   %add.ptr3 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
   %8 = bitcast i8 addrspace(1)* %add.ptr3 to i32 addrspace(1)*
-  %9 = load i32 addrspace(1)* %8, align 4
+  %9 = load i32, i32 addrspace(1)* %8, align 4
   %add.ptr6 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 undef
   br i1 undef, label %for.end, label %for.body
 
@@ -56,19 +56,19 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body, %for.body.lr.ph
   %main.addr.011 = phi i8 addrspace(1)* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
   %0 = bitcast i8 addrspace(1)* %main.addr.011 to i32 addrspace(1)*
-  %1 = load i32 addrspace(1)* %0, align 4
+  %1 = load i32, i32 addrspace(1)* %0, align 4
   %add.ptr = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %main_stride
   %2 = bitcast i8 addrspace(1)* %add.ptr to i32 addrspace(1)*
-  %3 = load i32 addrspace(1)* %2, align 4
+  %3 = load i32, i32 addrspace(1)* %2, align 4
   %add.ptr1 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
   %4 = bitcast i8 addrspace(1)* %add.ptr1 to i32 addrspace(1)*
-  %5 = load i32 addrspace(1)* %4, align 4
+  %5 = load i32, i32 addrspace(1)* %4, align 4
   %add.ptr2 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
   %6 = bitcast i8 addrspace(1)* %add.ptr2 to i32 addrspace(1)*
-  %7 = load i32 addrspace(1)* %6, align 4
+  %7 = load i32, i32 addrspace(1)* %6, align 4
   %add.ptr3 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
   %8 = bitcast i8 addrspace(1)* %add.ptr3 to i32 addrspace(1)*
-  %9 = load i32 addrspace(1)* %8, align 4
+  %9 = load i32, i32 addrspace(1)* %8, align 4
   %add.ptr6 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 undef
   br i1 undef, label %for.end, label %for.body
 
@@ -92,19 +92,19 @@ for.body.lr.ph:
 for.body:                                         ; preds = %for.body, %for.body.lr.ph
   %main.addr.011 = phi i8 addrspace(1)* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
   %0 = bitcast i8 addrspace(1)* %main.addr.011 to i32 addrspace(1)*
-  %1 = load i32 addrspace(1)* %0, align 4
+  %1 = load i32, i32 addrspace(1)* %0, align 4
   %add.ptr = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %main_stride
   %2 = bitcast i8 addrspace(1)* %add.ptr to i32 addrspace(1)*
-  %3 = load i32 addrspace(1)* %2, align 4
+  %3 = load i32, i32 addrspace(1)* %2, align 4
   %add.ptr1 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
   %4 = bitcast i8 addrspace(1)* %add.ptr1 to i32 addrspace(1)*
-  %5 = load i32 addrspace(1)* %4, align 4
+  %5 = load i32, i32 addrspace(1)* %4, align 4
   %add.ptr2 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
   %6 = bitcast i8 addrspace(1)* %add.ptr2 to i32 addrspace(1)*
-  %7 = load i32 addrspace(1)* %6, align 4
+  %7 = load i32, i32 addrspace(1)* %6, align 4
   %add.ptr3 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
   %8 = bitcast i8 addrspace(1)* %add.ptr3 to i32 addrspace(1)*
-  %9 = load i32 addrspace(1)* %8, align 4
+  %9 = load i32, i32 addrspace(1)* %8, align 4
   %add.ptr6 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 undef
   br i1 undef, label %for.end, label %for.body
 

Modified: llvm/trunk/test/CodeGen/R600/unroll.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/unroll.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/unroll.ll (original)
+++ llvm/trunk/test/CodeGen/R600/unroll.ll Fri Feb 27 15:17:42 2015
@@ -31,7 +31,7 @@ loop.inc:
 
 exit:
   %2 = getelementptr [32 x i32], [32 x i32]* %0, i32 0, i32 5
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   store i32 %3, i32 addrspace(1)* %out
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/urem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/urem.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/urem.ll (original)
+++ llvm/trunk/test/CodeGen/R600/urem.ll Fri Feb 27 15:17:42 2015
@@ -11,8 +11,8 @@
 ; EG: CF_END
 define void @test_urem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %a = load i32 addrspace(1)* %in
-  %b = load i32 addrspace(1)* %b_ptr
+  %a = load i32, i32 addrspace(1)* %in
+  %b = load i32, i32 addrspace(1)* %b_ptr
   %result = urem i32 %a, %b
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -27,7 +27,7 @@ define void @test_urem_i32(i32 addrspace
 ; SI: buffer_store_dword
 ; SI: s_endpgm
 define void @test_urem_i32_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
-  %num = load i32 addrspace(1) * %in
+  %num = load i32, i32 addrspace(1) * %in
   %result = urem i32 %num, 7
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -38,8 +38,8 @@ define void @test_urem_i32_7(i32 addrspa
 ; EG: CF_END
 define void @test_urem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
-  %a = load <2 x i32> addrspace(1)* %in
-  %b = load <2 x i32> addrspace(1)* %b_ptr
+  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
+  %b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
   %result = urem <2 x i32> %a, %b
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
@@ -50,8 +50,8 @@ define void @test_urem_v2i32(<2 x i32> a
 ; EG: CF_END
 define void @test_urem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %a = load <4 x i32> addrspace(1)* %in
-  %b = load <4 x i32> addrspace(1)* %b_ptr
+  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
+  %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
   %result = urem <4 x i32> %a, %b
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
@@ -62,8 +62,8 @@ define void @test_urem_v4i32(<4 x i32> a
 ; EG: CF_END
 define void @test_urem_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
   %b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
-  %a = load i64 addrspace(1)* %in
-  %b = load i64 addrspace(1)* %b_ptr
+  %a = load i64, i64 addrspace(1)* %in
+  %b = load i64, i64 addrspace(1)* %b_ptr
   %result = urem i64 %a, %b
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -74,8 +74,8 @@ define void @test_urem_i64(i64 addrspace
 ; EG: CF_END
 define void @test_urem_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
-  %a = load <2 x i64> addrspace(1)* %in
-  %b = load <2 x i64> addrspace(1)* %b_ptr
+  %a = load <2 x i64>, <2 x i64> addrspace(1)* %in
+  %b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
   %result = urem <2 x i64> %a, %b
   store <2 x i64> %result, <2 x i64> addrspace(1)* %out
   ret void
@@ -86,8 +86,8 @@ define void @test_urem_v2i64(<2 x i64> a
 ; EG: CF_END
 define void @test_urem_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
-  %a = load <4 x i64> addrspace(1)* %in
-  %b = load <4 x i64> addrspace(1)* %b_ptr
+  %a = load <4 x i64>, <4 x i64> addrspace(1)* %in
+  %b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
   %result = urem <4 x i64> %a, %b
   store <4 x i64> %result, <4 x i64> addrspace(1)* %out
   ret void

Modified: llvm/trunk/test/CodeGen/R600/usubo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/usubo.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/usubo.ll (original)
+++ llvm/trunk/test/CodeGen/R600/usubo.ll Fri Feb 27 15:17:42 2015
@@ -30,8 +30,8 @@ define void @s_usubo_i32(i32 addrspace(1
 ; FUNC-LABEL: {{^}}v_usubo_i32:
 ; SI: v_subrev_i32_e32
 define void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
-  %a = load i32 addrspace(1)* %aptr, align 4
-  %b = load i32 addrspace(1)* %bptr, align 4
+  %a = load i32, i32 addrspace(1)* %aptr, align 4
+  %b = load i32, i32 addrspace(1)* %bptr, align 4
   %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b) nounwind
   %val = extractvalue { i32, i1 } %usub, 0
   %carry = extractvalue { i32, i1 } %usub, 1
@@ -56,8 +56,8 @@ define void @s_usubo_i64(i64 addrspace(1
 ; SI: v_sub_i32
 ; SI: v_subb_u32
 define void @v_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
-  %a = load i64 addrspace(1)* %aptr, align 4
-  %b = load i64 addrspace(1)* %bptr, align 4
+  %a = load i64, i64 addrspace(1)* %aptr, align 4
+  %b = load i64, i64 addrspace(1)* %bptr, align 4
   %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
   %val = extractvalue { i64, i1 } %usub, 0
   %carry = extractvalue { i64, i1 } %usub, 1

Modified: llvm/trunk/test/CodeGen/R600/v_cndmask.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/v_cndmask.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/v_cndmask.ll (original)
+++ llvm/trunk/test/CodeGen/R600/v_cndmask.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ declare i32 @llvm.r600.read.tidig.x() #1
 define void @v_cnd_nan_nosgpr(float addrspace(1)* %out, i32 %c, float addrspace(1)* %fptr) #0 {
   %idx = call i32 @llvm.r600.read.tidig.x() #1
   %f.gep = getelementptr float, float addrspace(1)* %fptr, i32 %idx
-  %f = load float addrspace(1)* %fptr
+  %f = load float, float addrspace(1)* %fptr
   %setcc = icmp ne i32 %c, 0
   %select = select i1 %setcc, float 0xFFFFFFFFE0000000, float %f
   store float %select, float addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/R600/valu-i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/valu-i1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/valu-i1.ll (original)
+++ llvm/trunk/test/CodeGen/R600/valu-i1.ll Fri Feb 27 15:17:42 2015
@@ -95,7 +95,7 @@ loop:
   %i = phi i32 [%tid, %entry], [%i.inc, %loop]
   %gep.src = getelementptr i32, i32 addrspace(1)* %src, i32 %i
   %gep.dst = getelementptr i32, i32 addrspace(1)* %dst, i32 %i
-  %load = load i32 addrspace(1)* %src
+  %load = load i32, i32 addrspace(1)* %src
   store i32 %load, i32 addrspace(1)* %gep.dst
   %i.inc = add nsw i32 %i, 1
   %cmp = icmp eq i32 %limit, %i.inc
@@ -155,7 +155,7 @@ bb:
   %tmp = tail call i32 @llvm.r600.read.tidig.x() #0
   %tmp4 = sext i32 %tmp to i64
   %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg3, i64 %tmp4
-  %tmp6 = load i32 addrspace(1)* %tmp5, align 4
+  %tmp6 = load i32, i32 addrspace(1)* %tmp5, align 4
   %tmp7 = icmp sgt i32 %tmp6, 0
   %tmp8 = sext i32 %tmp6 to i64
   br i1 %tmp7, label %bb10, label %bb26
@@ -164,9 +164,9 @@ bb10:
   %tmp11 = phi i64 [ %tmp23, %bb20 ], [ 0, %bb ]
   %tmp12 = add nsw i64 %tmp11, %tmp4
   %tmp13 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp12
-  %tmp14 = load i32 addrspace(1)* %tmp13, align 4
+  %tmp14 = load i32, i32 addrspace(1)* %tmp13, align 4
   %tmp15 = getelementptr inbounds i32, i32 addrspace(1)* %arg2, i64 %tmp12
-  %tmp16 = load i32 addrspace(1)* %tmp15, align 4
+  %tmp16 = load i32, i32 addrspace(1)* %tmp15, align 4
   %tmp17 = icmp ne i32 %tmp14, -1
   %tmp18 = icmp ne i32 %tmp16, -1
   %tmp19 = and i1 %tmp17, %tmp18

Modified: llvm/trunk/test/CodeGen/R600/vector-alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/vector-alloca.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/vector-alloca.ll (original)
+++ llvm/trunk/test/CodeGen/R600/vector-alloca.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ entry:
   store i32 2, i32* %z
   store i32 3, i32* %w
   %1 = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 %index
-  %2 = load i32* %1
+  %2 = load i32, i32* %1
   store i32 %2, i32 addrspace(1)* %out
   ret void
 }
@@ -48,7 +48,7 @@ entry:
   %1 = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 %w_index
   store i32 1, i32* %1
   %2 = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 %r_index
-  %3 = load i32* %2
+  %3 = load i32, i32* %2
   store i32 %3, i32 addrspace(1)* %out
   ret void
 }
@@ -71,7 +71,7 @@ entry:
   %1 = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 1
   %2 = bitcast i32* %1 to [4 x i32]*
   %3 = getelementptr [4 x i32], [4 x i32]* %2, i32 0, i32 0
-  %4 = load i32* %3
+  %4 = load i32, i32* %3
   store i32 %4, i32 addrspace(1)* %out
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/vertex-fetch-encoding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/vertex-fetch-encoding.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/vertex-fetch-encoding.ll (original)
+++ llvm/trunk/test/CodeGen/R600/vertex-fetch-encoding.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
 
 define void @vtx_fetch32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
 entry:
-  %0 = load i32 addrspace(1)* %in
+  %0 = load i32, i32 addrspace(1)* %in
   store i32 %0, i32 addrspace(1)* %out
   ret void
 }
@@ -19,7 +19,7 @@ entry:
 
 define void @vtx_fetch128(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
 entry:
-  %0 = load <4 x i32> addrspace(1)* %in
+  %0 = load <4 x i32>, <4 x i32> addrspace(1)* %in
   store <4 x i32> %0, <4 x i32> addrspace(1)* %out
   ret void
 }

Modified: llvm/trunk/test/CodeGen/R600/vselect.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/vselect.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/vselect.ll (original)
+++ llvm/trunk/test/CodeGen/R600/vselect.ll Fri Feb 27 15:17:42 2015
@@ -12,8 +12,8 @@
 
 define void @test_select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1) {
 entry:
-  %0 = load <2 x i32> addrspace(1)* %in0
-  %1 = load <2 x i32> addrspace(1)* %in1
+  %0 = load <2 x i32>, <2 x i32> addrspace(1)* %in0
+  %1 = load <2 x i32>, <2 x i32> addrspace(1)* %in1
   %cmp = icmp ne <2 x i32> %0, %1
   %result = select <2 x i1> %cmp, <2 x i32> %0, <2 x i32> %1
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
@@ -30,8 +30,8 @@ entry:
 
 define void @test_select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in0, <2 x float> addrspace(1)* %in1) {
 entry:
-  %0 = load <2 x float> addrspace(1)* %in0
-  %1 = load <2 x float> addrspace(1)* %in1
+  %0 = load <2 x float>, <2 x float> addrspace(1)* %in0
+  %1 = load <2 x float>, <2 x float> addrspace(1)* %in1
   %cmp = fcmp une <2 x float> %0, %1
   %result = select <2 x i1> %cmp, <2 x float> %0, <2 x float> %1
   store <2 x float> %result, <2 x float> addrspace(1)* %out
@@ -52,8 +52,8 @@ entry:
 
 define void @test_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1) {
 entry:
-  %0 = load <4 x i32> addrspace(1)* %in0
-  %1 = load <4 x i32> addrspace(1)* %in1
+  %0 = load <4 x i32>, <4 x i32> addrspace(1)* %in0
+  %1 = load <4 x i32>, <4 x i32> addrspace(1)* %in1
   %cmp = icmp ne <4 x i32> %0, %1
   %result = select <4 x i1> %cmp, <4 x i32> %0, <4 x i32> %1
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
@@ -68,8 +68,8 @@ entry:
 
 define void @test_select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in0, <4 x float> addrspace(1)* %in1) {
 entry:
-  %0 = load <4 x float> addrspace(1)* %in0
-  %1 = load <4 x float> addrspace(1)* %in1
+  %0 = load <4 x float>, <4 x float> addrspace(1)* %in0
+  %1 = load <4 x float>, <4 x float> addrspace(1)* %in1
   %cmp = fcmp une <4 x float> %0, %1
   %result = select <4 x i1> %cmp, <4 x float> %0, <4 x float> %1
   store <4 x float> %result, <4 x float> addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/R600/vtx-fetch-branch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/vtx-fetch-branch.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/vtx-fetch-branch.ll (original)
+++ llvm/trunk/test/CodeGen/R600/vtx-fetch-branch.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ entry:
   br i1 %0, label %endif, label %if
 
 if:
-  %1 = load i32 addrspace(1)* %in
+  %1 = load i32, i32 addrspace(1)* %in
   br label %endif
 
 endif:

Modified: llvm/trunk/test/CodeGen/R600/vtx-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/vtx-schedule.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/vtx-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/R600/vtx-schedule.ll Fri Feb 27 15:17:42 2015
@@ -11,8 +11,8 @@
 ; CHECK: VTX_READ_32 [[IN1:T[0-9]+\.X]], [[IN1]], 0
 define void @test(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* addrspace(1)* nocapture %in0) {
 entry:
-  %0 = load i32 addrspace(1)* addrspace(1)* %in0
-  %1 = load i32 addrspace(1)* %0
+  %0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %in0
+  %1 = load i32, i32 addrspace(1)* %0
   store i32 %1, i32 addrspace(1)* %out
   ret void
 }

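The vtx-schedule.ll hunk just above is one of the few spots where the explicit result type does more than restate the obvious: %0 loads a pointer value through a pointer-to-pointer, and the new syntax spells out that the loaded value is itself an i32 addrspace(1)*. A minimal sketch of that double-indirect shape (hypothetical function name, same operand types as the test):

  ; Hypothetical sketch; @deref_twice is not part of this patch.
  define void @deref_twice(i32 addrspace(1)* %out, i32 addrspace(1)* addrspace(1)* %pp) {
    ; First load yields a global pointer; its type now leads the operand list.
    %p = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %pp
    ; Second load yields the pointed-to i32.
    %v = load i32, i32 addrspace(1)* %p
    store i32 %v, i32 addrspace(1)* %out
    ret void
  }
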
Modified: llvm/trunk/test/CodeGen/R600/wait.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/wait.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/wait.ll (original)
+++ llvm/trunk/test/CodeGen/R600/wait.ll Fri Feb 27 15:17:42 2015
@@ -9,16 +9,16 @@
 define void @main(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, <16 x i8> addrspace(2)* inreg %arg3, <16 x i8> addrspace(2)* inreg %arg4, i32 inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, float addrspace(2)* inreg %constptr) #0 {
 main_body:
   %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg3, i32 0
-  %tmp10 = load <16 x i8> addrspace(2)* %tmp, !tbaa !0
+  %tmp10 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
   %tmp11 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp10, i32 0, i32 %arg6)
   %tmp12 = extractelement <4 x float> %tmp11, i32 0
   %tmp13 = extractelement <4 x float> %tmp11, i32 1
   call void @llvm.AMDGPU.barrier.global() #1
   %tmp14 = extractelement <4 x float> %tmp11, i32 2
 ;  %tmp15 = extractelement <4 x float> %tmp11, i32 3
-  %tmp15 = load float addrspace(2)* %constptr, align 4 ; Force waiting for expcnt and lgkmcnt
+  %tmp15 = load float, float addrspace(2)* %constptr, align 4 ; Force waiting for expcnt and lgkmcnt
   %tmp16 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg3, i32 1
-  %tmp17 = load <16 x i8> addrspace(2)* %tmp16, !tbaa !0
+  %tmp17 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp16, !tbaa !0
   %tmp18 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp17, i32 0, i32 %arg6)
   %tmp19 = extractelement <4 x float> %tmp18, i32 0
   %tmp20 = extractelement <4 x float> %tmp18, i32 1

Modified: llvm/trunk/test/CodeGen/R600/xor.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/xor.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/xor.ll (original)
+++ llvm/trunk/test/CodeGen/R600/xor.ll Fri Feb 27 15:17:42 2015
@@ -11,8 +11,8 @@
 ; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
 
 define void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1) {
-  %a = load <2 x i32> addrspace(1) * %in0
-  %b = load <2 x i32> addrspace(1) * %in1
+  %a = load <2 x i32>, <2 x i32> addrspace(1) * %in0
+  %b = load <2 x i32>, <2 x i32> addrspace(1) * %in1
   %result = xor <2 x i32> %a, %b
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
@@ -30,8 +30,8 @@ define void @xor_v2i32(<2 x i32> addrspa
 ; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
 
 define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1) {
-  %a = load <4 x i32> addrspace(1) * %in0
-  %b = load <4 x i32> addrspace(1) * %in1
+  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in0
+  %b = load <4 x i32>, <4 x i32> addrspace(1) * %in1
   %result = xor <4 x i32> %a, %b
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
@@ -47,8 +47,8 @@ define void @xor_v4i32(<4 x i32> addrspa
 ; SI: buffer_store_dword [[RESULT]]
 ; SI: s_endpgm
 define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
-  %a = load float addrspace(1) * %in0
-  %b = load float addrspace(1) * %in1
+  %a = load float, float addrspace(1) * %in0
+  %b = load float, float addrspace(1) * %in1
   %acmp = fcmp oge float %a, 0.000000e+00
   %bcmp = fcmp oge float %b, 1.000000e+00
   %xor = xor i1 %acmp, %bcmp
@@ -64,8 +64,8 @@ define void @xor_i1(float addrspace(1)*
 ; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[XOR]]
 ; SI: buffer_store_byte [[RESULT]]
 define void @v_xor_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) {
-  %a = load i1 addrspace(1)* %in0
-  %b = load i1 addrspace(1)* %in1
+  %a = load i1, i1 addrspace(1)* %in0
+  %b = load i1, i1 addrspace(1)* %in1
   %xor = xor i1 %a, %b
   store i1 %xor, i1 addrspace(1)* %out
   ret void
@@ -74,8 +74,8 @@ define void @v_xor_i1(i1 addrspace(1)* %
 ; FUNC-LABEL: {{^}}vector_xor_i32:
 ; SI: v_xor_b32_e32
 define void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
-  %a = load i32 addrspace(1)* %in0
-  %b = load i32 addrspace(1)* %in1
+  %a = load i32, i32 addrspace(1)* %in0
+  %b = load i32, i32 addrspace(1)* %in1
   %result = xor i32 %a, %b
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -100,8 +100,8 @@ define void @scalar_not_i32(i32 addrspac
 ; FUNC-LABEL: {{^}}vector_not_i32:
 ; SI: v_not_b32
 define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
-  %a = load i32 addrspace(1)* %in0
-  %b = load i32 addrspace(1)* %in1
+  %a = load i32, i32 addrspace(1)* %in0
+  %b = load i32, i32 addrspace(1)* %in1
   %result = xor i32 %a, -1
   store i32 %result, i32 addrspace(1)* %out
   ret void
@@ -112,8 +112,8 @@ define void @vector_not_i32(i32 addrspac
 ; SI: v_xor_b32_e32
 ; SI: s_endpgm
 define void @vector_xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
-  %a = load i64 addrspace(1)* %in0
-  %b = load i64 addrspace(1)* %in1
+  %a = load i64, i64 addrspace(1)* %in0
+  %b = load i64, i64 addrspace(1)* %in1
   %result = xor i64 %a, %b
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -140,8 +140,8 @@ define void @scalar_not_i64(i64 addrspac
 ; SI: v_not_b32
 ; SI: v_not_b32
 define void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
-  %a = load i64 addrspace(1)* %in0
-  %b = load i64 addrspace(1)* %in1
+  %a = load i64, i64 addrspace(1)* %in0
+  %b = load i64, i64 addrspace(1)* %in1
   %result = xor i64 %a, -1
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -163,7 +163,7 @@ if:
   br label %endif
 
 else:
-  %2 = load i64 addrspace(1)* %in
+  %2 = load i64, i64 addrspace(1)* %in
   br label %endif
 
 endif:

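For readers skimming the hunks rather than applying them: every change in this section is the same mechanical edit, in which the load instruction names its result type explicitly before the pointer operand instead of leaving it implied by the pointee type. A minimal before/after sketch, with a hypothetical function name and shapes taken from the R600 tests above:

  ; Before (result type implied by the pointer operand):
  ;   %val = load i32 addrspace(1)* %in, align 4
  ; After (result type stated explicitly, then the pointer):
  ;   %val = load i32, i32 addrspace(1)* %in, align 4
  ; Hypothetical sketch; @copy_word is not part of this patch.
  define void @copy_word(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
    %val = load i32, i32 addrspace(1)* %in, align 4
    store i32 %val, i32 addrspace(1)* %out, align 4
    ret void
  }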