[llvm] r230794 - [opaque pointer type] Add textual IR support for explicit type parameter to load instruction
David Blaikie
dblaikie at gmail.com
Fri Feb 27 13:18:04 PST 2015
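
Every hunk below is the same mechanical rewrite: the textual form of the load
instruction now names its result type as an explicit first parameter instead of
deriving it from the pointee type of the pointer operand. An illustrative
before/after (hypothetical names, not taken from any test below):

  %val = load i32* %ptr, align 4        ; old form: result type implied by i32*
  %val = load i32, i32* %ptr, align 4   ; new form: result type stated explicitly

The same rewrite applies to volatile loads (load volatile i32, i32* ...) and to
loads of pointer values (load i8*, i8** ...), as the diffs below show.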

Modified: llvm/trunk/test/CodeGen/X86/2008-02-18-TailMergingBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-02-18-TailMergingBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-02-18-TailMergingBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-02-18-TailMergingBug.ll Fri Feb 27 15:17:42 2015
@@ -6,13 +6,13 @@
define void @minmax(float* %result) nounwind optsize {
entry:
- %tmp2 = load float* %result, align 4 ; <float> [#uses=6]
+ %tmp2 = load float, float* %result, align 4 ; <float> [#uses=6]
%tmp4 = getelementptr float, float* %result, i32 2 ; <float*> [#uses=5]
- %tmp5 = load float* %tmp4, align 4 ; <float> [#uses=10]
+ %tmp5 = load float, float* %tmp4, align 4 ; <float> [#uses=10]
%tmp7 = getelementptr float, float* %result, i32 4 ; <float*> [#uses=5]
- %tmp8 = load float* %tmp7, align 4 ; <float> [#uses=8]
+ %tmp8 = load float, float* %tmp7, align 4 ; <float> [#uses=8]
%tmp10 = getelementptr float, float* %result, i32 6 ; <float*> [#uses=3]
- %tmp11 = load float* %tmp10, align 4 ; <float> [#uses=8]
+ %tmp11 = load float, float* %tmp10, align 4 ; <float> [#uses=8]
%tmp12 = fcmp olt float %tmp8, %tmp11 ; <i1> [#uses=5]
br i1 %tmp12, label %bb, label %bb21
@@ -59,7 +59,7 @@ bb103: ; preds = %bb80, %bb72
bb111: ; preds = %bb103, %bb80, %bb72, %bb50, %bb40, %bb26
%iftmp.0.0.in = phi float* [ %tmp10, %bb103 ], [ %result, %bb26 ], [ %result, %bb40 ], [ %result, %bb50 ], [ %tmp4.mux, %bb80 ], [ %tmp4.mux787, %bb72 ] ; <float*> [#uses=1]
- %iftmp.0.0 = load float* %iftmp.0.0.in ; <float> [#uses=1]
+ %iftmp.0.0 = load float, float* %iftmp.0.0.in ; <float> [#uses=1]
%tmp125 = fcmp ogt float %tmp8, %tmp11 ; <i1> [#uses=5]
br i1 %tmp125, label %bb128, label %bb136
@@ -106,15 +106,15 @@ bb218: ; preds = %bb195, %bb187
bb226: ; preds = %bb218, %bb195, %bb187, %bb165, %bb155, %bb141
%iftmp.7.0.in = phi float* [ %tmp10, %bb218 ], [ %result, %bb141 ], [ %result, %bb155 ], [ %result, %bb165 ], [ %tmp4.mux789, %bb195 ], [ %tmp4.mux791, %bb187 ] ; <float*> [#uses=1]
- %iftmp.7.0 = load float* %iftmp.7.0.in ; <float> [#uses=1]
+ %iftmp.7.0 = load float, float* %iftmp.7.0.in ; <float> [#uses=1]
%tmp229 = getelementptr float, float* %result, i32 1 ; <float*> [#uses=7]
- %tmp230 = load float* %tmp229, align 4 ; <float> [#uses=6]
+ %tmp230 = load float, float* %tmp229, align 4 ; <float> [#uses=6]
%tmp232 = getelementptr float, float* %result, i32 3 ; <float*> [#uses=5]
- %tmp233 = load float* %tmp232, align 4 ; <float> [#uses=10]
+ %tmp233 = load float, float* %tmp232, align 4 ; <float> [#uses=10]
%tmp235 = getelementptr float, float* %result, i32 5 ; <float*> [#uses=5]
- %tmp236 = load float* %tmp235, align 4 ; <float> [#uses=8]
+ %tmp236 = load float, float* %tmp235, align 4 ; <float> [#uses=8]
%tmp238 = getelementptr float, float* %result, i32 7 ; <float*> [#uses=3]
- %tmp239 = load float* %tmp238, align 4 ; <float> [#uses=8]
+ %tmp239 = load float, float* %tmp238, align 4 ; <float> [#uses=8]
%tmp240 = fcmp olt float %tmp236, %tmp239 ; <i1> [#uses=5]
br i1 %tmp240, label %bb243, label %bb251
@@ -161,7 +161,7 @@ bb333: ; preds = %bb310, %bb302
bb341: ; preds = %bb333, %bb310, %bb302, %bb280, %bb270, %bb256
%iftmp.14.0.in = phi float* [ %tmp238, %bb333 ], [ %tmp229, %bb280 ], [ %tmp229, %bb270 ], [ %tmp229, %bb256 ], [ %tmp232.mux, %bb310 ], [ %tmp232.mux794, %bb302 ] ; <float*> [#uses=1]
- %iftmp.14.0 = load float* %iftmp.14.0.in ; <float> [#uses=1]
+ %iftmp.14.0 = load float, float* %iftmp.14.0.in ; <float> [#uses=1]
%tmp355 = fcmp ogt float %tmp236, %tmp239 ; <i1> [#uses=5]
br i1 %tmp355, label %bb358, label %bb366
@@ -208,7 +208,7 @@ bb448: ; preds = %bb425, %bb417
bb456: ; preds = %bb448, %bb425, %bb417, %bb395, %bb385, %bb371
%iftmp.21.0.in = phi float* [ %tmp238, %bb448 ], [ %tmp229, %bb395 ], [ %tmp229, %bb385 ], [ %tmp229, %bb371 ], [ %tmp232.mux796, %bb425 ], [ %tmp232.mux798, %bb417 ] ; <float*> [#uses=1]
- %iftmp.21.0 = load float* %iftmp.21.0.in ; <float> [#uses=1]
+ %iftmp.21.0 = load float, float* %iftmp.21.0.in ; <float> [#uses=1]
%tmp458459 = fpext float %iftmp.21.0 to double ; <double> [#uses=1]
%tmp460461 = fpext float %iftmp.7.0 to double ; <double> [#uses=1]
%tmp462463 = fpext float %iftmp.14.0 to double ; <double> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ target triple = "i386-apple-darwin8"
define void @test() nounwind {
entry:
- %tmp = load i32* @pixels, align 4 ; <i32> [#uses=1]
+ %tmp = load i32, i32* @pixels, align 4 ; <i32> [#uses=1]
%tmp1 = tail call i32 asm sideeffect "a: $0 $1", "=r,0,~{dirflag},~{fpsr},~{flags},~{ax}"( i32 %tmp ) nounwind ; <i32> [#uses=1]
store i32 %tmp1, i32* @pixels, align 4
ret void

Modified: llvm/trunk/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll Fri Feb 27 15:17:42 2015
@@ -13,38 +13,38 @@ entry:
store i8* %src, i8** %src_addr
store i32 %dst_stride, i32* %dst_stride_addr
store i32 %src_stride, i32* %src_stride_addr
- %tmp = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
+ %tmp = load i8*, i8** %dst_addr, align 4 ; <i8*> [#uses=1]
%tmp1 = getelementptr i8, i8* %tmp, i32 0 ; <i8*> [#uses=1]
%tmp12 = bitcast i8* %tmp1 to i32* ; <i32*> [#uses=1]
- %tmp3 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
- %tmp4 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
+ %tmp3 = load i8*, i8** %dst_addr, align 4 ; <i8*> [#uses=1]
+ %tmp4 = load i32, i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
%tmp5 = getelementptr i8, i8* %tmp3, i32 %tmp4 ; <i8*> [#uses=1]
%tmp56 = bitcast i8* %tmp5 to i32* ; <i32*> [#uses=1]
- %tmp7 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
%tmp8 = mul i32 %tmp7, 2 ; <i32> [#uses=1]
- %tmp9 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
+ %tmp9 = load i8*, i8** %dst_addr, align 4 ; <i8*> [#uses=1]
%tmp10 = getelementptr i8, i8* %tmp9, i32 %tmp8 ; <i8*> [#uses=1]
%tmp1011 = bitcast i8* %tmp10 to i32* ; <i32*> [#uses=1]
- %tmp13 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
+ %tmp13 = load i32, i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
%tmp14 = mul i32 %tmp13, 3 ; <i32> [#uses=1]
- %tmp15 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
+ %tmp15 = load i8*, i8** %dst_addr, align 4 ; <i8*> [#uses=1]
%tmp16 = getelementptr i8, i8* %tmp15, i32 %tmp14 ; <i8*> [#uses=1]
%tmp1617 = bitcast i8* %tmp16 to i32* ; <i32*> [#uses=1]
- %tmp18 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
+ %tmp18 = load i8*, i8** %src_addr, align 4 ; <i8*> [#uses=1]
%tmp19 = getelementptr i8, i8* %tmp18, i32 0 ; <i8*> [#uses=1]
%tmp1920 = bitcast i8* %tmp19 to i32* ; <i32*> [#uses=1]
- %tmp21 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
- %tmp22 = load i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
+ %tmp21 = load i8*, i8** %src_addr, align 4 ; <i8*> [#uses=1]
+ %tmp22 = load i32, i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
%tmp23 = getelementptr i8, i8* %tmp21, i32 %tmp22 ; <i8*> [#uses=1]
%tmp2324 = bitcast i8* %tmp23 to i32* ; <i32*> [#uses=1]
- %tmp25 = load i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
+ %tmp25 = load i32, i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
%tmp26 = mul i32 %tmp25, 2 ; <i32> [#uses=1]
- %tmp27 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
+ %tmp27 = load i8*, i8** %src_addr, align 4 ; <i8*> [#uses=1]
%tmp28 = getelementptr i8, i8* %tmp27, i32 %tmp26 ; <i8*> [#uses=1]
%tmp2829 = bitcast i8* %tmp28 to i32* ; <i32*> [#uses=1]
- %tmp30 = load i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
+ %tmp30 = load i32, i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
%tmp31 = mul i32 %tmp30, 3 ; <i32> [#uses=1]
- %tmp32 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
+ %tmp32 = load i8*, i8** %src_addr, align 4 ; <i8*> [#uses=1]
%tmp33 = getelementptr i8, i8* %tmp32, i32 %tmp31 ; <i8*> [#uses=1]
%tmp3334 = bitcast i8* %tmp33 to i32* ; <i32*> [#uses=1]
call void asm sideeffect "movd $4, %mm0 \0A\09movd $5, %mm1 \0A\09movd $6, %mm2 \0A\09movd $7, %mm3 \0A\09punpcklbw %mm1, %mm0 \0A\09punpcklbw %mm3, %mm2 \0A\09movq %mm0, %mm1 \0A\09punpcklwd %mm2, %mm0 \0A\09punpckhwd %mm2, %mm1 \0A\09movd %mm0, $0 \0A\09punpckhdq %mm0, %mm0 \0A\09movd %mm0, $1 \0A\09movd %mm1, $2 \0A\09punpckhdq %mm1, %mm1 \0A\09movd %mm1, $3 \0A\09", "=*m,=*m,=*m,=*m,*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"( i32* %tmp12, i32* %tmp56, i32* %tmp1011, i32* %tmp1617, i32* %tmp1920, i32* %tmp2324, i32* %tmp2829, i32* %tmp3334 ) nounwind

Modified: llvm/trunk/test/CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -11,10 +11,10 @@ entry:
bb53: ; preds = %entry
%tmp55 = call %struct.YY** @AA( i64 1, %struct.XX* %uen ) ; <%struct.YY**> [#uses=3]
- %tmp2728128 = load %struct.XX** null ; <%struct.XX*> [#uses=1]
- %tmp61 = load %struct.YY** %tmp55, align 8 ; <%struct.YY*> [#uses=1]
+ %tmp2728128 = load %struct.XX*, %struct.XX** null ; <%struct.XX*> [#uses=1]
+ %tmp61 = load %struct.YY*, %struct.YY** %tmp55, align 8 ; <%struct.YY*> [#uses=1]
%tmp62 = getelementptr %struct.YY, %struct.YY* %tmp61, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp63 = load i64* %tmp62, align 8 ; <i64> [#uses=1]
+ %tmp63 = load i64, i64* %tmp62, align 8 ; <i64> [#uses=1]
%tmp6566 = zext i16 %tmp45 to i64 ; <i64> [#uses=1]
%tmp67 = shl i64 %tmp6566, 1 ; <i64> [#uses=1]
call void @BB( %struct.YY** %tmp55, i64 %tmp67, i8 signext 0, %struct.XX* %uen )
@@ -30,7 +30,7 @@ bb70: ; preds = %bb119, %bb70.preheader
%tmp.135 = trunc i64 %tmp63 to i32 ; <i32> [#uses=1]
%tmp136 = shl i32 %indvar133, 1 ; <i32> [#uses=1]
%DD = add i32 %tmp136, %tmp.135 ; <i32> [#uses=1]
- %tmp73 = load %struct.ZZ*** %tmp72, align 8 ; <%struct.ZZ**> [#uses=0]
+ %tmp73 = load %struct.ZZ**, %struct.ZZ*** %tmp72, align 8 ; <%struct.ZZ**> [#uses=0]
br i1 false, label %bb119, label %bb77
bb77: ; preds = %bb70

Modified: llvm/trunk/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll Fri Feb 27 15:17:42 2015
@@ -34,7 +34,7 @@ bb35: ; preds = %bb24, %entry
%tmp56 = add i32 %tmp55, -1 ; <i32> [#uses=1]
%tmp5657 = sitofp i32 %tmp56 to double ; <double> [#uses=1]
%tmp15.i49 = getelementptr %struct.Lattice, %struct.Lattice* %this, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp16.i50 = load double* %tmp15.i49, align 4 ; <double> [#uses=1]
+ %tmp16.i50 = load double, double* %tmp15.i49, align 4 ; <double> [#uses=1]
%tmp17.i = fmul double %tmp5657, %tmp16.i50 ; <double> [#uses=1]
%tmp20.i39 = fadd double %tmp17.i, %tmp17.i63 ; <double> [#uses=1]
%tmp20.i23 = fadd double %tmp20.i39, %tmp17.i76 ; <double> [#uses=1]
@@ -47,11 +47,11 @@ bb58.preheader: ; preds = %bb35
bb58: ; preds = %bb58, %bb58.preheader
%tmp20.i7 = getelementptr %struct.CompAtom, %struct.CompAtom* %d, i32 0, i32 2 ; <i32*> [#uses=2]
%tmp25.i = getelementptr %struct.CompAtom, %struct.CompAtom* %tmp1819, i32 0, i32 2 ; <i32*> [#uses=2]
- %tmp74.i = load i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
+ %tmp74.i = load i32, i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
%tmp82.i = and i32 %tmp74.i, 134217728 ; <i32> [#uses=1]
%tmp85.i = or i32 0, %tmp82.i ; <i32> [#uses=1]
store i32 %tmp85.i, i32* %tmp25.i, align 1
- %tmp88.i = load i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
+ %tmp88.i = load i32, i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
%tmp95.i = and i32 %tmp88.i, -268435456 ; <i32> [#uses=1]
%tmp97.i = or i32 0, %tmp95.i ; <i32> [#uses=1]
store i32 %tmp97.i, i32* %tmp25.i, align 1

Modified: llvm/trunk/test/CodeGen/X86/2008-03-07-APIntBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-03-07-APIntBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-03-07-APIntBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-03-07-APIntBug.ll Fri Feb 27 15:17:42 2015
@@ -18,16 +18,16 @@ newFuncRoot:
bb1233.exitStub: ; preds = %bb1163
ret void
bb1163: ; preds = %newFuncRoot
- %tmp1164 = load %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
+ %tmp1164 = load %struct.rec*, %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
%tmp1165 = getelementptr %struct.rec, %struct.rec* %tmp1164, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp11651166 = bitcast %struct.head_type* %tmp1165 to %struct.symbol_type* ; <%struct.symbol_type*> [#uses=1]
%tmp1167 = getelementptr %struct.symbol_type, %struct.symbol_type* %tmp11651166, i32 0, i32 3 ; <%struct.rec**> [#uses=1]
- %tmp1168 = load %struct.rec** %tmp1167, align 1 ; <%struct.rec*> [#uses=2]
- %tmp1169 = load %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
+ %tmp1168 = load %struct.rec*, %struct.rec** %tmp1167, align 1 ; <%struct.rec*> [#uses=2]
+ %tmp1169 = load %struct.rec*, %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
%tmp1170 = getelementptr %struct.rec, %struct.rec* %tmp1169, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp11701171 = bitcast %struct.head_type* %tmp1170 to %struct.symbol_type* ; <%struct.symbol_type*> [#uses=1]
%tmp1172 = getelementptr %struct.symbol_type, %struct.symbol_type* %tmp11701171, i32 0, i32 3 ; <%struct.rec**> [#uses=1]
- %tmp1173 = load %struct.rec** %tmp1172, align 1 ; <%struct.rec*> [#uses=2]
+ %tmp1173 = load %struct.rec*, %struct.rec** %tmp1172, align 1 ; <%struct.rec*> [#uses=2]
%tmp1174 = getelementptr %struct.rec, %struct.rec* %tmp1173, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp11741175 = bitcast %struct.head_type* %tmp1174 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
%tmp1176 = getelementptr %struct.word_type, %struct.word_type* %tmp11741175, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
@@ -35,7 +35,7 @@ bb1163: ; preds = %newFuncRoot
%tmp11771178 = bitcast { i16, i8, i8 }* %tmp1177 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
%tmp1179 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp11771178, i32 0, i32 2 ; <i8*> [#uses=2]
%mask1180 = and i8 1, 1 ; <i8> [#uses=2]
- %tmp1181 = load i8* %tmp1179, align 1 ; <i8> [#uses=1]
+ %tmp1181 = load i8, i8* %tmp1179, align 1 ; <i8> [#uses=1]
%tmp1182 = shl i8 %mask1180, 7 ; <i8> [#uses=1]
%tmp1183 = and i8 %tmp1181, 127 ; <i8> [#uses=1]
%tmp1184 = or i8 %tmp1183, %tmp1182 ; <i8> [#uses=1]
@@ -47,7 +47,7 @@ bb1163: ; preds = %newFuncRoot
%tmp1189 = getelementptr %struct.SECOND_UNION, %struct.SECOND_UNION* %tmp1188, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
%tmp11891190 = bitcast { i16, i8, i8 }* %tmp1189 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
%tmp1191 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp11891190, i32 0, i32 2 ; <i8*> [#uses=1]
- %tmp1192 = load i8* %tmp1191, align 1 ; <i8> [#uses=1]
+ %tmp1192 = load i8, i8* %tmp1191, align 1 ; <i8> [#uses=1]
%tmp1193 = lshr i8 %tmp1192, 7 ; <i8> [#uses=1]
%mask1194 = and i8 %tmp1193, 1 ; <i8> [#uses=2]
%mask1195 = and i8 %mask1194, 1 ; <i8> [#uses=0]
@@ -58,7 +58,7 @@ bb1163: ; preds = %newFuncRoot
%tmp11991200 = bitcast { i16, i8, i8 }* %tmp1199 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
%tmp1201 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp11991200, i32 0, i32 1 ; <i8*> [#uses=2]
%mask1202 = and i8 %mask1194, 1 ; <i8> [#uses=2]
- %tmp1203 = load i8* %tmp1201, align 1 ; <i8> [#uses=1]
+ %tmp1203 = load i8, i8* %tmp1201, align 1 ; <i8> [#uses=1]
%tmp1204 = shl i8 %mask1202, 1 ; <i8> [#uses=1]
%tmp1205 = and i8 %tmp1204, 2 ; <i8> [#uses=1]
%tmp1206 = and i8 %tmp1203, -3 ; <i8> [#uses=1]
@@ -71,12 +71,12 @@ bb1163: ; preds = %newFuncRoot
%tmp1212 = getelementptr %struct.SECOND_UNION, %struct.SECOND_UNION* %tmp1211, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
%tmp12121213 = bitcast { i16, i8, i8 }* %tmp1212 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
%tmp1214 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp12121213, i32 0, i32 1 ; <i8*> [#uses=1]
- %tmp1215 = load i8* %tmp1214, align 1 ; <i8> [#uses=1]
+ %tmp1215 = load i8, i8* %tmp1214, align 1 ; <i8> [#uses=1]
%tmp1216 = shl i8 %tmp1215, 6 ; <i8> [#uses=1]
%tmp1217 = lshr i8 %tmp1216, 7 ; <i8> [#uses=1]
%mask1218 = and i8 %tmp1217, 1 ; <i8> [#uses=2]
%mask1219 = and i8 %mask1218, 1 ; <i8> [#uses=0]
- %tmp1220 = load %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
+ %tmp1220 = load %struct.rec*, %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
%tmp1221 = getelementptr %struct.rec, %struct.rec* %tmp1220, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp12211222 = bitcast %struct.head_type* %tmp1221 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
%tmp1223 = getelementptr %struct.word_type, %struct.word_type* %tmp12211222, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
@@ -84,7 +84,7 @@ bb1163: ; preds = %newFuncRoot
%tmp12241225 = bitcast { i16, i8, i8 }* %tmp1224 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
%tmp1226 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp12241225, i32 0, i32 1 ; <i8*> [#uses=2]
%mask1227 = and i8 %mask1218, 1 ; <i8> [#uses=2]
- %tmp1228 = load i8* %tmp1226, align 1 ; <i8> [#uses=1]
+ %tmp1228 = load i8, i8* %tmp1226, align 1 ; <i8> [#uses=1]
%tmp1229 = and i8 %mask1227, 1 ; <i8> [#uses=1]
%tmp1230 = and i8 %tmp1228, -2 ; <i8> [#uses=1]
%tmp1231 = or i8 %tmp1230, %tmp1229 ; <i8> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ declare fastcc i8* @w_addchar(i8*, i32*,
define x86_stdcallcc i32 @parse_backslash(i8** inreg %word, i32* inreg %word_length, i32* inreg %max_length) nounwind {
entry:
- %tmp6 = load i8* null, align 1 ; <i8> [#uses=1]
+ %tmp6 = load i8, i8* null, align 1 ; <i8> [#uses=1]
br label %bb13
bb13: ; preds = %entry
%tmp26 = call fastcc i8* @w_addchar( i8* null, i32* %word_length, i32* %max_length, i8 signext %tmp6 ) nounwind ; <i8*> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll Fri Feb 27 15:17:42 2015
@@ -16,12 +16,12 @@ define i32 @foo() {
entry:
%retval = alloca i32 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = load %struct.__res_state** @__libc_resp, align 4 ; <%struct.__res_state*> [#uses=1]
+ %tmp = load %struct.__res_state*, %struct.__res_state** @__libc_resp, align 4 ; <%struct.__res_state*> [#uses=1]
%tmp1 = getelementptr %struct.__res_state, %struct.__res_state* %tmp, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 0, i32* %tmp1, align 4
br label %return
return: ; preds = %entry
- %retval2 = load i32* %retval ; <i32> [#uses=1]
+ %retval2 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval2
}
@@ -31,11 +31,11 @@ define i32 @bar() {
entry:
%retval = alloca i32 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = load %struct.__res_state** @__libc_resp, align 4 ; <%struct.__res_state*> [#uses=1]
+ %tmp = load %struct.__res_state*, %struct.__res_state** @__libc_resp, align 4 ; <%struct.__res_state*> [#uses=1]
%tmp1 = getelementptr %struct.__res_state, %struct.__res_state* %tmp, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %tmp1, align 4
br label %return
return: ; preds = %entry
- %retval2 = load i32* %retval ; <i32> [#uses=1]
+ %retval2 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval2
}

Modified: llvm/trunk/test/CodeGen/X86/2008-03-14-SpillerCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-03-14-SpillerCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-03-14-SpillerCrash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-03-14-SpillerCrash.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
define i64 @____wcstoll_l_internal(i32* %nptr, i32** %endptr, i32 %base, i32 %group, %struct.__locale_struct* %loc) nounwind {
entry:
- %tmp27 = load i32* null, align 4 ; <i32> [#uses=1]
+ %tmp27 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%tmp83 = getelementptr i32, i32* %nptr, i32 1 ; <i32*> [#uses=1]
%tmp233 = add i32 0, -48 ; <i32> [#uses=1]
br label %bb271.us
@@ -32,7 +32,7 @@ bb374.outer: ; preds = %bb311.split, %b
br label %bb374.us
bb374.us: ; preds = %bb314.us, %bb374.outer
%tmp376.us = getelementptr i32, i32* %s.5.ph, i32 0 ; <i32*> [#uses=3]
- %tmp378.us = load i32* %tmp376.us, align 4 ; <i32> [#uses=2]
+ %tmp378.us = load i32, i32* %tmp376.us, align 4 ; <i32> [#uses=2]
%tmp302.us = icmp eq i32* %tmp376.us, %tmp83 ; <i1> [#uses=1]
%bothcond484.us = or i1 false, %tmp302.us ; <i1> [#uses=1]
br i1 %bothcond484.us, label %bb383, label %bb305.us

Modified: llvm/trunk/test/CodeGen/X86/2008-03-23-DarwinAsmComments.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-03-23-DarwinAsmComments.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-03-23-DarwinAsmComments.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-03-23-DarwinAsmComments.ll Fri Feb 27 15:17:42 2015
@@ -13,12 +13,12 @@ entry:
cond_true: ; preds = %entry
%tmp1415 = shl i16 %param, 3 ; <i16> [#uses=1]
%tmp17 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
- %tmp18 = load %struct.ComponentParameters** %tmp17, align 8 ; <%struct.ComponentParameters*> [#uses=1]
+ %tmp18 = load %struct.ComponentParameters*, %struct.ComponentParameters** %tmp17, align 8 ; <%struct.ComponentParameters*> [#uses=1]
%tmp1920 = bitcast %struct.ComponentParameters* %tmp18 to i8* ; <i8*> [#uses=1]
%tmp212223 = sext i16 %tmp1415 to i64 ; <i64> [#uses=1]
%tmp24 = getelementptr i8, i8* %tmp1920, i64 %tmp212223 ; <i8*> [#uses=1]
%tmp2425 = bitcast i8* %tmp24 to i64* ; <i64*> [#uses=1]
- %tmp28 = load i64* %tmp2425, align 8 ; <i64> [#uses=1]
+ %tmp28 = load i64, i64* %tmp2425, align 8 ; <i64> [#uses=1]
%tmp2829 = inttoptr i64 %tmp28 to i32* ; <i32*> [#uses=1]
%tmp31 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 2 ; <i32**> [#uses=1]
store i32* %tmp2829, i32** %tmp31, align 8
@@ -27,18 +27,18 @@ cond_true: ; preds = %entry
cond_next: ; preds = %cond_true, %entry
%tmp4243 = shl i16 %param, 3 ; <i16> [#uses=1]
%tmp46 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
- %tmp47 = load %struct.ComponentParameters** %tmp46, align 8 ; <%struct.ComponentParameters*> [#uses=1]
+ %tmp47 = load %struct.ComponentParameters*, %struct.ComponentParameters** %tmp46, align 8 ; <%struct.ComponentParameters*> [#uses=1]
%tmp4849 = bitcast %struct.ComponentParameters* %tmp47 to i8* ; <i8*> [#uses=1]
%tmp505152 = sext i16 %tmp4243 to i64 ; <i64> [#uses=1]
%tmp53 = getelementptr i8, i8* %tmp4849, i64 %tmp505152 ; <i8*> [#uses=1]
%tmp5354 = bitcast i8* %tmp53 to i64* ; <i64*> [#uses=1]
- %tmp58 = load i64* %tmp5354, align 8 ; <i64> [#uses=1]
+ %tmp58 = load i64, i64* %tmp5354, align 8 ; <i64> [#uses=1]
%tmp59 = icmp eq i64 %tmp58, 0 ; <i1> [#uses=1]
br i1 %tmp59, label %UnifiedReturnBlock, label %cond_true63
cond_true63: ; preds = %cond_next
%tmp65 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 0 ; <%struct.AGenericManager**> [#uses=1]
- %tmp66 = load %struct.AGenericManager** %tmp65, align 8 ; <%struct.AGenericManager*> [#uses=1]
+ %tmp66 = load %struct.AGenericManager*, %struct.AGenericManager** %tmp65, align 8 ; <%struct.AGenericManager*> [#uses=1]
%tmp69 = tail call i32 @_ZN15AGenericManager24DefaultComponentInstanceERP23ComponentInstanceRecord( %struct.AGenericManager* %tmp66, %struct.ComponentInstanceRecord** %instance ) ; <i32> [#uses=1]
ret i32 %tmp69

Modified: llvm/trunk/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ entry:
store i32 (...)** getelementptr ([4 x i32 (...)*]* @_ZTVSt9basic_iosIcSt11char_traitsIcEE, i32 0, i32 2), i32 (...)*** null, align 4
store i32 (...)** null, i32 (...)*** null, align 4
%ctg2242.i.i163.i = getelementptr i8, i8* %tmp96.i.i142.i, i32 0 ; <i8*> [#uses=1]
- %tmp150.i.i164.i = load i8** getelementptr ([4 x i8*]* @_ZTTSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE, i32 0, i64 2), align 4 ; <i8*> [#uses=1]
+ %tmp150.i.i164.i = load i8*, i8** getelementptr ([4 x i8*]* @_ZTTSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE, i32 0, i64 2), align 4 ; <i8*> [#uses=1]
%tmp150151.i.i165.i = bitcast i8* %tmp150.i.i164.i to i32 (...)** ; <i32 (...)**> [#uses=1]
%tmp153.i.i166.i = bitcast i8* %ctg2242.i.i163.i to i32 (...)*** ; <i32 (...)***> [#uses=1]
store i32 (...)** %tmp150151.i.i165.i, i32 (...)*** %tmp153.i.i166.i, align 4

Modified: llvm/trunk/test/CodeGen/X86/2008-04-09-BranchFolding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-04-09-BranchFolding.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-04-09-BranchFolding.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-04-09-BranchFolding.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ bb140: ; preds = %entry
bb17.i: ; preds = %bb140
ret %struct.tree_node* null
bb143: ; preds = %entry
- %tmp8.i43 = load %struct.tree_node** null, align 4 ; <%struct.tree_node*> [#uses=1]
+ %tmp8.i43 = load %struct.tree_node*, %struct.tree_node** null, align 4 ; <%struct.tree_node*> [#uses=1]
br i1 %tmp3.i40, label %bb160, label %bb9.i48
bb9.i48: ; preds = %bb143
ret %struct.tree_node* null

Modified: llvm/trunk/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll Fri Feb 27 15:17:42 2015
@@ -40,8 +40,8 @@
define void @"-[AA BB:optionIndex:delegate:CC:contextInfo:]"(%struct.AA* %self, %struct._message_ref_t* %_cmd, %struct.NSError* %inError, i64 %inOptionIndex, %struct.NSObject* %inDelegate, %struct.objc_selector* %inDidRecoverSelector, i8* %inContextInfo) {
entry:
- %tmp105 = load %struct.NSArray** null, align 8 ; <%struct.NSArray*> [#uses=1]
- %tmp107 = load %struct.NSObject** null, align 8 ; <%struct.NSObject*> [#uses=1]
+ %tmp105 = load %struct.NSArray*, %struct.NSArray** null, align 8 ; <%struct.NSArray*> [#uses=1]
+ %tmp107 = load %struct.NSObject*, %struct.NSObject** null, align 8 ; <%struct.NSObject*> [#uses=1]
call void null( %struct.NSObject* %tmp107, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_228", %struct.NSArray* %tmp105, i8 signext 0 )
%tmp111 = call %struct.NSObject* (%struct.NSObject*, %struct.objc_selector*, ...)* @objc_msgSend( %struct.NSObject* null, %struct.objc_selector* null, i32 0, i8* null ) ; <%struct.NSObject*> [#uses=0]
ret void

Modified: llvm/trunk/test/CodeGen/X86/2008-04-16-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-04-16-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-04-16-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-04-16-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ bb94.us: ; preds = %bb71.us, %bb53.us
store i16 %tmp113.us, i16* null, align 2
br label %bb53.us
bb71.us: ; preds = %bb53.us
- %tmp80.us = load i8* null, align 1 ; <i8> [#uses=1]
+ %tmp80.us = load i8, i8* null, align 1 ; <i8> [#uses=1]
%tmp8081.us = zext i8 %tmp80.us to i32 ; <i32> [#uses=1]
%tmp87.us = mul i32 %tmp8081.us, 0 ; <i32> [#uses=1]
%tmp92.us = add i32 0, %tmp87.us ; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/2008-04-17-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-04-17-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-04-17-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-04-17-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -33,7 +33,7 @@ bb161.i: ; preds = %bb142.i
bb182.i: ; preds = %bb142.i
ret void
bb3261: ; preds = %bb7834, %bb161.i
- %tmp3263 = load i32* null, align 4 ; <i32> [#uses=1]
+ %tmp3263 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%tmp3264 = icmp eq i32 %tmp3263, 37 ; <i1> [#uses=1]
br i1 %tmp3264, label %bb3306, label %bb3267
bb3267: ; preds = %bb3261
@@ -42,7 +42,7 @@ bb3306: ; preds = %bb3261
%tmp3310 = invoke %struct.wxStringBase* @_ZN12wxStringBaseaSEPKw( %struct.wxStringBase* null, i32* getelementptr ([5 x i32]* @.str89, i32 0, i32 0) )
to label %bb3314 unwind label %lpad ; <%struct.wxStringBase*> [#uses=0]
bb3314: ; preds = %bb3306
- %tmp3316 = load i32* null, align 4 ; <i32> [#uses=1]
+ %tmp3316 = load i32, i32* null, align 4 ; <i32> [#uses=1]
switch i32 %tmp3316, label %bb7595 [
i32 0, label %bb7819
i32 37, label %bb7806

Modified: llvm/trunk/test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ target triple = "i386-apple-darwin8"
define i32 @main() nounwind {
entry:
- %tmp122 = load <2 x i64>* null, align 16 ; <<2 x i64>> [#uses=1]
+ %tmp122 = load <2 x i64>, <2 x i64>* null, align 16 ; <<2 x i64>> [#uses=1]
%tmp126 = bitcast <2 x i64> %tmp122 to <8 x i16> ; <<8 x i16>> [#uses=1]
%tmp129 = call <8 x i16> @llvm.x86.sse41.pblendw( <8 x i16> zeroinitializer, <8 x i16> %tmp126, i32 2 ) nounwind ; <<8 x i16>> [#uses=0]
ret i32 0

Modified: llvm/trunk/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-04-28-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-04-28-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-04-28-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -33,7 +33,7 @@ bb13101: ; preds = %bb13088
bb13107: ; preds = %bb13101, %bb13088
%iftmp.684.0 = phi i32 [ 0, %bb13101 ], [ 65535, %bb13088 ] ; <i32> [#uses=2]
- %tmp13111 = load i64* null, align 8 ; <i64> [#uses=3]
+ %tmp13111 = load i64, i64* null, align 8 ; <i64> [#uses=3]
%tmp13116 = lshr i64 %tmp13111, 16 ; <i64> [#uses=1]
%tmp1311613117 = trunc i64 %tmp13116 to i32 ; <i32> [#uses=1]
%tmp13118 = and i32 %tmp1311613117, 65535 ; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll Fri Feb 27 15:17:42 2015
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=x86 -mattr=+sse2
define fastcc void @glgVectorFloatConversion() nounwind {
- %tmp12745 = load <4 x float>* null, align 16 ; <<4 x float>> [#uses=1]
+ %tmp12745 = load <4 x float>, <4 x float>* null, align 16 ; <<4 x float>> [#uses=1]
%tmp12773 = insertelement <4 x float> %tmp12745, float 1.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
%tmp12774 = insertelement <4 x float> %tmp12773, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
%tmp12775 = insertelement <4 x float> %tmp12774, float 1.000000e+00, i32 3 ; <<4 x float>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/2008-05-12-tailmerge-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-05-12-tailmerge-5.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-05-12-tailmerge-5.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-05-12-tailmerge-5.ll Fri Feb 27 15:17:42 2015
@@ -46,7 +46,7 @@ entry:
store i8 %d, i8* %d_addr
%tmp13 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 0 ; <[3 x i8]*> [#uses=1]
%tmp1314 = bitcast [3 x i8]* %tmp13 to i32* ; <i32*> [#uses=1]
- %tmp15 = load i32* %tmp1314, align 4 ; <i32> [#uses=1]
+ %tmp15 = load i32, i32* %tmp1314, align 4 ; <i32> [#uses=1]
%tmp16 = shl i32 %tmp15, 14 ; <i32> [#uses=1]
%tmp17 = ashr i32 %tmp16, 23 ; <i32> [#uses=1]
%tmp1718 = trunc i32 %tmp17 to i16 ; <i16> [#uses=1]
@@ -57,7 +57,7 @@ entry:
%sextl21 = shl i16 %sextr, 7 ; <i16> [#uses=1]
%sextr22 = ashr i16 %sextl21, 7 ; <i16> [#uses=1]
%sextr2223 = sext i16 %sextr22 to i32 ; <i32> [#uses=1]
- %tmp24 = load i32* %j_addr, align 4 ; <i32> [#uses=1]
+ %tmp24 = load i32, i32* %j_addr, align 4 ; <i32> [#uses=1]
%tmp25 = icmp ne i32 %sextr2223, %tmp24 ; <i1> [#uses=1]
%tmp2526 = zext i1 %tmp25 to i8 ; <i8> [#uses=1]
%toBool = icmp ne i8 %tmp2526, 0 ; <i1> [#uses=1]
@@ -69,8 +69,8 @@ bb: ; preds = %entry
bb27: ; preds = %entry
%tmp28 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 1 ; <i8*> [#uses=1]
- %tmp29 = load i8* %tmp28, align 4 ; <i8> [#uses=1]
- %tmp30 = load i8* %c_addr, align 1 ; <i8> [#uses=1]
+ %tmp29 = load i8, i8* %tmp28, align 4 ; <i8> [#uses=1]
+ %tmp30 = load i8, i8* %c_addr, align 1 ; <i8> [#uses=1]
%tmp31 = icmp ne i8 %tmp29, %tmp30 ; <i1> [#uses=1]
%tmp3132 = zext i1 %tmp31 to i8 ; <i8> [#uses=1]
%toBool33 = icmp ne i8 %tmp3132, 0 ; <i1> [#uses=1]
@@ -82,7 +82,7 @@ bb34: ; preds = %bb27
bb35: ; preds = %bb27
%tmp36 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 2 ; <i16*> [#uses=1]
- %tmp37 = load i16* %tmp36, align 4 ; <i16> [#uses=1]
+ %tmp37 = load i16, i16* %tmp36, align 4 ; <i16> [#uses=1]
%tmp38 = shl i16 %tmp37, 7 ; <i16> [#uses=1]
%tmp39 = ashr i16 %tmp38, 7 ; <i16> [#uses=1]
%sextl40 = shl i16 %tmp39, 7 ; <i16> [#uses=1]
@@ -91,7 +91,7 @@ bb35: ; preds = %bb27
%sextr43 = ashr i16 %sextl42, 7 ; <i16> [#uses=0]
%sextl44 = shl i16 %sextr41, 7 ; <i16> [#uses=1]
%sextr45 = ashr i16 %sextl44, 7 ; <i16> [#uses=1]
- %tmp46 = load i16* %t_addr, align 2 ; <i16> [#uses=1]
+ %tmp46 = load i16, i16* %t_addr, align 2 ; <i16> [#uses=1]
%tmp47 = icmp ne i16 %sextr45, %tmp46 ; <i1> [#uses=1]
%tmp4748 = zext i1 %tmp47 to i8 ; <i8> [#uses=1]
%toBool49 = icmp ne i8 %tmp4748, 0 ; <i1> [#uses=1]
@@ -103,7 +103,7 @@ bb50: ; preds = %bb35
bb51: ; preds = %bb35
%tmp52 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 3 ; <i16*> [#uses=1]
- %tmp53 = load i16* %tmp52, align 4 ; <i16> [#uses=1]
+ %tmp53 = load i16, i16* %tmp52, align 4 ; <i16> [#uses=1]
%tmp54 = shl i16 %tmp53, 7 ; <i16> [#uses=1]
%tmp55 = ashr i16 %tmp54, 7 ; <i16> [#uses=1]
%sextl56 = shl i16 %tmp55, 7 ; <i16> [#uses=1]
@@ -112,7 +112,7 @@ bb51: ; preds = %bb35
%sextr59 = ashr i16 %sextl58, 7 ; <i16> [#uses=0]
%sextl60 = shl i16 %sextr57, 7 ; <i16> [#uses=1]
%sextr61 = ashr i16 %sextl60, 7 ; <i16> [#uses=1]
- %tmp62 = load i16* %u_addr, align 2 ; <i16> [#uses=1]
+ %tmp62 = load i16, i16* %u_addr, align 2 ; <i16> [#uses=1]
%tmp63 = icmp ne i16 %sextr61, %tmp62 ; <i1> [#uses=1]
%tmp6364 = zext i1 %tmp63 to i8 ; <i8> [#uses=1]
%toBool65 = icmp ne i8 %tmp6364, 0 ; <i1> [#uses=1]
@@ -124,8 +124,8 @@ bb66: ; preds = %bb51
bb67: ; preds = %bb51
%tmp68 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 4 ; <i8*> [#uses=1]
- %tmp69 = load i8* %tmp68, align 4 ; <i8> [#uses=1]
- %tmp70 = load i8* %d_addr, align 1 ; <i8> [#uses=1]
+ %tmp69 = load i8, i8* %tmp68, align 4 ; <i8> [#uses=1]
+ %tmp70 = load i8, i8* %d_addr, align 1 ; <i8> [#uses=1]
%tmp71 = icmp ne i8 %tmp69, %tmp70 ; <i1> [#uses=1]
%tmp7172 = zext i1 %tmp71 to i8 ; <i8> [#uses=1]
%toBool73 = icmp ne i8 %tmp7172, 0 ; <i1> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/2008-05-21-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-05-21-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-05-21-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-05-21-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -74,7 +74,7 @@ entry:
br label %bb497
bb483: ; preds = %bb497
- %tmp496 = load %struct.tree_node** null, align 4 ; <%struct.tree_node*> [#uses=1]
+ %tmp496 = load %struct.tree_node*, %struct.tree_node** null, align 4 ; <%struct.tree_node*> [#uses=1]
br label %bb497
bb497: ; preds = %bb483, %entry

Modified: llvm/trunk/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll Fri Feb 27 15:17:42 2015
@@ -2,7 +2,7 @@
define void @a(<4 x float>* %x) nounwind {
entry:
- %tmp2 = load <4 x float>* %x, align 1
+ %tmp2 = load <4 x float>, <4 x float>* %x, align 1
%inv = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %tmp2)
store <4 x float> %inv, <4 x float>* %x, align 1
ret void

Modified: llvm/trunk/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll Fri Feb 27 15:17:42 2015
@@ -13,9 +13,9 @@ define i16 @f(i64 %x) {
%b = bitcast i64 %x to double ; <double> [#uses=1]
store double %b, double* @atomic
store double 0.000000e+00, double* @atomic2
- %l = load i32* @ioport ; <i32> [#uses=1]
+ %l = load i32, i32* @ioport ; <i32> [#uses=1]
%t = trunc i32 %l to i16 ; <i16> [#uses=1]
- %l2 = load i32* @ioport2 ; <i32> [#uses=1]
+ %l2 = load i32, i32* @ioport2 ; <i32> [#uses=1]
%tmp = lshr i32 %l2, 16 ; <i32> [#uses=1]
%t2 = trunc i32 %tmp to i16 ; <i16> [#uses=1]
%f = add i16 %t, %t2 ; <i16> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll Fri Feb 27 15:17:42 2015
@@ -12,9 +12,9 @@ define i16 @f(i64 %x, double %y) {
store volatile double 0.000000e+00, double* @atomic2 ; one processor operation only
%b2 = bitcast double %y to i64 ; <i64> [#uses=1]
store volatile i64 %b2, i64* @anything ; may transform to store of double
- %l = load volatile i32* @ioport ; must not narrow
+ %l = load volatile i32, i32* @ioport ; must not narrow
%t = trunc i32 %l to i16 ; <i16> [#uses=1]
- %l2 = load volatile i32* @ioport ; must not narrow
+ %l2 = load volatile i32, i32* @ioport ; must not narrow
%tmp = lshr i32 %l2, 16 ; <i32> [#uses=1]
%t2 = trunc i32 %tmp to i16 ; <i16> [#uses=1]
%f = add i16 %t, %t2 ; <i16> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/2008-06-16-SubregsBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-06-16-SubregsBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-06-16-SubregsBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-06-16-SubregsBug.ll Fri Feb 27 15:17:42 2015
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=i386-apple-darwin | grep mov | count 4
define i16 @test(i16* %tmp179) nounwind {
- %tmp180 = load i16* %tmp179, align 2 ; <i16> [#uses=2]
+ %tmp180 = load i16, i16* %tmp179, align 2 ; <i16> [#uses=2]
%tmp184 = and i16 %tmp180, -1024 ; <i16> [#uses=1]
%tmp186 = icmp eq i16 %tmp184, -32768 ; <i1> [#uses=1]
br i1 %tmp186, label %bb189, label %bb288

Modified: llvm/trunk/test/CodeGen/X86/2008-07-07-DanglingDeadInsts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-07-07-DanglingDeadInsts.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-07-07-DanglingDeadInsts.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-07-07-DanglingDeadInsts.ll Fri Feb 27 15:17:42 2015
@@ -63,7 +63,7 @@ entry:
br i1 false, label %bb17.preheader, label %bb30
bb17.preheader: ; preds = %entry
- load i32* null, align 4 ; <i32>:0 [#uses=0]
+ load i32, i32* null, align 4 ; <i32>:0 [#uses=0]
br label %bb16
bb16: ; preds = %bb16, %bb17.preheader

Modified: llvm/trunk/test/CodeGen/X86/2008-07-19-movups-spills.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-07-19-movups-spills.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-07-19-movups-spills.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-07-19-movups-spills.ll Fri Feb 27 15:17:42 2015
@@ -75,38 +75,38 @@ define void @test1() {
; CHECK: movups
; CHECK: movups
; CHECK-NOT: movups
- load <4 x float>* @0, align 1 ; <<4 x float>>:1 [#uses=2]
- load <4 x float>* @1, align 1 ; <<4 x float>>:2 [#uses=3]
- load <4 x float>* @2, align 1 ; <<4 x float>>:3 [#uses=4]
- load <4 x float>* @3, align 1 ; <<4 x float>>:4 [#uses=5]
- load <4 x float>* @4, align 1 ; <<4 x float>>:5 [#uses=6]
- load <4 x float>* @5, align 1 ; <<4 x float>>:6 [#uses=7]
- load <4 x float>* @6, align 1 ; <<4 x float>>:7 [#uses=8]
- load <4 x float>* @7, align 1 ; <<4 x float>>:8 [#uses=9]
- load <4 x float>* @8, align 1 ; <<4 x float>>:9 [#uses=10]
- load <4 x float>* @9, align 1 ; <<4 x float>>:10 [#uses=11]
- load <4 x float>* @10, align 1 ; <<4 x float>>:11 [#uses=12]
- load <4 x float>* @11, align 1 ; <<4 x float>>:12 [#uses=13]
- load <4 x float>* @12, align 1 ; <<4 x float>>:13 [#uses=14]
- load <4 x float>* @13, align 1 ; <<4 x float>>:14 [#uses=15]
- load <4 x float>* @14, align 1 ; <<4 x float>>:15 [#uses=16]
- load <4 x float>* @15, align 1 ; <<4 x float>>:16 [#uses=17]
- load <4 x float>* @16, align 1 ; <<4 x float>>:17 [#uses=18]
- load <4 x float>* @17, align 1 ; <<4 x float>>:18 [#uses=19]
- load <4 x float>* @18, align 1 ; <<4 x float>>:19 [#uses=20]
- load <4 x float>* @19, align 1 ; <<4 x float>>:20 [#uses=21]
- load <4 x float>* @20, align 1 ; <<4 x float>>:21 [#uses=22]
- load <4 x float>* @21, align 1 ; <<4 x float>>:22 [#uses=23]
- load <4 x float>* @22, align 1 ; <<4 x float>>:23 [#uses=24]
- load <4 x float>* @23, align 1 ; <<4 x float>>:24 [#uses=25]
- load <4 x float>* @24, align 1 ; <<4 x float>>:25 [#uses=26]
- load <4 x float>* @25, align 1 ; <<4 x float>>:26 [#uses=27]
- load <4 x float>* @26, align 1 ; <<4 x float>>:27 [#uses=28]
- load <4 x float>* @27, align 1 ; <<4 x float>>:28 [#uses=29]
- load <4 x float>* @28, align 1 ; <<4 x float>>:29 [#uses=30]
- load <4 x float>* @29, align 1 ; <<4 x float>>:30 [#uses=31]
- load <4 x float>* @30, align 1 ; <<4 x float>>:31 [#uses=32]
- load <4 x float>* @31, align 1 ; <<4 x float>>:32 [#uses=33]
+ load <4 x float>, <4 x float>* @0, align 1 ; <<4 x float>>:1 [#uses=2]
+ load <4 x float>, <4 x float>* @1, align 1 ; <<4 x float>>:2 [#uses=3]
+ load <4 x float>, <4 x float>* @2, align 1 ; <<4 x float>>:3 [#uses=4]
+ load <4 x float>, <4 x float>* @3, align 1 ; <<4 x float>>:4 [#uses=5]
+ load <4 x float>, <4 x float>* @4, align 1 ; <<4 x float>>:5 [#uses=6]
+ load <4 x float>, <4 x float>* @5, align 1 ; <<4 x float>>:6 [#uses=7]
+ load <4 x float>, <4 x float>* @6, align 1 ; <<4 x float>>:7 [#uses=8]
+ load <4 x float>, <4 x float>* @7, align 1 ; <<4 x float>>:8 [#uses=9]
+ load <4 x float>, <4 x float>* @8, align 1 ; <<4 x float>>:9 [#uses=10]
+ load <4 x float>, <4 x float>* @9, align 1 ; <<4 x float>>:10 [#uses=11]
+ load <4 x float>, <4 x float>* @10, align 1 ; <<4 x float>>:11 [#uses=12]
+ load <4 x float>, <4 x float>* @11, align 1 ; <<4 x float>>:12 [#uses=13]
+ load <4 x float>, <4 x float>* @12, align 1 ; <<4 x float>>:13 [#uses=14]
+ load <4 x float>, <4 x float>* @13, align 1 ; <<4 x float>>:14 [#uses=15]
+ load <4 x float>, <4 x float>* @14, align 1 ; <<4 x float>>:15 [#uses=16]
+ load <4 x float>, <4 x float>* @15, align 1 ; <<4 x float>>:16 [#uses=17]
+ load <4 x float>, <4 x float>* @16, align 1 ; <<4 x float>>:17 [#uses=18]
+ load <4 x float>, <4 x float>* @17, align 1 ; <<4 x float>>:18 [#uses=19]
+ load <4 x float>, <4 x float>* @18, align 1 ; <<4 x float>>:19 [#uses=20]
+ load <4 x float>, <4 x float>* @19, align 1 ; <<4 x float>>:20 [#uses=21]
+ load <4 x float>, <4 x float>* @20, align 1 ; <<4 x float>>:21 [#uses=22]
+ load <4 x float>, <4 x float>* @21, align 1 ; <<4 x float>>:22 [#uses=23]
+ load <4 x float>, <4 x float>* @22, align 1 ; <<4 x float>>:23 [#uses=24]
+ load <4 x float>, <4 x float>* @23, align 1 ; <<4 x float>>:24 [#uses=25]
+ load <4 x float>, <4 x float>* @24, align 1 ; <<4 x float>>:25 [#uses=26]
+ load <4 x float>, <4 x float>* @25, align 1 ; <<4 x float>>:26 [#uses=27]
+ load <4 x float>, <4 x float>* @26, align 1 ; <<4 x float>>:27 [#uses=28]
+ load <4 x float>, <4 x float>* @27, align 1 ; <<4 x float>>:28 [#uses=29]
+ load <4 x float>, <4 x float>* @28, align 1 ; <<4 x float>>:29 [#uses=30]
+ load <4 x float>, <4 x float>* @29, align 1 ; <<4 x float>>:30 [#uses=31]
+ load <4 x float>, <4 x float>* @30, align 1 ; <<4 x float>>:31 [#uses=32]
+ load <4 x float>, <4 x float>* @31, align 1 ; <<4 x float>>:32 [#uses=33]
fmul <4 x float> %1, %1 ; <<4 x float>>:33 [#uses=1]
fmul <4 x float> %33, %2 ; <<4 x float>>:34 [#uses=1]
fmul <4 x float> %34, %3 ; <<4 x float>>:35 [#uses=1]
@@ -708,38 +708,38 @@ define void @test2() "no-realign-stack"
; CHECK: movups
; CHECK: movups
; CHECK-NOT: movups
- load <4 x float>* @0, align 1
- load <4 x float>* @1, align 1
- load <4 x float>* @2, align 1
- load <4 x float>* @3, align 1
- load <4 x float>* @4, align 1
- load <4 x float>* @5, align 1
- load <4 x float>* @6, align 1
- load <4 x float>* @7, align 1
- load <4 x float>* @8, align 1
- load <4 x float>* @9, align 1
- load <4 x float>* @10, align 1
- load <4 x float>* @11, align 1
- load <4 x float>* @12, align 1
- load <4 x float>* @13, align 1
- load <4 x float>* @14, align 1
- load <4 x float>* @15, align 1
- load <4 x float>* @16, align 1
- load <4 x float>* @17, align 1
- load <4 x float>* @18, align 1
- load <4 x float>* @19, align 1
- load <4 x float>* @20, align 1
- load <4 x float>* @21, align 1
- load <4 x float>* @22, align 1
- load <4 x float>* @23, align 1
- load <4 x float>* @24, align 1
- load <4 x float>* @25, align 1
- load <4 x float>* @26, align 1
- load <4 x float>* @27, align 1
- load <4 x float>* @28, align 1
- load <4 x float>* @29, align 1
- load <4 x float>* @30, align 1
- load <4 x float>* @31, align 1
+ load <4 x float>, <4 x float>* @0, align 1
+ load <4 x float>, <4 x float>* @1, align 1
+ load <4 x float>, <4 x float>* @2, align 1
+ load <4 x float>, <4 x float>* @3, align 1
+ load <4 x float>, <4 x float>* @4, align 1
+ load <4 x float>, <4 x float>* @5, align 1
+ load <4 x float>, <4 x float>* @6, align 1
+ load <4 x float>, <4 x float>* @7, align 1
+ load <4 x float>, <4 x float>* @8, align 1
+ load <4 x float>, <4 x float>* @9, align 1
+ load <4 x float>, <4 x float>* @10, align 1
+ load <4 x float>, <4 x float>* @11, align 1
+ load <4 x float>, <4 x float>* @12, align 1
+ load <4 x float>, <4 x float>* @13, align 1
+ load <4 x float>, <4 x float>* @14, align 1
+ load <4 x float>, <4 x float>* @15, align 1
+ load <4 x float>, <4 x float>* @16, align 1
+ load <4 x float>, <4 x float>* @17, align 1
+ load <4 x float>, <4 x float>* @18, align 1
+ load <4 x float>, <4 x float>* @19, align 1
+ load <4 x float>, <4 x float>* @20, align 1
+ load <4 x float>, <4 x float>* @21, align 1
+ load <4 x float>, <4 x float>* @22, align 1
+ load <4 x float>, <4 x float>* @23, align 1
+ load <4 x float>, <4 x float>* @24, align 1
+ load <4 x float>, <4 x float>* @25, align 1
+ load <4 x float>, <4 x float>* @26, align 1
+ load <4 x float>, <4 x float>* @27, align 1
+ load <4 x float>, <4 x float>* @28, align 1
+ load <4 x float>, <4 x float>* @29, align 1
+ load <4 x float>, <4 x float>* @30, align 1
+ load <4 x float>, <4 x float>* @31, align 1
fmul <4 x float> %1, %1
fmul <4 x float> %33, %2
fmul <4 x float> %34, %3

Modified: llvm/trunk/test/CodeGen/X86/2008-07-22-CombinerCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-07-22-CombinerCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-07-22-CombinerCrash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-07-22-CombinerCrash.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ external global <4 x i16> ; <<4 x i16>*
declare void @abort()
define void @t() nounwind {
- load i16* @0 ; <i16>:1 [#uses=1]
+ load i16, i16* @0 ; <i16>:1 [#uses=1]
zext i16 %1 to i64 ; <i64>:2 [#uses=1]
bitcast i64 %2 to <4 x i16> ; <<4 x i16>>:3 [#uses=1]
shufflevector <4 x i16> %3, <4 x i16> undef, <4 x i32> zeroinitializer ; <<4 x i16>>:4 [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/2008-08-06-RewriterBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-08-06-RewriterBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-08-06-RewriterBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-08-06-RewriterBug.ll Fri Feb 27 15:17:42 2015
@@ -4,14 +4,14 @@
@data = external global [400 x i64] ; <[400 x i64]*> [#uses=5]
define void @foo(double* noalias, double* noalias) {
- load i64* getelementptr ([400 x i64]* @data, i32 0, i64 200), align 4 ; <i64>:3 [#uses=1]
- load i64* getelementptr ([400 x i64]* @data, i32 0, i64 199), align 4 ; <i64>:4 [#uses=1]
- load i64* getelementptr ([400 x i64]* @data, i32 0, i64 198), align 4 ; <i64>:5 [#uses=2]
- load i64* getelementptr ([400 x i64]* @data, i32 0, i64 197), align 4 ; <i64>:6 [#uses=1]
+ load i64, i64* getelementptr ([400 x i64]* @data, i32 0, i64 200), align 4 ; <i64>:3 [#uses=1]
+ load i64, i64* getelementptr ([400 x i64]* @data, i32 0, i64 199), align 4 ; <i64>:4 [#uses=1]
+ load i64, i64* getelementptr ([400 x i64]* @data, i32 0, i64 198), align 4 ; <i64>:5 [#uses=2]
+ load i64, i64* getelementptr ([400 x i64]* @data, i32 0, i64 197), align 4 ; <i64>:6 [#uses=1]
br i1 false, label %28, label %7
; <label>:7 ; preds = %2
- load double** getelementptr (double** bitcast ([400 x i64]* @data to double**), i64 180), align 8 ; <double*>:8 [#uses=1]
+ load double*, double** getelementptr (double** bitcast ([400 x i64]* @data to double**), i64 180), align 8 ; <double*>:8 [#uses=1]
bitcast double* %8 to double* ; <double*>:9 [#uses=1]
ptrtoint double* %9 to i64 ; <i64>:10 [#uses=1]
mul i64 %4, %3 ; <i64>:11 [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll Fri Feb 27 15:17:42 2015
@@ -29,7 +29,7 @@ declare void @llvm.eh.unwind.init()
; CHECK: _Unwind_Resume_or_Rethrow
define i32 @_Unwind_Resume_or_Rethrow() nounwind uwtable ssp {
entry:
- %0 = load i32* @b, align 4
+ %0 = load i32, i32* @b, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %if.then

Modified: llvm/trunk/test/CodeGen/X86/2008-09-09-LinearScanBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-09-09-LinearScanBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-09-09-LinearScanBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-09-09-LinearScanBug.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
define i32 @func_125(i32 %p_126, i32 %p_128, i32 %p_129) nounwind {
entry:
- %tmp2.i = load i32* @g_3 ; <i32> [#uses=2]
+ %tmp2.i = load i32, i32* @g_3 ; <i32> [#uses=2]
%conv = trunc i32 %tmp2.i to i16 ; <i16> [#uses=3]
br label %forcond1.preheader.i.i7

Modified: llvm/trunk/test/CodeGen/X86/2008-09-11-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-09-11-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-09-11-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-09-11-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
define i32 @func_3(i32 %p_5) nounwind {
entry:
%0 = srem i32 1, 0 ; <i32> [#uses=2]
- %1 = load i16* @g_15, align 2 ; <i16> [#uses=1]
+ %1 = load i16, i16* @g_15, align 2 ; <i16> [#uses=1]
%2 = zext i16 %1 to i32 ; <i32> [#uses=1]
%3 = and i32 %2, 1 ; <i32> [#uses=1]
%4 = tail call i32 (...)* @rshift_u_s( i32 1 ) nounwind ; <i32> [#uses=1]
@@ -14,7 +14,7 @@ entry:
%6 = zext i1 %5 to i32 ; <i32> [#uses=1]
%7 = icmp sge i32 %3, %6 ; <i1> [#uses=1]
%8 = zext i1 %7 to i32 ; <i32> [#uses=1]
- %9 = load i16* @g_15, align 2 ; <i16> [#uses=1]
+ %9 = load i16, i16* @g_15, align 2 ; <i16> [#uses=1]
%10 = icmp eq i16 %9, 0 ; <i1> [#uses=1]
%11 = zext i1 %10 to i32 ; <i32> [#uses=1]
%12 = tail call i32 (...)* @func_20( i32 1 ) nounwind ; <i32> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll Fri Feb 27 15:17:42 2015
@@ -18,11 +18,11 @@ entry:
; SOURCE-SCHED: subl
; SOURCE-SCHED: testb
; SOURCE-SCHED: jne
- %0 = load i32* @g_5, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @g_5, align 4 ; <i32> [#uses=1]
%1 = ashr i32 %0, 1 ; <i32> [#uses=1]
%2 = icmp sgt i32 %1, 1 ; <i1> [#uses=1]
%3 = zext i1 %2 to i32 ; <i32> [#uses=1]
- %4 = load i32* @g_73, align 4 ; <i32> [#uses=1]
+ %4 = load i32, i32* @g_73, align 4 ; <i32> [#uses=1]
%5 = zext i16 %p_46 to i64 ; <i64> [#uses=1]
%6 = sub i64 0, %5 ; <i64> [#uses=1]
%7 = trunc i64 %6 to i8 ; <i8> [#uses=2]
Modified: llvm/trunk/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-09-17-inline-asm-1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-09-17-inline-asm-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-09-17-inline-asm-1.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ target triple = "i386-apple-darwin8"
define i32 @aci(i32* %pw) nounwind {
entry:
- %0 = load i32* @x, align 4
+ %0 = load i32, i32* @x, align 4
%asmtmp = tail call { i32, i32 } asm "movl $0, %eax\0A\090:\0A\09test %eax, %eax\0A\09je 1f\0A\09movl %eax, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{ax},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* %pw, i32* %pw) nounwind
%asmtmp2 = tail call { i32, i32 } asm "movl $0, %edx\0A\090:\0A\09test %edx, %edx\0A\09je 1f\0A\09movl %edx, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{dx},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* %pw, i32* %pw) nounwind
%asmresult2 = extractvalue { i32, i32 } %asmtmp, 0
Modified: llvm/trunk/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-09-18-inline-asm-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-09-18-inline-asm-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-09-18-inline-asm-2.ll Fri Feb 27 15:17:42 2015
@@ -35,9 +35,9 @@ entry:
%0 = getelementptr %struct.foo, %struct.foo* %c, i32 0, i32 0 ; <i32*> [#uses=2]
%1 = getelementptr %struct.foo, %struct.foo* %c, i32 0, i32 1 ; <i32*> [#uses=2]
%2 = getelementptr %struct.foo, %struct.foo* %c, i32 0, i32 2 ; <i8**> [#uses=2]
- %3 = load i32* %0, align 4 ; <i32> [#uses=1]
- %4 = load i32* %1, align 4 ; <i32> [#uses=1]
- %5 = load i8* %state, align 1 ; <i8> [#uses=1]
+ %3 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
+ %4 = load i32, i32* %1, align 4 ; <i32> [#uses=1]
+ %5 = load i8, i8* %state, align 1 ; <i8> [#uses=1]
%asmtmp = tail call { i32, i32, i32, i32 } asm sideeffect "#1st=$0 $1 2nd=$1 $2 3rd=$2 $4 5th=$4 $3=4th 1$0 1%eXx 5$4 5%eXx 6th=$5", "=&r,=r,=r,=*m,=&q,=*imr,1,2,*m,5,~{dirflag},~{fpsr},~{flags},~{cx}"(i8** %2, i8* %state, i32 %3, i32 %4, i8** %2, i8 %5) nounwind ; <{ i32, i32, i32, i32 }> [#uses=3]
%asmresult = extractvalue { i32, i32, i32, i32 } %asmtmp, 0 ; <i32> [#uses=1]
%asmresult1 = extractvalue { i32, i32, i32, i32 } %asmtmp, 1 ; <i32> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2008-09-19-RegAllocBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-09-19-RegAllocBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-09-19-RegAllocBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-09-19-RegAllocBug.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
define i32 @func_4() nounwind {
entry:
- %0 = load i32* @g_3, align 4 ; <i32> [#uses=2]
+ %0 = load i32, i32* @g_3, align 4 ; <i32> [#uses=2]
%1 = trunc i32 %0 to i8 ; <i8> [#uses=1]
%2 = sub i8 1, %1 ; <i8> [#uses=1]
%3 = sext i8 %2 to i32 ; <i32> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2008-09-29-ReMatBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-09-29-ReMatBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-09-29-ReMatBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-09-29-ReMatBug.ll Fri Feb 27 15:17:42 2015
@@ -13,13 +13,13 @@ internal constant %struct.__builtin_CFSt
define %struct.NSString* @"-[XCStringList stringRepresentation]"(%struct.XCStringList* %self, %struct..0objc_selector* %_cmd) nounwind {
entry:
- %0 = load i32* null, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%1 = and i32 %0, 16777215 ; <i32> [#uses=1]
%2 = icmp eq i32 %1, 0 ; <i1> [#uses=1]
br i1 %2, label %bb44, label %bb4
bb4: ; preds = %entry
- %3 = load %struct._XCStringListNode** null, align 4 ; <%struct._XCStringListNode*> [#uses=2]
+ %3 = load %struct._XCStringListNode*, %struct._XCStringListNode** null, align 4 ; <%struct._XCStringListNode*> [#uses=2]
%4 = icmp eq %struct._XCStringListNode* %3, null ; <i1> [#uses=1]
%5 = bitcast %struct._XCStringListNode* %3 to i32* ; <i32*> [#uses=1]
br label %bb37.outer
@@ -48,7 +48,7 @@ bb35.outer: ; preds = %bb34, %bb25.spli
br label %bb35
bb35: ; preds = %bb35, %bb35.outer
- %9 = load i8* null, align 1 ; <i8> [#uses=1]
+ %9 = load i8, i8* null, align 1 ; <i8> [#uses=1]
switch i8 %9, label %bb35 [
i8 0, label %bb37.outer
i8 32, label %bb34
@@ -63,7 +63,7 @@ bb37.outer: ; preds = %bb35, %bb4
br i1 %4, label %bb39.split, label %bb37
bb37: ; preds = %bb37.outer, %bb19
- %10 = load i32* %5, align 4 ; <i32> [#uses=1]
+ %10 = load i32, i32* %5, align 4 ; <i32> [#uses=1]
br i1 false, label %bb6, label %bb19
bb39.split: ; preds = %bb37.outer
Modified: llvm/trunk/test/CodeGen/X86/2008-09-29-VolatileBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-09-29-VolatileBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-09-29-VolatileBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-09-29-VolatileBug.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
define i32 @main() nounwind {
entry:
- %0 = load volatile i32* @g_407, align 4 ; <i32> [#uses=1]
+ %0 = load volatile i32, i32* @g_407, align 4 ; <i32> [#uses=1]
%1 = trunc i32 %0 to i8 ; <i8> [#uses=1]
%2 = tail call i32 @func_45(i8 zeroext %1) nounwind ; <i32> [#uses=0]
ret i32 0
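Volatile loads follow the same rewrite; the volatile keyword keeps its position immediately after load, ahead of the now-explicit value type, as in the hunk above. An illustrative sketch (the global @g is made up):

  @g = global i32 0

  define i32 @volatile_example() {
    ; old: %v = load volatile i32* @g, align 4
    %v = load volatile i32, i32* @g, align 4
    ret i32 %v
  }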
Modified: llvm/trunk/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ declare x86_stdcallcc void @_D3nan5print
define i32 @main() {
entry_nan.main:
- %tmp = load x86_fp80* @_D3nan4rvale ; <x86_fp80> [#uses=1]
+ %tmp = load x86_fp80, x86_fp80* @_D3nan4rvale ; <x86_fp80> [#uses=1]
call x86_stdcallcc void @_D3nan5printFeZv(x86_fp80 %tmp)
call x86_stdcallcc void @_D3nan5printFeZv(x86_fp80 0xK7FFF8001234000000000)
call x86_stdcallcc void @_D3nan5printFeZv(x86_fp80 0xK7FFFC001234000000400)
Modified: llvm/trunk/test/CodeGen/X86/2008-10-07-SSEISelBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-10-07-SSEISelBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-10-07-SSEISelBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-10-07-SSEISelBug.ll Fri Feb 27 15:17:42 2015
@@ -6,17 +6,17 @@ entry:
%w.addr = alloca float ; <float*> [#uses=2]
%.compoundliteral = alloca <4 x float> ; <<4 x float>*> [#uses=2]
store float %w, float* %w.addr
- %tmp = load float* %w.addr ; <float> [#uses=1]
+ %tmp = load float, float* %w.addr ; <float> [#uses=1]
%0 = insertelement <4 x float> undef, float %tmp, i32 0 ; <<4 x float>> [#uses=1]
%1 = insertelement <4 x float> %0, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
%2 = insertelement <4 x float> %1, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
%3 = insertelement <4 x float> %2, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
store <4 x float> %3, <4 x float>* %.compoundliteral
- %tmp1 = load <4 x float>* %.compoundliteral ; <<4 x float>> [#uses=1]
+ %tmp1 = load <4 x float>, <4 x float>* %.compoundliteral ; <<4 x float>> [#uses=1]
store <4 x float> %tmp1, <4 x float>* %retval
br label %return
return: ; preds = %entry
- %4 = load <4 x float>* %retval ; <<4 x float>> [#uses=1]
+ %4 = load <4 x float>, <4 x float>* %retval ; <<4 x float>> [#uses=1]
ret <4 x float> %4
}
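Vector loads are no different: the explicit first operand is the full vector type, so the pair reads <ty>, <ty>* as in the hunks above. A small illustrative sketch:

  define <4 x float> @vec_example(<4 x float>* %buf) {
    ; the complete vector type now precedes the pointer operand
    %v = load <4 x float>, <4 x float>* %buf, align 16
    ret <4 x float> %v
  }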
Modified: llvm/trunk/test/CodeGen/X86/2008-10-11-CallCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-10-11-CallCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-10-11-CallCrash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-10-11-CallCrash.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ target triple = "i386-apple-darwin7"
define i32 @func_45(i64 %p_46, i32 %p_48) nounwind {
entry:
%0 = tail call i32 (...)* @lshift_s_u(i64 %p_46, i64 0) nounwind ; <i32> [#uses=0]
- %1 = load i32* @g_385, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* @g_385, align 4 ; <i32> [#uses=1]
%2 = shl i32 %1, 1 ; <i32> [#uses=1]
%3 = and i32 %2, 32 ; <i32> [#uses=1]
%4 = tail call i32 (...)* @func_87(i32 undef, i32 %p_48, i32 1) nounwind ; <i32> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll Fri Feb 27 15:17:42 2015
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=x86 -mattr=+sse2
; PR2762
define void @foo(<4 x i32>* %p, <4 x double>* %q) {
- %n = load <4 x i32>* %p
+ %n = load <4 x i32>, <4 x i32>* %p
%z = sitofp <4 x i32> %n to <4 x double>
store <4 x double> %z, <4 x double>* %q
ret void
Modified: llvm/trunk/test/CodeGen/X86/2008-10-27-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-10-27-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-10-27-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-10-27-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ bb: ; preds = %bb, %entry
; CHECK: movsd %xmm0, 16(%esp)
; CHECK: %bb3
bb3: ; preds = %bb30.loopexit, %bb25, %bb3
- %2 = load i32* null, align 4 ; <i32> [#uses=1]
+ %2 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%3 = mul i32 %2, 0 ; <i32> [#uses=1]
%4 = icmp slt i32 0, %3 ; <i1> [#uses=1]
br i1 %4, label %bb18, label %bb3
Modified: llvm/trunk/test/CodeGen/X86/2008-11-06-testb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-11-06-testb.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-11-06-testb.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-11-06-testb.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ entry:
%0 = getelementptr %struct.x, %struct.x* %p, i32 0, i32 0 ; <i8*> [#uses=1]
store i8 55, i8* %0, align 1
%1 = bitcast %struct.x* %p to i32* ; <i32*> [#uses=1]
- %2 = load i32* %1, align 1 ; <i32> [#uses=1]
+ %2 = load i32, i32* %1, align 1 ; <i32> [#uses=1]
%3 = and i32 %2, 512 ; <i32> [#uses=1]
%4 = icmp eq i32 %3, 0 ; <i1> [#uses=1]
br i1 %4, label %bb5, label %bb
Modified: llvm/trunk/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ bb: ; preds = %bb1, %bb1
bb1: ; preds = %bb, %entry
%P.0.rec = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
%P.0 = getelementptr i8, i8* %Q, i32 %P.0.rec ; <i8*> [#uses=2]
- %0 = load i8* %P.0, align 1 ; <i8> [#uses=1]
+ %0 = load i8, i8* %P.0, align 1 ; <i8> [#uses=1]
switch i8 %0, label %bb3 [
i8 12, label %bb
i8 42, label %bb
Modified: llvm/trunk/test/CodeGen/X86/2008-12-02-IllegalResultType.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-12-02-IllegalResultType.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-12-02-IllegalResultType.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-12-02-IllegalResultType.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ target triple = "i386-pc-linux-gnu"
define i32 @func_73(i32 %p_74) nounwind {
entry:
- %0 = load i32* @g_7, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @g_7, align 4 ; <i32> [#uses=1]
%1 = or i8 0, 118 ; <i8> [#uses=1]
%2 = zext i8 %1 to i64 ; <i64> [#uses=1]
%3 = icmp ne i32 %0, 0 ; <i1> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-01-16-SchedulerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-01-16-SchedulerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-01-16-SchedulerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-01-16-SchedulerBug.ll Fri Feb 27 15:17:42 2015
@@ -10,12 +10,12 @@ declare { i32, i1 } @llvm.sadd.with.over
define fastcc %XXV* @bar(%CF* %call_frame, %XXV** %exception) nounwind {
prologue:
- %param_x = load %XXV** null ; <%XXV*> [#uses=1]
+ %param_x = load %XXV*, %XXV** null ; <%XXV*> [#uses=1]
%unique_1.i = ptrtoint %XXV* %param_x to i1 ; <i1> [#uses=1]
br i1 %unique_1.i, label %NextVerify42, label %FailedVerify
NextVerify42: ; preds = %prologue
- %param_y = load %XXV** null ; <%XXV*> [#uses=1]
+ %param_y = load %XXV*, %XXV** null ; <%XXV*> [#uses=1]
%unique_1.i58 = ptrtoint %XXV* %param_y to i1 ; <i1> [#uses=1]
br i1 %unique_1.i58, label %function_setup.cont, label %FailedVerify
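When the loaded value is itself a pointer, the explicit type is that pointer type and the operand is a pointer-to-pointer, which is why the hunk above reads load %XXV*, %XXV** null. A sketch with a made-up struct type:

  %struct.T = type { i32 }

  define %struct.T* @ptr_example(%struct.T** %slot) {
    ; loading a %struct.T* out of a %struct.T** slot
    %p = load %struct.T*, %struct.T** %slot, align 8
    ret %struct.T* %p
  }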
Modified: llvm/trunk/test/CodeGen/X86/2009-01-18-ConstantExprCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-01-18-ConstantExprCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-01-18-ConstantExprCrash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-01-18-ConstantExprCrash.ll Fri Feb 27 15:17:42 2015
@@ -25,7 +25,7 @@ bb4.i.i70: ; preds = %bb4.i.i70, %bb.i5
br i1 false, label %_ZN11xercesc_2_59XMLString9stringLenEPKt.exit.i73, label %bb4.i.i70
_ZN11xercesc_2_59XMLString9stringLenEPKt.exit.i73: ; preds = %bb4.i.i70
- %0 = load i16* getelementptr ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE, i32 0, i32 add (i32 ashr (i32 sub (i32 ptrtoint (i16* getelementptr ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE, i32 0, i32 4) to i32), i32 ptrtoint ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE to i32)), i32 1), i32 1)), align 4 ; <i16> [#uses=0]
+ %0 = load i16, i16* getelementptr ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE, i32 0, i32 add (i32 ashr (i32 sub (i32 ptrtoint (i16* getelementptr ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE, i32 0, i32 4) to i32), i32 ptrtoint ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE to i32)), i32 1), i32 1)), align 4 ; <i16> [#uses=0]
br label %bb4.i5.i141
bb4.i5.i141: ; preds = %bb4.i5.i141, %_ZN11xercesc_2_59XMLString9stringLenEPKt.exit.i73
Modified: llvm/trunk/test/CodeGen/X86/2009-01-31-BigShift2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-01-31-BigShift2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-01-31-BigShift2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-01-31-BigShift2.ll Fri Feb 27 15:17:42 2015
@@ -2,7 +2,7 @@
; PR3449
define void @test(<8 x double>* %P, i64* %Q) nounwind {
- %A = load <8 x double>* %P ; <<8 x double>> [#uses=1]
+ %A = load <8 x double>, <8 x double>* %P ; <<8 x double>> [#uses=1]
%B = bitcast <8 x double> %A to i512 ; <i512> [#uses=1]
%C = lshr i512 %B, 448 ; <i512> [#uses=1]
%D = trunc i512 %C to i64 ; <i64> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-02-01-LargeMask.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-02-01-LargeMask.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-02-01-LargeMask.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-02-01-LargeMask.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ entry:
bb.i49.i72: ; preds = %bb.i49.i72, %entry
%UNP.i1482.0 = phi i288 [ %.ins659, %bb.i49.i72 ], [ undef, %entry ] ; <i288> [#uses=1]
- %0 = load i32* null, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%1 = xor i32 %0, 17834 ; <i32> [#uses=1]
%2 = zext i32 %1 to i288 ; <i288> [#uses=1]
%3 = shl i288 %2, 160 ; <i288> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ bb: ; preds = %entry
unreachable
bb1: ; preds = %entry
- %0 = load i32* @g_3, align 4 ; <i32> [#uses=2]
+ %0 = load i32, i32* @g_3, align 4 ; <i32> [#uses=2]
%1 = sext i32 %0 to i64 ; <i64> [#uses=1]
%2 = or i64 %1, %p_66 ; <i64> [#uses=1]
%3 = shl i64 %2, 0 ; <i64> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ target triple = "i386-apple-darwin9.6"
define i32 @alac_decode_frame() nounwind {
entry:
- %tmp2 = load i8** null ; <i8*> [#uses=2]
+ %tmp2 = load i8*, i8** null ; <i8*> [#uses=2]
%tmp34 = getelementptr i8, i8* %tmp2, i32 4 ; <i8*> [#uses=2]
%tmp5.i424 = bitcast i8* %tmp34 to i8** ; <i8**> [#uses=2]
%tmp15.i = getelementptr i8, i8* %tmp2, i32 12 ; <i8*> [#uses=1]
@@ -17,9 +17,9 @@ if.then43: ; preds = %entry
ret i32 0
if.end47: ; preds = %entry
- %tmp5.i590 = load i8** %tmp5.i424 ; <i8*> [#uses=0]
+ %tmp5.i590 = load i8*, i8** %tmp5.i424 ; <i8*> [#uses=0]
store i32 19, i32* %0
- %tmp6.i569 = load i8** %tmp5.i424 ; <i8*> [#uses=0]
+ %tmp6.i569 = load i8*, i8** %tmp5.i424 ; <i8*> [#uses=0]
%1 = call i32 asm "bswap $0", "=r,0,~{dirflag},~{fpsr},~{flags}"(i32 0) nounwind ; <i32> [#uses=0]
br i1 false, label %bb.nph, label %if.then63
Modified: llvm/trunk/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll Fri Feb 27 15:17:42 2015
@@ -29,41 +29,41 @@ entry:
call void @llvm.dbg.declare(metadata [0 x i8]** %str.0, metadata !8, metadata !{!"0x102"}), !dbg !7
%4 = call i8* @llvm.stacksave(), !dbg !7 ; <i8*> [#uses=1]
store i8* %4, i8** %saved_stack.1, align 8, !dbg !7
- %5 = load i8** %s1_addr, align 8, !dbg !13 ; <i8*> [#uses=1]
+ %5 = load i8*, i8** %s1_addr, align 8, !dbg !13 ; <i8*> [#uses=1]
%6 = call i64 @strlen(i8* %5) nounwind readonly, !dbg !13 ; <i64> [#uses=1]
%7 = add i64 %6, 1, !dbg !13 ; <i64> [#uses=1]
store i64 %7, i64* %3, align 8, !dbg !13
- %8 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
+ %8 = load i64, i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
%9 = sub nsw i64 %8, 1, !dbg !13 ; <i64> [#uses=0]
- %10 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
+ %10 = load i64, i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
%11 = mul i64 %10, 8, !dbg !13 ; <i64> [#uses=0]
- %12 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
+ %12 = load i64, i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
store i64 %12, i64* %2, align 8, !dbg !13
- %13 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
+ %13 = load i64, i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
%14 = mul i64 %13, 8, !dbg !13 ; <i64> [#uses=0]
- %15 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
+ %15 = load i64, i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
store i64 %15, i64* %1, align 8, !dbg !13
- %16 = load i64* %1, align 8, !dbg !13 ; <i64> [#uses=1]
+ %16 = load i64, i64* %1, align 8, !dbg !13 ; <i64> [#uses=1]
%17 = trunc i64 %16 to i32, !dbg !13 ; <i32> [#uses=1]
%18 = alloca i8, i32 %17, !dbg !13 ; <i8*> [#uses=1]
%19 = bitcast i8* %18 to [0 x i8]*, !dbg !13 ; <[0 x i8]*> [#uses=1]
store [0 x i8]* %19, [0 x i8]** %str.0, align 8, !dbg !13
- %20 = load [0 x i8]** %str.0, align 8, !dbg !15 ; <[0 x i8]*> [#uses=1]
+ %20 = load [0 x i8]*, [0 x i8]** %str.0, align 8, !dbg !15 ; <[0 x i8]*> [#uses=1]
%21 = getelementptr inbounds [0 x i8], [0 x i8]* %20, i64 0, i64 0, !dbg !15 ; <i8*> [#uses=1]
store i8 0, i8* %21, align 1, !dbg !15
- %22 = load [0 x i8]** %str.0, align 8, !dbg !16 ; <[0 x i8]*> [#uses=1]
+ %22 = load [0 x i8]*, [0 x i8]** %str.0, align 8, !dbg !16 ; <[0 x i8]*> [#uses=1]
%23 = getelementptr inbounds [0 x i8], [0 x i8]* %22, i64 0, i64 0, !dbg !16 ; <i8*> [#uses=1]
- %24 = load i8* %23, align 1, !dbg !16 ; <i8> [#uses=1]
+ %24 = load i8, i8* %23, align 1, !dbg !16 ; <i8> [#uses=1]
%25 = sext i8 %24 to i32, !dbg !16 ; <i32> [#uses=1]
store i32 %25, i32* %0, align 4, !dbg !16
- %26 = load i8** %saved_stack.1, align 8, !dbg !16 ; <i8*> [#uses=1]
+ %26 = load i8*, i8** %saved_stack.1, align 8, !dbg !16 ; <i8*> [#uses=1]
call void @llvm.stackrestore(i8* %26), !dbg !16
- %27 = load i32* %0, align 4, !dbg !16 ; <i32> [#uses=1]
+ %27 = load i32, i32* %0, align 4, !dbg !16 ; <i32> [#uses=1]
store i32 %27, i32* %retval, align 4, !dbg !16
br label %return, !dbg !16
return: ; preds = %entry
- %retval1 = load i32* %retval, !dbg !16 ; <i32> [#uses=1]
+ %retval1 = load i32, i32* %retval, !dbg !16 ; <i32> [#uses=1]
%retval12 = trunc i32 %retval1 to i8, !dbg !16 ; <i8> [#uses=1]
ret i8 %retval12, !dbg !16
}
Modified: llvm/trunk/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll Fri Feb 27 15:17:42 2015
@@ -33,17 +33,17 @@ bb26: ; preds = %bb4
br i1 %cond.i, label %bb.i, label %bb4
bb.i: ; preds = %bb26
- %3 = load i32* null, align 4 ; <i32> [#uses=1]
+ %3 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%4 = uitofp i32 %3 to float ; <float> [#uses=1]
%.sum13.i = add i64 0, 4 ; <i64> [#uses=1]
%5 = getelementptr i8, i8* null, i64 %.sum13.i ; <i8*> [#uses=1]
%6 = bitcast i8* %5 to i32* ; <i32*> [#uses=1]
- %7 = load i32* %6, align 4 ; <i32> [#uses=1]
+ %7 = load i32, i32* %6, align 4 ; <i32> [#uses=1]
%8 = uitofp i32 %7 to float ; <float> [#uses=1]
%.sum.i = add i64 0, 8 ; <i64> [#uses=1]
%9 = getelementptr i8, i8* null, i64 %.sum.i ; <i8*> [#uses=1]
%10 = bitcast i8* %9 to i32* ; <i32*> [#uses=1]
- %11 = load i32* %10, align 4 ; <i32> [#uses=1]
+ %11 = load i32, i32* %10, align 4 ; <i32> [#uses=1]
%12 = uitofp i32 %11 to float ; <float> [#uses=1]
%13 = insertelement <4 x float> undef, float %4, i32 0 ; <<4 x float>> [#uses=1]
%14 = insertelement <4 x float> %13, float %8, i32 1 ; <<4 x float>> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-03-03-BTHang.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-03-03-BTHang.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-03-03-BTHang.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-03-03-BTHang.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ entry:
%1 = and i32 %0, -4096 ; <i32> [#uses=1]
%2 = inttoptr i32 %1 to %struct.HandleBlock* ; <%struct.HandleBlock*> [#uses=3]
%3 = getelementptr %struct.HandleBlock, %struct.HandleBlock* %2, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
- %4 = load i32* %3, align 4096 ; <i32> [#uses=1]
+ %4 = load i32, i32* %3, align 4096 ; <i32> [#uses=1]
%5 = icmp eq i32 %4, 1751280747 ; <i1> [#uses=1]
br i1 %5, label %bb, label %bb1
@@ -25,7 +25,7 @@ bb: ; preds = %entry
%not.i = and i32 %9, 31 ; <i32> [#uses=1]
%13 = xor i32 %not.i, 31 ; <i32> [#uses=1]
%14 = shl i32 1, %13 ; <i32> [#uses=1]
- %15 = load i32* %12, align 4 ; <i32> [#uses=1]
+ %15 = load i32, i32* %12, align 4 ; <i32> [#uses=1]
%16 = and i32 %15, %14 ; <i32> [#uses=1]
%17 = icmp eq i32 %16, 0 ; <i1> [#uses=1]
%tmp = zext i1 %17 to i8 ; <i8> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-03-05-burr-list-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-03-05-burr-list-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-03-05-burr-list-crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-03-05-burr-list-crash.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ define fastcc i8* @1(i8*) nounwind {
; <label>:3 ; preds = %1
%4 = call i64 @strlen(i8* %0) nounwind readonly ; <i64> [#uses=1]
%5 = trunc i64 %4 to i32 ; <i32> [#uses=2]
- %6 = load i32* @0, align 4 ; <i32> [#uses=1]
+ %6 = load i32, i32* @0, align 4 ; <i32> [#uses=1]
%7 = sub i32 %5, %6 ; <i32> [#uses=2]
%8 = sext i32 %5 to i64 ; <i64> [#uses=1]
%9 = sext i32 %7 to i64 ; <i64> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-03-09-APIntCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-03-09-APIntCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-03-09-APIntCrash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-03-09-APIntCrash.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ entry:
br i1 false, label %if.then, label %return
if.then: ; preds = %entry
- %srcval18 = load i128* null, align 8 ; <i128> [#uses=1]
+ %srcval18 = load i128, i128* null, align 8 ; <i128> [#uses=1]
%tmp15 = lshr i128 %srcval18, 64 ; <i128> [#uses=1]
%tmp9 = mul i128 %tmp15, 18446744073709551616000 ; <i128> [#uses=1]
br label %return
Modified: llvm/trunk/test/CodeGen/X86/2009-03-10-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-03-10-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-03-10-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-03-10-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
define i32 @pnoutrefresh(%struct.WINDOW* %win, i32 %pminrow, i32 %pmincol, i32 %sminrow, i32 %smincol, i32 %smaxrow, i32 %smaxcol) nounwind optsize ssp {
entry:
- %0 = load i16* null, align 4 ; <i16> [#uses=2]
+ %0 = load i16, i16* null, align 4 ; <i16> [#uses=2]
%1 = icmp sgt i16 0, %0 ; <i1> [#uses=1]
br i1 %1, label %bb12, label %bb13
Modified: llvm/trunk/test/CodeGen/X86/2009-03-23-LinearScanBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-03-23-LinearScanBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-03-23-LinearScanBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-03-23-LinearScanBug.ll Fri Feb 27 15:17:42 2015
@@ -2,9 +2,9 @@
define fastcc void @optimize_bit_field() nounwind {
bb4:
- %a = load i32* null ; <i32> [#uses=1]
- %s = load i32* getelementptr (i32* null, i32 1) ; <i32> [#uses=1]
- %z = load i32* getelementptr (i32* null, i32 2) ; <i32> [#uses=1]
+ %a = load i32, i32* null ; <i32> [#uses=1]
+ %s = load i32, i32* getelementptr (i32* null, i32 1) ; <i32> [#uses=1]
+ %z = load i32, i32* getelementptr (i32* null, i32 2) ; <i32> [#uses=1]
%r = bitcast i32 0 to i32 ; <i32> [#uses=1]
%q = trunc i32 %z to i8 ; <i8> [#uses=1]
%b = icmp eq i8 0, %q ; <i1> [#uses=1]
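Constant-expression pointer operands are rewritten the same way: the explicit result type is prepended and the expression itself is left intact. Note that the getelementptr constant expressions above still use their old spelling; giving GEP its own explicit type parameter is a separate change. An illustrative sketch (the global @arr is made up):

  @arr = global [4 x i32] zeroinitializer

  define i32 @constexpr_example() {
    ; explicit i32 result type; the constexpr GEP keeps its pre-existing form
    %v = load i32, i32* getelementptr ([4 x i32]* @arr, i32 0, i32 1)
    ret i32 %v
  }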
Modified: llvm/trunk/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-03-23-MultiUseSched.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-03-23-MultiUseSched.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-03-23-MultiUseSched.ll Fri Feb 27 15:17:42 2015
@@ -10,30 +10,30 @@
@X = external global i64 ; <i64*> [#uses=25]
define fastcc i64 @foo() nounwind {
- %tmp = load volatile i64* @X ; <i64> [#uses=7]
- %tmp1 = load volatile i64* @X ; <i64> [#uses=5]
- %tmp2 = load volatile i64* @X ; <i64> [#uses=3]
- %tmp3 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp4 = load volatile i64* @X ; <i64> [#uses=5]
- %tmp5 = load volatile i64* @X ; <i64> [#uses=3]
- %tmp6 = load volatile i64* @X ; <i64> [#uses=2]
- %tmp7 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp8 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp9 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp10 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp11 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp12 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp13 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp14 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp15 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp16 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp17 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp18 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp19 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp20 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp21 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp22 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp23 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp = load volatile i64, i64* @X ; <i64> [#uses=7]
+ %tmp1 = load volatile i64, i64* @X ; <i64> [#uses=5]
+ %tmp2 = load volatile i64, i64* @X ; <i64> [#uses=3]
+ %tmp3 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp4 = load volatile i64, i64* @X ; <i64> [#uses=5]
+ %tmp5 = load volatile i64, i64* @X ; <i64> [#uses=3]
+ %tmp6 = load volatile i64, i64* @X ; <i64> [#uses=2]
+ %tmp7 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp8 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp9 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp10 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp11 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp12 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp13 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp14 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp15 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp16 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp17 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp18 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp19 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp20 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp21 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp22 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp23 = load volatile i64, i64* @X ; <i64> [#uses=1]
%tmp24 = call i64 @llvm.bswap.i64(i64 %tmp8) ; <i64> [#uses=1]
%tmp25 = add i64 %tmp6, %tmp5 ; <i64> [#uses=1]
%tmp26 = add i64 %tmp25, %tmp4 ; <i64> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-03-25-TestBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-03-25-TestBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-03-25-TestBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-03-25-TestBug.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@
define void @func(i32* %b) nounwind {
bb1579.i.i: ; preds = %bb1514.i.i, %bb191.i.i
- %tmp176 = load i32* %b, align 4
+ %tmp176 = load i32, i32* %b, align 4
%tmp177 = and i32 %tmp176, 2
%tmp178 = icmp eq i32 %tmp177, 0
br i1 %tmp178, label %hello, label %world
Modified: llvm/trunk/test/CodeGen/X86/2009-04-14-IllegalRegs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-04-14-IllegalRegs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-04-14-IllegalRegs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-04-14-IllegalRegs.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ entry:
store i8 48, i8* %2, align 1
%3 = getelementptr %struct.X, %struct.X* %xxx, i32 0, i32 1 ; <[32 x i8]*> [#uses=1]
%4 = getelementptr [32 x i8], [32 x i8]* %3, i32 0, i32 31 ; <i8*> [#uses=1]
- %5 = load i8* %4, align 1 ; <i8> [#uses=1]
+ %5 = load i8, i8* %4, align 1 ; <i8> [#uses=1]
%6 = getelementptr %struct.X, %struct.X* %xxx, i32 0, i32 1 ; <[32 x i8]*> [#uses=1]
%7 = getelementptr [32 x i8], [32 x i8]* %6, i32 0, i32 0 ; <i8*> [#uses=1]
store i8 %5, i8* %7, align 1
@@ -23,12 +23,12 @@ entry:
store i8 15, i8* %8, align 1
%9 = call i32 (...)* bitcast (i32 (%struct.X*, %struct.X*)* @f to i32 (...)*)(%struct.X* byval align 4 %xxx, %struct.X* byval align 4 %xxx) nounwind ; <i32> [#uses=1]
store i32 %9, i32* %0, align 4
- %10 = load i32* %0, align 4 ; <i32> [#uses=1]
+ %10 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
store i32 %10, i32* %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval1
}
Modified: llvm/trunk/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll Fri Feb 27 15:17:42 2015
@@ -72,7 +72,7 @@ bb349: ; preds = %bb349, %entry
%47 = and i64 %20, %not417 ; <i64> [#uses=1]
%48 = xor i64 0, %47 ; <i64> [#uses=1]
%49 = getelementptr [80 x i64], [80 x i64]* @K512, i64 0, i64 0 ; <i64*> [#uses=1]
- %50 = load i64* %49, align 8 ; <i64> [#uses=1]
+ %50 = load i64, i64* %49, align 8 ; <i64> [#uses=1]
%51 = add i64 %48, 0 ; <i64> [#uses=1]
%52 = add i64 %51, 0 ; <i64> [#uses=1]
%53 = add i64 %52, 0 ; <i64> [#uses=1]
@@ -88,12 +88,12 @@ bb349: ; preds = %bb349, %entry
%61 = and i32 %60, 15 ; <i32> [#uses=1]
%62 = zext i32 %61 to i64 ; <i64> [#uses=1]
%63 = getelementptr [16 x i64], [16 x i64]* null, i64 0, i64 %62 ; <i64*> [#uses=2]
- %64 = load i64* null, align 8 ; <i64> [#uses=1]
+ %64 = load i64, i64* null, align 8 ; <i64> [#uses=1]
%65 = lshr i64 %64, 6 ; <i64> [#uses=1]
%66 = xor i64 0, %65 ; <i64> [#uses=1]
%67 = xor i64 %66, 0 ; <i64> [#uses=1]
- %68 = load i64* %46, align 8 ; <i64> [#uses=1]
- %69 = load i64* null, align 8 ; <i64> [#uses=1]
+ %68 = load i64, i64* %46, align 8 ; <i64> [#uses=1]
+ %69 = load i64, i64* null, align 8 ; <i64> [#uses=1]
%70 = add i64 %68, 0 ; <i64> [#uses=1]
%71 = add i64 %70, %67 ; <i64> [#uses=1]
%72 = add i64 %71, %69 ; <i64> [#uses=1]
@@ -106,7 +106,7 @@ bb349: ; preds = %bb349, %entry
%76 = and i64 %33, %not429 ; <i64> [#uses=1]
%77 = xor i64 %75, %76 ; <i64> [#uses=1]
%78 = getelementptr [80 x i64], [80 x i64]* @K512, i64 0, i64 0 ; <i64*> [#uses=1]
- %79 = load i64* %78, align 16 ; <i64> [#uses=1]
+ %79 = load i64, i64* %78, align 16 ; <i64> [#uses=1]
%80 = add i64 %77, %20 ; <i64> [#uses=1]
%81 = add i64 %80, %72 ; <i64> [#uses=1]
%82 = add i64 %81, %74 ; <i64> [#uses=1]
@@ -119,14 +119,14 @@ bb349: ; preds = %bb349, %entry
%87 = add i64 0, %85 ; <i64> [#uses=1]
%asmtmp435 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 8, i64 0) nounwind ; <i64> [#uses=1]
%88 = xor i64 0, %asmtmp435 ; <i64> [#uses=1]
- %89 = load i64* null, align 8 ; <i64> [#uses=3]
+ %89 = load i64, i64* null, align 8 ; <i64> [#uses=3]
%asmtmp436 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 19, i64 %89) nounwind ; <i64> [#uses=1]
%asmtmp437 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 61, i64 %89) nounwind ; <i64> [#uses=1]
%90 = lshr i64 %89, 6 ; <i64> [#uses=1]
%91 = xor i64 %asmtmp436, %90 ; <i64> [#uses=1]
%92 = xor i64 %91, %asmtmp437 ; <i64> [#uses=1]
- %93 = load i64* %63, align 8 ; <i64> [#uses=1]
- %94 = load i64* null, align 8 ; <i64> [#uses=1]
+ %93 = load i64, i64* %63, align 8 ; <i64> [#uses=1]
+ %94 = load i64, i64* null, align 8 ; <i64> [#uses=1]
%95 = add i64 %93, %88 ; <i64> [#uses=1]
%96 = add i64 %95, %92 ; <i64> [#uses=1]
%97 = add i64 %96, %94 ; <i64> [#uses=2]
Modified: llvm/trunk/test/CodeGen/X86/2009-04-24.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-04-24.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-04-24.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-04-24.ll Fri Feb 27 15:17:42 2015
@@ -8,6 +8,6 @@
define i32 @f() {
entry:
- %tmp1 = load i32* @i
+ %tmp1 = load i32, i32* @i
ret i32 %tmp1
}
Modified: llvm/trunk/test/CodeGen/X86/2009-04-25-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-04-25-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-04-25-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-04-25-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ entry:
br label %while.cond
while.cond: ; preds = %while.cond, %entry
- %tmp15 = load i32* %tmp13 ; <i32> [#uses=2]
+ %tmp15 = load i32, i32* %tmp13 ; <i32> [#uses=2]
%bf.lo = lshr i32 %tmp15, 1 ; <i32> [#uses=1]
%bf.lo.cleared = and i32 %bf.lo, 2147483647 ; <i32> [#uses=1]
%conv = zext i32 %bf.lo.cleared to i64 ; <i64> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-04-27-CoalescerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-04-27-CoalescerAssert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-04-27-CoalescerAssert.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-04-27-CoalescerAssert.ll Fri Feb 27 15:17:42 2015
@@ -22,9 +22,9 @@
define void @getAffNeighbour(i32 %curr_mb_nr, i32 %xN, i32 %yN, i32 %is_chroma, %struct.PixelPos* %pix) nounwind {
entry:
%Opq.sa.calc = add i32 0, 2 ; <i32> [#uses=2]
- %0 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=3]
+ %0 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=3]
%1 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %0, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %2 = load %struct.Macroblock** %1, align 8 ; <%struct.Macroblock*> [#uses=24]
+ %2 = load %struct.Macroblock*, %struct.Macroblock** %1, align 8 ; <%struct.Macroblock*> [#uses=24]
%3 = zext i32 %curr_mb_nr to i64 ; <i64> [#uses=24]
%4 = sext i32 %is_chroma to i64 ; <i64> [#uses=8]
br label %meshBB392
@@ -32,9 +32,9 @@ entry:
entry.fragment: ; preds = %meshBB392
%Opq.sa.calc747 = add i32 %Opq.sa.calc921, 70 ; <i32> [#uses=0]
%5 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %0, i64 0, i32 119, i64 %4, i64 0 ; <i32*> [#uses=1]
- %6 = load i32* %5, align 4 ; <i32> [#uses=2]
+ %6 = load i32, i32* %5, align 4 ; <i32> [#uses=2]
%7 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %0, i64 0, i32 119, i64 %4, i64 1 ; <i32*> [#uses=1]
- %8 = load i32* %7, align 4 ; <i32> [#uses=5]
+ %8 = load i32, i32* %7, align 4 ; <i32> [#uses=5]
br label %entry.fragment181
entry.fragment181: ; preds = %entry.fragment
@@ -75,7 +75,7 @@ bb4: ; preds = %bb3
bb5: ; preds = %meshBB428
%Opq.sa.calc470 = sub i32 %Opq.sa.calc897, -49 ; <i32> [#uses=1]
%17 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
- %18 = load i32* %17, align 4 ; <i32> [#uses=1]
+ %18 = load i32, i32* %17, align 4 ; <i32> [#uses=1]
br label %bb5.fragment
bb5.fragment: ; preds = %bb5
@@ -92,7 +92,7 @@ bb6: ; preds = %bb5.fragment
bb7: ; preds = %bb6
%Opq.sa.calc476 = add i32 %Opq.sa.calc873, -58 ; <i32> [#uses=1]
%22 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 25 ; <i32*> [#uses=1]
- %23 = load i32* %22, align 8 ; <i32> [#uses=1]
+ %23 = load i32, i32* %22, align 8 ; <i32> [#uses=1]
%24 = add i32 %23, 1 ; <i32> [#uses=1]
%25 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
br label %meshBB388
@@ -103,14 +103,14 @@ bb7.fragment: ; preds = %meshBB388
%Opq.sa.calc708 = xor i32 %Opq.sa.calc707, 474 ; <i32> [#uses=0]
store i32 %.SV194.phi, i32* %.SV196.phi, align 4
%26 = getelementptr %struct.Macroblock, %struct.Macroblock* %.load17.SV.phi, i64 %.load36.SV.phi, i32 29 ; <i32*> [#uses=1]
- %27 = load i32* %26, align 8 ; <i32> [#uses=2]
+ %27 = load i32, i32* %26, align 8 ; <i32> [#uses=2]
store i32 %27, i32* %.load67.SV.phi, align 4
br label %bb96
bb8: ; preds = %meshBB348
%Opq.sa.calc479 = sub i32 %Opq.sa.calc805, 141 ; <i32> [#uses=1]
%28 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 22 ; <i32*> [#uses=2]
- %29 = load i32* %28, align 4 ; <i32> [#uses=2]
+ %29 = load i32, i32* %28, align 4 ; <i32> [#uses=2]
%30 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
br label %meshBB368
@@ -118,25 +118,25 @@ bb8.fragment: ; preds = %meshBB368
%Opq.sa.calc765 = sub i32 %Opq.sa.calc768, -115 ; <i32> [#uses=2]
store i32 %.SV198.phi, i32* %.SV200.phi, align 4
%31 = getelementptr %struct.Macroblock, %struct.Macroblock* %.load16.SV.phi, i64 %.load35.SV.phi, i32 26 ; <i32*> [#uses=2]
- %32 = load i32* %31, align 4 ; <i32> [#uses=4]
+ %32 = load i32, i32* %31, align 4 ; <i32> [#uses=4]
store i32 %32, i32* %.load66.SV.phi, align 4
- %33 = load i32* %31, align 4 ; <i32> [#uses=1]
+ %33 = load i32, i32* %31, align 4 ; <i32> [#uses=1]
%34 = icmp eq i32 %33, 0 ; <i1> [#uses=1]
br i1 %34, label %bb96, label %bb9
bb9: ; preds = %bb8.fragment
%Opq.sa.calc482 = xor i32 %Opq.sa.calc765, 163 ; <i32> [#uses=0]
- %35 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %35 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%36 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %35, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %37 = load %struct.Macroblock** %36, align 8 ; <%struct.Macroblock*> [#uses=1]
- %38 = load i32* %.SV76.phi, align 4 ; <i32> [#uses=1]
+ %37 = load %struct.Macroblock*, %struct.Macroblock** %36, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %38 = load i32, i32* %.SV76.phi, align 4 ; <i32> [#uses=1]
br label %bb9.fragment
bb9.fragment: ; preds = %bb9
%Opq.sa.calc999 = add i32 %Opq.sa.calc765, -44 ; <i32> [#uses=1]
%39 = sext i32 %38 to i64 ; <i64> [#uses=1]
%40 = getelementptr %struct.Macroblock, %struct.Macroblock* %37, i64 %39, i32 20 ; <i32*> [#uses=1]
- %41 = load i32* %40, align 4 ; <i32> [#uses=1]
+ %41 = load i32, i32* %40, align 4 ; <i32> [#uses=1]
%42 = icmp eq i32 %41, 0 ; <i1> [#uses=1]
br i1 %42, label %bb96, label %bb11
@@ -161,7 +161,7 @@ bb13: ; preds = %bb5.fragment
bb13.fragment: ; preds = %meshBB360
%Opq.sa.calc870 = add i32 %Opq.sa.calc866, -129 ; <i32> [#uses=3]
- %47 = load i32* %.SV208.phi, align 8 ; <i32> [#uses=3]
+ %47 = load i32, i32* %.SV208.phi, align 8 ; <i32> [#uses=3]
br i1 %.load74.SV.phi, label %bb14, label %meshBB412
bb14: ; preds = %bb13.fragment
@@ -173,25 +173,25 @@ bb14: ; preds = %bb13.fragment
bb14.fragment: ; preds = %bb14
%Opq.sa.calc723 = sub i32 %Opq.sa.calc493, 117 ; <i32> [#uses=4]
- %50 = load i32* %49, align 8 ; <i32> [#uses=4]
+ %50 = load i32, i32* %49, align 8 ; <i32> [#uses=4]
store i32 %50, i32* %.SV52.phi1113, align 4
- %51 = load i32* %49, align 8 ; <i32> [#uses=1]
+ %51 = load i32, i32* %49, align 8 ; <i32> [#uses=1]
%52 = icmp eq i32 %51, 0 ; <i1> [#uses=1]
br i1 %52, label %meshBB, label %bb15
bb15: ; preds = %bb14.fragment
%Opq.sa.calc496 = sub i32 %Opq.sa.calc723, -8 ; <i32> [#uses=1]
- %53 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %53 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%54 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %53, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %55 = load %struct.Macroblock** %54, align 8 ; <%struct.Macroblock*> [#uses=1]
- %56 = load i32* %.SV208.phi, align 8 ; <i32> [#uses=1]
+ %55 = load %struct.Macroblock*, %struct.Macroblock** %54, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %56 = load i32, i32* %.SV208.phi, align 8 ; <i32> [#uses=1]
br label %meshBB324
bb15.fragment: ; preds = %meshBB324
%Opq.sa.calc925 = xor i32 %Opq.sa.calc750, 215 ; <i32> [#uses=2]
%57 = sext i32 %.SV214.phi to i64 ; <i64> [#uses=1]
%58 = getelementptr %struct.Macroblock, %struct.Macroblock* %.SV212.phi, i64 %57, i32 20 ; <i32*> [#uses=1]
- %59 = load i32* %58, align 4 ; <i32> [#uses=1]
+ %59 = load i32, i32* %58, align 4 ; <i32> [#uses=1]
%60 = icmp eq i32 %59, 0 ; <i1> [#uses=1]
br i1 %60, label %bb16, label %bb96
@@ -216,7 +216,7 @@ bb19.fragment: ; preds = %bb19
%Opq.sa.calc880 = xor i32 %Opq.sa.calc932, 246 ; <i32> [#uses=0]
store i32 %63, i32* %64, align 4
%65 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 29 ; <i32*> [#uses=1]
- %66 = load i32* %65, align 8 ; <i32> [#uses=2]
+ %66 = load i32, i32* %65, align 8 ; <i32> [#uses=2]
store i32 %66, i32* %.SV52.phi1186, align 4
br label %bb96
@@ -228,7 +228,7 @@ bb23: ; preds = %meshBB360
%Opq.sa.calc509 = xor i32 %Opq.sa.calc866, 70 ; <i32> [#uses=1]
%Opq.sa.calc508 = sub i32 %Opq.sa.calc509, -19 ; <i32> [#uses=0]
%67 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
- %68 = load i32* %67, align 4 ; <i32> [#uses=1]
+ %68 = load i32, i32* %67, align 4 ; <i32> [#uses=1]
%69 = icmp eq i32 %68, 0 ; <i1> [#uses=1]
%70 = and i32 %curr_mb_nr, 1 ; <i32> [#uses=1]
%71 = icmp eq i32 %70, 0 ; <i1> [#uses=2]
@@ -237,7 +237,7 @@ bb23: ; preds = %meshBB360
bb23.fragment: ; preds = %bb23
%Opq.sa.calc847 = sub i32 %Opq.sa.calc866, -9 ; <i32> [#uses=2]
%72 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 22 ; <i32*> [#uses=3]
- %73 = load i32* %72, align 4 ; <i32> [#uses=3]
+ %73 = load i32, i32* %72, align 4 ; <i32> [#uses=3]
%74 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=3]
store i32 %73, i32* %74, align 4
br label %bb23.fragment182
@@ -247,9 +247,9 @@ bb23.fragment182: ; preds = %bb23.fragm
%Opq.sa.calc742 = add i32 %Opq.sa.calc744, %Opq.sa.calc847 ; <i32> [#uses=1]
%Opq.sa.calc743 = add i32 %Opq.sa.calc742, -149 ; <i32> [#uses=2]
%75 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 26 ; <i32*> [#uses=2]
- %76 = load i32* %75, align 4 ; <i32> [#uses=3]
+ %76 = load i32, i32* %75, align 4 ; <i32> [#uses=3]
store i32 %76, i32* %.SV52.phi1113, align 4
- %77 = load i32* %75, align 4 ; <i32> [#uses=1]
+ %77 = load i32, i32* %75, align 4 ; <i32> [#uses=1]
%78 = icmp ne i32 %77, 0 ; <i1> [#uses=2]
br i1 %69, label %meshBB344, label %meshBB432
@@ -264,10 +264,10 @@ bb25: ; preds = %bb24
bb26: ; preds = %bb25
%Opq.sa.calc519 = xor i32 %Opq.sa.calc515, 23 ; <i32> [#uses=2]
%Opq.sa.calc518 = xor i32 %Opq.sa.calc519, 84 ; <i32> [#uses=1]
- %79 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %79 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%80 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %79, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %81 = load %struct.Macroblock** %80, align 8 ; <%struct.Macroblock*> [#uses=1]
- %82 = load i32* %.SV99.phi, align 4 ; <i32> [#uses=1]
+ %81 = load %struct.Macroblock*, %struct.Macroblock** %80, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %82 = load i32, i32* %.SV99.phi, align 4 ; <i32> [#uses=1]
br label %meshBB340
bb26.fragment: ; preds = %meshBB340
@@ -276,7 +276,7 @@ bb26.fragment: ; preds = %meshBB340
%Opq.sa.calc917 = add i32 %Opq.sa.calc916, -237 ; <i32> [#uses=1]
%83 = sext i32 %.SV230.phi to i64 ; <i64> [#uses=1]
%84 = getelementptr %struct.Macroblock, %struct.Macroblock* %.SV228.phi, i64 %83, i32 20 ; <i32*> [#uses=1]
- %85 = load i32* %84, align 4 ; <i32> [#uses=1]
+ %85 = load i32, i32* %84, align 4 ; <i32> [#uses=1]
%86 = icmp eq i32 %85, 0 ; <i1> [#uses=1]
br i1 %86, label %meshBB420, label %meshBB356
@@ -308,17 +308,17 @@ bb32: ; preds = %bb24
bb33: ; preds = %bb32
%Opq.sa.calc534 = sub i32 %Opq.sa.calc512, -75 ; <i32> [#uses=2]
- %92 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %92 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%93 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %92, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %94 = load %struct.Macroblock** %93, align 8 ; <%struct.Macroblock*> [#uses=1]
- %95 = load i32* %.SV99.phi, align 4 ; <i32> [#uses=1]
+ %94 = load %struct.Macroblock*, %struct.Macroblock** %93, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %95 = load i32, i32* %.SV99.phi, align 4 ; <i32> [#uses=1]
br label %bb33.fragment
bb33.fragment: ; preds = %bb33
%Opq.sa.calc712 = add i32 %Opq.sa.calc534, -109 ; <i32> [#uses=3]
%96 = sext i32 %95 to i64 ; <i64> [#uses=1]
%97 = getelementptr %struct.Macroblock, %struct.Macroblock* %94, i64 %96, i32 20 ; <i32*> [#uses=1]
- %98 = load i32* %97, align 4 ; <i32> [#uses=1]
+ %98 = load i32, i32* %97, align 4 ; <i32> [#uses=1]
%99 = icmp eq i32 %98, 0 ; <i1> [#uses=1]
br i1 %99, label %bb34, label %meshBB
@@ -372,17 +372,17 @@ bb40: ; preds = %bb39
bb41: ; preds = %meshBB336
%Opq.sa.calc557 = sub i32 %Opq.sa.calc979, 143 ; <i32> [#uses=1]
- %108 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %108 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%109 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %108, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %110 = load %struct.Macroblock** %109, align 8 ; <%struct.Macroblock*> [#uses=1]
- %111 = load i32* %.SV99.phi1128, align 4 ; <i32> [#uses=1]
+ %110 = load %struct.Macroblock*, %struct.Macroblock** %109, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %111 = load i32, i32* %.SV99.phi1128, align 4 ; <i32> [#uses=1]
br label %bb41.fragment
bb41.fragment: ; preds = %bb41
%Opq.sa.calc987 = xor i32 %Opq.sa.calc557, 213 ; <i32> [#uses=4]
%112 = sext i32 %111 to i64 ; <i64> [#uses=1]
%113 = getelementptr %struct.Macroblock, %struct.Macroblock* %110, i64 %112, i32 20 ; <i32*> [#uses=1]
- %114 = load i32* %113, align 4 ; <i32> [#uses=1]
+ %114 = load i32, i32* %113, align 4 ; <i32> [#uses=1]
%115 = icmp eq i32 %114, 0 ; <i1> [#uses=1]
br i1 %115, label %bb42, label %bb96
@@ -415,17 +415,17 @@ bb48: ; preds = %bb39
bb49: ; preds = %bb48
%Opq.sa.calc572 = add i32 %Opq.sa.calc798, 84 ; <i32> [#uses=0]
- %122 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %122 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%123 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %122, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %124 = load %struct.Macroblock** %123, align 8 ; <%struct.Macroblock*> [#uses=1]
- %125 = load i32* %.SV99.phi1037, align 4 ; <i32> [#uses=1]
+ %124 = load %struct.Macroblock*, %struct.Macroblock** %123, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %125 = load i32, i32* %.SV99.phi1037, align 4 ; <i32> [#uses=1]
br label %bb49.fragment
bb49.fragment: ; preds = %bb49
%Opq.sa.calc860 = sub i32 %Opq.sa.calc569, 114 ; <i32> [#uses=5]
%126 = sext i32 %125 to i64 ; <i64> [#uses=1]
%127 = getelementptr %struct.Macroblock, %struct.Macroblock* %124, i64 %126, i32 20 ; <i32*> [#uses=1]
- %128 = load i32* %127, align 4 ; <i32> [#uses=1]
+ %128 = load i32, i32* %127, align 4 ; <i32> [#uses=1]
%129 = icmp eq i32 %128, 0 ; <i1> [#uses=1]
br i1 %129, label %bb50, label %meshBB380
@@ -485,7 +485,7 @@ bb58: ; preds = %bb56.fragment
bb59: ; preds = %bb58
%Opq.sa.calc599 = add i32 %Opq.sa.calc1002, 151 ; <i32> [#uses=0]
%141 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
- %142 = load i32* %141, align 4 ; <i32> [#uses=1]
+ %142 = load i32, i32* %141, align 4 ; <i32> [#uses=1]
br label %bb59.fragment
bb59.fragment: ; preds = %bb59
@@ -502,7 +502,7 @@ bb60: ; preds = %bb59.fragment
bb61: ; preds = %bb60
%Opq.sa.calc605 = xor i32 %Opq.sa.calc731, 57 ; <i32> [#uses=1]
%146 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 23 ; <i32*> [#uses=2]
- %147 = load i32* %146, align 8 ; <i32> [#uses=3]
+ %147 = load i32, i32* %146, align 8 ; <i32> [#uses=3]
%148 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=3]
br label %bb61.fragment
@@ -510,23 +510,23 @@ bb61.fragment: ; preds = %bb61
%Opq.sa.calc700 = sub i32 %Opq.sa.calc605, 108 ; <i32> [#uses=3]
store i32 %147, i32* %148, align 4
%149 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 27 ; <i32*> [#uses=4]
- %150 = load i32* %149, align 8 ; <i32> [#uses=1]
+ %150 = load i32, i32* %149, align 8 ; <i32> [#uses=1]
%151 = icmp eq i32 %150, 0 ; <i1> [#uses=1]
br i1 %151, label %bb65, label %bb62
bb62: ; preds = %bb61.fragment
%Opq.sa.calc608 = add i32 %Opq.sa.calc700, -94 ; <i32> [#uses=1]
- %152 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=2]
+ %152 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=2]
%153 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %152, i64 0, i32 45 ; <i32*> [#uses=1]
- %154 = load i32* %153, align 4 ; <i32> [#uses=1]
+ %154 = load i32, i32* %153, align 4 ; <i32> [#uses=1]
%155 = icmp eq i32 %154, 1 ; <i1> [#uses=1]
br i1 %155, label %bb63, label %bb64
bb63: ; preds = %bb62
%Opq.sa.calc611 = add i32 %Opq.sa.calc700, -101 ; <i32> [#uses=2]
%156 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %152, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %157 = load %struct.Macroblock** %156, align 8 ; <%struct.Macroblock*> [#uses=1]
- %158 = load i32* %146, align 8 ; <i32> [#uses=1]
+ %157 = load %struct.Macroblock*, %struct.Macroblock** %156, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %158 = load i32, i32* %146, align 8 ; <i32> [#uses=1]
br label %meshBB452
bb63.fragment: ; preds = %meshBB452
@@ -534,7 +534,7 @@ bb63.fragment: ; preds = %meshBB452
%Opq.sa.calc890 = add i32 %Opq.sa.calc891, -3 ; <i32> [#uses=2]
%159 = sext i32 %.SV266.phi to i64 ; <i64> [#uses=1]
%160 = getelementptr %struct.Macroblock, %struct.Macroblock* %.SV264.phi, i64 %159, i32 20 ; <i32*> [#uses=1]
- %161 = load i32* %160, align 4 ; <i32> [#uses=1]
+ %161 = load i32, i32* %160, align 4 ; <i32> [#uses=1]
%162 = icmp eq i32 %161, 0 ; <i1> [#uses=1]
br i1 %162, label %bb64, label %meshBB456
@@ -562,7 +562,7 @@ bb65: ; preds = %meshBB456, %bb64, %bb6
%Opq.link.SV618.phi = phi i32 [ %Opq.sa.calc816, %meshBB456 ], [ %Opq.sa.calc700, %bb61.fragment ], [ %Opq.sa.calc614, %bb64 ] ; <i32> [#uses=1]
%Opq.link.mask620 = and i32 %Opq.link.SV618.phi, 40 ; <i32> [#uses=1]
%Opq.sa.calc617 = add i32 %Opq.link.mask620, -35 ; <i32> [#uses=2]
- %164 = load i32* %.SV152.phi1058, align 8 ; <i32> [#uses=1]
+ %164 = load i32, i32* %.SV152.phi1058, align 8 ; <i32> [#uses=1]
br label %meshBB436
bb65.fragment: ; preds = %meshBB436
@@ -590,7 +590,7 @@ bb68: ; preds = %bb59.fragment
bb68.fragment: ; preds = %meshBB344
%Opq.sa.calc784 = sub i32 %Opq.link.mask722, 3 ; <i32> [#uses=5]
- %168 = load i32* %.SV274.phi, align 8 ; <i32> [#uses=3]
+ %168 = load i32, i32* %.SV274.phi, align 8 ; <i32> [#uses=3]
br i1 %.load144.SV.phi, label %bb69, label %meshBB412
bb69: ; preds = %bb68.fragment
@@ -604,18 +604,18 @@ bb69.fragment: ; preds = %bb69
%Opq.sa.calc996 = sub i32 %Opq.sa.calc784, -9 ; <i32> [#uses=3]
%Opq.sa.calc994 = sub i32 %Opq.sa.calc996, %Opq.sa.calc784 ; <i32> [#uses=1]
%Opq.sa.calc995 = sub i32 %Opq.sa.calc994, 3 ; <i32> [#uses=2]
- %171 = load i32* %170, align 8 ; <i32> [#uses=3]
+ %171 = load i32, i32* %170, align 8 ; <i32> [#uses=3]
store i32 %171, i32* %.SV52.phi1170, align 4
- %172 = load i32* %170, align 8 ; <i32> [#uses=1]
+ %172 = load i32, i32* %170, align 8 ; <i32> [#uses=1]
%173 = icmp eq i32 %172, 0 ; <i1> [#uses=1]
br i1 %173, label %meshBB396, label %meshBB400
bb70: ; preds = %meshBB400
%Opq.sa.calc630 = add i32 %Opq.sa.calc824, -203 ; <i32> [#uses=2]
- %174 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %174 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%175 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %174, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %176 = load %struct.Macroblock** %175, align 8 ; <%struct.Macroblock*> [#uses=1]
- %177 = load i32* %.SV156.phi, align 8 ; <i32> [#uses=1]
+ %176 = load %struct.Macroblock*, %struct.Macroblock** %175, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %177 = load i32, i32* %.SV156.phi, align 8 ; <i32> [#uses=1]
br label %meshBB428
bb70.fragment: ; preds = %meshBB428
@@ -623,7 +623,7 @@ bb70.fragment: ; preds = %meshBB428
%Opq.sa.calc738 = sub i32 %Opq.sa.calc739, 1 ; <i32> [#uses=2]
%178 = sext i32 %.SV280.phi to i64 ; <i64> [#uses=1]
%179 = getelementptr %struct.Macroblock, %struct.Macroblock* %.SV278.phi, i64 %178, i32 20 ; <i32*> [#uses=1]
- %180 = load i32* %179, align 4 ; <i32> [#uses=1]
+ %180 = load i32, i32* %179, align 4 ; <i32> [#uses=1]
%181 = icmp eq i32 %180, 0 ; <i1> [#uses=1]
br i1 %181, label %meshBB452, label %meshBB356
@@ -648,7 +648,7 @@ bb74.fragment: ; preds = %bb74
%Opq.sa.calc1011 = sub i32 %Opq.sa.calc636, -19 ; <i32> [#uses=0]
store i32 %184, i32* %185, align 4
%186 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 27 ; <i32*> [#uses=1]
- %187 = load i32* %186, align 8 ; <i32> [#uses=2]
+ %187 = load i32, i32* %186, align 8 ; <i32> [#uses=2]
store i32 %187, i32* %.SV52.phi1186, align 4
br label %bb96
@@ -660,9 +660,9 @@ bb76: ; preds = %bb58
bb77: ; preds = %bb76
%Opq.sa.calc643 = add i32 %Opq.sa.calc640, 2 ; <i32> [#uses=2]
- %189 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %189 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%190 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %189, i64 0, i32 45 ; <i32*> [#uses=1]
- %191 = load i32* %190, align 4 ; <i32> [#uses=1]
+ %191 = load i32, i32* %190, align 4 ; <i32> [#uses=1]
%192 = icmp eq i32 %191, 2 ; <i1> [#uses=1]
br i1 %192, label %meshBB416, label %bb79
@@ -670,7 +670,7 @@ bb78: ; preds = %meshBB416
%Opq.sa.calc647 = xor i32 %Opq.sa.calc971, 25 ; <i32> [#uses=2]
%Opq.sa.calc646 = sub i32 %Opq.sa.calc647, 29 ; <i32> [#uses=0]
%193 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 23 ; <i32*> [#uses=1]
- %194 = load i32* %193, align 8 ; <i32> [#uses=1]
+ %194 = load i32, i32* %193, align 8 ; <i32> [#uses=1]
%195 = add i32 %194, 1 ; <i32> [#uses=1]
br label %bb78.fragment
@@ -703,7 +703,7 @@ bb83: ; preds = %bb56.fragment
bb84: ; preds = %bb83
%Opq.sa.calc661 = xor i32 %Opq.sa.calc658, 22 ; <i32> [#uses=1]
%199 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
- %200 = load i32* %199, align 4 ; <i32> [#uses=1]
+ %200 = load i32, i32* %199, align 4 ; <i32> [#uses=1]
br label %meshBB400
bb84.fragment: ; preds = %meshBB400
@@ -723,7 +723,7 @@ bb85: ; preds = %meshBB372
bb86: ; preds = %meshBB336
%Opq.sa.calc670 = sub i32 %Opq.sa.calc979, 35 ; <i32> [#uses=1]
%204 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 24 ; <i32*> [#uses=1]
- %205 = load i32* %204, align 4 ; <i32> [#uses=1]
+ %205 = load i32, i32* %204, align 4 ; <i32> [#uses=1]
%206 = add i32 %205, 1 ; <i32> [#uses=1]
%207 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
br label %bb86.fragment
@@ -732,7 +732,7 @@ bb86.fragment: ; preds = %bb86
%Opq.sa.calc943 = xor i32 %Opq.sa.calc670, 123 ; <i32> [#uses=2]
store i32 %206, i32* %207, align 4
%208 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 28 ; <i32*> [#uses=1]
- %209 = load i32* %208, align 4 ; <i32> [#uses=2]
+ %209 = load i32, i32* %208, align 4 ; <i32> [#uses=2]
store i32 %209, i32* %.SV52.phi1234, align 4
br label %meshBB424
@@ -749,7 +749,7 @@ bb89: ; preds = %bb84.fragment
bb89.fragment: ; preds = %bb89
%Opq.sa.calc962 = add i32 %Opq.sa.calc677, -188 ; <i32> [#uses=3]
- %211 = load i32* %210, align 4 ; <i32> [#uses=3]
+ %211 = load i32, i32* %210, align 4 ; <i32> [#uses=3]
br i1 %203, label %bb90, label %meshBB408
bb90: ; preds = %bb89.fragment
@@ -762,25 +762,25 @@ bb90: ; preds = %bb89.fragment
bb90.fragment: ; preds = %bb90
%Opq.sa.calc773 = sub i32 %Opq.sa.calc680, 60 ; <i32> [#uses=3]
%Opq.sa.calc772 = add i32 %Opq.sa.calc773, -25 ; <i32> [#uses=2]
- %214 = load i32* %213, align 4 ; <i32> [#uses=3]
+ %214 = load i32, i32* %213, align 4 ; <i32> [#uses=3]
store i32 %214, i32* %.SV52.phi1190, align 4
- %215 = load i32* %213, align 4 ; <i32> [#uses=1]
+ %215 = load i32, i32* %213, align 4 ; <i32> [#uses=1]
%216 = icmp eq i32 %215, 0 ; <i1> [#uses=1]
br i1 %216, label %meshBB416, label %meshBB368
bb91: ; preds = %meshBB368
%Opq.sa.calc683 = sub i32 %Opq.sa.calc768, -7 ; <i32> [#uses=0]
- %217 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %217 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%218 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %217, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %219 = load %struct.Macroblock** %218, align 8 ; <%struct.Macroblock*> [#uses=1]
- %220 = load i32* %.SV170.phi, align 4 ; <i32> [#uses=1]
+ %219 = load %struct.Macroblock*, %struct.Macroblock** %218, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %220 = load i32, i32* %.SV170.phi, align 4 ; <i32> [#uses=1]
br label %bb91.fragment
bb91.fragment: ; preds = %bb91
%Opq.sa.calc853 = xor i32 %Opq.sa.calc768, 8 ; <i32> [#uses=1]
%221 = sext i32 %220 to i64 ; <i64> [#uses=1]
%222 = getelementptr %struct.Macroblock, %struct.Macroblock* %219, i64 %221, i32 20 ; <i32*> [#uses=1]
- %223 = load i32* %222, align 4 ; <i32> [#uses=1]
+ %223 = load i32, i32* %222, align 4 ; <i32> [#uses=1]
%224 = icmp eq i32 %223, 0 ; <i1> [#uses=1]
br i1 %224, label %bb92, label %bb96
@@ -805,7 +805,7 @@ bb95.fragment: ; preds = %meshBB384
%Opq.sa.calc841 = sub i32 %Opq.sa.calc901, 76 ; <i32> [#uses=0]
store i32 %.SV306.phi, i32* %.SV308.phi, align 4
%229 = getelementptr %struct.Macroblock, %struct.Macroblock* %.load.SV.phi, i64 %.load20.SV.phi, i32 28 ; <i32*> [#uses=1]
- %230 = load i32* %229, align 4 ; <i32> [#uses=2]
+ %230 = load i32, i32* %229, align 4 ; <i32> [#uses=2]
store i32 %230, i32* %.load53.SV.phi, align 4
br label %bb96
@@ -826,13 +826,13 @@ bb97: ; preds = %meshBB424, %meshBB408,
%.SV70.phi1148 = phi i32 [ %.SV70.phi1195, %meshBB424 ], [ %.SV70.phi1215, %meshBB408 ], [ %.SV70.phi1138, %meshBB352 ], [ %.SV70.phi1085, %bb96 ], [ %.SV70.phi1027, %bb21 ] ; <i32> [#uses=1]
%yM.0.reg2mem.0.SV.phi = phi i32 [ -1, %meshBB424 ], [ -1, %meshBB408 ], [ -1, %meshBB352 ], [ %yM.0.SV.phi, %bb96 ], [ -1, %bb21 ] ; <i32> [#uses=1]
%Opq.sa.calc694 = xor i32 0, 243 ; <i32> [#uses=1]
- %232 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %232 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%233 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %232, i64 0, i32 45 ; <i32*> [#uses=1]
br label %bb97.fragment
bb97.fragment: ; preds = %bb97
%Opq.sa.calc928 = xor i32 %Opq.sa.calc694, 128 ; <i32> [#uses=1]
- %234 = load i32* %233, align 4 ; <i32> [#uses=1]
+ %234 = load i32, i32* %233, align 4 ; <i32> [#uses=1]
%235 = icmp eq i32 %234, 0 ; <i1> [#uses=1]
br i1 %235, label %return, label %bb98
@@ -855,13 +855,13 @@ bb98.fragment: ; preds = %meshBB376
%Opq.sa.calc1008 = sub i32 %Opq.link.mask911, 13 ; <i32> [#uses=1]
%241 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 4 ; <i32*> [#uses=4]
%242 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
- %243 = load i32* %242, align 4 ; <i32> [#uses=1]
- %244 = load void (i32, i32*, i32*)** @get_mb_block_pos, align 8 ; <void (i32, i32*, i32*)*> [#uses=1]
+ %243 = load i32, i32* %242, align 4 ; <i32> [#uses=1]
+ %244 = load void (i32, i32*, i32*)*, void (i32, i32*, i32*)** @get_mb_block_pos, align 8 ; <void (i32, i32*, i32*)*> [#uses=1]
tail call void %244(i32 %243, i32* %241, i32* %.SV317.phi) nounwind
- %245 = load i32* %241, align 4 ; <i32> [#uses=1]
- %246 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %245 = load i32, i32* %241, align 4 ; <i32> [#uses=1]
+ %246 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%247 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %246, i64 0, i32 119, i64 %.load39.SV.phi, i64 0 ; <i32*> [#uses=1]
- %248 = load i32* %247, align 4 ; <i32> [#uses=1]
+ %248 = load i32, i32* %247, align 4 ; <i32> [#uses=1]
%249 = mul i32 %248, %245 ; <i32> [#uses=2]
store i32 %249, i32* %241, align 4
br label %bb98.fragment183
@@ -869,15 +869,15 @@ bb98.fragment: ; preds = %meshBB376
bb98.fragment183: ; preds = %bb98.fragment
%Opq.sa.calc777 = sub i32 %Opq.sa.calc1008, -158 ; <i32> [#uses=1]
%Opq.sa.calc776 = sub i32 %Opq.sa.calc777, 46 ; <i32> [#uses=0]
- %250 = load i32* %.SV317.phi, align 4 ; <i32> [#uses=1]
- %251 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %250 = load i32, i32* %.SV317.phi, align 4 ; <i32> [#uses=1]
+ %251 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%252 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %251, i64 0, i32 119, i64 %.load39.SV.phi, i64 1 ; <i32*> [#uses=1]
- %253 = load i32* %252, align 4 ; <i32> [#uses=1]
+ %253 = load i32, i32* %252, align 4 ; <i32> [#uses=1]
%254 = mul i32 %253, %250 ; <i32> [#uses=1]
- %255 = load i32* %.SV313.phi, align 4 ; <i32> [#uses=1]
+ %255 = load i32, i32* %.SV313.phi, align 4 ; <i32> [#uses=1]
%256 = add i32 %255, %249 ; <i32> [#uses=1]
store i32 %256, i32* %241, align 4
- %257 = load i32* %.SV315.phi, align 4 ; <i32> [#uses=1]
+ %257 = load i32, i32* %.SV315.phi, align 4 ; <i32> [#uses=1]
%258 = add i32 %257, %254 ; <i32> [#uses=1]
store i32 %258, i32* %.SV317.phi, align 4
ret void
Modified: llvm/trunk/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll Fri Feb 27 15:17:42 2015
@@ -9,13 +9,13 @@ entry:
%arrayidx4 = getelementptr i32, i32* %data, i32 3 ; <i32*> [#uses=1]
%arrayidx6 = getelementptr i32, i32* %data, i32 4 ; <i32*> [#uses=1]
%arrayidx8 = getelementptr i32, i32* %data, i32 5 ; <i32*> [#uses=1]
- %tmp9 = load i32* %arrayidx8 ; <i32> [#uses=1]
+ %tmp9 = load i32, i32* %arrayidx8 ; <i32> [#uses=1]
%arrayidx11 = getelementptr i32, i32* %data, i32 6 ; <i32*> [#uses=1]
- %tmp12 = load i32* %arrayidx11 ; <i32> [#uses=1]
+ %tmp12 = load i32, i32* %arrayidx11 ; <i32> [#uses=1]
%arrayidx14 = getelementptr i32, i32* %data, i32 7 ; <i32*> [#uses=1]
- %tmp15 = load i32* %arrayidx14 ; <i32> [#uses=1]
+ %tmp15 = load i32, i32* %arrayidx14 ; <i32> [#uses=1]
%arrayidx17 = getelementptr i32, i32* %data, i32 8 ; <i32*> [#uses=1]
- %tmp18 = load i32* %arrayidx17 ; <i32> [#uses=1]
+ %tmp18 = load i32, i32* %arrayidx17 ; <i32> [#uses=1]
%0 = call i32 asm "cpuid", "={ax},=*{bx},=*{cx},=*{dx},{ax},{bx},{cx},{dx},~{dirflag},~{fpsr},~{flags}"(i32* %arrayidx2, i32* %arrayidx4, i32* %arrayidx6, i32 %tmp9, i32 %tmp12, i32 %tmp15, i32 %tmp18) nounwind ; <i32> [#uses=1]
store i32 %0, i32* %arrayidx
ret void
Modified: llvm/trunk/test/CodeGen/X86/2009-04-29-LinearScanBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-04-29-LinearScanBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-04-29-LinearScanBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-04-29-LinearScanBug.ll Fri Feb 27 15:17:42 2015
@@ -105,17 +105,17 @@
define fastcc i32 @pf_state_compare_ext_gwy(%struct.pf_state_key* nocapture %a, %struct.pf_state_key* nocapture %b) nounwind optsize ssp {
entry:
%0 = zext i8 0 to i32 ; <i32> [#uses=2]
- %1 = load i8* null, align 1 ; <i8> [#uses=2]
+ %1 = load i8, i8* null, align 1 ; <i8> [#uses=2]
%2 = zext i8 %1 to i32 ; <i32> [#uses=1]
%3 = sub i32 %0, %2 ; <i32> [#uses=1]
%4 = icmp eq i8 0, %1 ; <i1> [#uses=1]
br i1 %4, label %bb1, label %bb79
bb1: ; preds = %entry
- %5 = load i8* null, align 4 ; <i8> [#uses=2]
+ %5 = load i8, i8* null, align 4 ; <i8> [#uses=2]
%6 = zext i8 %5 to i32 ; <i32> [#uses=2]
%7 = getelementptr %struct.pf_state_key, %struct.pf_state_key* %b, i32 0, i32 3 ; <i8*> [#uses=1]
- %8 = load i8* %7, align 4 ; <i8> [#uses=2]
+ %8 = load i8, i8* %7, align 4 ; <i8> [#uses=2]
%9 = zext i8 %8 to i32 ; <i32> [#uses=1]
%10 = sub i32 %6, %9 ; <i32> [#uses=1]
%11 = icmp eq i8 %5, %8 ; <i1> [#uses=1]
@@ -132,32 +132,32 @@ bb3: ; preds = %bb1
]
bb4: ; preds = %bb3, %bb3
- %12 = load i16* null, align 4 ; <i16> [#uses=1]
+ %12 = load i16, i16* null, align 4 ; <i16> [#uses=1]
%13 = zext i16 %12 to i32 ; <i32> [#uses=1]
%14 = sub i32 0, %13 ; <i32> [#uses=1]
br i1 false, label %bb23, label %bb79
bb6: ; preds = %bb3
- %15 = load i16* null, align 4 ; <i16> [#uses=1]
+ %15 = load i16, i16* null, align 4 ; <i16> [#uses=1]
%16 = zext i16 %15 to i32 ; <i32> [#uses=1]
%17 = sub i32 0, %16 ; <i32> [#uses=1]
ret i32 %17
bb10: ; preds = %bb3
- %18 = load i8* null, align 1 ; <i8> [#uses=2]
+ %18 = load i8, i8* null, align 1 ; <i8> [#uses=2]
%19 = zext i8 %18 to i32 ; <i32> [#uses=1]
%20 = sub i32 0, %19 ; <i32> [#uses=1]
%21 = icmp eq i8 0, %18 ; <i1> [#uses=1]
br i1 %21, label %bb12, label %bb79
bb12: ; preds = %bb10
- %22 = load i16* null, align 4 ; <i16> [#uses=1]
+ %22 = load i16, i16* null, align 4 ; <i16> [#uses=1]
%23 = zext i16 %22 to i32 ; <i32> [#uses=1]
%24 = sub i32 0, %23 ; <i32> [#uses=1]
ret i32 %24
bb17: ; preds = %bb3
- %25 = load i8* null, align 1 ; <i8> [#uses=2]
+ %25 = load i8, i8* null, align 1 ; <i8> [#uses=2]
%26 = icmp eq i8 %25, 1 ; <i1> [#uses=1]
br i1 %26, label %bb18, label %bb23
@@ -166,16 +166,16 @@ bb18: ; preds = %bb17
br i1 %27, label %bb19, label %bb23
bb19: ; preds = %bb18
- %28 = load i16* null, align 4 ; <i16> [#uses=1]
+ %28 = load i16, i16* null, align 4 ; <i16> [#uses=1]
%29 = zext i16 %28 to i32 ; <i32> [#uses=1]
%30 = sub i32 0, %29 ; <i32> [#uses=1]
br i1 false, label %bb23, label %bb79
bb21: ; preds = %bb3
%31 = getelementptr %struct.pf_state_key, %struct.pf_state_key* %a, i32 0, i32 1, i32 1, i32 0 ; <i32*> [#uses=1]
- %32 = load i32* %31, align 4 ; <i32> [#uses=2]
+ %32 = load i32, i32* %31, align 4 ; <i32> [#uses=2]
%33 = getelementptr %struct.pf_state_key, %struct.pf_state_key* %b, i32 0, i32 1, i32 1, i32 0 ; <i32*> [#uses=1]
- %34 = load i32* %33, align 4 ; <i32> [#uses=2]
+ %34 = load i32, i32* %33, align 4 ; <i32> [#uses=2]
%35 = sub i32 %32, %34 ; <i32> [#uses=1]
%36 = icmp eq i32 %32, %34 ; <i1> [#uses=1]
br i1 %36, label %bb23, label %bb79
@@ -188,11 +188,11 @@ bb24: ; preds = %bb23
ret i32 1
bb70: ; preds = %bb23
- %37 = load i32 (%struct.pf_app_state*, %struct.pf_app_state*)** null, align 4 ; <i32 (%struct.pf_app_state*, %struct.pf_app_state*)*> [#uses=3]
+ %37 = load i32 (%struct.pf_app_state*, %struct.pf_app_state*)*, i32 (%struct.pf_app_state*, %struct.pf_app_state*)** null, align 4 ; <i32 (%struct.pf_app_state*, %struct.pf_app_state*)*> [#uses=3]
br i1 false, label %bb78, label %bb73
bb73: ; preds = %bb70
- %38 = load i32 (%struct.pf_app_state*, %struct.pf_app_state*)** null, align 4 ; <i32 (%struct.pf_app_state*, %struct.pf_app_state*)*> [#uses=2]
+ %38 = load i32 (%struct.pf_app_state*, %struct.pf_app_state*)*, i32 (%struct.pf_app_state*, %struct.pf_app_state*)** null, align 4 ; <i32 (%struct.pf_app_state*, %struct.pf_app_state*)*> [#uses=2]
%39 = icmp eq i32 (%struct.pf_app_state*, %struct.pf_app_state*)* %38, null ; <i1> [#uses=1]
br i1 %39, label %bb78, label %bb74
Modified: llvm/trunk/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll Fri Feb 27 15:17:42 2015
@@ -71,7 +71,7 @@
define fastcc void @dropCell(%struct.MemPage* nocapture %pPage, i32 %idx, i32 %sz) nounwind ssp {
entry:
- %0 = load i8** null, align 8 ; <i8*> [#uses=4]
+ %0 = load i8*, i8** null, align 8 ; <i8*> [#uses=4]
%1 = or i32 0, 0 ; <i32> [#uses=1]
%2 = icmp slt i32 %sz, 4 ; <i1> [#uses=1]
%size_addr.0.i = select i1 %2, i32 4, i32 %sz ; <i32> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-04-scale.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-04-scale.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-04-scale.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-04-scale.ll Fri Feb 27 15:17:42 2015
@@ -8,13 +8,13 @@
define void @test() {
entry:
- %0 = load i32* null, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%1 = lshr i32 %0, 8 ; <i32> [#uses=1]
%2 = and i32 %1, 255 ; <i32> [#uses=1]
%3 = getelementptr %struct.array, %struct.array* null, i32 0, i32 3 ; <[256 x %struct.pair]*> [#uses=1]
%4 = getelementptr [256 x %struct.pair], [256 x %struct.pair]* %3, i32 0, i32 %2 ; <%struct.pair*> [#uses=1]
%5 = getelementptr %struct.pair, %struct.pair* %4, i32 0, i32 1 ; <i64*> [#uses=1]
- %6 = load i64* %5, align 4 ; <i64> [#uses=1]
+ %6 = load i64, i64* %5, align 4 ; <i64> [#uses=1]
%7 = xor i64 0, %6 ; <i64> [#uses=1]
%8 = xor i64 %7, 0 ; <i64> [#uses=1]
%9 = xor i64 %8, 0 ; <i64> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ entry:
br label %bb
bb: ; preds = %bb.i, %bb, %entry
- %2 = load volatile i32* @g_9, align 4 ; <i32> [#uses=2]
+ %2 = load volatile i32, i32* @g_9, align 4 ; <i32> [#uses=2]
%3 = icmp sgt i32 %2, 1 ; <i1> [#uses=1]
%4 = and i1 %3, %1 ; <i1> [#uses=1]
br i1 %4, label %bb.i, label %bb
Modified: llvm/trunk/test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ entry:
br label %bb14
bb14: ; preds = %bb
- %srcval16 = load i448* %P, align 8 ; <i448> [#uses=1]
+ %srcval16 = load i448, i448* %P, align 8 ; <i448> [#uses=1]
%tmp = zext i32 undef to i448 ; <i448> [#uses=1]
%tmp15 = shl i448 %tmp, 288 ; <i448> [#uses=1]
%mask = and i448 %srcval16, -2135987035423586845985235064014169866455883682256196619149693890381755748887481053010428711403521 ; <i448> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-05-30-ISelBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-05-30-ISelBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-05-30-ISelBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-05-30-ISelBug.ll Fri Feb 27 15:17:42 2015
@@ -14,13 +14,13 @@ bb35.i.backedge.exitStub: ; preds = %bb
bb54.i: ; preds = %newFuncRoot
%1 = zext i32 %.reload51 to i64 ; <i64> [#uses=1]
%2 = getelementptr i32, i32* %0, i64 %1 ; <i32*> [#uses=1]
- %3 = load i32* %2, align 4 ; <i32> [#uses=2]
+ %3 = load i32, i32* %2, align 4 ; <i32> [#uses=2]
%4 = lshr i32 %3, 8 ; <i32> [#uses=1]
%5 = and i32 %3, 255 ; <i32> [#uses=1]
%6 = add i32 %5, 4 ; <i32> [#uses=1]
%7 = zext i32 %4 to i64 ; <i64> [#uses=1]
%8 = getelementptr i32, i32* %0, i64 %7 ; <i32*> [#uses=1]
- %9 = load i32* %8, align 4 ; <i32> [#uses=2]
+ %9 = load i32, i32* %8, align 4 ; <i32> [#uses=2]
%10 = and i32 %9, 255 ; <i32> [#uses=1]
%11 = lshr i32 %9, 8 ; <i32> [#uses=1]
%12 = add i32 %c_nblock_used.2.i, 5 ; <i32> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-06-02-RewriterBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-06-02-RewriterBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-06-02-RewriterBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-06-02-RewriterBug.ll Fri Feb 27 15:17:42 2015
@@ -14,11 +14,11 @@ while.body: ; preds = %for.end, %bb.nph
%ctg22996 = getelementptr i8, i8* %in, i64 0 ; <i8*> [#uses=1]
%conv = zext i32 undef to i64 ; <i64> [#uses=1]
%conv11 = zext i32 undef to i64 ; <i64> [#uses=1]
- %tmp18 = load i32* undef ; <i32> [#uses=1]
+ %tmp18 = load i32, i32* undef ; <i32> [#uses=1]
%conv19 = zext i32 %tmp18 to i64 ; <i64> [#uses=1]
- %tmp30 = load i32* undef ; <i32> [#uses=1]
+ %tmp30 = load i32, i32* undef ; <i32> [#uses=1]
%conv31 = zext i32 %tmp30 to i64 ; <i64> [#uses=4]
- %ptrincdec3065 = load i8* null ; <i8> [#uses=1]
+ %ptrincdec3065 = load i8, i8* null ; <i8> [#uses=1]
%conv442709 = zext i8 %ptrincdec3065 to i64 ; <i64> [#uses=1]
%shl45 = shl i64 %conv442709, 16 ; <i64> [#uses=1]
%conv632707 = zext i8 undef to i64 ; <i64> [#uses=1]
@@ -68,10 +68,10 @@ while.body: ; preds = %for.end, %bb.nph
%add479 = add i64 %add473, %add441 ; <i64> [#uses=3]
%conv4932682 = zext i8 undef to i64 ; <i64> [#uses=1]
%shl494 = shl i64 %conv4932682, 16 ; <i64> [#uses=1]
- %ptrincdec4903012 = load i8* null ; <i8> [#uses=1]
+ %ptrincdec4903012 = load i8, i8* null ; <i8> [#uses=1]
%conv5032681 = zext i8 %ptrincdec4903012 to i64 ; <i64> [#uses=1]
%shl504 = shl i64 %conv5032681, 8 ; <i64> [#uses=1]
- %ptrincdec5003009 = load i8* null ; <i8> [#uses=1]
+ %ptrincdec5003009 = load i8, i8* null ; <i8> [#uses=1]
%conv5132680 = zext i8 %ptrincdec5003009 to i64 ; <i64> [#uses=1]
%or495 = or i64 %shl494, 0 ; <i64> [#uses=1]
%or505 = or i64 %or495, %conv5132680 ; <i64> [#uses=1]
@@ -91,10 +91,10 @@ while.body: ; preds = %for.end, %bb.nph
%xor575 = xor i64 %xor568, %or561 ; <i64> [#uses=1]
%add587 = add i64 %xor575, 0 ; <i64> [#uses=1]
%add593 = add i64 %add587, %add555 ; <i64> [#uses=1]
- %ptrincdec6043000 = load i8* null ; <i8> [#uses=1]
+ %ptrincdec6043000 = load i8, i8* null ; <i8> [#uses=1]
%conv6172676 = zext i8 %ptrincdec6043000 to i64 ; <i64> [#uses=1]
%shl618 = shl i64 %conv6172676, 8 ; <i64> [#uses=1]
- %ptrincdec6142997 = load i8* %ctg22996 ; <i8> [#uses=1]
+ %ptrincdec6142997 = load i8, i8* %ctg22996 ; <i8> [#uses=1]
%conv6272675 = zext i8 %ptrincdec6142997 to i64 ; <i64> [#uses=1]
%or619 = or i64 0, %conv6272675 ; <i64> [#uses=1]
%or628 = or i64 %or619, %shl618 ; <i64> [#uses=1]
@@ -106,7 +106,7 @@ while.body: ; preds = %for.end, %bb.nph
%xor700 = xor i64 0, %and699 ; <i64> [#uses=1]
%add701 = add i64 0, %xor700 ; <i64> [#uses=1]
%add707 = add i64 %add701, %add669 ; <i64> [#uses=4]
- %ptrincdec6242994 = load i8* null ; <i8> [#uses=1]
+ %ptrincdec6242994 = load i8, i8* null ; <i8> [#uses=1]
%conv7122673 = zext i8 %ptrincdec6242994 to i64 ; <i64> [#uses=1]
%shl713 = shl i64 %conv7122673, 24 ; <i64> [#uses=1]
%conv7412670 = zext i8 undef to i64 ; <i64> [#uses=1]
@@ -132,7 +132,7 @@ while.body: ; preds = %for.end, %bb.nph
%add821 = add i64 %add815, %add783 ; <i64> [#uses=1]
%add1160 = add i64 0, %add707 ; <i64> [#uses=0]
%add1157 = add i64 undef, undef ; <i64> [#uses=0]
- %ptrincdec11742940 = load i8* null ; <i8> [#uses=1]
+ %ptrincdec11742940 = load i8, i8* null ; <i8> [#uses=1]
%conv11872651 = zext i8 %ptrincdec11742940 to i64 ; <i64> [#uses=1]
%shl1188 = shl i64 %conv11872651, 8 ; <i64> [#uses=1]
%or1198 = or i64 0, %shl1188 ; <i64> [#uses=1]
@@ -172,18 +172,18 @@ bb.nph: ; preds = %entry
br label %while.body
while.body: ; preds = %for.end, %bb.nph
- %tmp3 = load i32* %arr ; <i32> [#uses=2]
+ %tmp3 = load i32, i32* %arr ; <i32> [#uses=2]
%conv = zext i32 %tmp3 to i64 ; <i64> [#uses=1]
- %tmp10 = load i32* %arrayidx9 ; <i32> [#uses=1]
+ %tmp10 = load i32, i32* %arrayidx9 ; <i32> [#uses=1]
%conv11 = zext i32 %tmp10 to i64 ; <i64> [#uses=1]
- %tmp14 = load i32* %arrayidx13 ; <i32> [#uses=3]
+ %tmp14 = load i32, i32* %arrayidx13 ; <i32> [#uses=3]
%conv15 = zext i32 %tmp14 to i64 ; <i64> [#uses=2]
- %tmp18 = load i32* undef ; <i32> [#uses=2]
+ %tmp18 = load i32, i32* undef ; <i32> [#uses=2]
%conv19 = zext i32 %tmp18 to i64 ; <i64> [#uses=1]
%conv23 = zext i32 undef to i64 ; <i64> [#uses=1]
- %tmp26 = load i32* %arrayidx25 ; <i32> [#uses=1]
+ %tmp26 = load i32, i32* %arrayidx25 ; <i32> [#uses=1]
%conv27 = zext i32 %tmp26 to i64 ; <i64> [#uses=1]
- %tmp30 = load i32* %arrayidx29 ; <i32> [#uses=2]
+ %tmp30 = load i32, i32* %arrayidx29 ; <i32> [#uses=2]
%conv31 = zext i32 %tmp30 to i64 ; <i64> [#uses=5]
%shl72 = shl i64 %conv31, 26 ; <i64> [#uses=1]
%shr = lshr i64 %conv31, 6 ; <i64> [#uses=1]
@@ -203,7 +203,7 @@ while.body: ; preds = %for.end, %bb.nph
%add137 = add i64 %add131, %add99 ; <i64> [#uses=5]
%conv1422700 = zext i8 undef to i64 ; <i64> [#uses=1]
%shl143 = shl i64 %conv1422700, 24 ; <i64> [#uses=1]
- %ptrincdec1393051 = load i8* undef ; <i8> [#uses=1]
+ %ptrincdec1393051 = load i8, i8* undef ; <i8> [#uses=1]
%conv1512699 = zext i8 %ptrincdec1393051 to i64 ; <i64> [#uses=1]
%shl152 = shl i64 %conv1512699, 16 ; <i64> [#uses=1]
%conv1712697 = zext i8 undef to i64 ; <i64> [#uses=1]
@@ -283,7 +283,7 @@ for.body: ; preds = %for.cond
%add1427 = add i64 %add1392, %d.0 ; <i64> [#uses=1]
%add1424 = add i64 %xor1412, 0 ; <i64> [#uses=1]
%add1430 = add i64 %add1424, %add1392 ; <i64> [#uses=5]
- %tmp1438 = load i32* undef ; <i32> [#uses=1]
+ %tmp1438 = load i32, i32* undef ; <i32> [#uses=1]
%conv1439 = zext i32 %tmp1438 to i64 ; <i64> [#uses=4]
%shl1441 = shl i64 %conv1439, 25 ; <i64> [#uses=1]
%shr1444 = lshr i64 %conv1439, 7 ; <i64> [#uses=1]
@@ -302,13 +302,13 @@ for.body: ; preds = %for.cond
%shr1479 = lshr i64 %conv1464, 10 ; <i64> [#uses=1]
%xor1477 = xor i64 %or1476, %shr1479 ; <i64> [#uses=1]
%xor1480 = xor i64 %xor1477, %or1470 ; <i64> [#uses=1]
- %tmp1499 = load i32* null ; <i32> [#uses=1]
+ %tmp1499 = load i32, i32* null ; <i32> [#uses=1]
%conv1500 = zext i32 %tmp1499 to i64 ; <i64> [#uses=1]
%add1491 = add i64 %conv1500, 0 ; <i64> [#uses=1]
%add1501 = add i64 %add1491, %xor1455 ; <i64> [#uses=1]
%add1502 = add i64 %add1501, %xor1480 ; <i64> [#uses=1]
%conv1504 = and i64 %add1502, 4294967295 ; <i64> [#uses=1]
- %tmp1541 = load i32* undef ; <i32> [#uses=1]
+ %tmp1541 = load i32, i32* undef ; <i32> [#uses=1]
%conv1542 = zext i32 %tmp1541 to i64 ; <i64> [#uses=1]
%add1527 = add i64 %conv1542, %g.0 ; <i64> [#uses=1]
%add1536 = add i64 %add1527, 0 ; <i64> [#uses=1]
@@ -327,7 +327,7 @@ for.body: ; preds = %for.cond
%add1576 = add i64 %xor1564, 0 ; <i64> [#uses=1]
%add1582 = add i64 %add1576, %add1544 ; <i64> [#uses=3]
store i32 undef, i32* undef
- %tmp1693 = load i32* undef ; <i32> [#uses=1]
+ %tmp1693 = load i32, i32* undef ; <i32> [#uses=1]
%conv1694 = zext i32 %tmp1693 to i64 ; <i64> [#uses=1]
%add1679 = add i64 %conv1694, %f.0 ; <i64> [#uses=1]
%add1688 = add i64 %add1679, 0 ; <i64> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@
define fastcc void @MinSize(%struct.rec* %x) nounwind {
entry:
- %tmp13 = load i8* undef, align 4 ; <i8> [#uses=3]
+ %tmp13 = load i8, i8* undef, align 4 ; <i8> [#uses=3]
%tmp14 = zext i8 %tmp13 to i32 ; <i32> [#uses=2]
switch i32 %tmp14, label %bb1109 [
i32 42, label %bb246
Modified: llvm/trunk/test/CodeGen/X86/2009-06-05-VZextByteShort.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-06-05-VZextByteShort.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-06-05-VZextByteShort.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-06-05-VZextByteShort.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define <4 x i16> @a(i32* %x1) nounwind {
; CHECK-NEXT: movd %[[R]], %xmm0
; CHECK-NEXT: retl
- %x2 = load i32* %x1
+ %x2 = load i32, i32* %x1
%x3 = lshr i32 %x2, 1
%x = trunc i32 %x3 to i16
%r = insertelement <4 x i16> zeroinitializer, i16 %x, i32 0
@@ -20,7 +20,7 @@ define <8 x i16> @b(i32* %x1) nounwind {
; CHECK-NEXT: movd %e[[R]]x, %xmm0
; CHECK-NEXT: retl
- %x2 = load i32* %x1
+ %x2 = load i32, i32* %x1
%x3 = lshr i32 %x2, 1
%x = trunc i32 %x3 to i16
%r = insertelement <8 x i16> zeroinitializer, i16 %x, i32 0
@@ -34,7 +34,7 @@ define <8 x i8> @c(i32* %x1) nounwind {
; CHECK-NEXT: movd %e[[R]]x, %xmm0
; CHECK-NEXT: retl
- %x2 = load i32* %x1
+ %x2 = load i32, i32* %x1
%x3 = lshr i32 %x2, 1
%x = trunc i32 %x3 to i8
%r = insertelement <8 x i8> zeroinitializer, i8 %x, i32 0
@@ -48,7 +48,7 @@ define <16 x i8> @d(i32* %x1) nounwind {
; CHECK-NEXT: movd %e[[R]]x, %xmm0
; CHECK-NEXT: retl
- %x2 = load i32* %x1
+ %x2 = load i32, i32* %x1
%x3 = lshr i32 %x2, 1
%x = trunc i32 %x3 to i8
%r = insertelement <16 x i8> zeroinitializer, i8 %x, i32 0
Modified: llvm/trunk/test/CodeGen/X86/2009-07-15-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-07-15-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-07-15-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-07-15-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -237,7 +237,7 @@ bb1545: ; preds = %bb1544
br i1 undef, label %bb1563, label %bb1558
bb1558: ; preds = %bb1545
- %0 = load %struct.SV** undef ; <%struct.SV*> [#uses=1]
+ %0 = load %struct.SV*, %struct.SV** undef ; <%struct.SV*> [#uses=1]
%1 = bitcast %struct.SV* %0 to %struct.GV* ; <%struct.GV*> [#uses=5]
br i1 undef, label %bb1563, label %bb1559
Modified: llvm/trunk/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
define fastcc i32 @bsGetUInt32() nounwind ssp {
entry:
- %bsBuff.promoted44 = load i32* @bsBuff ; <i32> [#uses=1]
+ %bsBuff.promoted44 = load i32, i32* @bsBuff ; <i32> [#uses=1]
%0 = add i32 0, -8 ; <i32> [#uses=1]
%1 = lshr i32 %bsBuff.promoted44, %0 ; <i32> [#uses=1]
%2 = shl i32 %1, 8 ; <i32> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-08-06-branchfolder-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-08-06-branchfolder-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-08-06-branchfolder-crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-08-06-branchfolder-crash.ll Fri Feb 27 15:17:42 2015
@@ -43,12 +43,12 @@ entry:
br i1 %tobool, label %lor.lhs.false, label %if.then
lor.lhs.false: ; preds = %entry
- %tmp1 = load i8* @g_3 ; <i8> [#uses=1]
+ %tmp1 = load i8, i8* @g_3 ; <i8> [#uses=1]
%tobool3 = icmp eq i8 %tmp1, 0 ; <i1> [#uses=1]
br i1 %tobool3, label %return, label %if.then
if.then: ; preds = %lor.lhs.false, %entry
- %tmp4 = load i8* @g_3 ; <i8> [#uses=1]
+ %tmp4 = load i8, i8* @g_3 ; <i8> [#uses=1]
%conv5 = sext i8 %tmp4 to i32 ; <i32> [#uses=1]
ret i32 %conv5
@@ -93,12 +93,12 @@ entry:
br i1 %tobool, label %lor.lhs.false, label %if.then
lor.lhs.false: ; preds = %entry
- %tmp1 = load i8* @g_3 ; <i8> [#uses=1]
+ %tmp1 = load i8, i8* @g_3 ; <i8> [#uses=1]
%tobool3 = icmp eq i8 %tmp1, 0 ; <i1> [#uses=1]
br i1 %tobool3, label %return, label %if.then
if.then: ; preds = %lor.lhs.false, %entry
- %tmp4 = load i8* @g_3 ; <i8> [#uses=1]
+ %tmp4 = load i8, i8* @g_3 ; <i8> [#uses=1]
%conv5 = sext i8 %tmp4 to i32 ; <i32> [#uses=1]
ret i32 %conv5
Modified: llvm/trunk/test/CodeGen/X86/2009-08-14-Win64MemoryIndirectArg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-08-14-Win64MemoryIndirectArg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-08-14-Win64MemoryIndirectArg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-08-14-Win64MemoryIndirectArg.ll Fri Feb 27 15:17:42 2015
@@ -14,12 +14,12 @@ primitiveTextureFetchBlock: ; preds = %
%pointerArithmeticTmp = bitcast %0* %shaderExecutionStatePtr to i8* ; <i8*> [#uses=1]
%pointerArithmeticTmp1 = getelementptr i8, i8* %pointerArithmeticTmp, i64 1808 ; <i8*> [#uses=1]
%pointerArithmeticTmp2 = bitcast i8* %pointerArithmeticTmp1 to %1** ; <%1**> [#uses=1]
- %primitivePtr = load %1** %pointerArithmeticTmp2 ; <%1*> [#uses=1]
+ %primitivePtr = load %1*, %1** %pointerArithmeticTmp2 ; <%1*> [#uses=1]
%pointerArithmeticTmp3 = bitcast %1* %primitivePtr to i8* ; <i8*> [#uses=1]
%pointerArithmeticTmp4 = getelementptr i8, i8* %pointerArithmeticTmp3, i64 19408 ; <i8*> [#uses=1]
%pointerArithmeticTmp5 = bitcast i8* %pointerArithmeticTmp4 to %1** ; <%1**> [#uses=1]
%primitiveTexturePtr = getelementptr %1*, %1** %pointerArithmeticTmp5, i32 %index ; <%1**> [#uses=1]
- %primitiveTexturePtr6 = load %1** %primitiveTexturePtr ; <%1*> [#uses=2]
+ %primitiveTexturePtr6 = load %1*, %1** %primitiveTexturePtr ; <%1*> [#uses=2]
br label %textureCheckBlock
textureCheckBlock: ; preds = %primitiveTextureFetchBlock
@@ -31,7 +31,7 @@ rhoCalculateBlock: ; preds = %textureCh
%pointerArithmeticTmp7 = bitcast %1* %primitiveTexturePtr6 to i8* ; <i8*> [#uses=1]
%pointerArithmeticTmp8 = getelementptr i8, i8* %pointerArithmeticTmp7, i64 640 ; <i8*> [#uses=1]
%pointerArithmeticTmp9 = bitcast i8* %pointerArithmeticTmp8 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %dimensionsPtr = load <4 x float>* %pointerArithmeticTmp9, align 1 ; <<4 x float>> [#uses=2]
+ %dimensionsPtr = load <4 x float>, <4 x float>* %pointerArithmeticTmp9, align 1 ; <<4 x float>> [#uses=2]
%texDiffDX = fsub <4 x float> %texCoordDX, %texCoord ; <<4 x float>> [#uses=1]
%texDiffDY = fsub <4 x float> %texCoordDY, %texCoord ; <<4 x float>> [#uses=1]
%ddx = fmul <4 x float> %texDiffDX, %dimensionsPtr ; <<4 x float>> [#uses=2]
Modified: llvm/trunk/test/CodeGen/X86/2009-08-19-LoadNarrowingMiscompile.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-08-19-LoadNarrowingMiscompile.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-08-19-LoadNarrowingMiscompile.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-08-19-LoadNarrowingMiscompile.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
define void @c() nounwind {
; CHECK: movl a+8, %eax
- %srcval1 = load i96* @a, align 4
+ %srcval1 = load i96, i96* @a, align 4
%sroa.store.elt2 = lshr i96 %srcval1, 64
%tmp = trunc i96 %sroa.store.elt2 to i64
; CHECK: movl %eax, b
Modified: llvm/trunk/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll Fri Feb 27 15:17:42 2015
@@ -41,18 +41,18 @@ bb3:
br i1 undef, label %bb5, label %bb4
bb4: ; preds = %bb3
- %17 = load volatile i32* @uint8, align 4 ; <i32> [#uses=0]
+ %17 = load volatile i32, i32* @uint8, align 4 ; <i32> [#uses=0]
br label %bb5
bb5: ; preds = %bb4, %bb3
- %18 = load volatile i32* @uint8, align 4 ; <i32> [#uses=0]
+ %18 = load volatile i32, i32* @uint8, align 4 ; <i32> [#uses=0]
%19 = sext i8 undef to i16 ; <i16> [#uses=1]
%20 = tail call i32 @func_24(i16 zeroext %19, i8 signext 1) nounwind; <i32> [#uses=0]
br i1 undef, label %return, label %bb6.preheader
bb6.preheader: ; preds = %bb5
%21 = sext i8 %p_52 to i32 ; <i32> [#uses=1]
- %22 = load volatile i32* @uint8, align 4 ; <i32> [#uses=0]
+ %22 = load volatile i32, i32* @uint8, align 4 ; <i32> [#uses=0]
%23 = tail call i32 (...)* @safefuncts(i32 %21, i32 1) nounwind; <i32> [#uses=0]
unreachable
Modified: llvm/trunk/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll Fri Feb 27 15:17:42 2015
@@ -26,7 +26,7 @@ invcont:
invcont1: ; preds = %invcont
%6 = getelementptr inbounds %struct.ComplexType, %struct.ComplexType* %2, i64 0, i32 0 ; <i32*> [#uses=1]
- %7 = load i32* %6, align 4 ; <i32> [#uses=1]
+ %7 = load i32, i32* %6, align 4 ; <i32> [#uses=1]
invoke void @booleanAndDataReply(i32 %7, i32 undef, i32 %requestID, i32 undef, i64 undef, i32 undef)
to label %invcont2 unwind label %lpad
Modified: llvm/trunk/test/CodeGen/X86/2009-09-10-SpillComments.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-09-10-SpillComments.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-09-10-SpillComments.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-09-10-SpillComments.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ entry:
cond_next: ; preds = %entry
%tmp6 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 0 ; <i16*> [#uses=1]
- %tmp7 = load i16* %tmp6 ; <i16> [#uses=2]
+ %tmp7 = load i16, i16* %tmp6 ; <i16> [#uses=2]
%tmp78 = zext i16 %tmp7 to i32 ; <i32> [#uses=2]
%tmp10 = icmp eq i16 %tmp7, 54 ; <i1> [#uses=1]
br i1 %tmp10, label %cond_true13, label %cond_next32
@@ -29,9 +29,9 @@ cond_next: ; preds = %entry
cond_true13: ; preds = %cond_next
%tmp15 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 3 ; <[1 x %struct..0anon]*> [#uses=1]
%tmp1718 = bitcast [1 x %struct..0anon]* %tmp15 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=1]
- %tmp19 = load %struct.rtx_def** %tmp1718 ; <%struct.rtx_def*> [#uses=1]
+ %tmp19 = load %struct.rtx_def*, %struct.rtx_def** %tmp1718 ; <%struct.rtx_def*> [#uses=1]
%tmp20 = getelementptr %struct.rtx_def, %struct.rtx_def* %tmp19, i32 0, i32 0 ; <i16*> [#uses=1]
- %tmp21 = load i16* %tmp20 ; <i16> [#uses=1]
+ %tmp21 = load i16, i16* %tmp20 ; <i16> [#uses=1]
%tmp22 = icmp eq i16 %tmp21, 57 ; <i1> [#uses=1]
br i1 %tmp22, label %cond_true25, label %cond_next32
@@ -41,9 +41,9 @@ cond_true25: ; preds = %cond_true13
cond_next32: ; preds = %cond_true13, %cond_next
%tmp34 = getelementptr [116 x i8*], [116 x i8*]* @rtx_format, i32 0, i32 %tmp78 ; <i8**> [#uses=1]
- %tmp35 = load i8** %tmp34, align 4 ; <i8*> [#uses=1]
+ %tmp35 = load i8*, i8** %tmp34, align 4 ; <i8*> [#uses=1]
%tmp37 = getelementptr [117 x i32], [117 x i32]* @rtx_length, i32 0, i32 %tmp78 ; <i32*> [#uses=1]
- %tmp38 = load i32* %tmp37, align 4 ; <i32> [#uses=1]
+ %tmp38 = load i32, i32* %tmp37, align 4 ; <i32> [#uses=1]
%i.011 = add i32 %tmp38, -1 ; <i32> [#uses=2]
%tmp12513 = icmp sgt i32 %i.011, -1 ; <i1> [#uses=1]
br i1 %tmp12513, label %bb, label %UnifiedReturnBlock
@@ -52,7 +52,7 @@ bb: ; preds = %bb123, %cond_next32
%indvar = phi i32 [ %indvar.next26, %bb123 ], [ 0, %cond_next32 ] ; <i32> [#uses=2]
%i.01.0 = sub i32 %i.011, %indvar ; <i32> [#uses=5]
%tmp42 = getelementptr i8, i8* %tmp35, i32 %i.01.0 ; <i8*> [#uses=2]
- %tmp43 = load i8* %tmp42 ; <i8> [#uses=1]
+ %tmp43 = load i8, i8* %tmp42 ; <i8> [#uses=1]
switch i8 %tmp43, label %bb123 [
i8 101, label %cond_true47
i8 69, label %bb105.preheader
@@ -61,38 +61,38 @@ bb: ; preds = %bb123, %cond_next32
cond_true47: ; preds = %bb
%tmp52 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0 ; <%struct..0anon*> [#uses=1]
%tmp5354 = bitcast %struct..0anon* %tmp52 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=1]
- %tmp55 = load %struct.rtx_def** %tmp5354 ; <%struct.rtx_def*> [#uses=1]
+ %tmp55 = load %struct.rtx_def*, %struct.rtx_def** %tmp5354 ; <%struct.rtx_def*> [#uses=1]
%tmp58 = tail call %struct.rtx_def* @walk_fixup_memory_subreg( %struct.rtx_def* %tmp55, %struct.rtx_def* %insn ) nounwind ; <%struct.rtx_def*> [#uses=1]
%tmp62 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0, i32 0 ; <i32*> [#uses=1]
%tmp58.c = ptrtoint %struct.rtx_def* %tmp58 to i32 ; <i32> [#uses=1]
store i32 %tmp58.c, i32* %tmp62
- %tmp6816 = load i8* %tmp42 ; <i8> [#uses=1]
+ %tmp6816 = load i8, i8* %tmp42 ; <i8> [#uses=1]
%tmp6917 = icmp eq i8 %tmp6816, 69 ; <i1> [#uses=1]
br i1 %tmp6917, label %bb105.preheader, label %bb123
bb105.preheader: ; preds = %cond_true47, %bb
%tmp11020 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0 ; <%struct..0anon*> [#uses=1]
%tmp11111221 = bitcast %struct..0anon* %tmp11020 to %struct.rtvec_def** ; <%struct.rtvec_def**> [#uses=3]
- %tmp11322 = load %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=1]
+ %tmp11322 = load %struct.rtvec_def*, %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=1]
%tmp11423 = getelementptr %struct.rtvec_def, %struct.rtvec_def* %tmp11322, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp11524 = load i32* %tmp11423 ; <i32> [#uses=1]
+ %tmp11524 = load i32, i32* %tmp11423 ; <i32> [#uses=1]
%tmp11625 = icmp eq i32 %tmp11524, 0 ; <i1> [#uses=1]
br i1 %tmp11625, label %bb123, label %bb73
bb73: ; preds = %bb73, %bb105.preheader
%j.019 = phi i32 [ %tmp104, %bb73 ], [ 0, %bb105.preheader ] ; <i32> [#uses=3]
- %tmp81 = load %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=2]
+ %tmp81 = load %struct.rtvec_def*, %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=2]
%tmp92 = getelementptr %struct.rtvec_def, %struct.rtvec_def* %tmp81, i32 0, i32 1, i32 %j.019 ; <%struct..0anon*> [#uses=1]
%tmp9394 = bitcast %struct..0anon* %tmp92 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=1]
- %tmp95 = load %struct.rtx_def** %tmp9394 ; <%struct.rtx_def*> [#uses=1]
+ %tmp95 = load %struct.rtx_def*, %struct.rtx_def** %tmp9394 ; <%struct.rtx_def*> [#uses=1]
%tmp98 = tail call %struct.rtx_def* @walk_fixup_memory_subreg( %struct.rtx_def* %tmp95, %struct.rtx_def* %insn ) nounwind ; <%struct.rtx_def*> [#uses=1]
%tmp101 = getelementptr %struct.rtvec_def, %struct.rtvec_def* %tmp81, i32 0, i32 1, i32 %j.019, i32 0 ; <i32*> [#uses=1]
%tmp98.c = ptrtoint %struct.rtx_def* %tmp98 to i32 ; <i32> [#uses=1]
store i32 %tmp98.c, i32* %tmp101
%tmp104 = add i32 %j.019, 1 ; <i32> [#uses=2]
- %tmp113 = load %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=1]
+ %tmp113 = load %struct.rtvec_def*, %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=1]
%tmp114 = getelementptr %struct.rtvec_def, %struct.rtvec_def* %tmp113, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp115 = load i32* %tmp114 ; <i32> [#uses=1]
+ %tmp115 = load i32, i32* %tmp114 ; <i32> [#uses=1]
%tmp116 = icmp ult i32 %tmp104, %tmp115 ; <i1> [#uses=1]
br i1 %tmp116, label %bb73, label %bb123
Modified: llvm/trunk/test/CodeGen/X86/2009-09-16-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-09-16-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-09-16-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-09-16-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -32,7 +32,7 @@ lor.lhs.false:
br i1 %cmp16, label %for.end41, label %for.cond17.preheader
for.cond17.preheader: ; preds = %lor.lhs.false
- %tmp24 = load i32* @boot_cpu_id ; <i32> [#uses=1]
+ %tmp24 = load i32, i32* @boot_cpu_id ; <i32> [#uses=1]
%shr26 = ashr i32 %tmp24, %and ; <i32> [#uses=1]
br label %for.body20
Modified: llvm/trunk/test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll Fri Feb 27 15:17:42 2015
@@ -13,11 +13,11 @@ bb: ; preds = %bb, %entry
%sum.04 = phi i32 [ 0, %entry ], [ %10, %bb ] ; <i32> [#uses=1]
%1 = mul i32 %i.03, %As ; <i32> [#uses=1]
%2 = getelementptr i16, i16* %A, i32 %1 ; <i16*> [#uses=1]
- %3 = load i16* %2, align 2 ; <i16> [#uses=1]
+ %3 = load i16, i16* %2, align 2 ; <i16> [#uses=1]
%4 = sext i16 %3 to i32 ; <i32> [#uses=1]
%5 = mul i32 %i.03, %Bs ; <i32> [#uses=1]
%6 = getelementptr i16, i16* %B, i32 %5 ; <i16*> [#uses=1]
- %7 = load i16* %6, align 2 ; <i16> [#uses=1]
+ %7 = load i16, i16* %6, align 2 ; <i16> [#uses=1]
%8 = sext i16 %7 to i32 ; <i32> [#uses=1]
%9 = mul i32 %8, %4 ; <i32> [#uses=1]
%10 = add i32 %9, %sum.04 ; <i32> [#uses=2]
Modified: llvm/trunk/test/CodeGen/X86/2009-09-22-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-09-22-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-09-22-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-09-22-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -54,7 +54,7 @@ bb9:
unreachable
bb.i37: ; preds = %bb.i37, %bb11.thread
- %0 = load i64* undef, align 8 ; <i64> [#uses=1]
+ %0 = load i64, i64* undef, align 8 ; <i64> [#uses=1]
%1 = shl i64 %0, %.cast.i ; <i64> [#uses=1]
store i64 %1, i64* undef, align 8
br i1 undef, label %bb.i37, label %quantum_addscratch.exit
Modified: llvm/trunk/test/CodeGen/X86/2009-10-19-EmergencySpill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-10-19-EmergencySpill.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-10-19-EmergencySpill.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-10-19-EmergencySpill.ll Fri Feb 27 15:17:42 2015
@@ -8,12 +8,12 @@
define fastcc void @nodeOverwriteCell(%struct.Rtree* nocapture %pRtree, %struct.RtreeNode* nocapture %pNode, %struct.RtreeCell* nocapture %pCell, i32 %iCell) nounwind ssp {
entry:
- %0 = load i8** undef, align 8 ; <i8*> [#uses=2]
- %1 = load i32* undef, align 8 ; <i32> [#uses=1]
+ %0 = load i8*, i8** undef, align 8 ; <i8*> [#uses=2]
+ %1 = load i32, i32* undef, align 8 ; <i32> [#uses=1]
%2 = mul i32 %1, %iCell ; <i32> [#uses=1]
%3 = add nsw i32 %2, 4 ; <i32> [#uses=1]
%4 = sext i32 %3 to i64 ; <i64> [#uses=2]
- %5 = load i64* null, align 8 ; <i64> [#uses=2]
+ %5 = load i64, i64* null, align 8 ; <i64> [#uses=2]
%6 = lshr i64 %5, 48 ; <i64> [#uses=1]
%7 = trunc i64 %6 to i8 ; <i8> [#uses=1]
store i8 %7, i8* undef, align 1
@@ -36,12 +36,12 @@ bb:
%tmp = shl i64 %indvar, 2 ; <i64> [#uses=1]
%tmp26 = add i64 %tmp, %tmp25 ; <i64> [#uses=1]
%scevgep27 = getelementptr i8, i8* %0, i64 %tmp26 ; <i8*> [#uses=1]
- %12 = load i32* %scevgep12, align 4 ; <i32> [#uses=1]
+ %12 = load i32, i32* %scevgep12, align 4 ; <i32> [#uses=1]
%13 = lshr i32 %12, 24 ; <i32> [#uses=1]
%14 = trunc i32 %13 to i8 ; <i8> [#uses=1]
store i8 %14, i8* undef, align 1
store i8 undef, i8* %scevgep27, align 1
- %15 = load i32* %11, align 4 ; <i32> [#uses=1]
+ %15 = load i32, i32* %11, align 4 ; <i32> [#uses=1]
%16 = shl i32 %15, 1 ; <i32> [#uses=1]
%17 = icmp sgt i32 %16, undef ; <i1> [#uses=1]
%indvar.next = add i64 %indvar, 1 ; <i64> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll Fri Feb 27 15:17:42 2015
@@ -32,7 +32,7 @@ if.end.i:
br label %lt_init.exit
lt_init.exit: ; preds = %if.end.i, %if.then.i
- %3 = load i32* %retval.i ; <i32> [#uses=1]
+ %3 = load i32, i32* %retval.i ; <i32> [#uses=1]
call void asm sideeffect "cpuid", "~{ax},~{bx},~{cx},~{dx},~{memory},~{dirflag},~{fpsr},~{flags}"() nounwind
%4 = call i64 @llvm.readcyclecounter() nounwind ; <i64> [#uses=1]
%5 = sub i64 %4, %2 ; <i64> [#uses=1]
@@ -50,7 +50,7 @@ if.then:
if.end: ; preds = %if.then, %lt_init.exit
store i32 0, i32* %retval
- %7 = load i32* %retval ; <i32> [#uses=1]
+ %7 = load i32, i32* %retval ; <i32> [#uses=1]
tail call void asm sideeffect "cpuid", "~{ax},~{bx},~{cx},~{dx},~{memory},~{dirflag},~{fpsr},~{flags}"() nounwind
%8 = tail call i64 @llvm.readcyclecounter() nounwind ; <i64> [#uses=1]
%9 = sub i64 %8, %0 ; <i64> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-10-25-RewriterBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-10-25-RewriterBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-10-25-RewriterBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-10-25-RewriterBug.ll Fri Feb 27 15:17:42 2015
@@ -95,7 +95,7 @@ bb41.i:
bb45.i: ; preds = %bb41.i
%33 = getelementptr inbounds %struct.StorablePicture, %struct.StorablePicture* %26, i64 0, i32 5, i64 undef, i64 %32, i64 undef ; <i64*> [#uses=1]
- %34 = load i64* %33, align 8 ; <i64> [#uses=1]
+ %34 = load i64, i64* %33, align 8 ; <i64> [#uses=1]
br label %bb47.i
bb47.i: ; preds = %bb45.i, %bb41.i
@@ -110,9 +110,9 @@ bb58.i:
br label %bb60.i
bb60.i: ; preds = %bb58.i, %bb57.i
- %35 = load i64*** undef, align 8 ; <i64**> [#uses=1]
+ %35 = load i64**, i64*** undef, align 8 ; <i64**> [#uses=1]
%scevgep256.i = getelementptr i64*, i64** %35, i64 %indvar248.i ; <i64**> [#uses=1]
- %36 = load i64** %scevgep256.i, align 8 ; <i64*> [#uses=1]
+ %36 = load i64*, i64** %scevgep256.i, align 8 ; <i64*> [#uses=1]
%scevgep243.i = getelementptr i64, i64* %36, i64 undef ; <i64*> [#uses=1]
store i64 -1, i64* %scevgep243.i, align 8
br label %bb64.i
@@ -160,7 +160,7 @@ bb101.i:
br label %bb102.i
bb102.i: ; preds = %bb101.i, %bb83.i
- %48 = load %struct.StorablePicture** %0, align 8 ; <%struct.StorablePicture*> [#uses=2]
+ %48 = load %struct.StorablePicture*, %struct.StorablePicture** %0, align 8 ; <%struct.StorablePicture*> [#uses=2]
br i1 undef, label %bb81.i, label %bb104.i
bb104.i: ; preds = %bb102.i, %bb80.i
Modified: llvm/trunk/test/CodeGen/X86/2009-11-16-MachineLICM.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-11-16-MachineLICM.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-11-16-MachineLICM.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-11-16-MachineLICM.ll Fri Feb 27 15:17:42 2015
@@ -25,13 +25,13 @@ bb:
%tmp1318 = or i64 %tmp9, 3 ; <i64> [#uses=1]
%scevgep14 = getelementptr float, float* %x, i64 %tmp1318 ; <float*> [#uses=1]
%x_addr.03 = getelementptr float, float* %x, i64 %tmp9 ; <float*> [#uses=1]
- %1 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 0), align 16 ; <float> [#uses=1]
+ %1 = load float, float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 0), align 16 ; <float> [#uses=1]
store float %1, float* %x_addr.03, align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 1), align 4 ; <float> [#uses=1]
+ %2 = load float, float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 1), align 4 ; <float> [#uses=1]
store float %2, float* %scevgep, align 4
- %3 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 2), align 8 ; <float> [#uses=1]
+ %3 = load float, float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 2), align 8 ; <float> [#uses=1]
store float %3, float* %scevgep12, align 4
- %4 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 3), align 4 ; <float> [#uses=1]
+ %4 = load float, float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 3), align 4 ; <float> [#uses=1]
store float %4, float* %scevgep14, align 4
%indvar.next = add i64 %indvar, 1 ; <i64> [#uses=2]
%exitcond = icmp eq i64 %indvar.next, %tmp ; <i1> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2009-11-25-ImpDefBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-11-25-ImpDefBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-11-25-ImpDefBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-11-25-ImpDefBug.ll Fri Feb 27 15:17:42 2015
@@ -48,7 +48,7 @@ lpad:
%.SV10.phi807 = phi i8* [ undef, %bb1.i.fragment.cl ], [ undef, %bb1.i.fragment ], [ undef, %bb5 ] ; <i8*> [#uses=1]
%exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
cleanup
- %1 = load i8* %.SV10.phi807, align 8 ; <i8> [#uses=0]
+ %1 = load i8, i8* %.SV10.phi807, align 8 ; <i8> [#uses=0]
br i1 undef, label %meshBB81.bbcl.disp, label %bb13.fragment.bbcl.disp
bb.i1: ; preds = %bb.i.i.bbcl.disp
Modified: llvm/trunk/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@ entry:
%b = alloca i32 ; <i32*> [#uses=2]
%a = alloca i32 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = load i32* %b, align 4 ; <i32> [#uses=1]
- %1 = load i32* %b, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %b, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* %b, align 4 ; <i32> [#uses=1]
%asmtmp = call i32 asm "$0 = foo ($1, $2)", "=&{ax},%0,r,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1) nounwind ; <i32> [#uses=1]
store i32 %asmtmp, i32* %a
br label %return
@@ -30,8 +30,8 @@ entry:
%b = alloca i32 ; <i32*> [#uses=2]
%a = alloca i32 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = load i32* %b, align 4 ; <i32> [#uses=1]
- %1 = load i32* %b, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %b, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* %b, align 4 ; <i32> [#uses=1]
%asmtmp = call i32 asm "$0 = foo ($1, $2)", "=&r,%0,r,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1) nounwind ; <i32> [#uses=1]
store i32 %asmtmp, i32* %a
br label %return
Modified: llvm/trunk/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll Fri Feb 27 15:17:42 2015
@@ -30,13 +30,13 @@ define void @leaf() nounwind {
br label %"@CFE_debug_label_0"
"@CFE_debug_label_0": ; preds = %"file foo2.c, line 14, bb2"
- %r = load %test** bitcast ([1 x i64]* @ptr to %test**), align 8 ; <%test*> [#uses=1]
+ %r = load %test*, %test** bitcast ([1 x i64]* @ptr to %test**), align 8 ; <%test*> [#uses=1]
store %test* %r, %test** %p, align 8
br label %"@CFE_debug_label_2"
"@CFE_debug_label_2": ; preds = %"@CFE_debug_label_0"
- %r1 = load %link** bitcast ([1 x i64]* @link_ptr to %link**), align 8 ; <%link*> [#uses=1]
- %r2 = load %test** %p, align 8 ; <%test*> [#uses=1]
+ %r1 = load %link*, %link** bitcast ([1 x i64]* @link_ptr to %link**), align 8 ; <%link*> [#uses=1]
+ %r2 = load %test*, %test** %p, align 8 ; <%test*> [#uses=1]
%r3 = ptrtoint %test* %r2 to i64 ; <i64> [#uses=1]
%r4 = inttoptr i64 %r3 to %link** ; <%link**> [#uses=1]
%r5 = getelementptr %link*, %link** %r4, i64 1 ; <%link**> [#uses=1]
@@ -44,7 +44,7 @@ define void @leaf() nounwind {
br label %"@CFE_debug_label_3"
"@CFE_debug_label_3": ; preds = %"@CFE_debug_label_2"
- %r6 = load %test** %p, align 8 ; <%test*> [#uses=1]
+ %r6 = load %test*, %test** %p, align 8 ; <%test*> [#uses=1]
%r7 = ptrtoint %test* %r6 to i64 ; <i64> [#uses=1]
%r8 = inttoptr i64 %r7 to %link* ; <%link*> [#uses=1]
%r9 = getelementptr %link, %link* %r8, i64 1 ; <%link*> [#uses=1]
@@ -52,7 +52,7 @@ define void @leaf() nounwind {
br label %"@CFE_debug_label_4"
"@CFE_debug_label_4": ; preds = %"@CFE_debug_label_3"
- %r10 = load %test** %p, align 8 ; <%test*> [#uses=1]
+ %r10 = load %test*, %test** %p, align 8 ; <%test*> [#uses=1]
%r11 = ptrtoint %test* %r10 to i64 ; <i64> [#uses=1]
%r12 = inttoptr i64 %r11 to i32* ; <i32*> [#uses=1]
store i32 1, i32* %r12, align 4
Modified: llvm/trunk/test/CodeGen/X86/20090313-signext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/20090313-signext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/20090313-signext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/20090313-signext.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ entry:
%0 = tail call signext i16 @h() nounwind
%1 = sext i16 %0 to i32
tail call void @g(i32 %1) nounwind
- %2 = load i16* @x, align 2
+ %2 = load i16, i16* @x, align 2
ret i16 %2
}
Modified: llvm/trunk/test/CodeGen/X86/2010-01-13-OptExtBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-01-13-OptExtBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-01-13-OptExtBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-01-13-OptExtBug.ll Fri Feb 27 15:17:42 2015
@@ -8,14 +8,14 @@ entry:
%call = tail call i8* @_Z15uprv_malloc_4_2v()
%0 = bitcast i8* %call to double*
%tmp = getelementptr inbounds %class.OlsonTimeZone, %class.OlsonTimeZone* %this, i32 0, i32 3
- %tmp2 = load i16* %tmp
+ %tmp2 = load i16, i16* %tmp
%tmp525 = getelementptr inbounds %class.OlsonTimeZone, %class.OlsonTimeZone* %this, i32 0, i32 0
- %tmp626 = load i16* %tmp525
+ %tmp626 = load i16, i16* %tmp525
%cmp27 = icmp slt i16 %tmp2, %tmp626
br i1 %cmp27, label %bb.nph, label %for.end
for.cond:
- %tmp6 = load i16* %tmp5
+ %tmp6 = load i16, i16* %tmp5
%cmp = icmp slt i16 %inc, %tmp6
%indvar.next = add i32 %indvar, 1
br i1 %cmp, label %for.body, label %for.end
@@ -34,9 +34,9 @@ for.body:
%tmp30 = add i32 %indvar, %tmp29
%tmp33 = add i32 %indvar, %tmp32
%inc = trunc i32 %tmp33 to i16
- %tmp11 = load i8** %tmp10
+ %tmp11 = load i8*, i8** %tmp10
%arrayidx = getelementptr i8, i8* %tmp11, i32 %tmp30
- %tmp12 = load i8* %arrayidx
+ %tmp12 = load i8, i8* %arrayidx
br label %for.cond
for.end:
Modified: llvm/trunk/test/CodeGen/X86/2010-01-15-SelectionDAGCycle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-01-15-SelectionDAGCycle.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-01-15-SelectionDAGCycle.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-01-15-SelectionDAGCycle.ll Fri Feb 27 15:17:42 2015
@@ -11,12 +11,12 @@ define void @numvec_(i32* noalias %ncele
"file bug754399.f90, line 184, in inner vector loop at depth 0, bb164": ; preds = %"file bug754399.f90, line 184, in inner vector loop at depth 0, bb164", %"file bug754399.f90, line 1, bb1"
%tmp641 = add i64 0, 48 ; <i64> [#uses=1]
%tmp641642 = inttoptr i64 %tmp641 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %r1258 = load <4 x i32>* %tmp641642, align 4 ; <<4 x i32>> [#uses=2]
+ %r1258 = load <4 x i32>, <4 x i32>* %tmp641642, align 4 ; <<4 x i32>> [#uses=2]
%r1295 = extractelement <4 x i32> %r1258, i32 3 ; <i32> [#uses=1]
%r1296 = sext i32 %r1295 to i64 ; <i64> [#uses=1]
%r1297 = add i64 %r1296, -1 ; <i64> [#uses=1]
%r1298183 = getelementptr [0 x i32], [0 x i32]* %ismbs, i64 0, i64 %r1297 ; <i32*> [#uses=1]
- %r1298184 = load i32* %r1298183, align 4 ; <i32> [#uses=1]
+ %r1298184 = load i32, i32* %r1298183, align 4 ; <i32> [#uses=1]
%r1301 = extractelement <4 x i32> %r1037, i32 3 ; <i32> [#uses=1]
%r1302 = mul i32 %r1298184, %r1301 ; <i32> [#uses=1]
%r1306 = insertelement <4 x i32> zeroinitializer, i32 %r1302, i32 3 ; <<4 x i32>> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2010-01-18-DbgValue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-01-18-DbgValue.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-01-18-DbgValue.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-01-18-DbgValue.ll Fri Feb 27 15:17:42 2015
@@ -15,14 +15,14 @@ entry:
call void @llvm.dbg.declare(metadata %struct.Rect* %my_r0, metadata !0, metadata !{!"0x102"}), !dbg !15
%1 = getelementptr inbounds %struct.Rect, %struct.Rect* %my_r0, i32 0, i32 0, !dbg !16 ; <%struct.Pt*> [#uses=1]
%2 = getelementptr inbounds %struct.Pt, %struct.Pt* %1, i32 0, i32 0, !dbg !16 ; <double*> [#uses=1]
- %3 = load double* %2, align 8, !dbg !16 ; <double> [#uses=1]
+ %3 = load double, double* %2, align 8, !dbg !16 ; <double> [#uses=1]
store double %3, double* %0, align 8, !dbg !16
- %4 = load double* %0, align 8, !dbg !16 ; <double> [#uses=1]
+ %4 = load double, double* %0, align 8, !dbg !16 ; <double> [#uses=1]
store double %4, double* %retval, align 8, !dbg !16
br label %return, !dbg !16
return: ; preds = %entry
- %retval1 = load double* %retval, !dbg !16 ; <double> [#uses=1]
+ %retval1 = load double, double* %retval, !dbg !16 ; <double> [#uses=1]
ret double %retval1, !dbg !16
}
Modified: llvm/trunk/test/CodeGen/X86/2010-01-19-OptExtBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-01-19-OptExtBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-01-19-OptExtBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-01-19-OptExtBug.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ bb7:
unreachable
bb9: ; preds = %bb6
- %0 = load i8* undef, align 1 ; <i8> [#uses=3]
+ %0 = load i8, i8* undef, align 1 ; <i8> [#uses=3]
br i1 undef, label %bb12, label %bb10
bb10: ; preds = %bb9
Modified: llvm/trunk/test/CodeGen/X86/2010-02-04-SchedulerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-02-04-SchedulerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-02-04-SchedulerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-02-04-SchedulerBug.ll Fri Feb 27 15:17:42 2015
@@ -6,13 +6,13 @@
define void @t(i32 %cNum, i64 %max) nounwind optsize ssp noimplicitfloat {
entry:
- %0 = load %struct.b_t** null, align 4 ; <%struct.b_t*> [#uses=1]
+ %0 = load %struct.b_t*, %struct.b_t** null, align 4 ; <%struct.b_t*> [#uses=1]
%1 = getelementptr inbounds %struct.b_t, %struct.b_t* %0, i32 %cNum, i32 5 ; <i64*> [#uses=1]
- %2 = load i64* %1, align 4 ; <i64> [#uses=1]
+ %2 = load i64, i64* %1, align 4 ; <i64> [#uses=1]
%3 = icmp ult i64 %2, %max ; <i1> [#uses=1]
%4 = getelementptr inbounds %struct.a_t, %struct.a_t* null, i32 0, i32 7 ; <i64**> [#uses=1]
- %5 = load i64** %4, align 4 ; <i64*> [#uses=0]
- %6 = load i64* null, align 4 ; <i64> [#uses=1]
+ %5 = load i64*, i64** %4, align 4 ; <i64*> [#uses=0]
+ %6 = load i64, i64* null, align 4 ; <i64> [#uses=1]
br i1 %3, label %bb2, label %bb
bb: ; preds = %entry
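
When the loaded value is itself a pointer, the explicit type repeats the operand type with one fewer level of indirection, as in the load i64*, i64** and load %struct.b_t*, %struct.b_t** lines above. Function-pointer element types, which show up later in this patch, work the same way. A hedged sketch with illustrative names:

  ; loading a pointer out of a pointer-to-pointer
  %q = load i64*, i64** %pp, align 8
  ; loading a function pointer: the element type is the whole function-pointer type
  %f = load i32 (i8*)*, i32 (i8*)** %fpp, align 8
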
Modified: llvm/trunk/test/CodeGen/X86/2010-02-11-NonTemporal.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-02-11-NonTemporal.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-02-11-NonTemporal.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-02-11-NonTemporal.ll Fri Feb 27 15:17:42 2015
@@ -11,8 +11,8 @@ define void @sub_(i32* noalias %n) {
%i = alloca i32, align 4
%"$LCS_0" = alloca i64, align 8
%"$LCS_S2" = alloca <2 x double>, align 16
- %r9 = load <2 x double>* %"$LCS_S2", align 8
- %r10 = load i64* %"$LCS_0", align 8
+ %r9 = load <2 x double>, <2 x double>* %"$LCS_S2", align 8
+ %r10 = load i64, i64* %"$LCS_0", align 8
%r11 = inttoptr i64 %r10 to <2 x double>*
store <2 x double> %r9, <2 x double>* %r11, align 16, !nontemporal !0
br label %"file movnt.f90, line 18, bb5"
Modified: llvm/trunk/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll Fri Feb 27 15:17:42 2015
@@ -228,7 +228,7 @@ entry:
unreachable
"67": ; preds = %"65"
- %1 = load i32* undef, align 4 ; <i32> [#uses=0]
+ %1 = load i32, i32* undef, align 4 ; <i32> [#uses=0]
br label %"100"
"82": ; preds = %"61", %"60", %"59"
Modified: llvm/trunk/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll Fri Feb 27 15:17:42 2015
@@ -23,23 +23,23 @@ declare fastcc void @l298(i32 %r10, i32
define fastcc void @l186(%tupl* %r1) noreturn nounwind {
entry:
%ptr1 = getelementptr %tupl, %tupl* %r1, i32 0, i32 0
- %r2 = load i32* %ptr1
+ %r2 = load i32, i32* %ptr1
%ptr3 = getelementptr %tupl, %tupl* %r1, i32 0, i32 1
- %r3 = load i32* %ptr3
+ %r3 = load i32, i32* %ptr3
%ptr5 = getelementptr %tupl, %tupl* %r1, i32 0, i32 2
- %r4 = load i32* %ptr5
+ %r4 = load i32, i32* %ptr5
%ptr7 = getelementptr %tupl, %tupl* %r1, i32 0, i32 3
- %r5 = load i32* %ptr7
+ %r5 = load i32, i32* %ptr7
%ptr9 = getelementptr %tupl, %tupl* %r1, i32 0, i32 4
- %r6 = load i32* %ptr9
+ %r6 = load i32, i32* %ptr9
%ptr11 = getelementptr %tupl, %tupl* %r1, i32 0, i32 5
- %r7 = load i32* %ptr11
+ %r7 = load i32, i32* %ptr11
%ptr13 = getelementptr %tupl, %tupl* %r1, i32 0, i32 6
- %r8 = load i32* %ptr13
+ %r8 = load i32, i32* %ptr13
%ptr15 = getelementptr %tupl, %tupl* %r1, i32 0, i32 7
- %r9 = load i32* %ptr15
+ %r9 = load i32, i32* %ptr15
%ptr17 = getelementptr %tupl, %tupl* %r1, i32 0, i32 8
- %r10 = load i32* %ptr17
+ %r10 = load i32, i32* %ptr17
%cond = icmp eq i32 %r10, 3
br i1 %cond, label %true, label %false
Modified: llvm/trunk/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ entry:
br i1 undef, label %for.end, label %for.body
for.body: ; preds = %if.end40, %entry
- %tmp6 = load i8* undef, align 2 ; <i8> [#uses=3]
+ %tmp6 = load i8, i8* undef, align 2 ; <i8> [#uses=3]
%conv11 = sext i8 %tmp6 to i64 ; <i64> [#uses=1]
%cmp15 = icmp slt i64 %conv11, undef ; <i1> [#uses=1]
br i1 %cmp15, label %if.end, label %if.then
@@ -29,7 +29,7 @@ if.then:
if.end: ; preds = %if.then, %for.body
%index.0 = phi i8 [ 0, %if.then ], [ %tmp6, %for.body ] ; <i8> [#uses=1]
store i8 %index.0, i8* undef
- %tmp24 = load i8* undef ; <i8> [#uses=2]
+ %tmp24 = load i8, i8* undef ; <i8> [#uses=2]
br i1 undef, label %if.end40, label %if.then36
if.then36: ; preds = %if.end
Modified: llvm/trunk/test/CodeGen/X86/2010-03-17-ISelBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-03-17-ISelBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-03-17-ISelBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-03-17-ISelBug.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@
define i32* @t() align 2 nounwind {
entry:
%operation = alloca %struct.PPOperation, align 8 ; <%struct.PPOperation*> [#uses=2]
- %0 = load i32*** null, align 4 ; [#uses=1]
+ %0 = load i32**, i32*** null, align 4 ; [#uses=1]
%1 = ptrtoint i32** %0 to i32 ; <i32> [#uses=1]
%2 = sub nsw i32 %1, undef ; <i32> [#uses=2]
br i1 false, label %bb20, label %bb.nph380
Modified: llvm/trunk/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ invcont64:
br i1 undef, label %invcont65, label %bb.i.i
bb.i.i: ; preds = %invcont64
- %1 = load <4 x float>* undef, align 16 ; <<4 x float>> [#uses=5]
+ %1 = load <4 x float>, <4 x float>* undef, align 16 ; <<4 x float>> [#uses=5]
br i1 undef, label %bb.nph.i.i, label %invcont65
bb.nph.i.i: ; preds = %bb.i.i
Modified: llvm/trunk/test/CodeGen/X86/2010-04-08-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-04-08-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-04-08-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-04-08-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ entry:
; CHECK: addq $12, %rsi
%BitValueArray = alloca [32 x i32], align 4
%tmp2 = getelementptr inbounds %struct.F, %struct.F* %this, i64 0, i32 0
- %tmp3 = load %struct.FC** %tmp2, align 8
+ %tmp3 = load %struct.FC*, %struct.FC** %tmp2, align 8
%tmp4 = getelementptr inbounds %struct.FC, %struct.FC* %tmp3, i64 0, i32 1, i64 0
%tmp5 = bitcast [32 x i32]* %BitValueArray to i8*
%tmp6 = bitcast i32* %tmp4 to i8*
Modified: llvm/trunk/test/CodeGen/X86/2010-04-13-AnalyzeBranchCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-04-13-AnalyzeBranchCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-04-13-AnalyzeBranchCrash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-04-13-AnalyzeBranchCrash.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ entry:
]
if.then: ; preds = %entry, %entry
- %tmp69 = load float* null, align 4 ; <float> [#uses=1]
+ %tmp69 = load float, float* null, align 4 ; <float> [#uses=1]
%cmp19 = icmp eq %1* null, %scroller ; <i1> [#uses=2]
%cond = select i1 %cmp19, float %tmp69, float 0.000000e+00 ; <float> [#uses=1]
%call36 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*)*)(i8* undef, i8* undef) nounwind optsize ; <i64> [#uses=2]
Modified: llvm/trunk/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll Fri Feb 27 15:17:42 2015
@@ -47,7 +47,7 @@ try.handler:
match: ; preds = %try.handler
%4 = call i8* @__cxa_begin_catch(i8* %exc1) ; <i8*> [#uses=1]
%5 = bitcast i8* %4 to i32* ; <i32*> [#uses=1]
- %6 = load i32* %5 ; <i32> [#uses=1]
+ %6 = load i32, i32* %5 ; <i32> [#uses=1]
store i32 %6, i32* %0
%call = invoke i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), %struct.S* %s2)
to label %invoke.cont2 unwind label %match.handler ; <i32> [#uses=0]
@@ -80,7 +80,7 @@ invoke.cont5:
br label %cleanup.switch
cleanup.switch: ; preds = %invoke.cont5
- %tmp = load i32* %cleanup.dst ; <i32> [#uses=1]
+ %tmp = load i32, i32* %cleanup.dst ; <i32> [#uses=1]
switch i32 %tmp, label %cleanup.end [
i32 1, label %cleanup.pad
i32 2, label %cleanup.pad4
@@ -99,7 +99,7 @@ finally:
br label %cleanup.switch9
cleanup.switch9: ; preds = %finally
- %tmp8 = load i32* %cleanup.dst7 ; <i32> [#uses=1]
+ %tmp8 = load i32, i32* %cleanup.dst7 ; <i32> [#uses=1]
switch i32 %tmp8, label %cleanup.end10 [
i32 1, label %finally.end
i32 2, label %finally.throw
@@ -109,7 +109,7 @@ cleanup.end10:
br label %finally.end
finally.throw: ; preds = %cleanup.switch9
- %8 = load i8** %_rethrow ; <i8*> [#uses=1]
+ %8 = load i8*, i8** %_rethrow ; <i8*> [#uses=1]
call void @_Unwind_Resume_or_Rethrow(i8* %8)
unreachable
@@ -117,9 +117,9 @@ finally.end:
%tmp11 = getelementptr inbounds %struct.S, %struct.S* %s1, i32 0, i32 0 ; <[2 x i8*]*> [#uses=1]
%arraydecay = getelementptr inbounds [2 x i8*], [2 x i8*]* %tmp11, i32 0, i32 0 ; <i8**> [#uses=1]
%arrayidx = getelementptr inbounds i8*, i8** %arraydecay, i32 1 ; <i8**> [#uses=1]
- %tmp12 = load i8** %arrayidx ; <i8*> [#uses=1]
+ %tmp12 = load i8*, i8** %arrayidx ; <i8*> [#uses=1]
store i8* %tmp12, i8** %retval
- %9 = load i8** %retval ; <i8*> [#uses=1]
+ %9 = load i8*, i8** %retval ; <i8*> [#uses=1]
ret i8* %9
}
Modified: llvm/trunk/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll Fri Feb 27 15:17:42 2015
@@ -23,9 +23,9 @@ entry:
store i8* %asmresult, i8** %ret
store i8* %asmresult1, i8** %p
store i32 %asmresult2, i32* %t
- %tmp = load i8** %ret ; <i8*> [#uses=1]
+ %tmp = load i8*, i8** %ret ; <i8*> [#uses=1]
store i8* %tmp, i8** %retval
- %1 = load i8** %retval ; <i8*> [#uses=1]
+ %1 = load i8*, i8** %retval ; <i8*> [#uses=1]
ret i8* %1
}
Modified: llvm/trunk/test/CodeGen/X86/2010-05-07-ldconvert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-05-07-ldconvert.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-05-07-ldconvert.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-05-07-ldconvert.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ entry:
%tmp = call x86_fp80 @llvm.powi.f80(x86_fp80 0xK3FFF8000000000000000, i32 -64) ; <x86_fp80> [#uses=1]
%conv = fptosi x86_fp80 %tmp to i32 ; <i32> [#uses=1]
store i32 %conv, i32* %r
- %tmp1 = load i32* %r ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %r ; <i32> [#uses=1]
%tobool = icmp ne i32 %tmp1, 0 ; <i1> [#uses=1]
br i1 %tobool, label %if.then, label %if.end
@@ -18,7 +18,7 @@ if.then:
br label %if.end
if.end: ; preds = %if.then, %entry
- %0 = load i32* %retval ; <i32> [#uses=1]
+ %0 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %0
}
Modified: llvm/trunk/test/CodeGen/X86/2010-05-10-DAGCombinerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-05-10-DAGCombinerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-05-10-DAGCombinerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-05-10-DAGCombinerBug.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
define i32 @CXB30130(i32 %num1, i16* nocapture %num2, float* nocapture %num3, double* nocapture %num4) nounwind ssp {
entry:
- %0 = load i16* %num2, align 2 ; <i16> [#uses=2]
+ %0 = load i16, i16* %num2, align 2 ; <i16> [#uses=2]
%1 = mul nsw i16 %0, %0 ; <i16> [#uses=1]
store i16 %1, i16* %num2, align 2
ret i32 undef
Modified: llvm/trunk/test/CodeGen/X86/2010-05-16-nosseconversion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-05-16-nosseconversion.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-05-16-nosseconversion.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-05-16-nosseconversion.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
define i32 @foo() nounwind readonly ssp {
entry:
- %0 = load i64* @x, align 8 ; <i64> [#uses=1]
+ %0 = load i64, i64* @x, align 8 ; <i64> [#uses=1]
%1 = uitofp i64 %0 to double ; <double> [#uses=1]
%2 = fptosi double %1 to i32 ; <i32> [#uses=1]
ret i32 %2
Modified: llvm/trunk/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ define i8* @bar(%struct.a* %myvar) nounw
entry:
tail call void @llvm.dbg.value(metadata %struct.a* %myvar, i64 0, metadata !8, metadata !{!"0x102"})
%0 = getelementptr inbounds %struct.a, %struct.a* %myvar, i64 0, i32 0, !dbg !28 ; <i32*> [#uses=1]
- %1 = load i32* %0, align 8, !dbg !28 ; <i32> [#uses=1]
+ %1 = load i32, i32* %0, align 8, !dbg !28 ; <i32> [#uses=1]
tail call void @foo(i32 %1) nounwind optsize noinline ssp, !dbg !28
%2 = bitcast %struct.a* %myvar to i8*, !dbg !30 ; <i8*> [#uses=1]
ret i8* %2, !dbg !30
Modified: llvm/trunk/test/CodeGen/X86/2010-05-26-FP_TO_INT-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-05-26-FP_TO_INT-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-05-26-FP_TO_INT-crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-05-26-FP_TO_INT-crash.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ module asm "\09.ident\09\22GCC: (GNU) 4.
define i32 @f2(double %x) nounwind {
entry:
- %0 = load double* undef, align 64 ; <double> [#uses=1]
+ %0 = load double, double* undef, align 64 ; <double> [#uses=1]
%1 = fptoui double %0 to i16 ; <i16> [#uses=1]
%2 = zext i16 %1 to i32 ; <i32> [#uses=1]
%3 = add nsw i32 0, %2 ; <i32> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll Fri Feb 27 15:17:42 2015
@@ -2,6 +2,6 @@
; CHECK: %fs:
define i32 @test1(i32 addrspace(257)* %arg) nounwind {
- %tmp = load i32 addrspace(257)* %arg
+ %tmp = load i32, i32 addrspace(257)* %arg
ret i32 %tmp
}
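
Address-space qualifiers stay on the pointer operand's type only; the explicit value type carries no address space, as the hunk above shows. Illustrative sketch:

  ; the loaded type is plain i32; addrspace(257) qualifies only the pointer
  %tmp = load i32, i32 addrspace(257)* %arg
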
Modified: llvm/trunk/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll Fri Feb 27 15:17:42 2015
@@ -10,17 +10,17 @@ entry:
%retval = alloca i32, align 4 ; <i32*> [#uses=3]
%v = alloca i32, align 4 ; <i32*> [#uses=3]
store i32 0, i32* %retval
- %zero = load i32* %retval
+ %zero = load i32, i32* %retval
; The earlyclobber register EC0 should not be spilled before the inline asm.
; Yes, check-not can refer to FileCheck variables defined in the future.
; CHECK-NOT: [[EC0]]{{.*}}(%rsp)
; CHECK: bsr {{[^,]*}}, [[EC0:%...]]
%0 = call i32 asm "bsr $1, $0\0A\09cmovz $2, $0", "=&r,ro,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i32 %zero, i32 -1) nounwind, !srcloc !0 ; <i32> [#uses=1]
store i32 %0, i32* %v
- %tmp = load i32* %v ; <i32> [#uses=1]
+ %tmp = load i32, i32* %v ; <i32> [#uses=1]
%call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([23 x i8]* @.str, i32 0, i32 0), i32 %tmp) ; <i32> [#uses=0]
store i32 0, i32* %retval
- %1 = load i32* %retval ; <i32> [#uses=1]
+ %1 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %0
}
Modified: llvm/trunk/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
define i32 @func(%struct.type* %s) nounwind optsize ssp {
entry:
%tmp1 = getelementptr inbounds %struct.type, %struct.type* %s, i32 0, i32 1
- %tmp2 = load i32* %tmp1, align 8
+ %tmp2 = load i32, i32* %tmp1, align 8
%tmp3 = icmp eq i32 %tmp2, 10
%tmp4 = getelementptr inbounds %struct.type, %struct.type* %s, i32 0, i32 40
br i1 %tmp3, label %bb, label %entry.bb1_crit_edge
Modified: llvm/trunk/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll Fri Feb 27 15:17:42 2015
@@ -6,10 +6,10 @@ define void @_SEH2FrameHandler() nounwin
entry:
%target.addr.i = alloca i8*, align 4 ; <i8**> [#uses=2]
%frame = alloca %struct.__SEH2Frame*, align 4 ; <%struct.__SEH2Frame**> [#uses=1]
- %tmp = load %struct.__SEH2Frame** %frame ; <%struct.__SEH2Frame*> [#uses=1]
+ %tmp = load %struct.__SEH2Frame*, %struct.__SEH2Frame** %frame ; <%struct.__SEH2Frame*> [#uses=1]
%conv = bitcast %struct.__SEH2Frame* %tmp to i8* ; <i8*> [#uses=1]
store i8* %conv, i8** %target.addr.i
- %tmp.i = load i8** %target.addr.i ; <i8*> [#uses=1]
+ %tmp.i = load i8*, i8** %target.addr.i ; <i8*> [#uses=1]
call void asm sideeffect "push %ebp\0Apush $$0\0Apush $$0\0Apush $$Return${:uid}\0Apush $0\0Acall ${1:c}\0AReturn${:uid}: pop %ebp\0A", "imr,imr,~{ax},~{bx},~{cx},~{dx},~{si},~{di},~{flags},~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %tmp.i, void (...)* @RtlUnwind) nounwind, !srcloc !0
ret void
}
Modified: llvm/trunk/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll Fri Feb 27 15:17:42 2015
@@ -6,6 +6,6 @@ entry:
; CHECK: GCROOT %eax
%_r = alloca i32, align 4 ; <i32*> [#uses=2]
call void asm "/* GCROOT $0 */", "=*imr,0,~{dirflag},~{fpsr},~{flags}"(i32* %_r, i32 4) nounwind
- %0 = load i32* %_r, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %_r, align 4 ; <i32> [#uses=1]
ret i32 %0
}
Modified: llvm/trunk/test/CodeGen/X86/2010-07-02-UnfoldBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-07-02-UnfoldBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-07-02-UnfoldBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-07-02-UnfoldBug.ll Fri Feb 27 15:17:42 2015
@@ -61,7 +61,7 @@ bb22:
br i1 undef, label %bb2.i.i, label %bb.i.i49
bb.i.i49: ; preds = %bb22
- %0 = load float* undef, align 4 ; <float> [#uses=1]
+ %0 = load float, float* undef, align 4 ; <float> [#uses=1]
%1 = insertelement <4 x float> undef, float %0, i32 0 ; <<4 x float>> [#uses=1]
%2 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> <float 1.000000e+00, float undef, float undef, float undef>, <4 x float> %1) nounwind readnone ; <<4 x float>> [#uses=1]
%3 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %2, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>) nounwind readnone ; <<4 x float>> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/2010-07-11-FPStackLoneUse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-07-11-FPStackLoneUse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-07-11-FPStackLoneUse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-07-11-FPStackLoneUse.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ target triple = "x86_64-apple-darwin10.0
define void @_ZN7QVectorIdE4fillERKdi(double* nocapture %t) nounwind ssp align 2 {
entry:
- %tmp2 = load double* %t ; <double> [#uses=1]
+ %tmp2 = load double, double* %t ; <double> [#uses=1]
br i1 undef, label %if.end, label %if.then
if.then: ; preds = %entry
Modified: llvm/trunk/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
define i32 @main() nounwind {
entry:
- %tmp = load i64* @g_16 ; <i64> [#uses=1]
+ %tmp = load i64, i64* @g_16 ; <i64> [#uses=1]
%not.lnot = icmp ne i64 %tmp, 0 ; <i1> [#uses=1]
%conv = sext i1 %not.lnot to i64 ; <i64> [#uses=1]
%and = and i64 %conv, 150 ; <i64> [#uses=1]
@@ -20,7 +20,7 @@ entry:
; CHECK-NEXT: jle
entry.if.end_crit_edge: ; preds = %entry
- %tmp4.pre = load i32* @g_38 ; <i32> [#uses=1]
+ %tmp4.pre = load i32, i32* @g_38 ; <i32> [#uses=1]
br label %if.end
if.then: ; preds = %entry
Modified: llvm/trunk/test/CodeGen/X86/2010-08-04-StackVariable.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-08-04-StackVariable.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-08-04-StackVariable.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-08-04-StackVariable.ll Fri Feb 27 15:17:42 2015
@@ -13,13 +13,13 @@ entry:
bb: ; preds = %entry
%1 = getelementptr inbounds %struct.SVal, %struct.SVal* %location, i32 0, i32 1, !dbg !29 ; <i32*> [#uses=1]
- %2 = load i32* %1, align 8, !dbg !29 ; <i32> [#uses=1]
+ %2 = load i32, i32* %1, align 8, !dbg !29 ; <i32> [#uses=1]
%3 = add i32 %2, %i, !dbg !29 ; <i32> [#uses=1]
br label %bb2, !dbg !29
bb1: ; preds = %entry
%4 = getelementptr inbounds %struct.SVal, %struct.SVal* %location, i32 0, i32 1, !dbg !30 ; <i32*> [#uses=1]
- %5 = load i32* %4, align 8, !dbg !30 ; <i32> [#uses=1]
+ %5 = load i32, i32* %4, align 8, !dbg !30 ; <i32> [#uses=1]
%6 = sub i32 %5, 1, !dbg !30 ; <i32> [#uses=1]
br label %bb2, !dbg !30
@@ -58,11 +58,11 @@ entry:
store i32 1, i32* %1, align 8, !dbg !42
%2 = getelementptr inbounds %struct.SVal, %struct.SVal* %0, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
%3 = getelementptr inbounds %struct.SVal, %struct.SVal* %v, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
- %4 = load i8** %3, align 8, !dbg !43 ; <i8*> [#uses=1]
+ %4 = load i8*, i8** %3, align 8, !dbg !43 ; <i8*> [#uses=1]
store i8* %4, i8** %2, align 8, !dbg !43
%5 = getelementptr inbounds %struct.SVal, %struct.SVal* %0, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
%6 = getelementptr inbounds %struct.SVal, %struct.SVal* %v, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
- %7 = load i32* %6, align 8, !dbg !43 ; <i32> [#uses=1]
+ %7 = load i32, i32* %6, align 8, !dbg !43 ; <i32> [#uses=1]
store i32 %7, i32* %5, align 8, !dbg !43
%8 = call i32 @_Z3fooi4SVal(i32 2, %struct.SVal* noalias %0) nounwind, !dbg !43 ; <i32> [#uses=0]
call void @llvm.dbg.value(metadata i32 %8, i64 0, metadata !44, metadata !{!"0x102"}), !dbg !43
Modified: llvm/trunk/test/CodeGen/X86/2010-09-01-RemoveCopyByCommutingDef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-09-01-RemoveCopyByCommutingDef.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-09-01-RemoveCopyByCommutingDef.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-09-01-RemoveCopyByCommutingDef.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ define void @f(i32* %w, i32* %h, i8* %_t
%x1 = tail call i64 @g(i8* %_this, i8* %image) nounwind ; <i64> [#uses=3]
%tmp1 = trunc i64 %x1 to i32 ; <i32> [#uses=1]
; CHECK: movl (%r{{.*}}), %
- %x4 = load i32* %h, align 4 ; <i32> [#uses=1]
+ %x4 = load i32, i32* %h, align 4 ; <i32> [#uses=1]
; The imull clobbers a 32-bit register.
; CHECK: imull %{{...}}, %e[[CLOBBER:..]]
Modified: llvm/trunk/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@ entry:
%a = alloca [64 x i8]
%b = getelementptr inbounds [64 x i8], [64 x i8]* %a, i64 0, i32 0
%c = getelementptr inbounds [64 x i8], [64 x i8]* %a, i64 0, i32 30
- %d = load i8* %b, align 8
- %e = load i8* %c, align 8
+ %d = load i8, i8* %b, align 8
+ %e = load i8, i8* %c, align 8
%f = bitcast [64 x i8]* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %f, i8* %in, i64 64, i32 8, i1 false) nounwind
store i8 %d, i8* %b, align 8
Modified: llvm/trunk/test/CodeGen/X86/2010-11-09-MOVLPS.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-11-09-MOVLPS.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-11-09-MOVLPS.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-11-09-MOVLPS.ll Fri Feb 27 15:17:42 2015
@@ -21,9 +21,9 @@ entry:
store i8* %a, i8** %a_addr
store %0* %b, %0** %b_addr
store %0* %c, %0** %c_addr
- %0 = load i8** %a_addr, align 64
- %1 = load %0** %b_addr, align 64
- %2 = load %0** %c_addr, align 64
+ %0 = load i8*, i8** %a_addr, align 64
+ %1 = load %0*, %0** %b_addr, align 64
+ %2 = load %0*, %0** %c_addr, align 64
%"ssa point" = bitcast i32 0 to i32
br label %"2"
@@ -31,10 +31,10 @@ entry:
%3 = bitcast i8* %0 to <2 x i32>*
%4 = getelementptr inbounds %0, %0* %1, i32 0, i32 0
%5 = bitcast %"int[]"* %4 to <4 x float>*
- %6 = load <4 x float>* %5, align 16
+ %6 = load <4 x float>, <4 x float>* %5, align 16
%7 = bitcast <2 x i32>* %3 to <2 x float>*
%8 = bitcast <2 x float>* %7 to double*
- %9 = load double* %8
+ %9 = load double, double* %8
%10 = insertelement <2 x double> undef, double %9, i32 0
%11 = insertelement <2 x double> %10, double undef, i32 1
%12 = bitcast <2 x double> %11 to <4 x float>
@@ -48,10 +48,10 @@ entry:
%19 = bitcast i8* %18 to <2 x i32>*
%20 = getelementptr inbounds %0, %0* %2, i32 0, i32 0
%21 = bitcast %"int[]"* %20 to <4 x float>*
- %22 = load <4 x float>* %21, align 16
+ %22 = load <4 x float>, <4 x float>* %21, align 16
%23 = bitcast <2 x i32>* %19 to <2 x float>*
%24 = bitcast <2 x float>* %23 to double*
- %25 = load double* %24
+ %25 = load double, double* %24
%26 = insertelement <2 x double> undef, double %25, i32 0
%27 = insertelement <2 x double> %26, double undef, i32 1
%28 = bitcast <2 x double> %27 to <4 x float>
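
In the hunk above, %0 names both a local struct type (presumably defined near the top of the test, outside this excerpt) and the first unnamed SSA value; in load %0*, %0** %b_addr the two occurrences of %0 are the type. The explicit-type form makes such lines noticeably easier to read, since the loaded type no longer has to be recovered from the operand:

  ; %0 in type position: the loaded type is spelled out as %0*
  %1 = load %0*, %0** %b_addr, align 64
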
Modified: llvm/trunk/test/CodeGen/X86/2010-11-18-SelectOfExtload.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-11-18-SelectOfExtload.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-11-18-SelectOfExtload.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-11-18-SelectOfExtload.ll Fri Feb 27 15:17:42 2015
@@ -4,11 +4,11 @@
@s = external global i8
define i32 @foo(i1 %cond) {
; CHECK: @foo
- %u_base = load i8* @u
+ %u_base = load i8, i8* @u
%u_val = zext i8 %u_base to i32
; CHECK: movzbl
; CHECK: movsbl
- %s_base = load i8* @s
+ %s_base = load i8, i8* @s
%s_val = sext i8 %s_base to i32
%val = select i1 %cond, i32 %u_val, i32 %s_val
ret i32 %val
Modified: llvm/trunk/test/CodeGen/X86/2011-02-12-shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-02-12-shuffle.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-02-12-shuffle.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-02-12-shuffle.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ entry:
br i1 undef, label %if.end, label %UnifiedReturnBlock
if.end: ; preds = %entry
- %tmp1067 = load <16 x i32> addrspace(1)* null, align 64
+ %tmp1067 = load <16 x i32>, <16 x i32> addrspace(1)* null, align 64
%tmp1082 = shufflevector <16 x i32> <i32 0, i32 0, i32 0, i32 undef, i32 undef, i32 0, i32 0, i32 undef, i32 0, i32 0, i32 undef, i32 undef, i32 0, i32 undef, i32 undef, i32 undef>,
<16 x i32> %tmp1067,
<16 x i32> <i32 0, i32 1, i32 2, i32 undef, i32 26, i32 5, i32 6, i32 undef, i32 8, i32 9, i32 31, i32 30, i32 12, i32 undef, i32 undef, i32 undef>
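
Vector loads follow the same rule, with the full vector type written out as the first operand, and they compose with address-space-qualified pointers as in the hunk above. Sketch mirroring that hunk:

  ; vector value type plus an addrspace(1)-qualified pointer operand
  %tmp1067 = load <16 x i32>, <16 x i32> addrspace(1)* null, align 64
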
Modified: llvm/trunk/test/CodeGen/X86/2011-03-02-DAGCombiner.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-03-02-DAGCombiner.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-03-02-DAGCombiner.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-03-02-DAGCombiner.ll Fri Feb 27 15:17:42 2015
@@ -13,23 +13,23 @@ entry:
%K = alloca %0, align 4
store i32 0, i32* %retval
%0 = bitcast %0* %K to i32*
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%2 = and i32 %1, -121
%3 = or i32 %2, 32
store i32 %3, i32* %0, align 4
%4 = bitcast %0* %K to i32*
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = lshr i32 %5, 3
%bf.clear = and i32 %6, 15
%conv = sitofp i32 %bf.clear to float
%f = getelementptr inbounds %struct.anon, %struct.anon* %F, i32 0, i32 0
- %tmp = load float* %f, align 4
+ %tmp = load float, float* %f, align 4
%sub = fsub float %tmp, %conv
store float %sub, float* %f, align 4
%ld = getelementptr inbounds %struct.anon, %struct.anon* %F, i32 0, i32 1
- %tmp1 = load x86_fp80* %ld, align 16
+ %tmp1 = load x86_fp80, x86_fp80* %ld, align 16
%7 = bitcast %0* %K to i32*
- %8 = load i32* %7, align 4
+ %8 = load i32, i32* %7, align 4
%9 = lshr i32 %8, 7
%bf.clear2 = and i32 %9, 1
%conv3 = uitofp i32 %bf.clear2 to x86_fp80
@@ -39,12 +39,12 @@ entry:
%10 = bitcast %0* %K to i32*
%11 = and i32 %bf.value, 1
%12 = shl i32 %11, 7
- %13 = load i32* %10, align 4
+ %13 = load i32, i32* %10, align 4
%14 = and i32 %13, -129
%15 = or i32 %14, %12
store i32 %15, i32* %10, align 4
%call = call i32 (...)* @iequals(i32 1841, i32 %bf.value, i32 0)
- %16 = load i32* %retval
+ %16 = load i32, i32* %retval
ret i32 %16
}
Modified: llvm/trunk/test/CodeGen/X86/2011-03-09-Physreg-Coalescing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-03-09-Physreg-Coalescing.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-03-09-Physreg-Coalescing.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-03-09-Physreg-Coalescing.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ declare fastcc i8* @save_string(i8* %d,
define i32 @cvtchar(i8* nocapture %sp) nounwind {
%temp.i = alloca [2 x i8], align 1
- %tmp1 = load i8* %sp, align 1
+ %tmp1 = load i8, i8* %sp, align 1
%div = udiv i8 %tmp1, 10
%rem = urem i8 %div, 10
%arrayidx.i = getelementptr inbounds [2 x i8], [2 x i8]* %temp.i, i32 0, i32 0
Modified: llvm/trunk/test/CodeGen/X86/2011-04-13-SchedCmpJmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-04-13-SchedCmpJmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-04-13-SchedCmpJmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-04-13-SchedCmpJmp.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ declare hidden fastcc void @_ZN3JSCL23re
; CHECK: je
define i32 @cti_op_eq(i8** nocapture %args) nounwind ssp {
entry:
- %0 = load i8** null, align 8
+ %0 = load i8*, i8** null, align 8
%tmp13 = bitcast i8* %0 to %"class.JSC::CodeLocationCall"*
%tobool.i.i.i = icmp ugt i8* undef, inttoptr (i64 281474976710655 to i8*)
%or.cond.i = and i1 %tobool.i.i.i, undef
@@ -34,7 +34,7 @@ if.end.i:
br i1 undef, label %land.rhs.i121.i, label %_ZNK3JSC7JSValue8isStringEv.exit122.i
land.rhs.i121.i: ; preds = %if.end.i
- %tmp.i.i117.i = load %"class.JSC::Structure"** undef, align 8
+ %tmp.i.i117.i = load %"class.JSC::Structure"*, %"class.JSC::Structure"** undef, align 8
br label %_ZNK3JSC7JSValue8isStringEv.exit122.i
_ZNK3JSC7JSValue8isStringEv.exit122.i: ; preds = %land.rhs.i121.i, %if.end.i
@@ -48,7 +48,7 @@ if.then.i92.i:
_ZN3JSC7JSValue19equalSlowCaseInlineEPNS_9ExecStateES0_S0_.exit: ; preds = %_ZNK3JSC7JSValue8isStringEv.exit122.i, %if.then.i.i.i, %if.then.i
- %1 = load i8** undef, align 8
+ %1 = load i8*, i8** undef, align 8
br i1 undef, label %do.end39, label %do.body27
do.body27: ; preds = %_ZN3JSC7JSValue19equalSlowCaseInlineEPNS_9ExecStateES0_S0_.exit
Modified: llvm/trunk/test/CodeGen/X86/2011-05-09-loaduse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-05-09-loaduse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-05-09-loaduse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-05-09-loaduse.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
;CHECK: ret
define float @test(<4 x float>* %A) nounwind {
entry:
- %T = load <4 x float>* %A
+ %T = load <4 x float>, <4 x float>* %A
%R = extractelement <4 x float> %T, i32 3
store <4 x float><float 0.0, float 0.0, float 0.0, float 0.0>, <4 x float>* %A
ret float %R
Modified: llvm/trunk/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll Fri Feb 27 15:17:42 2015
@@ -41,7 +41,7 @@ cond.false156.i:
cond.end166.i: ; preds = %cond.false156.i, %cond.true138.i
%idxprom1113.i = phi i64 [ %idxprom1114.i, %cond.false156.i ], [ undef, %cond.true138.i ]
- %tmp235.i = load %struct.state** getelementptr inbounds (%struct.dfa* @aux_temp, i64 0, i32 2), align 8
+ %tmp235.i = load %struct.state*, %struct.state** getelementptr inbounds (%struct.dfa* @aux_temp, i64 0, i32 2), align 8
%att.i = getelementptr inbounds %struct.state, %struct.state* %tmp235.i, i64 %idxprom1113.i, i32 0
store i32 0, i32* %att.i, align 4
ret void
Modified: llvm/trunk/test/CodeGen/X86/2011-05-27-CrossClassCoalescing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-05-27-CrossClassCoalescing.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-05-27-CrossClassCoalescing.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-05-27-CrossClassCoalescing.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ land.lhs.true:
for.body.i: ; preds = %for.inc.i, %if.then
%tmp3524.i = phi i32 [ 0, %land.lhs.true ], [ %tmp351.i, %for.inc.i ]
- %tmp6.i12 = load i32* undef, align 4
+ %tmp6.i12 = load i32, i32* undef, align 4
br i1 undef, label %for.inc.i, label %if.then.i17
if.then.i17: ; preds = %for.body.i
@@ -28,7 +28,7 @@ if.then.i17:
%and14.i = and i32 %shr.i14, 255
%idxprom15.i = zext i32 %and14.i to i64
%arrayidx16.i = getelementptr inbounds [256 x i32], [256 x i32]* @bit_count, i64 0, i64 %idxprom15.i
- %tmp17.i15 = load i32* %arrayidx16.i, align 4
+ %tmp17.i15 = load i32, i32* %arrayidx16.i, align 4
%add.i = add i32 0, %tmp3524.i
%add24.i = add i32 %add.i, %tmp17.i15
%add31.i = add i32 %add24.i, 0
Modified: llvm/trunk/test/CodeGen/X86/2011-06-01-fildll.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-06-01-fildll.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-06-01-fildll.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-06-01-fildll.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ define float @f(i64* nocapture %x) nounw
entry:
; CHECK: movl
; CHECK-NOT: movl
- %tmp1 = load i64* %x, align 4
+ %tmp1 = load i64, i64* %x, align 4
; CHECK: fildll
%conv = sitofp i64 %tmp1 to float
%add = fadd float %conv, 1.000000e+00
Modified: llvm/trunk/test/CodeGen/X86/2011-06-03-x87chain.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-06-03-x87chain.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-06-03-x87chain.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-06-03-x87chain.ll Fri Feb 27 15:17:42 2015
@@ -2,7 +2,7 @@
define float @chainfail1(i64* nocapture %a, i64* nocapture %b, i32 %x, i32 %y, float* nocapture %f) nounwind uwtable noinline ssp {
entry:
- %tmp1 = load i64* %a, align 8
+ %tmp1 = load i64, i64* %a, align 8
; Insure x87 ops are properly chained, order preserved.
; CHECK: fildll
%conv = sitofp i64 %tmp1 to float
@@ -23,7 +23,7 @@ entry:
%sub = add nsw i32 %mul, -1
%idxprom = sext i32 %sub to i64
%arrayidx = getelementptr inbounds i64, i64* %a, i64 %idxprom
- %tmp4 = load i64* %arrayidx, align 8
+ %tmp4 = load i64, i64* %arrayidx, align 8
; CHECK: fildll
%conv = sitofp i64 %tmp4 to float
store float %conv, float* %f, align 4
@@ -35,7 +35,7 @@ entry:
br i1 undef, label %while.end, label %while.body
while.body: ; preds = %while.body, %entry
- %x.1.copyload = load i24* undef, align 1
+ %x.1.copyload = load i24, i24* undef, align 1
%conv = sitofp i24 %x.1.copyload to float
%div = fmul float %conv, 0x3E80000000000000
store float %div, float* undef, align 4
Modified: llvm/trunk/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll Fri Feb 27 15:17:42 2015
@@ -32,17 +32,17 @@ bb8:
store i8* bitcast (%0* @0 to i8*), i8** %tmp15
%tmp16 = bitcast %3* %tmp7 to void ()*
store void ()* %tmp16, void ()** %tmp6, align 8
- %tmp17 = load void ()** %tmp6, align 8
+ %tmp17 = load void ()*, void ()** %tmp6, align 8
%tmp18 = bitcast void ()* %tmp17 to %6*
%tmp19 = getelementptr inbounds %6, %6* %tmp18, i32 0, i32 3
%tmp20 = bitcast %6* %tmp18 to i8*
- %tmp21 = load i8** %tmp19
+ %tmp21 = load i8*, i8** %tmp19
%tmp22 = bitcast i8* %tmp21 to void (i8*)*
call void %tmp22(i8* %tmp20)
br label %bb23
bb23: ; preds = %bb8
- %tmp24 = load i64* %tmp5, align 8
+ %tmp24 = load i64, i64* %tmp5, align 8
%tmp25 = add i64 %tmp24, 1
store i64 %tmp25, i64* %tmp5, align 8
%tmp26 = icmp ult i64 %tmp25, 10
Modified: llvm/trunk/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ entry:
%tmp6 = add i64 %a, -2147483647
%.sum = add i64 %tmp6, %b
%tmp8 = getelementptr inbounds [39 x i8], [39 x i8]* %stack_main, i64 0, i64 %.sum
- %tmp9 = load i8* %tmp8, align 1
+ %tmp9 = load i8, i8* %tmp8, align 1
%tmp10 = sext i8 %tmp9 to i32
ret i32 %tmp10
}
Modified: llvm/trunk/test/CodeGen/X86/2011-09-14-valcoalesce.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-09-14-valcoalesce.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-09-14-valcoalesce.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-09-14-valcoalesce.ll Fri Feb 27 15:17:42 2015
@@ -121,7 +121,7 @@ while.body.i188:
while.body85.i: ; preds = %while.body85.i, %while.body.i188
%aFreq.0518.i = phi i32 [ %add93.i, %while.body85.i ], [ 0, %while.body.i188 ]
%inc87.i = add nsw i32 0, 1
- %tmp91.i = load i32* undef, align 4
+ %tmp91.i = load i32, i32* undef, align 4
%add93.i = add nsw i32 %tmp91.i, %aFreq.0518.i
%or.cond514.i = and i1 undef, false
br i1 %or.cond514.i, label %while.body85.i, label %while.end.i
Modified: llvm/trunk/test/CodeGen/X86/2011-09-21-setcc-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-09-21-setcc-bug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-09-21-setcc-bug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-09-21-setcc-bug.ll Fri Feb 27 15:17:42 2015
@@ -3,10 +3,10 @@
; Make sure we are not crashing on this code.
define void @load_4_i8(<4 x i8>* %k, <4 x i8>* %y, <4 x double>* %A1, <4 x double>* %A0) {
- %A = load <4 x i8>* %k
- %B = load <4 x i8>* %y
- %C = load <4 x double>* %A0
- %D= load <4 x double>* %A1
+ %A = load <4 x i8>, <4 x i8>* %k
+ %B = load <4 x i8>, <4 x i8>* %y
+ %C = load <4 x double>, <4 x double>* %A0
+ %D= load <4 x double>, <4 x double>* %A1
%M = icmp uge <4 x i8> %A, %B
%T = select <4 x i1> %M, <4 x double> %C, <4 x double> %D
store <4 x double> %T, <4 x double>* undef
@@ -15,10 +15,10 @@ define void @load_4_i8(<4 x i8>* %k, <4
define void @load_256_i8(<256 x i8>* %k, <256 x i8>* %y, <256 x double>* %A1, <256 x double>* %A0) {
- %A = load <256 x i8>* %k
- %B = load <256 x i8>* %y
- %C = load <256 x double>* %A0
- %D= load <256 x double>* %A1
+ %A = load <256 x i8>, <256 x i8>* %k
+ %B = load <256 x i8>, <256 x i8>* %y
+ %C = load <256 x double>, <256 x double>* %A0
+ %D= load <256 x double>, <256 x double>* %A1
%M = icmp uge <256 x i8> %A, %B
%T = select <256 x i1> %M, <256 x double> %C, <256 x double> %D
store <256 x double> %T, <256 x double>* undef
Modified: llvm/trunk/test/CodeGen/X86/2011-10-11-srl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-10-11-srl.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-10-11-srl.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-10-11-srl.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
target triple = "x86_64-unknown-linux-gnu"
define void @m387(<2 x i8>* %p, <2 x i16>* %q) {
- %t = load <2 x i8>* %p
+ %t = load <2 x i8>, <2 x i8>* %p
%r = sext <2 x i8> %t to <2 x i16>
store <2 x i16> %r, <2 x i16>* %q
ret void
Modified: llvm/trunk/test/CodeGen/X86/2011-10-12-MachineCSE.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-10-12-MachineCSE.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-10-12-MachineCSE.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-10-12-MachineCSE.ll Fri Feb 27 15:17:42 2015
@@ -16,15 +16,15 @@ target triple = "x86_64-apple-macosx10.7
define %struct.rtx_def* @gen_add3_insn(%struct.rtx_def* %r0, %struct.rtx_def* %r1, %struct.rtx_def* %c) nounwind uwtable ssp {
entry:
%0 = bitcast %struct.rtx_def* %r0 to i32*
- %1 = load i32* %0, align 8
+ %1 = load i32, i32* %0, align 8
%2 = lshr i32 %1, 16
%bf.clear = and i32 %2, 255
%idxprom = sext i32 %bf.clear to i64
- %3 = load %struct.optab** getelementptr inbounds ([49 x %struct.optab*]* @optab_table, i32 0, i64 0), align 8
+ %3 = load %struct.optab*, %struct.optab** getelementptr inbounds ([49 x %struct.optab*]* @optab_table, i32 0, i64 0), align 8
%handlers = getelementptr inbounds %struct.optab, %struct.optab* %3, i32 0, i32 1
%arrayidx = getelementptr inbounds [59 x %struct.anon.3], [59 x %struct.anon.3]* %handlers, i32 0, i64 %idxprom
%insn_code = getelementptr inbounds %struct.anon.3, %struct.anon.3* %arrayidx, i32 0, i32 0
- %4 = load i32* %insn_code, align 4
+ %4 = load i32, i32* %insn_code, align 4
%cmp = icmp eq i32 %4, 1317
br i1 %cmp, label %if.then, label %lor.lhs.false
@@ -32,19 +32,19 @@ lor.lhs.false:
%idxprom1 = sext i32 %4 to i64
%arrayidx2 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom1
%operand = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx2, i32 0, i32 3
- %5 = load %struct.insn_operand_data** %operand, align 8
+ %5 = load %struct.insn_operand_data*, %struct.insn_operand_data** %operand, align 8
%arrayidx3 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %5, i64 0
%predicate = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %arrayidx3, i32 0, i32 0
- %6 = load i32 (%struct.rtx_def*, i32)** %predicate, align 8
+ %6 = load i32 (%struct.rtx_def*, i32)*, i32 (%struct.rtx_def*, i32)** %predicate, align 8
%idxprom4 = sext i32 %4 to i64
%arrayidx5 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom4
%operand6 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx5, i32 0, i32 3
- %7 = load %struct.insn_operand_data** %operand6, align 8
+ %7 = load %struct.insn_operand_data*, %struct.insn_operand_data** %operand6, align 8
%arrayidx7 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %7, i64 0
%8 = bitcast %struct.insn_operand_data* %arrayidx7 to i8*
%bf.field.offs = getelementptr i8, i8* %8, i32 16
%9 = bitcast i8* %bf.field.offs to i32*
- %10 = load i32* %9, align 8
+ %10 = load i32, i32* %9, align 8
%bf.clear8 = and i32 %10, 65535
%call = tail call i32 %6(%struct.rtx_def* %r0, i32 %bf.clear8)
%tobool = icmp ne i32 %call, 0
@@ -54,19 +54,19 @@ lor.lhs.false9:
%idxprom10 = sext i32 %4 to i64
%arrayidx11 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom10
%operand12 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx11, i32 0, i32 3
- %11 = load %struct.insn_operand_data** %operand12, align 8
+ %11 = load %struct.insn_operand_data*, %struct.insn_operand_data** %operand12, align 8
%arrayidx13 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %11, i64 1
%predicate14 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %arrayidx13, i32 0, i32 0
- %12 = load i32 (%struct.rtx_def*, i32)** %predicate14, align 8
+ %12 = load i32 (%struct.rtx_def*, i32)*, i32 (%struct.rtx_def*, i32)** %predicate14, align 8
%idxprom15 = sext i32 %4 to i64
%arrayidx16 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom15
%operand17 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx16, i32 0, i32 3
- %13 = load %struct.insn_operand_data** %operand17, align 8
+ %13 = load %struct.insn_operand_data*, %struct.insn_operand_data** %operand17, align 8
%arrayidx18 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %13, i64 1
%14 = bitcast %struct.insn_operand_data* %arrayidx18 to i8*
%bf.field.offs19 = getelementptr i8, i8* %14, i32 16
%15 = bitcast i8* %bf.field.offs19 to i32*
- %16 = load i32* %15, align 8
+ %16 = load i32, i32* %15, align 8
%bf.clear20 = and i32 %16, 65535
%call21 = tail call i32 %12(%struct.rtx_def* %r1, i32 %bf.clear20)
%tobool22 = icmp ne i32 %call21, 0
@@ -76,19 +76,19 @@ lor.lhs.false23:
%idxprom24 = sext i32 %4 to i64
%arrayidx25 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom24
%operand26 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx25, i32 0, i32 3
- %17 = load %struct.insn_operand_data** %operand26, align 8
+ %17 = load %struct.insn_operand_data*, %struct.insn_operand_data** %operand26, align 8
%arrayidx27 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %17, i64 2
%predicate28 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %arrayidx27, i32 0, i32 0
- %18 = load i32 (%struct.rtx_def*, i32)** %predicate28, align 8
+ %18 = load i32 (%struct.rtx_def*, i32)*, i32 (%struct.rtx_def*, i32)** %predicate28, align 8
%idxprom29 = sext i32 %4 to i64
%arrayidx30 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom29
%operand31 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx30, i32 0, i32 3
- %19 = load %struct.insn_operand_data** %operand31, align 8
+ %19 = load %struct.insn_operand_data*, %struct.insn_operand_data** %operand31, align 8
%arrayidx32 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %19, i64 2
%20 = bitcast %struct.insn_operand_data* %arrayidx32 to i8*
%bf.field.offs33 = getelementptr i8, i8* %20, i32 16
%21 = bitcast i8* %bf.field.offs33 to i32*
- %22 = load i32* %21, align 8
+ %22 = load i32, i32* %21, align 8
%bf.clear34 = and i32 %22, 65535
%call35 = tail call i32 %18(%struct.rtx_def* %c, i32 %bf.clear34)
%tobool36 = icmp ne i32 %call35, 0
@@ -101,7 +101,7 @@ if.end:
%idxprom37 = sext i32 %4 to i64
%arrayidx38 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom37
%genfun = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx38, i32 0, i32 2
- %23 = load %struct.rtx_def* (%struct.rtx_def*, ...)** %genfun, align 8
+ %23 = load %struct.rtx_def* (%struct.rtx_def*, ...)*, %struct.rtx_def* (%struct.rtx_def*, ...)** %genfun, align 8
%call39 = tail call %struct.rtx_def* (%struct.rtx_def*, ...)* %23(%struct.rtx_def* %r0, %struct.rtx_def* %r1, %struct.rtx_def* %c)
br label %return
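Function-pointer loads show why the explicit type helps readability: under the old syntax the whole function type had to be parsed out of the doubly-indirected pointer operand, as in the %predicate and %genfun loads above (the same rewrite applies when the pointer operand is a constant expression, as in the @optab_table load). A sketch under the same typed-pointer assumptions, with hypothetical names:

  define i32 @call_through(i32 (i32)** %slot) {
    ; the loaded value's type (a function pointer) now leads the operand list
    %fp = load i32 (i32)*, i32 (i32)** %slot, align 8
    %r = call i32 %fp(i32 7)
    ret i32 %r
  }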
Modified: llvm/trunk/test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll Fri Feb 27 15:17:42 2015
@@ -15,11 +15,11 @@ entry:
store <4 x float> <float 0x4008CCCCC0000000, float 0x40099999A0000000, float 0x400A666660000000, float 0x400B333340000000>, <4 x float>* %p3, align 16
store <4 x float> <float 0x4010666660000000, float 0x4010CCCCC0000000, float 0x4011333340000000, float 0x40119999A0000000>, <4 x float>* %p4, align 16
store <4 x float> <float 0x4014666660000000, float 0x4014CCCCC0000000, float 0x4015333340000000, float 0x40159999A0000000>, <4 x float>* %p5, align 16
- %0 = load <4 x float>* %p1, align 16
- %1 = load <4 x float>* %p2, align 16
- %2 = load <4 x float>* %p3, align 16
- %3 = load <4 x float>* %p4, align 16
- %4 = load <4 x float>* %p5, align 16
+ %0 = load <4 x float>, <4 x float>* %p1, align 16
+ %1 = load <4 x float>, <4 x float>* %p2, align 16
+ %2 = load <4 x float>, <4 x float>* %p3, align 16
+ %3 = load <4 x float>, <4 x float>* %p4, align 16
+ %4 = load <4 x float>, <4 x float>* %p5, align 16
; CHECK: movups {{%xmm[0-7]}}, (%esp)
; CHECK-NEXT: calll _dovectortest
call void @dovectortest(<4 x float> %0, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4)
Modified: llvm/trunk/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll Fri Feb 27 15:17:42 2015
@@ -18,8 +18,8 @@ define i32 @main() nounwind uwtable {
entry:
; CHECK: pmovsxbq i(%rip), %
; CHECK: pmovsxbq j(%rip), %
- %0 = load <2 x i8>* @i, align 8
- %1 = load <2 x i8>* @j, align 8
+ %0 = load <2 x i8>, <2 x i8>* @i, align 8
+ %1 = load <2 x i8>, <2 x i8>* @j, align 8
%div = sdiv <2 x i8> %1, %0
store <2 x i8> %div, <2 x i8>* getelementptr inbounds (%union.anon* @res, i32 0, i32 0), align 8
ret i32 0
Modified: llvm/trunk/test/CodeGen/X86/2011-10-19-widen_vselect.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-10-19-widen_vselect.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-10-19-widen_vselect.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-10-19-widen_vselect.ll Fri Feb 27 15:17:42 2015
@@ -49,7 +49,7 @@ define void @full_test() {
br label %B1
B1: ; preds = %entry
- %0 = load <2 x float>* %Cy119
+ %0 = load <2 x float>, <2 x float>* %Cy119
%1 = fptosi <2 x float> %0 to <2 x i32>
%2 = sitofp <2 x i32> %1 to <2 x float>
%3 = fcmp ogt <2 x float> %0, zeroinitializer
@@ -58,7 +58,7 @@ define void @full_test() {
%6 = fcmp oeq <2 x float> %2, %0
%7 = select <2 x i1> %6, <2 x float> %0, <2 x float> %5
store <2 x float> %7, <2 x float>* %Cy118
- %8 = load <2 x float>* %Cy118
+ %8 = load <2 x float>, <2 x float>* %Cy118
store <2 x float> %8, <2 x float>* %Cy11a
ret void
}
Modified: llvm/trunk/test/CodeGen/X86/2011-10-27-tstore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-10-27-tstore.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-10-27-tstore.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-10-27-tstore.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ target triple = "x86_64-unknown-linux-gn
;CHECK: ret
define void @ltstore(<4 x i32>* %pA, <2 x i32>* %pB) {
entry:
- %in = load <4 x i32>* %pA
+ %in = load <4 x i32>, <4 x i32>* %pA
%j = shufflevector <4 x i32> %in, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
store <2 x i32> %j, <2 x i32>* %pB
ret void
Modified: llvm/trunk/test/CodeGen/X86/2011-11-22-AVX2-Domains.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-11-22-AVX2-Domains.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-11-22-AVX2-Domains.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-11-22-AVX2-Domains.ll Fri Feb 27 15:17:42 2015
@@ -18,9 +18,9 @@ if_else:
br i1 undef, label %for_loop156.lr.ph, label %if_exit
for_loop156.lr.ph: ; preds = %if_else
- %val_6.i21244 = load i16* undef, align 2
+ %val_6.i21244 = load i16, i16* undef, align 2
%0 = insertelement <8 x i16> undef, i16 %val_6.i21244, i32 6
- %val_7.i21248 = load i16* undef, align 2
+ %val_7.i21248 = load i16, i16* undef, align 2
%1 = insertelement <8 x i16> %0, i16 %val_7.i21248, i32 7
%uint2uint32.i20206 = zext <8 x i16> %1 to <8 x i32>
%bitop5.i20208 = and <8 x i32> %uint2uint32.i20206, <i32 31744, i32 31744, i32 31744, i32 31744, i32 31744, i32 31744, i32 31744, i32 31744>
@@ -39,26 +39,26 @@ for_loop156.lr.ph:
%binop407 = fadd <8 x float> %binop406, <float -2.000000e+00, float -2.000000e+00, float -2.000000e+00, float -2.000000e+00, float -2.000000e+00, float -2.000000e+00, float -2.000000e+00, float -2.000000e+00>
%binop408 = fmul <8 x float> zeroinitializer, %binop407
%binop411 = fsub <8 x float> <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>, undef
- %val_4.i21290 = load i16* undef, align 2
+ %val_4.i21290 = load i16, i16* undef, align 2
%2 = insertelement <8 x i16> undef, i16 %val_4.i21290, i32 4
- %val_5.i21294 = load i16* undef, align 2
+ %val_5.i21294 = load i16, i16* undef, align 2
%3 = insertelement <8 x i16> %2, i16 %val_5.i21294, i32 5
- %val_6.i21298 = load i16* undef, align 2
+ %val_6.i21298 = load i16, i16* undef, align 2
%4 = insertelement <8 x i16> %3, i16 %val_6.i21298, i32 6
%ptr_7.i21301 = inttoptr i64 undef to i16*
- %val_7.i21302 = load i16* %ptr_7.i21301, align 2
+ %val_7.i21302 = load i16, i16* %ptr_7.i21301, align 2
%5 = insertelement <8 x i16> %4, i16 %val_7.i21302, i32 7
%uint2uint32.i20218 = zext <8 x i16> %5 to <8 x i32>
- %structelement561 = load i8** undef, align 8
+ %structelement561 = load i8*, i8** undef, align 8
%ptr2int563 = ptrtoint i8* %structelement561 to i64
%smear.ptr_smear7571 = insertelement <8 x i64> undef, i64 %ptr2int563, i32 7
%new_ptr582 = add <8 x i64> %smear.ptr_smear7571, zeroinitializer
- %val_5.i21509 = load i8* null, align 1
+ %val_5.i21509 = load i8, i8* null, align 1
%6 = insertelement <8 x i8> undef, i8 %val_5.i21509, i32 5
%7 = insertelement <8 x i8> %6, i8 undef, i32 6
%iptr_7.i21515 = extractelement <8 x i64> %new_ptr582, i32 7
%ptr_7.i21516 = inttoptr i64 %iptr_7.i21515 to i8*
- %val_7.i21517 = load i8* %ptr_7.i21516, align 1
+ %val_7.i21517 = load i8, i8* %ptr_7.i21516, align 1
%8 = insertelement <8 x i8> %7, i8 %val_7.i21517, i32 7
%uint2float.i20245 = uitofp <8 x i8> %8 to <8 x float>
%binop.i20246 = fmul <8 x float> %uint2float.i20245, <float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000>
Modified: llvm/trunk/test/CodeGen/X86/2011-12-08-AVXISelBugs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-12-08-AVXISelBugs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-12-08-AVXISelBugs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-12-08-AVXISelBugs.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ loop:
br i1 undef, label %0, label %t1.exit
; <label>:0 ; preds = %loop
- %1 = load <16 x i32> addrspace(1)* undef, align 64
+ %1 = load <16 x i32>, <16 x i32> addrspace(1)* undef, align 64
%2 = shufflevector <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>, <16 x i32> %1, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 0, i32 0>
store <16 x i32> %2, <16 x i32> addrspace(1)* undef, align 64
br label %t1.exit
@@ -29,7 +29,7 @@ define void @t2() nounwind {
br i1 undef, label %1, label %4
; <label>:1 ; preds = %0
- %2 = load <16 x i32> addrspace(1)* undef, align 64
+ %2 = load <16 x i32>, <16 x i32> addrspace(1)* undef, align 64
%3 = shufflevector <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>, <16 x i32> %2, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 20, i32 0, i32 0, i32 0, i32 0>
store <16 x i32> %3, <16 x i32> addrspace(1)* undef, align 64
br label %4
@@ -50,7 +50,7 @@ loop:
; <label>:0 ; preds = %loop
%1 = shufflevector <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 25, i32 0>
- %2 = load <16 x i32> addrspace(1)* undef, align 64
+ %2 = load <16 x i32>, <16 x i32> addrspace(1)* undef, align 64
%3 = shufflevector <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>, <16 x i32> %2, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 28, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
store <16 x i32> %3, <16 x i32> addrspace(1)* undef, align 64
br label %t2.exit
@@ -64,7 +64,7 @@ return:
define <3 x i64> @t4() nounwind {
entry:
- %0 = load <2 x i64> addrspace(1)* undef, align 16
+ %0 = load <2 x i64>, <2 x i64> addrspace(1)* undef, align 16
%1 = extractelement <2 x i64> %0, i32 0
%2 = insertelement <3 x i64> <i64 undef, i64 0, i64 0>, i64 %1, i32 0
ret <3 x i64> %2
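Address-space qualifiers stay attached to the pointer operand's type; the new leading type names only the loaded value, so it carries no addrspace. A sketch with a hypothetical addrspace(1) argument, mirroring the undef-pointer loads in the tests above:

  define <2 x i64> @load_as1(<2 x i64> addrspace(1)* %p) {
    ; the result type <2 x i64> is unqualified; addrspace(1) belongs to the pointer
    %v = load <2 x i64>, <2 x i64> addrspace(1)* %p, align 16
    ret <2 x i64> %v
  }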
Modified: llvm/trunk/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
; CHECK-LABEL: test:
; CHECK: pextrd $2, %xmm
define <4 x i32> @test(<4 x i32>* %p) {
- %v = load <4 x i32>* %p
+ %v = load <4 x i32>, <4 x i32>* %p
%e = extractelement <4 x i32> %v, i32 2
%cmp = icmp eq i32 %e, 3
%sel = select i1 %cmp, <4 x i32> %v, <4 x i32> zeroinitializer
Modified: llvm/trunk/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll Fri Feb 27 15:17:42 2015
@@ -109,7 +109,7 @@ bb49:
%tmp51 = add i32 %tmp50, undef
%tmp52 = add i32 %tmp50, undef
%tmp53 = getelementptr i32, i32* %tmp13, i32 %tmp52
- %tmp54 = load i32* %tmp53, align 4
+ %tmp54 = load i32, i32* %tmp53, align 4
%tmp55 = add i32 %tmp50, 1
%tmp56 = icmp eq i32 %tmp55, %tmp8
br i1 %tmp56, label %bb57, label %bb49
Modified: llvm/trunk/test/CodeGen/X86/2012-01-11-split-cv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-01-11-split-cv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-01-11-split-cv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-01-11-split-cv.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
;CHECK-LABEL: add18i16:
define void @add18i16(<18 x i16>* nocapture sret %ret, <18 x i16>* %bp) nounwind {
;CHECK: vmovaps
- %b = load <18 x i16>* %bp, align 16
+ %b = load <18 x i16>, <18 x i16>* %bp, align 16
%x = add <18 x i16> zeroinitializer, %b
store <18 x i16> %x, <18 x i16>* %ret, align 16
;CHECK: ret
Modified: llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
; CHECK: endless_loop
define void @endless_loop() {
entry:
- %0 = load <8 x i32> addrspace(1)* undef, align 32
+ %0 = load <8 x i32>, <8 x i32> addrspace(1)* undef, align 32
%1 = shufflevector <8 x i32> %0, <8 x i32> undef, <16 x i32> <i32 4, i32 4, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = shufflevector <16 x i32> <i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 undef>, <16 x i32> %1, <16 x i32> <i32 16, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 17>
store <16 x i32> %2, <16 x i32> addrspace(1)* undef, align 64
Modified: llvm/trunk/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
define void @baz() nounwind ssp {
entry:
- %0 = load i8** @ptr, align 4
+ %0 = load i8*, i8** @ptr, align 4
%cmp = icmp eq i8* %0, null
fence seq_cst
br i1 %cmp, label %if.then, label %if.else
Modified: llvm/trunk/test/CodeGen/X86/2012-02-12-dagco.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-02-12-dagco.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-02-12-dagco.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-02-12-dagco.ll Fri Feb 27 15:17:42 2015
@@ -3,9 +3,9 @@ target triple = "x86_64-unknown-linux-gn
; Make sure we are not crashing on this one
define void @dagco_crash() {
entry:
- %srcval.i411.i = load <4 x i64>* undef, align 1
+ %srcval.i411.i = load <4 x i64>, <4 x i64>* undef, align 1
%0 = extractelement <4 x i64> %srcval.i411.i, i32 3
- %srcval.i409.i = load <2 x i64>* undef, align 1
+ %srcval.i409.i = load <2 x i64>, <2 x i64>* undef, align 1
%1 = extractelement <2 x i64> %srcval.i409.i, i32 0
%2 = insertelement <8 x i64> undef, i64 %0, i32 5
%3 = insertelement <8 x i64> %2, i64 %1, i32 6
Modified: llvm/trunk/test/CodeGen/X86/2012-02-29-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-02-29-CoalescerBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-02-29-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-02-29-CoalescerBug.ll Fri Feb 27 15:17:42 2015
@@ -14,9 +14,9 @@ target triple = "i386-apple-macosx10.7.0
define void @fn2() nounwind optsize ssp {
entry:
store i64 0, i64* bitcast ([2 x [2 x %struct.S0]]* @d to i64*), align 4
- %0 = load i32* @c, align 4
+ %0 = load i32, i32* @c, align 4
%tobool2 = icmp eq i32 %0, 0
- %1 = load i32* @a, align 4
+ %1 = load i32, i32* @a, align 4
%tobool4 = icmp eq i32 %1, 0
br label %for.cond
Modified: llvm/trunk/test/CodeGen/X86/2012-03-26-PostRALICMBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-03-26-PostRALICMBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-03-26-PostRALICMBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-03-26-PostRALICMBug.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ entry:
if.end: ; preds = %entry
%size5 = getelementptr inbounds %struct.ref_s, %struct.ref_s* %op, i64 0, i32 2
- %tmp6 = load i16* %size5, align 2
+ %tmp6 = load i16, i16* %size5, align 2
%tobool1 = icmp eq i16 %tmp6, 0
%1 = select i1 %tobool1, i32 1396, i32 -1910
%index10 = add i32 %index9, %1
@@ -29,12 +29,12 @@ if.end:
while.body.lr.ph: ; preds = %if.end
%refs = bitcast %struct.ref_s* %op to %struct.ref_s**
- %tmp9 = load %struct.ref_s** %refs, align 8
+ %tmp9 = load %struct.ref_s*, %struct.ref_s** %refs, align 8
%tmp4 = zext i16 %tmp6 to i64
%index13 = add i32 %index10, 1658
%2 = sext i32 %index13 to i64
%3 = getelementptr [3891 x i64], [3891 x i64]* @table, i64 0, i64 %2
- %blockaddress14 = load i64* %3, align 8
+ %blockaddress14 = load i64, i64* %3, align 8
%4 = inttoptr i64 %blockaddress14 to i8*
indirectbr i8* %4, [label %while.body]
@@ -50,7 +50,7 @@ while.body:
%tmp8 = select i1 %exitcond5, i64 13, i64 0
%5 = sext i32 %index15 to i64
%6 = getelementptr [3891 x i64], [3891 x i64]* @table, i64 0, i64 %5
- %blockaddress16 = load i64* %6, align 8
+ %blockaddress16 = load i64, i64* %6, align 8
%7 = inttoptr i64 %blockaddress16 to i8*
indirectbr i8* %7, [label %return, label %while.body]
Modified: llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll Fri Feb 27 15:17:42 2015
@@ -14,9 +14,9 @@
;CHECK: ret
define void @func() nounwind ssp {
- %tmp = load <4 x float>* null, align 1
+ %tmp = load <4 x float>, <4 x float>* null, align 1
%tmp14 = getelementptr <4 x float>, <4 x float>* null, i32 2
- %tmp15 = load <4 x float>* %tmp14, align 1
+ %tmp15 = load <4 x float>, <4 x float>* %tmp14, align 1
%tmp16 = shufflevector <4 x float> %tmp, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
%tmp17 = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %tmp16, <4 x float> undef, i8 1)
%tmp18 = bitcast <4 x float> %tmp to <16 x i8>
Modified: llvm/trunk/test/CodeGen/X86/2012-07-10-extload64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-07-10-extload64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-07-10-extload64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-07-10-extload64.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
define void @load_store(<4 x i16>* %in) {
entry:
; CHECK: pmovzxwd
- %A27 = load <4 x i16>* %in, align 4
+ %A27 = load <4 x i16>, <4 x i16>* %in, align 4
%A28 = add <4 x i16> %A27, %A27
; CHECK: movlpd
store <4 x i16> %A28, <4 x i16>* %in, align 4
@@ -25,7 +25,7 @@ BB:
;CHECK-LABEL: load_64:
define <2 x i32> @load_64(<2 x i32>* %ptr) {
BB:
- %t = load <2 x i32>* %ptr
+ %t = load <2 x i32>, <2 x i32>* %ptr
ret <2 x i32> %t
;CHECK: pmovzxdq
;CHECK: ret
Modified: llvm/trunk/test/CodeGen/X86/2012-07-15-broadcastfold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-07-15-broadcastfold.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-07-15-broadcastfold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-07-15-broadcastfold.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ declare x86_fastcallcc i64 @barrier()
;CHECK: ret
define <8 x float> @bcast_fold( float* %A) {
BB:
- %A0 = load float* %A
+ %A0 = load float, float* %A
%tt3 = call x86_fastcallcc i64 @barrier()
br i1 undef, label %work, label %exit
Modified: llvm/trunk/test/CodeGen/X86/2012-08-17-legalizer-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-08-17-legalizer-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-08-17-legalizer-crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-08-17-legalizer-crash.ll Fri Feb 27 15:17:42 2015
@@ -12,9 +12,9 @@ target triple = "x86_64-apple-macosx10.8
define void @fn1() nounwind uwtable ssp {
entry:
- %0 = load %struct._GtkSheetRow** @a, align 8
+ %0 = load %struct._GtkSheetRow*, %struct._GtkSheetRow** @a, align 8
%1 = bitcast %struct._GtkSheetRow* %0 to i576*
- %srcval2 = load i576* %1, align 8
+ %srcval2 = load i576, i576* %1, align 8
%tobool = icmp ugt i576 %srcval2, 57586096570152913699974892898380567793532123114264532903689671329431521032595044740083720782129802971518987656109067457577065805510327036019308994315074097345724415
br i1 %tobool, label %if.then, label %if.end
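Arbitrary-width integers get the same treatment; with the type stated at the load site, the 576-bit width is visible without chasing the bitcast that produced the pointer. A minimal sketch, assuming a hypothetical i576* argument:

  define i576 @load_wide(i576* %p) {
    ; the loaded width (576 bits) is now explicit in the instruction itself
    %w = load i576, i576* %p, align 8
    ret i576 %w
  }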
Modified: llvm/trunk/test/CodeGen/X86/2012-09-28-CGPBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-09-28-CGPBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-09-28-CGPBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-09-28-CGPBug.ll Fri Feb 27 15:17:42 2015
@@ -16,10 +16,10 @@
define void @h(i8*) nounwind ssp {
%2 = alloca i8*
store i8* %0, i8** %2
- %3 = load i8** %2
+ %3 = load i8*, i8** %2
%4 = bitcast i8* %3 to { i32, i32 }*
%5 = getelementptr { i32, i32 }, { i32, i32 }* %4, i32 0, i32 0
- %6 = load i32* %5
+ %6 = load i32, i32* %5
%7 = srem i32 %6, 2
%8 = icmp slt i32 %6, 2
%9 = select i1 %8, i32 %6, i32 %7
@@ -29,7 +29,7 @@ define void @h(i8*) nounwind ssp {
; <label>:11 ; preds = %1
%12 = zext i1 %10 to i32
%13 = getelementptr [4 x i32], [4 x i32]* @JT, i32 0, i32 %12
- %14 = load i32* %13
+ %14 = load i32, i32* %13
%15 = add i32 %14, ptrtoint (i8* blockaddress(@h, %11) to i32)
%16 = inttoptr i32 %15 to i8*
indirectbr i8* %16, [label %17, label %18]
Modified: llvm/trunk/test/CodeGen/X86/2012-10-02-DAGCycle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-10-02-DAGCycle.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-10-02-DAGCycle.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-10-02-DAGCycle.ll Fri Feb 27 15:17:42 2015
@@ -9,9 +9,9 @@
define i32 @t(%TRp* inreg %rp) nounwind optsize ssp {
entry:
%handler = getelementptr inbounds %TRp, %TRp* %rp, i32 0, i32 1
- %0 = load %TRH** %handler, align 4
+ %0 = load %TRH*, %TRH** %handler, align 4
%sync = getelementptr inbounds %TRH, %TRH* %0, i32 0, i32 4
- %sync12 = load {}** %sync, align 4
+ %sync12 = load {}*, {}** %sync, align 4
%1 = bitcast {}* %sync12 to i32 (%TRp*)*
%call = tail call i32 %1(%TRp* inreg %rp) nounwind optsize
ret i32 %call
@@ -29,13 +29,13 @@ entry:
br i1 undef, label %if.then, label %if.end17
if.then: ; preds = %entry
- %vecnorm.sroa.2.8.copyload = load float* undef, align 4
+ %vecnorm.sroa.2.8.copyload = load float, float* undef, align 4
%cmp4 = fcmp olt float undef, 0x3D10000000000000
%vecnorm.sroa.2.8.copyload36 = select i1 %cmp4, float -1.000000e+00, float %vecnorm.sroa.2.8.copyload
%call.i.i.i = tail call float @sqrtf(float 0.000000e+00) nounwind readnone
%div.i.i = fdiv float 1.000000e+00, %call.i.i.i
%mul7.i.i.i = fmul float %div.i.i, %vecnorm.sroa.2.8.copyload36
- %1 = load float (%btConvexInternalShape*)** undef, align 8
+ %1 = load float (%btConvexInternalShape*)*, float (%btConvexInternalShape*)** undef, align 8
%call12 = tail call float %1(%btConvexInternalShape* %0)
%mul7.i.i = fmul float %call12, %mul7.i.i.i
%retval.sroa.0.4.insert = insertelement <2 x float> zeroinitializer, float undef, i32 1
Modified: llvm/trunk/test/CodeGen/X86/2012-10-03-DAGCycle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-10-03-DAGCycle.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-10-03-DAGCycle.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-10-03-DAGCycle.ll Fri Feb 27 15:17:42 2015
@@ -13,11 +13,11 @@ define fastcc void @bar(%struct.pluto.0*
bb:
%tmp1 = alloca %struct.widget.375, align 8
%tmp2 = getelementptr inbounds %struct.pluto.0, %struct.pluto.0* %arg, i64 0, i32 1
- %tmp3 = load %struct.hoge.368** %tmp2, align 8
+ %tmp3 = load %struct.hoge.368*, %struct.hoge.368** %tmp2, align 8
store %struct.pluto.0* %arg, %struct.pluto.0** undef, align 8
%tmp = getelementptr inbounds %struct.widget.375, %struct.widget.375* %tmp1, i64 0, i32 2
%tmp4 = getelementptr %struct.pluto.0, %struct.pluto.0* %arg, i64 0, i32 0, i32 0
- %tmp5 = load %i8** %tmp4, align 8
+ %tmp5 = load %i8*, %i8** %tmp4, align 8
store %i8* %tmp5, %i8** %tmp, align 8
%tmp6 = getelementptr inbounds %struct.widget.375, %struct.widget.375* %tmp1, i64 0, i32 3
store %struct.hoge.368* %tmp3, %struct.hoge.368** %tmp6, align 8
Modified: llvm/trunk/test/CodeGen/X86/2012-10-18-crash-dagco.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-10-18-crash-dagco.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-10-18-crash-dagco.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-10-18-crash-dagco.ll Fri Feb 27 15:17:42 2015
@@ -22,23 +22,23 @@ bb27:
]
bb28: ; preds = %bb27, %bb26
- %tmp = load i32* null
+ %tmp = load i32, i32* null
%tmp29 = trunc i32 %tmp to i8
store i8* undef, i8** undef
- %tmp30 = load i32* null
+ %tmp30 = load i32, i32* null
%tmp31 = icmp eq i32 %tmp30, 0
%tmp32 = getelementptr inbounds [411 x i8], [411 x i8]* @global, i32 0, i32 undef
- %tmp33 = load i8* %tmp32, align 1
+ %tmp33 = load i8, i8* %tmp32, align 1
%tmp34 = getelementptr inbounds [411 x i8], [411 x i8]* @global, i32 0, i32 0
- %tmp35 = load i8* %tmp34, align 1
+ %tmp35 = load i8, i8* %tmp34, align 1
%tmp36 = select i1 %tmp31, i8 %tmp35, i8 %tmp33
%tmp37 = select i1 undef, i8 %tmp29, i8 %tmp36
%tmp38 = zext i8 %tmp37 to i32
%tmp39 = select i1 undef, i32 0, i32 %tmp38
%tmp40 = getelementptr inbounds i32, i32* null, i32 %tmp39
- %tmp41 = load i32* %tmp40, align 4
- %tmp42 = load i32* undef, align 4
- %tmp43 = load i32* undef
+ %tmp41 = load i32, i32* %tmp40, align 4
+ %tmp42 = load i32, i32* undef, align 4
+ %tmp43 = load i32, i32* undef
%tmp44 = xor i32 %tmp42, %tmp43
%tmp45 = lshr i32 %tmp44, 8
%tmp46 = lshr i32 %tmp44, 7
Modified: llvm/trunk/test/CodeGen/X86/2012-11-28-merge-store-alias.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-11-28-merge-store-alias.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-11-28-merge-store-alias.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-11-28-merge-store-alias.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ define i32 @merge_stores_can() nounwind
store i32 0, i32* %O1_1
store i32 0, i32* %O1_2
- %ret = load i32* %ld_ptr ; <--- does not alias.
+ %ret = load i32, i32* %ld_ptr ; <--- does not alias.
store i32 0, i32* %O1_3
store i32 0, i32* %O1_4
@@ -44,7 +44,7 @@ define i32 @merge_stores_cant([10 x i32]
store i32 0, i32* %O1_1
store i32 0, i32* %O1_2
- %ret = load i32* %ld_ptr ; <--- may alias
+ %ret = load i32, i32* %ld_ptr ; <--- may alias
store i32 0, i32* %O1_3
store i32 0, i32* %O1_4
Modified: llvm/trunk/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ define signext i16 @subdivp(%struct.node
entry:
call void @llvm.dbg.declare(metadata %struct.hgstruct.2.29* %hg, metadata !4, metadata !{!"0x102"})
%type = getelementptr inbounds %struct.node.0.27, %struct.node.0.27* %p, i64 0, i32 0
- %0 = load i16* %type, align 2
+ %0 = load i16, i16* %type, align 2
%cmp = icmp eq i16 %0, 1
br i1 %cmp, label %return, label %for.cond.preheader
Modified: llvm/trunk/test/CodeGen/X86/2012-11-30-misched-dbg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-11-30-misched-dbg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-11-30-misched-dbg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-11-30-misched-dbg.ll Fri Feb 27 15:17:42 2015
@@ -45,7 +45,7 @@ if.then3344:
if.then4073: ; preds = %if.then3344
call void @llvm.dbg.declare(metadata [20 x i8]* %num14075, metadata !4, metadata !{!"0x102"})
%arraydecay4078 = getelementptr inbounds [20 x i8], [20 x i8]* %num14075, i64 0, i64 0
- %0 = load i32* undef, align 4
+ %0 = load i32, i32* undef, align 4
%add4093 = add nsw i32 %0, 0
%conv4094 = sitofp i32 %add4093 to float
%div4095 = fdiv float %conv4094, 5.670000e+02
Modified: llvm/trunk/test/CodeGen/X86/2012-12-06-python27-miscompile.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-12-06-python27-miscompile.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-12-06-python27-miscompile.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-12-06-python27-miscompile.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ entry:
%used = getelementptr inbounds i64, i64* %so, i32 3
store i64 0, i64* %used, align 8
%fill = getelementptr inbounds i64, i64* %so, i32 2
- %L = load i64* %fill, align 8
+ %L = load i64, i64* %fill, align 8
store i64 0, i64* %fill, align 8
%cmp28 = icmp sgt i64 %L, 0
%R = sext i1 %cmp28 to i32
Modified: llvm/trunk/test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ define void @test() nounwind noimplicitf
entry:
; CHECK-NOT: xmm
; CHECK: ret
- %0 = load %struct1** undef, align 8
+ %0 = load %struct1*, %struct1** undef, align 8
%1 = getelementptr inbounds %struct1, %struct1* %0, i64 0, i32 0
store i32* null, i32** %1, align 8
%2 = getelementptr inbounds %struct1, %struct1* %0, i64 0, i32 1
Modified: llvm/trunk/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-macosx10.8
define void @main() #0 {
entry:
- %0 = load <8 x float>* bitcast ([8 x float]* @b to <8 x float>*), align 32
+ %0 = load <8 x float>, <8 x float>* bitcast ([8 x float]* @b to <8 x float>*), align 32
%bitcast.i = extractelement <8 x float> %0, i32 0
%vecinit.i.i = insertelement <4 x float> undef, float %bitcast.i, i32 0
%vecinit2.i.i = insertelement <4 x float> %vecinit.i.i, float 0.000000e+00, i32 1
Modified: llvm/trunk/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll Fri Feb 27 15:17:42 2015
@@ -34,7 +34,7 @@
; CHECK: ret
define i64 @test_bitcast(i64 (i64, i64, i64)** %arg, i1 %bool, i64 %arg2) {
entry:
- %loaded_ptr = load i64 (i64, i64, i64)** %arg, align 8
+ %loaded_ptr = load i64 (i64, i64, i64)*, i64 (i64, i64, i64)** %arg, align 8
%raw = bitcast i64 (i64, i64, i64)* %loaded_ptr to i8*
switch i1 %bool, label %default [
i1 true, label %label_true
@@ -73,7 +73,7 @@ label_end:
; CHECK: ret
define i64 @test_inttoptr(i64 (i64, i64, i64)** %arg, i1 %bool, i64 %arg2) {
entry:
- %loaded_ptr = load i64 (i64, i64, i64)** %arg, align 8
+ %loaded_ptr = load i64 (i64, i64, i64)*, i64 (i64, i64, i64)** %arg, align 8
%raw = ptrtoint i64 (i64, i64, i64)* %loaded_ptr to i64
switch i1 %bool, label %default [
i1 true, label %label_true
@@ -112,7 +112,7 @@ label_end:
; CHECK: ret
define i64 @test_ptrtoint(i64 (i64, i64, i64)** %arg, i1 %bool, i64 %arg2) {
entry:
- %loaded_ptr = load i64 (i64, i64, i64)** %arg, align 8
+ %loaded_ptr = load i64 (i64, i64, i64)*, i64 (i64, i64, i64)** %arg, align 8
%raw = bitcast i64 (i64, i64, i64)* %loaded_ptr to i8*
switch i1 %bool, label %default [
i1 true, label %label_true
Modified: llvm/trunk/test/CodeGen/X86/Atomics-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/Atomics-64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/Atomics-64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/Atomics-64.ll Fri Feb 27 15:17:42 2015
@@ -308,331 +308,331 @@ return:
define void @test_op_and_fetch() nounwind {
entry:
- %0 = load i8* @uc, align 1
+ %0 = load i8, i8* @uc, align 1
%1 = zext i8 %0 to i32
%2 = trunc i32 %1 to i8
%3 = atomicrmw add i8* @sc, i8 %2 monotonic
%4 = add i8 %3, %2
store i8 %4, i8* @sc, align 1
- %5 = load i8* @uc, align 1
+ %5 = load i8, i8* @uc, align 1
%6 = zext i8 %5 to i32
%7 = trunc i32 %6 to i8
%8 = atomicrmw add i8* @uc, i8 %7 monotonic
%9 = add i8 %8, %7
store i8 %9, i8* @uc, align 1
- %10 = load i8* @uc, align 1
+ %10 = load i8, i8* @uc, align 1
%11 = zext i8 %10 to i32
%12 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%13 = trunc i32 %11 to i16
%14 = atomicrmw add i16* %12, i16 %13 monotonic
%15 = add i16 %14, %13
store i16 %15, i16* @ss, align 2
- %16 = load i8* @uc, align 1
+ %16 = load i8, i8* @uc, align 1
%17 = zext i8 %16 to i32
%18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%19 = trunc i32 %17 to i16
%20 = atomicrmw add i16* %18, i16 %19 monotonic
%21 = add i16 %20, %19
store i16 %21, i16* @us, align 2
- %22 = load i8* @uc, align 1
+ %22 = load i8, i8* @uc, align 1
%23 = zext i8 %22 to i32
%24 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%25 = atomicrmw add i32* %24, i32 %23 monotonic
%26 = add i32 %25, %23
store i32 %26, i32* @si, align 4
- %27 = load i8* @uc, align 1
+ %27 = load i8, i8* @uc, align 1
%28 = zext i8 %27 to i32
%29 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%30 = atomicrmw add i32* %29, i32 %28 monotonic
%31 = add i32 %30, %28
store i32 %31, i32* @ui, align 4
- %32 = load i8* @uc, align 1
+ %32 = load i8, i8* @uc, align 1
%33 = zext i8 %32 to i64
%34 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%35 = atomicrmw add i64* %34, i64 %33 monotonic
%36 = add i64 %35, %33
store i64 %36, i64* @sl, align 8
- %37 = load i8* @uc, align 1
+ %37 = load i8, i8* @uc, align 1
%38 = zext i8 %37 to i64
%39 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%40 = atomicrmw add i64* %39, i64 %38 monotonic
%41 = add i64 %40, %38
store i64 %41, i64* @ul, align 8
- %42 = load i8* @uc, align 1
+ %42 = load i8, i8* @uc, align 1
%43 = zext i8 %42 to i64
%44 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%45 = atomicrmw add i64* %44, i64 %43 monotonic
%46 = add i64 %45, %43
store i64 %46, i64* @sll, align 8
- %47 = load i8* @uc, align 1
+ %47 = load i8, i8* @uc, align 1
%48 = zext i8 %47 to i64
%49 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%50 = atomicrmw add i64* %49, i64 %48 monotonic
%51 = add i64 %50, %48
store i64 %51, i64* @ull, align 8
- %52 = load i8* @uc, align 1
+ %52 = load i8, i8* @uc, align 1
%53 = zext i8 %52 to i32
%54 = trunc i32 %53 to i8
%55 = atomicrmw sub i8* @sc, i8 %54 monotonic
%56 = sub i8 %55, %54
store i8 %56, i8* @sc, align 1
- %57 = load i8* @uc, align 1
+ %57 = load i8, i8* @uc, align 1
%58 = zext i8 %57 to i32
%59 = trunc i32 %58 to i8
%60 = atomicrmw sub i8* @uc, i8 %59 monotonic
%61 = sub i8 %60, %59
store i8 %61, i8* @uc, align 1
- %62 = load i8* @uc, align 1
+ %62 = load i8, i8* @uc, align 1
%63 = zext i8 %62 to i32
%64 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%65 = trunc i32 %63 to i16
%66 = atomicrmw sub i16* %64, i16 %65 monotonic
%67 = sub i16 %66, %65
store i16 %67, i16* @ss, align 2
- %68 = load i8* @uc, align 1
+ %68 = load i8, i8* @uc, align 1
%69 = zext i8 %68 to i32
%70 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%71 = trunc i32 %69 to i16
%72 = atomicrmw sub i16* %70, i16 %71 monotonic
%73 = sub i16 %72, %71
store i16 %73, i16* @us, align 2
- %74 = load i8* @uc, align 1
+ %74 = load i8, i8* @uc, align 1
%75 = zext i8 %74 to i32
%76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%77 = atomicrmw sub i32* %76, i32 %75 monotonic
%78 = sub i32 %77, %75
store i32 %78, i32* @si, align 4
- %79 = load i8* @uc, align 1
+ %79 = load i8, i8* @uc, align 1
%80 = zext i8 %79 to i32
%81 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%82 = atomicrmw sub i32* %81, i32 %80 monotonic
%83 = sub i32 %82, %80
store i32 %83, i32* @ui, align 4
- %84 = load i8* @uc, align 1
+ %84 = load i8, i8* @uc, align 1
%85 = zext i8 %84 to i64
%86 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%87 = atomicrmw sub i64* %86, i64 %85 monotonic
%88 = sub i64 %87, %85
store i64 %88, i64* @sl, align 8
- %89 = load i8* @uc, align 1
+ %89 = load i8, i8* @uc, align 1
%90 = zext i8 %89 to i64
%91 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%92 = atomicrmw sub i64* %91, i64 %90 monotonic
%93 = sub i64 %92, %90
store i64 %93, i64* @ul, align 8
- %94 = load i8* @uc, align 1
+ %94 = load i8, i8* @uc, align 1
%95 = zext i8 %94 to i64
%96 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%97 = atomicrmw sub i64* %96, i64 %95 monotonic
%98 = sub i64 %97, %95
store i64 %98, i64* @sll, align 8
- %99 = load i8* @uc, align 1
+ %99 = load i8, i8* @uc, align 1
%100 = zext i8 %99 to i64
%101 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%102 = atomicrmw sub i64* %101, i64 %100 monotonic
%103 = sub i64 %102, %100
store i64 %103, i64* @ull, align 8
- %104 = load i8* @uc, align 1
+ %104 = load i8, i8* @uc, align 1
%105 = zext i8 %104 to i32
%106 = trunc i32 %105 to i8
%107 = atomicrmw or i8* @sc, i8 %106 monotonic
%108 = or i8 %107, %106
store i8 %108, i8* @sc, align 1
- %109 = load i8* @uc, align 1
+ %109 = load i8, i8* @uc, align 1
%110 = zext i8 %109 to i32
%111 = trunc i32 %110 to i8
%112 = atomicrmw or i8* @uc, i8 %111 monotonic
%113 = or i8 %112, %111
store i8 %113, i8* @uc, align 1
- %114 = load i8* @uc, align 1
+ %114 = load i8, i8* @uc, align 1
%115 = zext i8 %114 to i32
%116 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%117 = trunc i32 %115 to i16
%118 = atomicrmw or i16* %116, i16 %117 monotonic
%119 = or i16 %118, %117
store i16 %119, i16* @ss, align 2
- %120 = load i8* @uc, align 1
+ %120 = load i8, i8* @uc, align 1
%121 = zext i8 %120 to i32
%122 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%123 = trunc i32 %121 to i16
%124 = atomicrmw or i16* %122, i16 %123 monotonic
%125 = or i16 %124, %123
store i16 %125, i16* @us, align 2
- %126 = load i8* @uc, align 1
+ %126 = load i8, i8* @uc, align 1
%127 = zext i8 %126 to i32
%128 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%129 = atomicrmw or i32* %128, i32 %127 monotonic
%130 = or i32 %129, %127
store i32 %130, i32* @si, align 4
- %131 = load i8* @uc, align 1
+ %131 = load i8, i8* @uc, align 1
%132 = zext i8 %131 to i32
%133 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%134 = atomicrmw or i32* %133, i32 %132 monotonic
%135 = or i32 %134, %132
store i32 %135, i32* @ui, align 4
- %136 = load i8* @uc, align 1
+ %136 = load i8, i8* @uc, align 1
%137 = zext i8 %136 to i64
%138 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%139 = atomicrmw or i64* %138, i64 %137 monotonic
%140 = or i64 %139, %137
store i64 %140, i64* @sl, align 8
- %141 = load i8* @uc, align 1
+ %141 = load i8, i8* @uc, align 1
%142 = zext i8 %141 to i64
%143 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%144 = atomicrmw or i64* %143, i64 %142 monotonic
%145 = or i64 %144, %142
store i64 %145, i64* @ul, align 8
- %146 = load i8* @uc, align 1
+ %146 = load i8, i8* @uc, align 1
%147 = zext i8 %146 to i64
%148 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%149 = atomicrmw or i64* %148, i64 %147 monotonic
%150 = or i64 %149, %147
store i64 %150, i64* @sll, align 8
- %151 = load i8* @uc, align 1
+ %151 = load i8, i8* @uc, align 1
%152 = zext i8 %151 to i64
%153 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%154 = atomicrmw or i64* %153, i64 %152 monotonic
%155 = or i64 %154, %152
store i64 %155, i64* @ull, align 8
- %156 = load i8* @uc, align 1
+ %156 = load i8, i8* @uc, align 1
%157 = zext i8 %156 to i32
%158 = trunc i32 %157 to i8
%159 = atomicrmw xor i8* @sc, i8 %158 monotonic
%160 = xor i8 %159, %158
store i8 %160, i8* @sc, align 1
- %161 = load i8* @uc, align 1
+ %161 = load i8, i8* @uc, align 1
%162 = zext i8 %161 to i32
%163 = trunc i32 %162 to i8
%164 = atomicrmw xor i8* @uc, i8 %163 monotonic
%165 = xor i8 %164, %163
store i8 %165, i8* @uc, align 1
- %166 = load i8* @uc, align 1
+ %166 = load i8, i8* @uc, align 1
%167 = zext i8 %166 to i32
%168 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%169 = trunc i32 %167 to i16
%170 = atomicrmw xor i16* %168, i16 %169 monotonic
%171 = xor i16 %170, %169
store i16 %171, i16* @ss, align 2
- %172 = load i8* @uc, align 1
+ %172 = load i8, i8* @uc, align 1
%173 = zext i8 %172 to i32
%174 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%175 = trunc i32 %173 to i16
%176 = atomicrmw xor i16* %174, i16 %175 monotonic
%177 = xor i16 %176, %175
store i16 %177, i16* @us, align 2
- %178 = load i8* @uc, align 1
+ %178 = load i8, i8* @uc, align 1
%179 = zext i8 %178 to i32
%180 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%181 = atomicrmw xor i32* %180, i32 %179 monotonic
%182 = xor i32 %181, %179
store i32 %182, i32* @si, align 4
- %183 = load i8* @uc, align 1
+ %183 = load i8, i8* @uc, align 1
%184 = zext i8 %183 to i32
%185 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%186 = atomicrmw xor i32* %185, i32 %184 monotonic
%187 = xor i32 %186, %184
store i32 %187, i32* @ui, align 4
- %188 = load i8* @uc, align 1
+ %188 = load i8, i8* @uc, align 1
%189 = zext i8 %188 to i64
%190 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%191 = atomicrmw xor i64* %190, i64 %189 monotonic
%192 = xor i64 %191, %189
store i64 %192, i64* @sl, align 8
- %193 = load i8* @uc, align 1
+ %193 = load i8, i8* @uc, align 1
%194 = zext i8 %193 to i64
%195 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%196 = atomicrmw xor i64* %195, i64 %194 monotonic
%197 = xor i64 %196, %194
store i64 %197, i64* @ul, align 8
- %198 = load i8* @uc, align 1
+ %198 = load i8, i8* @uc, align 1
%199 = zext i8 %198 to i64
%200 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%201 = atomicrmw xor i64* %200, i64 %199 monotonic
%202 = xor i64 %201, %199
store i64 %202, i64* @sll, align 8
- %203 = load i8* @uc, align 1
+ %203 = load i8, i8* @uc, align 1
%204 = zext i8 %203 to i64
%205 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%206 = atomicrmw xor i64* %205, i64 %204 monotonic
%207 = xor i64 %206, %204
store i64 %207, i64* @ull, align 8
- %208 = load i8* @uc, align 1
+ %208 = load i8, i8* @uc, align 1
%209 = zext i8 %208 to i32
%210 = trunc i32 %209 to i8
%211 = atomicrmw and i8* @sc, i8 %210 monotonic
%212 = and i8 %211, %210
store i8 %212, i8* @sc, align 1
- %213 = load i8* @uc, align 1
+ %213 = load i8, i8* @uc, align 1
%214 = zext i8 %213 to i32
%215 = trunc i32 %214 to i8
%216 = atomicrmw and i8* @uc, i8 %215 monotonic
%217 = and i8 %216, %215
store i8 %217, i8* @uc, align 1
- %218 = load i8* @uc, align 1
+ %218 = load i8, i8* @uc, align 1
%219 = zext i8 %218 to i32
%220 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%221 = trunc i32 %219 to i16
%222 = atomicrmw and i16* %220, i16 %221 monotonic
%223 = and i16 %222, %221
store i16 %223, i16* @ss, align 2
- %224 = load i8* @uc, align 1
+ %224 = load i8, i8* @uc, align 1
%225 = zext i8 %224 to i32
%226 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%227 = trunc i32 %225 to i16
%228 = atomicrmw and i16* %226, i16 %227 monotonic
%229 = and i16 %228, %227
store i16 %229, i16* @us, align 2
- %230 = load i8* @uc, align 1
+ %230 = load i8, i8* @uc, align 1
%231 = zext i8 %230 to i32
%232 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%233 = atomicrmw and i32* %232, i32 %231 monotonic
%234 = and i32 %233, %231
store i32 %234, i32* @si, align 4
- %235 = load i8* @uc, align 1
+ %235 = load i8, i8* @uc, align 1
%236 = zext i8 %235 to i32
%237 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%238 = atomicrmw and i32* %237, i32 %236 monotonic
%239 = and i32 %238, %236
store i32 %239, i32* @ui, align 4
- %240 = load i8* @uc, align 1
+ %240 = load i8, i8* @uc, align 1
%241 = zext i8 %240 to i64
%242 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%243 = atomicrmw and i64* %242, i64 %241 monotonic
%244 = and i64 %243, %241
store i64 %244, i64* @sl, align 8
- %245 = load i8* @uc, align 1
+ %245 = load i8, i8* @uc, align 1
%246 = zext i8 %245 to i64
%247 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%248 = atomicrmw and i64* %247, i64 %246 monotonic
%249 = and i64 %248, %246
store i64 %249, i64* @ul, align 8
- %250 = load i8* @uc, align 1
+ %250 = load i8, i8* @uc, align 1
%251 = zext i8 %250 to i64
%252 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%253 = atomicrmw and i64* %252, i64 %251 monotonic
%254 = and i64 %253, %251
store i64 %254, i64* @sll, align 8
- %255 = load i8* @uc, align 1
+ %255 = load i8, i8* @uc, align 1
%256 = zext i8 %255 to i64
%257 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%258 = atomicrmw and i64* %257, i64 %256 monotonic
%259 = and i64 %258, %256
store i64 %259, i64* @ull, align 8
- %260 = load i8* @uc, align 1
+ %260 = load i8, i8* @uc, align 1
%261 = zext i8 %260 to i32
%262 = trunc i32 %261 to i8
%263 = atomicrmw nand i8* @sc, i8 %262 monotonic
%264 = xor i8 %263, -1
%265 = and i8 %264, %262
store i8 %265, i8* @sc, align 1
- %266 = load i8* @uc, align 1
+ %266 = load i8, i8* @uc, align 1
%267 = zext i8 %266 to i32
%268 = trunc i32 %267 to i8
%269 = atomicrmw nand i8* @uc, i8 %268 monotonic
%270 = xor i8 %269, -1
%271 = and i8 %270, %268
store i8 %271, i8* @uc, align 1
- %272 = load i8* @uc, align 1
+ %272 = load i8, i8* @uc, align 1
%273 = zext i8 %272 to i32
%274 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%275 = trunc i32 %273 to i16
@@ -640,7 +640,7 @@ entry:
%277 = xor i16 %276, -1
%278 = and i16 %277, %275
store i16 %278, i16* @ss, align 2
- %279 = load i8* @uc, align 1
+ %279 = load i8, i8* @uc, align 1
%280 = zext i8 %279 to i32
%281 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%282 = trunc i32 %280 to i16
@@ -648,42 +648,42 @@ entry:
%284 = xor i16 %283, -1
%285 = and i16 %284, %282
store i16 %285, i16* @us, align 2
- %286 = load i8* @uc, align 1
+ %286 = load i8, i8* @uc, align 1
%287 = zext i8 %286 to i32
%288 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%289 = atomicrmw nand i32* %288, i32 %287 monotonic
%290 = xor i32 %289, -1
%291 = and i32 %290, %287
store i32 %291, i32* @si, align 4
- %292 = load i8* @uc, align 1
+ %292 = load i8, i8* @uc, align 1
%293 = zext i8 %292 to i32
%294 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%295 = atomicrmw nand i32* %294, i32 %293 monotonic
%296 = xor i32 %295, -1
%297 = and i32 %296, %293
store i32 %297, i32* @ui, align 4
- %298 = load i8* @uc, align 1
+ %298 = load i8, i8* @uc, align 1
%299 = zext i8 %298 to i64
%300 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%301 = atomicrmw nand i64* %300, i64 %299 monotonic
%302 = xor i64 %301, -1
%303 = and i64 %302, %299
store i64 %303, i64* @sl, align 8
- %304 = load i8* @uc, align 1
+ %304 = load i8, i8* @uc, align 1
%305 = zext i8 %304 to i64
%306 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%307 = atomicrmw nand i64* %306, i64 %305 monotonic
%308 = xor i64 %307, -1
%309 = and i64 %308, %305
store i64 %309, i64* @ul, align 8
- %310 = load i8* @uc, align 1
+ %310 = load i8, i8* @uc, align 1
%311 = zext i8 %310 to i64
%312 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%313 = atomicrmw nand i64* %312, i64 %311 monotonic
%314 = xor i64 %313, -1
%315 = and i64 %314, %311
store i64 %315, i64* @sll, align 8
- %316 = load i8* @uc, align 1
+ %316 = load i8, i8* @uc, align 1
%317 = zext i8 %316 to i64
%318 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%319 = atomicrmw nand i64* %318, i64 %317 monotonic
@@ -698,28 +698,28 @@ return:
define void @test_compare_and_swap() nounwind {
entry:
- %0 = load i8* @sc, align 1
+ %0 = load i8, i8* @sc, align 1
%1 = zext i8 %0 to i32
- %2 = load i8* @uc, align 1
+ %2 = load i8, i8* @uc, align 1
%3 = zext i8 %2 to i32
%4 = trunc i32 %3 to i8
%5 = trunc i32 %1 to i8
%pair6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic monotonic
%6 = extractvalue { i8, i1 } %pair6, 0
store i8 %6, i8* @sc, align 1
- %7 = load i8* @sc, align 1
+ %7 = load i8, i8* @sc, align 1
%8 = zext i8 %7 to i32
- %9 = load i8* @uc, align 1
+ %9 = load i8, i8* @uc, align 1
%10 = zext i8 %9 to i32
%11 = trunc i32 %10 to i8
%12 = trunc i32 %8 to i8
%pair13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic monotonic
%13 = extractvalue { i8, i1 } %pair13, 0
store i8 %13, i8* @uc, align 1
- %14 = load i8* @sc, align 1
+ %14 = load i8, i8* @sc, align 1
%15 = sext i8 %14 to i16
%16 = zext i16 %15 to i32
- %17 = load i8* @uc, align 1
+ %17 = load i8, i8* @uc, align 1
%18 = zext i8 %17 to i32
%19 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%20 = trunc i32 %18 to i16
@@ -727,10 +727,10 @@ entry:
%pair22 = cmpxchg i16* %19, i16 %20, i16 %21 monotonic monotonic
%22 = extractvalue { i16, i1 } %pair22, 0
store i16 %22, i16* @ss, align 2
- %23 = load i8* @sc, align 1
+ %23 = load i8, i8* @sc, align 1
%24 = sext i8 %23 to i16
%25 = zext i16 %24 to i32
- %26 = load i8* @uc, align 1
+ %26 = load i8, i8* @uc, align 1
%27 = zext i8 %26 to i32
%28 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%29 = trunc i32 %27 to i16
@@ -738,57 +738,57 @@ entry:
%pair31 = cmpxchg i16* %28, i16 %29, i16 %30 monotonic monotonic
%31 = extractvalue { i16, i1 } %pair31, 0
store i16 %31, i16* @us, align 2
- %32 = load i8* @sc, align 1
+ %32 = load i8, i8* @sc, align 1
%33 = sext i8 %32 to i32
- %34 = load i8* @uc, align 1
+ %34 = load i8, i8* @uc, align 1
%35 = zext i8 %34 to i32
%36 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%pair37 = cmpxchg i32* %36, i32 %35, i32 %33 monotonic monotonic
%37 = extractvalue { i32, i1 } %pair37, 0
store i32 %37, i32* @si, align 4
- %38 = load i8* @sc, align 1
+ %38 = load i8, i8* @sc, align 1
%39 = sext i8 %38 to i32
- %40 = load i8* @uc, align 1
+ %40 = load i8, i8* @uc, align 1
%41 = zext i8 %40 to i32
%42 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%pair43 = cmpxchg i32* %42, i32 %41, i32 %39 monotonic monotonic
%43 = extractvalue { i32, i1 } %pair43, 0
store i32 %43, i32* @ui, align 4
- %44 = load i8* @sc, align 1
+ %44 = load i8, i8* @sc, align 1
%45 = sext i8 %44 to i64
- %46 = load i8* @uc, align 1
+ %46 = load i8, i8* @uc, align 1
%47 = zext i8 %46 to i64
%48 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%pair49 = cmpxchg i64* %48, i64 %47, i64 %45 monotonic monotonic
%49 = extractvalue { i64, i1 } %pair49, 0
store i64 %49, i64* @sl, align 8
- %50 = load i8* @sc, align 1
+ %50 = load i8, i8* @sc, align 1
%51 = sext i8 %50 to i64
- %52 = load i8* @uc, align 1
+ %52 = load i8, i8* @uc, align 1
%53 = zext i8 %52 to i64
%54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%pair55 = cmpxchg i64* %54, i64 %53, i64 %51 monotonic monotonic
%55 = extractvalue { i64, i1 } %pair55, 0
store i64 %55, i64* @ul, align 8
- %56 = load i8* @sc, align 1
+ %56 = load i8, i8* @sc, align 1
%57 = sext i8 %56 to i64
- %58 = load i8* @uc, align 1
+ %58 = load i8, i8* @uc, align 1
%59 = zext i8 %58 to i64
%60 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%pair61 = cmpxchg i64* %60, i64 %59, i64 %57 monotonic monotonic
%61 = extractvalue { i64, i1 } %pair61, 0
store i64 %61, i64* @sll, align 8
- %62 = load i8* @sc, align 1
+ %62 = load i8, i8* @sc, align 1
%63 = sext i8 %62 to i64
- %64 = load i8* @uc, align 1
+ %64 = load i8, i8* @uc, align 1
%65 = zext i8 %64 to i64
%66 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%pair67 = cmpxchg i64* %66, i64 %65, i64 %63 monotonic monotonic
%67 = extractvalue { i64, i1 } %pair67, 0
store i64 %67, i64* @ull, align 8
- %68 = load i8* @sc, align 1
+ %68 = load i8, i8* @sc, align 1
%69 = zext i8 %68 to i32
- %70 = load i8* @uc, align 1
+ %70 = load i8, i8* @uc, align 1
%71 = zext i8 %70 to i32
%72 = trunc i32 %71 to i8
%73 = trunc i32 %69 to i8
@@ -798,9 +798,9 @@ entry:
%76 = zext i1 %75 to i8
%77 = zext i8 %76 to i32
store i32 %77, i32* @ui, align 4
- %78 = load i8* @sc, align 1
+ %78 = load i8, i8* @sc, align 1
%79 = zext i8 %78 to i32
- %80 = load i8* @uc, align 1
+ %80 = load i8, i8* @uc, align 1
%81 = zext i8 %80 to i32
%82 = trunc i32 %81 to i8
%83 = trunc i32 %79 to i8
@@ -810,10 +810,10 @@ entry:
%86 = zext i1 %85 to i8
%87 = zext i8 %86 to i32
store i32 %87, i32* @ui, align 4
- %88 = load i8* @sc, align 1
+ %88 = load i8, i8* @sc, align 1
%89 = sext i8 %88 to i16
%90 = zext i16 %89 to i32
- %91 = load i8* @uc, align 1
+ %91 = load i8, i8* @uc, align 1
%92 = zext i8 %91 to i32
%93 = trunc i32 %92 to i8
%94 = trunc i32 %90 to i8
@@ -823,10 +823,10 @@ entry:
%97 = zext i1 %96 to i8
%98 = zext i8 %97 to i32
store i32 %98, i32* @ui, align 4
- %99 = load i8* @sc, align 1
+ %99 = load i8, i8* @sc, align 1
%100 = sext i8 %99 to i16
%101 = zext i16 %100 to i32
- %102 = load i8* @uc, align 1
+ %102 = load i8, i8* @uc, align 1
%103 = zext i8 %102 to i32
%104 = trunc i32 %103 to i8
%105 = trunc i32 %101 to i8
@@ -836,9 +836,9 @@ entry:
%108 = zext i1 %107 to i8
%109 = zext i8 %108 to i32
store i32 %109, i32* @ui, align 4
- %110 = load i8* @sc, align 1
+ %110 = load i8, i8* @sc, align 1
%111 = sext i8 %110 to i32
- %112 = load i8* @uc, align 1
+ %112 = load i8, i8* @uc, align 1
%113 = zext i8 %112 to i32
%114 = trunc i32 %113 to i8
%115 = trunc i32 %111 to i8
@@ -848,9 +848,9 @@ entry:
%118 = zext i1 %117 to i8
%119 = zext i8 %118 to i32
store i32 %119, i32* @ui, align 4
- %120 = load i8* @sc, align 1
+ %120 = load i8, i8* @sc, align 1
%121 = sext i8 %120 to i32
- %122 = load i8* @uc, align 1
+ %122 = load i8, i8* @uc, align 1
%123 = zext i8 %122 to i32
%124 = trunc i32 %123 to i8
%125 = trunc i32 %121 to i8
@@ -860,9 +860,9 @@ entry:
%128 = zext i1 %127 to i8
%129 = zext i8 %128 to i32
store i32 %129, i32* @ui, align 4
- %130 = load i8* @sc, align 1
+ %130 = load i8, i8* @sc, align 1
%131 = sext i8 %130 to i64
- %132 = load i8* @uc, align 1
+ %132 = load i8, i8* @uc, align 1
%133 = zext i8 %132 to i64
%134 = trunc i64 %133 to i8
%135 = trunc i64 %131 to i8
@@ -872,9 +872,9 @@ entry:
%138 = zext i1 %137 to i8
%139 = zext i8 %138 to i32
store i32 %139, i32* @ui, align 4
- %140 = load i8* @sc, align 1
+ %140 = load i8, i8* @sc, align 1
%141 = sext i8 %140 to i64
- %142 = load i8* @uc, align 1
+ %142 = load i8, i8* @uc, align 1
%143 = zext i8 %142 to i64
%144 = trunc i64 %143 to i8
%145 = trunc i64 %141 to i8
@@ -884,9 +884,9 @@ entry:
%148 = zext i1 %147 to i8
%149 = zext i8 %148 to i32
store i32 %149, i32* @ui, align 4
- %150 = load i8* @sc, align 1
+ %150 = load i8, i8* @sc, align 1
%151 = sext i8 %150 to i64
- %152 = load i8* @uc, align 1
+ %152 = load i8, i8* @uc, align 1
%153 = zext i8 %152 to i64
%154 = trunc i64 %153 to i8
%155 = trunc i64 %151 to i8
@@ -896,9 +896,9 @@ entry:
%158 = zext i1 %157 to i8
%159 = zext i8 %158 to i32
store i32 %159, i32* @ui, align 4
- %160 = load i8* @sc, align 1
+ %160 = load i8, i8* @sc, align 1
%161 = sext i8 %160 to i64
- %162 = load i8* @uc, align 1
+ %162 = load i8, i8* @uc, align 1
%163 = zext i8 %162 to i64
%164 = trunc i64 %163 to i8
%165 = trunc i64 %161 to i8
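
Every hunk above makes the same mechanical change: the load now states its result type explicitly, ahead of the pointer operand, instead of leaving it implied by the operand's pointee type. As a minimal, self-contained sketch of the updated spelling (hypothetical function and value names, not taken from the test), a compare-and-swap sequence reads:

define i8 @cas_sketch(i8* %p, i8 %old, i8 %new) {
entry:
  ; New-style load: "load <ty>, <ty>* <ptr>" names the result type first.
  %cur = load i8, i8* %p, align 1
  ; cmpxchg yields a { value, success } pair; extractvalue unpacks it.
  %pair = cmpxchg i8* %p, i8 %old, i8 %new monotonic monotonic
  %val = extractvalue { i8, i1 } %pair, 0
  %ok = extractvalue { i8, i1 } %pair, 1
  %res = select i1 %ok, i8 %val, i8 %cur
  ret i8 %res
}

The cmpxchg, atomicrmw, and store lines are untouched by this commit; only the load spelling changes.
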
Modified: llvm/trunk/test/CodeGen/X86/GC/alloc_loop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GC/alloc_loop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GC/alloc_loop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GC/alloc_loop.ll Fri Feb 27 15:17:42 2015
@@ -31,8 +31,8 @@ entry:
store i8** %tmp.2, i8*** %B
;; *B = A;
- %B.1 = load i8*** %B
- %A.1 = load i8** %A
+ %B.1 = load i8**, i8*** %B
+ %A.1 = load i8*, i8** %A
call void @llvm.gcwrite(i8* %A.1, i8* %B.upgrd.1, i8** %B.1)
br label %AllocLoop
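
The gcwrite call here stores one rooted pointer through another rooted slot. A minimal sketch of that plumbing under the two-type load spelling (hypothetical function and names; the llvm.gcroot/llvm.gcwrite signatures are the documented ones):

declare void @llvm.gcroot(i8**, i8*)
declare void @llvm.gcwrite(i8*, i8*, i8**)

define void @root_sketch(i8* %obj) gc "example" {
entry:
  %root = alloca i8*                           ; stack slot the collector scans
  call void @llvm.gcroot(i8** %root, i8* null)
  store i8* %obj, i8** %root
  %cur = load i8*, i8** %root                  ; pointer load: "load i8*, i8** ..."
  call void @llvm.gcwrite(i8* %cur, i8* %obj, i8** %root)
  ret void
}
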
Modified: llvm/trunk/test/CodeGen/X86/GC/argpromotion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GC/argpromotion.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GC/argpromotion.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GC/argpromotion.ll Fri Feb 27 15:17:42 2015
@@ -14,6 +14,6 @@ define internal i32 @f(i32* %xp) gc "exa
entry:
%var = alloca i8*
call void @llvm.gcroot(i8** %var, i8* null)
- %x = load i32* %xp
+ %x = load i32, i32* %xp
ret i32 %x
}
Modified: llvm/trunk/test/CodeGen/X86/GC/inline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GC/inline.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GC/inline.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GC/inline.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ define internal i32 @g() gc "example" {
%obj.2 = bitcast %IntArray* %obj to i8* ; <i8*> [#uses=1]
store i8* %obj.2, i8** %root
%Length.ptr = getelementptr %IntArray, %IntArray* %obj, i32 0, i32 0 ; <i32*> [#uses=1]
- %Length = load i32* %Length.ptr ; <i32> [#uses=1]
+ %Length = load i32, i32* %Length.ptr ; <i32> [#uses=1]
ret i32 %Length
}
Modified: llvm/trunk/test/CodeGen/X86/GC/inline2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GC/inline2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GC/inline2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GC/inline2.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ define internal i32 @g() gc "example" {
%obj.2 = bitcast %IntArray* %obj to i8* ; <i8*> [#uses=1]
store i8* %obj.2, i8** %root
%Length.ptr = getelementptr %IntArray, %IntArray* %obj, i32 0, i32 0 ; <i32*> [#uses=1]
- %Length = load i32* %Length.ptr ; <i32> [#uses=1]
+ %Length = load i32, i32* %Length.ptr ; <i32> [#uses=1]
ret i32 %Length
}
Modified: llvm/trunk/test/CodeGen/X86/MachineBranchProb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/MachineBranchProb.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/MachineBranchProb.ll (original)
+++ llvm/trunk/test/CodeGen/X86/MachineBranchProb.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ for.cond2:
%i.1 = phi i32 [ %inc19, %for.inc ], [ 0, %for.cond ]
%bit.0 = phi i32 [ %shl, %for.inc ], [ 1, %for.cond ]
%tobool = icmp eq i32 %bit.0, 0
- %v3 = load i32* @max_regno, align 4
+ %v3 = load i32, i32* @max_regno, align 4
%cmp4 = icmp eq i32 %i.1, %v3
%or.cond = or i1 %tobool, %cmp4
br i1 %or.cond, label %for.inc20, label %for.inc, !prof !0
Modified: llvm/trunk/test/CodeGen/X86/MachineSink-DbgValue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/MachineSink-DbgValue.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/MachineSink-DbgValue.ll (original)
+++ llvm/trunk/test/CodeGen/X86/MachineSink-DbgValue.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ target triple = "x86_64-apple-macosx10.7
define i32 @foo(i32 %i, i32* nocapture %c) nounwind uwtable readonly ssp {
tail call void @llvm.dbg.value(metadata i32 %i, i64 0, metadata !6, metadata !{!"0x102"}), !dbg !12
- %ab = load i32* %c, align 1, !dbg !14
+ %ab = load i32, i32* %c, align 1, !dbg !14
tail call void @llvm.dbg.value(metadata i32* %c, i64 0, metadata !7, metadata !{!"0x102"}), !dbg !13
tail call void @llvm.dbg.value(metadata i32 %ab, i64 0, metadata !10, metadata !{!"0x102"}), !dbg !14
%cd = icmp eq i32 %i, 42, !dbg !15
Modified: llvm/trunk/test/CodeGen/X86/MachineSink-eflags.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/MachineSink-eflags.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/MachineSink-eflags.ll (original)
+++ llvm/trunk/test/CodeGen/X86/MachineSink-eflags.ll Fri Feb 27 15:17:42 2015
@@ -16,18 +16,18 @@ entry:
%i2 = alloca i8*, align 8
%b.i = alloca [16 x <2 x double>], align 16
%conv = bitcast i8* %_stubArgs to i32*
- %tmp1 = load i32* %conv, align 4
+ %tmp1 = load i32, i32* %conv, align 4
%ptr8 = getelementptr i8, i8* %_stubArgs, i64 16
%i4 = bitcast i8* %ptr8 to <2 x double>*
%ptr20 = getelementptr i8, i8* %_stubArgs, i64 48
%i7 = bitcast i8* %ptr20 to <2 x double> addrspace(1)**
- %tmp21 = load <2 x double> addrspace(1)** %i7, align 8
+ %tmp21 = load <2 x double> addrspace(1)*, <2 x double> addrspace(1)** %i7, align 8
%ptr28 = getelementptr i8, i8* %_stubArgs, i64 64
%i9 = bitcast i8* %ptr28 to i32*
- %tmp29 = load i32* %i9, align 4
+ %tmp29 = load i32, i32* %i9, align 4
%ptr32 = getelementptr i8, i8* %_stubArgs, i64 68
%i10 = bitcast i8* %ptr32 to i32*
- %tmp33 = load i32* %i10, align 4
+ %tmp33 = load i32, i32* %i10, align 4
%tmp17.i = mul i32 10, 20
%tmp19.i = add i32 %tmp17.i, %tmp33
%conv21.i = zext i32 %tmp19.i to i64
@@ -49,14 +49,14 @@ entry:
%conv160.i = zext i32 %i39 to i64
%tmp22.sum652.i = add i64 %conv160.i, %conv21.i
%arrayidx161.i = getelementptr <2 x double>, <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum652.i
- %tmp162.i = load <2 x double> addrspace(1)* %arrayidx161.i, align 16
+ %tmp162.i = load <2 x double>, <2 x double> addrspace(1)* %arrayidx161.i, align 16
%tmp222.i = add i32 %tmp154.i, 1
%i43 = mul i32 %tmp222.i, %tmp29
%i44 = add i32 %tmp158.i, %i43
%conv228.i = zext i32 %i44 to i64
%tmp22.sum656.i = add i64 %conv228.i, %conv21.i
%arrayidx229.i = getelementptr <2 x double>, <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum656.i
- %tmp230.i = load <2 x double> addrspace(1)* %arrayidx229.i, align 16
+ %tmp230.i = load <2 x double>, <2 x double> addrspace(1)* %arrayidx229.i, align 16
%cmp432.i = icmp ult i32 %tmp156.i, %tmp1
; %shl.i should not be sunk below the compare.
Modified: llvm/trunk/test/CodeGen/X86/MergeConsecutiveStores.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/MergeConsecutiveStores.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/MergeConsecutiveStores.ll (original)
+++ llvm/trunk/test/CodeGen/X86/MergeConsecutiveStores.ll Fri Feb 27 15:17:42 2015
@@ -166,8 +166,8 @@ define void @merge_loads_i16(i32 %count,
; <label>:4 ; preds = %4, %.lr.ph
%i.02 = phi i32 [ 0, %.lr.ph ], [ %9, %4 ]
%.01 = phi %struct.A* [ %p, %.lr.ph ], [ %10, %4 ]
- %5 = load i8* %2, align 1
- %6 = load i8* %3, align 1
+ %5 = load i8, i8* %2, align 1
+ %6 = load i8, i8* %3, align 1
%7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
store i8 %5, i8* %7, align 1
%8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
@@ -200,11 +200,11 @@ define void @no_merge_loads(i32 %count,
a4: ; preds = %4, %.lr.ph
%i.02 = phi i32 [ 0, %.lr.ph ], [ %a9, %a4 ]
%.01 = phi %struct.A* [ %p, %.lr.ph ], [ %a10, %a4 ]
- %a5 = load i8* %2, align 1
+ %a5 = load i8, i8* %2, align 1
%a7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
store i8 %a5, i8* %a7, align 1
%a8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
- %a6 = load i8* %3, align 1
+ %a6 = load i8, i8* %3, align 1
store i8 %a6, i8* %a8, align 1
%a9 = add nsw i32 %i.02, 1
%a10 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 1
@@ -234,8 +234,8 @@ define void @merge_loads_integer(i32 %co
; <label>:4 ; preds = %4, %.lr.ph
%i.02 = phi i32 [ 0, %.lr.ph ], [ %9, %4 ]
%.01 = phi %struct.B* [ %p, %.lr.ph ], [ %10, %4 ]
- %5 = load i32* %2
- %6 = load i32* %3
+ %5 = load i32, i32* %2
+ %6 = load i32, i32* %3
%7 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 0
store i32 %5, i32* %7
%8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
@@ -274,10 +274,10 @@ block4:
%a8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
%a9 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 2
%a10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 3
- %b1 = load i32* %a2
- %b2 = load i32* %a3
- %b3 = load i32* %a4
- %b4 = load i32* %a5
+ %b1 = load i32, i32* %a2
+ %b2 = load i32, i32* %a3
+ %b3 = load i32, i32* %a4
+ %b4 = load i32, i32* %a5
store i32 %b1, i32* %a7
store i32 %b2, i32* %a8
store i32 %b3, i32* %a9
@@ -321,10 +321,10 @@ block4:
%a8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
%a9 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 2
%a10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 3
- %b1 = load i32* %a2, align 1
- %b2 = load i32* %a3, align 1
- %b3 = load i32* %a4, align 1
- %b4 = load i32* %a5, align 1
+ %b1 = load i32, i32* %a2, align 1
+ %b2 = load i32, i32* %a3, align 1
+ %b3 = load i32, i32* %a4, align 1
+ %b4 = load i32, i32* %a5, align 1
store i32 %b1, i32* %a7, align 1
store i32 %b2, i32* %a8, align 1
store i32 %b3, i32* %a9, align 1
@@ -351,12 +351,12 @@ define void @MergeLoadStoreBaseIndexOffs
%.08 = phi i8* [ %b, %0 ], [ %10, %1 ]
%.0 = phi i64* [ %a, %0 ], [ %2, %1 ]
%2 = getelementptr inbounds i64, i64* %.0, i64 1
- %3 = load i64* %.0, align 1
+ %3 = load i64, i64* %.0, align 1
%4 = getelementptr inbounds i8, i8* %c, i64 %3
- %5 = load i8* %4, align 1
+ %5 = load i8, i8* %4, align 1
%6 = add i64 %3, 1
%7 = getelementptr inbounds i8, i8* %c, i64 %6
- %8 = load i8* %7, align 1
+ %8 = load i8, i8* %7, align 1
store i8 %5, i8* %.08, align 1
%9 = getelementptr inbounds i8, i8* %.08, i64 1
store i8 %8, i8* %9, align 1
@@ -383,13 +383,13 @@ define void @MergeLoadStoreBaseIndexOffs
%.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
%.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
%2 = getelementptr inbounds i8, i8* %.0, i64 1
- %3 = load i8* %.0, align 1
+ %3 = load i8, i8* %.0, align 1
%4 = sext i8 %3 to i64
%5 = getelementptr inbounds i8, i8* %c, i64 %4
- %6 = load i8* %5, align 1
+ %6 = load i8, i8* %5, align 1
%7 = add i64 %4, 1
%8 = getelementptr inbounds i8, i8* %c, i64 %7
- %9 = load i8* %8, align 1
+ %9 = load i8, i8* %8, align 1
store i8 %6, i8* %.08, align 1
%10 = getelementptr inbounds i8, i8* %.08, i64 1
store i8 %9, i8* %10, align 1
@@ -415,14 +415,14 @@ define void @loadStoreBaseIndexOffsetSex
%.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
%.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
%2 = getelementptr inbounds i8, i8* %.0, i64 1
- %3 = load i8* %.0, align 1
+ %3 = load i8, i8* %.0, align 1
%4 = sext i8 %3 to i64
%5 = getelementptr inbounds i8, i8* %c, i64 %4
- %6 = load i8* %5, align 1
+ %6 = load i8, i8* %5, align 1
%7 = add i8 %3, 1
%wrap.4 = sext i8 %7 to i64
%8 = getelementptr inbounds i8, i8* %c, i64 %wrap.4
- %9 = load i8* %8, align 1
+ %9 = load i8, i8* %8, align 1
store i8 %6, i8* %.08, align 1
%10 = getelementptr inbounds i8, i8* %.08, i64 1
store i8 %9, i8* %10, align 1
@@ -477,11 +477,11 @@ define void @merge_vec_element_and_scala
%idx4 = getelementptr inbounds [6 x i64], [6 x i64]* %array, i64 0, i64 4
%idx5 = getelementptr inbounds [6 x i64], [6 x i64]* %array, i64 0, i64 5
- %a0 = load i64* %idx0, align 8
+ %a0 = load i64, i64* %idx0, align 8
store i64 %a0, i64* %idx4, align 8
%b = bitcast i64* %idx1 to <2 x i64>*
- %v = load <2 x i64>* %b, align 8
+ %v = load <2 x i64>, <2 x i64>* %b, align 8
%a1 = extractelement <2 x i64> %v, i32 0
store i64 %a1, i64* %idx5, align 8
ret void
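
These tests exercise the DAG combiner's store merging: adjacent narrow loads feeding adjacent narrow stores can be widened into a single wider access. A stripped-down sketch of the mergeable shape in the new spelling (hypothetical struct and names, not from the test):

%pair = type { i32, i32 }

define void @copy_pair_sketch(%pair* %dst, i32* %src) {
entry:
  ; Two adjacent loads and two adjacent stores: the shape the merger folds.
  %s1 = getelementptr inbounds i32, i32* %src, i64 1
  %a = load i32, i32* %src, align 4
  %b = load i32, i32* %s1, align 4
  %d0 = getelementptr inbounds %pair, %pair* %dst, i64 0, i32 0
  %d1 = getelementptr inbounds %pair, %pair* %dst, i64 0, i32 1
  store i32 %a, i32* %d0, align 4
  store i32 %b, i32* %d1, align 4
  ret void
}
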
Modified: llvm/trunk/test/CodeGen/X86/StackColoring.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/StackColoring.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/StackColoring.ll (original)
+++ llvm/trunk/test/CodeGen/X86/StackColoring.ll Fri Feb 27 15:17:42 2015
@@ -414,7 +414,7 @@ define i32 @shady_range(i32 %argc, i8**
%z2 = getelementptr inbounds [4 x %struct.Klass], [4 x %struct.Klass]* %a.i, i64 0, i64 0, i32 0
call void @llvm.lifetime.start(i64 -1, i8* %a8)
call void @llvm.lifetime.start(i64 -1, i8* %b8)
- %z3 = load i32* %z2, align 16
+ %z3 = load i32, i32* %z2, align 16
%r = call i32 @foo(i32 %z3, i8* %a8)
%r2 = call i32 @foo(i32 %z3, i8* %b8)
call void @llvm.lifetime.end(i64 -1, i8* %a8)
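
The lifetime intrinsics bracket each slot's live range so StackColoring can overlap the allocas. A minimal sketch of one bracketed slot with the explicit-type load (hypothetical function; the size matches the i32 slot):

declare void @llvm.lifetime.start(i64, i8* nocapture)
declare void @llvm.lifetime.end(i64, i8* nocapture)

define i32 @slot_sketch() {
entry:
  %slot = alloca i32, align 4
  %slot8 = bitcast i32* %slot to i8*
  call void @llvm.lifetime.start(i64 4, i8* %slot8)   ; slot becomes live
  store i32 7, i32* %slot, align 4
  %v = load i32, i32* %slot, align 4                  ; explicit result type
  call void @llvm.lifetime.end(i64 4, i8* %slot8)     ; slot is dead again
  ret i32 %v
}
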
Modified: llvm/trunk/test/CodeGen/X86/SwitchLowering.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/SwitchLowering.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/SwitchLowering.ll (original)
+++ llvm/trunk/test/CodeGen/X86/SwitchLowering.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ bb: ; preds = %bb, %entry
%CurPtr_addr.0.rec = bitcast i32 %indvar to i32 ; <i32> [#uses=1]
%gep.upgrd.1 = zext i32 %indvar to i64 ; <i64> [#uses=1]
%CurPtr_addr.0 = getelementptr i8, i8* %CurPtr, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
- %tmp = load i8* %CurPtr_addr.0 ; <i8> [#uses=3]
+ %tmp = load i8, i8* %CurPtr_addr.0 ; <i8> [#uses=3]
%tmp2.rec = add i32 %CurPtr_addr.0.rec, 1 ; <i32> [#uses=1]
%tmp2 = getelementptr i8, i8* %CurPtr, i32 %tmp2.rec ; <i8*> [#uses=1]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll (original)
+++ llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@
; CHECK: xorl
; CHECK: ret
define void @pull_bitcast (<4 x i8>* %pA, <4 x i8>* %pB) {
- %A = load <4 x i8>* %pA
- %B = load <4 x i8>* %pB
+ %A = load <4 x i8>, <4 x i8>* %pA
+ %B = load <4 x i8>, <4 x i8>* %pB
%C = xor <4 x i8> %A, %B
store <4 x i8> %C, <4 x i8>* %pA
ret void
@@ -22,8 +22,8 @@ define void @pull_bitcast (<4 x i8>* %pA
; CHECK-NEXT: pxor
; CHECK-NEXT: ret
define <4 x i32> @multi_use_swizzle (<4 x i32>* %pA, <4 x i32>* %pB) {
- %A = load <4 x i32>* %pA
- %B = load <4 x i32>* %pB
+ %A = load <4 x i32>, <4 x i32>* %pA
+ %B = load <4 x i32>, <4 x i32>* %pB
%S = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 1, i32 5, i32 6>
%S1 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 2>
%S2 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 2>
@@ -35,9 +35,9 @@ define <4 x i32> @multi_use_swizzle (<4
; CHECK: xorl
; CHECK: ret
define <4 x i8> @pull_bitcast2 (<4 x i8>* %pA, <4 x i8>* %pB, <4 x i8>* %pC) {
- %A = load <4 x i8>* %pA
+ %A = load <4 x i8>, <4 x i8>* %pA
store <4 x i8> %A, <4 x i8>* %pC
- %B = load <4 x i8>* %pB
+ %B = load <4 x i8>, <4 x i8>* %pB
%C = xor <4 x i8> %A, %B
store <4 x i8> %C, <4 x i8>* %pA
ret <4 x i8> %C
@@ -49,8 +49,8 @@ define <4 x i8> @pull_bitcast2 (<4 x i8>
; CHECK-NOT: pshufd
; CHECK: ret
define <4 x i32> @reverse_1 (<4 x i32>* %pA, <4 x i32>* %pB) {
- %A = load <4 x i32>* %pA
- %B = load <4 x i32>* %pB
+ %A = load <4 x i32>, <4 x i32>* %pA
+ %B = load <4 x i32>, <4 x i32>* %pB
%S = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
%S1 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
ret <4 x i32> %S1
@@ -61,8 +61,8 @@ define <4 x i32> @reverse_1 (<4 x i32>*
; CHECK: pshufd
; CHECK: ret
define <4 x i32> @no_reverse_shuff (<4 x i32>* %pA, <4 x i32>* %pB) {
- %A = load <4 x i32>* %pA
- %B = load <4 x i32>* %pB
+ %A = load <4 x i32>, <4 x i32>* %pA
+ %B = load <4 x i32>, <4 x i32>* %pB
%S = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
%S1 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 3, i32 2>
ret <4 x i32> %S1
Modified: llvm/trunk/test/CodeGen/X86/abi-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/abi-isel.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/abi-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/abi-isel.ll Fri Feb 27 15:17:42 2015
@@ -33,7 +33,7 @@
define void @foo00() nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 0), align 4
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @src, i32 0, i64 0), align 4
store i32 %0, i32* getelementptr ([131072 x i32]* @dst, i32 0, i64 0), align 4
ret void
@@ -105,7 +105,7 @@ entry:
define void @fxo00() nounwind {
entry:
- %0 = load i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 0), align 4
+ %0 = load i32, i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 0), align 4
store i32 %0, i32* getelementptr ([32 x i32]* @xdst, i32 0, i64 0), align 4
ret void
@@ -297,8 +297,8 @@ entry:
define void @foo02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 0), align 4
+ %0 = load i32*, i32** @ptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @src, i32 0, i64 0), align 4
store i32 %1, i32* %0, align 4
ret void
; LINUX-64-STATIC-LABEL: foo02:
@@ -379,8 +379,8 @@ entry:
define void @fxo02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 0), align 4
+ %0 = load i32*, i32** @ptr, align 8
+ %1 = load i32, i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 0), align 4
store i32 %1, i32* %0, align 4
; LINUX-64-STATIC-LABEL: fxo02:
; LINUX-64-STATIC: movl xsrc(%rip), %
@@ -461,7 +461,7 @@ entry:
define void @foo03() nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 0), align 32
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 0), align 32
store i32 %0, i32* getelementptr ([131072 x i32]* @ddst, i32 0, i64 0), align 32
ret void
; LINUX-64-STATIC-LABEL: foo03:
@@ -576,8 +576,8 @@ entry:
define void @foo05() nounwind {
entry:
- %0 = load i32** @dptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 0), align 32
+ %0 = load i32*, i32** @dptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 0), align 32
store i32 %1, i32* %0, align 4
ret void
; LINUX-64-STATIC-LABEL: foo05:
@@ -648,7 +648,7 @@ entry:
define void @foo06() nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 0), align 4
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 0), align 4
store i32 %0, i32* getelementptr ([131072 x i32]* @ldst, i32 0, i64 0), align 4
ret void
; LINUX-64-STATIC-LABEL: foo06:
@@ -760,8 +760,8 @@ entry:
define void @foo08() nounwind {
entry:
- %0 = load i32** @lptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 0), align 4
+ %0 = load i32*, i32** @lptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 0), align 4
store i32 %1, i32* %0, align 4
ret void
; LINUX-64-STATIC-LABEL: foo08:
@@ -830,7 +830,7 @@ entry:
define void @qux00() nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 16), align 4
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @src, i32 0, i64 16), align 4
store i32 %0, i32* getelementptr ([131072 x i32]* @dst, i32 0, i64 16), align 4
ret void
; LINUX-64-STATIC-LABEL: qux00:
@@ -901,7 +901,7 @@ entry:
define void @qxx00() nounwind {
entry:
- %0 = load i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 16), align 4
+ %0 = load i32, i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 16), align 4
store i32 %0, i32* getelementptr ([32 x i32]* @xdst, i32 0, i64 16), align 4
ret void
; LINUX-64-STATIC-LABEL: qxx00:
@@ -1104,8 +1104,8 @@ entry:
define void @qux02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 16), align 4
+ %0 = load i32*, i32** @ptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @src, i32 0, i64 16), align 4
%2 = getelementptr i32, i32* %0, i64 16
store i32 %1, i32* %2, align 4
; LINUX-64-STATIC-LABEL: qux02:
@@ -1187,8 +1187,8 @@ entry:
define void @qxx02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 16), align 4
+ %0 = load i32*, i32** @ptr, align 8
+ %1 = load i32, i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 16), align 4
%2 = getelementptr i32, i32* %0, i64 16
store i32 %1, i32* %2, align 4
; LINUX-64-STATIC-LABEL: qxx02:
@@ -1270,7 +1270,7 @@ entry:
define void @qux03() nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 16), align 32
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 16), align 32
store i32 %0, i32* getelementptr ([131072 x i32]* @ddst, i32 0, i64 16), align 32
ret void
; LINUX-64-STATIC-LABEL: qux03:
@@ -1386,8 +1386,8 @@ entry:
define void @qux05() nounwind {
entry:
- %0 = load i32** @dptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 16), align 32
+ %0 = load i32*, i32** @dptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 16), align 32
%2 = getelementptr i32, i32* %0, i64 16
store i32 %1, i32* %2, align 4
; LINUX-64-STATIC-LABEL: qux05:
@@ -1459,7 +1459,7 @@ entry:
define void @qux06() nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 16), align 4
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 16), align 4
store i32 %0, i32* getelementptr ([131072 x i32]* @ldst, i32 0, i64 16), align 4
ret void
; LINUX-64-STATIC-LABEL: qux06:
@@ -1571,8 +1571,8 @@ entry:
define void @qux08() nounwind {
entry:
- %0 = load i32** @lptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 16), align 4
+ %0 = load i32*, i32** @lptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 16), align 4
%2 = getelementptr i32, i32* %0, i64 16
store i32 %1, i32* %2, align 4
; LINUX-64-STATIC-LABEL: qux08:
@@ -1643,7 +1643,7 @@ entry:
define void @ind00(i64 %i) nounwind {
entry:
%0 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %i
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%2 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %i
store i32 %1, i32* %2, align 4
ret void
@@ -1721,7 +1721,7 @@ entry:
define void @ixd00(i64 %i) nounwind {
entry:
%0 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %i
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%2 = getelementptr [32 x i32], [32 x i32]* @xdst, i64 0, i64 %i
store i32 %1, i32* %2, align 4
ret void
@@ -1950,9 +1950,9 @@ entry:
define void @ind02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %i
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr i32, i32* %0, i64 %i
store i32 %2, i32* %3, align 4
ret void
@@ -2039,9 +2039,9 @@ entry:
define void @ixd02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %i
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr i32, i32* %0, i64 %i
store i32 %2, i32* %3, align 4
ret void
@@ -2129,7 +2129,7 @@ entry:
define void @ind03(i64 %i) nounwind {
entry:
%0 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %i
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%2 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %i
store i32 %1, i32* %2, align 4
ret void
@@ -2271,9 +2271,9 @@ entry:
define void @ind05(i64 %i) nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %i
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr i32, i32* %0, i64 %i
store i32 %2, i32* %3, align 4
ret void
@@ -2354,7 +2354,7 @@ entry:
define void @ind06(i64 %i) nounwind {
entry:
%0 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %i
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%2 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %i
store i32 %1, i32* %2, align 4
ret void
@@ -2495,9 +2495,9 @@ entry:
define void @ind08(i64 %i) nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %i
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr i32, i32* %0, i64 %i
store i32 %2, i32* %3, align 4
ret void
@@ -2578,7 +2578,7 @@ define void @off00(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
%1 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -2657,7 +2657,7 @@ define void @oxf00(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
%1 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [32 x i32], [32 x i32]* @xdst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -2888,10 +2888,10 @@ entry:
define void @off02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -2978,10 +2978,10 @@ entry:
define void @oxf02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -3070,7 +3070,7 @@ define void @off03(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
%1 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -3213,10 +3213,10 @@ entry:
define void @off05(i64 %i) nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -3298,7 +3298,7 @@ define void @off06(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
%1 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -3440,10 +3440,10 @@ entry:
define void @off08(i64 %i) nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -3522,7 +3522,7 @@ entry:
define void @moo00(i64 %i) nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 65536), align 4
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @src, i32 0, i64 65536), align 4
store i32 %0, i32* getelementptr ([131072 x i32]* @dst, i32 0, i64 65536), align 4
ret void
; LINUX-64-STATIC-LABEL: moo00:
@@ -3659,8 +3659,8 @@ entry:
define void @moo02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 65536), align 4
+ %0 = load i32*, i32** @ptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @src, i32 0, i64 65536), align 4
%2 = getelementptr i32, i32* %0, i64 65536
store i32 %1, i32* %2, align 4
ret void
@@ -3742,7 +3742,7 @@ entry:
define void @moo03(i64 %i) nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 65536), align 32
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 65536), align 32
store i32 %0, i32* getelementptr ([131072 x i32]* @ddst, i32 0, i64 65536), align 32
ret void
; LINUX-64-STATIC-LABEL: moo03:
@@ -3858,8 +3858,8 @@ entry:
define void @moo05(i64 %i) nounwind {
entry:
- %0 = load i32** @dptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 65536), align 32
+ %0 = load i32*, i32** @dptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 65536), align 32
%2 = getelementptr i32, i32* %0, i64 65536
store i32 %1, i32* %2, align 4
ret void
@@ -3931,7 +3931,7 @@ entry:
define void @moo06(i64 %i) nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 65536), align 4
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 65536), align 4
store i32 %0, i32* getelementptr ([131072 x i32]* @ldst, i32 0, i64 65536), align 4
ret void
; LINUX-64-STATIC-LABEL: moo06:
@@ -4043,8 +4043,8 @@ entry:
define void @moo08(i64 %i) nounwind {
entry:
- %0 = load i32** @lptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 65536), align 4
+ %0 = load i32*, i32** @lptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 65536), align 4
%2 = getelementptr i32, i32* %0, i64 65536
store i32 %1, i32* %2, align 4
ret void
@@ -4116,7 +4116,7 @@ define void @big00(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
%1 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -4270,10 +4270,10 @@ entry:
define void @big02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = add i64 %i, 65536
%2 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -4362,7 +4362,7 @@ define void @big03(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
%1 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -4505,10 +4505,10 @@ entry:
define void @big05(i64 %i) nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = add i64 %i, 65536
%2 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -4590,7 +4590,7 @@ define void @big06(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
%1 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -4732,10 +4732,10 @@ entry:
define void @big08(i64 %i) nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = add i64 %i, 65536
%2 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -5519,7 +5519,7 @@ entry:
define i8* @har02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = bitcast i32* %0 to i8*
ret i8* %1
; LINUX-64-STATIC-LABEL: har02:
@@ -5668,7 +5668,7 @@ entry:
define i8* @har05() nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = bitcast i32* %0 to i8*
ret i8* %1
; LINUX-64-STATIC-LABEL: har05:
@@ -5812,7 +5812,7 @@ entry:
define i8* @har08() nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = bitcast i32* %0 to i8*
ret i8* %1
; LINUX-64-STATIC-LABEL: har08:
@@ -6073,7 +6073,7 @@ entry:
define i8* @bat02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = getelementptr i32, i32* %0, i64 16
%2 = bitcast i32* %1 to i8*
ret i8* %2
@@ -6235,7 +6235,7 @@ entry:
define i8* @bat05() nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = getelementptr i32, i32* %0, i64 16
%2 = bitcast i32* %1 to i8*
ret i8* %2
@@ -6390,7 +6390,7 @@ entry:
define i8* @bat08() nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = getelementptr i32, i32* %0, i64 16
%2 = bitcast i32* %1 to i8*
ret i8* %2
@@ -6609,7 +6609,7 @@ entry:
define i8* @bam02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = getelementptr i32, i32* %0, i64 65536
%2 = bitcast i32* %1 to i8*
ret i8* %2
@@ -6771,7 +6771,7 @@ entry:
define i8* @bam05() nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = getelementptr i32, i32* %0, i64 65536
%2 = bitcast i32* %1 to i8*
ret i8* %2
@@ -6926,7 +6926,7 @@ entry:
define i8* @bam08() nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = getelementptr i32, i32* %0, i64 65536
%2 = bitcast i32* %1 to i8*
ret i8* %2
@@ -7230,7 +7230,7 @@ entry:
define i8* @cat02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
@@ -7420,7 +7420,7 @@ entry:
define i8* @cat05(i64 %i) nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
@@ -7605,7 +7605,7 @@ entry:
define i8* @cat08(i64 %i) nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
@@ -7915,7 +7915,7 @@ entry:
define i8* @cam02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = add i64 %i, 65536
%2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
@@ -8105,7 +8105,7 @@ entry:
define i8* @cam05(i64 %i) nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = add i64 %i, 65536
%2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
@@ -8290,7 +8290,7 @@ entry:
define i8* @cam08(i64 %i) nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = add i64 %i, 65536
%2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
@@ -9180,9 +9180,9 @@ entry:
define void @icaller() nounwind {
entry:
- %0 = load void ()** @ifunc, align 8
+ %0 = load void ()*, void ()** @ifunc, align 8
call void %0() nounwind
- %1 = load void ()** @ifunc, align 8
+ %1 = load void ()*, void ()** @ifunc, align 8
call void %1() nounwind
ret void
; LINUX-64-STATIC-LABEL: icaller:
@@ -9270,9 +9270,9 @@ entry:
define void @dicaller() nounwind {
entry:
- %0 = load void ()** @difunc, align 8
+ %0 = load void ()*, void ()** @difunc, align 8
call void %0() nounwind
- %1 = load void ()** @difunc, align 8
+ %1 = load void ()*, void ()** @difunc, align 8
call void %1() nounwind
ret void
; LINUX-64-STATIC-LABEL: dicaller:
@@ -9353,9 +9353,9 @@ entry:
define void @licaller() nounwind {
entry:
- %0 = load void ()** @lifunc, align 8
+ %0 = load void ()*, void ()** @lifunc, align 8
call void %0() nounwind
- %1 = load void ()** @lifunc, align 8
+ %1 = load void ()*, void ()** @lifunc, align 8
call void %1() nounwind
ret void
; LINUX-64-STATIC-LABEL: licaller:
@@ -9435,9 +9435,9 @@ entry:
define void @itailcaller() nounwind {
entry:
- %0 = load void ()** @ifunc, align 8
+ %0 = load void ()*, void ()** @ifunc, align 8
call void %0() nounwind
- %1 = load void ()** @ifunc, align 8
+ %1 = load void ()*, void ()** @ifunc, align 8
call void %1() nounwind
ret void
; LINUX-64-STATIC-LABEL: itailcaller:
@@ -9525,7 +9525,7 @@ entry:
define void @ditailcaller() nounwind {
entry:
- %0 = load void ()** @difunc, align 8
+ %0 = load void ()*, void ()** @difunc, align 8
call void %0() nounwind
ret void
; LINUX-64-STATIC-LABEL: ditailcaller:
@@ -9593,7 +9593,7 @@ entry:
define void @litailcaller() nounwind {
entry:
- %0 = load void ()** @lifunc, align 8
+ %0 = load void ()*, void ()** @lifunc, align 8
call void %0() nounwind
ret void
; LINUX-64-STATIC-LABEL: litailcaller:
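
The indirect-call functions show where the explicit result type pays off most: the pointee of @ifunc is itself a function pointer, and "load void ()*, void ()** @ifunc" states the loaded value's type up front rather than asking the reader to peel one level off the operand type. A reduced sketch (hypothetical global name):

@fptr = global void ()* null

define void @call_sketch() {
entry:
  ; Result type "void ()*" comes first; the operand type "void ()**" follows.
  %f = load void ()*, void ()** @fptr, align 8
  call void %f() nounwind
  ret void
}
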
Modified: llvm/trunk/test/CodeGen/X86/addr-mode-matcher.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/addr-mode-matcher.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/addr-mode-matcher.ll (original)
+++ llvm/trunk/test/CodeGen/X86/addr-mode-matcher.ll Fri Feb 27 15:17:42 2015
@@ -26,14 +26,14 @@ bb1692:
%tmp1702 = and i32 %tmp1701, 1020
%tmp1703 = getelementptr inbounds [1028 x i8], [1028 x i8]* null, i32 0, i32 %tmp1702
%tmp1704 = bitcast i8* %tmp1703 to i32*
- %load1 = load i32* %tmp1704, align 4
+ %load1 = load i32, i32* %tmp1704, align 4
; %load2 = (load (shl (and %xor, 255), 2))
%tmp1698 = and i32 %xor, 255
%tmp1706 = shl i32 %tmp1698, 2
%tmp1707 = getelementptr inbounds [1028 x i8], [1028 x i8]* null, i32 0, i32 %tmp1706
%tmp1708 = bitcast i8* %tmp1707 to i32*
- %load2 = load i32* %tmp1708, align 4
+ %load2 = load i32, i32* %tmp1708, align 4
%tmp1710 = or i32 %load2, %a
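
The two "; %loadN = ..." comments above describe the address arithmetic the matcher is expected to fold into a single addressing mode. A reduced sketch of the same and/shl/gep/load chain in the new spelling (hypothetical global and names):

@table = global [256 x i32] zeroinitializer

define i32 @masked_index_sketch(i32 %x) {
entry:
  %masked = and i32 %x, 255                     ; keep the index byte
  %scaled = shl i32 %masked, 2                  ; scale by 4 for i32 elements
  %base = bitcast [256 x i32]* @table to i8*
  %byteaddr = getelementptr inbounds i8, i8* %base, i32 %scaled
  %addr = bitcast i8* %byteaddr to i32*
  %v = load i32, i32* %addr, align 4            ; explicit result type
  ret i32 %v
}
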
Modified: llvm/trunk/test/CodeGen/X86/address-type-promotion-constantexpr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/address-type-promotion-constantexpr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/address-type-promotion-constantexpr.ll (original)
+++ llvm/trunk/test/CodeGen/X86/address-type-promotion-constantexpr.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
; CHECK: xor %eax, %eax
define i32 @main() {
entry:
- %foo = load i8* getelementptr ([2 x i8]* @b, i64 0, i64 sext (i8 or (i8 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32]* @c, i64 0, i64 1), i32* @a) to i8), i8 1) to i64)), align 1
+ %foo = load i8, i8* getelementptr ([2 x i8]* @b, i64 0, i64 sext (i8 or (i8 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32]* @c, i64 0, i64 1), i32* @a) to i8), i8 1) to i64)), align 1
ret i32 0
}
Modified: llvm/trunk/test/CodeGen/X86/aliases.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/aliases.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/aliases.ll (original)
+++ llvm/trunk/test/CodeGen/X86/aliases.ll Fri Feb 27 15:17:42 2015
@@ -64,9 +64,9 @@ define i32 @foo_f() {
; CHECK-DAG: .globl test
define i32 @test() {
entry:
- %tmp = load i32* @foo1
- %tmp1 = load i32* @foo2
- %tmp0 = load i32* @bar_i
+ %tmp = load i32, i32* @foo1
+ %tmp1 = load i32, i32* @foo2
+ %tmp0 = load i32, i32* @bar_i
%tmp2 = call i32 @foo_f()
%tmp3 = add i32 %tmp, %tmp2
%tmp4 = call %FunTy* @bar_f()
Modified: llvm/trunk/test/CodeGen/X86/aligned-variadic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/aligned-variadic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/aligned-variadic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/aligned-variadic.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ entry:
%arraydecay1 = bitcast [1 x %struct.__va_list_tag]* %va to i8*
call void @llvm.va_start(i8* %arraydecay1)
%overflow_arg_area_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i64 0, i64 0, i32 2
- %overflow_arg_area = load i8** %overflow_arg_area_p, align 8
+ %overflow_arg_area = load i8*, i8** %overflow_arg_area_p, align 8
%overflow_arg_area.next = getelementptr i8, i8* %overflow_arg_area, i64 24
store i8* %overflow_arg_area.next, i8** %overflow_arg_area_p, align 8
; X32: leal 68(%esp), [[REG:%.*]]
Modified: llvm/trunk/test/CodeGen/X86/and-su.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/and-su.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/and-su.ll (original)
+++ llvm/trunk/test/CodeGen/X86/and-su.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define fastcc i32 @foo(i32* %p) nounwind
; CHECK-LABEL: foo:
; CHECK: andl $10, %eax
; CHECK: je
- %t0 = load i32* %p
+ %t0 = load i32, i32* %p
%t2 = and i32 %t0, 10
%t3 = icmp ne i32 %t2, 0
br i1 %t3, label %bb63, label %bb76
Modified: llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll Fri Feb 27 15:17:42 2015
@@ -36,34 +36,34 @@
define void @func() #0 {
entry:
store i32 0, i32* @sum, align 4
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
store i32 %0, i32* @i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %1 = load i32* @i, align 4
- %2 = load i32* @b, align 4
+ %1 = load i32, i32* @i, align 4
+ %2 = load i32, i32* @b, align 4
%cmp = icmp slt i32 %1, %2
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %3 = load i32 (i32, i32, i32, i32, i32, i32, i32, i32)** @funcp, align 4
- %4 = load i32* @i, align 4
- %5 = load i32* @b, align 4
- %6 = load i32* @c, align 4
- %7 = load i32* @d, align 4
- %8 = load i32* @e, align 4
- %9 = load i32* @f, align 4
- %10 = load i32* @g, align 4
- %11 = load i32* @h, align 4
+ %3 = load i32 (i32, i32, i32, i32, i32, i32, i32, i32)*, i32 (i32, i32, i32, i32, i32, i32, i32, i32)** @funcp, align 4
+ %4 = load i32, i32* @i, align 4
+ %5 = load i32, i32* @b, align 4
+ %6 = load i32, i32* @c, align 4
+ %7 = load i32, i32* @d, align 4
+ %8 = load i32, i32* @e, align 4
+ %9 = load i32, i32* @f, align 4
+ %10 = load i32, i32* @g, align 4
+ %11 = load i32, i32* @h, align 4
%call = call i32 %3(i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11)
- %12 = load i32* @sum, align 4
+ %12 = load i32, i32* @sum, align 4
%add = add nsw i32 %12, %call
store i32 %add, i32* @sum, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %13 = load i32* @i, align 4
+ %13 = load i32, i32* @i, align 4
%inc = add nsw i32 %13, 1
store i32 %inc, i32* @i, align 4
br label %for.cond
Modified: llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll Fri Feb 27 15:17:42 2015
@@ -42,43 +42,43 @@
define void @func() #0 {
entry:
store i32 0, i32* @sum, align 4
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
store i32 %0, i32* @i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %1 = load i32* @i, align 4
- %2 = load i32* @b, align 4
+ %1 = load i32, i32* @i, align 4
+ %2 = load i32, i32* @b, align 4
%cmp = icmp slt i32 %1, %2
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %3 = load i32 (i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)** @funcp, align 8
- %4 = load i32* @a, align 4
- %5 = load i32* @i, align 4
- %6 = load i32* @i, align 4
+ %3 = load i32 (i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)*, i32 (i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)** @funcp, align 8
+ %4 = load i32, i32* @a, align 4
+ %5 = load i32, i32* @i, align 4
+ %6 = load i32, i32* @i, align 4
%mul = mul nsw i32 %6, 2
- %7 = load i32* @i, align 4
- %8 = load i32* @b, align 4
+ %7 = load i32, i32* @i, align 4
+ %8 = load i32, i32* @b, align 4
%div = sdiv i32 %7, %8
- %9 = load i32* @c, align 4
- %10 = load i32* @d, align 4
- %11 = load i32* @e, align 4
- %12 = load i32* @f, align 4
- %13 = load i32* @g, align 4
- %14 = load i32* @h, align 4
- %15 = load i32* @j, align 4
- %16 = load i32* @k, align 4
- %17 = load i32* @l, align 4
- %18 = load i32* @n, align 4
+ %9 = load i32, i32* @c, align 4
+ %10 = load i32, i32* @d, align 4
+ %11 = load i32, i32* @e, align 4
+ %12 = load i32, i32* @f, align 4
+ %13 = load i32, i32* @g, align 4
+ %14 = load i32, i32* @h, align 4
+ %15 = load i32, i32* @j, align 4
+ %16 = load i32, i32* @k, align 4
+ %17 = load i32, i32* @l, align 4
+ %18 = load i32, i32* @n, align 4
%call = call i32 %3(i32 %4, i32 %5, i32 %mul, i32 %div, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, i32 %14, i32 %15, i32 %16, i32 %17, i32 %18)
- %19 = load i32* @sum, align 4
+ %19 = load i32, i32* @sum, align 4
%add = add nsw i32 %19, %call
store i32 %add, i32* @sum, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %20 = load i32* @i, align 4
+ %20 = load i32, i32* @i, align 4
%inc = add nsw i32 %20, 1
store i32 %inc, i32* @i, align 4
br label %for.cond
Modified: llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect.ll Fri Feb 27 15:17:42 2015
@@ -14,8 +14,8 @@ define i32 @test1() #0 {
entry:
%call = tail call %class.A* @_Z3facv()
%0 = bitcast %class.A* %call to void (%class.A*)***
- %vtable = load void (%class.A*)*** %0, align 8
- %1 = load void (%class.A*)** %vtable, align 8
+ %vtable = load void (%class.A*)**, void (%class.A*)*** %0, align 8
+ %1 = load void (%class.A*)*, void (%class.A*)** %vtable, align 8
;ATOM32: movl (%ecx), %ecx
;ATOM32: calll *%ecx
;ATOM-NOT32: calll *(%ecx)
@@ -38,8 +38,8 @@ declare %class.A* @_Z3facv() #1
define i32 @test2() #0 {
;ATOM-LABEL: test2:
entry:
- %0 = load void (i32)*** @p, align 8
- %1 = load void (i32)** %0, align 8
+ %0 = load void (i32)**, void (i32)*** @p, align 8
+ %1 = load void (i32)*, void (i32)** %0, align 8
;ATOM32: movl (%eax), %eax
;ATOM32: calll *%eax
;ATOM-NOT: calll *(%eax)
Modified: llvm/trunk/test/CodeGen/X86/atom-cmpb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atom-cmpb.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atom-cmpb.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atom-cmpb.ll Fri Feb 27 15:17:42 2015
@@ -12,9 +12,9 @@
define i8 @run_test(i8* %rd_p) {
entry:
%incdec.ptr = getelementptr inbounds i8, i8* %rd_p, i64 1
- %ld1 = load i8* %rd_p, align 1
+ %ld1 = load i8, i8* %rd_p, align 1
%incdec.ptr1 = getelementptr inbounds i8, i8* %rd_p, i64 2
- %ld2 = load i8* %incdec.ptr, align 1
+ %ld2 = load i8, i8* %incdec.ptr, align 1
%x4 = xor i8 %ld1, -1
%x5 = xor i8 %ld2, -1
%cmp34 = icmp ult i8 %ld2, %ld1
Modified: llvm/trunk/test/CodeGen/X86/atom-fixup-lea1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atom-fixup-lea1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atom-fixup-lea1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atom-fixup-lea1.ll Fri Feb 27 15:17:42 2015
@@ -26,7 +26,7 @@ for.body:
%i.06 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%sum.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %array, i32 %i.06
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.05
%inc = add nsw i32 %i.06, 1
%exitcond = icmp eq i32 %inc, %n
Modified: llvm/trunk/test/CodeGen/X86/atom-fixup-lea2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atom-fixup-lea2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atom-fixup-lea2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atom-fixup-lea2.ll Fri Feb 27 15:17:42 2015
@@ -38,31 +38,31 @@ entry:
%n = alloca %struct.node_t, align 4
call void bitcast (void (%struct.node_t*, ...)* @getnode to void (%struct.node_t*)*)(%struct.node_t* sret %n)
%array = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 4
- %0 = load i32** %array, align 4
+ %0 = load i32*, i32** %array, align 4
%cmp = icmp eq i32* %0, null
br i1 %cmp, label %if.end, label %land.lhs.true
land.lhs.true:
%p = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 3
- %1 = load i32* %p, align 4
+ %1 = load i32, i32* %p, align 4
%cmp1 = icmp sgt i32 %1, 0
br i1 %cmp1, label %land.lhs.true2, label %if.end
land.lhs.true2:
%k = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 0
- %2 = load i32* %k, align 4
+ %2 = load i32, i32* %k, align 4
%cmp3 = icmp sgt i32 %2, 0
br i1 %cmp3, label %land.lhs.true4, label %if.end
land.lhs.true4:
%n5 = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 2
- %3 = load i32* %n5, align 4
+ %3 = load i32, i32* %n5, align 4
%cmp6 = icmp sgt i32 %3, 0
br i1 %cmp6, label %land.lhs.true7, label %if.end
land.lhs.true7:
%m = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 1
- %4 = load i32* %m, align 4
+ %4 = load i32, i32* %m, align 4
%cmp8 = icmp sgt i32 %4, 0
br i1 %cmp8, label %if.then, label %if.end
@@ -73,7 +73,7 @@ if.then:
%add15 = add nsw i32 %1, %5
%6 = inttoptr i32 %add15 to i32*
%arrayidx = getelementptr inbounds i32, i32* %6, i32 %add12
- %7 = load i32* %arrayidx, align 4
+ %7 = load i32, i32* %arrayidx, align 4
br label %if.end
if.end:
Modified: llvm/trunk/test/CodeGen/X86/atom-fixup-lea3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atom-fixup-lea3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atom-fixup-lea3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atom-fixup-lea3.ll Fri Feb 27 15:17:42 2015
@@ -26,7 +26,7 @@ entry:
br i1 %cmp7, label %for.body.lr.ph, label %for.end
for.body.lr.ph: ; preds = %entry
- %.pre = load i32* %m, align 4
+ %.pre = load i32, i32* %m, align 4
br label %for.body
for.body: ; preds = %for.body, %for.body.lr.ph
@@ -35,11 +35,11 @@ for.body:
%j.09 = phi i32 [ 0, %for.body.lr.ph ], [ %inc1, %for.body ]
%inc1 = add nsw i32 %j.09, 1
%arrayidx = getelementptr inbounds i32, i32* %array2, i32 %j.09
- %1 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %1
store i32 %add, i32* %m, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %array, i32 %inc1
- %2 = load i32* %arrayidx2, align 4
+ %2 = load i32, i32* %arrayidx2, align 4
%add3 = add nsw i32 %2, %sum.010
%exitcond = icmp eq i32 %inc1, %n
br i1 %exitcond, label %for.end, label %for.body
Modified: llvm/trunk/test/CodeGen/X86/atom-fixup-lea4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atom-fixup-lea4.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atom-fixup-lea4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atom-fixup-lea4.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ define linkonce_odr void @_ZN12ValueWrap
entry:
%this.addr = alloca %struct.ValueWrapper.6*, align 8
store %struct.ValueWrapper.6* %this, %struct.ValueWrapper.6** %this.addr, align 8
- %this1 = load %struct.ValueWrapper.6** %this.addr
+ %this1 = load %struct.ValueWrapper.6*, %struct.ValueWrapper.6** %this.addr
%value = getelementptr inbounds %struct.ValueWrapper.6, %struct.ValueWrapper.6* %this1, i32 0, i32 0
call void @_ZN12ValueWrapperIS_IS_IdEEEC2Ev(%struct.ValueWrapper.7* %value)
ret void
Modified: llvm/trunk/test/CodeGen/X86/atom-lea-addw-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atom-lea-addw-bug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atom-lea-addw-bug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atom-lea-addw-bug.ll Fri Feb 27 15:17:42 2015
@@ -5,9 +5,9 @@ target triple = "x86_64-apple-darwin12.5
define i32 @DoLayout() {
entry:
- %tmp1 = load i16* undef, align 2
- %tmp17 = load i16* null, align 2
- %tmp19 = load i16* undef, align 2
+ %tmp1 = load i16, i16* undef, align 2
+ %tmp17 = load i16, i16* null, align 2
+ %tmp19 = load i16, i16* undef, align 2
%shl = shl i16 %tmp19, 1
%add55 = add i16 %tmp17, %tmp1
%add57 = add i16 %add55, %shl
Modified: llvm/trunk/test/CodeGen/X86/atom-sched.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atom-sched.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atom-sched.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atom-sched.ll Fri Feb 27 15:17:42 2015
@@ -21,12 +21,12 @@ define void @func() nounwind uwtable {
; CHECK: movl
; CHECK: imull
entry:
- %0 = load i32* @b, align 4
- %1 = load i32* @c, align 4
+ %0 = load i32, i32* @b, align 4
+ %1 = load i32, i32* @c, align 4
%mul = mul nsw i32 %0, %1
store i32 %mul, i32* @a, align 4
- %2 = load i32* @e, align 4
- %3 = load i32* @f, align 4
+ %2 = load i32, i32* @e, align 4
+ %3 = load i32, i32* @f, align 4
%mul1 = mul nsw i32 %2, %3
store i32 %mul1, i32* @d, align 4
ret void
Modified: llvm/trunk/test/CodeGen/X86/atomic-dagsched.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-dagsched.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-dagsched.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-dagsched.ll Fri Feb 27 15:17:42 2015
@@ -2,10 +2,10 @@
define void @test(i8** %a, i64* %b, i64 %c, i64 %d) nounwind {
entry:
- %ptrtoarg4 = load i8** %a, align 8
+ %ptrtoarg4 = load i8*, i8** %a, align 8
%brglist1 = getelementptr i8*, i8** %a, i64 1
- %ptrtoarg25 = load i8** %brglist1, align 8
- %0 = load i64* %b, align 8
+ %ptrtoarg25 = load i8*, i8** %brglist1, align 8
+ %0 = load i64, i64* %b, align 8
%1 = mul i64 %0, 4
%scevgep = getelementptr i8, i8* %ptrtoarg25, i64 %1
%2 = mul i64 %d, 4
@@ -18,8 +18,8 @@ loop.cond:
br i1 %3, label %return, label %loop
loop: ; preds = %loop.cond
- %4 = load i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
- %5 = load i64* %4, align 8
+ %4 = load i64*, i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
+ %5 = load i64, i64* %4, align 8
%vector.size.i = ashr i64 %5, 3
%num.vector.wi.i = shl i64 %vector.size.i, 3
%6 = icmp eq i64 %vector.size.i, 0
@@ -36,7 +36,7 @@ vector_kernel_entry.i:
%asr.iv = phi i64 [ %asr.iv.next, %vector_kernel_entry.i ], [ %vector.size.i, %dim_0_vector_pre_head.i ]
%8 = addrspacecast i8* %ptrtoarg4 to i32 addrspace(1)*
%asr.iv911 = addrspacecast i8* %asr.iv9 to <8 x i32> addrspace(1)*
- %9 = load <8 x i32> addrspace(1)* %asr.iv911, align 4
+ %9 = load <8 x i32>, <8 x i32> addrspace(1)* %asr.iv911, align 4
%extract8vector_func.i = extractelement <8 x i32> %9, i32 0
%extract9vector_func.i = extractelement <8 x i32> %9, i32 1
%extract10vector_func.i = extractelement <8 x i32> %9, i32 2
@@ -65,8 +65,8 @@ scalarIf.i:
br i1 %18, label %test.exit, label %dim_0_pre_head.i
dim_0_pre_head.i: ; preds = %scalarIf.i
- %19 = load i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
- %20 = load i64* %19, align 8
+ %19 = load i64*, i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
+ %20 = load i64, i64* %19, align 8
%21 = trunc i64 %20 to i32
%22 = mul i64 %vector.size.i, 8
br label %scalar_kernel_entry.i
@@ -76,7 +76,7 @@ scalar_kernel_entry.i:
%23 = addrspacecast i8* %asr.iv6 to i32 addrspace(1)*
%24 = addrspacecast i8* %ptrtoarg4 to i32 addrspace(1)*
%scevgep16 = getelementptr i32, i32 addrspace(1)* %23, i64 %asr.iv12
- %25 = load i32 addrspace(1)* %scevgep16, align 4
+ %25 = load i32, i32 addrspace(1)* %scevgep16, align 4
%26 = atomicrmw min i32 addrspace(1)* %24, i32 %25 seq_cst
%scevgep15 = getelementptr i32, i32 addrspace(1)* %23, i64 %asr.iv12
store i32 %21, i32 addrspace(1)* %scevgep15, align 4
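The address-space cases in atomic-dagsched.ll above follow the same rule: the explicit type names the loaded value, which may itself be a pointer, as in the i64* loaded through an addrspace(256) pointer, while every qualifier stays on the pointer operand's type. A minimal sketch, with illustrative names:

define i32 @load_from_as1(i32 addrspace(1)* %q) {
entry:
  ; The addrspace(1) qualifier belongs to the pointer type only;
  ; the new first parameter is just the type of the value being loaded.
  %v = load i32, i32 addrspace(1)* %q, align 4
  ret i32 %v
}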
Modified: llvm/trunk/test/CodeGen/X86/atomic-load-store-wide.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-load-store-wide.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-load-store-wide.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-load-store-wide.ll Fri Feb 27 15:17:42 2015
@@ -16,6 +16,6 @@ define i64 @test2(i64* %ptr) {
; CHECK-LABEL: test2
; CHECK: lock
; CHECK-NEXT: cmpxchg8b
- %val = load atomic i64* %ptr seq_cst, align 8
+ %val = load atomic i64, i64* %ptr seq_cst, align 8
ret i64 %val
}
Modified: llvm/trunk/test/CodeGen/X86/atomic-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-load-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-load-store.ll Fri Feb 27 15:17:42 2015
@@ -18,6 +18,6 @@ define void @test2(i32* %ptr, i32 %val1)
define i32 @test3(i32* %ptr) {
; CHECK: test3
; CHECK: movl (%rdi), %eax
- %val = load atomic i32* %ptr seq_cst, align 4
+ %val = load atomic i32, i32* %ptr seq_cst, align 4
ret i32 %val
}
Modified: llvm/trunk/test/CodeGen/X86/atomic-or.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-or.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-or.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-or.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define void @t1(i64* %p, i32 %b) nounwin
entry:
%p.addr = alloca i64*, align 8
store i64* %p, i64** %p.addr, align 8
- %tmp = load i64** %p.addr, align 8
+ %tmp = load i64*, i64** %p.addr, align 8
; CHECK-LABEL: t1:
; CHECK: movl $2147483648, %eax
; CHECK: lock
@@ -19,7 +19,7 @@ define void @t2(i64* %p, i32 %b) nounwin
entry:
%p.addr = alloca i64*, align 8
store i64* %p, i64** %p.addr, align 8
- %tmp = load i64** %p.addr, align 8
+ %tmp = load i64*, i64** %p.addr, align 8
; CHECK-LABEL: t2:
; CHECK: lock
; CHECK-NEXT: orq $2147483644, (%r{{.*}})
Modified: llvm/trunk/test/CodeGen/X86/atomic-pointer.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-pointer.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-pointer.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-pointer.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define i32* @test_atomic_ptr_load(i32**
; CHECK: movl
; CHECK: ret
0:
- %0 = load atomic i32** %a0 seq_cst, align 4
+ %0 = load atomic i32*, i32** %a0 seq_cst, align 4
ret i32* %0
}
Modified: llvm/trunk/test/CodeGen/X86/atomic128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic128.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic128.ll Fri Feb 27 15:17:42 2015
@@ -249,7 +249,7 @@ define i128 @atomic_load_seq_cst(i128* %
; CHECK: lock
; CHECK: cmpxchg16b (%rdi)
- %r = load atomic i128* %p seq_cst, align 16
+ %r = load atomic i128, i128* %p seq_cst, align 16
ret i128 %r
}
@@ -262,7 +262,7 @@ define i128 @atomic_load_relaxed(i128* %
; CHECK: lock
; CHECK: cmpxchg16b (%rdi)
- %r = load atomic i128* %p monotonic, align 16
+ %r = load atomic i128, i128* %p monotonic, align 16
ret i128 %r
}
Modified: llvm/trunk/test/CodeGen/X86/atomic_mi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic_mi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic_mi.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic_mi.ll Fri Feb 27 15:17:42 2015
@@ -103,7 +103,7 @@ define void @add_8(i8* %p) {
; X32-NOT: lock
; X32: addb
; X32-NOT: movb
- %1 = load atomic i8* %p seq_cst, align 1
+ %1 = load atomic i8, i8* %p seq_cst, align 1
%2 = add i8 %1, 2
store atomic i8 %2, i8* %p release, align 1
ret void
@@ -116,7 +116,7 @@ define void @add_16(i16* %p) {
; X64-NOT: addw
; X32-LABEL: add_16
; X32-NOT: addw
- %1 = load atomic i16* %p acquire, align 2
+ %1 = load atomic i16, i16* %p acquire, align 2
%2 = add i16 %1, 2
store atomic i16 %2, i16* %p release, align 2
ret void
@@ -131,7 +131,7 @@ define void @add_32(i32* %p) {
; X32-NOT: lock
; X32: addl
; X32-NOT: movl
- %1 = load atomic i32* %p acquire, align 4
+ %1 = load atomic i32, i32* %p acquire, align 4
%2 = add i32 %1, 2
store atomic i32 %2, i32* %p monotonic, align 4
ret void
@@ -144,7 +144,7 @@ define void @add_64(i64* %p) {
; X64-NOT: movq
; We do not check X86-32 as it cannot do 'addq'.
; X32-LABEL: add_64
- %1 = load atomic i64* %p acquire, align 8
+ %1 = load atomic i64, i64* %p acquire, align 8
%2 = add i64 %1, 2
store atomic i64 %2, i64* %p release, align 8
ret void
@@ -155,7 +155,7 @@ define void @add_32_seq_cst(i32* %p) {
; X64: xchgl
; X32-LABEL: add_32_seq_cst
; X32: xchgl
- %1 = load atomic i32* %p monotonic, align 4
+ %1 = load atomic i32, i32* %p monotonic, align 4
%2 = add i32 %1, 2
store atomic i32 %2, i32* %p seq_cst, align 4
ret void
@@ -172,7 +172,7 @@ define void @and_8(i8* %p) {
; X32-NOT: lock
; X32: andb
; X32-NOT: movb
- %1 = load atomic i8* %p monotonic, align 1
+ %1 = load atomic i8, i8* %p monotonic, align 1
%2 = and i8 %1, 2
store atomic i8 %2, i8* %p release, align 1
ret void
@@ -185,7 +185,7 @@ define void @and_16(i16* %p) {
; X64-NOT: andw
; X32-LABEL: and_16
; X32-NOT: andw
- %1 = load atomic i16* %p acquire, align 2
+ %1 = load atomic i16, i16* %p acquire, align 2
%2 = and i16 %1, 2
store atomic i16 %2, i16* %p release, align 2
ret void
@@ -200,7 +200,7 @@ define void @and_32(i32* %p) {
; X32-NOT: lock
; X32: andl
; X32-NOT: movl
- %1 = load atomic i32* %p acquire, align 4
+ %1 = load atomic i32, i32* %p acquire, align 4
%2 = and i32 %1, 2
store atomic i32 %2, i32* %p release, align 4
ret void
@@ -213,7 +213,7 @@ define void @and_64(i64* %p) {
; X64-NOT: movq
; We do not check X86-32 as it cannot do 'andq'.
; X32-LABEL: and_64
- %1 = load atomic i64* %p acquire, align 8
+ %1 = load atomic i64, i64* %p acquire, align 8
%2 = and i64 %1, 2
store atomic i64 %2, i64* %p release, align 8
ret void
@@ -224,7 +224,7 @@ define void @and_32_seq_cst(i32* %p) {
; X64: xchgl
; X32-LABEL: and_32_seq_cst
; X32: xchgl
- %1 = load atomic i32* %p monotonic, align 4
+ %1 = load atomic i32, i32* %p monotonic, align 4
%2 = and i32 %1, 2
store atomic i32 %2, i32* %p seq_cst, align 4
ret void
@@ -241,7 +241,7 @@ define void @or_8(i8* %p) {
; X32-NOT: lock
; X32: orb
; X32-NOT: movb
- %1 = load atomic i8* %p acquire, align 1
+ %1 = load atomic i8, i8* %p acquire, align 1
%2 = or i8 %1, 2
store atomic i8 %2, i8* %p release, align 1
ret void
@@ -252,7 +252,7 @@ define void @or_16(i16* %p) {
; X64-NOT: orw
; X32-LABEL: or_16
; X32-NOT: orw
- %1 = load atomic i16* %p acquire, align 2
+ %1 = load atomic i16, i16* %p acquire, align 2
%2 = or i16 %1, 2
store atomic i16 %2, i16* %p release, align 2
ret void
@@ -267,7 +267,7 @@ define void @or_32(i32* %p) {
; X32-NOT: lock
; X32: orl
; X32-NOT: movl
- %1 = load atomic i32* %p acquire, align 4
+ %1 = load atomic i32, i32* %p acquire, align 4
%2 = or i32 %1, 2
store atomic i32 %2, i32* %p release, align 4
ret void
@@ -280,7 +280,7 @@ define void @or_64(i64* %p) {
; X64-NOT: movq
; We do not check X86-32 as it cannot do 'orq'.
; X32-LABEL: or_64
- %1 = load atomic i64* %p acquire, align 8
+ %1 = load atomic i64, i64* %p acquire, align 8
%2 = or i64 %1, 2
store atomic i64 %2, i64* %p release, align 8
ret void
@@ -291,7 +291,7 @@ define void @or_32_seq_cst(i32* %p) {
; X64: xchgl
; X32-LABEL: or_32_seq_cst
; X32: xchgl
- %1 = load atomic i32* %p monotonic, align 4
+ %1 = load atomic i32, i32* %p monotonic, align 4
%2 = or i32 %1, 2
store atomic i32 %2, i32* %p seq_cst, align 4
ret void
@@ -308,7 +308,7 @@ define void @xor_8(i8* %p) {
; X32-NOT: lock
; X32: xorb
; X32-NOT: movb
- %1 = load atomic i8* %p acquire, align 1
+ %1 = load atomic i8, i8* %p acquire, align 1
%2 = xor i8 %1, 2
store atomic i8 %2, i8* %p release, align 1
ret void
@@ -319,7 +319,7 @@ define void @xor_16(i16* %p) {
; X64-NOT: xorw
; X32-LABEL: xor_16
; X32-NOT: xorw
- %1 = load atomic i16* %p acquire, align 2
+ %1 = load atomic i16, i16* %p acquire, align 2
%2 = xor i16 %1, 2
store atomic i16 %2, i16* %p release, align 2
ret void
@@ -334,7 +334,7 @@ define void @xor_32(i32* %p) {
; X32-NOT: lock
; X32: xorl
; X32-NOT: movl
- %1 = load atomic i32* %p acquire, align 4
+ %1 = load atomic i32, i32* %p acquire, align 4
%2 = xor i32 %1, 2
store atomic i32 %2, i32* %p release, align 4
ret void
@@ -347,7 +347,7 @@ define void @xor_64(i64* %p) {
; X64-NOT: movq
; We do not check X86-32 as it cannot do 'xorq'.
; X32-LABEL: xor_64
- %1 = load atomic i64* %p acquire, align 8
+ %1 = load atomic i64, i64* %p acquire, align 8
%2 = xor i64 %1, 2
store atomic i64 %2, i64* %p release, align 8
ret void
@@ -358,7 +358,7 @@ define void @xor_32_seq_cst(i32* %p) {
; X64: xchgl
; X32-LABEL: xor_32_seq_cst
; X32: xchgl
- %1 = load atomic i32* %p monotonic, align 4
+ %1 = load atomic i32, i32* %p monotonic, align 4
%2 = xor i32 %1, 2
store atomic i32 %2, i32* %p seq_cst, align 4
ret void
@@ -378,7 +378,7 @@ define void @inc_8(i8* %p) {
; SLOW_INC-LABEL: inc_8
; SLOW_INC-NOT: incb
; SLOW_INC-NOT: movb
- %1 = load atomic i8* %p seq_cst, align 1
+ %1 = load atomic i8, i8* %p seq_cst, align 1
%2 = add i8 %1, 1
store atomic i8 %2, i8* %p release, align 1
ret void
@@ -393,7 +393,7 @@ define void @inc_16(i16* %p) {
; X32-NOT: incw
; SLOW_INC-LABEL: inc_16
; SLOW_INC-NOT: incw
- %1 = load atomic i16* %p acquire, align 2
+ %1 = load atomic i16, i16* %p acquire, align 2
%2 = add i16 %1, 1
store atomic i16 %2, i16* %p release, align 2
ret void
@@ -411,7 +411,7 @@ define void @inc_32(i32* %p) {
; SLOW_INC-LABEL: inc_32
; SLOW_INC-NOT: incl
; SLOW_INC-NOT: movl
- %1 = load atomic i32* %p acquire, align 4
+ %1 = load atomic i32, i32* %p acquire, align 4
%2 = add i32 %1, 1
store atomic i32 %2, i32* %p monotonic, align 4
ret void
@@ -427,7 +427,7 @@ define void @inc_64(i64* %p) {
; SLOW_INC-LABEL: inc_64
; SLOW_INC-NOT: incq
; SLOW_INC-NOT: movq
- %1 = load atomic i64* %p acquire, align 8
+ %1 = load atomic i64, i64* %p acquire, align 8
%2 = add i64 %1, 1
store atomic i64 %2, i64* %p release, align 8
ret void
@@ -438,7 +438,7 @@ define void @inc_32_seq_cst(i32* %p) {
; X64: xchgl
; X32-LABEL: inc_32_seq_cst
; X32: xchgl
- %1 = load atomic i32* %p monotonic, align 4
+ %1 = load atomic i32, i32* %p monotonic, align 4
%2 = add i32 %1, 1
store atomic i32 %2, i32* %p seq_cst, align 4
ret void
@@ -458,7 +458,7 @@ define void @dec_8(i8* %p) {
; SLOW_INC-LABEL: dec_8
; SLOW_INC-NOT: decb
; SLOW_INC-NOT: movb
- %1 = load atomic i8* %p seq_cst, align 1
+ %1 = load atomic i8, i8* %p seq_cst, align 1
%2 = sub i8 %1, 1
store atomic i8 %2, i8* %p release, align 1
ret void
@@ -473,7 +473,7 @@ define void @dec_16(i16* %p) {
; X32-NOT: decw
; SLOW_INC-LABEL: dec_16
; SLOW_INC-NOT: decw
- %1 = load atomic i16* %p acquire, align 2
+ %1 = load atomic i16, i16* %p acquire, align 2
%2 = sub i16 %1, 1
store atomic i16 %2, i16* %p release, align 2
ret void
@@ -491,7 +491,7 @@ define void @dec_32(i32* %p) {
; SLOW_INC-LABEL: dec_32
; SLOW_INC-NOT: decl
; SLOW_INC-NOT: movl
- %1 = load atomic i32* %p acquire, align 4
+ %1 = load atomic i32, i32* %p acquire, align 4
%2 = sub i32 %1, 1
store atomic i32 %2, i32* %p monotonic, align 4
ret void
@@ -507,7 +507,7 @@ define void @dec_64(i64* %p) {
; SLOW_INC-LABEL: dec_64
; SLOW_INC-NOT: decq
; SLOW_INC-NOT: movq
- %1 = load atomic i64* %p acquire, align 8
+ %1 = load atomic i64, i64* %p acquire, align 8
%2 = sub i64 %1, 1
store atomic i64 %2, i64* %p release, align 8
ret void
@@ -518,7 +518,7 @@ define void @dec_32_seq_cst(i32* %p) {
; X64: xchgl
; X32-LABEL: dec_32_seq_cst
; X32: xchgl
- %1 = load atomic i32* %p monotonic, align 4
+ %1 = load atomic i32, i32* %p monotonic, align 4
%2 = sub i32 %1, 1
store atomic i32 %2, i32* %p seq_cst, align 4
ret void
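The atomic_mi.ll hunks above all rewrite the same load/modify/store idiom; for atomic loads the explicit value type slots in before the pointer operand, and the ordering and alignment clauses are untouched. A minimal sketch of that idiom, with illustrative names:

define void @add_one(i32* %p) {
entry:
  ; Old form: %old = load atomic i32* %p acquire, align 4
  %old = load atomic i32, i32* %p acquire, align 4
  %new = add i32 %old, 1
  store atomic i32 %new, i32* %p release, align 4
  ret void
}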
Modified: llvm/trunk/test/CodeGen/X86/atomic_op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic_op.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic_op.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic_op.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ entry:
store i32 3855, i32* %ort
store i32 3855, i32* %xort
store i32 4, i32* %temp
- %tmp = load i32* %temp
+ %tmp = load i32, i32* %temp
; CHECK: lock
; CHECK: xaddl
%0 = atomicrmw add i32* %val1, i32 %tmp monotonic
Modified: llvm/trunk/test/CodeGen/X86/avoid-loop-align-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avoid-loop-align-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avoid-loop-align-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avoid-loop-align-2.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ entry:
bb.nph12: ; preds = %entry
%1 = icmp eq i32 %b, 0 ; <i1> [#uses=1]
- %2 = load i32** @x, align 8 ; <i32*> [#uses=1]
+ %2 = load i32*, i32** @x, align 8 ; <i32*> [#uses=1]
br i1 %1, label %bb2.preheader, label %bb2.preheader.us
bb2.preheader.us: ; preds = %bb2.bb3_crit_edge.us, %bb.nph12
@@ -27,7 +27,7 @@ bb1.us: ; preds = %bb1.us, %bb2.prehead
%tmp17 = add i32 %indvar, %tmp16 ; <i32> [#uses=1]
%tmp. = zext i32 %tmp17 to i64 ; <i64> [#uses=1]
%3 = getelementptr i32, i32* %2, i64 %tmp. ; <i32*> [#uses=1]
- %4 = load i32* %3, align 4 ; <i32> [#uses=2]
+ %4 = load i32, i32* %3, align 4 ; <i32> [#uses=2]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %b ; <i1> [#uses=1]
br i1 %exitcond, label %bb2.bb3_crit_edge.us, label %bb1.us
Modified: llvm/trunk/test/CodeGen/X86/avoid-loop-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avoid-loop-align.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avoid-loop-align.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avoid-loop-align.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ bb: ; preds = %bb1, %bb1
bb1: ; preds = %bb, %entry
%P.0.rec = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%P.0 = getelementptr i8, i8* %tmp1, i32 %P.0.rec ; <i8*> [#uses=3]
- %tmp2 = load i8* %P.0, align 1 ; <i8> [#uses=1]
+ %tmp2 = load i8, i8* %P.0, align 1 ; <i8> [#uses=1]
switch i8 %tmp2, label %bb4 [
i8 12, label %bb
i8 42, label %bb
Modified: llvm/trunk/test/CodeGen/X86/avoid_complex_am.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avoid_complex_am.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avoid_complex_am.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avoid_complex_am.ll Fri Feb 27 15:17:42 2015
@@ -20,12 +20,12 @@ for.body:
%indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next, %for.body ]
%tmp = add nsw i64 %indvars.iv, -1
%arrayidx = getelementptr inbounds double, double* %b, i64 %tmp
- %tmp1 = load double* %arrayidx, align 8
+ %tmp1 = load double, double* %arrayidx, align 8
; The induction variable should carry the scaling factor: 1.
; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 1
%indvars.iv.next = add i64 %indvars.iv, 1
%arrayidx2 = getelementptr inbounds double, double* %c, i64 %indvars.iv.next
- %tmp2 = load double* %arrayidx2, align 8
+ %tmp2 = load double, double* %arrayidx2, align 8
%mul = fmul double %tmp1, %tmp2
%arrayidx4 = getelementptr inbounds double, double* %a, i64 %indvars.iv
store double %mul, double* %arrayidx4, align 8
Modified: llvm/trunk/test/CodeGen/X86/avx-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-arith.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-arith.ll Fri Feb 27 15:17:42 2015
@@ -38,7 +38,7 @@ entry:
; CHECK: vsubpd (%
define <4 x double> @subpd256fold(<4 x double> %y, <4 x double>* nocapture %x) nounwind uwtable readonly ssp {
entry:
- %tmp2 = load <4 x double>* %x, align 32
+ %tmp2 = load <4 x double>, <4 x double>* %x, align 32
%sub.i = fsub <4 x double> %y, %tmp2
ret <4 x double> %sub.i
}
@@ -53,7 +53,7 @@ entry:
; CHECK: vsubps (%
define <8 x float> @subps256fold(<8 x float> %y, <8 x float>* nocapture %x) nounwind uwtable readonly ssp {
entry:
- %tmp2 = load <8 x float>* %x, align 32
+ %tmp2 = load <8 x float>, <8 x float>* %x, align 32
%sub.i = fsub <8 x float> %y, %tmp2
ret <8 x float> %sub.i
}
@@ -264,7 +264,7 @@ declare <4 x float> @llvm.x86.sse.sqrt.s
define <4 x float> @int_sqrt_ss() {
; CHECK: int_sqrt_ss
; CHECK: vsqrtss
- %x0 = load float addrspace(1)* undef, align 8
+ %x0 = load float, float addrspace(1)* undef, align 8
%x1 = insertelement <4 x float> undef, float %x0, i32 0
%x2 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %x1) nounwind
ret <4 x float> %x2
Modified: llvm/trunk/test/CodeGen/X86/avx-basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-basic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-basic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-basic.ll Fri Feb 27 15:17:42 2015
@@ -57,10 +57,10 @@ entry:
define <8 x i32> @VMOVZQI2PQI([0 x float]* nocapture %aFOO) nounwind {
allocas:
%ptrcast.i33.i = bitcast [0 x float]* %aFOO to i32*
- %val.i34.i = load i32* %ptrcast.i33.i, align 4
+ %val.i34.i = load i32, i32* %ptrcast.i33.i, align 4
%ptroffset.i22.i992 = getelementptr [0 x float], [0 x float]* %aFOO, i64 0, i64 1
%ptrcast.i23.i = bitcast float* %ptroffset.i22.i992 to i32*
- %val.i24.i = load i32* %ptrcast.i23.i, align 4
+ %val.i24.i = load i32, i32* %ptrcast.i23.i, align 4
%updatedret.i30.i = insertelement <8 x i32> undef, i32 %val.i34.i, i32 1
ret <8 x i32> %updatedret.i30.i
}
Modified: llvm/trunk/test/CodeGen/X86/avx-bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-bitcast.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-bitcast.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
; CHECK: vmovsd (%
; CHECK-NEXT: vmovq %xmm
define i64 @bitcasti64tof64() {
- %a = load double* undef
+ %a = load double, double* undef
%b = bitcast double %a to i64
ret i64 %b
}
Modified: llvm/trunk/test/CodeGen/X86/avx-cvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-cvt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-cvt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-cvt.ll Fri Feb 27 15:17:42 2015
@@ -47,7 +47,7 @@ define <4 x double> @fpext00(<4 x float>
; CHECK: vcvtsi2sdq (%
define double @funcA(i64* nocapture %e) nounwind uwtable readonly ssp {
entry:
- %tmp1 = load i64* %e, align 8
+ %tmp1 = load i64, i64* %e, align 8
%conv = sitofp i64 %tmp1 to double
ret double %conv
}
@@ -55,7 +55,7 @@ entry:
; CHECK: vcvtsi2sdl (%
define double @funcB(i32* nocapture %e) nounwind uwtable readonly ssp {
entry:
- %tmp1 = load i32* %e, align 4
+ %tmp1 = load i32, i32* %e, align 4
%conv = sitofp i32 %tmp1 to double
ret double %conv
}
@@ -63,7 +63,7 @@ entry:
; CHECK: vcvtsi2ssl (%
define float @funcC(i32* nocapture %e) nounwind uwtable readonly ssp {
entry:
- %tmp1 = load i32* %e, align 4
+ %tmp1 = load i32, i32* %e, align 4
%conv = sitofp i32 %tmp1 to float
ret float %conv
}
@@ -71,7 +71,7 @@ entry:
; CHECK: vcvtsi2ssq (%
define float @funcD(i64* nocapture %e) nounwind uwtable readonly ssp {
entry:
- %tmp1 = load i64* %e, align 8
+ %tmp1 = load i64, i64* %e, align 8
%conv = sitofp i64 %tmp1 to float
ret float %conv
}
@@ -81,7 +81,7 @@ define void @fpext() nounwind uwtable {
entry:
%f = alloca float, align 4
%d = alloca double, align 8
- %tmp = load float* %f, align 4
+ %tmp = load float, float* %f, align 4
%conv = fpext float %tmp to double
store double %conv, double* %d, align 8
ret void
Modified: llvm/trunk/test/CodeGen/X86/avx-intel-ocl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intel-ocl.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intel-ocl.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intel-ocl.ll Fri Feb 27 15:17:42 2015
@@ -33,7 +33,7 @@ define <16 x float> @testf16_inp(<16 x f
%y = alloca <16 x float>, align 16
%x = fadd <16 x float> %a, %b
%1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
- %2 = load <16 x float>* %y, align 16
+ %2 = load <16 x float>, <16 x float>* %y, align 16
%3 = fadd <16 x float> %2, %1
ret <16 x float> %3
}
@@ -58,7 +58,7 @@ define <16 x float> @testf16_regs(<16 x
%y = alloca <16 x float>, align 16
%x = fadd <16 x float> %a, %b
%1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
- %2 = load <16 x float>* %y, align 16
+ %2 = load <16 x float>, <16 x float>* %y, align 16
%3 = fadd <16 x float> %1, %b
%4 = fadd <16 x float> %2, %3
ret <16 x float> %4
Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll Fri Feb 27 15:17:42 2015
@@ -1126,8 +1126,8 @@ define i32 @test_x86_sse42_pcmpestri128_
; CHECK: movl $7
; CHECK: vpcmpestri $7, (
; CHECK: movl
- %1 = load <16 x i8>* %a0
- %2 = load <16 x i8>* %a2
+ %1 = load <16 x i8>, <16 x i8>* %a0
+ %2 = load <16 x i8>, <16 x i8>* %a2
%res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %1, i32 7, <16 x i8> %2, i32 7, i8 7) ; <i32> [#uses=1]
ret i32 %res
}
@@ -1204,7 +1204,7 @@ define <16 x i8> @test_x86_sse42_pcmpest
; CHECK: movl $7
; CHECK: vpcmpestrm $7,
; CHECK-NOT: vmov
- %1 = load <16 x i8>* %a2
+ %1 = load <16 x i8>, <16 x i8>* %a2
%res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %1, i32 7, i8 7) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}
@@ -1222,8 +1222,8 @@ declare i32 @llvm.x86.sse42.pcmpistri128
define i32 @test_x86_sse42_pcmpistri128_load(<16 x i8>* %a0, <16 x i8>* %a1) {
; CHECK: vpcmpistri $7, (
; CHECK: movl
- %1 = load <16 x i8>* %a0
- %2 = load <16 x i8>* %a1
+ %1 = load <16 x i8>, <16 x i8>* %a0
+ %2 = load <16 x i8>, <16 x i8>* %a1
%res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %1, <16 x i8> %2, i8 7) ; <i32> [#uses=1]
ret i32 %res
}
@@ -1286,7 +1286,7 @@ declare <16 x i8> @llvm.x86.sse42.pcmpis
define <16 x i8> @test_x86_sse42_pcmpistrm128_load(<16 x i8> %a0, <16 x i8>* %a1) {
; CHECK: vpcmpistrm $7, (
; CHECK-NOT: vmov
- %1 = load <16 x i8>* %a1
+ %1 = load <16 x i8>, <16 x i8>* %a1
%res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %1, i8 7) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}
@@ -2330,7 +2330,7 @@ define <4 x float> @test_x86_avx_vpermil
}
define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, <4 x i32>* %a1) {
; CHECK: vpermilps
- %a2 = load <4 x i32>* %a1
+ %a2 = load <4 x i32>, <4 x i32>* %a1
%res = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a2) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
Modified: llvm/trunk/test/CodeGen/X86/avx-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-load-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-load-store.ll Fri Feb 27 15:17:42 2015
@@ -10,10 +10,10 @@
define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind uwtable ssp {
entry:
%0 = bitcast double* %d to <4 x double>*
- %tmp1.i = load <4 x double>* %0, align 32
+ %tmp1.i = load <4 x double>, <4 x double>* %0, align 32
%1 = bitcast float* %f to <8 x float>*
- %tmp1.i17 = load <8 x float>* %1, align 32
- %tmp1.i16 = load <4 x i64>* %i, align 32
+ %tmp1.i17 = load <8 x float>, <8 x float>* %1, align 32
+ %tmp1.i16 = load <4 x i64>, <4 x i64>* %i, align 32
tail call void @dummy(<4 x double> %tmp1.i, <8 x float> %tmp1.i17, <4 x i64> %tmp1.i16) nounwind
store <4 x double> %tmp1.i, <4 x double>* %0, align 32
store <8 x float> %tmp1.i17, <8 x float>* %1, align 32
@@ -29,7 +29,7 @@ declare void @dummy(<4 x double>, <8 x f
; CHECK: mov00
define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
- %val = load float* %ptr
+ %val = load float, float* %ptr
; CHECK: vinsertps
; CHECK: vinsertf128
%i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
@@ -39,7 +39,7 @@ define <8 x float> @mov00(<8 x float> %v
; CHECK: mov01
define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
- %val = load double* %ptr
+ %val = load double, double* %ptr
; CHECK: vmovlpd
; CHECK: vinsertf128
%i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
@@ -122,7 +122,7 @@ cif_mixed_test_any_check:
; CHECK: vmovups
; CHECK: vmovups
define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
- %b = load <8 x i32>* %bp, align 1
+ %b = load <8 x i32>, <8 x i32>* %bp, align 1
%x = add <8 x i32> zeroinitializer, %b
store <8 x i32> %x, <8 x i32>* %ret, align 1
ret void
@@ -132,7 +132,7 @@ define void @add8i32(<8 x i32>* %ret, <8
; CHECK: vmovaps ({{.*}}), %ymm{{.*}}
; CHECK: vmovaps %ymm{{.*}}, ({{.*}})
define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
- %b = load <4 x i64>* %bp, align 64
+ %b = load <4 x i64>, <4 x i64>* %bp, align 64
%x = add <4 x i64> zeroinitializer, %b
store <4 x i64> %x, <4 x i64>* %ret, align 64
ret void
@@ -144,7 +144,7 @@ define void @add4i64a64(<4 x i64>* %ret,
; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
- %b = load <4 x i64>* %bp, align 16
+ %b = load <4 x i64>, <4 x i64>* %bp, align 16
%x = add <4 x i64> zeroinitializer, %b
store <4 x i64> %x, <4 x i64>* %ret, align 16
ret void
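Vector loads in the AVX tests change the same way: the full vector type is repeated before the pointer operand, and the alignment (deliberately low in some tests above, to exercise unaligned vmovups) is unaffected. A minimal sketch, with illustrative names:

define <8 x float> @load_v8f32(<8 x float>* %p) {
entry:
  ; Old form: %v = load <8 x float>* %p, align 32
  %v = load <8 x float>, <8 x float>* %p, align 32
  ret <8 x float> %v
}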
Modified: llvm/trunk/test/CodeGen/X86/avx-logic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-logic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-logic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-logic.ll Fri Feb 27 15:17:42 2015
@@ -142,7 +142,7 @@ entry:
; CHECK: vandnpd (%
define <4 x double> @andnotpd256fold(<4 x double> %y, <4 x double>* nocapture %x) nounwind uwtable readonly ssp {
entry:
- %tmp2 = load <4 x double>* %x, align 32
+ %tmp2 = load <4 x double>, <4 x double>* %x, align 32
%0 = bitcast <4 x double> %y to <4 x i64>
%neg.i = xor <4 x i64> %0, <i64 -1, i64 -1, i64 -1, i64 -1>
%1 = bitcast <4 x double> %tmp2 to <4 x i64>
@@ -167,7 +167,7 @@ entry:
; CHECK: vandnps (%
define <8 x float> @andnotps256fold(<8 x float> %y, <8 x float>* nocapture %x) nounwind uwtable readonly ssp {
entry:
- %tmp2 = load <8 x float>* %x, align 32
+ %tmp2 = load <8 x float>, <8 x float>* %x, align 32
%0 = bitcast <8 x float> %y to <8 x i32>
%neg.i = xor <8 x i32> %0, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%1 = bitcast <8 x float> %tmp2 to <8 x i32>
Modified: llvm/trunk/test/CodeGen/X86/avx-splat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-splat.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-splat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-splat.ll Fri Feb 27 15:17:42 2015
@@ -58,7 +58,7 @@ for_exit499:
load.i1247: ; preds = %for_exit499
%ptr1227 = getelementptr [18 x [18 x float]], [18 x [18 x float]]* %udx495, i64 0, i64 1, i64 1
%ptr.i1237 = bitcast float* %ptr1227 to i32*
- %val.i1238 = load i32* %ptr.i1237, align 4
+ %val.i1238 = load i32, i32* %ptr.i1237, align 4
%ret6.i1245 = insertelement <8 x i32> undef, i32 %val.i1238, i32 6
%ret7.i1246 = insertelement <8 x i32> %ret6.i1245, i32 %val.i1238, i32 7
%phitmp = bitcast <8 x i32> %ret7.i1246 to <8 x float>
Modified: llvm/trunk/test/CodeGen/X86/avx-unpack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-unpack.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-unpack.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-unpack.ll Fri Feb 27 15:17:42 2015
@@ -70,8 +70,8 @@ entry:
; CHECK: vunpckhps (%
define <8 x i32> @unpackhips2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtable readnone ssp {
entry:
- %a = load <8 x i32>* %src1
- %b = load <8 x i32>* %src2
+ %a = load <8 x i32>, <8 x i32>* %src1
+ %b = load <8 x i32>, <8 x i32>* %src2
%shuffle.i = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
ret <8 x i32> %shuffle.i
}
@@ -86,8 +86,8 @@ entry:
; CHECK: vunpckhpd (%
define <4 x i64> @unpackhipd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtable readnone ssp {
entry:
- %a = load <4 x i64>* %src1
- %b = load <4 x i64>* %src2
+ %a = load <4 x i64>, <4 x i64>* %src1
+ %b = load <4 x i64>, <4 x i64>* %src2
%shuffle.i = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
ret <4 x i64> %shuffle.i
}
@@ -102,8 +102,8 @@ entry:
; CHECK: vunpcklps (%
define <8 x i32> @unpacklops2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtable readnone ssp {
entry:
- %a = load <8 x i32>* %src1
- %b = load <8 x i32>* %src2
+ %a = load <8 x i32>, <8 x i32>* %src1
+ %b = load <8 x i32>, <8 x i32>* %src2
%shuffle.i = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
ret <8 x i32> %shuffle.i
}
@@ -118,8 +118,8 @@ entry:
; CHECK: vunpcklpd (%
define <4 x i64> @unpacklopd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtable readnone ssp {
entry:
- %a = load <4 x i64>* %src1
- %b = load <4 x i64>* %src2
+ %a = load <4 x i64>, <4 x i64>* %src1
+ %b = load <4 x i64>, <4 x i64>* %src2
%shuffle.i = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
ret <4 x i64> %shuffle.i
}
Modified: llvm/trunk/test/CodeGen/X86/avx-varargs-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-varargs-x86_64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-varargs-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-varargs-x86_64.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ declare i32 @f(i32, ...)
; CHECK: vmovaps %ymm0, (%rsp)
define void @test1() nounwind uwtable ssp {
entry:
- %0 = load <8 x float>* @x, align 32
+ %0 = load <8 x float>, <8 x float>* @x, align 32
%call = call i32 (i32, ...)* @f(i32 1, <8 x float> %0)
ret void
}
Modified: llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
; CHECK: vbroadcastsd (%
define <4 x i64> @A(i64* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i64* %ptr, align 8
+ %q = load i64, i64* %ptr, align 8
%vecinit.i = insertelement <4 x i64> undef, i64 %q, i32 0
%vecinit2.i = insertelement <4 x i64> %vecinit.i, i64 %q, i32 1
%vecinit4.i = insertelement <4 x i64> %vecinit2.i, i64 %q, i32 2
@@ -14,7 +14,7 @@ entry:
; CHECK: vbroadcastss (%
define <8 x i32> @B(i32* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i32* %ptr, align 4
+ %q = load i32, i32* %ptr, align 4
%vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
%vecinit2.i = insertelement <8 x i32> %vecinit.i, i32 %q, i32 1
%vecinit4.i = insertelement <8 x i32> %vecinit2.i, i32 %q, i32 2
@@ -25,7 +25,7 @@ entry:
; CHECK: vbroadcastsd (%
define <4 x double> @C(double* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load double* %ptr, align 8
+ %q = load double, double* %ptr, align 8
%vecinit.i = insertelement <4 x double> undef, double %q, i32 0
%vecinit2.i = insertelement <4 x double> %vecinit.i, double %q, i32 1
%vecinit4.i = insertelement <4 x double> %vecinit2.i, double %q, i32 2
@@ -36,7 +36,7 @@ entry:
; CHECK: vbroadcastss (%
define <8 x float> @D(float* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load float* %ptr, align 4
+ %q = load float, float* %ptr, align 4
%vecinit.i = insertelement <8 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <8 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <8 x float> %vecinit2.i, float %q, i32 2
@@ -49,7 +49,7 @@ entry:
; CHECK: vbroadcastss (%
define <4 x float> @e(float* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load float* %ptr, align 4
+ %q = load float, float* %ptr, align 4
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
@@ -73,7 +73,7 @@ define <4 x float> @_e2(float* %ptr) nou
; CHECK: vbroadcastss (%
define <4 x i32> @F(i32* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i32* %ptr, align 4
+ %q = load i32, i32* %ptr, align 4
%vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
%vecinit2.i = insertelement <4 x i32> %vecinit.i, i32 %q, i32 1
%vecinit4.i = insertelement <4 x i32> %vecinit2.i, i32 %q, i32 2
@@ -88,7 +88,7 @@ entry:
; CHECK: ret
define <2 x i64> @G(i64* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i64* %ptr, align 8
+ %q = load i64, i64* %ptr, align 8
%vecinit.i = insertelement <2 x i64> undef, i64 %q, i32 0
%vecinit2.i = insertelement <2 x i64> %vecinit.i, i64 %q, i32 1
ret <2 x i64> %vecinit2.i
@@ -107,7 +107,7 @@ define <4 x i32> @H(<4 x i32> %a) {
; CHECK: ret
define <2 x double> @I(double* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load double* %ptr, align 4
+ %q = load double, double* %ptr, align 4
%vecinit.i = insertelement <2 x double> undef, double %q, i32 0
%vecinit2.i = insertelement <2 x double> %vecinit.i, double %q, i32 1
ret <2 x double> %vecinit2.i
@@ -118,13 +118,13 @@ entry:
; CHECK: ret
define <4 x float> @_RR(float* %ptr, i32* %k) nounwind uwtable readnone ssp {
entry:
- %q = load float* %ptr, align 4
+ %q = load float, float* %ptr, align 4
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
%vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
; force a chain
- %j = load i32* %k, align 4
+ %j = load i32, i32* %k, align 4
store i32 %j, i32* undef
ret <4 x float> %vecinit6.i
}
@@ -135,7 +135,7 @@ entry:
; CHECK: ret
define <4 x float> @_RR2(float* %ptr, i32* %k) nounwind uwtable readnone ssp {
entry:
- %q = load float* %ptr, align 4
+ %q = load float, float* %ptr, align 4
%v = insertelement <4 x float> undef, float %q, i32 0
%t = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %t
@@ -151,7 +151,7 @@ entry:
; CHECK: vbroadcastss (%
; CHECK-NEXT: ret
define <8 x float> @splat_concat1(float* %p) {
- %1 = load float* %p, align 4
+ %1 = load float, float* %p, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
%3 = insertelement <4 x float> %2, float %1, i32 1
%4 = insertelement <4 x float> %3, float %1, i32 2
@@ -165,7 +165,7 @@ define <8 x float> @splat_concat1(float*
; CHECK: vbroadcastss (%
; CHECK-NEXT: ret
define <8 x float> @splat_concat2(float* %p) {
- %1 = load float* %p, align 4
+ %1 = load float, float* %p, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
%3 = insertelement <4 x float> %2, float %1, i32 1
%4 = insertelement <4 x float> %3, float %1, i32 2
@@ -183,7 +183,7 @@ define <8 x float> @splat_concat2(float*
; CHECK: vbroadcastsd (%
; CHECK-NEXT: ret
define <4 x double> @splat_concat3(double* %p) {
- %1 = load double* %p, align 8
+ %1 = load double, double* %p, align 8
%2 = insertelement <2 x double> undef, double %1, i32 0
%3 = insertelement <2 x double> %2, double %1, i32 1
%4 = shufflevector <2 x double> %3, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -195,7 +195,7 @@ define <4 x double> @splat_concat3(doubl
; CHECK: vbroadcastsd (%
; CHECK-NEXT: ret
define <4 x double> @splat_concat4(double* %p) {
- %1 = load double* %p, align 8
+ %1 = load double, double* %p, align 8
%2 = insertelement <2 x double> undef, double %1, i32 0
%3 = insertelement <2 x double> %2, double %1, i32 1
%4 = insertelement <2 x double> undef, double %1, i32 0
Modified: llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll Fri Feb 27 15:17:42 2015
@@ -112,7 +112,7 @@ define <8 x float> @vinsertf128_combine(
entry:
%add.ptr = getelementptr inbounds float, float* %f, i64 4
%0 = bitcast float* %add.ptr to <4 x float>*
- %1 = load <4 x float>* %0, align 16
+ %1 = load <4 x float>, <4 x float>* %0, align 16
%2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %1, i8 1)
ret <8 x float> %2
}
@@ -125,7 +125,7 @@ define <8 x float> @vinsertf128_ucombine
entry:
%add.ptr = getelementptr inbounds float, float* %f, i64 4
%0 = bitcast float* %add.ptr to <4 x float>*
- %1 = load <4 x float>* %0, align 8
+ %1 = load <4 x float>, <4 x float>* %0, align 8
%2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %1, i8 1)
ret <8 x float> %2
}
Modified: llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll Fri Feb 27 15:17:42 2015
@@ -160,8 +160,8 @@ define <16 x i16> @E5i(<16 x i16>* %a, <
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
entry:
- %c = load <16 x i16>* %a
- %d = load <16 x i16>* %b
+ %c = load <16 x i16>, <16 x i16>* %a
+ %d = load <16 x i16>, <16 x i16>* %b
%c2 = add <16 x i16> %c, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%shuffle = shufflevector <16 x i16> %c2, <16 x i16> %d, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <16 x i16> %shuffle
Modified: llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll Fri Feb 27 15:17:42 2015
@@ -24,7 +24,7 @@ entry:
; CHECK: _test01
define <8 x float> @test01(<4 x float> %a, <4 x float> %b, <8 x float> %c) nounwind uwtable ssp {
entry:
- %tmp = load <4 x float>* @x, align 16
+ %tmp = load <4 x float>, <4 x float>* @x, align 16
; CHECK: vzeroupper
; CHECK-NEXT: callq _do_sse
%call = tail call <4 x float> @do_sse(<4 x float> %tmp) nounwind
@@ -73,7 +73,7 @@ for.body:
%call5 = tail call <4 x float> @do_sse(<4 x float> %c.017) nounwind
; CHECK-NEXT: callq _do_sse
%call7 = tail call <4 x float> @do_sse(<4 x float> %call5) nounwind
- %tmp11 = load <8 x float>* @g, align 32
+ %tmp11 = load <8 x float>, <8 x float>* @g, align 32
%0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %tmp11, i8 1) nounwind
; CHECK: vzeroupper
; CHECK-NEXT: callq _do_sse
Modified: llvm/trunk/test/CodeGen/X86/avx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx.ll Fri Feb 27 15:17:42 2015
@@ -34,7 +34,7 @@ define <4 x float> @insertps_from_vector
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK-NEXT: ret
- %1 = load <4 x float>* %pb, align 16
+ %1 = load <4 x float>, <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
ret <4 x float> %2
}
@@ -48,7 +48,7 @@ define <4 x float> @insertps_from_vector
;; Try to match a bit more of the instr, since we need the load's offset.
; CHECK: insertps $96, 4(%{{...}}), %
; CHECK-NEXT: ret
- %1 = load <4 x float>* %pb, align 16
+ %1 = load <4 x float>, <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
ret <4 x float> %2
}
@@ -63,7 +63,7 @@ define <4 x float> @insertps_from_vector
; CHECK: vinsertps $192, 12(%{{...}},%{{...}}), %
; CHECK-NEXT: ret
%1 = getelementptr inbounds <4 x float>, <4 x float>* %pb, i64 %index
- %2 = load <4 x float>* %1, align 16
+ %2 = load <4 x float>, <4 x float>* %1, align 16
%3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
ret <4 x float> %3
}
@@ -77,7 +77,7 @@ define <4 x float> @insertps_from_broadc
; CHECK: insertps $48
; CHECK-NEXT: ret
%1 = getelementptr inbounds float, float* %fb, i64 %index
- %2 = load float* %1, align 4
+ %2 = load float, float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
%5 = insertelement <4 x float> %4, float %2, i32 2
@@ -93,7 +93,7 @@ define <4 x float> @insertps_from_broadc
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK-NEXT: ret
- %1 = load <4 x float>* %b, align 4
+ %1 = load <4 x float>, <4 x float>* %b, align 4
%2 = extractelement <4 x float> %1, i32 0
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
@@ -120,7 +120,7 @@ define <4 x float> @insertps_from_broadc
; CHECK: vaddps
; CHECK-NEXT: ret
%1 = getelementptr inbounds float, float* %fb, i64 %index
- %2 = load float* %1, align 4
+ %2 = load float, float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
%5 = insertelement <4 x float> %4, float %2, i32 2
Modified: llvm/trunk/test/CodeGen/X86/avx1-logical-load-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx1-logical-load-folding.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx1-logical-load-folding.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx1-logical-load-folding.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ target triple = "x86_64-apple-macosx10.9
; Function Attrs: nounwind ssp uwtable
define void @test1(float* %A, float* %C) #0 {
%tmp1 = bitcast float* %A to <8 x float>*
- %tmp2 = load <8 x float>* %tmp1, align 32
+ %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
%tmp4 = and <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
%tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
@@ -20,7 +20,7 @@ define void @test1(float* %A, float* %C)
; Function Attrs: nounwind ssp uwtable
define void @test2(float* %A, float* %C) #0 {
%tmp1 = bitcast float* %A to <8 x float>*
- %tmp2 = load <8 x float>* %tmp1, align 32
+ %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
%tmp4 = or <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
%tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
@@ -34,7 +34,7 @@ define void @test2(float* %A, float* %C)
; Function Attrs: nounwind ssp uwtable
define void @test3(float* %A, float* %C) #0 {
%tmp1 = bitcast float* %A to <8 x float>*
- %tmp2 = load <8 x float>* %tmp1, align 32
+ %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
%tmp4 = xor <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
%tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
@@ -47,7 +47,7 @@ define void @test3(float* %A, float* %C)
define void @test4(float* %A, float* %C) #0 {
%tmp1 = bitcast float* %A to <8 x float>*
- %tmp2 = load <8 x float>* %tmp1, align 32
+ %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
%tmp4 = xor <8 x i32> %tmp3, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%tmp5 = and <8 x i32> %tmp4, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
Modified: llvm/trunk/test/CodeGen/X86/avx2-conversions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-conversions.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-conversions.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-conversions.ll Fri Feb 27 15:17:42 2015
@@ -95,7 +95,7 @@ define <16 x i8> @trunc_16i16_16i8(<16 x
; CHECK: vpmovsxdq (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
- %X = load <4 x i32>* %ptr
+ %X = load <4 x i32>, <4 x i32>* %ptr
%Y = sext <4 x i32> %X to <4 x i64>
ret <4 x i64>%Y
}
@@ -104,7 +104,7 @@ define <4 x i64> @load_sext_test1(<4 x i
; CHECK: vpmovsxbq (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
- %X = load <4 x i8>* %ptr
+ %X = load <4 x i8>, <4 x i8>* %ptr
%Y = sext <4 x i8> %X to <4 x i64>
ret <4 x i64>%Y
}
@@ -113,7 +113,7 @@ define <4 x i64> @load_sext_test2(<4 x i
; CHECK: vpmovsxwq (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
- %X = load <4 x i16>* %ptr
+ %X = load <4 x i16>, <4 x i16>* %ptr
%Y = sext <4 x i16> %X to <4 x i64>
ret <4 x i64>%Y
}
@@ -122,7 +122,7 @@ define <4 x i64> @load_sext_test3(<4 x i
; CHECK: vpmovsxwd (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
- %X = load <8 x i16>* %ptr
+ %X = load <8 x i16>, <8 x i16>* %ptr
%Y = sext <8 x i16> %X to <8 x i32>
ret <8 x i32>%Y
}
@@ -131,7 +131,7 @@ define <8 x i32> @load_sext_test4(<8 x i
; CHECK: vpmovsxbd (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <8 x i32> @load_sext_test5(<8 x i8> *%ptr) {
- %X = load <8 x i8>* %ptr
+ %X = load <8 x i8>, <8 x i8>* %ptr
%Y = sext <8 x i8> %X to <8 x i32>
ret <8 x i32>%Y
}
Modified: llvm/trunk/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
define <16 x i16> @test_lvm_x86_avx2_pmovsxbw(<16 x i8>* %a) {
; CHECK-LABEL: test_lvm_x86_avx2_pmovsxbw
; CHECK: vpmovsxbw (%rdi), %ymm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8> %1)
ret <16 x i16> %2
}
@@ -11,7 +11,7 @@ define <16 x i16> @test_lvm_x86_avx2_pmo
define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbd
; CHECK: vpmovsxbd (%rdi), %ymm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8> %1)
ret <8 x i32> %2
}
@@ -19,7 +19,7 @@ define <8 x i32> @test_llvm_x86_avx2_pmo
define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbq
; CHECK: vpmovsxbq (%rdi), %ymm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8> %1)
ret <4 x i64> %2
}
@@ -27,7 +27,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwd
; CHECK: vpmovsxwd (%rdi), %ymm0
- %1 = load <8 x i16>* %a, align 1
+ %1 = load <8 x i16>, <8 x i16>* %a, align 1
%2 = call <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16> %1)
ret <8 x i32> %2
}
@@ -35,7 +35,7 @@ define <8 x i32> @test_llvm_x86_avx2_pmo
define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwq
; CHECK: vpmovsxwq (%rdi), %ymm0
- %1 = load <8 x i16>* %a, align 1
+ %1 = load <8 x i16>, <8 x i16>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16> %1)
ret <4 x i64> %2
}
@@ -43,7 +43,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(<4 x i32>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxdq
; CHECK: vpmovsxdq (%rdi), %ymm0
- %1 = load <4 x i32>* %a, align 1
+ %1 = load <4 x i32>, <4 x i32>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32> %1)
ret <4 x i64> %2
}
@@ -51,7 +51,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
define <16 x i16> @test_lvm_x86_avx2_pmovzxbw(<16 x i8>* %a) {
; CHECK-LABEL: test_lvm_x86_avx2_pmovzxbw
; CHECK: vpmovzxbw (%rdi), %ymm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8> %1)
ret <16 x i16> %2
}
@@ -59,7 +59,7 @@ define <16 x i16> @test_lvm_x86_avx2_pmo
define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbd
; CHECK: vpmovzxbd (%rdi), %ymm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8> %1)
ret <8 x i32> %2
}
@@ -67,7 +67,7 @@ define <8 x i32> @test_llvm_x86_avx2_pmo
define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbq
; CHECK: vpmovzxbq (%rdi), %ymm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8> %1)
ret <4 x i64> %2
}
@@ -75,7 +75,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwd
; CHECK: vpmovzxwd (%rdi), %ymm0
- %1 = load <8 x i16>* %a, align 1
+ %1 = load <8 x i16>, <8 x i16>* %a, align 1
%2 = call <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16> %1)
ret <8 x i32> %2
}
@@ -83,7 +83,7 @@ define <8 x i32> @test_llvm_x86_avx2_pmo
define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwq
; CHECK: vpmovzxwq (%rdi), %ymm0
- %1 = load <8 x i16>* %a, align 1
+ %1 = load <8 x i16>, <8 x i16>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16> %1)
ret <4 x i64> %2
}
@@ -91,7 +91,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
define <4 x i64> @test_llvm_x86_avx2_pmovzxdq(<4 x i32>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxdq
; CHECK: vpmovzxdq (%rdi), %ymm0
- %1 = load <4 x i32>* %a, align 1
+ %1 = load <4 x i32>, <4 x i32>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32> %1)
ret <4 x i64> %2
}
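The CHECK lines in this file assert that each load folds into the memory-operand form of the corresponding pmovsx/pmovzx instruction. The RUN line sits outside the visible hunks; a typical invocation for an AVX2 test like this one would be the following, stated here as an assumption about the file rather than a quote from it:

  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s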
Modified: llvm/trunk/test/CodeGen/X86/avx2-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-shift.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-shift.ll Fri Feb 27 15:17:42 2015
@@ -130,7 +130,7 @@ define <16 x i16> @vshift07(<16 x i16> %
; CHECK: vpsravd (%
; CHECK: ret
define <4 x i32> @variable_sra0_load(<4 x i32> %x, <4 x i32>* %y) {
- %y1 = load <4 x i32>* %y
+ %y1 = load <4 x i32>, <4 x i32>* %y
%k = ashr <4 x i32> %x, %y1
ret <4 x i32> %k
}
@@ -139,7 +139,7 @@ define <4 x i32> @variable_sra0_load(<4
; CHECK: vpsravd (%
; CHECK: ret
define <8 x i32> @variable_sra1_load(<8 x i32> %x, <8 x i32>* %y) {
- %y1 = load <8 x i32>* %y
+ %y1 = load <8 x i32>, <8 x i32>* %y
%k = ashr <8 x i32> %x, %y1
ret <8 x i32> %k
}
@@ -148,7 +148,7 @@ define <8 x i32> @variable_sra1_load(<8
; CHECK: vpsllvd (%
; CHECK: ret
define <4 x i32> @variable_shl0_load(<4 x i32> %x, <4 x i32>* %y) {
- %y1 = load <4 x i32>* %y
+ %y1 = load <4 x i32>, <4 x i32>* %y
%k = shl <4 x i32> %x, %y1
ret <4 x i32> %k
}
@@ -156,7 +156,7 @@ define <4 x i32> @variable_shl0_load(<4
; CHECK: vpsllvd (%
; CHECK: ret
define <8 x i32> @variable_shl1_load(<8 x i32> %x, <8 x i32>* %y) {
- %y1 = load <8 x i32>* %y
+ %y1 = load <8 x i32>, <8 x i32>* %y
%k = shl <8 x i32> %x, %y1
ret <8 x i32> %k
}
@@ -164,7 +164,7 @@ define <8 x i32> @variable_shl1_load(<8
; CHECK: vpsllvq (%
; CHECK: ret
define <2 x i64> @variable_shl2_load(<2 x i64> %x, <2 x i64>* %y) {
- %y1 = load <2 x i64>* %y
+ %y1 = load <2 x i64>, <2 x i64>* %y
%k = shl <2 x i64> %x, %y1
ret <2 x i64> %k
}
@@ -172,7 +172,7 @@ define <2 x i64> @variable_shl2_load(<2
; CHECK: vpsllvq (%
; CHECK: ret
define <4 x i64> @variable_shl3_load(<4 x i64> %x, <4 x i64>* %y) {
- %y1 = load <4 x i64>* %y
+ %y1 = load <4 x i64>, <4 x i64>* %y
%k = shl <4 x i64> %x, %y1
ret <4 x i64> %k
}
@@ -180,7 +180,7 @@ define <4 x i64> @variable_shl3_load(<4
; CHECK: vpsrlvd (%
; CHECK: ret
define <4 x i32> @variable_srl0_load(<4 x i32> %x, <4 x i32>* %y) {
- %y1 = load <4 x i32>* %y
+ %y1 = load <4 x i32>, <4 x i32>* %y
%k = lshr <4 x i32> %x, %y1
ret <4 x i32> %k
}
@@ -188,7 +188,7 @@ define <4 x i32> @variable_srl0_load(<4
; CHECK: vpsrlvd (%
; CHECK: ret
define <8 x i32> @variable_srl1_load(<8 x i32> %x, <8 x i32>* %y) {
- %y1 = load <8 x i32>* %y
+ %y1 = load <8 x i32>, <8 x i32>* %y
%k = lshr <8 x i32> %x, %y1
ret <8 x i32> %k
}
@@ -196,7 +196,7 @@ define <8 x i32> @variable_srl1_load(<8
; CHECK: vpsrlvq (%
; CHECK: ret
define <2 x i64> @variable_srl2_load(<2 x i64> %x, <2 x i64>* %y) {
- %y1 = load <2 x i64>* %y
+ %y1 = load <2 x i64>, <2 x i64>* %y
%k = lshr <2 x i64> %x, %y1
ret <2 x i64> %k
}
@@ -204,7 +204,7 @@ define <2 x i64> @variable_srl2_load(<2
; CHECK: vpsrlvq (%
; CHECK: ret
define <4 x i64> @variable_srl3_load(<4 x i64> %x, <4 x i64>* %y) {
- %y1 = load <4 x i64>* %y
+ %y1 = load <4 x i64>, <4 x i64>* %y
%k = lshr <4 x i64> %x, %y1
ret <4 x i64> %k
}
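avx2-shift.ll follows the same recipe throughout: load the per-element shift amounts, apply shl/lshr/ashr, and expect the AVX2 variable-shift instruction (vpsllvd, vpsrlvd, vpsravd, vpsllvq, vpsrlvq) to take its second operand straight from memory. A representative case in the updated syntax, sketched rather than copied from the commit:

  define <8 x i32> @sketch_variable_shl(<8 x i32> %x, <8 x i32>* %amt) {
    %a = load <8 x i32>, <8 x i32>* %amt  ; expected to fold into vpsllvd (%rdi)
    %r = shl <8 x i32> %x, %a
    ret <8 x i32> %r
  }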
Modified: llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
; CHECK: vpbroadcastb (%
define <16 x i8> @BB16(i8* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i8* %ptr, align 4
+ %q = load i8, i8* %ptr, align 4
%q0 = insertelement <16 x i8> undef, i8 %q, i32 0
%q1 = insertelement <16 x i8> %q0, i8 %q, i32 1
%q2 = insertelement <16 x i8> %q1, i8 %q, i32 2
@@ -25,7 +25,7 @@ entry:
; CHECK: vpbroadcastb (%
define <32 x i8> @BB32(i8* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i8* %ptr, align 4
+ %q = load i8, i8* %ptr, align 4
%q0 = insertelement <32 x i8> undef, i8 %q, i32 0
%q1 = insertelement <32 x i8> %q0, i8 %q, i32 1
%q2 = insertelement <32 x i8> %q1, i8 %q, i32 2
@@ -65,7 +65,7 @@ entry:
define <8 x i16> @W16(i16* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i16* %ptr, align 4
+ %q = load i16, i16* %ptr, align 4
%q0 = insertelement <8 x i16> undef, i16 %q, i32 0
%q1 = insertelement <8 x i16> %q0, i16 %q, i32 1
%q2 = insertelement <8 x i16> %q1, i16 %q, i32 2
@@ -79,7 +79,7 @@ entry:
; CHECK: vpbroadcastw (%
define <16 x i16> @WW16(i16* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i16* %ptr, align 4
+ %q = load i16, i16* %ptr, align 4
%q0 = insertelement <16 x i16> undef, i16 %q, i32 0
%q1 = insertelement <16 x i16> %q0, i16 %q, i32 1
%q2 = insertelement <16 x i16> %q1, i16 %q, i32 2
@@ -101,7 +101,7 @@ entry:
; CHECK: vbroadcastss (%
define <4 x i32> @D32(i32* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i32* %ptr, align 4
+ %q = load i32, i32* %ptr, align 4
%q0 = insertelement <4 x i32> undef, i32 %q, i32 0
%q1 = insertelement <4 x i32> %q0, i32 %q, i32 1
%q2 = insertelement <4 x i32> %q1, i32 %q, i32 2
@@ -111,7 +111,7 @@ entry:
; CHECK: vbroadcastss (%
define <8 x i32> @DD32(i32* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i32* %ptr, align 4
+ %q = load i32, i32* %ptr, align 4
%q0 = insertelement <8 x i32> undef, i32 %q, i32 0
%q1 = insertelement <8 x i32> %q0, i32 %q, i32 1
%q2 = insertelement <8 x i32> %q1, i32 %q, i32 2
@@ -125,7 +125,7 @@ entry:
; CHECK: vpbroadcastq (%
define <2 x i64> @Q64(i64* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i64* %ptr, align 4
+ %q = load i64, i64* %ptr, align 4
%q0 = insertelement <2 x i64> undef, i64 %q, i32 0
%q1 = insertelement <2 x i64> %q0, i64 %q, i32 1
ret <2 x i64> %q1
@@ -133,7 +133,7 @@ entry:
; CHECK: vbroadcastsd (%
define <4 x i64> @QQ64(i64* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i64* %ptr, align 4
+ %q = load i64, i64* %ptr, align 4
%q0 = insertelement <4 x i64> undef, i64 %q, i32 0
%q1 = insertelement <4 x i64> %q0, i64 %q, i32 1
%q2 = insertelement <4 x i64> %q1, i64 %q, i32 2
@@ -145,7 +145,7 @@ entry:
; this used to crash
define <2 x double> @I(double* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load double* %ptr, align 4
+ %q = load double, double* %ptr, align 4
%vecinit.i = insertelement <2 x double> undef, double %q, i32 0
%vecinit2.i = insertelement <2 x double> %vecinit.i, double %q, i32 1
ret <2 x double> %vecinit2.i
@@ -431,8 +431,8 @@ eintry:
%__b.addr.i = alloca <2 x i64>, align 16
%vCr = alloca <2 x i64>, align 16
store <2 x i64> zeroinitializer, <2 x i64>* %vCr, align 16
- %tmp = load <2 x i64>* %vCr, align 16
- %tmp2 = load i8* %cV_R.addr, align 4
+ %tmp = load <2 x i64>, <2 x i64>* %vCr, align 16
+ %tmp2 = load i8, i8* %cV_R.addr, align 4
%splat.splatinsert = insertelement <16 x i8> undef, i8 %tmp2, i32 0
%splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
%tmp3 = bitcast <16 x i8> %splat.splat to <2 x i64>
@@ -450,8 +450,8 @@ eintry:
%__b.addr.i = alloca <4 x i64>, align 16
%vCr = alloca <4 x i64>, align 16
store <4 x i64> zeroinitializer, <4 x i64>* %vCr, align 16
- %tmp = load <4 x i64>* %vCr, align 16
- %tmp2 = load i8* %cV_R.addr, align 4
+ %tmp = load <4 x i64>, <4 x i64>* %vCr, align 16
+ %tmp2 = load i8, i8* %cV_R.addr, align 4
%splat.splatinsert = insertelement <32 x i8> undef, i8 %tmp2, i32 0
%splat.splat = shufflevector <32 x i8> %splat.splatinsert, <32 x i8> undef, <32 x i32> zeroinitializer
%tmp3 = bitcast <32 x i8> %splat.splat to <4 x i64>
@@ -469,8 +469,8 @@ entry:
%__b.addr.i = alloca <2 x i64>, align 16
%vCr = alloca <2 x i64>, align 16
store <2 x i64> zeroinitializer, <2 x i64>* %vCr, align 16
- %tmp = load <2 x i64>* %vCr, align 16
- %tmp2 = load i16* %cV_R.addr, align 4
+ %tmp = load <2 x i64>, <2 x i64>* %vCr, align 16
+ %tmp2 = load i16, i16* %cV_R.addr, align 4
%splat.splatinsert = insertelement <8 x i16> undef, i16 %tmp2, i32 0
%splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
%tmp3 = bitcast <8 x i16> %splat.splat to <2 x i64>
@@ -488,8 +488,8 @@ eintry:
%__b.addr.i = alloca <4 x i64>, align 16
%vCr = alloca <4 x i64>, align 16
store <4 x i64> zeroinitializer, <4 x i64>* %vCr, align 16
- %tmp = load <4 x i64>* %vCr, align 16
- %tmp2 = load i16* %cV_R.addr, align 4
+ %tmp = load <4 x i64>, <4 x i64>* %vCr, align 16
+ %tmp2 = load i16, i16* %cV_R.addr, align 4
%splat.splatinsert = insertelement <16 x i16> undef, i16 %tmp2, i32 0
%splat.splat = shufflevector <16 x i16> %splat.splatinsert, <16 x i16> undef, <16 x i32> zeroinitializer
%tmp3 = bitcast <16 x i16> %splat.splat to <4 x i64>
@@ -507,8 +507,8 @@ entry:
%__b.addr.i = alloca <2 x i64>, align 16
%vCr = alloca <2 x i64>, align 16
store <2 x i64> zeroinitializer, <2 x i64>* %vCr, align 16
- %tmp = load <2 x i64>* %vCr, align 16
- %tmp2 = load i32* %cV_R.addr, align 4
+ %tmp = load <2 x i64>, <2 x i64>* %vCr, align 16
+ %tmp2 = load i32, i32* %cV_R.addr, align 4
%splat.splatinsert = insertelement <4 x i32> undef, i32 %tmp2, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
%tmp3 = bitcast <4 x i32> %splat.splat to <2 x i64>
@@ -526,8 +526,8 @@ eintry:
%__b.addr.i = alloca <4 x i64>, align 16
%vCr = alloca <4 x i64>, align 16
store <4 x i64> zeroinitializer, <4 x i64>* %vCr, align 16
- %tmp = load <4 x i64>* %vCr, align 16
- %tmp2 = load i32* %cV_R.addr, align 4
+ %tmp = load <4 x i64>, <4 x i64>* %vCr, align 16
+ %tmp2 = load i32, i32* %cV_R.addr, align 4
%splat.splatinsert = insertelement <8 x i32> undef, i32 %tmp2, i32 0
%splat.splat = shufflevector <8 x i32> %splat.splatinsert, <8 x i32> undef, <8 x i32> zeroinitializer
%tmp3 = bitcast <8 x i32> %splat.splat to <4 x i64>
@@ -545,8 +545,8 @@ entry:
%__b.addr.i = alloca <2 x i64>, align 16
%vCr = alloca <2 x i64>, align 16
store <2 x i64> zeroinitializer, <2 x i64>* %vCr, align 16
- %tmp = load <2 x i64>* %vCr, align 16
- %tmp2 = load i64* %cV_R.addr, align 4
+ %tmp = load <2 x i64>, <2 x i64>* %vCr, align 16
+ %tmp2 = load i64, i64* %cV_R.addr, align 4
%splat.splatinsert = insertelement <2 x i64> undef, i64 %tmp2, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
store <2 x i64> %tmp, <2 x i64>* %__a.addr.i, align 16
@@ -563,8 +563,8 @@ eintry:
%__b.addr.i = alloca <4 x i64>, align 16
%vCr = alloca <4 x i64>, align 16
store <4 x i64> zeroinitializer, <4 x i64>* %vCr, align 16
- %tmp = load <4 x i64>* %vCr, align 16
- %tmp2 = load i64* %cV_R.addr, align 4
+ %tmp = load <4 x i64>, <4 x i64>* %vCr, align 16
+ %tmp2 = load i64, i64* %cV_R.addr, align 4
%splat.splatinsert = insertelement <4 x i64> undef, i64 %tmp2, i32 0
%splat.splat = shufflevector <4 x i64> %splat.splatinsert, <4 x i64> undef, <4 x i32> zeroinitializer
store <4 x i64> %tmp, <4 x i64>* %__a.addr.i, align 16
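The vbroadcast tests all build a splat out of a scalar load, either through a long insertelement chain or through insertelement plus shufflevector, and expect a single vpbroadcast*/vbroadcasts* from memory. The shufflevector form of the idiom in the new syntax looks like this (an illustrative sketch, not one of the committed functions):

  define <8 x i32> @sketch_splat_load(i32* %p) {
    %s = load i32, i32* %p, align 4
    %v0 = insertelement <8 x i32> undef, i32 %s, i32 0
    ; splat lane 0 across all lanes; expected: vbroadcastss (%rdi), %ymm0
    %v = shufflevector <8 x i32> %v0, <8 x i32> undef, <8 x i32> zeroinitializer
    ret <8 x i32> %v
  }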
Modified: llvm/trunk/test/CodeGen/X86/avx512-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-arith.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-arith.ll Fri Feb 27 15:17:42 2015
@@ -56,7 +56,7 @@ define <8 x double> @subpd512fold(<8 x d
; CHECK-NEXT: vsubpd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
- %tmp2 = load <8 x double>* %x, align 8
+ %tmp2 = load <8 x double>, <8 x double>* %x, align 8
%sub.i = fsub <8 x double> %y, %tmp2
ret <8 x double> %sub.i
}
@@ -77,7 +77,7 @@ define <16 x float> @subps512fold(<16 x
; CHECK-NEXT: vsubps (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
- %tmp2 = load <16 x float>* %x, align 4
+ %tmp2 = load <16 x float>, <16 x float>* %x, align 4
%sub.i = fsub <16 x float> %y, %tmp2
ret <16 x float> %sub.i
}
@@ -193,7 +193,7 @@ define <8 x i64> @vpaddq_fold_test(<8 x
; CHECK: ## BB#0:
; CHECK-NEXT: vpaddq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
- %tmp = load <8 x i64>* %j, align 4
+ %tmp = load <8 x i64>, <8 x i64>* %j, align 4
%x = add <8 x i64> %i, %tmp
ret <8 x i64> %x
}
@@ -212,7 +212,7 @@ define <8 x i64> @vpaddq_broadcast2_test
; CHECK: ## BB#0:
; CHECK-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
- %tmp = load i64* %j
+ %tmp = load i64, i64* %j
%j.0 = insertelement <8 x i64> undef, i64 %tmp, i32 0
%j.1 = insertelement <8 x i64> %j.0, i64 %tmp, i32 1
%j.2 = insertelement <8 x i64> %j.1, i64 %tmp, i32 2
@@ -239,7 +239,7 @@ define <16 x i32> @vpaddd_fold_test(<16
; CHECK: ## BB#0:
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
- %tmp = load <16 x i32>* %j, align 4
+ %tmp = load <16 x i32>, <16 x i32>* %j, align 4
%x = add <16 x i32> %i, %tmp
ret <16 x i32> %x
}
@@ -287,7 +287,7 @@ define <16 x i32> @vpaddd_mask_fold_test
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
- %j = load <16 x i32>* %j.ptr
+ %j = load <16 x i32>, <16 x i32>* %j.ptr
%x = add <16 x i32> %i, %j
%r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %i
ret <16 x i32> %r
@@ -314,7 +314,7 @@ define <16 x i32> @vpaddd_maskz_fold_tes
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
- %j = load <16 x i32>* %j.ptr
+ %j = load <16 x i32>, <16 x i32>* %j.ptr
%x = add <16 x i32> %i, %j
%r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> zeroinitializer
ret <16 x i32> %r
@@ -445,7 +445,7 @@ define <16 x i32> @andd512fold(<16 x i32
; CHECK-NEXT: vpandd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
- %a = load <16 x i32>* %x, align 4
+ %a = load <16 x i32>, <16 x i32>* %x, align 4
%b = and <16 x i32> %y, %a
ret <16 x i32> %b
}
@@ -456,7 +456,7 @@ define <8 x i64> @andqbrst(<8 x i64> %p1
; CHECK-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
- %a = load i64* %ap, align 8
+ %a = load i64, i64* %ap, align 8
%b = insertelement <8 x i64> undef, i64 %a, i32 0
%c = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
%d = and <8 x i64> %p1, %c
@@ -593,7 +593,7 @@ define <8 x double> @test_mask_fold_vadd
<8 x double>* %j, <8 x i64> %mask1)
nounwind {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
- %tmp = load <8 x double>* %j, align 8
+ %tmp = load <8 x double>, <8 x double>* %j, align 8
%x = fadd <8 x double> %i, %tmp
%r = select <8 x i1> %mask, <8 x double> %x, <8 x double> %dst
ret <8 x double> %r
@@ -605,7 +605,7 @@ define <8 x double> @test_mask_fold_vadd
define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j,
<8 x i64> %mask1) nounwind {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
- %tmp = load <8 x double>* %j, align 8
+ %tmp = load <8 x double>, <8 x double>* %j, align 8
%x = fadd <8 x double> %i, %tmp
%r = select <8 x i1> %mask, <8 x double> %x, <8 x double> zeroinitializer
ret <8 x double> %r
@@ -615,7 +615,7 @@ define <8 x double> @test_maskz_fold_vad
; CHECK: vaddpd (%rdi){1to8}, %zmm{{.*}}
; CHECK: ret
define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind {
- %tmp = load double* %j
+ %tmp = load double, double* %j
%b = insertelement <8 x double> undef, double %tmp, i32 0
%c = shufflevector <8 x double> %b, <8 x double> undef,
<8 x i32> zeroinitializer
@@ -629,7 +629,7 @@ define <8 x double> @test_broadcast_vadd
define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double> %i,
double* %j, <8 x i64> %mask1) nounwind {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
- %tmp = load double* %j
+ %tmp = load double, double* %j
%b = insertelement <8 x double> undef, double %tmp, i32 0
%c = shufflevector <8 x double> %b, <8 x double> undef,
<8 x i32> zeroinitializer
@@ -644,7 +644,7 @@ define <8 x double> @test_mask_broadcast
define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
<8 x i64> %mask1) nounwind {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
- %tmp = load double* %j
+ %tmp = load double, double* %j
%b = insertelement <8 x double> undef, double %tmp, i32 0
%c = shufflevector <8 x double> %b, <8 x double> undef,
<8 x i32> zeroinitializer
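Several of the avx512-arith hunks exercise the embedded-broadcast operand form, e.g. vpaddq (%rdi){1to8}: a scalar load splatted across the vector folds into the arithmetic instruction itself as a broadcast memory operand. In the new load syntax the pattern is (sketch only; the commit's own versions are in the hunks above):

  define <8 x i64> @sketch_add_bcast(<8 x i64> %x, i64* %p) {
    %s = load i64, i64* %p
    %b = insertelement <8 x i64> undef, i64 %s, i32 0
    %c = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
    ; expected: vpaddq (%rdi){1to8}, %zmm0, %zmm0
    %r = add <8 x i64> %x, %c
    ret <8 x i64> %r
  }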
Modified: llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define <16 x i32> @test1(i32* %x) {
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7]
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; CHECK-NEXT: retq
- %y = load i32* %x, align 4
+ %y = load i32, i32* %x, align 4
%res = insertelement <16 x i32>zeroinitializer, i32 %y, i32 4
ret <16 x i32>%res
}
Modified: llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-cvt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-cvt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-cvt.ll Fri Feb 27 15:17:42 2015
@@ -87,7 +87,7 @@ define <8 x double> @fpext00(<8 x float>
; CHECK: ret
define double @funcA(i64* nocapture %e) {
entry:
- %tmp1 = load i64* %e, align 8
+ %tmp1 = load i64, i64* %e, align 8
%conv = sitofp i64 %tmp1 to double
ret double %conv
}
@@ -97,7 +97,7 @@ entry:
; CHECK: ret
define double @funcB(i32* %e) {
entry:
- %tmp1 = load i32* %e, align 4
+ %tmp1 = load i32, i32* %e, align 4
%conv = sitofp i32 %tmp1 to double
ret double %conv
}
@@ -107,7 +107,7 @@ entry:
; CHECK: ret
define float @funcC(i32* %e) {
entry:
- %tmp1 = load i32* %e, align 4
+ %tmp1 = load i32, i32* %e, align 4
%conv = sitofp i32 %tmp1 to float
ret float %conv
}
@@ -117,7 +117,7 @@ entry:
; CHECK: ret
define float @i64tof32(i64* %e) {
entry:
- %tmp1 = load i64* %e, align 8
+ %tmp1 = load i64, i64* %e, align 8
%conv = sitofp i64 %tmp1 to float
ret float %conv
}
@@ -129,7 +129,7 @@ define void @fpext() {
entry:
%f = alloca float, align 4
%d = alloca double, align 8
- %tmp = load float* %f, align 4
+ %tmp = load float, float* %f, align 4
%conv = fpext float %tmp to double
store double %conv, double* %d, align 8
ret void
@@ -144,7 +144,7 @@ define void @fpround_scalar() nounwind u
entry:
%f = alloca float, align 4
%d = alloca double, align 8
- %tmp = load double* %d, align 8
+ %tmp = load double, double* %d, align 8
%conv = fptrunc double %tmp to float
store float %conv, float* %f, align 4
ret void
Modified: llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll Fri Feb 27 15:17:42 2015
@@ -170,7 +170,7 @@ define <8 x float> @gather_mask_qps_exec
;CHECK: vscatterdpd
;CHECK: ret
define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
- %x = load <8 x double>* %src, align 64
+ %x = load <8 x double>, <8 x double>* %src, align 64
call void @llvm.x86.avx512.scatter.dpd.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind, <8 x double> %x, i32 4)
ret void
}
@@ -180,7 +180,7 @@ define void @scatter_mask_dpd_execdomain
;CHECK: vscatterqpd
;CHECK: ret
define void @scatter_mask_qpd_execdomain(<8 x i64> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
- %x = load <8 x double>* %src, align 64
+ %x = load <8 x double>, <8 x double>* %src, align 64
call void @llvm.x86.avx512.scatter.qpd.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind, <8 x double> %x, i32 4)
ret void
}
@@ -190,7 +190,7 @@ define void @scatter_mask_qpd_execdomain
;CHECK: vscatterdps
;CHECK: ret
define void @scatter_mask_dps_execdomain(<16 x i32> %ind, <16 x float>* %src, i16 %mask, i8* %base, i8* %stbuf) {
- %x = load <16 x float>* %src, align 64
+ %x = load <16 x float>, <16 x float>* %src, align 64
call void @llvm.x86.avx512.scatter.dps.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind, <16 x float> %x, i32 4)
ret void
}
@@ -200,7 +200,7 @@ define void @scatter_mask_dps_execdomain
;CHECK: vscatterqps
;CHECK: ret
define void @scatter_mask_qps_execdomain(<8 x i64> %ind, <8 x float>* %src, i8 %mask, i8* %base, i8* %stbuf) {
- %x = load <8 x float>* %src, align 32
+ %x = load <8 x float>, <8 x float>* %src, align 32
call void @llvm.x86.avx512.scatter.qps.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind, <8 x float> %x, i32 4)
ret void
}
Modified: llvm/trunk/test/CodeGen/X86/avx512-i1test.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-i1test.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-i1test.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-i1test.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ bb56:
br label %bb33
bb33: ; preds = %bb51, %bb56
- %r111 = load i64* undef, align 8
+ %r111 = load i64, i64* undef, align 8
br i1 undef, label %bb51, label %bb35
bb35: ; preds = %bb33
Modified: llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
;CHECK: vinsertf32x4
;CHECK: ret
define <16 x float> @test1(<16 x float> %x, float* %br, float %y) nounwind {
- %rrr = load float* %br
+ %rrr = load float, float* %br
%rrr2 = insertelement <16 x float> %x, float %rrr, i32 1
%rrr3 = insertelement <16 x float> %rrr2, float %y, i32 14
ret <16 x float> %rrr3
@@ -20,7 +20,7 @@ define <16 x float> @test1(<16 x float>
;SKX: vinsertf64x2 $3
;CHECK: ret
define <8 x double> @test2(<8 x double> %x, double* %br, double %y) nounwind {
- %rrr = load double* %br
+ %rrr = load double, double* %br
%rrr2 = insertelement <8 x double> %x, double %rrr, i32 1
%rrr3 = insertelement <8 x double> %rrr2, double %y, i32 6
ret <8 x double> %rrr3
@@ -171,7 +171,7 @@ define i64 @test14(<8 x i64>%a, <8 x i64
;CHECK: kmovw
;CHECK: ret
define i16 @test15(i1 *%addr) {
- %x = load i1 * %addr, align 128
+ %x = load i1 , i1 * %addr, align 128
%x1 = insertelement <16 x i1> undef, i1 %x, i32 10
%x2 = bitcast <16 x i1>%x1 to i16
ret i16 %x2
@@ -183,7 +183,7 @@ define i16 @test15(i1 *%addr) {
;CHECK: korw
;CHECK: ret
define i16 @test16(i1 *%addr, i16 %a) {
- %x = load i1 * %addr, align 128
+ %x = load i1 , i1 * %addr, align 128
%a1 = bitcast i16 %a to <16 x i1>
%x1 = insertelement <16 x i1> %a1, i1 %x, i32 10
%x2 = bitcast <16 x i1>%x1 to i16
@@ -199,7 +199,7 @@ define i16 @test16(i1 *%addr, i16 %a) {
;SKX: korb
;CHECK: ret
define i8 @test17(i1 *%addr, i8 %a) {
- %x = load i1 * %addr, align 128
+ %x = load i1 , i1 * %addr, align 128
%a1 = bitcast i8 %a to <8 x i1>
%x1 = insertelement <8 x i1> %a1, i1 %x, i32 4
%x2 = bitcast <8 x i1>%x1 to i8
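Note the committed spelling in tests 15-17, load i1 , i1 * %addr: the extra whitespace around the comma and the star is harmless, since the .ll parser is whitespace-tolerant, and the canonical printer would emit load i1, i1* %addr. The shape of those tests, as a compact sketch:

  define i16 @sketch_insert_i1(i1* %addr) {
    %x = load i1, i1* %addr, align 128
    %v = insertelement <16 x i1> undef, i1 %x, i32 10
    %r = bitcast <16 x i1> %v to i16
    ret i16 %r
  }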
Modified: llvm/trunk/test/CodeGen/X86/avx512-intel-ocl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intel-ocl.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intel-ocl.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intel-ocl.ll Fri Feb 27 15:17:42 2015
@@ -30,7 +30,7 @@ define <16 x float> @testf16_inp(<16 x f
%y = alloca <16 x float>, align 16
%x = fadd <16 x float> %a, %b
%1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
- %2 = load <16 x float>* %y, align 16
+ %2 = load <16 x float>, <16 x float>* %y, align 16
%3 = fadd <16 x float> %2, %1
ret <16 x float> %3
}
@@ -53,7 +53,7 @@ define <16 x float> @testf16_regs(<16 x
%y = alloca <16 x float>, align 16
%x = fadd <16 x float> %a, %b
%1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
- %2 = load <16 x float>* %y, align 16
+ %2 = load <16 x float>, <16 x float>* %y, align 16
%3 = fadd <16 x float> %1, %b
%4 = fadd <16 x float> %2, %3
ret <16 x float> %4
Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll Fri Feb 27 15:17:42 2015
@@ -356,7 +356,7 @@ define <8 x double> @test_x86_mask_blend
define <8 x double> @test_x86_mask_blend_pd_512_memop(<8 x double> %a, <8 x double>* %ptr, i8 %mask) {
; CHECK-LABEL: test_x86_mask_blend_pd_512_memop
; CHECK: vblendmpd (%
- %b = load <8 x double>* %ptr
+ %b = load <8 x double>, <8 x double>* %ptr
%res = call <8 x double> @llvm.x86.avx512.mask.blend.pd.512(<8 x double> %a, <8 x double> %b, i8 %mask) ; <<8 x double>> [#uses=1]
ret <8 x double> %res
}
@@ -1435,7 +1435,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
define <8 x i64> @test_x86_avx512_psrlv_q_memop(<8 x i64> %a0, <8 x i64>* %ptr) {
; CHECK-LABEL: test_x86_avx512_psrlv_q_memop
; CHECK: vpsrlvq (%
- %b = load <8 x i64>* %ptr
+ %b = load <8 x i64>, <8 x i64>* %ptr
%res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
ret <8 x i64> %res
}
Modified: llvm/trunk/test/CodeGen/X86/avx512-logic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-logic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-logic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-logic.ll Fri Feb 27 15:17:42 2015
@@ -83,7 +83,7 @@ define <8 x i64> @orq_broadcast(<8 x i64
; CHECK: ret
define <16 x i32> @andd512fold(<16 x i32> %y, <16 x i32>* %x) {
entry:
- %a = load <16 x i32>* %x, align 4
+ %a = load <16 x i32>, <16 x i32>* %x, align 4
%b = and <16 x i32> %y, %a
ret <16 x i32> %b
}
@@ -93,7 +93,7 @@ entry:
; CHECK: ret
define <8 x i64> @andqbrst(<8 x i64> %p1, i64* %ap) {
entry:
- %a = load i64* %ap, align 8
+ %a = load i64, i64* %ap, align 8
%b = insertelement <8 x i64> undef, i64 %a, i32 0
%c = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
%d = and <8 x i64> %p1, %c
Modified: llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll Fri Feb 27 15:17:42 2015
@@ -34,7 +34,7 @@ define i8 @mask8(i8 %x) {
; CHECK: ret
define void @mask16_mem(i16* %ptr) {
- %x = load i16* %ptr, align 4
+ %x = load i16, i16* %ptr, align 4
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%ret = bitcast <16 x i1> %m1 to i16
@@ -51,7 +51,7 @@ define void @mask16_mem(i16* %ptr) {
; SKX-NEXT: kmovb %k{{[0-7]}}, ([[ARG1]])
define void @mask8_mem(i8* %ptr) {
- %x = load i8* %ptr, align 4
+ %x = load i8, i8* %ptr, align 4
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%ret = bitcast <8 x i1> %m1 to i8
@@ -128,7 +128,7 @@ entry:
%maskPtr = alloca <8 x i1>
store <8 x i1> <i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <8 x i1>* %maskPtr
- %mask = load <8 x i1>* %maskPtr
+ %mask = load <8 x i1>, <8 x i1>* %maskPtr
%mask_convert = bitcast <8 x i1> %mask to i8
ret i8 %mask_convert
}
\ No newline at end of file
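The mask-op tests round-trip an integer through an i1 vector: load, bitcast to <N x i1>, invert with xor against an all-ones vector, bitcast back (the store of the result falls outside the visible hunks). The 16-bit case in the new syntax, sketched for reference:

  define i16 @sketch_mask16(i16* %p) {
    %x = load i16, i16* %p, align 4
    %m0 = bitcast i16 %x to <16 x i1>
    %m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1,
                              i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
    %r = bitcast <16 x i1> %m1 to i16
    ret i16 %r
  }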
Modified: llvm/trunk/test/CodeGen/X86/avx512-mov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-mov.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-mov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-mov.ll Fri Feb 27 15:17:42 2015
@@ -28,7 +28,7 @@ define <2 x i64> @test3(i64 %x) {
; CHECK: vmovd (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x i32> @test4(i32* %x) {
- %y = load i32* %x
+ %y = load i32, i32* %x
%res = insertelement <4 x i32>undef, i32 %y, i32 0
ret <4 x i32>%res
}
@@ -53,7 +53,7 @@ define void @test6(double %x, double* %y
; CHECK: vmovss (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define float @test7(i32* %x) {
- %y = load i32* %x
+ %y = load i32, i32* %x
%res = bitcast i32 %y to float
ret float %res
}
@@ -78,7 +78,7 @@ define i64 @test9(<2 x i64> %x) {
; CHECK: vmovd (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x i32> @test10(i32* %x) {
- %y = load i32* %x, align 4
+ %y = load i32, i32* %x, align 4
%res = insertelement <4 x i32>zeroinitializer, i32 %y, i32 0
ret <4 x i32>%res
}
@@ -87,7 +87,7 @@ define <4 x i32> @test10(i32* %x) {
; CHECK: vmovss (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x float> @test11(float* %x) {
- %y = load float* %x, align 4
+ %y = load float, float* %x, align 4
%res = insertelement <4 x float>zeroinitializer, float %y, i32 0
ret <4 x float>%res
}
@@ -96,7 +96,7 @@ define <4 x float> @test11(float* %x) {
; CHECK: vmovsd (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <2 x double> @test12(double* %x) {
- %y = load double* %x, align 8
+ %y = load double, double* %x, align 8
%res = insertelement <2 x double>zeroinitializer, double %y, i32 0
ret <2 x double>%res
}
@@ -121,7 +121,7 @@ define <4 x i32> @test14(i32 %x) {
; CHECK: vmovd (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x i32> @test15(i32* %x) {
- %y = load i32* %x, align 4
+ %y = load i32, i32* %x, align 4
%res = insertelement <4 x i32>zeroinitializer, i32 %y, i32 0
ret <4 x i32>%res
}
@@ -131,7 +131,7 @@ define <4 x i32> @test15(i32* %x) {
; CHECK: ret
define <16 x i32> @test16(i8 * %addr) {
%vaddr = bitcast i8* %addr to <16 x i32>*
- %res = load <16 x i32>* %vaddr, align 1
+ %res = load <16 x i32>, <16 x i32>* %vaddr, align 1
ret <16 x i32>%res
}
@@ -140,7 +140,7 @@ define <16 x i32> @test16(i8 * %addr) {
; CHECK: ret
define <16 x i32> @test17(i8 * %addr) {
%vaddr = bitcast i8* %addr to <16 x i32>*
- %res = load <16 x i32>* %vaddr, align 64
+ %res = load <16 x i32>, <16 x i32>* %vaddr, align 64
ret <16 x i32>%res
}
@@ -176,7 +176,7 @@ define void @test20(i8 * %addr, <16 x i3
; CHECK: ret
define <8 x i64> @test21(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x i64>*
- %res = load <8 x i64>* %vaddr, align 64
+ %res = load <8 x i64>, <8 x i64>* %vaddr, align 64
ret <8 x i64>%res
}
@@ -194,7 +194,7 @@ define void @test22(i8 * %addr, <8 x i64
; CHECK: ret
define <8 x i64> @test23(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x i64>*
- %res = load <8 x i64>* %vaddr, align 1
+ %res = load <8 x i64>, <8 x i64>* %vaddr, align 1
ret <8 x i64>%res
}
@@ -212,7 +212,7 @@ define void @test24(i8 * %addr, <8 x dou
; CHECK: ret
define <8 x double> @test25(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x double>*
- %res = load <8 x double>* %vaddr, align 64
+ %res = load <8 x double>, <8 x double>* %vaddr, align 64
ret <8 x double>%res
}
@@ -230,7 +230,7 @@ define void @test26(i8 * %addr, <16 x fl
; CHECK: ret
define <16 x float> @test27(i8 * %addr) {
%vaddr = bitcast i8* %addr to <16 x float>*
- %res = load <16 x float>* %vaddr, align 64
+ %res = load <16 x float>, <16 x float>* %vaddr, align 64
ret <16 x float>%res
}
@@ -248,7 +248,7 @@ define void @test28(i8 * %addr, <8 x dou
; CHECK: ret
define <8 x double> @test29(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x double>*
- %res = load <8 x double>* %vaddr, align 1
+ %res = load <8 x double>, <8 x double>* %vaddr, align 1
ret <8 x double>%res
}
@@ -266,7 +266,7 @@ define void @test30(i8 * %addr, <16 x fl
; CHECK: ret
define <16 x float> @test31(i8 * %addr) {
%vaddr = bitcast i8* %addr to <16 x float>*
- %res = load <16 x float>* %vaddr, align 1
+ %res = load <16 x float>, <16 x float>* %vaddr, align 1
ret <16 x float>%res
}
@@ -276,7 +276,7 @@ define <16 x float> @test31(i8 * %addr)
define <16 x i32> @test32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i32>*
- %r = load <16 x i32>* %vaddr, align 64
+ %r = load <16 x i32>, <16 x i32>* %vaddr, align 64
%res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> %old
ret <16 x i32>%res
}
@@ -287,7 +287,7 @@ define <16 x i32> @test32(i8 * %addr, <1
define <16 x i32> @test33(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i32>*
- %r = load <16 x i32>* %vaddr, align 1
+ %r = load <16 x i32>, <16 x i32>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> %old
ret <16 x i32>%res
}
@@ -298,7 +298,7 @@ define <16 x i32> @test33(i8 * %addr, <1
define <16 x i32> @test34(i8 * %addr, <16 x i32> %mask1) {
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i32>*
- %r = load <16 x i32>* %vaddr, align 64
+ %r = load <16 x i32>, <16 x i32>* %vaddr, align 64
%res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> zeroinitializer
ret <16 x i32>%res
}
@@ -309,7 +309,7 @@ define <16 x i32> @test34(i8 * %addr, <1
define <16 x i32> @test35(i8 * %addr, <16 x i32> %mask1) {
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i32>*
- %r = load <16 x i32>* %vaddr, align 1
+ %r = load <16 x i32>, <16 x i32>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> zeroinitializer
ret <16 x i32>%res
}
@@ -320,7 +320,7 @@ define <16 x i32> @test35(i8 * %addr, <1
define <8 x i64> @test36(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i64>*
- %r = load <8 x i64>* %vaddr, align 64
+ %r = load <8 x i64>, <8 x i64>* %vaddr, align 64
%res = select <8 x i1> %mask, <8 x i64> %r, <8 x i64> %old
ret <8 x i64>%res
}
@@ -331,7 +331,7 @@ define <8 x i64> @test36(i8 * %addr, <8
define <8 x i64> @test37(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i64>*
- %r = load <8 x i64>* %vaddr, align 1
+ %r = load <8 x i64>, <8 x i64>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x i64> %r, <8 x i64> %old
ret <8 x i64>%res
}
@@ -342,7 +342,7 @@ define <8 x i64> @test37(i8 * %addr, <8
define <8 x i64> @test38(i8 * %addr, <8 x i64> %mask1) {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i64>*
- %r = load <8 x i64>* %vaddr, align 64
+ %r = load <8 x i64>, <8 x i64>* %vaddr, align 64
%res = select <8 x i1> %mask, <8 x i64> %r, <8 x i64> zeroinitializer
ret <8 x i64>%res
}
@@ -353,7 +353,7 @@ define <8 x i64> @test38(i8 * %addr, <8
define <8 x i64> @test39(i8 * %addr, <8 x i64> %mask1) {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i64>*
- %r = load <8 x i64>* %vaddr, align 1
+ %r = load <8 x i64>, <8 x i64>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x i64> %r, <8 x i64> zeroinitializer
ret <8 x i64>%res
}
@@ -364,7 +364,7 @@ define <8 x i64> @test39(i8 * %addr, <8
define <16 x float> @test40(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
%mask = fcmp one <16 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x float>*
- %r = load <16 x float>* %vaddr, align 64
+ %r = load <16 x float>, <16 x float>* %vaddr, align 64
%res = select <16 x i1> %mask, <16 x float> %r, <16 x float> %old
ret <16 x float>%res
}
@@ -375,7 +375,7 @@ define <16 x float> @test40(i8 * %addr,
define <16 x float> @test41(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
%mask = fcmp one <16 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x float>*
- %r = load <16 x float>* %vaddr, align 1
+ %r = load <16 x float>, <16 x float>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x float> %r, <16 x float> %old
ret <16 x float>%res
}
@@ -386,7 +386,7 @@ define <16 x float> @test41(i8 * %addr,
define <16 x float> @test42(i8 * %addr, <16 x float> %mask1) {
%mask = fcmp one <16 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x float>*
- %r = load <16 x float>* %vaddr, align 64
+ %r = load <16 x float>, <16 x float>* %vaddr, align 64
%res = select <16 x i1> %mask, <16 x float> %r, <16 x float> zeroinitializer
ret <16 x float>%res
}
@@ -397,7 +397,7 @@ define <16 x float> @test42(i8 * %addr,
define <16 x float> @test43(i8 * %addr, <16 x float> %mask1) {
%mask = fcmp one <16 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x float>*
- %r = load <16 x float>* %vaddr, align 1
+ %r = load <16 x float>, <16 x float>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x float> %r, <16 x float> zeroinitializer
ret <16 x float>%res
}
@@ -408,7 +408,7 @@ define <16 x float> @test43(i8 * %addr,
define <8 x double> @test44(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
%mask = fcmp one <8 x double> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x double>*
- %r = load <8 x double>* %vaddr, align 64
+ %r = load <8 x double>, <8 x double>* %vaddr, align 64
%res = select <8 x i1> %mask, <8 x double> %r, <8 x double> %old
ret <8 x double>%res
}
@@ -419,7 +419,7 @@ define <8 x double> @test44(i8 * %addr,
define <8 x double> @test45(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
%mask = fcmp one <8 x double> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x double>*
- %r = load <8 x double>* %vaddr, align 1
+ %r = load <8 x double>, <8 x double>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x double> %r, <8 x double> %old
ret <8 x double>%res
}
@@ -430,7 +430,7 @@ define <8 x double> @test45(i8 * %addr,
define <8 x double> @test46(i8 * %addr, <8 x double> %mask1) {
%mask = fcmp one <8 x double> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x double>*
- %r = load <8 x double>* %vaddr, align 64
+ %r = load <8 x double>, <8 x double>* %vaddr, align 64
%res = select <8 x i1> %mask, <8 x double> %r, <8 x double> zeroinitializer
ret <8 x double>%res
}
@@ -441,7 +441,7 @@ define <8 x double> @test46(i8 * %addr,
define <8 x double> @test47(i8 * %addr, <8 x double> %mask1) {
%mask = fcmp one <8 x double> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x double>*
- %r = load <8 x double>* %vaddr, align 1
+ %r = load <8 x double>, <8 x double>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x double> %r, <8 x double> zeroinitializer
ret <8 x double>%res
}
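Tests 32 through 47 in avx512-mov.ll all follow one recipe: build a mask by comparing an argument against zero, load through a bitcast pointer, then select between the loaded value and either the old vector (merge-masking) or zeroinitializer (zero-masking); the expected output is a single masked move such as vmovdqa32 (%rdi), %zmm0 {%k1} or {%k1} {z}. One instance in the new syntax, modeled on the committed tests:

  define <16 x i32> @sketch_maskz_load(i8* %addr, <16 x i32> %mask1) {
    %mask = icmp ne <16 x i32> %mask1, zeroinitializer
    %vaddr = bitcast i8* %addr to <16 x i32>*
    %r = load <16 x i32>, <16 x i32>* %vaddr, align 64
    ; zero-masking: lanes whose mask bit is false become zero
    %res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> zeroinitializer
    ret <16 x i32> %res
  }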
Modified: llvm/trunk/test/CodeGen/X86/avx512-round.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-round.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-round.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-round.ll Fri Feb 27 15:17:42 2015
@@ -99,7 +99,7 @@ declare float @llvm.floor.f32(float %p)
define float @floor_f32m(float* %aptr) {
; CHECK-LABEL: floor_f32m
; CHECK: vrndscaless $1, (%rdi), {{.*}}encoding: [0x62,0xf3,0x7d,0x08,0x0a,0x07,0x01]
- %a = load float* %aptr, align 4
+ %a = load float, float* %aptr, align 4
%res = call float @llvm.floor.f32(float %a)
ret float %res
}
Modified: llvm/trunk/test/CodeGen/X86/avx512-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-shift.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-shift.ll Fri Feb 27 15:17:42 2015
@@ -76,7 +76,7 @@ define <8 x i64> @variable_sra2(<8 x i64
; CHECK: vpsravd (%
; CHECK: ret
define <16 x i32> @variable_sra01_load(<16 x i32> %x, <16 x i32>* %y) {
- %y1 = load <16 x i32>* %y
+ %y1 = load <16 x i32>, <16 x i32>* %y
%k = ashr <16 x i32> %x, %y1
ret <16 x i32> %k
}
@@ -85,7 +85,7 @@ define <16 x i32> @variable_sra01_load(<
; CHECK: vpsllvd (%
; CHECK: ret
define <16 x i32> @variable_shl1_load(<16 x i32> %x, <16 x i32>* %y) {
- %y1 = load <16 x i32>* %y
+ %y1 = load <16 x i32>, <16 x i32>* %y
%k = shl <16 x i32> %x, %y1
ret <16 x i32> %k
}
@@ -93,7 +93,7 @@ define <16 x i32> @variable_shl1_load(<1
; CHECK: vpsrlvd (%
; CHECK: ret
define <16 x i32> @variable_srl0_load(<16 x i32> %x, <16 x i32>* %y) {
- %y1 = load <16 x i32>* %y
+ %y1 = load <16 x i32>, <16 x i32>* %y
%k = lshr <16 x i32> %x, %y1
ret <16 x i32> %k
}
@@ -102,7 +102,7 @@ define <16 x i32> @variable_srl0_load(<1
; CHECK: vpsrlvq (%
; CHECK: ret
define <8 x i64> @variable_srl3_load(<8 x i64> %x, <8 x i64>* %y) {
- %y1 = load <8 x i64>* %y
+ %y1 = load <8 x i64>, <8 x i64>* %y
%k = lshr <8 x i64> %x, %y1
ret <8 x i64> %k
}
Modified: llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll Fri Feb 27 15:17:42 2015
@@ -64,7 +64,7 @@ define <16 x float> @_ss16xfloat_maskz
;CHECK: vbroadcastss (%{{.*}}, %zmm
;CHECK: ret
define <16 x float> @_ss16xfloat_load(float* %a.ptr) {
- %a = load float* %a.ptr
+ %a = load float, float* %a.ptr
%b = insertelement <16 x float> undef, float %a, i32 0
%c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
ret <16 x float> %c
@@ -74,7 +74,7 @@ define <16 x float> @_ss16xfloat_load(
;CHECK: vbroadcastss (%rdi), %zmm0 {%k1}
;CHECK: ret
define <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16 x i32> %mask1) {
- %a = load float* %a.ptr
+ %a = load float, float* %a.ptr
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%b = insertelement <16 x float> undef, float %a, i32 0
%c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
@@ -86,7 +86,7 @@ define <16 x float> @_ss16xfloat_mask_
;CHECK: vbroadcastss (%rdi), %zmm0 {%k1} {z}
;CHECK: ret
define <16 x float> @_ss16xfloat_maskz_load(float* %a.ptr, <16 x i32> %mask1) {
- %a = load float* %a.ptr
+ %a = load float, float* %a.ptr
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%b = insertelement <16 x float> undef, float %a, i32 0
%c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
@@ -130,7 +130,7 @@ define <8 x double> @_sd8xdouble_maskz
;CHECK: vbroadcastsd (%rdi), %zmm
;CHECK: ret
define <8 x double> @_sd8xdouble_load(double* %a.ptr) {
- %a = load double* %a.ptr
+ %a = load double, double* %a.ptr
%b = insertelement <8 x double> undef, double %a, i32 0
%c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
ret <8 x double> %c
@@ -140,7 +140,7 @@ define <8 x double> @_sd8xdouble_load(
;CHECK: vbroadcastsd (%rdi), %zmm0 {%k1}
;CHECK: ret
define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
- %a = load double* %a.ptr
+ %a = load double, double* %a.ptr
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%b = insertelement <8 x double> undef, double %a, i32 0
%c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
@@ -152,7 +152,7 @@ define <8 x double> @_sd8xdouble_maskz
; CHECK-LABEL: _sd8xdouble_maskz_load:
; CHECK: vbroadcastsd (%rdi), %zmm0 {%k1} {z}
; CHECK: ret
- %a = load double* %a.ptr
+ %a = load double, double* %a.ptr
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%b = insertelement <8 x double> undef, double %a, i32 0
%c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
Modified: llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll Fri Feb 27 15:17:42 2015
@@ -31,7 +31,7 @@ define <16 x i32> @test3(<16 x i32> %x,
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
- %y = load <16 x i32>* %yp, align 4
+ %y = load <16 x i32>, <16 x i32>* %yp, align 4
%mask = icmp eq <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
@@ -215,7 +215,7 @@ define <16 x i32> @test17(<16 x i32> %x,
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
- %y = load <16 x i32>* %y.ptr, align 4
+ %y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp sgt <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
@@ -228,7 +228,7 @@ define <16 x i32> @test18(<16 x i32> %x,
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
- %y = load <16 x i32>* %y.ptr, align 4
+ %y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp sle <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
@@ -241,7 +241,7 @@ define <16 x i32> @test19(<16 x i32> %x,
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
- %y = load <16 x i32>* %y.ptr, align 4
+ %y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp ule <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
@@ -286,7 +286,7 @@ define <8 x i64> @test22(<8 x i64> %x, <
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sgt <8 x i64> %x1, %y1
- %y = load <8 x i64>* %y.ptr, align 4
+ %y = load <8 x i64>, <8 x i64>* %y.ptr, align 4
%mask0 = icmp sgt <8 x i64> %x, %y
%mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
%max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %x1
@@ -302,7 +302,7 @@ define <16 x i32> @test23(<16 x i32> %x,
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i32> %x1, %y1
- %y = load <16 x i32>* %y.ptr, align 4
+ %y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask0 = icmp ule <16 x i32> %x, %y
%mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
@@ -316,7 +316,7 @@ define <8 x i64> @test24(<8 x i64> %x, <
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
- %yb = load i64* %yb.ptr, align 4
+ %yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
%y = shufflevector <8 x i64> %y.0, <8 x i64> undef, <8 x i32> zeroinitializer
%mask = icmp eq <8 x i64> %x, %y
@@ -331,7 +331,7 @@ define <16 x i32> @test25(<16 x i32> %x,
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
- %yb = load i32* %yb.ptr, align 4
+ %yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
%y = shufflevector <16 x i32> %y.0, <16 x i32> undef, <16 x i32> zeroinitializer
%mask = icmp sle <16 x i32> %x, %y
@@ -348,7 +348,7 @@ define <16 x i32> @test26(<16 x i32> %x,
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i32> %x1, %y1
- %yb = load i32* %yb.ptr, align 4
+ %yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
%y = shufflevector <16 x i32> %y.0, <16 x i32> undef, <16 x i32> zeroinitializer
%mask0 = icmp sgt <16 x i32> %x, %y
@@ -366,7 +366,7 @@ define <8 x i64> @test27(<8 x i64> %x, i
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i64> %x1, %y1
- %yb = load i64* %yb.ptr, align 4
+ %yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
%y = shufflevector <8 x i64> %y.0, <8 x i64> undef, <8 x i32> zeroinitializer
%mask0 = icmp sle <8 x i64> %x, %y
Modified: llvm/trunk/test/CodeGen/X86/avx512bw-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bw-arith.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bw-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bw-arith.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ define <64 x i8> @vpaddb512_test(<64 x i
; CHECK: vpaddb (%rdi), %zmm{{.*}}
; CHECK: ret
define <64 x i8> @vpaddb512_fold_test(<64 x i8> %i, <64 x i8>* %j) nounwind {
- %tmp = load <64 x i8>* %j, align 4
+ %tmp = load <64 x i8>, <64 x i8>* %j, align 4
%x = add <64 x i8> %i, %tmp
ret <64 x i8> %x
}
@@ -29,7 +29,7 @@ define <32 x i16> @vpaddw512_test(<32 x
; CHECK: vpaddw (%rdi), %zmm{{.*}}
; CHECK: ret
define <32 x i16> @vpaddw512_fold_test(<32 x i16> %i, <32 x i16>* %j) nounwind {
- %tmp = load <32 x i16>* %j, align 4
+ %tmp = load <32 x i16>, <32 x i16>* %j, align 4
%x = add <32 x i16> %i, %tmp
ret <32 x i16> %x
}
@@ -59,7 +59,7 @@ define <32 x i16> @vpaddw512_maskz_test(
; CHECK: ret
define <32 x i16> @vpaddw512_mask_fold_test(<32 x i16> %i, <32 x i16>* %j.ptr, <32 x i16> %mask1) nounwind readnone {
%mask = icmp ne <32 x i16> %mask1, zeroinitializer
- %j = load <32 x i16>* %j.ptr
+ %j = load <32 x i16>, <32 x i16>* %j.ptr
%x = add <32 x i16> %i, %j
%r = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %i
ret <32 x i16> %r
@@ -70,7 +70,7 @@ define <32 x i16> @vpaddw512_mask_fold_t
; CHECK: ret
define <32 x i16> @vpaddw512_maskz_fold_test(<32 x i16> %i, <32 x i16>* %j.ptr, <32 x i16> %mask1) nounwind readnone {
%mask = icmp ne <32 x i16> %mask1, zeroinitializer
- %j = load <32 x i16>* %j.ptr
+ %j = load <32 x i16>, <32 x i16>* %j.ptr
%x = add <32 x i16> %i, %j
%r = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> zeroinitializer
ret <32 x i16> %r
Modified: llvm/trunk/test/CodeGen/X86/avx512bw-mask-op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bw-mask-op.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bw-mask-op.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bw-mask-op.ll Fri Feb 27 15:17:42 2015
@@ -35,7 +35,7 @@ define i64 @mask64(i64 %x) {
}
define void @mask32_mem(i32* %ptr) {
- %x = load i32* %ptr, align 4
+ %x = load i32, i32* %ptr, align 4
%m0 = bitcast i32 %x to <32 x i1>
%m1 = xor <32 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1,
i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1,
@@ -52,7 +52,7 @@ define void @mask32_mem(i32* %ptr) {
}
define void @mask64_mem(i64* %ptr) {
- %x = load i64* %ptr, align 4
+ %x = load i64, i64* %ptr, align 4
%m0 = bitcast i64 %x to <64 x i1>
%m1 = xor <64 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1,
i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1,
Modified: llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
; CHECK: ret
define <64 x i8> @test1(i8 * %addr) {
%vaddr = bitcast i8* %addr to <64 x i8>*
- %res = load <64 x i8>* %vaddr, align 1
+ %res = load <64 x i8>, <64 x i8>* %vaddr, align 1
ret <64 x i8>%res
}
@@ -24,7 +24,7 @@ define void @test2(i8 * %addr, <64 x i8>
define <64 x i8> @test3(i8 * %addr, <64 x i8> %old, <64 x i8> %mask1) {
%mask = icmp ne <64 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <64 x i8>*
- %r = load <64 x i8>* %vaddr, align 1
+ %r = load <64 x i8>, <64 x i8>* %vaddr, align 1
%res = select <64 x i1> %mask, <64 x i8> %r, <64 x i8> %old
ret <64 x i8>%res
}
@@ -35,7 +35,7 @@ define <64 x i8> @test3(i8 * %addr, <64
define <64 x i8> @test4(i8 * %addr, <64 x i8> %mask1) {
%mask = icmp ne <64 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <64 x i8>*
- %r = load <64 x i8>* %vaddr, align 1
+ %r = load <64 x i8>, <64 x i8>* %vaddr, align 1
%res = select <64 x i1> %mask, <64 x i8> %r, <64 x i8> zeroinitializer
ret <64 x i8>%res
}
@@ -45,7 +45,7 @@ define <64 x i8> @test4(i8 * %addr, <64
; CHECK: ret
define <32 x i16> @test5(i8 * %addr) {
%vaddr = bitcast i8* %addr to <32 x i16>*
- %res = load <32 x i16>* %vaddr, align 1
+ %res = load <32 x i16>, <32 x i16>* %vaddr, align 1
ret <32 x i16>%res
}
@@ -64,7 +64,7 @@ define void @test6(i8 * %addr, <32 x i16
define <32 x i16> @test7(i8 * %addr, <32 x i16> %old, <32 x i16> %mask1) {
%mask = icmp ne <32 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <32 x i16>*
- %r = load <32 x i16>* %vaddr, align 1
+ %r = load <32 x i16>, <32 x i16>* %vaddr, align 1
%res = select <32 x i1> %mask, <32 x i16> %r, <32 x i16> %old
ret <32 x i16>%res
}
@@ -75,7 +75,7 @@ define <32 x i16> @test7(i8 * %addr, <32
define <32 x i16> @test8(i8 * %addr, <32 x i16> %mask1) {
%mask = icmp ne <32 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <32 x i16>*
- %r = load <32 x i16>* %vaddr, align 1
+ %r = load <32 x i16>, <32 x i16>* %vaddr, align 1
%res = select <32 x i1> %mask, <32 x i16> %r, <32 x i16> zeroinitializer
ret <32 x i16>%res
}
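The mov tests above all share one idiom: build a lane mask by comparing an incoming vector against zeroinitializer, perform an ordinary unmasked load through a bitcast pointer, then select between the loaded value and either the old value (merge masking) or zeroinitializer (zero masking). A sketch of the zero-masking variant, with hypothetical names:

  %mask  = icmp ne <64 x i8> %m, zeroinitializer
  %vaddr = bitcast i8* %addr to <64 x i8>*
  %r     = load <64 x i8>, <64 x i8>* %vaddr, align 1
  %res   = select <64 x i1> %mask, <64 x i8> %r, <64 x i8> zeroinitializer

The select is the part the backend is expected to fold, together with the load, into a single masked vector move.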
Modified: llvm/trunk/test/CodeGen/X86/avx512bw-vec-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bw-vec-cmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bw-vec-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bw-vec-cmp.ll Fri Feb 27 15:17:42 2015
@@ -45,7 +45,7 @@ define <64 x i8> @test4(<64 x i8> %x, <6
; CHECK: vmovdqu16
; CHECK: ret
define <32 x i16> @test5(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %yp) nounwind {
- %y = load <32 x i16>* %yp, align 4
+ %y = load <32 x i16>, <32 x i16>* %yp, align 4
%mask = icmp eq <32 x i16> %x, %y
%max = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %x1
ret <32 x i16> %max
@@ -56,7 +56,7 @@ define <32 x i16> @test5(<32 x i16> %x,
; CHECK: vmovdqu16
; CHECK: ret
define <32 x i16> @test6(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) nounwind {
- %y = load <32 x i16>* %y.ptr, align 4
+ %y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask = icmp sgt <32 x i16> %x, %y
%max = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %x1
ret <32 x i16> %max
@@ -67,7 +67,7 @@ define <32 x i16> @test6(<32 x i16> %x,
; CHECK: vmovdqu16
; CHECK: ret
define <32 x i16> @test7(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) nounwind {
- %y = load <32 x i16>* %y.ptr, align 4
+ %y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask = icmp sle <32 x i16> %x, %y
%max = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %x1
ret <32 x i16> %max
@@ -78,7 +78,7 @@ define <32 x i16> @test7(<32 x i16> %x,
; CHECK: vmovdqu16
; CHECK: ret
define <32 x i16> @test8(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) nounwind {
- %y = load <32 x i16>* %y.ptr, align 4
+ %y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask = icmp ule <32 x i16> %x, %y
%max = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %x1
ret <32 x i16> %max
@@ -114,7 +114,7 @@ define <64 x i8> @test10(<64 x i8> %x, <
; CHECK: ret
define <64 x i8> @test11(<64 x i8> %x, <64 x i8>* %y.ptr, <64 x i8> %x1, <64 x i8> %y1) nounwind {
%mask1 = icmp sgt <64 x i8> %x1, %y1
- %y = load <64 x i8>* %y.ptr, align 4
+ %y = load <64 x i8>, <64 x i8>* %y.ptr, align 4
%mask0 = icmp sgt <64 x i8> %x, %y
%mask = select <64 x i1> %mask0, <64 x i1> %mask1, <64 x i1> zeroinitializer
%max = select <64 x i1> %mask, <64 x i8> %x, <64 x i8> %x1
@@ -127,7 +127,7 @@ define <64 x i8> @test11(<64 x i8> %x, <
; CHECK: ret
define <32 x i16> @test12(<32 x i16> %x, <32 x i16>* %y.ptr, <32 x i16> %x1, <32 x i16> %y1) nounwind {
%mask1 = icmp sge <32 x i16> %x1, %y1
- %y = load <32 x i16>* %y.ptr, align 4
+ %y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask0 = icmp ule <32 x i16> %x, %y
%mask = select <32 x i1> %mask0, <32 x i1> %mask1, <32 x i1> zeroinitializer
%max = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %x1
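The vec-cmp tests keep one comparison operand in memory so the compare has a foldable memory operand; the icmp/select pair is the IR-level spelling of a compare-driven blend. A sketch with hypothetical names:

  %y    = load <32 x i16>, <32 x i16>* %y.ptr, align 4
  %mask = icmp sgt <32 x i16> %x, %y
  %max  = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %x1

Note that align 4 is deliberately smaller than the vector's natural alignment, presumably so the tests do not depend on an aligned memory operand.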
Modified: llvm/trunk/test/CodeGen/X86/avx512bwvl-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bwvl-arith.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bwvl-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bwvl-arith.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ define <32 x i8> @vpaddb256_test(<32 x i
; CHECK: vpaddb (%rdi), %ymm{{.*}}
; CHECK: ret
define <32 x i8> @vpaddb256_fold_test(<32 x i8> %i, <32 x i8>* %j) nounwind {
- %tmp = load <32 x i8>* %j, align 4
+ %tmp = load <32 x i8>, <32 x i8>* %j, align 4
%x = add <32 x i8> %i, %tmp
ret <32 x i8> %x
}
@@ -31,7 +31,7 @@ define <16 x i16> @vpaddw256_test(<16 x
; CHECK: vpaddw (%rdi), %ymm{{.*}}
; CHECK: ret
define <16 x i16> @vpaddw256_fold_test(<16 x i16> %i, <16 x i16>* %j) nounwind {
- %tmp = load <16 x i16>* %j, align 4
+ %tmp = load <16 x i16>, <16 x i16>* %j, align 4
%x = add <16 x i16> %i, %tmp
ret <16 x i16> %x
}
@@ -61,7 +61,7 @@ define <16 x i16> @vpaddw256_maskz_test(
; CHECK: ret
define <16 x i16> @vpaddw256_mask_fold_test(<16 x i16> %i, <16 x i16>* %j.ptr, <16 x i16> %mask1) nounwind readnone {
%mask = icmp ne <16 x i16> %mask1, zeroinitializer
- %j = load <16 x i16>* %j.ptr
+ %j = load <16 x i16>, <16 x i16>* %j.ptr
%x = add <16 x i16> %i, %j
%r = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %i
ret <16 x i16> %r
@@ -72,7 +72,7 @@ define <16 x i16> @vpaddw256_mask_fold_t
; CHECK: ret
define <16 x i16> @vpaddw256_maskz_fold_test(<16 x i16> %i, <16 x i16>* %j.ptr, <16 x i16> %mask1) nounwind readnone {
%mask = icmp ne <16 x i16> %mask1, zeroinitializer
- %j = load <16 x i16>* %j.ptr
+ %j = load <16 x i16>, <16 x i16>* %j.ptr
%x = add <16 x i16> %i, %j
%r = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> zeroinitializer
ret <16 x i16> %r
@@ -116,7 +116,7 @@ define <16 x i8> @vpaddb128_test(<16 x i
; CHECK: vpaddb (%rdi), %xmm{{.*}}
; CHECK: ret
define <16 x i8> @vpaddb128_fold_test(<16 x i8> %i, <16 x i8>* %j) nounwind {
- %tmp = load <16 x i8>* %j, align 4
+ %tmp = load <16 x i8>, <16 x i8>* %j, align 4
%x = add <16 x i8> %i, %tmp
ret <16 x i8> %x
}
@@ -133,7 +133,7 @@ define <8 x i16> @vpaddw128_test(<8 x i1
; CHECK: vpaddw (%rdi), %xmm{{.*}}
; CHECK: ret
define <8 x i16> @vpaddw128_fold_test(<8 x i16> %i, <8 x i16>* %j) nounwind {
- %tmp = load <8 x i16>* %j, align 4
+ %tmp = load <8 x i16>, <8 x i16>* %j, align 4
%x = add <8 x i16> %i, %tmp
ret <8 x i16> %x
}
@@ -163,7 +163,7 @@ define <8 x i16> @vpaddw128_maskz_test(<
; CHECK: ret
define <8 x i16> @vpaddw128_mask_fold_test(<8 x i16> %i, <8 x i16>* %j.ptr, <8 x i16> %mask1) nounwind readnone {
%mask = icmp ne <8 x i16> %mask1, zeroinitializer
- %j = load <8 x i16>* %j.ptr
+ %j = load <8 x i16>, <8 x i16>* %j.ptr
%x = add <8 x i16> %i, %j
%r = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %i
ret <8 x i16> %r
@@ -174,7 +174,7 @@ define <8 x i16> @vpaddw128_mask_fold_te
; CHECK: ret
define <8 x i16> @vpaddw128_maskz_fold_test(<8 x i16> %i, <8 x i16>* %j.ptr, <8 x i16> %mask1) nounwind readnone {
%mask = icmp ne <8 x i16> %mask1, zeroinitializer
- %j = load <8 x i16>* %j.ptr
+ %j = load <8 x i16>, <8 x i16>* %j.ptr
%x = add <8 x i16> %i, %j
%r = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> zeroinitializer
ret <8 x i16> %r
Modified: llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics.ll Fri Feb 27 15:17:42 2015
@@ -830,7 +830,7 @@ define <2 x double> @test_mask_vfmsubadd
define <2 x double> @test_mask_vfmsubadd128rm_pd(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmsubadd128rm_pd
; CHECK: vfmsubadd213pd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa7,0x07]
- %a2 = load <2 x double>* %ptr_a2
+ %a2 = load <2 x double>, <2 x double>* %ptr_a2
%res = call <2 x double> @llvm.x86.fma.mask.vfmsubadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
ret <2 x double> %res
}
@@ -838,7 +838,7 @@ declare <8 x double> @llvm.x86.fma.mask.
define <8 x double> @test_mask_vfmsubaddrm_pd(<8 x double> %a0, <8 x double> %a1, <8 x double>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmsubaddrm_pd
; CHECK: vfmsubadd213pd (%rdi), %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x49,0xa7,0x07]
- %a2 = load <8 x double>* %ptr_a2, align 8
+ %a2 = load <8 x double>, <8 x double>* %ptr_a2, align 8
%res = call <8 x double> @llvm.x86.fma.mask.vfmsubadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
ret <8 x double> %res
}
@@ -860,7 +860,7 @@ define <4 x float> @test_mask_vfmadd128_
define <4 x float> @test_mask_vfmadd128_ps_rmk(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmk
; CHECK: vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
- %a2 = load <4 x float>* %ptr_a2
+ %a2 = load <4 x float>, <4 x float>* %ptr_a2
%res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
ret <4 x float> %res
}
@@ -868,7 +868,7 @@ define <4 x float> @test_mask_vfmadd128_
define <4 x float> @test_mask_vfmadd128_ps_rmka(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmka
; CHECK: vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
- %a2 = load <4 x float>* %ptr_a2, align 8
+ %a2 = load <4 x float>, <4 x float>* %ptr_a2, align 8
%res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
ret <4 x float> %res
}
@@ -876,7 +876,7 @@ define <4 x float> @test_mask_vfmadd128_
define <4 x float> @test_mask_vfmadd128_ps_rmkz(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmkz
; CHECK: vfmadd213ps (%rdi), %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0x07]
- %a2 = load <4 x float>* %ptr_a2
+ %a2 = load <4 x float>, <4 x float>* %ptr_a2
%res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
ret <4 x float> %res
}
@@ -884,7 +884,7 @@ define <4 x float> @test_mask_vfmadd128_
define <4 x float> @test_mask_vfmadd128_ps_rmkza(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmkza
; CHECK: vfmadd213ps (%rdi), %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0x07]
- %a2 = load <4 x float>* %ptr_a2, align 4
+ %a2 = load <4 x float>, <4 x float>* %ptr_a2, align 4
%res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
ret <4 x float> %res
}
@@ -892,7 +892,7 @@ define <4 x float> @test_mask_vfmadd128_
define <4 x float> @test_mask_vfmadd128_ps_rmb(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmb
; CHECK: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
- %q = load float* %ptr_a2
+ %q = load float, float* %ptr_a2
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
@@ -904,7 +904,7 @@ define <4 x float> @test_mask_vfmadd128_
define <4 x float> @test_mask_vfmadd128_ps_rmba(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmba
; CHECK: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
- %q = load float* %ptr_a2, align 4
+ %q = load float, float* %ptr_a2, align 4
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
@@ -916,7 +916,7 @@ define <4 x float> @test_mask_vfmadd128_
define <4 x float> @test_mask_vfmadd128_ps_rmbz(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmbz
; CHECK: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
- %q = load float* %ptr_a2
+ %q = load float, float* %ptr_a2
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
@@ -928,7 +928,7 @@ define <4 x float> @test_mask_vfmadd128_
define <4 x float> @test_mask_vfmadd128_ps_rmbza(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmbza
; CHECK: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
- %q = load float* %ptr_a2, align 4
+ %q = load float, float* %ptr_a2, align 4
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
@@ -954,7 +954,7 @@ define <2 x double> @test_mask_vfmadd128
define <2 x double> @test_mask_vfmadd128_pd_rmk(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_pd_rmk
; CHECK: vfmadd213pd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa8,0x07]
- %a2 = load <2 x double>* %ptr_a2
+ %a2 = load <2 x double>, <2 x double>* %ptr_a2
%res = call <2 x double> @llvm.x86.fma.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
ret <2 x double> %res
}
@@ -962,7 +962,7 @@ define <2 x double> @test_mask_vfmadd128
define <2 x double> @test_mask_vfmadd128_pd_rmkz(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_pd_rmkz
; CHECK: vfmadd213pd (%rdi), %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0x07]
- %a2 = load <2 x double>* %ptr_a2
+ %a2 = load <2 x double>, <2 x double>* %ptr_a2
%res = call <2 x double> @llvm.x86.fma.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
ret <2 x double> %res
}
@@ -984,7 +984,7 @@ define <4 x double> @test_mask_vfmadd256
define <4 x double> @test_mask_vfmadd256_pd_rmk(<4 x double> %a0, <4 x double> %a1, <4 x double>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd256_pd_rmk
; CHECK: vfmadd213pd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xa8,0x07]
- %a2 = load <4 x double>* %ptr_a2
+ %a2 = load <4 x double>, <4 x double>* %ptr_a2
%res = call <4 x double> @llvm.x86.fma.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
ret <4 x double> %res
}
@@ -992,7 +992,7 @@ define <4 x double> @test_mask_vfmadd256
define <4 x double> @test_mask_vfmadd256_pd_rmkz(<4 x double> %a0, <4 x double> %a1, <4 x double>* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd256_pd_rmkz
; CHECK: vfmadd213pd (%rdi), %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0x07]
- %a2 = load <4 x double>* %ptr_a2
+ %a2 = load <4 x double>, <4 x double>* %ptr_a2
%res = call <4 x double> @llvm.x86.fma.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind
ret <4 x double> %res
}
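The _rmb variants (presumably register/memory-broadcast) load one scalar and replicate it into every lane with a chain of insertelement instructions; the {1to4} CHECK patterns above verify that this splat is folded into the instruction's broadcast memory operand. A sketch with hypothetical names:

  %q  = load float, float* %p
  %b0 = insertelement <4 x float> undef, float %q, i32 0
  %b1 = insertelement <4 x float> %b0, float %q, i32 1
  %b2 = insertelement <4 x float> %b1, float %q, i32 2
  %b3 = insertelement <4 x float> %b2, float %q, i32 3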
Modified: llvm/trunk/test/CodeGen/X86/avx512bwvl-mov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bwvl-mov.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bwvl-mov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bwvl-mov.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
; CHECK: ret
define <32 x i8> @test_256_1(i8 * %addr) {
%vaddr = bitcast i8* %addr to <32 x i8>*
- %res = load <32 x i8>* %vaddr, align 1
+ %res = load <32 x i8>, <32 x i8>* %vaddr, align 1
ret <32 x i8>%res
}
@@ -24,7 +24,7 @@ define void @test_256_2(i8 * %addr, <32
define <32 x i8> @test_256_3(i8 * %addr, <32 x i8> %old, <32 x i8> %mask1) {
%mask = icmp ne <32 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <32 x i8>*
- %r = load <32 x i8>* %vaddr, align 1
+ %r = load <32 x i8>, <32 x i8>* %vaddr, align 1
%res = select <32 x i1> %mask, <32 x i8> %r, <32 x i8> %old
ret <32 x i8>%res
}
@@ -35,7 +35,7 @@ define <32 x i8> @test_256_3(i8 * %addr,
define <32 x i8> @test_256_4(i8 * %addr, <32 x i8> %mask1) {
%mask = icmp ne <32 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <32 x i8>*
- %r = load <32 x i8>* %vaddr, align 1
+ %r = load <32 x i8>, <32 x i8>* %vaddr, align 1
%res = select <32 x i1> %mask, <32 x i8> %r, <32 x i8> zeroinitializer
ret <32 x i8>%res
}
@@ -45,7 +45,7 @@ define <32 x i8> @test_256_4(i8 * %addr,
; CHECK: ret
define <16 x i16> @test_256_5(i8 * %addr) {
%vaddr = bitcast i8* %addr to <16 x i16>*
- %res = load <16 x i16>* %vaddr, align 1
+ %res = load <16 x i16>, <16 x i16>* %vaddr, align 1
ret <16 x i16>%res
}
@@ -64,7 +64,7 @@ define void @test_256_6(i8 * %addr, <16
define <16 x i16> @test_256_7(i8 * %addr, <16 x i16> %old, <16 x i16> %mask1) {
%mask = icmp ne <16 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i16>*
- %r = load <16 x i16>* %vaddr, align 1
+ %r = load <16 x i16>, <16 x i16>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x i16> %r, <16 x i16> %old
ret <16 x i16>%res
}
@@ -75,7 +75,7 @@ define <16 x i16> @test_256_7(i8 * %addr
define <16 x i16> @test_256_8(i8 * %addr, <16 x i16> %mask1) {
%mask = icmp ne <16 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i16>*
- %r = load <16 x i16>* %vaddr, align 1
+ %r = load <16 x i16>, <16 x i16>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x i16> %r, <16 x i16> zeroinitializer
ret <16 x i16>%res
}
@@ -85,7 +85,7 @@ define <16 x i16> @test_256_8(i8 * %addr
; CHECK: ret
define <16 x i8> @test_128_1(i8 * %addr) {
%vaddr = bitcast i8* %addr to <16 x i8>*
- %res = load <16 x i8>* %vaddr, align 1
+ %res = load <16 x i8>, <16 x i8>* %vaddr, align 1
ret <16 x i8>%res
}
@@ -104,7 +104,7 @@ define void @test_128_2(i8 * %addr, <16
define <16 x i8> @test_128_3(i8 * %addr, <16 x i8> %old, <16 x i8> %mask1) {
%mask = icmp ne <16 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i8>*
- %r = load <16 x i8>* %vaddr, align 1
+ %r = load <16 x i8>, <16 x i8>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x i8> %r, <16 x i8> %old
ret <16 x i8>%res
}
@@ -115,7 +115,7 @@ define <16 x i8> @test_128_3(i8 * %addr,
define <16 x i8> @test_128_4(i8 * %addr, <16 x i8> %mask1) {
%mask = icmp ne <16 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i8>*
- %r = load <16 x i8>* %vaddr, align 1
+ %r = load <16 x i8>, <16 x i8>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x i8> %r, <16 x i8> zeroinitializer
ret <16 x i8>%res
}
@@ -125,7 +125,7 @@ define <16 x i8> @test_128_4(i8 * %addr,
; CHECK: ret
define <8 x i16> @test_128_5(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x i16>*
- %res = load <8 x i16>* %vaddr, align 1
+ %res = load <8 x i16>, <8 x i16>* %vaddr, align 1
ret <8 x i16>%res
}
@@ -144,7 +144,7 @@ define void @test_128_6(i8 * %addr, <8 x
define <8 x i16> @test_128_7(i8 * %addr, <8 x i16> %old, <8 x i16> %mask1) {
%mask = icmp ne <8 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i16>*
- %r = load <8 x i16>* %vaddr, align 1
+ %r = load <8 x i16>, <8 x i16>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x i16> %r, <8 x i16> %old
ret <8 x i16>%res
}
@@ -155,7 +155,7 @@ define <8 x i16> @test_128_7(i8 * %addr,
define <8 x i16> @test_128_8(i8 * %addr, <8 x i16> %mask1) {
%mask = icmp ne <8 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i16>*
- %r = load <8 x i16>* %vaddr, align 1
+ %r = load <8 x i16>, <8 x i16>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x i16> %r, <8 x i16> zeroinitializer
ret <8 x i16>%res
}
Modified: llvm/trunk/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bwvl-vec-cmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bwvl-vec-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bwvl-vec-cmp.ll Fri Feb 27 15:17:42 2015
@@ -45,7 +45,7 @@ define <32 x i8> @test256_4(<32 x i8> %x
; CHECK: vmovdqu16
; CHECK: ret
define <16 x i16> @test256_5(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %yp) nounwind {
- %y = load <16 x i16>* %yp, align 4
+ %y = load <16 x i16>, <16 x i16>* %yp, align 4
%mask = icmp eq <16 x i16> %x, %y
%max = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %x1
ret <16 x i16> %max
@@ -56,7 +56,7 @@ define <16 x i16> @test256_5(<16 x i16>
; CHECK: vmovdqu16
; CHECK: ret
define <16 x i16> @test256_6(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr) nounwind {
- %y = load <16 x i16>* %y.ptr, align 4
+ %y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask = icmp sgt <16 x i16> %x, %y
%max = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %x1
ret <16 x i16> %max
@@ -67,7 +67,7 @@ define <16 x i16> @test256_6(<16 x i16>
; CHECK: vmovdqu16
; CHECK: ret
define <16 x i16> @test256_7(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr) nounwind {
- %y = load <16 x i16>* %y.ptr, align 4
+ %y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask = icmp sle <16 x i16> %x, %y
%max = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %x1
ret <16 x i16> %max
@@ -78,7 +78,7 @@ define <16 x i16> @test256_7(<16 x i16>
; CHECK: vmovdqu16
; CHECK: ret
define <16 x i16> @test256_8(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr) nounwind {
- %y = load <16 x i16>* %y.ptr, align 4
+ %y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask = icmp ule <16 x i16> %x, %y
%max = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %x1
ret <16 x i16> %max
@@ -114,7 +114,7 @@ define <32 x i8> @test256_10(<32 x i8> %
; CHECK: ret
define <32 x i8> @test256_11(<32 x i8> %x, <32 x i8>* %y.ptr, <32 x i8> %x1, <32 x i8> %y1) nounwind {
%mask1 = icmp sgt <32 x i8> %x1, %y1
- %y = load <32 x i8>* %y.ptr, align 4
+ %y = load <32 x i8>, <32 x i8>* %y.ptr, align 4
%mask0 = icmp sgt <32 x i8> %x, %y
%mask = select <32 x i1> %mask0, <32 x i1> %mask1, <32 x i1> zeroinitializer
%max = select <32 x i1> %mask, <32 x i8> %x, <32 x i8> %x1
@@ -127,7 +127,7 @@ define <32 x i8> @test256_11(<32 x i8> %
; CHECK: ret
define <16 x i16> @test256_12(<16 x i16> %x, <16 x i16>* %y.ptr, <16 x i16> %x1, <16 x i16> %y1) nounwind {
%mask1 = icmp sge <16 x i16> %x1, %y1
- %y = load <16 x i16>* %y.ptr, align 4
+ %y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask0 = icmp ule <16 x i16> %x, %y
%mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
%max = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %x1
@@ -179,7 +179,7 @@ define <16 x i8> @test128_4(<16 x i8> %x
; CHECK: vmovdqu16
; CHECK: ret
define <8 x i16> @test128_5(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %yp) nounwind {
- %y = load <8 x i16>* %yp, align 4
+ %y = load <8 x i16>, <8 x i16>* %yp, align 4
%mask = icmp eq <8 x i16> %x, %y
%max = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %x1
ret <8 x i16> %max
@@ -190,7 +190,7 @@ define <8 x i16> @test128_5(<8 x i16> %x
; CHECK: vmovdqu16
; CHECK: ret
define <8 x i16> @test128_6(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) nounwind {
- %y = load <8 x i16>* %y.ptr, align 4
+ %y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask = icmp sgt <8 x i16> %x, %y
%max = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %x1
ret <8 x i16> %max
@@ -201,7 +201,7 @@ define <8 x i16> @test128_6(<8 x i16> %x
; CHECK: vmovdqu16
; CHECK: ret
define <8 x i16> @test128_7(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) nounwind {
- %y = load <8 x i16>* %y.ptr, align 4
+ %y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask = icmp sle <8 x i16> %x, %y
%max = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %x1
ret <8 x i16> %max
@@ -212,7 +212,7 @@ define <8 x i16> @test128_7(<8 x i16> %x
; CHECK: vmovdqu16
; CHECK: ret
define <8 x i16> @test128_8(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) nounwind {
- %y = load <8 x i16>* %y.ptr, align 4
+ %y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask = icmp ule <8 x i16> %x, %y
%max = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %x1
ret <8 x i16> %max
@@ -248,7 +248,7 @@ define <16 x i8> @test128_10(<16 x i8> %
; CHECK: ret
define <16 x i8> @test128_11(<16 x i8> %x, <16 x i8>* %y.ptr, <16 x i8> %x1, <16 x i8> %y1) nounwind {
%mask1 = icmp sgt <16 x i8> %x1, %y1
- %y = load <16 x i8>* %y.ptr, align 4
+ %y = load <16 x i8>, <16 x i8>* %y.ptr, align 4
%mask0 = icmp sgt <16 x i8> %x, %y
%mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
%max = select <16 x i1> %mask, <16 x i8> %x, <16 x i8> %x1
@@ -261,7 +261,7 @@ define <16 x i8> @test128_11(<16 x i8> %
; CHECK: ret
define <8 x i16> @test128_12(<8 x i16> %x, <8 x i16>* %y.ptr, <8 x i16> %x1, <8 x i16> %y1) nounwind {
%mask1 = icmp sge <8 x i16> %x1, %y1
- %y = load <8 x i16>* %y.ptr, align 4
+ %y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask0 = icmp ule <8 x i16> %x, %y
%mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
%max = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %x1
Modified: llvm/trunk/test/CodeGen/X86/avx512dq-mask-op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512dq-mask-op.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512dq-mask-op.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512dq-mask-op.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ define i8 @mask8(i8 %x) {
}
define void @mask8_mem(i8* %ptr) {
- %x = load i8* %ptr, align 4
+ %x = load i8, i8* %ptr, align 4
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%ret = bitcast <8 x i1> %m1 to i8
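The mask-op tests round-trip a scalar integer through the i1-vector domain: load the integer, bitcast it to a vector of i1, invert it by xor'ing with an all-ones vector, and bitcast back. A sketch with a hypothetical %p:

  %x  = load i8, i8* %p, align 4
  %m0 = bitcast i8 %x to <8 x i1>
  %m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
  %r  = bitcast <8 x i1> %m1 to i8

This shape gives the backend the opportunity to keep the whole sequence in a k mask register.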
Modified: llvm/trunk/test/CodeGen/X86/avx512er-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512er-intrinsics.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512er-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512er-intrinsics.ll Fri Feb 27 15:17:42 2015
@@ -99,7 +99,7 @@ declare <2 x double> @llvm.x86.avx512.rs
define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr ) {
; CHECK: vrsqrt28sd (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x07]
- %mem = load double * %ptr, align 8
+ %mem = load double , double * %ptr, align 8
%mem_v = insertelement <2 x double> undef, double %mem, i32 0
%res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 7, i32 4) ;
ret <2 x double> %res
@@ -108,7 +108,7 @@ define <2 x double> @test_rsqrt28_sd_mas
define <2 x double> @test_rsqrt28_sd_maskz_mem_offset(<2 x double> %a0, double* %ptr ) {
; CHECK: vrsqrt28sd 144(%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x47,0x12]
%ptr1 = getelementptr double, double* %ptr, i32 18
- %mem = load double * %ptr1, align 8
+ %mem = load double , double * %ptr1, align 8
%mem_v = insertelement <2 x double> undef, double %mem, i32 0
%res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 7, i32 4) ;
ret <2 x double> %res
Modified: llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ define <4 x i64> @vpaddq256_test(<4 x i6
; CHECK: vpaddq (%rdi), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @vpaddq256_fold_test(<4 x i64> %i, <4 x i64>* %j) nounwind {
- %tmp = load <4 x i64>* %j, align 4
+ %tmp = load <4 x i64>, <4 x i64>* %j, align 4
%x = add <4 x i64> %i, %tmp
ret <4 x i64> %x
}
@@ -31,7 +31,7 @@ define <4 x i64> @vpaddq256_broadcast_te
; CHECK: vpaddq (%rdi){1to4}, %ymm{{.*}}
; CHECK: ret
define <4 x i64> @vpaddq256_broadcast2_test(<4 x i64> %i, i64* %j.ptr) nounwind {
- %j = load i64* %j.ptr
+ %j = load i64, i64* %j.ptr
%j.0 = insertelement <4 x i64> undef, i64 %j, i32 0
%j.v = shufflevector <4 x i64> %j.0, <4 x i64> undef, <4 x i32> zeroinitializer
%x = add <4 x i64> %i, %j.v
@@ -50,7 +50,7 @@ define <8 x i32> @vpaddd256_test(<8 x i3
; CHECK: vpaddd (%rdi), %ymm{{.*}}
; CHECK: ret
define <8 x i32> @vpaddd256_fold_test(<8 x i32> %i, <8 x i32>* %j) nounwind {
- %tmp = load <8 x i32>* %j, align 4
+ %tmp = load <8 x i32>, <8 x i32>* %j, align 4
%x = add <8 x i32> %i, %tmp
ret <8 x i32> %x
}
@@ -88,7 +88,7 @@ define <8 x i32> @vpaddd256_maskz_test(<
; CHECK: ret
define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
- %j = load <8 x i32>* %j.ptr
+ %j = load <8 x i32>, <8 x i32>* %j.ptr
%x = add <8 x i32> %i, %j
%r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i
ret <8 x i32> %r
@@ -109,7 +109,7 @@ define <8 x i32> @vpaddd256_mask_broadca
; CHECK: ret
define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
- %j = load <8 x i32>* %j.ptr
+ %j = load <8 x i32>, <8 x i32>* %j.ptr
%x = add <8 x i32> %i, %j
%r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
ret <8 x i32> %r
@@ -341,7 +341,7 @@ define <4 x double> @test_mask_fold_vadd
<4 x double>* %j, <4 x i64> %mask1)
nounwind {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
- %tmp = load <4 x double>* %j
+ %tmp = load <4 x double>, <4 x double>* %j
%x = fadd <4 x double> %i, %tmp
%r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
ret <4 x double> %r
@@ -353,7 +353,7 @@ define <4 x double> @test_mask_fold_vadd
define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j,
<4 x i64> %mask1) nounwind {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
- %tmp = load <4 x double>* %j
+ %tmp = load <4 x double>, <4 x double>* %j
%x = fadd <4 x double> %i, %tmp
%r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer
ret <4 x double> %r
@@ -363,7 +363,7 @@ define <4 x double> @test_maskz_fold_vad
; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*}}
; CHECK: ret
define <4 x double> @test_broadcast2_vaddpd_256(<4 x double> %i, double* %j) nounwind {
- %tmp = load double* %j
+ %tmp = load double, double* %j
%b = insertelement <4 x double> undef, double %tmp, i32 0
%c = shufflevector <4 x double> %b, <4 x double> undef,
<4 x i32> zeroinitializer
@@ -377,7 +377,7 @@ define <4 x double> @test_broadcast2_vad
define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i,
double* %j, <4 x i64> %mask1) nounwind {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
- %tmp = load double* %j
+ %tmp = load double, double* %j
%b = insertelement <4 x double> undef, double %tmp, i32 0
%c = shufflevector <4 x double> %b, <4 x double> undef,
<4 x i32> zeroinitializer
@@ -392,7 +392,7 @@ define <4 x double> @test_mask_broadcast
define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j,
<4 x i64> %mask1) nounwind {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
- %tmp = load double* %j
+ %tmp = load double, double* %j
%b = insertelement <4 x double> undef, double %tmp, i32 0
%c = shufflevector <4 x double> %b, <4 x double> undef,
<4 x i32> zeroinitializer
@@ -415,7 +415,7 @@ define <2 x i64> @vpaddq128_test(<2 x i6
; CHECK: vpaddq (%rdi), %xmm{{.*}}
; CHECK: ret
define <2 x i64> @vpaddq128_fold_test(<2 x i64> %i, <2 x i64>* %j) nounwind {
- %tmp = load <2 x i64>* %j, align 4
+ %tmp = load <2 x i64>, <2 x i64>* %j, align 4
%x = add <2 x i64> %i, %tmp
ret <2 x i64> %x
}
@@ -424,7 +424,7 @@ define <2 x i64> @vpaddq128_fold_test(<2
; CHECK: vpaddq (%rdi){1to2}, %xmm{{.*}}
; CHECK: ret
define <2 x i64> @vpaddq128_broadcast2_test(<2 x i64> %i, i64* %j) nounwind {
- %tmp = load i64* %j
+ %tmp = load i64, i64* %j
%j.0 = insertelement <2 x i64> undef, i64 %tmp, i32 0
%j.1 = insertelement <2 x i64> %j.0, i64 %tmp, i32 1
%x = add <2 x i64> %i, %j.1
@@ -443,7 +443,7 @@ define <4 x i32> @vpaddd128_test(<4 x i3
; CHECK: vpaddd (%rdi), %xmm{{.*}}
; CHECK: ret
define <4 x i32> @vpaddd128_fold_test(<4 x i32> %i, <4 x i32>* %j) nounwind {
- %tmp = load <4 x i32>* %j, align 4
+ %tmp = load <4 x i32>, <4 x i32>* %j, align 4
%x = add <4 x i32> %i, %tmp
ret <4 x i32> %x
}
@@ -481,7 +481,7 @@ define <4 x i32> @vpaddd128_maskz_test(<
; CHECK: ret
define <4 x i32> @vpaddd128_mask_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
- %j = load <4 x i32>* %j.ptr
+ %j = load <4 x i32>, <4 x i32>* %j.ptr
%x = add <4 x i32> %i, %j
%r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i
ret <4 x i32> %r
@@ -502,7 +502,7 @@ define <4 x i32> @vpaddd128_mask_broadca
; CHECK: ret
define <4 x i32> @vpaddd128_maskz_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
- %j = load <4 x i32>* %j.ptr
+ %j = load <4 x i32>, <4 x i32>* %j.ptr
%x = add <4 x i32> %i, %j
%r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer
ret <4 x i32> %r
@@ -735,7 +735,7 @@ define <2 x double> @test_mask_fold_vadd
<2 x double>* %j, <2 x i64> %mask1)
nounwind {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
- %tmp = load <2 x double>* %j
+ %tmp = load <2 x double>, <2 x double>* %j
%x = fadd <2 x double> %i, %tmp
%r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
ret <2 x double> %r
@@ -747,7 +747,7 @@ define <2 x double> @test_mask_fold_vadd
define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %j,
<2 x i64> %mask1) nounwind {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
- %tmp = load <2 x double>* %j
+ %tmp = load <2 x double>, <2 x double>* %j
%x = fadd <2 x double> %i, %tmp
%r = select <2 x i1> %mask, <2 x double> %x, <2 x double> zeroinitializer
ret <2 x double> %r
@@ -757,7 +757,7 @@ define <2 x double> @test_maskz_fold_vad
; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*}}
; CHECK: ret
define <2 x double> @test_broadcast2_vaddpd_128(<2 x double> %i, double* %j) nounwind {
- %tmp = load double* %j
+ %tmp = load double, double* %j
%j.0 = insertelement <2 x double> undef, double %tmp, i64 0
%j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1
%x = fadd <2 x double> %j.1, %i
@@ -771,7 +771,7 @@ define <2 x double> @test_mask_broadcast
double* %j, <2 x i64> %mask1)
nounwind {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
- %tmp = load double* %j
+ %tmp = load double, double* %j
%j.0 = insertelement <2 x double> undef, double %tmp, i64 0
%j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1
%x = fadd <2 x double> %j.1, %i
@@ -785,7 +785,7 @@ define <2 x double> @test_mask_broadcast
define <2 x double> @test_maskz_broadcast_vaddpd_128(<2 x double> %i, double* %j,
<2 x i64> %mask1) nounwind {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
- %tmp = load double* %j
+ %tmp = load double, double* %j
%j.0 = insertelement <2 x double> undef, double %tmp, i64 0
%j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1
%x = fadd <2 x double> %j.1, %i
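The broadcast tests in this file build the splat two different ways: by filling each lane with its own insertelement (as in vpaddq128_broadcast2_test above), or with a single insertelement followed by a shufflevector whose zeroinitializer shuffle mask copies lane 0 into every lane (as in vpaddq256_broadcast2_test). A sketch of the latter, hypothetical names:

  %s  = load i64, i64* %p
  %v0 = insertelement <4 x i64> undef, i64 %s, i32 0
  %v  = shufflevector <4 x i64> %v0, <4 x i64> undef, <4 x i32> zeroinitializer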
Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll Fri Feb 27 15:17:42 2015
@@ -805,7 +805,7 @@ define <4 x double> @test_x86_mask_blend
define <4 x double> @test_x86_mask_blend_pd_256_memop(<4 x double> %a, <4 x double>* %ptr, i8 %mask) {
; CHECK-LABEL: test_x86_mask_blend_pd_256_memop
; CHECK: vblendmpd (%
- %b = load <4 x double>* %ptr
+ %b = load <4 x double>, <4 x double>* %ptr
%res = call <4 x double> @llvm.x86.avx512.mask.blend.pd.256(<4 x double> %a, <4 x double> %b, i8 %mask) ; <<4 x double>> [#uses=1]
ret <4 x double> %res
}
@@ -843,7 +843,7 @@ define <2 x double> @test_x86_mask_blend
define <2 x double> @test_x86_mask_blend_pd_128_memop(<2 x double> %a, <2 x double>* %ptr, i8 %mask) {
; CHECK-LABEL: test_x86_mask_blend_pd_128_memop
; CHECK: vblendmpd (%
- %b = load <2 x double>* %ptr
+ %b = load <2 x double>, <2 x double>* %ptr
%res = call <2 x double> @llvm.x86.avx512.mask.blend.pd.128(<2 x double> %a, <2 x double> %b, i8 %mask) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
Modified: llvm/trunk/test/CodeGen/X86/avx512vl-mov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-mov.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-mov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-mov.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
; CHECK: ret
define <8 x i32> @test_256_1(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x i32>*
- %res = load <8 x i32>* %vaddr, align 1
+ %res = load <8 x i32>, <8 x i32>* %vaddr, align 1
ret <8 x i32>%res
}
@@ -14,7 +14,7 @@ define <8 x i32> @test_256_1(i8 * %addr)
; CHECK: ret
define <8 x i32> @test_256_2(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x i32>*
- %res = load <8 x i32>* %vaddr, align 32
+ %res = load <8 x i32>, <8 x i32>* %vaddr, align 32
ret <8 x i32>%res
}
@@ -50,7 +50,7 @@ define void @test_256_5(i8 * %addr, <8 x
; CHECK: ret
define <4 x i64> @test_256_6(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x i64>*
- %res = load <4 x i64>* %vaddr, align 32
+ %res = load <4 x i64>, <4 x i64>* %vaddr, align 32
ret <4 x i64>%res
}
@@ -68,7 +68,7 @@ define void @test_256_7(i8 * %addr, <4 x
; CHECK: ret
define <4 x i64> @test_256_8(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x i64>*
- %res = load <4 x i64>* %vaddr, align 1
+ %res = load <4 x i64>, <4 x i64>* %vaddr, align 1
ret <4 x i64>%res
}
@@ -86,7 +86,7 @@ define void @test_256_9(i8 * %addr, <4 x
; CHECK: ret
define <4 x double> @test_256_10(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x double>*
- %res = load <4 x double>* %vaddr, align 32
+ %res = load <4 x double>, <4 x double>* %vaddr, align 32
ret <4 x double>%res
}
@@ -104,7 +104,7 @@ define void @test_256_11(i8 * %addr, <8
; CHECK: ret
define <8 x float> @test_256_12(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x float>*
- %res = load <8 x float>* %vaddr, align 32
+ %res = load <8 x float>, <8 x float>* %vaddr, align 32
ret <8 x float>%res
}
@@ -122,7 +122,7 @@ define void @test_256_13(i8 * %addr, <4
; CHECK: ret
define <4 x double> @test_256_14(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x double>*
- %res = load <4 x double>* %vaddr, align 1
+ %res = load <4 x double>, <4 x double>* %vaddr, align 1
ret <4 x double>%res
}
@@ -140,7 +140,7 @@ define void @test_256_15(i8 * %addr, <8
; CHECK: ret
define <8 x float> @test_256_16(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x float>*
- %res = load <8 x float>* %vaddr, align 1
+ %res = load <8 x float>, <8 x float>* %vaddr, align 1
ret <8 x float>%res
}
@@ -150,7 +150,7 @@ define <8 x float> @test_256_16(i8 * %ad
define <8 x i32> @test_256_17(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i32>*
- %r = load <8 x i32>* %vaddr, align 32
+ %r = load <8 x i32>, <8 x i32>* %vaddr, align 32
%res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> %old
ret <8 x i32>%res
}
@@ -161,7 +161,7 @@ define <8 x i32> @test_256_17(i8 * %addr
define <8 x i32> @test_256_18(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i32>*
- %r = load <8 x i32>* %vaddr, align 1
+ %r = load <8 x i32>, <8 x i32>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> %old
ret <8 x i32>%res
}
@@ -172,7 +172,7 @@ define <8 x i32> @test_256_18(i8 * %addr
define <8 x i32> @test_256_19(i8 * %addr, <8 x i32> %mask1) {
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i32>*
- %r = load <8 x i32>* %vaddr, align 32
+ %r = load <8 x i32>, <8 x i32>* %vaddr, align 32
%res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> zeroinitializer
ret <8 x i32>%res
}
@@ -183,7 +183,7 @@ define <8 x i32> @test_256_19(i8 * %addr
define <8 x i32> @test_256_20(i8 * %addr, <8 x i32> %mask1) {
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i32>*
- %r = load <8 x i32>* %vaddr, align 1
+ %r = load <8 x i32>, <8 x i32>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> zeroinitializer
ret <8 x i32>%res
}
@@ -194,7 +194,7 @@ define <8 x i32> @test_256_20(i8 * %addr
define <4 x i64> @test_256_21(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i64>*
- %r = load <4 x i64>* %vaddr, align 32
+ %r = load <4 x i64>, <4 x i64>* %vaddr, align 32
%res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> %old
ret <4 x i64>%res
}
@@ -205,7 +205,7 @@ define <4 x i64> @test_256_21(i8 * %addr
define <4 x i64> @test_256_22(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i64>*
- %r = load <4 x i64>* %vaddr, align 1
+ %r = load <4 x i64>, <4 x i64>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> %old
ret <4 x i64>%res
}
@@ -216,7 +216,7 @@ define <4 x i64> @test_256_22(i8 * %addr
define <4 x i64> @test_256_23(i8 * %addr, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i64>*
- %r = load <4 x i64>* %vaddr, align 32
+ %r = load <4 x i64>, <4 x i64>* %vaddr, align 32
%res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> zeroinitializer
ret <4 x i64>%res
}
@@ -227,7 +227,7 @@ define <4 x i64> @test_256_23(i8 * %addr
define <4 x i64> @test_256_24(i8 * %addr, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i64>*
- %r = load <4 x i64>* %vaddr, align 1
+ %r = load <4 x i64>, <4 x i64>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> zeroinitializer
ret <4 x i64>%res
}
@@ -238,7 +238,7 @@ define <4 x i64> @test_256_24(i8 * %addr
define <8 x float> @test_256_25(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
%mask = fcmp one <8 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x float>*
- %r = load <8 x float>* %vaddr, align 32
+ %r = load <8 x float>, <8 x float>* %vaddr, align 32
%res = select <8 x i1> %mask, <8 x float> %r, <8 x float> %old
ret <8 x float>%res
}
@@ -249,7 +249,7 @@ define <8 x float> @test_256_25(i8 * %ad
define <8 x float> @test_256_26(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
%mask = fcmp one <8 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x float>*
- %r = load <8 x float>* %vaddr, align 1
+ %r = load <8 x float>, <8 x float>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x float> %r, <8 x float> %old
ret <8 x float>%res
}
@@ -260,7 +260,7 @@ define <8 x float> @test_256_26(i8 * %ad
define <8 x float> @test_256_27(i8 * %addr, <8 x float> %mask1) {
%mask = fcmp one <8 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x float>*
- %r = load <8 x float>* %vaddr, align 32
+ %r = load <8 x float>, <8 x float>* %vaddr, align 32
%res = select <8 x i1> %mask, <8 x float> %r, <8 x float> zeroinitializer
ret <8 x float>%res
}
@@ -271,7 +271,7 @@ define <8 x float> @test_256_27(i8 * %ad
define <8 x float> @test_256_28(i8 * %addr, <8 x float> %mask1) {
%mask = fcmp one <8 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x float>*
- %r = load <8 x float>* %vaddr, align 1
+ %r = load <8 x float>, <8 x float>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x float> %r, <8 x float> zeroinitializer
ret <8 x float>%res
}
@@ -282,7 +282,7 @@ define <8 x float> @test_256_28(i8 * %ad
define <4 x double> @test_256_29(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x double>*
- %r = load <4 x double>* %vaddr, align 32
+ %r = load <4 x double>, <4 x double>* %vaddr, align 32
%res = select <4 x i1> %mask, <4 x double> %r, <4 x double> %old
ret <4 x double>%res
}
@@ -293,7 +293,7 @@ define <4 x double> @test_256_29(i8 * %a
define <4 x double> @test_256_30(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x double>*
- %r = load <4 x double>* %vaddr, align 1
+ %r = load <4 x double>, <4 x double>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x double> %r, <4 x double> %old
ret <4 x double>%res
}
@@ -304,7 +304,7 @@ define <4 x double> @test_256_30(i8 * %a
define <4 x double> @test_256_31(i8 * %addr, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x double>*
- %r = load <4 x double>* %vaddr, align 32
+ %r = load <4 x double>, <4 x double>* %vaddr, align 32
%res = select <4 x i1> %mask, <4 x double> %r, <4 x double> zeroinitializer
ret <4 x double>%res
}
@@ -315,7 +315,7 @@ define <4 x double> @test_256_31(i8 * %a
define <4 x double> @test_256_32(i8 * %addr, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x double>*
- %r = load <4 x double>* %vaddr, align 1
+ %r = load <4 x double>, <4 x double>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x double> %r, <4 x double> zeroinitializer
ret <4 x double>%res
}
@@ -325,7 +325,7 @@ define <4 x double> @test_256_32(i8 * %a
; CHECK: ret
define <4 x i32> @test_128_1(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x i32>*
- %res = load <4 x i32>* %vaddr, align 1
+ %res = load <4 x i32>, <4 x i32>* %vaddr, align 1
ret <4 x i32>%res
}
@@ -334,7 +334,7 @@ define <4 x i32> @test_128_1(i8 * %addr)
; CHECK: ret
define <4 x i32> @test_128_2(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x i32>*
- %res = load <4 x i32>* %vaddr, align 16
+ %res = load <4 x i32>, <4 x i32>* %vaddr, align 16
ret <4 x i32>%res
}
@@ -370,7 +370,7 @@ define void @test_128_5(i8 * %addr, <4 x
; CHECK: ret
define <2 x i64> @test_128_6(i8 * %addr) {
%vaddr = bitcast i8* %addr to <2 x i64>*
- %res = load <2 x i64>* %vaddr, align 16
+ %res = load <2 x i64>, <2 x i64>* %vaddr, align 16
ret <2 x i64>%res
}
@@ -388,7 +388,7 @@ define void @test_128_7(i8 * %addr, <2 x
; CHECK: ret
define <2 x i64> @test_128_8(i8 * %addr) {
%vaddr = bitcast i8* %addr to <2 x i64>*
- %res = load <2 x i64>* %vaddr, align 1
+ %res = load <2 x i64>, <2 x i64>* %vaddr, align 1
ret <2 x i64>%res
}
@@ -406,7 +406,7 @@ define void @test_128_9(i8 * %addr, <2 x
; CHECK: ret
define <2 x double> @test_128_10(i8 * %addr) {
%vaddr = bitcast i8* %addr to <2 x double>*
- %res = load <2 x double>* %vaddr, align 16
+ %res = load <2 x double>, <2 x double>* %vaddr, align 16
ret <2 x double>%res
}
@@ -424,7 +424,7 @@ define void @test_128_11(i8 * %addr, <4
; CHECK: ret
define <4 x float> @test_128_12(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x float>*
- %res = load <4 x float>* %vaddr, align 16
+ %res = load <4 x float>, <4 x float>* %vaddr, align 16
ret <4 x float>%res
}
@@ -442,7 +442,7 @@ define void @test_128_13(i8 * %addr, <2
; CHECK: ret
define <2 x double> @test_128_14(i8 * %addr) {
%vaddr = bitcast i8* %addr to <2 x double>*
- %res = load <2 x double>* %vaddr, align 1
+ %res = load <2 x double>, <2 x double>* %vaddr, align 1
ret <2 x double>%res
}
@@ -460,7 +460,7 @@ define void @test_128_15(i8 * %addr, <4
; CHECK: ret
define <4 x float> @test_128_16(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x float>*
- %res = load <4 x float>* %vaddr, align 1
+ %res = load <4 x float>, <4 x float>* %vaddr, align 1
ret <4 x float>%res
}
@@ -470,7 +470,7 @@ define <4 x float> @test_128_16(i8 * %ad
define <4 x i32> @test_128_17(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i32>*
- %r = load <4 x i32>* %vaddr, align 16
+ %r = load <4 x i32>, <4 x i32>* %vaddr, align 16
%res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> %old
ret <4 x i32>%res
}
@@ -481,7 +481,7 @@ define <4 x i32> @test_128_17(i8 * %addr
define <4 x i32> @test_128_18(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i32>*
- %r = load <4 x i32>* %vaddr, align 1
+ %r = load <4 x i32>, <4 x i32>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> %old
ret <4 x i32>%res
}
@@ -492,7 +492,7 @@ define <4 x i32> @test_128_18(i8 * %addr
define <4 x i32> @test_128_19(i8 * %addr, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i32>*
- %r = load <4 x i32>* %vaddr, align 16
+ %r = load <4 x i32>, <4 x i32>* %vaddr, align 16
%res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> zeroinitializer
ret <4 x i32>%res
}
@@ -503,7 +503,7 @@ define <4 x i32> @test_128_19(i8 * %addr
define <4 x i32> @test_128_20(i8 * %addr, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i32>*
- %r = load <4 x i32>* %vaddr, align 1
+ %r = load <4 x i32>, <4 x i32>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> zeroinitializer
ret <4 x i32>%res
}
@@ -514,7 +514,7 @@ define <4 x i32> @test_128_20(i8 * %addr
define <2 x i64> @test_128_21(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x i64>*
- %r = load <2 x i64>* %vaddr, align 16
+ %r = load <2 x i64>, <2 x i64>* %vaddr, align 16
%res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> %old
ret <2 x i64>%res
}
@@ -525,7 +525,7 @@ define <2 x i64> @test_128_21(i8 * %addr
define <2 x i64> @test_128_22(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x i64>*
- %r = load <2 x i64>* %vaddr, align 1
+ %r = load <2 x i64>, <2 x i64>* %vaddr, align 1
%res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> %old
ret <2 x i64>%res
}
@@ -536,7 +536,7 @@ define <2 x i64> @test_128_22(i8 * %addr
define <2 x i64> @test_128_23(i8 * %addr, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x i64>*
- %r = load <2 x i64>* %vaddr, align 16
+ %r = load <2 x i64>, <2 x i64>* %vaddr, align 16
%res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> zeroinitializer
ret <2 x i64>%res
}
@@ -547,7 +547,7 @@ define <2 x i64> @test_128_23(i8 * %addr
define <2 x i64> @test_128_24(i8 * %addr, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x i64>*
- %r = load <2 x i64>* %vaddr, align 1
+ %r = load <2 x i64>, <2 x i64>* %vaddr, align 1
%res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> zeroinitializer
ret <2 x i64>%res
}
@@ -558,7 +558,7 @@ define <2 x i64> @test_128_24(i8 * %addr
define <4 x float> @test_128_25(i8 * %addr, <4 x float> %old, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x float>*
- %r = load <4 x float>* %vaddr, align 16
+ %r = load <4 x float>, <4 x float>* %vaddr, align 16
%res = select <4 x i1> %mask, <4 x float> %r, <4 x float> %old
ret <4 x float>%res
}
@@ -569,7 +569,7 @@ define <4 x float> @test_128_25(i8 * %ad
define <4 x float> @test_128_26(i8 * %addr, <4 x float> %old, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x float>*
- %r = load <4 x float>* %vaddr, align 1
+ %r = load <4 x float>, <4 x float>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x float> %r, <4 x float> %old
ret <4 x float>%res
}
@@ -580,7 +580,7 @@ define <4 x float> @test_128_26(i8 * %ad
define <4 x float> @test_128_27(i8 * %addr, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x float>*
- %r = load <4 x float>* %vaddr, align 16
+ %r = load <4 x float>, <4 x float>* %vaddr, align 16
%res = select <4 x i1> %mask, <4 x float> %r, <4 x float> zeroinitializer
ret <4 x float>%res
}
@@ -591,7 +591,7 @@ define <4 x float> @test_128_27(i8 * %ad
define <4 x float> @test_128_28(i8 * %addr, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x float>*
- %r = load <4 x float>* %vaddr, align 1
+ %r = load <4 x float>, <4 x float>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x float> %r, <4 x float> zeroinitializer
ret <4 x float>%res
}
@@ -602,7 +602,7 @@ define <4 x float> @test_128_28(i8 * %ad
define <2 x double> @test_128_29(i8 * %addr, <2 x double> %old, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x double>*
- %r = load <2 x double>* %vaddr, align 16
+ %r = load <2 x double>, <2 x double>* %vaddr, align 16
%res = select <2 x i1> %mask, <2 x double> %r, <2 x double> %old
ret <2 x double>%res
}
@@ -613,7 +613,7 @@ define <2 x double> @test_128_29(i8 * %a
define <2 x double> @test_128_30(i8 * %addr, <2 x double> %old, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x double>*
- %r = load <2 x double>* %vaddr, align 1
+ %r = load <2 x double>, <2 x double>* %vaddr, align 1
%res = select <2 x i1> %mask, <2 x double> %r, <2 x double> %old
ret <2 x double>%res
}
@@ -624,7 +624,7 @@ define <2 x double> @test_128_30(i8 * %a
define <2 x double> @test_128_31(i8 * %addr, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x double>*
- %r = load <2 x double>* %vaddr, align 16
+ %r = load <2 x double>, <2 x double>* %vaddr, align 16
%res = select <2 x i1> %mask, <2 x double> %r, <2 x double> zeroinitializer
ret <2 x double>%res
}
@@ -635,7 +635,7 @@ define <2 x double> @test_128_31(i8 * %a
define <2 x double> @test_128_32(i8 * %addr, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x double>*
- %r = load <2 x double>* %vaddr, align 1
+ %r = load <2 x double>, <2 x double>* %vaddr, align 1
%res = select <2 x i1> %mask, <2 x double> %r, <2 x double> zeroinitializer
ret <2 x double>%res
}
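A last pattern worth calling out: the mov tests pair each vector type with both an align 1 load and a naturally aligned one (align 16 for 128-bit vectors, align 32 for 256-bit), so the unaligned and aligned move forms are exercised separately. Hypothetical %vaddr:

  %u = load <8 x i32>, <8 x i32>* %vaddr, align 1   ; unaligned variant
  %a = load <8 x i32>, <8 x i32>* %vaddr, align 32  ; aligned variant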
Modified: llvm/trunk/test/CodeGen/X86/avx512vl-vec-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-vec-cmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-vec-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-vec-cmp.ll Fri Feb 27 15:17:42 2015
@@ -45,7 +45,7 @@ define <4 x i64> @test256_4(<4 x i64> %x
; CHECK: vmovdqa32
; CHECK: ret
define <8 x i32> @test256_5(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
- %y = load <8 x i32>* %yp, align 4
+ %y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
ret <8 x i32> %max
@@ -56,7 +56,7 @@ define <8 x i32> @test256_5(<8 x i32> %x
; CHECK: vmovdqa32
; CHECK: ret
define <8 x i32> @test256_6(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
- %y = load <8 x i32>* %y.ptr, align 4
+ %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sgt <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
ret <8 x i32> %max
@@ -67,7 +67,7 @@ define <8 x i32> @test256_6(<8 x i32> %x
; CHECK: vmovdqa32
; CHECK: ret
define <8 x i32> @test256_7(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
- %y = load <8 x i32>* %y.ptr, align 4
+ %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sle <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
ret <8 x i32> %max
@@ -78,7 +78,7 @@ define <8 x i32> @test256_7(<8 x i32> %x
; CHECK: vmovdqa32
; CHECK: ret
define <8 x i32> @test256_8(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
- %y = load <8 x i32>* %y.ptr, align 4
+ %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp ule <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
ret <8 x i32> %max
@@ -114,7 +114,7 @@ define <4 x i64> @test256_10(<4 x i64> %
; CHECK: ret
define <4 x i64> @test256_11(<4 x i64> %x, <4 x i64>* %y.ptr, <4 x i64> %x1, <4 x i64> %y1) nounwind {
%mask1 = icmp sgt <4 x i64> %x1, %y1
- %y = load <4 x i64>* %y.ptr, align 4
+ %y = load <4 x i64>, <4 x i64>* %y.ptr, align 4
%mask0 = icmp sgt <4 x i64> %x, %y
%mask = select <4 x i1> %mask0, <4 x i1> %mask1, <4 x i1> zeroinitializer
%max = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %x1
@@ -127,7 +127,7 @@ define <4 x i64> @test256_11(<4 x i64> %
; CHECK: ret
define <8 x i32> @test256_12(<8 x i32> %x, <8 x i32>* %y.ptr, <8 x i32> %x1, <8 x i32> %y1) nounwind {
%mask1 = icmp sge <8 x i32> %x1, %y1
- %y = load <8 x i32>* %y.ptr, align 4
+ %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask0 = icmp ule <8 x i32> %x, %y
%mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -139,7 +139,7 @@ define <8 x i32> @test256_12(<8 x i32> %
; CHECK: vmovdqa64
; CHECK: ret
define <4 x i64> @test256_13(<4 x i64> %x, <4 x i64> %x1, i64* %yb.ptr) nounwind {
- %yb = load i64* %yb.ptr, align 4
+ %yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <4 x i64> undef, i64 %yb, i32 0
%y = shufflevector <4 x i64> %y.0, <4 x i64> undef, <4 x i32> zeroinitializer
%mask = icmp eq <4 x i64> %x, %y
@@ -152,7 +152,7 @@ define <4 x i64> @test256_13(<4 x i64> %
; CHECK: vmovdqa32
; CHECK: ret
define <8 x i32> @test256_14(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1) nounwind {
- %yb = load i32* %yb.ptr, align 4
+ %yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <8 x i32> undef, i32 %yb, i32 0
%y = shufflevector <8 x i32> %y.0, <8 x i32> undef, <8 x i32> zeroinitializer
%mask = icmp sle <8 x i32> %x, %y
@@ -166,7 +166,7 @@ define <8 x i32> @test256_14(<8 x i32> %
; CHECK: ret
define <8 x i32> @test256_15(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1, <8 x i32> %y1) nounwind {
%mask1 = icmp sge <8 x i32> %x1, %y1
- %yb = load i32* %yb.ptr, align 4
+ %yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <8 x i32> undef, i32 %yb, i32 0
%y = shufflevector <8 x i32> %y.0, <8 x i32> undef, <8 x i32> zeroinitializer
%mask0 = icmp sgt <8 x i32> %x, %y
@@ -181,7 +181,7 @@ define <8 x i32> @test256_15(<8 x i32> %
; CHECK: ret
define <4 x i64> @test256_16(<4 x i64> %x, i64* %yb.ptr, <4 x i64> %x1, <4 x i64> %y1) nounwind {
%mask1 = icmp sge <4 x i64> %x1, %y1
- %yb = load i64* %yb.ptr, align 4
+ %yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <4 x i64> undef, i64 %yb, i32 0
%y = shufflevector <4 x i64> %y.0, <4 x i64> undef, <4 x i32> zeroinitializer
%mask0 = icmp sgt <4 x i64> %x, %y
@@ -235,7 +235,7 @@ define <2 x i64> @test128_4(<2 x i64> %x
; CHECK: vmovdqa32
; CHECK: ret
define <4 x i32> @test128_5(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwind {
- %y = load <4 x i32>* %yp, align 4
+ %y = load <4 x i32>, <4 x i32>* %yp, align 4
%mask = icmp eq <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
ret <4 x i32> %max
@@ -246,7 +246,7 @@ define <4 x i32> @test128_5(<4 x i32> %x
; CHECK: vmovdqa32
; CHECK: ret
define <4 x i32> @test128_6(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
- %y = load <4 x i32>* %y.ptr, align 4
+ %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sgt <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
ret <4 x i32> %max
@@ -257,7 +257,7 @@ define <4 x i32> @test128_6(<4 x i32> %x
; CHECK: vmovdqa32
; CHECK: ret
define <4 x i32> @test128_7(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
- %y = load <4 x i32>* %y.ptr, align 4
+ %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sle <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
ret <4 x i32> %max
@@ -268,7 +268,7 @@ define <4 x i32> @test128_7(<4 x i32> %x
; CHECK: vmovdqa32
; CHECK: ret
define <4 x i32> @test128_8(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
- %y = load <4 x i32>* %y.ptr, align 4
+ %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ule <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
ret <4 x i32> %max
@@ -304,7 +304,7 @@ define <2 x i64> @test128_10(<2 x i64> %
; CHECK: ret
define <2 x i64> @test128_11(<2 x i64> %x, <2 x i64>* %y.ptr, <2 x i64> %x1, <2 x i64> %y1) nounwind {
%mask1 = icmp sgt <2 x i64> %x1, %y1
- %y = load <2 x i64>* %y.ptr, align 4
+ %y = load <2 x i64>, <2 x i64>* %y.ptr, align 4
%mask0 = icmp sgt <2 x i64> %x, %y
%mask = select <2 x i1> %mask0, <2 x i1> %mask1, <2 x i1> zeroinitializer
%max = select <2 x i1> %mask, <2 x i64> %x, <2 x i64> %x1
@@ -317,7 +317,7 @@ define <2 x i64> @test128_11(<2 x i64> %
; CHECK: ret
define <4 x i32> @test128_12(<4 x i32> %x, <4 x i32>* %y.ptr, <4 x i32> %x1, <4 x i32> %y1) nounwind {
%mask1 = icmp sge <4 x i32> %x1, %y1
- %y = load <4 x i32>* %y.ptr, align 4
+ %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask0 = icmp ule <4 x i32> %x, %y
%mask = select <4 x i1> %mask0, <4 x i1> %mask1, <4 x i1> zeroinitializer
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
@@ -329,7 +329,7 @@ define <4 x i32> @test128_12(<4 x i32> %
; CHECK: vmovdqa64
; CHECK: ret
define <2 x i64> @test128_13(<2 x i64> %x, <2 x i64> %x1, i64* %yb.ptr) nounwind {
- %yb = load i64* %yb.ptr, align 4
+ %yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <2 x i64> undef, i64 %yb, i32 0
%y = insertelement <2 x i64> %y.0, i64 %yb, i32 1
%mask = icmp eq <2 x i64> %x, %y
@@ -342,7 +342,7 @@ define <2 x i64> @test128_13(<2 x i64> %
; CHECK: vmovdqa32
; CHECK: ret
define <4 x i32> @test128_14(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1) nounwind {
- %yb = load i32* %yb.ptr, align 4
+ %yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <4 x i32> undef, i32 %yb, i32 0
%y = shufflevector <4 x i32> %y.0, <4 x i32> undef, <4 x i32> zeroinitializer
%mask = icmp sle <4 x i32> %x, %y
@@ -356,7 +356,7 @@ define <4 x i32> @test128_14(<4 x i32> %
; CHECK: ret
define <4 x i32> @test128_15(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1, <4 x i32> %y1) nounwind {
%mask1 = icmp sge <4 x i32> %x1, %y1
- %yb = load i32* %yb.ptr, align 4
+ %yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <4 x i32> undef, i32 %yb, i32 0
%y = shufflevector <4 x i32> %y.0, <4 x i32> undef, <4 x i32> zeroinitializer
%mask0 = icmp sgt <4 x i32> %x, %y
@@ -371,7 +371,7 @@ define <4 x i32> @test128_15(<4 x i32> %
; CHECK: ret
define <2 x i64> @test128_16(<2 x i64> %x, i64* %yb.ptr, <2 x i64> %x1, <2 x i64> %y1) nounwind {
%mask1 = icmp sge <2 x i64> %x1, %y1
- %yb = load i64* %yb.ptr, align 4
+ %yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <2 x i64> undef, i64 %yb, i32 0
%y = insertelement <2 x i64> %y.0, i64 %yb, i32 1
%mask0 = icmp sgt <2 x i64> %x, %y
Modified: llvm/trunk/test/CodeGen/X86/bitcast-mmx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bitcast-mmx.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bitcast-mmx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bitcast-mmx.ll Fri Feb 27 15:17:42 2015
@@ -64,8 +64,8 @@ define i64 @t3(<1 x i64>* %y, i32* %n) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %y to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %n, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %n, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
Modified: llvm/trunk/test/CodeGen/X86/block-placement.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/block-placement.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/block-placement.ll (original)
+++ llvm/trunk/test/CodeGen/X86/block-placement.ll Fri Feb 27 15:17:42 2015
@@ -25,7 +25,7 @@ define i32 @test_ifchains(i32 %i, i32* %
entry:
%gep1 = getelementptr i32, i32* %a, i32 1
- %val1 = load i32* %gep1
+ %val1 = load i32, i32* %gep1
%cond1 = icmp ugt i32 %val1, 1
br i1 %cond1, label %then1, label %else1, !prof !0
@@ -35,7 +35,7 @@ then1:
else1:
%gep2 = getelementptr i32, i32* %a, i32 2
- %val2 = load i32* %gep2
+ %val2 = load i32, i32* %gep2
%cond2 = icmp ugt i32 %val2, 2
br i1 %cond2, label %then2, label %else2, !prof !0
@@ -45,7 +45,7 @@ then2:
else2:
%gep3 = getelementptr i32, i32* %a, i32 3
- %val3 = load i32* %gep3
+ %val3 = load i32, i32* %gep3
%cond3 = icmp ugt i32 %val3, 3
br i1 %cond3, label %then3, label %else3, !prof !0
@@ -55,7 +55,7 @@ then3:
else3:
%gep4 = getelementptr i32, i32* %a, i32 4
- %val4 = load i32* %gep4
+ %val4 = load i32, i32* %gep4
%cond4 = icmp ugt i32 %val4, 4
br i1 %cond4, label %then4, label %else4, !prof !0
@@ -65,7 +65,7 @@ then4:
else4:
%gep5 = getelementptr i32, i32* %a, i32 3
- %val5 = load i32* %gep5
+ %val5 = load i32, i32* %gep5
%cond5 = icmp ugt i32 %val5, 3
br i1 %cond5, label %then5, label %exit, !prof !0
@@ -114,7 +114,7 @@ unlikely2:
body3:
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
%exitcond = icmp eq i32 %next, %i
@@ -167,7 +167,7 @@ bail3:
body4:
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
%exitcond = icmp eq i32 %next, %i
@@ -198,7 +198,7 @@ body0:
body1:
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%bailcond1 = icmp eq i32 %sum, 42
br label %body0
@@ -223,7 +223,7 @@ body0:
%iv = phi i32 [ 0, %entry ], [ %next, %body1 ]
%base = phi i32 [ 0, %entry ], [ %sum, %body1 ]
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%bailcond1 = icmp eq i32 %sum, 42
br i1 %bailcond1, label %exit, label %body1
@@ -253,7 +253,7 @@ body:
%iv = phi i32 [ 0, %entry ], [ %next, %body ]
%base = phi i32 [ 0, %entry ], [ %sum, %body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
%exitcond = icmp eq i32 %next, %i
@@ -280,7 +280,7 @@ entry:
loop.body.1:
%iv = phi i32 [ 0, %entry ], [ %next, %loop.body.2 ]
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %bidx = load i32* %arrayidx
+ %bidx = load i32, i32* %arrayidx
br label %inner.loop.body
inner.loop.body:
@@ -288,7 +288,7 @@ inner.loop.body:
%base = phi i32 [ 0, %loop.body.1 ], [ %sum, %inner.loop.body ]
%scaled_idx = mul i32 %bidx, %iv
%inner.arrayidx = getelementptr inbounds i32, i32* %b, i32 %scaled_idx
- %0 = load i32* %inner.arrayidx
+ %0 = load i32, i32* %inner.arrayidx
%sum = add nsw i32 %0, %base
%inner.next = add i32 %iv, 1
%inner.exitcond = icmp eq i32 %inner.next, %i
@@ -322,13 +322,13 @@ loop.body1:
br i1 undef, label %loop.body3, label %loop.body2
loop.body2:
- %ptr = load i32** undef, align 4
+ %ptr = load i32*, i32** undef, align 4
br label %loop.body3
loop.body3:
%myptr = phi i32* [ %ptr2, %loop.body5 ], [ %ptr, %loop.body2 ], [ undef, %loop.body1 ]
%bcmyptr = bitcast i32* %myptr to i32*
- %val = load i32* %bcmyptr, align 4
+ %val = load i32, i32* %bcmyptr, align 4
%comp = icmp eq i32 %val, 48
br i1 %comp, label %loop.body4, label %loop.body5
@@ -336,7 +336,7 @@ loop.body4:
br i1 undef, label %loop.header, label %loop.body5
loop.body5:
- %ptr2 = load i32** undef, align 4
+ %ptr2 = load i32*, i32** undef, align 4
br label %loop.body3
}
@@ -366,7 +366,7 @@ loop.header:
br i1 %comp0, label %bail, label %loop.body1
loop.body1:
- %val0 = load i32** undef, align 4
+ %val0 = load i32*, i32** undef, align 4
br i1 undef, label %loop.body2, label %loop.inner1.begin
loop.body2:
@@ -375,7 +375,7 @@ loop.body2:
loop.body3:
%ptr1 = getelementptr inbounds i32, i32* %val0, i32 0
%castptr1 = bitcast i32* %ptr1 to i32**
- %val1 = load i32** %castptr1, align 4
+ %val1 = load i32*, i32** %castptr1, align 4
br label %loop.inner1.begin
loop.inner1.begin:
@@ -387,7 +387,7 @@ loop.inner1.begin:
loop.inner1.end:
%ptr2 = getelementptr inbounds i32, i32* %valphi, i32 0
%castptr2 = bitcast i32* %ptr2 to i32**
- %val2 = load i32** %castptr2, align 4
+ %val2 = load i32*, i32** %castptr2, align 4
br label %loop.inner1.begin
loop.body4.dead:
@@ -486,7 +486,7 @@ entry:
br i1 %cond, label %entry.if.then_crit_edge, label %lor.lhs.false, !prof !1
entry.if.then_crit_edge:
- %.pre14 = load i8* undef, align 1
+ %.pre14 = load i8, i8* undef, align 1
br label %if.then
lor.lhs.false:
@@ -616,7 +616,7 @@ body:
br label %loop2a
loop1:
- %next.load = load i32** undef
+ %next.load = load i32*, i32** undef
br i1 %comp.a, label %loop2a, label %loop2b
loop2a:
@@ -728,199 +728,199 @@ define void @many_unanalyzable_branches(
entry:
br label %0
- %val0 = load volatile float* undef
+ %val0 = load volatile float, float* undef
%cmp0 = fcmp une float %val0, undef
br i1 %cmp0, label %1, label %0
- %val1 = load volatile float* undef
+ %val1 = load volatile float, float* undef
%cmp1 = fcmp une float %val1, undef
br i1 %cmp1, label %2, label %1
- %val2 = load volatile float* undef
+ %val2 = load volatile float, float* undef
%cmp2 = fcmp une float %val2, undef
br i1 %cmp2, label %3, label %2
- %val3 = load volatile float* undef
+ %val3 = load volatile float, float* undef
%cmp3 = fcmp une float %val3, undef
br i1 %cmp3, label %4, label %3
- %val4 = load volatile float* undef
+ %val4 = load volatile float, float* undef
%cmp4 = fcmp une float %val4, undef
br i1 %cmp4, label %5, label %4
- %val5 = load volatile float* undef
+ %val5 = load volatile float, float* undef
%cmp5 = fcmp une float %val5, undef
br i1 %cmp5, label %6, label %5
- %val6 = load volatile float* undef
+ %val6 = load volatile float, float* undef
%cmp6 = fcmp une float %val6, undef
br i1 %cmp6, label %7, label %6
- %val7 = load volatile float* undef
+ %val7 = load volatile float, float* undef
%cmp7 = fcmp une float %val7, undef
br i1 %cmp7, label %8, label %7
- %val8 = load volatile float* undef
+ %val8 = load volatile float, float* undef
%cmp8 = fcmp une float %val8, undef
br i1 %cmp8, label %9, label %8
- %val9 = load volatile float* undef
+ %val9 = load volatile float, float* undef
%cmp9 = fcmp une float %val9, undef
br i1 %cmp9, label %10, label %9
- %val10 = load volatile float* undef
+ %val10 = load volatile float, float* undef
%cmp10 = fcmp une float %val10, undef
br i1 %cmp10, label %11, label %10
- %val11 = load volatile float* undef
+ %val11 = load volatile float, float* undef
%cmp11 = fcmp une float %val11, undef
br i1 %cmp11, label %12, label %11
- %val12 = load volatile float* undef
+ %val12 = load volatile float, float* undef
%cmp12 = fcmp une float %val12, undef
br i1 %cmp12, label %13, label %12
- %val13 = load volatile float* undef
+ %val13 = load volatile float, float* undef
%cmp13 = fcmp une float %val13, undef
br i1 %cmp13, label %14, label %13
- %val14 = load volatile float* undef
+ %val14 = load volatile float, float* undef
%cmp14 = fcmp une float %val14, undef
br i1 %cmp14, label %15, label %14
- %val15 = load volatile float* undef
+ %val15 = load volatile float, float* undef
%cmp15 = fcmp une float %val15, undef
br i1 %cmp15, label %16, label %15
- %val16 = load volatile float* undef
+ %val16 = load volatile float, float* undef
%cmp16 = fcmp une float %val16, undef
br i1 %cmp16, label %17, label %16
- %val17 = load volatile float* undef
+ %val17 = load volatile float, float* undef
%cmp17 = fcmp une float %val17, undef
br i1 %cmp17, label %18, label %17
- %val18 = load volatile float* undef
+ %val18 = load volatile float, float* undef
%cmp18 = fcmp une float %val18, undef
br i1 %cmp18, label %19, label %18
- %val19 = load volatile float* undef
+ %val19 = load volatile float, float* undef
%cmp19 = fcmp une float %val19, undef
br i1 %cmp19, label %20, label %19
- %val20 = load volatile float* undef
+ %val20 = load volatile float, float* undef
%cmp20 = fcmp une float %val20, undef
br i1 %cmp20, label %21, label %20
- %val21 = load volatile float* undef
+ %val21 = load volatile float, float* undef
%cmp21 = fcmp une float %val21, undef
br i1 %cmp21, label %22, label %21
- %val22 = load volatile float* undef
+ %val22 = load volatile float, float* undef
%cmp22 = fcmp une float %val22, undef
br i1 %cmp22, label %23, label %22
- %val23 = load volatile float* undef
+ %val23 = load volatile float, float* undef
%cmp23 = fcmp une float %val23, undef
br i1 %cmp23, label %24, label %23
- %val24 = load volatile float* undef
+ %val24 = load volatile float, float* undef
%cmp24 = fcmp une float %val24, undef
br i1 %cmp24, label %25, label %24
- %val25 = load volatile float* undef
+ %val25 = load volatile float, float* undef
%cmp25 = fcmp une float %val25, undef
br i1 %cmp25, label %26, label %25
- %val26 = load volatile float* undef
+ %val26 = load volatile float, float* undef
%cmp26 = fcmp une float %val26, undef
br i1 %cmp26, label %27, label %26
- %val27 = load volatile float* undef
+ %val27 = load volatile float, float* undef
%cmp27 = fcmp une float %val27, undef
br i1 %cmp27, label %28, label %27
- %val28 = load volatile float* undef
+ %val28 = load volatile float, float* undef
%cmp28 = fcmp une float %val28, undef
br i1 %cmp28, label %29, label %28
- %val29 = load volatile float* undef
+ %val29 = load volatile float, float* undef
%cmp29 = fcmp une float %val29, undef
br i1 %cmp29, label %30, label %29
- %val30 = load volatile float* undef
+ %val30 = load volatile float, float* undef
%cmp30 = fcmp une float %val30, undef
br i1 %cmp30, label %31, label %30
- %val31 = load volatile float* undef
+ %val31 = load volatile float, float* undef
%cmp31 = fcmp une float %val31, undef
br i1 %cmp31, label %32, label %31
- %val32 = load volatile float* undef
+ %val32 = load volatile float, float* undef
%cmp32 = fcmp une float %val32, undef
br i1 %cmp32, label %33, label %32
- %val33 = load volatile float* undef
+ %val33 = load volatile float, float* undef
%cmp33 = fcmp une float %val33, undef
br i1 %cmp33, label %34, label %33
- %val34 = load volatile float* undef
+ %val34 = load volatile float, float* undef
%cmp34 = fcmp une float %val34, undef
br i1 %cmp34, label %35, label %34
- %val35 = load volatile float* undef
+ %val35 = load volatile float, float* undef
%cmp35 = fcmp une float %val35, undef
br i1 %cmp35, label %36, label %35
- %val36 = load volatile float* undef
+ %val36 = load volatile float, float* undef
%cmp36 = fcmp une float %val36, undef
br i1 %cmp36, label %37, label %36
- %val37 = load volatile float* undef
+ %val37 = load volatile float, float* undef
%cmp37 = fcmp une float %val37, undef
br i1 %cmp37, label %38, label %37
- %val38 = load volatile float* undef
+ %val38 = load volatile float, float* undef
%cmp38 = fcmp une float %val38, undef
br i1 %cmp38, label %39, label %38
- %val39 = load volatile float* undef
+ %val39 = load volatile float, float* undef
%cmp39 = fcmp une float %val39, undef
br i1 %cmp39, label %40, label %39
- %val40 = load volatile float* undef
+ %val40 = load volatile float, float* undef
%cmp40 = fcmp une float %val40, undef
br i1 %cmp40, label %41, label %40
- %val41 = load volatile float* undef
+ %val41 = load volatile float, float* undef
%cmp41 = fcmp une float %val41, undef
br i1 %cmp41, label %42, label %41
- %val42 = load volatile float* undef
+ %val42 = load volatile float, float* undef
%cmp42 = fcmp une float %val42, undef
br i1 %cmp42, label %43, label %42
- %val43 = load volatile float* undef
+ %val43 = load volatile float, float* undef
%cmp43 = fcmp une float %val43, undef
br i1 %cmp43, label %44, label %43
- %val44 = load volatile float* undef
+ %val44 = load volatile float, float* undef
%cmp44 = fcmp une float %val44, undef
br i1 %cmp44, label %45, label %44
- %val45 = load volatile float* undef
+ %val45 = load volatile float, float* undef
%cmp45 = fcmp une float %val45, undef
br i1 %cmp45, label %46, label %45
- %val46 = load volatile float* undef
+ %val46 = load volatile float, float* undef
%cmp46 = fcmp une float %val46, undef
br i1 %cmp46, label %47, label %46
- %val47 = load volatile float* undef
+ %val47 = load volatile float, float* undef
%cmp47 = fcmp une float %val47, undef
br i1 %cmp47, label %48, label %47
- %val48 = load volatile float* undef
+ %val48 = load volatile float, float* undef
%cmp48 = fcmp une float %val48, undef
br i1 %cmp48, label %49, label %48
- %val49 = load volatile float* undef
+ %val49 = load volatile float, float* undef
%cmp49 = fcmp une float %val49, undef
br i1 %cmp49, label %50, label %49
- %val50 = load volatile float* undef
+ %val50 = load volatile float, float* undef
%cmp50 = fcmp une float %val50, undef
br i1 %cmp50, label %51, label %50
- %val51 = load volatile float* undef
+ %val51 = load volatile float, float* undef
%cmp51 = fcmp une float %val51, undef
br i1 %cmp51, label %52, label %51
- %val52 = load volatile float* undef
+ %val52 = load volatile float, float* undef
%cmp52 = fcmp une float %val52, undef
br i1 %cmp52, label %53, label %52
- %val53 = load volatile float* undef
+ %val53 = load volatile float, float* undef
%cmp53 = fcmp une float %val53, undef
br i1 %cmp53, label %54, label %53
- %val54 = load volatile float* undef
+ %val54 = load volatile float, float* undef
%cmp54 = fcmp une float %val54, undef
br i1 %cmp54, label %55, label %54
- %val55 = load volatile float* undef
+ %val55 = load volatile float, float* undef
%cmp55 = fcmp une float %val55, undef
br i1 %cmp55, label %56, label %55
- %val56 = load volatile float* undef
+ %val56 = load volatile float, float* undef
%cmp56 = fcmp une float %val56, undef
br i1 %cmp56, label %57, label %56
- %val57 = load volatile float* undef
+ %val57 = load volatile float, float* undef
%cmp57 = fcmp une float %val57, undef
br i1 %cmp57, label %58, label %57
- %val58 = load volatile float* undef
+ %val58 = load volatile float, float* undef
%cmp58 = fcmp une float %val58, undef
br i1 %cmp58, label %59, label %58
- %val59 = load volatile float* undef
+ %val59 = load volatile float, float* undef
%cmp59 = fcmp une float %val59, undef
br i1 %cmp59, label %60, label %59
- %val60 = load volatile float* undef
+ %val60 = load volatile float, float* undef
%cmp60 = fcmp une float %val60, undef
br i1 %cmp60, label %61, label %60
- %val61 = load volatile float* undef
+ %val61 = load volatile float, float* undef
%cmp61 = fcmp une float %val61, undef
br i1 %cmp61, label %62, label %61
- %val62 = load volatile float* undef
+ %val62 = load volatile float, float* undef
%cmp62 = fcmp une float %val62, undef
br i1 %cmp62, label %63, label %62
- %val63 = load volatile float* undef
+ %val63 = load volatile float, float* undef
%cmp63 = fcmp une float %val63, undef
br i1 %cmp63, label %64, label %63
- %val64 = load volatile float* undef
+ %val64 = load volatile float, float* undef
%cmp64 = fcmp une float %val64, undef
br i1 %cmp64, label %65, label %64
@@ -979,14 +979,14 @@ if.then:
%dec = add nsw i32 %l.0, -1
%idxprom = sext i32 %dec to i64
%arrayidx = getelementptr inbounds double, double* %ra, i64 %idxprom
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
br label %if.end10
if.else:
%idxprom1 = sext i32 %ir.0 to i64
%arrayidx2 = getelementptr inbounds double, double* %ra, i64 %idxprom1
- %1 = load double* %arrayidx2, align 8
- %2 = load double* %arrayidx3, align 8
+ %1 = load double, double* %arrayidx2, align 8
+ %2 = load double, double* %arrayidx3, align 8
store double %2, double* %arrayidx2, align 8
%dec6 = add nsw i32 %ir.0, -1
%cmp7 = icmp eq i32 %dec6, 1
@@ -1020,11 +1020,11 @@ while.body:
land.lhs.true:
%idxprom13 = sext i32 %j.0 to i64
%arrayidx14 = getelementptr inbounds double, double* %ra, i64 %idxprom13
- %3 = load double* %arrayidx14, align 8
+ %3 = load double, double* %arrayidx14, align 8
%add15 = add nsw i32 %j.0, 1
%idxprom16 = sext i32 %add15 to i64
%arrayidx17 = getelementptr inbounds double, double* %ra, i64 %idxprom16
- %4 = load double* %arrayidx17, align 8
+ %4 = load double, double* %arrayidx17, align 8
%cmp18 = fcmp olt double %3, %4
br i1 %cmp18, label %if.then19, label %if.end20
@@ -1035,7 +1035,7 @@ if.end20:
%j.1 = phi i32 [ %add15, %if.then19 ], [ %j.0, %land.lhs.true ], [ %j.0, %while.body ]
%idxprom21 = sext i32 %j.1 to i64
%arrayidx22 = getelementptr inbounds double, double* %ra, i64 %idxprom21
- %5 = load double* %arrayidx22, align 8
+ %5 = load double, double* %arrayidx22, align 8
%cmp23 = fcmp olt double %rra.0, %5
br i1 %cmp23, label %if.then24, label %while.cond
@@ -1066,7 +1066,7 @@ define i32 @test_cold_calls(i32* %a) {
entry:
%gep1 = getelementptr i32, i32* %a, i32 1
- %val1 = load i32* %gep1
+ %val1 = load i32, i32* %gep1
%cond1 = icmp ugt i32 %val1, 1
br i1 %cond1, label %then, label %else
@@ -1076,7 +1076,7 @@ then:
else:
%gep2 = getelementptr i32, i32* %a, i32 2
- %val2 = load i32* %gep2
+ %val2 = load i32, i32* %gep2
br label %exit
exit:
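
[Note: the volatile qualifier is unaffected by this change; the explicit type slots in directly after "load volatile", as in the many_unanalyzable_branches hunks above. For illustration:

  ; old form
  %val0 = load volatile float* undef
  ; new form
  %val0 = load volatile float, float* undef
]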
Modified: llvm/trunk/test/CodeGen/X86/bmi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bmi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bmi.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bmi.ll Fri Feb 27 15:17:42 2015
@@ -27,7 +27,7 @@ define i32 @t3(i32 %x) nounwind {
}
define i32 @tzcnt32_load(i32* %x) nounwind {
- %x1 = load i32* %x
+ %x1 = load i32, i32* %x
%tmp = tail call i32 @llvm.cttz.i32(i32 %x1, i1 false )
ret i32 %tmp
; CHECK-LABEL: tzcnt32_load:
@@ -78,7 +78,7 @@ define i32 @andn32(i32 %x, i32 %y) nounw
}
define i32 @andn32_load(i32 %x, i32* %y) nounwind readnone {
- %y1 = load i32* %y
+ %y1 = load i32, i32* %y
%tmp1 = xor i32 %x, -1
%tmp2 = and i32 %y1, %tmp1
ret i32 %tmp2
@@ -102,7 +102,7 @@ define i32 @bextr32(i32 %x, i32 %y) noun
}
define i32 @bextr32_load(i32* %x, i32 %y) nounwind readnone {
- %x1 = load i32* %x
+ %x1 = load i32, i32* %x
%tmp = tail call i32 @llvm.x86.bmi.bextr.32(i32 %x1, i32 %y)
ret i32 %tmp
; CHECK-LABEL: bextr32_load:
@@ -120,7 +120,7 @@ define i32 @bextr32b(i32 %x) nounwind uw
}
define i32 @bextr32b_load(i32* %x) nounwind uwtable readnone ssp {
- %1 = load i32* %x
+ %1 = load i32, i32* %x
%2 = lshr i32 %1, 4
%3 = and i32 %2, 4095
ret i32 %3
@@ -153,7 +153,7 @@ define i32 @bzhi32(i32 %x, i32 %y) nounw
}
define i32 @bzhi32_load(i32* %x, i32 %y) nounwind readnone {
- %x1 = load i32* %x
+ %x1 = load i32, i32* %x
%tmp = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %x1, i32 %y)
ret i32 %tmp
; CHECK-LABEL: bzhi32_load:
@@ -184,7 +184,7 @@ entry:
define i32 @bzhi32b_load(i32* %w, i8 zeroext %index) #0 {
entry:
- %x = load i32* %w
+ %x = load i32, i32* %w
%conv = zext i8 %index to i32
%shl = shl i32 1, %conv
%sub = add nsw i32 %shl, -1
@@ -242,7 +242,7 @@ define i32 @blsi32(i32 %x) nounwind read
}
define i32 @blsi32_load(i32* %x) nounwind readnone {
- %x1 = load i32* %x
+ %x1 = load i32, i32* %x
%tmp = sub i32 0, %x1
%tmp2 = and i32 %x1, %tmp
ret i32 %tmp2
@@ -267,7 +267,7 @@ define i32 @blsmsk32(i32 %x) nounwind re
}
define i32 @blsmsk32_load(i32* %x) nounwind readnone {
- %x1 = load i32* %x
+ %x1 = load i32, i32* %x
%tmp = sub i32 %x1, 1
%tmp2 = xor i32 %x1, %tmp
ret i32 %tmp2
@@ -292,7 +292,7 @@ define i32 @blsr32(i32 %x) nounwind read
}
define i32 @blsr32_load(i32* %x) nounwind readnone {
- %x1 = load i32* %x
+ %x1 = load i32, i32* %x
%tmp = sub i32 %x1, 1
%tmp2 = and i32 %x1, %tmp
ret i32 %tmp2
@@ -316,7 +316,7 @@ define i32 @pdep32(i32 %x, i32 %y) nounw
}
define i32 @pdep32_load(i32 %x, i32* %y) nounwind readnone {
- %y1 = load i32* %y
+ %y1 = load i32, i32* %y
%tmp = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 %y1)
ret i32 %tmp
; CHECK-LABEL: pdep32_load:
@@ -342,7 +342,7 @@ define i32 @pext32(i32 %x, i32 %y) nounw
}
define i32 @pext32_load(i32 %x, i32* %y) nounwind readnone {
- %y1 = load i32* %y
+ %y1 = load i32, i32* %y
%tmp = tail call i32 @llvm.x86.bmi.pext.32(i32 %x, i32 %y1)
ret i32 %tmp
; CHECK-LABEL: pext32_load:
Modified: llvm/trunk/test/CodeGen/X86/break-anti-dependencies.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/break-anti-dependencies.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/break-anti-dependencies.ll (original)
+++ llvm/trunk/test/CodeGen/X86/break-anti-dependencies.ll Fri Feb 27 15:17:42 2015
@@ -10,14 +10,14 @@
define void @goo(double* %r, double* %p, double* %q) nounwind {
entry:
- %0 = load double* %p, align 8
+ %0 = load double, double* %p, align 8
%1 = fadd double %0, 1.100000e+00
%2 = fmul double %1, 1.200000e+00
%3 = fadd double %2, 1.300000e+00
%4 = fmul double %3, 1.400000e+00
%5 = fadd double %4, 1.500000e+00
%6 = fptosi double %5 to i32
- %7 = load double* %r, align 8
+ %7 = load double, double* %r, align 8
%8 = fadd double %7, 7.100000e+00
%9 = fmul double %8, 7.200000e+00
%10 = fadd double %9, 7.300000e+00
Modified: llvm/trunk/test/CodeGen/X86/break-false-dep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/break-false-dep.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/break-false-dep.ll (original)
+++ llvm/trunk/test/CodeGen/X86/break-false-dep.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ entry:
; SSE: movss ([[A0:%rdi|%rcx]]), %xmm0
; SSE: cvtss2sd %xmm0, %xmm0
- %0 = load float* %x, align 4
+ %0 = load float, float* %x, align 4
%1 = fpext float %0 to double
ret double %1
}
@@ -17,7 +17,7 @@ define float @t2(double* nocapture %x) n
entry:
; SSE-LABEL: t2:
; SSE: cvtsd2ss ([[A0]]), %xmm0
- %0 = load double* %x, align 8
+ %0 = load double, double* %x, align 8
%1 = fptrunc double %0 to float
ret float %1
}
@@ -27,7 +27,7 @@ entry:
; SSE-LABEL: squirtf:
; SSE: movss ([[A0]]), %xmm0
; SSE: sqrtss %xmm0, %xmm0
- %z = load float* %x
+ %z = load float, float* %x
%t = call float @llvm.sqrt.f32(float %z)
ret float %t
}
@@ -37,7 +37,7 @@ entry:
; SSE-LABEL: squirt:
; SSE: movsd ([[A0]]), %xmm0
; SSE: sqrtsd %xmm0, %xmm0
- %z = load double* %x
+ %z = load double, double* %x
%t = call double @llvm.sqrt.f64(double %z)
ret double %t
}
@@ -46,7 +46,7 @@ define float @squirtf_size(float* %x) no
entry:
; SSE-LABEL: squirtf_size:
; SSE: sqrtss ([[A0]]), %xmm0
- %z = load float* %x
+ %z = load float, float* %x
%t = call float @llvm.sqrt.f32(float %z)
ret float %t
}
@@ -55,7 +55,7 @@ define double @squirt_size(double* %x) n
entry:
; SSE-LABEL: squirt_size:
; SSE: sqrtsd ([[A0]]), %xmm0
- %z = load double* %x
+ %z = load double, double* %x
%t = call double @llvm.sqrt.f64(double %z)
ret double %t
}
@@ -120,13 +120,13 @@ for.end:
; SSE: cvtsi2sdq %{{r[0-9a-x]+}}, %[[REG]]
define i64 @loopdep2(i64* nocapture %x, double* nocapture %y) nounwind {
entry:
- %vx = load i64* %x
+ %vx = load i64, i64* %x
br label %loop
loop:
%i = phi i64 [ 1, %entry ], [ %inc, %loop ]
%s1 = phi i64 [ %vx, %entry ], [ %s2, %loop ]
%fi = sitofp i64 %i to double
- %vy = load double* %y
+ %vy = load double, double* %y
%fipy = fadd double %fi, %vy
%iipy = fptosi double %fipy to i64
%s2 = add i64 %s1, %iipy
@@ -159,16 +159,16 @@ for.cond1.preheader:
for.body3:
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @v, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%conv = sitofp i32 %0 to double
%arrayidx5 = getelementptr inbounds [1024 x double], [1024 x double]* @x, i64 0, i64 %indvars.iv
- %1 = load double* %arrayidx5, align 8
+ %1 = load double, double* %arrayidx5, align 8
%mul = fmul double %conv, %1
%arrayidx7 = getelementptr inbounds [1024 x double], [1024 x double]* @y, i64 0, i64 %indvars.iv
- %2 = load double* %arrayidx7, align 8
+ %2 = load double, double* %arrayidx7, align 8
%mul8 = fmul double %mul, %2
%arrayidx10 = getelementptr inbounds [1024 x double], [1024 x double]* @z, i64 0, i64 %indvars.iv
- %3 = load double* %arrayidx10, align 8
+ %3 = load double, double* %arrayidx10, align 8
%mul11 = fmul double %mul8, %3
%arrayidx13 = getelementptr inbounds [1024 x double], [1024 x double]* @w, i64 0, i64 %indvars.iv
store double %mul11, double* %arrayidx13, align 8
Modified: llvm/trunk/test/CodeGen/X86/bswap.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bswap.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bswap.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bswap.ll Fri Feb 27 15:17:42 2015
@@ -91,7 +91,7 @@ define i64 @not_bswap() {
; CHECK64-LABEL: not_bswap:
; CHECK64-NOT: bswapq
; CHECK64: ret
- %init = load i16* @var16
+ %init = load i16, i16* @var16
%big = zext i16 %init to i64
%hishifted = lshr i64 %big, 8
@@ -115,7 +115,7 @@ define i64 @not_useful_bswap() {
; CHECK64-NOT: bswapq
; CHECK64: ret
- %init = load i8* @var8
+ %init = load i8, i8* @var8
%big = zext i8 %init to i64
%hishifted = lshr i64 %big, 8
@@ -140,7 +140,7 @@ define i64 @finally_useful_bswap() {
; CHECK64: shrq $48, [[REG]]
; CHECK64: ret
- %init = load i16* @var16
+ %init = load i16, i16* @var16
%big = zext i16 %init to i64
%hishifted = lshr i64 %big, 8
Modified: llvm/trunk/test/CodeGen/X86/byval-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/byval-align.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/byval-align.ll (original)
+++ llvm/trunk/test/CodeGen/X86/byval-align.ll Fri Feb 27 15:17:42 2015
@@ -14,14 +14,14 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%obj1 = bitcast %struct.S* %obj to i8* ; <i8*> [#uses=1]
store i8* %obj1, i8** %ptr, align 8
- %0 = load i8** %ptr, align 8 ; <i8*> [#uses=1]
+ %0 = load i8*, i8** %ptr, align 8 ; <i8*> [#uses=1]
%1 = ptrtoint i8* %0 to i64 ; <i64> [#uses=1]
store i64 %1, i64* %p, align 8
- %2 = load i8** %ptr, align 8 ; <i8*> [#uses=1]
+ %2 = load i8*, i8** %ptr, align 8 ; <i8*> [#uses=1]
%3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i64 0, i64 0), i8* %2) nounwind ; <i32> [#uses=0]
- %4 = load i64* %p, align 8 ; <i64> [#uses=1]
+ %4 = load i64, i64* %p, align 8 ; <i64> [#uses=1]
%5 = and i64 %4, 140737488355264 ; <i64> [#uses=1]
- %6 = load i64* %p, align 8 ; <i64> [#uses=1]
+ %6 = load i64, i64* %p, align 8 ; <i64> [#uses=1]
%7 = icmp ne i64 %5, %6 ; <i1> [#uses=1]
br i1 %7, label %bb, label %bb2
Modified: llvm/trunk/test/CodeGen/X86/byval.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/byval.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/byval.ll (original)
+++ llvm/trunk/test/CodeGen/X86/byval.ll Fri Feb 27 15:17:42 2015
@@ -12,6 +12,6 @@
define i64 @f(%struct.s* byval %a) {
entry:
%tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 0
- %tmp3 = load i64* %tmp2, align 8
+ %tmp3 = load i64, i64* %tmp2, align 8
ret i64 %tmp3
}
Modified: llvm/trunk/test/CodeGen/X86/call-push.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/call-push.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/call-push.ll (original)
+++ llvm/trunk/test/CodeGen/X86/call-push.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ define i32 @decode_byte(%struct.decode_t
entry:
%tmp2 = getelementptr %struct.decode_t, %struct.decode_t* %decode, i32 0, i32 4 ; <i16*> [#uses=1]
%tmp23 = bitcast i16* %tmp2 to i32* ; <i32*> [#uses=1]
- %tmp4 = load i32* %tmp23 ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* %tmp23 ; <i32> [#uses=1]
%tmp514 = lshr i32 %tmp4, 24 ; <i32> [#uses=1]
%tmp56 = trunc i32 %tmp514 to i8 ; <i8> [#uses=1]
%tmp7 = icmp eq i8 %tmp56, 0 ; <i1> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/cas.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cas.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cas.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cas.ll Fri Feb 27 15:17:42 2015
@@ -24,14 +24,14 @@ entry:
store float* %p, float** %p.addr, align 8
store float* %expected, float** %expected.addr, align 8
store float %desired, float* %desired.addr, align 4
- %0 = load float** %expected.addr, align 8
- %1 = load float** %expected.addr, align 8
- %2 = load float* %1, align 4
- %3 = load float* %desired.addr, align 4
- %4 = load float** %p.addr, align 8
+ %0 = load float*, float** %expected.addr, align 8
+ %1 = load float*, float** %expected.addr, align 8
+ %2 = load float, float* %1, align 4
+ %3 = load float, float* %desired.addr, align 4
+ %4 = load float*, float** %p.addr, align 8
%5 = call i8 asm sideeffect "lock; cmpxchg $3, $4; mov $2, $1; sete $0", "={ax},=*rm,{ax},q,*m,~{memory},~{cc},~{dirflag},~{fpsr},~{flags}"(float* %0, float %2, float %3, float* %4) nounwind
store i8 %5, i8* %success, align 1
- %6 = load i8* %success, align 1
+ %6 = load i8, i8* %success, align 1
%tobool = trunc i8 %6 to i1
ret i1 %tobool
}
@@ -52,16 +52,16 @@ entry:
store i8* %expected, i8** %expected.addr, align 8
%frombool = zext i1 %desired to i8
store i8 %frombool, i8* %desired.addr, align 1
- %0 = load i8** %expected.addr, align 8
- %1 = load i8** %expected.addr, align 8
- %2 = load i8* %1, align 1
+ %0 = load i8*, i8** %expected.addr, align 8
+ %1 = load i8*, i8** %expected.addr, align 8
+ %2 = load i8, i8* %1, align 1
%tobool = trunc i8 %2 to i1
- %3 = load i8* %desired.addr, align 1
+ %3 = load i8, i8* %desired.addr, align 1
%tobool1 = trunc i8 %3 to i1
- %4 = load i8** %p.addr, align 8
+ %4 = load i8*, i8** %p.addr, align 8
%5 = call i8 asm sideeffect "lock; cmpxchg $3, $4; mov $2, $1; sete $0", "={ax},=*rm,{ax},q,*m,~{memory},~{cc},~{dirflag},~{fpsr},~{flags}"(i8* %0, i1 %tobool, i1 %tobool1, i8* %4) nounwind
store i8 %5, i8* %success, align 1
- %6 = load i8* %success, align 1
+ %6 = load i8, i8* %success, align 1
%tobool2 = trunc i8 %6 to i1
ret i1 %tobool2
}
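
[Note: for loads of pointer values, the explicit type is itself a pointer, one level of indirection shallower than the operand's type, as in the cas.ll hunks above:

  ; old form
  %0 = load i8** %expected.addr, align 8
  ; new form: "i8*" is the loaded value's type, "i8**" the operand's type
  %0 = load i8*, i8** %expected.addr, align 8
]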
Modified: llvm/trunk/test/CodeGen/X86/chain_order.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/chain_order.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/chain_order.ll (original)
+++ llvm/trunk/test/CodeGen/X86/chain_order.ll Fri Feb 27 15:17:42 2015
@@ -12,13 +12,13 @@
; A test from pifft (after SLP-vectorization) that fails when we drop the chain on newly merged loads.
define void @cftx020(double* nocapture %a) {
entry:
- %0 = load double* %a, align 8
+ %0 = load double, double* %a, align 8
%arrayidx1 = getelementptr inbounds double, double* %a, i64 2
- %1 = load double* %arrayidx1, align 8
+ %1 = load double, double* %arrayidx1, align 8
%arrayidx2 = getelementptr inbounds double, double* %a, i64 1
- %2 = load double* %arrayidx2, align 8
+ %2 = load double, double* %arrayidx2, align 8
%arrayidx3 = getelementptr inbounds double, double* %a, i64 3
- %3 = load double* %arrayidx3, align 8
+ %3 = load double, double* %arrayidx3, align 8
%4 = insertelement <2 x double> undef, double %0, i32 0
%5 = insertelement <2 x double> %4, double %3, i32 1
%6 = insertelement <2 x double> undef, double %1, i32 0
Modified: llvm/trunk/test/CodeGen/X86/change-compare-stride-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/change-compare-stride-1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/change-compare-stride-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/change-compare-stride-1.ll Fri Feb 27 15:17:42 2015
@@ -41,38 +41,38 @@ bb2: ; preds = %bb2, %bb2.outer
%4 = add i32 %3, -481 ; <i32> [#uses=1]
%5 = zext i32 %4 to i64 ; <i64> [#uses=1]
%6 = getelementptr i8, i8* %in, i64 %5 ; <i8*> [#uses=1]
- %7 = load i8* %6, align 1 ; <i8> [#uses=1]
+ %7 = load i8, i8* %6, align 1 ; <i8> [#uses=1]
%8 = add i32 %3, -480 ; <i32> [#uses=1]
%9 = zext i32 %8 to i64 ; <i64> [#uses=1]
%10 = getelementptr i8, i8* %in, i64 %9 ; <i8*> [#uses=1]
- %11 = load i8* %10, align 1 ; <i8> [#uses=1]
+ %11 = load i8, i8* %10, align 1 ; <i8> [#uses=1]
%12 = add i32 %3, -479 ; <i32> [#uses=1]
%13 = zext i32 %12 to i64 ; <i64> [#uses=1]
%14 = getelementptr i8, i8* %in, i64 %13 ; <i8*> [#uses=1]
- %15 = load i8* %14, align 1 ; <i8> [#uses=1]
+ %15 = load i8, i8* %14, align 1 ; <i8> [#uses=1]
%16 = add i32 %3, -1 ; <i32> [#uses=1]
%17 = zext i32 %16 to i64 ; <i64> [#uses=1]
%18 = getelementptr i8, i8* %in, i64 %17 ; <i8*> [#uses=1]
- %19 = load i8* %18, align 1 ; <i8> [#uses=1]
+ %19 = load i8, i8* %18, align 1 ; <i8> [#uses=1]
%20 = zext i32 %3 to i64 ; <i64> [#uses=1]
%21 = getelementptr i8, i8* %in, i64 %20 ; <i8*> [#uses=1]
- %22 = load i8* %21, align 1 ; <i8> [#uses=1]
+ %22 = load i8, i8* %21, align 1 ; <i8> [#uses=1]
%23 = add i32 %3, 1 ; <i32> [#uses=1]
%24 = zext i32 %23 to i64 ; <i64> [#uses=1]
%25 = getelementptr i8, i8* %in, i64 %24 ; <i8*> [#uses=1]
- %26 = load i8* %25, align 1 ; <i8> [#uses=1]
+ %26 = load i8, i8* %25, align 1 ; <i8> [#uses=1]
%27 = add i32 %3, 481 ; <i32> [#uses=1]
%28 = zext i32 %27 to i64 ; <i64> [#uses=1]
%29 = getelementptr i8, i8* %in, i64 %28 ; <i8*> [#uses=1]
- %30 = load i8* %29, align 1 ; <i8> [#uses=1]
+ %30 = load i8, i8* %29, align 1 ; <i8> [#uses=1]
%31 = add i32 %3, 480 ; <i32> [#uses=1]
%32 = zext i32 %31 to i64 ; <i64> [#uses=1]
%33 = getelementptr i8, i8* %in, i64 %32 ; <i8*> [#uses=1]
- %34 = load i8* %33, align 1 ; <i8> [#uses=1]
+ %34 = load i8, i8* %33, align 1 ; <i8> [#uses=1]
%35 = add i32 %3, 479 ; <i32> [#uses=1]
%36 = zext i32 %35 to i64 ; <i64> [#uses=1]
%37 = getelementptr i8, i8* %in, i64 %36 ; <i8*> [#uses=1]
- %38 = load i8* %37, align 1 ; <i8> [#uses=1]
+ %38 = load i8, i8* %37, align 1 ; <i8> [#uses=1]
%39 = add i8 %11, %7 ; <i8> [#uses=1]
%40 = add i8 %39, %15 ; <i8> [#uses=1]
%41 = add i8 %40, %19 ; <i8> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/clobber-fi0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/clobber-fi0.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/clobber-fi0.ll (original)
+++ llvm/trunk/test/CodeGen/X86/clobber-fi0.ll Fri Feb 27 15:17:42 2015
@@ -20,17 +20,17 @@ bb:
br label %bb4
bb4: ; preds = %bb4, %bb
- %tmp6 = load i32* %tmp2, align 4 ; [#uses=1 type=i32]
+ %tmp6 = load i32, i32* %tmp2, align 4 ; [#uses=1 type=i32]
%tmp7 = add i32 %tmp6, -1 ; [#uses=2 type=i32]
store i32 %tmp7, i32* %tmp2, align 4
%tmp8 = icmp eq i32 %tmp7, 0 ; [#uses=1 type=i1]
- %tmp9 = load i32* %tmp ; [#uses=1 type=i32]
+ %tmp9 = load i32, i32* %tmp ; [#uses=1 type=i32]
%tmp10 = add i32 %tmp9, -1 ; [#uses=1 type=i32]
store i32 %tmp10, i32* %tmp3
br i1 %tmp8, label %bb11, label %bb4
bb11: ; preds = %bb4
- %tmp12 = load i32* %tmp, align 4 ; [#uses=1 type=i32]
+ %tmp12 = load i32, i32* %tmp, align 4 ; [#uses=1 type=i32]
ret i32 %tmp12
}
Modified: llvm/trunk/test/CodeGen/X86/cmov-into-branch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cmov-into-branch.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cmov-into-branch.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cmov-into-branch.ll Fri Feb 27 15:17:42 2015
@@ -2,7 +2,7 @@
; cmp with single-use load, should not form cmov.
define i32 @test1(double %a, double* nocapture %b, i32 %x, i32 %y) {
- %load = load double* %b, align 8
+ %load = load double, double* %b, align 8
%cmp = fcmp olt double %load, %a
%cond = select i1 %cmp, i32 %x, i32 %y
ret i32 %cond
@@ -25,7 +25,7 @@ define i32 @test2(double %a, double %b,
; Multiple uses of %a, should not form cmov.
define i32 @test3(i32 %a, i32* nocapture %b, i32 %x) {
- %load = load i32* %b, align 4
+ %load = load i32, i32* %b, align 4
%cmp = icmp ult i32 %load, %a
%cond = select i1 %cmp, i32 %a, i32 %x
ret i32 %cond
@@ -38,7 +38,7 @@ define i32 @test3(i32 %a, i32* nocapture
; Multiple uses of the load.
define i32 @test4(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
- %load = load i32* %b, align 4
+ %load = load i32, i32* %b, align 4
%cmp = icmp ult i32 %load, %a
%cond = select i1 %cmp, i32 %x, i32 %y
%add = add i32 %cond, %load
@@ -50,7 +50,7 @@ define i32 @test4(i32 %a, i32* nocapture
; Multiple uses of the cmp.
define i32 @test5(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
- %load = load i32* %b, align 4
+ %load = load i32, i32* %b, align 4
%cmp = icmp ult i32 %load, %a
%cmp1 = icmp ugt i32 %load, %a
%cond = select i1 %cmp1, i32 %a, i32 %y
Modified: llvm/trunk/test/CodeGen/X86/cmov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cmov.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cmov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cmov.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ entry:
%0 = lshr i32 %x, %n ; <i32> [#uses=1]
%1 = and i32 %0, 1 ; <i32> [#uses=1]
%toBool = icmp eq i32 %1, 0 ; <i1> [#uses=1]
- %v = load i32* %vp
+ %v = load i32, i32* %vp
%.0 = select i1 %toBool, i32 %v, i32 12 ; <i32> [#uses=1]
ret i32 %.0
}
@@ -27,7 +27,7 @@ entry:
%0 = lshr i32 %x, %n ; <i32> [#uses=1]
%1 = and i32 %0, 1 ; <i32> [#uses=1]
%toBool = icmp eq i32 %1, 0 ; <i1> [#uses=1]
- %v = load i32* %vp
+ %v = load i32, i32* %vp
%.0 = select i1 %toBool, i32 12, i32 %v ; <i32> [#uses=1]
ret i32 %.0
}
@@ -71,7 +71,7 @@ define void @test3(i64 %a, i64 %b, i1 %p
define i32 @test4() nounwind {
entry:
- %0 = load i8* @g_3, align 1 ; <i8> [#uses=2]
+ %0 = load i8, i8* @g_3, align 1 ; <i8> [#uses=2]
%1 = sext i8 %0 to i32 ; <i32> [#uses=1]
%.lobit.i = lshr i8 %0, 7 ; <i8> [#uses=1]
%tmp.i = zext i8 %.lobit.i to i32 ; <i32> [#uses=1]
@@ -79,12 +79,12 @@ entry:
%iftmp.17.0.i.i = ashr i32 %1, %tmp.not.i ; <i32> [#uses=1]
%retval56.i.i = trunc i32 %iftmp.17.0.i.i to i8 ; <i8> [#uses=1]
%2 = icmp eq i8 %retval56.i.i, 0 ; <i1> [#uses=2]
- %g_96.promoted.i = load i8* @g_96 ; <i8> [#uses=3]
+ %g_96.promoted.i = load i8, i8* @g_96 ; <i8> [#uses=3]
%3 = icmp eq i8 %g_96.promoted.i, 0 ; <i1> [#uses=2]
br i1 %3, label %func_4.exit.i, label %bb.i.i.i
bb.i.i.i: ; preds = %entry
- %4 = load volatile i8* @g_100, align 1 ; <i8> [#uses=0]
+ %4 = load volatile i8, i8* @g_100, align 1 ; <i8> [#uses=0]
br label %func_4.exit.i
; CHECK-LABEL: test4:
@@ -101,7 +101,7 @@ func_4.exit.i:
br i1 %brmerge.i, label %func_1.exit, label %bb.i.i
bb.i.i: ; preds = %func_4.exit.i
- %5 = load volatile i8* @g_100, align 1 ; <i8> [#uses=0]
+ %5 = load volatile i8, i8* @g_100, align 1 ; <i8> [#uses=0]
br label %func_1.exit
func_1.exit: ; preds = %bb.i.i, %func_4.exit.i
@@ -125,7 +125,7 @@ entry:
; CHECK: orl $-2, %eax
; CHECK: ret
- %0 = load i32* %P, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %P, align 4 ; <i32> [#uses=1]
%1 = icmp sgt i32 %0, 41 ; <i1> [#uses=1]
%iftmp.0.0 = select i1 %1, i32 -1, i32 -2 ; <i32> [#uses=1]
ret i32 %iftmp.0.0
@@ -138,7 +138,7 @@ entry:
; CHECK: movzbl %al, %eax
; CHECK: leal 4(%rax,%rax,8), %eax
; CHECK: ret
- %0 = load i32* %P, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %P, align 4 ; <i32> [#uses=1]
%1 = icmp sgt i32 %0, 41 ; <i1> [#uses=1]
%iftmp.0.0 = select i1 %1, i32 4, i32 13 ; <i32> [#uses=1]
ret i32 %iftmp.0.0
Modified: llvm/trunk/test/CodeGen/X86/cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cmp.ll Fri Feb 27 15:17:42 2015
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -show-mc-encoding | FileCheck %s
define i32 @test1(i32 %X, i32* %y) nounwind {
- %tmp = load i32* %y ; <i32> [#uses=1]
+ %tmp = load i32, i32* %y ; <i32> [#uses=1]
%tmp.upgrd.1 = icmp eq i32 %tmp, 0 ; <i1> [#uses=1]
br i1 %tmp.upgrd.1, label %ReturnBlock, label %cond_true
@@ -15,7 +15,7 @@ ReturnBlock: ; preds = %0
}
define i32 @test2(i32 %X, i32* %y) nounwind {
- %tmp = load i32* %y ; <i32> [#uses=1]
+ %tmp = load i32, i32* %y ; <i32> [#uses=1]
%tmp1 = shl i32 %tmp, 3 ; <i32> [#uses=1]
%tmp1.upgrd.2 = icmp eq i32 %tmp1, 0 ; <i1> [#uses=1]
br i1 %tmp1.upgrd.2, label %ReturnBlock, label %cond_true
@@ -30,7 +30,7 @@ ReturnBlock: ; preds = %0
}
define i8 @test2b(i8 %X, i8* %y) nounwind {
- %tmp = load i8* %y ; <i8> [#uses=1]
+ %tmp = load i8, i8* %y ; <i8> [#uses=1]
%tmp1 = shl i8 %tmp, 3 ; <i8> [#uses=1]
%tmp1.upgrd.2 = icmp eq i8 %tmp1, 0 ; <i1> [#uses=1]
br i1 %tmp1.upgrd.2, label %ReturnBlock, label %cond_true
@@ -90,7 +90,7 @@ declare i32 @foo(...)
define i32 @test6() nounwind align 2 {
%A = alloca {i64, i64}, align 8
%B = getelementptr inbounds {i64, i64}, {i64, i64}* %A, i64 0, i32 1
- %C = load i64* %B
+ %C = load i64, i64* %B
%D = icmp eq i64 %C, 0
br i1 %D, label %T, label %F
T:
Modified: llvm/trunk/test/CodeGen/X86/cmpxchg-clobber-flags.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cmpxchg-clobber-flags.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cmpxchg-clobber-flags.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cmpxchg-clobber-flags.ll Fri Feb 27 15:17:42 2015
@@ -42,7 +42,7 @@ loop_start:
br label %while.condthread-pre-split.i
while.condthread-pre-split.i:
- %.pr.i = load i32* %p, align 4
+ %.pr.i = load i32, i32* %p, align 4
br label %while.cond.i
while.cond.i:
Modified: llvm/trunk/test/CodeGen/X86/cmpxchg-i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cmpxchg-i1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cmpxchg-i1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cmpxchg-i1.ll Fri Feb 27 15:17:42 2015
@@ -68,7 +68,7 @@ define i32 @cmpxchg_use_eflags_and_val(i
; Result already in %eax
; CHECK: retq
entry:
- %init = load atomic i32* %addr seq_cst, align 4
+ %init = load atomic i32, i32* %addr seq_cst, align 4
br label %loop
loop:
Modified: llvm/trunk/test/CodeGen/X86/cmpxchg-i128-i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cmpxchg-i128-i1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cmpxchg-i128-i1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cmpxchg-i128-i1.ll Fri Feb 27 15:17:42 2015
@@ -62,7 +62,7 @@ define i128 @cmpxchg_use_eflags_and_val(
; CHECK-NOT: cmpq
; CHECK: jne
entry:
- %init = load atomic i128* %addr seq_cst, align 16
+ %init = load atomic i128, i128* %addr seq_cst, align 16
br label %loop
loop:
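
[Note: atomic loads follow the same pattern; the explicit type comes after "load atomic" and before the pointer operand, with the ordering and alignment left unchanged:

  ; old form
  %init = load atomic i128* %addr seq_cst, align 16
  ; new form
  %init = load atomic i128, i128* %addr seq_cst, align 16
]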
Modified: llvm/trunk/test/CodeGen/X86/coalesce-esp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/coalesce-esp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/coalesce-esp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/coalesce-esp.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ bb4: ; preds = %bb7.backedge, %entry
%scevgep24.sum = sub i32 undef, %indvar ; <i32> [#uses=2]
%scevgep25 = getelementptr i32, i32* %0, i32 %scevgep24.sum ; <i32*> [#uses=1]
%scevgep27 = getelementptr i32, i32* undef, i32 %scevgep24.sum ; <i32*> [#uses=1]
- %1 = load i32* %scevgep27, align 4 ; <i32> [#uses=0]
+ %1 = load i32, i32* %scevgep27, align 4 ; <i32> [#uses=0]
br i1 undef, label %bb7.backedge, label %bb5
bb5: ; preds = %bb4
Modified: llvm/trunk/test/CodeGen/X86/coalesce-implicitdef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/coalesce-implicitdef.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/coalesce-implicitdef.ll (original)
+++ llvm/trunk/test/CodeGen/X86/coalesce-implicitdef.ll Fri Feb 27 15:17:42 2015
@@ -71,7 +71,7 @@ for.inc27.backedge:
br i1 %tobool17, label %for.inc27.if.end30.loopexit56_crit_edge, label %while.condthread-pre-split
if.then22: ; preds = %while.end
- %1 = load i16* %p2.1, align 2
+ %1 = load i16, i16* %p2.1, align 2
%tobool23 = icmp eq i16 %1, 0
br i1 %tobool23, label %for.inc27.backedge, label %label.loopexit
@@ -89,7 +89,7 @@ for.inc27.if.end30.loopexit56_crit_edge:
if.end30: ; preds = %for.inc27.if.end30.loopexit56_crit_edge, %label.loopexit, %label.preheader, %for.inc
%i.0.load46 = phi i32 [ 0, %for.inc ], [ %i.0.load4669, %label.preheader ], [ %i.0.load4669, %label.loopexit ], [ %i.0.load4669, %for.inc27.if.end30.loopexit56_crit_edge ]
%pi.4 = phi i32* [ %i, %for.inc ], [ %pi.3.ph, %label.preheader ], [ %pi.3.ph, %label.loopexit ], [ %pi.3.ph, %for.inc27.if.end30.loopexit56_crit_edge ]
- %2 = load i32* %pi.4, align 4
+ %2 = load i32, i32* %pi.4, align 4
%tobool31 = icmp eq i32 %2, 0
br i1 %tobool31, label %for.inc34, label %label.preheader
@@ -100,7 +100,7 @@ for.inc34:
for.end36: ; preds = %for.cond
store i32 1, i32* %i, align 4
- %3 = load i32* @c, align 4
+ %3 = load i32, i32* @c, align 4
%tobool37 = icmp eq i32 %3, 0
br i1 %tobool37, label %label.preheader, label %land.rhs
@@ -111,15 +111,15 @@ land.rhs:
label.preheader: ; preds = %for.end36, %if.end30, %land.rhs
%i.0.load4669 = phi i32 [ 1, %land.rhs ], [ %i.0.load46, %if.end30 ], [ 1, %for.end36 ]
%pi.3.ph = phi i32* [ %pi.0, %land.rhs ], [ %pi.4, %if.end30 ], [ %pi.0, %for.end36 ]
- %4 = load i32* @b, align 4
+ %4 = load i32, i32* @b, align 4
%inc285863 = add nsw i32 %4, 1
store i32 %inc285863, i32* @b, align 4
%tobool175964 = icmp eq i32 %inc285863, 0
br i1 %tobool175964, label %if.end30, label %while.condthread-pre-split.lr.ph.lr.ph
while.condthread-pre-split.lr.ph.lr.ph: ; preds = %label.preheader
- %.pr50 = load i32* @d, align 4
+ %.pr50 = load i32, i32* @d, align 4
%tobool19 = icmp eq i32 %.pr50, 0
- %a.promoted.pre = load i32* @a, align 4
+ %a.promoted.pre = load i32, i32* @a, align 4
br label %while.condthread-pre-split
}
Modified: llvm/trunk/test/CodeGen/X86/coalescer-commute1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/coalescer-commute1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/coalescer-commute1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/coalescer-commute1.ll Fri Feb 27 15:17:42 2015
@@ -6,14 +6,14 @@
define void @runcont(i32* %source) nounwind {
entry:
- %tmp10 = load i32* @NNTOT, align 4 ; <i32> [#uses=1]
+ %tmp10 = load i32, i32* @NNTOT, align 4 ; <i32> [#uses=1]
br label %bb
bb: ; preds = %bb, %entry
%neuron.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%thesum.0 = phi float [ 0.000000e+00, %entry ], [ %tmp6, %bb ] ; <float> [#uses=1]
%tmp2 = getelementptr i32, i32* %source, i32 %neuron.0 ; <i32*> [#uses=1]
- %tmp3 = load i32* %tmp2, align 4 ; <i32> [#uses=1]
+ %tmp3 = load i32, i32* %tmp2, align 4 ; <i32> [#uses=1]
%tmp34 = sitofp i32 %tmp3 to float ; <float> [#uses=1]
%tmp6 = fadd float %tmp34, %thesum.0 ; <float> [#uses=2]
%indvar.next = add i32 %neuron.0, 1 ; <i32> [#uses=2]
Modified: llvm/trunk/test/CodeGen/X86/coalescer-commute4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/coalescer-commute4.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/coalescer-commute4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/coalescer-commute4.ll Fri Feb 27 15:17:42 2015
@@ -14,10 +14,10 @@ bb: ; preds = %bb, %bb.preheader
%i.0.reg2mem.0 = phi i32 [ 0, %bb.preheader ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
%res.0.reg2mem.0 = phi float [ 0.000000e+00, %bb.preheader ], [ %tmp14, %bb ] ; <float> [#uses=1]
%tmp3 = getelementptr i32, i32* %x, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
- %tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* %tmp3, align 4 ; <i32> [#uses=1]
%tmp45 = sitofp i32 %tmp4 to float ; <float> [#uses=1]
%tmp8 = getelementptr float, float* %y, i32 %i.0.reg2mem.0 ; <float*> [#uses=1]
- %tmp9 = load float* %tmp8, align 4 ; <float> [#uses=1]
+ %tmp9 = load float, float* %tmp8, align 4 ; <float> [#uses=1]
%tmp11 = fmul float %tmp9, %tmp45 ; <float> [#uses=1]
%tmp14 = fadd float %tmp11, %res.0.reg2mem.0 ; <float> [#uses=2]
%indvar.next = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
Modified: llvm/trunk/test/CodeGen/X86/coalescer-cross.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/coalescer-cross.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/coalescer-cross.ll (original)
+++ llvm/trunk/test/CodeGen/X86/coalescer-cross.ll Fri Feb 27 15:17:42 2015
@@ -31,12 +31,12 @@ entry:
%1 = uitofp i32 %0 to double ; <double> [#uses=1]
%2 = fdiv double %1, 1.000000e+06 ; <double> [#uses=1]
%3 = getelementptr %struct.lua_State, %struct.lua_State* %L, i32 0, i32 4 ; <%struct.TValue**> [#uses=3]
- %4 = load %struct.TValue** %3, align 4 ; <%struct.TValue*> [#uses=2]
+ %4 = load %struct.TValue*, %struct.TValue** %3, align 4 ; <%struct.TValue*> [#uses=2]
%5 = getelementptr %struct.TValue, %struct.TValue* %4, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
store double %2, double* %5, align 4
%6 = getelementptr %struct.TValue, %struct.TValue* %4, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 3, i32* %6, align 4
- %7 = load %struct.TValue** %3, align 4 ; <%struct.TValue*> [#uses=1]
+ %7 = load %struct.TValue*, %struct.TValue** %3, align 4 ; <%struct.TValue*> [#uses=1]
%8 = getelementptr %struct.TValue, %struct.TValue* %7, i32 1 ; <%struct.TValue*> [#uses=1]
store %struct.TValue* %8, %struct.TValue** %3, align 4
ret i32 1
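The coalescer-cross.ll hunk above shows the rule when the loaded value is itself a pointer: the explicit type parameter is the operand's pointee type, exactly one pointer level shallower. A sketch with a hypothetical %pp:

  %q = load %struct.TValue*, %struct.TValue** %pp, align 4

The first type names what comes out of the load; the second is the type of the pointer being loaded from.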
Modified: llvm/trunk/test/CodeGen/X86/coalescer-dce2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/coalescer-dce2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/coalescer-dce2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/coalescer-dce2.ll Fri Feb 27 15:17:42 2015
@@ -14,19 +14,19 @@ target triple = "x86_64-apple-macosx10.7
define void @fn1() nounwind uwtable ssp {
entry:
- %0 = load i32* @d, align 4
+ %0 = load i32, i32* @d, align 4
%tobool72 = icmp eq i32 %0, 0
br i1 %tobool72, label %for.end32, label %for.cond1.preheader.lr.ph
for.cond1.preheader.lr.ph: ; preds = %entry
- %1 = load i32* @c, align 4
+ %1 = load i32, i32* @c, align 4
%tobool2 = icmp eq i32 %1, 0
- %2 = load i32* @b, align 4
+ %2 = load i32, i32* @b, align 4
%cmp = icmp sgt i32 %2, 0
%conv = zext i1 %cmp to i32
- %3 = load i32* @g, align 4
+ %3 = load i32, i32* @g, align 4
%tobool4 = icmp eq i32 %3, 0
- %4 = load i16* @a, align 2
+ %4 = load i16, i16* @a, align 2
%tobool9 = icmp eq i16 %4, 0
br label %for.cond1.preheader
@@ -41,7 +41,7 @@ for.cond1.preheader.split.us:
br i1 %tobool9, label %cond.end.us.us, label %cond.end.us
cond.false18.us.us: ; preds = %if.end.us.us
- %5 = load i32* @f, align 4
+ %5 = load i32, i32* @f, align 4
%sext76 = shl i32 %5, 16
%phitmp75 = ashr exact i32 %sext76, 16
br label %cond.end.us.us
@@ -74,7 +74,7 @@ land.lhs.true12.us:
br i1 %cmp14.us, label %cond.end21.us, label %cond.false18.us
if.end6.us: ; preds = %if.end.us
- %6 = load i32* @f, align 4
+ %6 = load i32, i32* @f, align 4
%conv7.us = trunc i32 %6 to i16
%tobool11.us = icmp eq i16 %conv7.us, 0
br i1 %tobool11.us, label %cond.false18.us, label %land.lhs.true12.us
@@ -95,7 +95,7 @@ for.cond1.preheader.split.for.cond1.preh
br i1 %tobool4, label %if.end6.us65, label %for.cond25.loopexit.us-lcssa.us-lcssa
cond.false18.us40: ; preds = %if.end.us50
- %7 = load i32* @f, align 4
+ %7 = load i32, i32* @f, align 4
%sext = shl i32 %7, 16
%phitmp = ashr exact i32 %sext, 16
br label %if.end.us50
Modified: llvm/trunk/test/CodeGen/X86/coalescer-identity.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/coalescer-identity.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/coalescer-identity.ll (original)
+++ llvm/trunk/test/CodeGen/X86/coalescer-identity.ll Fri Feb 27 15:17:42 2015
@@ -12,10 +12,10 @@ target triple = "x86_64-apple-macosx10.8
define void @func() nounwind uwtable ssp {
for.body.lr.ph:
- %0 = load i32* @g2, align 4
+ %0 = load i32, i32* @g2, align 4
%tobool6 = icmp eq i32 %0, 0
- %s.promoted = load i16* @s, align 2
- %.pre = load i32* @g1, align 4
+ %s.promoted = load i16, i16* @s, align 2
+ %.pre = load i32, i32* @g1, align 4
br i1 %tobool6, label %for.body.us, label %for.body
for.body.us: ; preds = %for.body.lr.ph, %for.inc.us
Modified: llvm/trunk/test/CodeGen/X86/code_placement.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/code_placement.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/code_placement.ll (original)
+++ llvm/trunk/test/CodeGen/X86/code_placement.ll Fri Feb 27 15:17:42 2015
@@ -6,9 +6,9 @@
define void @t(i8* nocapture %in, i8* nocapture %out, i32* nocapture %rk, i32 %r) nounwind ssp {
entry:
- %0 = load i32* %rk, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %rk, align 4 ; <i32> [#uses=1]
%1 = getelementptr i32, i32* %rk, i64 1 ; <i32*> [#uses=1]
- %2 = load i32* %1, align 4 ; <i32> [#uses=1]
+ %2 = load i32, i32* %1, align 4 ; <i32> [#uses=1]
%tmp15 = add i32 %r, -1 ; <i32> [#uses=1]
%tmp.16 = zext i32 %tmp15 to i64 ; <i64> [#uses=2]
br label %bb
@@ -24,36 +24,36 @@ bb: ; preds = %bb1, %entry
%3 = lshr i32 %s0.0, 24 ; <i32> [#uses=1]
%4 = zext i32 %3 to i64 ; <i64> [#uses=1]
%5 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %4 ; <i32*> [#uses=1]
- %6 = load i32* %5, align 4 ; <i32> [#uses=1]
+ %6 = load i32, i32* %5, align 4 ; <i32> [#uses=1]
%7 = lshr i32 %s1.0, 16 ; <i32> [#uses=1]
%8 = and i32 %7, 255 ; <i32> [#uses=1]
%9 = zext i32 %8 to i64 ; <i64> [#uses=1]
%10 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %9 ; <i32*> [#uses=1]
- %11 = load i32* %10, align 4 ; <i32> [#uses=1]
+ %11 = load i32, i32* %10, align 4 ; <i32> [#uses=1]
%ctg2.sum2728 = or i64 %tmp18, 8 ; <i64> [#uses=1]
%12 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2728 ; <i8*> [#uses=1]
%13 = bitcast i8* %12 to i32* ; <i32*> [#uses=1]
- %14 = load i32* %13, align 4 ; <i32> [#uses=1]
+ %14 = load i32, i32* %13, align 4 ; <i32> [#uses=1]
%15 = xor i32 %11, %6 ; <i32> [#uses=1]
%16 = xor i32 %15, %14 ; <i32> [#uses=3]
%17 = lshr i32 %s1.0, 24 ; <i32> [#uses=1]
%18 = zext i32 %17 to i64 ; <i64> [#uses=1]
%19 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %18 ; <i32*> [#uses=1]
- %20 = load i32* %19, align 4 ; <i32> [#uses=1]
+ %20 = load i32, i32* %19, align 4 ; <i32> [#uses=1]
%21 = and i32 %s0.0, 255 ; <i32> [#uses=1]
%22 = zext i32 %21 to i64 ; <i64> [#uses=1]
%23 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %22 ; <i32*> [#uses=1]
- %24 = load i32* %23, align 4 ; <i32> [#uses=1]
+ %24 = load i32, i32* %23, align 4 ; <i32> [#uses=1]
%ctg2.sum2930 = or i64 %tmp18, 12 ; <i64> [#uses=1]
%25 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2930 ; <i8*> [#uses=1]
%26 = bitcast i8* %25 to i32* ; <i32*> [#uses=1]
- %27 = load i32* %26, align 4 ; <i32> [#uses=1]
+ %27 = load i32, i32* %26, align 4 ; <i32> [#uses=1]
%28 = xor i32 %24, %20 ; <i32> [#uses=1]
%29 = xor i32 %28, %27 ; <i32> [#uses=4]
%30 = lshr i32 %16, 24 ; <i32> [#uses=1]
%31 = zext i32 %30 to i64 ; <i64> [#uses=1]
%32 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %31 ; <i32*> [#uses=1]
- %33 = load i32* %32, align 4 ; <i32> [#uses=2]
+ %33 = load i32, i32* %32, align 4 ; <i32> [#uses=2]
%exitcond = icmp eq i64 %indvar, %tmp.16 ; <i1> [#uses=1]
br i1 %exitcond, label %bb2, label %bb1
@@ -65,22 +65,22 @@ bb1: ; preds = %bb
%37 = and i32 %36, 255 ; <i32> [#uses=1]
%38 = zext i32 %37 to i64 ; <i64> [#uses=1]
%39 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %38 ; <i32*> [#uses=1]
- %40 = load i32* %39, align 4 ; <i32> [#uses=1]
- %41 = load i32* %35, align 4 ; <i32> [#uses=1]
+ %40 = load i32, i32* %39, align 4 ; <i32> [#uses=1]
+ %41 = load i32, i32* %35, align 4 ; <i32> [#uses=1]
%42 = xor i32 %40, %33 ; <i32> [#uses=1]
%43 = xor i32 %42, %41 ; <i32> [#uses=1]
%44 = lshr i32 %29, 24 ; <i32> [#uses=1]
%45 = zext i32 %44 to i64 ; <i64> [#uses=1]
%46 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %45 ; <i32*> [#uses=1]
- %47 = load i32* %46, align 4 ; <i32> [#uses=1]
+ %47 = load i32, i32* %46, align 4 ; <i32> [#uses=1]
%48 = and i32 %16, 255 ; <i32> [#uses=1]
%49 = zext i32 %48 to i64 ; <i64> [#uses=1]
%50 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %49 ; <i32*> [#uses=1]
- %51 = load i32* %50, align 4 ; <i32> [#uses=1]
+ %51 = load i32, i32* %50, align 4 ; <i32> [#uses=1]
%ctg2.sum32 = add i64 %tmp18, 20 ; <i64> [#uses=1]
%52 = getelementptr i8, i8* %rk26, i64 %ctg2.sum32 ; <i8*> [#uses=1]
%53 = bitcast i8* %52 to i32* ; <i32*> [#uses=1]
- %54 = load i32* %53, align 4 ; <i32> [#uses=1]
+ %54 = load i32, i32* %53, align 4 ; <i32> [#uses=1]
%55 = xor i32 %51, %47 ; <i32> [#uses=1]
%56 = xor i32 %55, %54 ; <i32> [#uses=1]
%indvar.next = add i64 %indvar, 1 ; <i64> [#uses=1]
@@ -96,26 +96,26 @@ bb2: ; preds = %bb
%60 = and i32 %59, 255 ; <i32> [#uses=1]
%61 = zext i32 %60 to i64 ; <i64> [#uses=1]
%62 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %61 ; <i32*> [#uses=1]
- %63 = load i32* %62, align 4 ; <i32> [#uses=1]
+ %63 = load i32, i32* %62, align 4 ; <i32> [#uses=1]
%64 = and i32 %63, 16711680 ; <i32> [#uses=1]
%65 = or i32 %64, %58 ; <i32> [#uses=1]
- %66 = load i32* %57, align 4 ; <i32> [#uses=1]
+ %66 = load i32, i32* %57, align 4 ; <i32> [#uses=1]
%67 = xor i32 %65, %66 ; <i32> [#uses=2]
%68 = lshr i32 %29, 8 ; <i32> [#uses=1]
%69 = zext i32 %68 to i64 ; <i64> [#uses=1]
%70 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %69 ; <i32*> [#uses=1]
- %71 = load i32* %70, align 4 ; <i32> [#uses=1]
+ %71 = load i32, i32* %70, align 4 ; <i32> [#uses=1]
%72 = and i32 %71, -16777216 ; <i32> [#uses=1]
%73 = and i32 %16, 255 ; <i32> [#uses=1]
%74 = zext i32 %73 to i64 ; <i64> [#uses=1]
%75 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %74 ; <i32*> [#uses=1]
- %76 = load i32* %75, align 4 ; <i32> [#uses=1]
+ %76 = load i32, i32* %75, align 4 ; <i32> [#uses=1]
%77 = and i32 %76, 16711680 ; <i32> [#uses=1]
%78 = or i32 %77, %72 ; <i32> [#uses=1]
%ctg2.sum25 = add i64 %tmp10, 20 ; <i64> [#uses=1]
%79 = getelementptr i8, i8* %rk26, i64 %ctg2.sum25 ; <i8*> [#uses=1]
%80 = bitcast i8* %79 to i32* ; <i32*> [#uses=1]
- %81 = load i32* %80, align 4 ; <i32> [#uses=1]
+ %81 = load i32, i32* %80, align 4 ; <i32> [#uses=1]
%82 = xor i32 %78, %81 ; <i32> [#uses=2]
%83 = lshr i32 %67, 24 ; <i32> [#uses=1]
%84 = trunc i32 %83 to i8 ; <i8> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ define i8 @twoArgsPromotion(i32 %arg1, i
%add = add nsw i32 %arg1, %arg2
%sextadd = sext i32 %add to i64
%base = inttoptr i64 %sextadd to i8*
- %res = load i8* %base
+ %res = load i8, i8* %base
ret i8 %res
}
@@ -36,7 +36,7 @@ define i8 @twoArgsNoPromotion(i32 %arg1,
%add = add nsw i32 %arg1, %arg2
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -49,7 +49,7 @@ define i8 @noPromotion(i32 %arg1, i32 %a
%add = add i32 %arg1, %arg2
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -63,7 +63,7 @@ define i8 @oneArgPromotion(i32 %arg1, i8
%add = add nsw i32 %arg1, 1
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -78,7 +78,7 @@ define i8 @oneArgPromotionZExt(i8 %arg1,
%add = add nsw i32 %zext, 1
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -100,7 +100,7 @@ define i8 @oneArgPromotionCstZExt(i8* %b
%add = add nsw i32 %cst, 1
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -117,7 +117,7 @@ define i8 @oneArgPromotionBlockTrunc1(i3
%add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -136,7 +136,7 @@ define i8 @oneArgPromotionBlockTrunc2(i1
%add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -153,7 +153,7 @@ define i8 @oneArgPromotionPassTruncKeepS
%add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -165,14 +165,14 @@ define i8 @oneArgPromotionPassTruncKeepS
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
; CHECK: [[TRUNC:%[a-zA-Z_0-9-]+]] = trunc i64 [[PROMOTED]] to i8
; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
-; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8* [[GEP]]
+; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8, i8* [[GEP]]
; CHECK: add i8 [[LOAD]], [[TRUNC]]
; CHECK: ret
define i8 @oneArgPromotionTruncInsert(i8 %arg1, i8* %base) {
%add = add nsw i8 %arg1, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
%finalres = add i8 %res, %add
ret i8 %finalres
}
@@ -189,7 +189,7 @@ define i8 @oneArgPromotionLargerType(i12
%add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
%finalres = add i8 %res, %add
ret i8 %finalres
}
@@ -203,7 +203,7 @@ define i8 @oneArgPromotionLargerType(i12
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
; CHECK: [[TRUNC:%[a-zA-Z_0-9-]+]] = trunc i64 [[PROMOTED]] to i8
; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
-; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8* [[GEP]]
+; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8, i8* [[GEP]]
; CHECK: [[ADDRES:%[a-zA-Z_0-9-]+]] = add i8 [[LOAD]], [[TRUNC]]
; CHECK: add i8 [[ADDRES]], [[TRUNC]]
; CHECK: ret
@@ -211,7 +211,7 @@ define i8 @oneArgPromotionTruncInsertSev
%add = add nsw i8 %arg1, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
%almostfinalres = add i8 %res, %add
%finalres = add i8 %almostfinalres, %add
ret i8 %finalres
@@ -223,7 +223,7 @@ define i8 @oneArgPromotionTruncInsertSev
; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 %arg1 to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
-; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8* [[GEP]]
+; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8, i8* [[GEP]]
; CHECK: [[ADDRES:%[a-zA-Z_0-9-]+]] = zext i8 [[LOAD]] to i64
; CHECK: add i64 [[ADDRES]], [[PROMOTED]]
; CHECK: ret
@@ -231,7 +231,7 @@ define i64 @oneArgPromotionSExtSeveralUs
%add = add nsw i8 %arg1, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
%almostfinalres = zext i8 %res to i64
%finalres = add i64 %almostfinalres, %sextadd
ret i64 %finalres
@@ -264,7 +264,7 @@ define i8 @twoArgsPromotionNest(i32 %arg
%promotableadd2 = add nsw i32 %promotableadd1, %promotableadd1
%sextadd = sext i32 %promotableadd2 to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -287,7 +287,7 @@ define i8 @twoArgsNoPromotionRemove(i1 %
%add = add nsw i8 %trunc, %arg2
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -313,11 +313,11 @@ define i8 @twoArgsNoPromotionRemove(i1 %
; BB then
; CHECK: [[BASE1:%[a-zA-Z_0-9-]+]] = add i64 [[SEXTADD]], 48
; CHECK: [[ADDR1:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[BASE1]] to i32*
-; CHECK: load i32* [[ADDR1]]
+; CHECK: load i32, i32* [[ADDR1]]
; BB else
; CHECK: [[BASE2:%[a-zA-Z_0-9-]+]] = add i64 [[SEXTADD]], 48
; CHECK: [[ADDR2:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[BASE2]] to i32*
-; CHECK: load i32* [[ADDR2]]
+; CHECK: load i32, i32* [[ADDR2]]
; CHECK: ret
; CHECK-GEP-LABEL: @checkProfitability
; CHECK-GEP-NOT: {{%[a-zA-Z_0-9-]+}} = sext i32 %arg1 to i64
@@ -330,13 +330,13 @@ define i8 @twoArgsNoPromotionRemove(i1 %
; CHECK-GEP: [[BCC1:%[a-zA-Z_0-9-]+]] = bitcast i32* [[BASE1]] to i8*
; CHECK-GEP: [[FULL1:%[a-zA-Z_0-9-]+]] = getelementptr i8, i8* [[BCC1]], i64 48
; CHECK-GEP: [[ADDR1:%[a-zA-Z_0-9-]+]] = bitcast i8* [[FULL1]] to i32*
-; CHECK-GEP: load i32* [[ADDR1]]
+; CHECK-GEP: load i32, i32* [[ADDR1]]
; BB else
; CHECK-GEP: [[BASE2:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[SEXTADD]] to i32*
; CHECK-GEP: [[BCC2:%[a-zA-Z_0-9-]+]] = bitcast i32* [[BASE2]] to i8*
; CHECK-GEP: [[FULL2:%[a-zA-Z_0-9-]+]] = getelementptr i8, i8* [[BCC2]], i64 48
; CHECK-GEP: [[ADDR2:%[a-zA-Z_0-9-]+]] = bitcast i8* [[FULL2]] to i32*
-; CHECK-GEP: load i32* [[ADDR2]]
+; CHECK-GEP: load i32, i32* [[ADDR2]]
; CHECK-GEP: ret
define i32 @checkProfitability(i32 %arg1, i32 %arg2, i1 %test) {
%shl = shl nsw i32 %arg1, 1
@@ -346,16 +346,16 @@ define i32 @checkProfitability(i32 %arg1
%arrayidx1 = getelementptr i32, i32* %tmpptr, i64 12
br i1 %test, label %then, label %else
then:
- %res1 = load i32* %arrayidx1
+ %res1 = load i32, i32* %arrayidx1
br label %end
else:
- %res2 = load i32* %arrayidx1
+ %res2 = load i32, i32* %arrayidx1
br label %end
end:
%tmp = phi i32 [%res1, %then], [%res2, %else]
%res = add i32 %tmp, %add1
%addr = inttoptr i32 %res to i32*
- %final = load i32* %addr
+ %final = load i32, i32* %addr
ret i32 %final
}
@@ -377,7 +377,7 @@ end:
; CHECK-NEXT: [[ADD:%[a-zA-Z_0-9-]+]] = add i64 [[BASE]], [[PROMOTED_CONV]]
; CHECK-NEXT: [[ADDR:%[a-zA-Z_0-9-]+]] = add i64 [[ADD]], 7
; CHECK-NEXT: [[CAST:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[ADDR]] to i8*
-; CHECK-NEXT: load i8* [[CAST]], align 1
+; CHECK-NEXT: load i8, i8* [[CAST]], align 1
define signext i16 @fn3(%struct.dns_packet* nocapture readonly %P) {
entry:
%tmp = getelementptr inbounds %struct.dns_packet, %struct.dns_packet* %P, i64 0, i32 2
@@ -389,7 +389,7 @@ while.body.i.i:
%inc.i.i = add i16 %src.addr.0.i.i, 1
%idxprom.i.i = sext i16 %src.addr.0.i.i to i64
%arrayidx.i.i = getelementptr inbounds [0 x i8], [0 x i8]* %data.i.i, i64 0, i64 %idxprom.i.i
- %tmp1 = load i8* %arrayidx.i.i, align 1
+ %tmp1 = load i8, i8* %arrayidx.i.i, align 1
%conv2.i.i = zext i8 %tmp1 to i32
%and.i.i = and i32 %conv2.i.i, 15
store i32 %and.i.i, i32* @a, align 4
@@ -402,7 +402,7 @@ fn1.exit.i:
%sub.i = add nsw i32 %conv.i, -1
%idxprom.i = sext i32 %sub.i to i64
%arrayidx.i = getelementptr inbounds [0 x i8], [0 x i8]* %data.i.i, i64 0, i64 %idxprom.i
- %tmp2 = load i8* %arrayidx.i, align 1
+ %tmp2 = load i8, i8* %arrayidx.i, align 1
%conv2.i = sext i8 %tmp2 to i16
store i16 %conv2.i, i16* @b, align 2
%sub4.i = sub nsw i32 0, %conv.i
@@ -412,7 +412,7 @@ fn1.exit.i:
if.then.i: ; preds = %fn1.exit.i
%end.i = getelementptr inbounds %struct.dns_packet, %struct.dns_packet* %P, i64 0, i32 1
- %tmp3 = load i32* %end.i, align 4
+ %tmp3 = load i32, i32* %end.i, align 4
%sub7.i = add i32 %tmp3, 65535
%conv8.i = trunc i32 %sub7.i to i16
br label %fn2.exit
@@ -433,7 +433,7 @@ define i8 @noPromotionFlag(i32 %arg1, i3
%add = add nsw i32 %arg1, %arg2
%zextadd = zext i32 %add to i64
%base = inttoptr i64 %zextadd to i8*
- %res = load i8* %base
+ %res = load i8, i8* %base
ret i8 %res
}
@@ -448,7 +448,7 @@ define i8 @twoArgsPromotionZExt(i32 %arg
%add = add nuw i32 %arg1, %arg2
%zextadd = zext i32 %add to i64
%base = inttoptr i64 %zextadd to i8*
- %res = load i8* %base
+ %res = load i8, i8* %base
ret i8 %res
}
@@ -462,7 +462,7 @@ define i8 @oneArgPromotionNegativeCstZEx
%add = add nuw i8 %arg1, -1
%zextadd = zext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -477,7 +477,7 @@ define i8 @oneArgPromotionZExtZExt(i8 %a
%add = add nuw i32 %zext, 1
%zextadd = zext i32 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -496,7 +496,7 @@ define i8 @oneArgPromotionBlockTruncZExt
%add = add nuw i8 %trunc, 1
%zextadd = zext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -513,7 +513,7 @@ define i8 @oneArgPromotionPassTruncZExt(
%add = add nuw i8 %trunc, 1
%zextadd = zext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -529,6 +529,6 @@ define i8 @oneArgPromotionBlockSExtZExt(
%add = add nuw i8 %sextarg1, 1
%zextadd = zext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
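Since codegen-prepare-addrmode-sext.ll verifies optimizer output with FileCheck, its CHECK patterns have to track the new syntax as well; a stale 'load i32*' pattern would no longer match the reformatted IR and the test would fail. The updated patterns mirror the IR one-for-one, for example (reusing the [[ADDR1]] capture from the hunk above):

; CHECK: load i32, i32* [[ADDR1]]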
Modified: llvm/trunk/test/CodeGen/X86/codegen-prepare-cast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/codegen-prepare-cast.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/codegen-prepare-cast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/codegen-prepare-cast.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-linux-gn
@.str = external constant [7 x i8] ; <[7 x i8]*> [#uses=1]
; CHECK-LABEL: @_Dmain
-; CHECK: load i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0)
+; CHECK: load i8, i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0)
; CHECK: ret
define fastcc i32 @_Dmain(%"char[][]" %unnamed) {
entry:
@@ -19,7 +19,7 @@ entry:
foreachbody: ; preds = %entry
%tmp4 = getelementptr i8, i8* %tmp, i32 undef ; <i8*> [#uses=1]
- %tmp5 = load i8* %tmp4 ; <i8> [#uses=0]
+ %tmp5 = load i8, i8* %tmp4 ; <i8> [#uses=0]
unreachable
foreachend: ; preds = %entry
Modified: llvm/trunk/test/CodeGen/X86/codegen-prepare-extload.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/codegen-prepare-extload.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/codegen-prepare-extload.ll (original)
+++ llvm/trunk/test/CodeGen/X86/codegen-prepare-extload.ll Fri Feb 27 15:17:42 2015
@@ -12,13 +12,13 @@
; CHECK: movsbl ({{%rdi|%rcx}}), %eax
;
; OPTALL-LABEL: @foo
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
; OPTALL-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
; OPTALL: store i32 [[ZEXT]], i32* %q
; OPTALL: ret
define void @foo(i8* %p, i32* %q) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%a = icmp slt i8 %t, 20
br i1 %a, label %true, label %false
true:
@@ -32,7 +32,7 @@ false:
; Check that we manage to form a zextload if an operation with only one
; argument to explicitly extend is in the way.
; OPTALL-LABEL: @promoteOneArg
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
; OPT-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXT]], 2
; Make sure the operation is not promoted when the promotion pass is disabled.
@@ -42,7 +42,7 @@ false:
; OPTALL: ret
define void @promoteOneArg(i8* %p, i32* %q) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%add = add nuw i8 %t, 2
%a = icmp slt i8 %t, 20
br i1 %a, label %true, label %false
@@ -58,7 +58,7 @@ false:
; argument to explicitly extend is in the way.
; Version with sext.
; OPTALL-LABEL: @promoteOneArgSExt
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
; OPT-NEXT: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i8 [[LD]] to i32
; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i32 [[SEXT]], 2
; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i8 [[LD]], 2
@@ -67,7 +67,7 @@ false:
; OPTALL: ret
define void @promoteOneArgSExt(i8* %p, i32* %q) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%add = add nsw i8 %t, 2
%a = icmp slt i8 %t, 20
br i1 %a, label %true, label %false
@@ -90,7 +90,7 @@ false:
; transformation, the regular heuristic does not apply the optimization.
;
; OPTALL-LABEL: @promoteTwoArgZext
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
;
; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i8 %b to i32
@@ -106,7 +106,7 @@ false:
; OPTALL: ret
define void @promoteTwoArgZext(i8* %p, i32* %q, i8 %b) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%add = add nuw i8 %t, %b
%a = icmp slt i8 %t, 20
br i1 %a, label %true, label %false
@@ -122,7 +122,7 @@ false:
; arguments to explicitly extend is in the way.
; Version with sext.
; OPTALL-LABEL: @promoteTwoArgSExt
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
;
; STRESS-NEXT: [[SEXTLD:%[a-zA-Z_0-9-]+]] = sext i8 [[LD]] to i32
; STRESS-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i8 %b to i32
@@ -137,7 +137,7 @@ false:
; OPTALL: ret
define void @promoteTwoArgSExt(i8* %p, i32* %q, i8 %b) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%add = add nsw i8 %t, %b
%a = icmp slt i8 %t, 20
br i1 %a, label %true, label %false
@@ -152,7 +152,7 @@ false:
; Check that we do not form a zextload if we need to introduce more than
; one additional extension.
; OPTALL-LABEL: @promoteThreeArgZext
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
;
; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i8 %b to i32
@@ -172,7 +172,7 @@ false:
; OPTALL: ret
define void @promoteThreeArgZext(i8* %p, i32* %q, i8 %b, i8 %c) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%tmp = add nuw i8 %t, %b
%add = add nuw i8 %tmp, %c
%a = icmp slt i8 %t, 20
@@ -188,7 +188,7 @@ false:
; Check that we manage to form a zextload after promoting and merging
; two extensions.
; OPTALL-LABEL: @promoteMergeExtArgZExt
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
;
; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i16 %b to i32
@@ -206,7 +206,7 @@ false:
; OPTALL: ret
define void @promoteMergeExtArgZExt(i8* %p, i32* %q, i16 %b) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%ext = zext i8 %t to i16
%add = add nuw i16 %ext, %b
%a = icmp slt i8 %t, 20
@@ -223,7 +223,7 @@ false:
; two extensions.
; Version with sext.
; OPTALL-LABEL: @promoteMergeExtArgSExt
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
;
; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = sext i16 %b to i32
@@ -240,7 +240,7 @@ false:
; OPTALL: ret
define void @promoteMergeExtArgSExt(i8* %p, i32* %q, i16 %b) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%ext = zext i8 %t to i16
%add = add nsw i16 %ext, %b
%a = icmp slt i8 %t, 20
@@ -284,11 +284,11 @@ false:
; 3 identical zext of %ld. The extensions will be CSE'ed by SDag.
;
; OPTALL-LABEL: @severalPromotions
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %addr1
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %addr1
; OPT-NEXT: [[ZEXTLD1_1:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
; OPT-NEXT: [[ZEXTLD1_2:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
; OPT-NEXT: [[ZEXTLD1_3:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
-; OPT-NEXT: [[LD2:%[a-zA-Z_0-9-]+]] = load i32* %addr2
+; OPT-NEXT: [[LD2:%[a-zA-Z_0-9-]+]] = load i32, i32* %addr2
; OPT-NEXT: [[SEXTLD2:%[a-zA-Z_0-9-]+]] = sext i32 [[LD2]] to i64
; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTLD2]], [[ZEXTLD1_1]]
; We do not combine this one: see 2.b.
@@ -308,9 +308,9 @@ false:
; OPTALL: call void @dummy(i64 [[RES]], i64 [[RESZA]], i64 [[RESB]])
; OPTALL: ret
define void @severalPromotions(i8* %addr1, i32* %addr2, i8 %a, i32 %b) {
- %ld = load i8* %addr1
+ %ld = load i8, i8* %addr1
%zextld = zext i8 %ld to i32
- %ld2 = load i32* %addr2
+ %ld2 = load i32, i32* %addr2
%add = add nsw i32 %ld2, %zextld
%sextadd = sext i32 %add to i64
%zexta = zext i8 %a to i32
@@ -345,7 +345,7 @@ entry:
; to an instruction.
; This used to cause a crash.
; OPTALL-LABEL: @promotionOfArgEndsUpInValue
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i16* %addr
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i16, i16* %addr
; OPT-NEXT: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i16 [[LD]] to i32
; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw nsw i32 [[SEXT]], zext (i1 icmp ne (i32* getelementptr inbounds ([2 x i32]* @c, i64 0, i64 1), i32* @a) to i32)
@@ -356,7 +356,7 @@ entry:
; OPTALL-NEXT: ret i32 [[RES]]
define i32 @promotionOfArgEndsUpInValue(i16* %addr) {
entry:
- %val = load i16* %addr
+ %val = load i16, i16* %addr
%add = add nuw nsw i16 %val, zext (i1 icmp ne (i32* getelementptr inbounds ([2 x i32]* @c, i64 0, i64 1), i32* @a) to i16)
%conv3 = sext i16 %add to i32
ret i32 %conv3
Modified: llvm/trunk/test/CodeGen/X86/codegen-prepare.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/codegen-prepare.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/codegen-prepare.ll (original)
+++ llvm/trunk/test/CodeGen/X86/codegen-prepare.ll Fri Feb 27 15:17:42 2015
@@ -25,9 +25,9 @@ entry:
if.then: ; preds = %entry
%0 = getelementptr inbounds %class.D, %class.D* %address2, i64 0, i32 0, i64 0, i32 0
- %1 = load float* %0, align 4
+ %1 = load float, float* %0, align 4
%2 = getelementptr inbounds float, float* %0, i64 3
- %3 = load float* %2, align 4
+ %3 = load float, float* %2, align 4
%4 = getelementptr inbounds %class.D, %class.D* %address1, i64 0, i32 0, i64 0, i32 0
store float %1, float* %4, align 4
br label %if.end
Modified: llvm/trunk/test/CodeGen/X86/codemodel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/codemodel.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/codemodel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/codemodel.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ entry:
; CHECK-SMALL: movl data(%rip), %eax
; CHECK-KERNEL-LABEL: foo:
; CHECK-KERNEL: movl data, %eax
- %0 = load i32* getelementptr ([0 x i32]* @data, i64 0, i64 0), align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* getelementptr ([0 x i32]* @data, i64 0, i64 0), align 4 ; <i32> [#uses=1]
ret i32 %0
}
@@ -21,7 +21,7 @@ entry:
; CHECK-SMALL: movl data+40(%rip), %eax
; CHECK-KERNEL-LABEL: foo2:
; CHECK-KERNEL: movl data+40, %eax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 10), align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* getelementptr ([0 x i32]* @data, i32 0, i64 10), align 4 ; <i32> [#uses=1]
ret i32 %0
}
@@ -31,7 +31,7 @@ entry:
; CHECK-SMALL: movl data-40(%rip), %eax
; CHECK-KERNEL-LABEL: foo3:
; CHECK-KERNEL: movq $-40, %rax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 -10), align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* getelementptr ([0 x i32]* @data, i32 0, i64 -10), align 4 ; <i32> [#uses=1]
ret i32 %0
}
@@ -43,7 +43,7 @@ entry:
; CHECK-SMALL: movl data(%rax), %eax
; CHECK-KERNEL-LABEL: foo4:
; CHECK-KERNEL: movl data+16777216, %eax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 4194304), align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* getelementptr ([0 x i32]* @data, i32 0, i64 4194304), align 4 ; <i32> [#uses=1]
ret i32 %0
}
@@ -53,7 +53,7 @@ entry:
; CHECK-SMALL: movl data+16777212(%rip), %eax
; CHECK-KERNEL-LABEL: foo1:
; CHECK-KERNEL: movl data+16777212, %eax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 4194303), align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* getelementptr ([0 x i32]* @data, i32 0, i64 4194303), align 4 ; <i32> [#uses=1]
ret i32 %0
}
define i32 @foo5() nounwind readonly {
@@ -62,6 +62,6 @@ entry:
; CHECK-SMALL: movl data-16777216(%rip), %eax
; CHECK-KERNEL-LABEL: foo5:
; CHECK-KERNEL: movq $-16777216, %rax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 -4194304), align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* getelementptr ([0 x i32]* @data, i32 0, i64 -4194304), align 4 ; <i32> [#uses=1]
ret i32 %0
}
Modified: llvm/trunk/test/CodeGen/X86/combiner-aa-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combiner-aa-0.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combiner-aa-0.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combiner-aa-0.ll Fri Feb 27 15:17:42 2015
@@ -5,14 +5,14 @@ target datalayout = "e-p:64:64:64-i1:8:8
@g_flipV_hashkey = external global %struct.Hash_Key, align 16 ; <%struct.Hash_Key*> [#uses=1]
define void @foo() nounwind {
- %t0 = load i32* undef, align 16 ; <i32> [#uses=1]
- %t1 = load i32* null, align 4 ; <i32> [#uses=1]
+ %t0 = load i32, i32* undef, align 16 ; <i32> [#uses=1]
+ %t1 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%t2 = srem i32 %t0, 32 ; <i32> [#uses=1]
%t3 = shl i32 1, %t2 ; <i32> [#uses=1]
%t4 = xor i32 %t3, %t1 ; <i32> [#uses=1]
store i32 %t4, i32* null, align 4
%t5 = getelementptr %struct.Hash_Key, %struct.Hash_Key* @g_flipV_hashkey, i64 0, i32 0, i64 0 ; <i32*> [#uses=2]
- %t6 = load i32* %t5, align 4 ; <i32> [#uses=1]
+ %t6 = load i32, i32* %t5, align 4 ; <i32> [#uses=1]
%t7 = shl i32 1, undef ; <i32> [#uses=1]
%t8 = xor i32 %t7, %t6 ; <i32> [#uses=1]
store i32 %t8, i32* %t5, align 4
Modified: llvm/trunk/test/CodeGen/X86/combiner-aa-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combiner-aa-1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combiner-aa-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combiner-aa-1.ll Fri Feb 27 15:17:42 2015
@@ -13,9 +13,9 @@ target triple = "i386-pc-linux-gnu"
define i32 @._ZN8lam_node18resolve_name_clashEP8arg_nodeP9alst_node._ZNK8lam_nodeeqERK8exp_node._ZN11arglst_nodeD0Ev(%struct.lam_node* %this.this, %struct.arg_node* %outer_arg, %struct.alst_node* %env.cmp, %struct.arglst_node* %this, i32 %functionID) {
comb_entry:
%.SV59 = alloca %struct.node* ; <%struct.node**> [#uses=1]
- %0 = load i32 (...)*** null, align 4 ; <i32 (...)**> [#uses=1]
+ %0 = load i32 (...)**, i32 (...)*** null, align 4 ; <i32 (...)**> [#uses=1]
%1 = getelementptr inbounds i32 (...)*, i32 (...)** %0, i32 3 ; <i32 (...)**> [#uses=1]
- %2 = load i32 (...)** %1, align 4 ; <i32 (...)*> [#uses=1]
+ %2 = load i32 (...)*, i32 (...)** %1, align 4 ; <i32 (...)*> [#uses=1]
store %struct.node* undef, %struct.node** %.SV59
%3 = bitcast i32 (...)* %2 to i32 (%struct.node*)* ; <i32 (%struct.node*)*> [#uses=1]
%4 = tail call i32 %3(%struct.node* undef) ; <i32> [#uses=0]
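The combiner-aa-1.ll hunk above is the same rule applied to a hairier type: loading through an i32 (...)*** yields an i32 (...)** value, so that is the type spelled first. A standalone sketch (hypothetical %fpp):

  %vt = load i32 (...)**, i32 (...)*** %fpp, align 4

Reading every load as 'load <value type>, <value type>* <pointer>' keeps even function-pointer tables parseable at a glance.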
Modified: llvm/trunk/test/CodeGen/X86/commute-blend-avx2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-blend-avx2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-blend-avx2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-blend-avx2.ll Fri Feb 27 15:17:42 2015
@@ -1,7 +1,7 @@
; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=core-avx2 -mattr=avx2 < %s | FileCheck %s
define <8 x i16> @commute_fold_vpblendw_128(<8 x i16> %a, <8 x i16>* %b) #0 {
- %1 = load <8 x i16>* %b
+ %1 = load <8 x i16>, <8 x i16>* %b
%2 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %1, <8 x i16> %a, i8 17)
ret <8 x i16> %2
@@ -12,7 +12,7 @@ define <8 x i16> @commute_fold_vpblendw_
declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind readnone
define <16 x i16> @commute_fold_vpblendw_256(<16 x i16> %a, <16 x i16>* %b) #0 {
- %1 = load <16 x i16>* %b
+ %1 = load <16 x i16>, <16 x i16>* %b
%2 = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %1, <16 x i16> %a, i8 17)
ret <16 x i16> %2
@@ -23,7 +23,7 @@ define <16 x i16> @commute_fold_vpblendw
declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i8) nounwind readnone
define <4 x i32> @commute_fold_vpblendd_128(<4 x i32> %a, <4 x i32>* %b) #0 {
- %1 = load <4 x i32>* %b
+ %1 = load <4 x i32>, <4 x i32>* %b
%2 = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %1, <4 x i32> %a, i8 1)
ret <4 x i32> %2
@@ -34,7 +34,7 @@ define <4 x i32> @commute_fold_vpblendd_
declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i8) nounwind readnone
define <8 x i32> @commute_fold_vpblendd_256(<8 x i32> %a, <8 x i32>* %b) #0 {
- %1 = load <8 x i32>* %b
+ %1 = load <8 x i32>, <8 x i32>* %b
%2 = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %1, <8 x i32> %a, i8 129)
ret <8 x i32> %2
@@ -45,7 +45,7 @@ define <8 x i32> @commute_fold_vpblendd_
declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i8) nounwind readnone
define <4 x float> @commute_fold_vblendps_128(<4 x float> %a, <4 x float>* %b) #0 {
- %1 = load <4 x float>* %b
+ %1 = load <4 x float>, <4 x float>* %b
%2 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %1, <4 x float> %a, i8 3)
ret <4 x float> %2
@@ -56,7 +56,7 @@ define <4 x float> @commute_fold_vblendp
declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i8) nounwind readnone
define <8 x float> @commute_fold_vblendps_256(<8 x float> %a, <8 x float>* %b) #0 {
- %1 = load <8 x float>* %b
+ %1 = load <8 x float>, <8 x float>* %b
%2 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %1, <8 x float> %a, i8 7)
ret <8 x float> %2
@@ -67,7 +67,7 @@ define <8 x float> @commute_fold_vblendp
declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone
define <2 x double> @commute_fold_vblendpd_128(<2 x double> %a, <2 x double>* %b) #0 {
- %1 = load <2 x double>* %b
+ %1 = load <2 x double>, <2 x double>* %b
%2 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %1, <2 x double> %a, i8 1)
ret <2 x double> %2
@@ -78,7 +78,7 @@ define <2 x double> @commute_fold_vblend
declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i8) nounwind readnone
define <4 x double> @commute_fold_vblendpd_256(<4 x double> %a, <4 x double>* %b) #0 {
- %1 = load <4 x double>* %b
+ %1 = load <4 x double>, <4 x double>* %b
%2 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %1, <4 x double> %a, i8 7)
ret <4 x double> %2
Modified: llvm/trunk/test/CodeGen/X86/commute-blend-sse41.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-blend-sse41.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-blend-sse41.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-blend-sse41.ll Fri Feb 27 15:17:42 2015
@@ -1,7 +1,7 @@
; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=corei7 < %s | FileCheck %s
define <8 x i16> @commute_fold_pblendw(<8 x i16> %a, <8 x i16>* %b) #0 {
- %1 = load <8 x i16>* %b
+ %1 = load <8 x i16>, <8 x i16>* %b
%2 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %1, <8 x i16> %a, i8 17)
ret <8 x i16> %2
@@ -12,7 +12,7 @@ define <8 x i16> @commute_fold_pblendw(<
declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind readnone
define <4 x float> @commute_fold_blendps(<4 x float> %a, <4 x float>* %b) #0 {
- %1 = load <4 x float>* %b
+ %1 = load <4 x float>, <4 x float>* %b
%2 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %1, <4 x float> %a, i8 3)
ret <4 x float> %2
@@ -23,7 +23,7 @@ define <4 x float> @commute_fold_blendps
declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i8) nounwind readnone
define <2 x double> @commute_fold_blendpd(<2 x double> %a, <2 x double>* %b) #0 {
- %1 = load <2 x double>* %b
+ %1 = load <2 x double>, <2 x double>* %b
%2 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %1, <2 x double> %a, i8 1)
ret <2 x double> %2
Modified: llvm/trunk/test/CodeGen/X86/commute-clmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-clmul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-clmul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-clmul.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ define <2 x i64> @commute_lq_lq(<2 x i64
;AVX: vpclmulqdq $0, (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x i64>* %a0
+ %1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 0)
ret <2 x i64> %2
}
@@ -26,7 +26,7 @@ define <2 x i64> @commute_lq_hq(<2 x i64
;AVX: vpclmulqdq $1, (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x i64>* %a0
+ %1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 16)
ret <2 x i64> %2
}
@@ -40,7 +40,7 @@ define <2 x i64> @commute_hq_lq(<2 x i64
;AVX: vpclmulqdq $16, (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x i64>* %a0
+ %1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 1)
ret <2 x i64> %2
}
@@ -54,7 +54,7 @@ define <2 x i64> @commute_hq_hq(<2 x i64
;AVX: vpclmulqdq $17, (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x i64>* %a0
+ %1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 17)
ret <2 x i64> %2
}
Modified: llvm/trunk/test/CodeGen/X86/commute-fcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-fcmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-fcmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-fcmp.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ define <4 x i32> @commute_cmpps_eq(<4 x
;AVX: vcmpeqps (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <4 x float>* %a0
+ %1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp oeq <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
ret <4 x i32> %3
@@ -30,7 +30,7 @@ define <4 x i32> @commute_cmpps_ne(<4 x
;AVX: vcmpneqps (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <4 x float>* %a0
+ %1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp une <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
ret <4 x i32> %3
@@ -45,7 +45,7 @@ define <4 x i32> @commute_cmpps_ord(<4 x
;AVX: vcmpordps (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <4 x float>* %a0
+ %1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp ord <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
ret <4 x i32> %3
@@ -60,7 +60,7 @@ define <4 x i32> @commute_cmpps_uno(<4 x
;AVX: vcmpunordps (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <4 x float>* %a0
+ %1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp uno <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
ret <4 x i32> %3
@@ -78,7 +78,7 @@ define <4 x i32> @commute_cmpps_lt(<4 x
;AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
;AVX-NEXT: retq
- %1 = load <4 x float>* %a0
+ %1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp olt <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
ret <4 x i32> %3
@@ -96,7 +96,7 @@ define <4 x i32> @commute_cmpps_le(<4 x
;AVX-NEXT: vcmpleps %xmm0, %xmm1, %xmm0
;AVX-NEXT: retq
- %1 = load <4 x float>* %a0
+ %1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp ole <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
ret <4 x i32> %3
@@ -107,7 +107,7 @@ define <8 x i32> @commute_cmpps_eq_ymm(<
;AVX: vcmpeqps (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <8 x float>* %a0
+ %1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp oeq <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
ret <8 x i32> %3
@@ -118,7 +118,7 @@ define <8 x i32> @commute_cmpps_ne_ymm(<
;AVX: vcmpneqps (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <8 x float>* %a0
+ %1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp une <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
ret <8 x i32> %3
@@ -129,7 +129,7 @@ define <8 x i32> @commute_cmpps_ord_ymm(
;AVX: vcmpordps (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <8 x float>* %a0
+ %1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp ord <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
ret <8 x i32> %3
@@ -140,7 +140,7 @@ define <8 x i32> @commute_cmpps_uno_ymm(
;AVX: vcmpunordps (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <8 x float>* %a0
+ %1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp uno <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
ret <8 x i32> %3
@@ -152,7 +152,7 @@ define <8 x i32> @commute_cmpps_lt_ymm(<
;AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
;AVX-NEXT: retq
- %1 = load <8 x float>* %a0
+ %1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp olt <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
ret <8 x i32> %3
@@ -164,7 +164,7 @@ define <8 x i32> @commute_cmpps_le_ymm(<
;AVX-NEXT: vcmpleps %ymm0, %ymm1, %ymm0
;AVX-NEXT: retq
- %1 = load <8 x float>* %a0
+ %1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp ole <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
ret <8 x i32> %3
@@ -184,7 +184,7 @@ define <2 x i64> @commute_cmppd_eq(<2 x
;AVX: vcmpeqpd (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x double>* %a0
+ %1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp oeq <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
ret <2 x i64> %3
@@ -199,7 +199,7 @@ define <2 x i64> @commute_cmppd_ne(<2 x
;AVX: vcmpneqpd (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x double>* %a0
+ %1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp une <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
ret <2 x i64> %3
@@ -214,7 +214,7 @@ define <2 x i64> @commute_cmppd_ord(<2 x
;AVX: vcmpordpd (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x double>* %a0
+ %1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp ord <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
ret <2 x i64> %3
@@ -229,7 +229,7 @@ define <2 x i64> @commute_cmppd_uno(<2 x
;AVX: vcmpunordpd (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x double>* %a0
+ %1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp uno <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
ret <2 x i64> %3
@@ -247,7 +247,7 @@ define <2 x i64> @commute_cmppd_lt(<2 x
;AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x double>* %a0
+ %1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp olt <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
ret <2 x i64> %3
@@ -265,7 +265,7 @@ define <2 x i64> @commute_cmppd_le(<2 x
;AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x double>* %a0
+ %1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp ole <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
ret <2 x i64> %3
@@ -276,7 +276,7 @@ define <4 x i64> @commute_cmppd_eq_ymmm(
;AVX: vcmpeqpd (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <4 x double>* %a0
+ %1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp oeq <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
ret <4 x i64> %3
@@ -287,7 +287,7 @@ define <4 x i64> @commute_cmppd_ne_ymmm(
;AVX: vcmpneqpd (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <4 x double>* %a0
+ %1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp une <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
ret <4 x i64> %3
@@ -298,7 +298,7 @@ define <4 x i64> @commute_cmppd_ord_ymmm
;AVX: vcmpordpd (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <4 x double>* %a0
+ %1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp ord <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
ret <4 x i64> %3
@@ -309,7 +309,7 @@ define <4 x i64> @commute_cmppd_uno_ymmm
;AVX: vcmpunordpd (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <4 x double>* %a0
+ %1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp uno <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
ret <4 x i64> %3
@@ -321,7 +321,7 @@ define <4 x i64> @commute_cmppd_lt_ymmm(
;AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
;AVX-NEXT: retq
- %1 = load <4 x double>* %a0
+ %1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp olt <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
ret <4 x i64> %3
@@ -333,7 +333,7 @@ define <4 x i64> @commute_cmppd_le_ymmm(
;AVX-NEXT: vcmplepd %ymm0, %ymm1, %ymm0
;AVX-NEXT: retq
- %1 = load <4 x double>* %a0
+ %1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp ole <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
ret <4 x i64> %3
Modified: llvm/trunk/test/CodeGen/X86/commute-intrinsic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-intrinsic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-intrinsic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-intrinsic.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
define <2 x i64> @madd(<2 x i64> %b) nounwind {
entry:
- %tmp2 = load <2 x i64>* @a, align 16 ; <<2 x i64>> [#uses=1]
+ %tmp2 = load <2 x i64>, <2 x i64>* @a, align 16 ; <<2 x i64>> [#uses=1]
%tmp6 = bitcast <2 x i64> %b to <8 x i16> ; <<8 x i16>> [#uses=1]
%tmp9 = bitcast <2 x i64> %tmp2 to <8 x i16> ; <<8 x i16>> [#uses=1]
%tmp11 = tail call <4 x i32> @llvm.x86.sse2.pmadd.wd( <8 x i16> %tmp9, <8 x i16> %tmp6 ) nounwind readnone ; <<4 x i32>> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/commute-xop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-xop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-xop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-xop.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
define <16 x i8> @commute_fold_vpcomb(<16 x i8>* %a0, <16 x i8> %a1) {
;CHECK-LABEL: commute_fold_vpcomb
;CHECK: vpcomgtb (%rdi), %xmm0, %xmm0
- %1 = load <16 x i8>* %a0
+ %1 = load <16 x i8>, <16 x i8>* %a0
%2 = call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %1, <16 x i8> %a1, i8 0) ; vpcomltb
ret <16 x i8> %2
}
@@ -12,7 +12,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomb(<
define <4 x i32> @commute_fold_vpcomd(<4 x i32>* %a0, <4 x i32> %a1) {
;CHECK-LABEL: commute_fold_vpcomd
;CHECK: vpcomged (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %1, <4 x i32> %a1, i8 1) ; vpcomled
ret <4 x i32> %2
}
@@ -21,7 +21,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomd(<
define <2 x i64> @commute_fold_vpcomq(<2 x i64>* %a0, <2 x i64> %a1) {
;CHECK-LABEL: commute_fold_vpcomq
;CHECK: vpcomltq (%rdi), %xmm0, %xmm0
- %1 = load <2 x i64>* %a0
+ %1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %1, <2 x i64> %a1, i8 2) ; vpcomgtq
ret <2 x i64> %2
}
@@ -30,7 +30,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomq(<
define <16 x i8> @commute_fold_vpcomub(<16 x i8>* %a0, <16 x i8> %a1) {
;CHECK-LABEL: commute_fold_vpcomub
;CHECK: vpcomleub (%rdi), %xmm0, %xmm0
- %1 = load <16 x i8>* %a0
+ %1 = load <16 x i8>, <16 x i8>* %a0
%2 = call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %1, <16 x i8> %a1, i8 3) ; vpcomgeub
ret <16 x i8> %2
}
@@ -39,7 +39,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomub(
define <4 x i32> @commute_fold_vpcomud(<4 x i32>* %a0, <4 x i32> %a1) {
;CHECK-LABEL: commute_fold_vpcomud
;CHECK: vpcomequd (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %1, <4 x i32> %a1, i8 4) ; vpcomequd
ret <4 x i32> %2
}
@@ -48,7 +48,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomud(
define <2 x i64> @commute_fold_vpcomuq(<2 x i64>* %a0, <2 x i64> %a1) {
;CHECK-LABEL: commute_fold_vpcomuq
;CHECK: vpcomnequq (%rdi), %xmm0, %xmm0
- %1 = load <2 x i64>* %a0
+ %1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %1, <2 x i64> %a1, i8 5) ; vpcomnequq
ret <2 x i64> %2
}
@@ -57,7 +57,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomuq(
define <8 x i16> @commute_fold_vpcomuw(<8 x i16>* %a0, <8 x i16> %a1) {
;CHECK-LABEL: commute_fold_vpcomuw
;CHECK: vpcomfalseuw (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %1, <8 x i16> %a1, i8 6) ; vpcomfalseuw
ret <8 x i16> %2
}
@@ -66,7 +66,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomuw(
define <8 x i16> @commute_fold_vpcomw(<8 x i16>* %a0, <8 x i16> %a1) {
;CHECK-LABEL: commute_fold_vpcomw
;CHECK: vpcomtruew (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %1, <8 x i16> %a1, i8 7) ; vpcomtruew
ret <8 x i16> %2
}
@@ -75,7 +75,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomw(<
define <4 x i32> @commute_fold_vpmacsdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32> %a2) {
;CHECK-LABEL: commute_fold_vpmacsdd
;CHECK: vpmacsdd %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32> %1, <4 x i32> %a1, <4 x i32> %a2)
ret <4 x i32> %2
}
@@ -84,7 +84,7 @@ declare <4 x i32> @llvm.x86.xop.vpmacsdd
define <2 x i64> @commute_fold_vpmacsdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
;CHECK-LABEL: commute_fold_vpmacsdqh
;CHECK: vpmacsdqh %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
ret <2 x i64> %2
}
@@ -93,7 +93,7 @@ declare <2 x i64> @llvm.x86.xop.vpmacsdq
define <2 x i64> @commute_fold_vpmacsdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
;CHECK-LABEL: commute_fold_vpmacsdql
;CHECK: vpmacsdql %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
ret <2 x i64> %2
}
@@ -102,7 +102,7 @@ declare <2 x i64> @llvm.x86.xop.vpmacsdq
define <4 x i32> @commute_fold_vpmacssdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32> %a2) {
;CHECK-LABEL: commute_fold_vpmacssdd
;CHECK: vpmacssdd %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32> %1, <4 x i32> %a1, <4 x i32> %a2)
ret <4 x i32> %2
}
@@ -111,7 +111,7 @@ declare <4 x i32> @llvm.x86.xop.vpmacssd
define <2 x i64> @commute_fold_vpmacssdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
;CHECK-LABEL: commute_fold_vpmacssdqh
;CHECK: vpmacssdqh %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
ret <2 x i64> %2
}
@@ -120,7 +120,7 @@ declare <2 x i64> @llvm.x86.xop.vpmacssd
define <2 x i64> @commute_fold_vpmacssdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
;CHECK-LABEL: commute_fold_vpmacssdql
;CHECK: vpmacssdql %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
ret <2 x i64> %2
}
@@ -129,7 +129,7 @@ declare <2 x i64> @llvm.x86.xop.vpmacssd
define <4 x i32> @commute_fold_vpmacsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
;CHECK-LABEL: commute_fold_vpmacsswd
;CHECK: vpmacsswd %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
ret <4 x i32> %2
}
@@ -138,7 +138,7 @@ declare <4 x i32> @llvm.x86.xop.vpmacssw
define <8 x i16> @commute_fold_vpmacssww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16> %a2) {
;CHECK-LABEL: commute_fold_vpmacssww
;CHECK: vpmacssww %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16> %1, <8 x i16> %a1, <8 x i16> %a2)
ret <8 x i16> %2
}
@@ -147,7 +147,7 @@ declare <8 x i16> @llvm.x86.xop.vpmacssw
define <4 x i32> @commute_fold_vpmacswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
;CHECK-LABEL: commute_fold_vpmacswd
;CHECK: vpmacswd %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
ret <4 x i32> %2
}
@@ -156,7 +156,7 @@ declare <4 x i32> @llvm.x86.xop.vpmacswd
define <8 x i16> @commute_fold_vpmacsww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16> %a2) {
;CHECK-LABEL: commute_fold_vpmacsww
;CHECK: vpmacsww %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16> %1, <8 x i16> %a1, <8 x i16> %a2)
ret <8 x i16> %2
}
@@ -165,7 +165,7 @@ declare <8 x i16> @llvm.x86.xop.vpmacsww
define <4 x i32> @commute_fold_vpmadcsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
;CHECK-LABEL: commute_fold_vpmadcsswd
;CHECK: vpmadcsswd %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
ret <4 x i32> %2
}
@@ -174,7 +174,7 @@ declare <4 x i32> @llvm.x86.xop.vpmadcss
define <4 x i32> @commute_fold_vpmadcswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
;CHECK-LABEL: commute_fold_vpmadcswd
;CHECK: vpmadcswd %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmadcswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
ret <4 x i32> %2
}
Modified: llvm/trunk/test/CodeGen/X86/compact-unwind.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/compact-unwind.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/compact-unwind.ll (original)
+++ llvm/trunk/test/CodeGen/X86/compact-unwind.ll Fri Feb 27 15:17:42 2015
@@ -39,12 +39,12 @@
define i8* @test0(i64 %size) {
%addr = alloca i64, align 8
- %tmp20 = load i32* @gv, align 4
+ %tmp20 = load i32, i32* @gv, align 4
%tmp21 = call i32 @bar()
- %tmp25 = load i64* %addr, align 8
+ %tmp25 = load i64, i64* %addr, align 8
%tmp26 = inttoptr i64 %tmp25 to %ty*
%tmp29 = getelementptr inbounds %ty, %ty* %tmp26, i64 0, i32 0
- %tmp34 = load i8** %tmp29, align 8
+ %tmp34 = load i8*, i8** %tmp29, align 8
%tmp35 = getelementptr inbounds i8, i8* %tmp34, i64 %size
store i8* %tmp35, i8** %tmp29, align 8
ret i8* null
@@ -85,7 +85,7 @@ for.cond1.preheader:
for.body3: ; preds = %for.inc, %for.cond1.preheader
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.inc ]
%image4 = getelementptr inbounds %"struct.dyld::MappedRanges", %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 0, i64 %indvars.iv, i32 0
- %0 = load %class.ImageLoader** %image4, align 8
+ %0 = load %class.ImageLoader*, %class.ImageLoader** %image4, align 8
%cmp5 = icmp eq %class.ImageLoader* %0, %image
br i1 %cmp5, label %if.then, label %for.inc
@@ -102,7 +102,7 @@ for.inc:
for.inc10: ; preds = %for.inc
%next = getelementptr inbounds %"struct.dyld::MappedRanges", %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 1
- %1 = load %"struct.dyld::MappedRanges"** %next, align 8
+ %1 = load %"struct.dyld::MappedRanges"*, %"struct.dyld::MappedRanges"** %next, align 8
%cmp = icmp eq %"struct.dyld::MappedRanges"* %1, null
br i1 %cmp, label %for.end11, label %for.cond1.preheader
Modified: llvm/trunk/test/CodeGen/X86/complex-asm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/complex-asm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/complex-asm.ll (original)
+++ llvm/trunk/test/CodeGen/X86/complex-asm.ll Fri Feb 27 15:17:42 2015
@@ -8,9 +8,9 @@ entry:
%v = alloca %0, align 8
call void asm sideeffect "", "=*r,r,r,0,~{dirflag},~{fpsr},~{flags}"(%0* %v, i32 0, i32 1, i128 undef) nounwind
%0 = getelementptr inbounds %0, %0* %v, i64 0, i32 0
- %1 = load i64* %0, align 8
+ %1 = load i64, i64* %0, align 8
%2 = getelementptr inbounds %0, %0* %v, i64 0, i32 1
- %3 = load i64* %2, align 8
+ %3 = load i64, i64* %2, align 8
%mrv4 = insertvalue %0 undef, i64 %1, 0
%mrv5 = insertvalue %0 %mrv4, i64 %3, 1
ret %0 %mrv5
Modified: llvm/trunk/test/CodeGen/X86/computeKnownBits_urem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/computeKnownBits_urem.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/computeKnownBits_urem.ll (original)
+++ llvm/trunk/test/CodeGen/X86/computeKnownBits_urem.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@ define i32 @main() #0 {
entry:
%a = alloca i32, align 4
store i32 1, i32* %a, align 4
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%or = or i32 1, %0
%and = and i32 1, %or
%rem = urem i32 %and, 1
Modified: llvm/trunk/test/CodeGen/X86/const-base-addr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/const-base-addr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/const-base-addr.ll (original)
+++ llvm/trunk/test/CodeGen/X86/const-base-addr.ll Fri Feb 27 15:17:42 2015
@@ -12,11 +12,11 @@ define i32 @test1() nounwind {
; CHECK-NEXT: addl 8(%rcx), %eax
; CHECK-NEXT: addl 12(%rcx), %eax
%addr1 = getelementptr %T, %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 1
- %tmp1 = load i32* %addr1
+ %tmp1 = load i32, i32* %addr1
%addr2 = getelementptr %T, %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 2
- %tmp2 = load i32* %addr2
+ %tmp2 = load i32, i32* %addr2
%addr3 = getelementptr %T, %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 3
- %tmp3 = load i32* %addr3
+ %tmp3 = load i32, i32* %addr3
%tmp4 = add i32 %tmp1, %tmp2
%tmp5 = add i32 %tmp3, %tmp4
ret i32 %tmp5
Modified: llvm/trunk/test/CodeGen/X86/constant-combines.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/constant-combines.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/constant-combines.ll (original)
+++ llvm/trunk/test/CodeGen/X86/constant-combines.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ entry:
%1 = getelementptr inbounds { float, float }, { float, float }* %arg, i64 0, i32 0
%2 = bitcast float* %1 to i64*
- %3 = load i64* %2, align 8
+ %3 = load i64, i64* %2, align 8
%4 = trunc i64 %3 to i32
%5 = lshr i64 %3, 32
%6 = trunc i64 %5 to i32
Modified: llvm/trunk/test/CodeGen/X86/constant-hoisting-optnone.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/constant-hoisting-optnone.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/constant-hoisting-optnone.ll (original)
+++ llvm/trunk/test/CodeGen/X86/constant-hoisting-optnone.ll Fri Feb 27 15:17:42 2015
@@ -12,8 +12,8 @@ define i64 @constant_hoisting_optnone()
; CHECK-DAG: movabsq {{.*#+}} imm = 0xBEEBEEBEC
; CHECK: ret
entry:
- %0 = load i64* inttoptr (i64 51250129900 to i64*)
- %1 = load i64* inttoptr (i64 51250129908 to i64*)
+ %0 = load i64, i64* inttoptr (i64 51250129900 to i64*)
+ %1 = load i64, i64* inttoptr (i64 51250129908 to i64*)
%2 = add i64 %0, %1
ret i64 %2
}
Modified: llvm/trunk/test/CodeGen/X86/constant-hoisting-shift-immediate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/constant-hoisting-shift-immediate.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/constant-hoisting-shift-immediate.ll (original)
+++ llvm/trunk/test/CodeGen/X86/constant-hoisting-shift-immediate.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define i64 @foo(i1 %z, i192* %p, i192* %
; be in another basic block. As a result, a very inefficient code might be
; produced. Here we check that this doesn't occur.
entry:
- %data1 = load i192* %p, align 8
+ %data1 = load i192, i192* %p, align 8
%lshr1 = lshr i192 %data1, 128
%val1 = trunc i192 %lshr1 to i64
br i1 %z, label %End, label %L_val2
@@ -14,7 +14,7 @@ entry:
; CHECK: movq 16(%rdx), %rax
; CHECK-NEXT: retq
L_val2:
- %data2 = load i192* %q, align 8
+ %data2 = load i192, i192* %q, align 8
%lshr2 = lshr i192 %data2, 128
%val2 = trunc i192 %lshr2 to i64
br label %End
Modified: llvm/trunk/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ entry:
%0 = add i32 %i2, 1 ; <i32> [#uses=1]
%1 = sext i32 %0 to i64 ; <i64> [#uses=1]
%2 = getelementptr i8, i8* %ptr, i64 %1 ; <i8*> [#uses=1]
- %3 = load i8* %2, align 1 ; <i8> [#uses=1]
+ %3 = load i8, i8* %2, align 1 ; <i8> [#uses=1]
%4 = icmp eq i8 0, %3 ; <i1> [#uses=1]
br i1 %4, label %bb3, label %bb34
Modified: llvm/trunk/test/CodeGen/X86/cppeh-catch-all.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cppeh-catch-all.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cppeh-catch-all.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cppeh-catch-all.ll Fri Feb 27 15:17:42 2015
@@ -39,7 +39,7 @@ lpad:
br label %catch
catch: ; preds = %lpad
- %exn = load i8** %exn.slot
+ %exn = load i8*, i8** %exn.slot
%3 = call i8* @llvm.eh.begincatch(i8* %exn) #3
call void @_Z16handle_exceptionv()
br label %invoke.cont2
@@ -57,7 +57,7 @@ try.cont:
; CHECK: %eh.alloc = call i8* @llvm.framerecover(i8* bitcast (void ()* @_Z4testv to i8*), i8* %1)
; CHECK: %eh.data = bitcast i8* %eh.alloc to %struct._Z4testv.ehdata*
; CHECK: %eh.obj.ptr = getelementptr inbounds %struct._Z4testv.ehdata, %struct._Z4testv.ehdata* %eh.data, i32 0, i32 1
-; CHECK: %eh.obj = load i8** %eh.obj.ptr
+; CHECK: %eh.obj = load i8*, i8** %eh.obj.ptr
; CHECK: call void @_Z16handle_exceptionv()
; CHECK: ret i8* blockaddress(@_Z4testv, %try.cont)
; CHECK: }
Modified: llvm/trunk/test/CodeGen/X86/cppeh-catch-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cppeh-catch-scalar.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cppeh-catch-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cppeh-catch-scalar.ll Fri Feb 27 15:17:42 2015
@@ -55,18 +55,18 @@ lpad:
br label %catch.dispatch
catch.dispatch: ; preds = %lpad
- %sel = load i32* %ehselector.slot
+ %sel = load i32, i32* %ehselector.slot
%3 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #3
%matches = icmp eq i32 %sel, %3
br i1 %matches, label %catch, label %eh.resume
catch: ; preds = %catch.dispatch
- %exn11 = load i8** %exn.slot
+ %exn11 = load i8*, i8** %exn.slot
%4 = call i8* @llvm.eh.begincatch(i8* %exn11) #3
%5 = bitcast i8* %4 to i32*
- %6 = load i32* %5, align 4
+ %6 = load i32, i32* %5, align 4
store i32 %6, i32* %i, align 4
- %7 = load i32* %i, align 4
+ %7 = load i32, i32* %i, align 4
call void @_Z10handle_inti(i32 %7)
br label %invoke.cont2
@@ -78,8 +78,8 @@ try.cont:
ret void
eh.resume: ; preds = %catch.dispatch
- %exn3 = load i8** %exn.slot
- %sel4 = load i32* %ehselector.slot
+ %exn3 = load i8*, i8** %exn.slot
+ %sel4 = load i32, i32* %ehselector.slot
%lpad.val = insertvalue { i8*, i32 } undef, i8* %exn3, 0
%lpad.val5 = insertvalue { i8*, i32 } %lpad.val, i32 %sel4, 1
resume { i8*, i32 } %lpad.val5
@@ -90,12 +90,12 @@ eh.resume:
; CHECK: %eh.alloc = call i8* @llvm.framerecover(i8* bitcast (void ()* @_Z4testv to i8*), i8* %1)
; CHECK: %eh.data = bitcast i8* %eh.alloc to %struct._Z4testv.ehdata*
; CHECK: %eh.obj.ptr = getelementptr inbounds %struct._Z4testv.ehdata, %struct._Z4testv.ehdata* %eh.data, i32 0, i32 1
-; CHECK: %eh.obj = load i8** %eh.obj.ptr
+; CHECK: %eh.obj = load i8*, i8** %eh.obj.ptr
; CHECK: %i = getelementptr inbounds %struct._Z4testv.ehdata, %struct._Z4testv.ehdata* %eh.data, i32 0, i32 2
; CHECK: %2 = bitcast i8* %eh.obj to i32*
-; CHECK: %3 = load i32* %2, align 4
+; CHECK: %3 = load i32, i32* %2, align 4
; CHECK: store i32 %3, i32* %i, align 4
-; CHECK: %4 = load i32* %i, align 4
+; CHECK: %4 = load i32, i32* %i, align 4
; CHECK: call void @_Z10handle_inti(i32 %4)
; CHECK: ret i8* blockaddress(@_Z4testv, %try.cont)
; CHECK: }
Modified: llvm/trunk/test/CodeGen/X86/cppeh-frame-vars.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cppeh-frame-vars.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cppeh-frame-vars.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cppeh-frame-vars.ll Fri Feb 27 15:17:42 2015
@@ -83,7 +83,7 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %1, 10
br i1 %cmp, label %for.body, label %for.end
@@ -92,9 +92,9 @@ for.body:
to label %invoke.cont unwind label %lpad
invoke.cont: ; preds = %for.body
- %2 = load i32* %i, align 4
+ %2 = load i32, i32* %i, align 4
%a = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 0
- %3 = load i32* %a, align 4
+ %3 = load i32, i32* %a, align 4
%add = add nsw i32 %3, %2
store i32 %add, i32* %a, align 4
br label %try.cont
@@ -109,42 +109,42 @@ lpad:
br label %catch.dispatch
catch.dispatch: ; preds = %lpad
- %sel = load i32* %ehselector.slot
+ %sel = load i32, i32* %ehselector.slot
%7 = call i32 @llvm.eh.typeid.for(i8* bitcast (%rtti.TypeDescriptor2* @"\01??_R0H@8" to i8*)) #1
%matches = icmp eq i32 %sel, %7
br i1 %matches, label %catch, label %eh.resume
catch: ; preds = %catch.dispatch
- %exn = load i8** %exn.slot
+ %exn = load i8*, i8** %exn.slot
%8 = call i8* @llvm.eh.begincatch(i8* %exn) #1
%9 = bitcast i8* %8 to i32*
- %10 = load i32* %9, align 4
+ %10 = load i32, i32* %9, align 4
store i32 %10, i32* %e, align 4
- %11 = load i32* %e, align 4
- %12 = load i32* %NumExceptions, align 4
+ %11 = load i32, i32* %e, align 4
+ %12 = load i32, i32* %NumExceptions, align 4
%idxprom = sext i32 %12 to i64
%arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %ExceptionVal, i32 0, i64 %idxprom
store i32 %11, i32* %arrayidx, align 4
- %13 = load i32* %NumExceptions, align 4
+ %13 = load i32, i32* %NumExceptions, align 4
%inc = add nsw i32 %13, 1
store i32 %inc, i32* %NumExceptions, align 4
- %14 = load i32* %e, align 4
- %15 = load i32* %i, align 4
+ %14 = load i32, i32* %e, align 4
+ %15 = load i32, i32* %i, align 4
%cmp1 = icmp eq i32 %14, %15
br i1 %cmp1, label %if.then, label %if.else
if.then: ; preds = %catch
- %16 = load i32* %e, align 4
+ %16 = load i32, i32* %e, align 4
%b = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 1
- %17 = load i32* %b, align 4
+ %17 = load i32, i32* %b, align 4
%add2 = add nsw i32 %17, %16
store i32 %add2, i32* %b, align 4
br label %if.end
if.else: ; preds = %catch
- %18 = load i32* %e, align 4
+ %18 = load i32, i32* %e, align 4
%a3 = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 0
- %19 = load i32* %a3, align 4
+ %19 = load i32, i32* %a3, align 4
%add4 = add nsw i32 %19, %18
store i32 %add4, i32* %a3, align 4
br label %if.end
@@ -154,25 +154,25 @@ if.end:
br label %try.cont
try.cont: ; preds = %if.end, %invoke.cont
- %20 = load i32* %NumExceptions, align 4
+ %20 = load i32, i32* %NumExceptions, align 4
call void @"\01?does_not_throw@@YAXH at Z"(i32 %20)
br label %for.inc
for.inc: ; preds = %try.cont
- %21 = load i32* %i, align 4
+ %21 = load i32, i32* %i, align 4
%inc5 = add nsw i32 %21, 1
store i32 %inc5, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %22 = load i32* %NumExceptions, align 4
+ %22 = load i32, i32* %NumExceptions, align 4
%arraydecay = getelementptr inbounds [10 x i32], [10 x i32]* %ExceptionVal, i32 0, i32 0
call void @"\01?dump@@YAXPEAHHAEAUSomeData@@@Z"(i32* %arraydecay, i32 %22, %struct.SomeData* dereferenceable(8) %Data)
ret void
eh.resume: ; preds = %catch.dispatch
- %exn6 = load i8** %exn.slot
- %sel7 = load i32* %ehselector.slot
+ %exn6 = load i8*, i8** %exn.slot
+ %sel7 = load i32, i32* %ehselector.slot
%lpad.val = insertvalue { i8*, i32 } undef, i8* %exn6, 0
%lpad.val8 = insertvalue { i8*, i32 } %lpad.val, i32 %sel7, 1
resume { i8*, i32 } %lpad.val8
@@ -184,40 +184,40 @@ eh.resume:
; CHECK: %eh.alloc = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1)
; CHECK: %eh.data = bitcast i8* %eh.alloc to %"struct.\01?test@@YAXXZ.ehdata"*
; CHECK: %eh.obj.ptr = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 1
-; CHECK: %eh.obj = load i8** %eh.obj.ptr
+; CHECK: %eh.obj = load i8*, i8** %eh.obj.ptr
; CHECK: %e = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 2
; CHECK: %NumExceptions = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 3
; CHECK: %ExceptionVal = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 4
; CHECK: %i = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 5
; CHECK: %Data = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 6
; CHECK: %2 = bitcast i8* %eh.obj to i32*
-; CHECK: %3 = load i32* %2, align 4
+; CHECK: %3 = load i32, i32* %2, align 4
; CHECK: store i32 %3, i32* %e, align 4
-; CHECK: %4 = load i32* %e, align 4
-; CHECK: %5 = load i32* %NumExceptions, align 4
+; CHECK: %4 = load i32, i32* %e, align 4
+; CHECK: %5 = load i32, i32* %NumExceptions, align 4
; CHECK: %idxprom = sext i32 %5 to i64
; CHECK: %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %ExceptionVal, i32 0, i64 %idxprom
; CHECK: store i32 %4, i32* %arrayidx, align 4
-; CHECK: %6 = load i32* %NumExceptions, align 4
+; CHECK: %6 = load i32, i32* %NumExceptions, align 4
; CHECK: %inc = add nsw i32 %6, 1
; CHECK: store i32 %inc, i32* %NumExceptions, align 4
-; CHECK: %7 = load i32* %e, align 4
-; CHECK: %8 = load i32* %i, align 4
+; CHECK: %7 = load i32, i32* %e, align 4
+; CHECK: %8 = load i32, i32* %i, align 4
; CHECK: %cmp1 = icmp eq i32 %7, %8
; CHECK: br i1 %cmp1, label %if.then, label %if.else
;
; CHECK: if.then: ; preds = %catch.entry
-; CHECK: %9 = load i32* %e, align 4
+; CHECK: %9 = load i32, i32* %e, align 4
; CHECK: %b = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 1
-; CHECK: %10 = load i32* %b, align 4
+; CHECK: %10 = load i32, i32* %b, align 4
; CHECK: %add2 = add nsw i32 %10, %9
; CHECK: store i32 %add2, i32* %b, align 4
; CHECK: br label %if.end
;
; CHECK: if.else: ; preds = %catch.entry
-; CHECK: %11 = load i32* %e, align 4
+; CHECK: %11 = load i32, i32* %e, align 4
; CHECK: %a3 = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 0
-; CHECK: %12 = load i32* %a3, align 4
+; CHECK: %12 = load i32, i32* %a3, align 4
; CHECK: %add4 = add nsw i32 %12, %11
; CHECK: store i32 %add4, i32* %a3, align 4
; CHECK: br label %if.end
Modified: llvm/trunk/test/CodeGen/X86/crash-O0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/crash-O0.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/crash-O0.ll (original)
+++ llvm/trunk/test/CodeGen/X86/crash-O0.ll Fri Feb 27 15:17:42 2015
@@ -45,7 +45,7 @@ entry:
; CHECK: retq
define i64 @addressModeWith32bitIndex(i32 %V) {
%gep = getelementptr i64, i64* null, i32 %V
- %load = load i64* %gep
+ %load = load i64, i64* %gep
%sdiv = sdiv i64 0, %load
ret i64 %sdiv
}
Modified: llvm/trunk/test/CodeGen/X86/crash-nosse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/crash-nosse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/crash-nosse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/crash-nosse.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ BB:
br label %CF
CF: ; preds = %CF, %BB
- %L19 = load <8 x float>* %S17
+ %L19 = load <8 x float>, <8 x float>* %S17
%BC = bitcast <32 x i32> %Shuff6 to <32 x float>
%S28 = fcmp ord double 0x3ED1A1F787BB2185, 0x3EE59DE55A8DF890
br i1 %S28, label %CF, label %CF39
Modified: llvm/trunk/test/CodeGen/X86/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/crash.ll Fri Feb 27 15:17:42 2015
@@ -7,9 +7,9 @@
; Chain and flag folding issues.
define i32 @test1() nounwind ssp {
entry:
- %tmp5.i = load volatile i32* undef ; <i32> [#uses=1]
+ %tmp5.i = load volatile i32, i32* undef ; <i32> [#uses=1]
%conv.i = zext i32 %tmp5.i to i64 ; <i64> [#uses=1]
- %tmp12.i = load volatile i32* undef ; <i32> [#uses=1]
+ %tmp12.i = load volatile i32, i32* undef ; <i32> [#uses=1]
%conv13.i = zext i32 %tmp12.i to i64 ; <i64> [#uses=1]
%shl.i = shl i64 %conv13.i, 32 ; <i64> [#uses=1]
%or.i = or i64 %shl.i, %conv.i ; <i64> [#uses=1]
@@ -40,7 +40,7 @@ if.end:
define void @test3() {
dependentGraph243.exit:
- %subject19 = load %pair* undef ; <%1> [#uses=1]
+ %subject19 = load %pair, %pair* undef ; <%1> [#uses=1]
%0 = extractvalue %pair %subject19, 1 ; <double> [#uses=2]
%1 = select i1 undef, double %0, double undef ; <double> [#uses=1]
%2 = select i1 undef, double %1, double %0 ; <double> [#uses=1]
@@ -52,7 +52,7 @@ dependentGraph243.exit:
; PR6605
define i64 @test4(i8* %P) nounwind ssp {
entry:
- %tmp1 = load i8* %P ; <i8> [#uses=3]
+ %tmp1 = load i8, i8* %P ; <i8> [#uses=3]
%tobool = icmp eq i8 %tmp1, 0 ; <i1> [#uses=1]
%tmp58 = sext i1 %tobool to i8 ; <i8> [#uses=1]
%mul.i = and i8 %tmp58, %tmp1 ; <i8> [#uses=1]
@@ -76,7 +76,7 @@ declare i32 @safe(i32)
; PR6607
define fastcc void @test5(i32 %FUNC) nounwind {
foo:
- %0 = load i8* undef, align 1 ; <i8> [#uses=3]
+ %0 = load i8, i8* undef, align 1 ; <i8> [#uses=3]
%1 = sext i8 %0 to i32 ; <i32> [#uses=2]
%2 = zext i8 %0 to i32 ; <i32> [#uses=1]
%tmp1.i5037 = urem i32 %2, 10 ; <i32> [#uses=1]
@@ -121,7 +121,7 @@ entry:
bb14:
%tmp0 = trunc i16 undef to i1
- %tmp1 = load i8* undef, align 8
+ %tmp1 = load i8, i8* undef, align 8
%tmp2 = shl i8 %tmp1, 4
%tmp3 = lshr i8 %tmp2, 7
%tmp4 = trunc i8 %tmp3 to i1
@@ -239,7 +239,7 @@ declare i64 @llvm.objectsize.i64.p0i8(i8
define void @_ZNK4llvm17MipsFrameLowering12emitPrologueERNS_15MachineFunctionE() ssp align 2 {
bb:
- %tmp = load %t9** undef, align 4
+ %tmp = load %t9*, %t9** undef, align 4
%tmp2 = getelementptr inbounds %t9, %t9* %tmp, i32 0, i32 0
%tmp3 = getelementptr inbounds %t9, %t9* %tmp, i32 0, i32 0, i32 0, i32 0, i32 1
br label %bb4
@@ -250,25 +250,25 @@ bb4:
br i1 undef, label %bb34, label %bb7
bb7: ; preds = %bb4
- %tmp8 = load i32* undef, align 4
+ %tmp8 = load i32, i32* undef, align 4
%tmp9 = and i96 %tmp6, 4294967040
%tmp10 = zext i32 %tmp8 to i96
%tmp11 = shl nuw nsw i96 %tmp10, 32
%tmp12 = or i96 %tmp9, %tmp11
%tmp13 = or i96 %tmp12, 1
- %tmp14 = load i32* undef, align 4
+ %tmp14 = load i32, i32* undef, align 4
%tmp15 = and i96 %tmp5, 4294967040
%tmp16 = zext i32 %tmp14 to i96
%tmp17 = shl nuw nsw i96 %tmp16, 32
%tmp18 = or i96 %tmp15, %tmp17
%tmp19 = or i96 %tmp18, 1
- %tmp20 = load i8* undef, align 1
+ %tmp20 = load i8, i8* undef, align 1
%tmp21 = and i8 %tmp20, 1
%tmp22 = icmp ne i8 %tmp21, 0
%tmp23 = select i1 %tmp22, i96 %tmp19, i96 %tmp13
%tmp24 = select i1 %tmp22, i96 %tmp13, i96 %tmp19
store i96 %tmp24, i96* undef, align 4
- %tmp25 = load %t13** %tmp3, align 4
+ %tmp25 = load %t13*, %t13** %tmp3, align 4
%tmp26 = icmp eq %t13* %tmp25, undef
br i1 %tmp26, label %bb28, label %bb27
@@ -281,7 +281,7 @@ bb28:
bb29: ; preds = %bb28, %bb27
store i96 %tmp23, i96* undef, align 4
- %tmp30 = load %t13** %tmp3, align 4
+ %tmp30 = load %t13*, %t13** %tmp3, align 4
br i1 false, label %bb33, label %bb31
bb31: ; preds = %bb29
@@ -348,13 +348,13 @@ entry:
br label %"4"
"3":
- %0 = load <2 x i32>* null, align 8
+ %0 = load <2 x i32>, <2 x i32>* null, align 8
%1 = xor <2 x i32> zeroinitializer, %0
%2 = and <2 x i32> %1, %6
%3 = or <2 x i32> undef, %2
%4 = and <2 x i32> %3, undef
store <2 x i32> %4, <2 x i32>* undef
- %5 = load <2 x i32>* undef, align 1
+ %5 = load <2 x i32>, <2 x i32>* undef, align 1
br label %"4"
"4":
@@ -378,7 +378,7 @@ entry:
@__force_order = external hidden global i32, align 4
define void @pr11078(i32* %pgd) nounwind {
entry:
- %t0 = load i32* %pgd, align 4
+ %t0 = load i32, i32* %pgd, align 4
%and2 = and i32 %t0, 1
%tobool = icmp eq i32 %and2, 0
br i1 %tobool, label %if.then, label %if.end
@@ -405,7 +405,7 @@ while.body.preheader:
br i1 undef, label %if.then3, label %if.end7
if.then3: ; preds = %while.body.preheader
- %0 = load i32* undef, align 4
+ %0 = load i32, i32* undef, align 4
br i1 undef, label %land.lhs.true.i255, label %if.end7
land.lhs.true.i255: ; preds = %if.then3
@@ -434,7 +434,7 @@ return:
@.str = private unnamed_addr constant { [1 x i8], [63 x i8] } zeroinitializer, align 32
define void @pr13188(i64* nocapture %this) uwtable ssp sanitize_address align 2 {
entry:
- %x7 = load i64* %this, align 8
+ %x7 = load i64, i64* %this, align 8
%sub = add i64 %x7, -1
%conv = uitofp i64 %sub to float
%div = fmul float %conv, 5.000000e-01
@@ -450,12 +450,12 @@ declare void @_Z6PrintFz(...)
define void @pr13943() nounwind uwtable ssp {
entry:
- %srcval = load i576* bitcast ([9 x i32*]* @fn1.g to i576*), align 16
+ %srcval = load i576, i576* bitcast ([9 x i32*]* @fn1.g to i576*), align 16
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%g.0 = phi i576 [ %srcval, %entry ], [ %ins, %for.inc ]
- %0 = load i32* @e, align 4
+ %0 = load i32, i32* @e, align 4
%1 = lshr i576 %g.0, 64
%2 = trunc i576 %1 to i64
%3 = inttoptr i64 %2 to i32*
@@ -510,9 +510,9 @@ bb4:
unreachable
bb5: ; preds = %bb3
- %tmp = load <4 x float>* undef, align 1
+ %tmp = load <4 x float>, <4 x float>* undef, align 1
%tmp6 = bitcast <4 x float> %tmp to i128
- %tmp7 = load <4 x float>* undef, align 1
+ %tmp7 = load <4 x float>, <4 x float>* undef, align 1
%tmp8 = bitcast <4 x float> %tmp7 to i128
br label %bb10
@@ -583,7 +583,7 @@ bb29:
}
define void @pr14194() nounwind uwtable {
- %tmp = load i64* undef, align 16
+ %tmp = load i64, i64* undef, align 16
%tmp1 = trunc i64 %tmp to i32
%tmp2 = lshr i64 %tmp, 32
%tmp3 = trunc i64 %tmp2 to i32
Modified: llvm/trunk/test/CodeGen/X86/critical-anti-dep-breaker.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/critical-anti-dep-breaker.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/critical-anti-dep-breaker.ll (original)
+++ llvm/trunk/test/CodeGen/X86/critical-anti-dep-breaker.ll Fri Feb 27 15:17:42 2015
@@ -16,9 +16,9 @@
define i32 @Part_Create(i64* %Anchor, i32 %TypeNum, i32 %F, i32 %Z, i32* %Status, i64* %PartTkn) {
%PartObj = alloca i64*, align 8
%Vchunk = alloca i64, align 8
- %1 = load i64* @NullToken, align 4
+ %1 = load i64, i64* @NullToken, align 4
store i64 %1, i64* %Vchunk, align 8
- %2 = load i32* @PartClass, align 4
+ %2 = load i32, i32* @PartClass, align 4
call i32 @Image(i64* %Anchor, i32 %2, i32 0, i32 0, i32* %Status, i64* %PartTkn, i64** %PartObj)
call i32 @Create(i64* %Anchor)
ret i32 %2
Modified: llvm/trunk/test/CodeGen/X86/cse-add-with-overflow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cse-add-with-overflow.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cse-add-with-overflow.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cse-add-with-overflow.ll Fri Feb 27 15:17:42 2015
@@ -15,8 +15,8 @@
define i64 @redundantadd(i64* %a0, i64* %a1) {
entry:
- %tmp8 = load i64* %a0, align 8
- %tmp12 = load i64* %a1, align 8
+ %tmp8 = load i64, i64* %a0, align 8
+ %tmp12 = load i64, i64* %a1, align 8
%tmp13 = icmp ult i64 %tmp12, -281474976710656
br i1 %tmp13, label %exit1, label %body
Modified: llvm/trunk/test/CodeGen/X86/cvt16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cvt16.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cvt16.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cvt16.ll Fri Feb 27 15:17:42 2015
@@ -33,7 +33,7 @@ define void @test1(float %src, i16* %des
define float @test2(i16* nocapture %src) {
- %1 = load i16* %src, align 2
+ %1 = load i16, i16* %src, align 2
%2 = tail call float @llvm.convert.from.fp16.f32(i16 %1)
ret float %2
}
@@ -60,7 +60,7 @@ define float @test3(float %src) nounwind
; F16C: ret
define double @test4(i16* nocapture %src) {
- %1 = load i16* %src, align 2
+ %1 = load i16, i16* %src, align 2
%2 = tail call double @llvm.convert.from.fp16.f64(i16 %1)
ret double %2
}
Modified: llvm/trunk/test/CodeGen/X86/dagcombine-buildvector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dagcombine-buildvector.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dagcombine-buildvector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dagcombine-buildvector.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ entry:
; CHECK: movdqa
define void @test2(<4 x i16>* %src, <4 x i32>* %dest) nounwind {
entry:
- %tmp1 = load <4 x i16>* %src
+ %tmp1 = load <4 x i16>, <4 x i16>* %src
%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
%0 = tail call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %tmp3)
store <4 x i32> %0, <4 x i32>* %dest
Modified: llvm/trunk/test/CodeGen/X86/dagcombine-cse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dagcombine-cse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dagcombine-cse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dagcombine-cse.ll Fri Feb 27 15:17:42 2015
@@ -7,12 +7,12 @@ entry:
%tmp9 = add i32 %tmp7, %idxX ; <i32> [#uses=1]
%tmp11 = getelementptr i8, i8* %ref_frame_ptr, i32 %tmp9 ; <i8*> [#uses=1]
%tmp1112 = bitcast i8* %tmp11 to i32* ; <i32*> [#uses=1]
- %tmp13 = load i32* %tmp1112, align 4 ; <i32> [#uses=1]
+ %tmp13 = load i32, i32* %tmp1112, align 4 ; <i32> [#uses=1]
%tmp18 = add i32 %idxX, 4 ; <i32> [#uses=1]
%tmp20.sum = add i32 %tmp18, %tmp7 ; <i32> [#uses=1]
%tmp21 = getelementptr i8, i8* %ref_frame_ptr, i32 %tmp20.sum ; <i8*> [#uses=1]
%tmp2122 = bitcast i8* %tmp21 to i16* ; <i16*> [#uses=1]
- %tmp23 = load i16* %tmp2122, align 2 ; <i16> [#uses=1]
+ %tmp23 = load i16, i16* %tmp2122, align 2 ; <i16> [#uses=1]
%tmp2425 = zext i16 %tmp23 to i64 ; <i64> [#uses=1]
%tmp26 = shl i64 %tmp2425, 32 ; <i64> [#uses=1]
%tmp2728 = zext i32 %tmp13 to i64 ; <i64> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/darwin-quote.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/darwin-quote.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/darwin-quote.ll (original)
+++ llvm/trunk/test/CodeGen/X86/darwin-quote.ll Fri Feb 27 15:17:42 2015
@@ -2,7 +2,7 @@
define internal i64 @baz() nounwind {
- %tmp = load i64* @"+x"
+ %tmp = load i64, i64* @"+x"
ret i64 %tmp
; CHECK: _baz:
; CHECK: movl "L_+x$non_lazy_ptr", %ecx
Modified: llvm/trunk/test/CodeGen/X86/dbg-changes-codegen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dbg-changes-codegen.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dbg-changes-codegen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dbg-changes-codegen.ll Fri Feb 27 15:17:42 2015
@@ -43,7 +43,7 @@
; Function Attrs: nounwind readonly uwtable
define zeroext i1 @_ZN3Foo3batEv(%struct.Foo* %this) #0 align 2 {
entry:
- %0 = load %struct.Foo** @pfoo, align 8
+ %0 = load %struct.Foo*, %struct.Foo** @pfoo, align 8
tail call void @llvm.dbg.value(metadata %struct.Foo* %0, i64 0, metadata !62, metadata !{!"0x102"})
%cmp.i = icmp eq %struct.Foo* %0, %this
ret i1 %cmp.i
@@ -52,9 +52,9 @@ entry:
; Function Attrs: nounwind uwtable
define void @_Z3bazv() #1 {
entry:
- %0 = load %struct.Wibble** @wibble1, align 8
+ %0 = load %struct.Wibble*, %struct.Wibble** @wibble1, align 8
tail call void @llvm.dbg.value(metadata %struct.Flibble* undef, i64 0, metadata !65, metadata !{!"0x102"})
- %1 = load %struct.Wibble** @wibble2, align 8
+ %1 = load %struct.Wibble*, %struct.Wibble** @wibble2, align 8
%cmp.i = icmp ugt %struct.Wibble* %1, %0
br i1 %cmp.i, label %if.then.i, label %_ZN7Flibble3barEP6Wibble.exit
Modified: llvm/trunk/test/CodeGen/X86/dbg-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dbg-combine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dbg-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dbg-combine.ll Fri Feb 27 15:17:42 2015
@@ -31,7 +31,7 @@ entry:
%cleanup.dest.slot = alloca i32
call void @llvm.dbg.declare(metadata i32* %elems, metadata !12, metadata !13), !dbg !14
store i32 3, i32* %elems, align 4, !dbg !14
- %0 = load i32* %elems, align 4, !dbg !15
+ %0 = load i32, i32* %elems, align 4, !dbg !15
%1 = zext i32 %0 to i64, !dbg !16
%2 = call i8* @llvm.stacksave(), !dbg !16
store i8* %2, i8** %saved_stack, !dbg !16
@@ -43,16 +43,16 @@ entry:
store i32 1, i32* %arrayidx1, align 4, !dbg !26
%arrayidx2 = getelementptr inbounds i32, i32* %vla, i64 2, !dbg !27
store i32 2, i32* %arrayidx2, align 4, !dbg !28
- %3 = load i32* %elems, align 4, !dbg !29
+ %3 = load i32, i32* %elems, align 4, !dbg !29
%4 = zext i32 %3 to i64, !dbg !30
%vla3 = alloca i32, i64 %4, align 16, !dbg !30
call void @llvm.dbg.declare(metadata i32* %vla3, metadata !31, metadata !21), !dbg !32
%arrayidx4 = getelementptr inbounds i32, i32* %vla3, i64 0, !dbg !33
store i32 1, i32* %arrayidx4, align 4, !dbg !34
%arrayidx5 = getelementptr inbounds i32, i32* %vla3, i64 0, !dbg !35
- %5 = load i32* %arrayidx5, align 4, !dbg !35
+ %5 = load i32, i32* %arrayidx5, align 4, !dbg !35
store i32 1, i32* %cleanup.dest.slot
- %6 = load i8** %saved_stack, !dbg !36
+ %6 = load i8*, i8** %saved_stack, !dbg !36
call void @llvm.stackrestore(i8* %6), !dbg !36
ret i32 %5, !dbg !36
}
Modified: llvm/trunk/test/CodeGen/X86/discontiguous-loops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/discontiguous-loops.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/discontiguous-loops.ll (original)
+++ llvm/trunk/test/CodeGen/X86/discontiguous-loops.ll Fri Feb 27 15:17:42 2015
@@ -39,7 +39,7 @@ ybb8:
br i1 %tmp9, label %bb10, label %ybb12
bb10: ; preds = %ybb8
- %tmp11 = load i8** undef, align 8 ; <i8*> [#uses=1]
+ %tmp11 = load i8*, i8** undef, align 8 ; <i8*> [#uses=1]
call void (i8*, ...)* @fatal(i8* getelementptr inbounds ([37 x i8]* @.str96, i64 0, i64 0), i8* %tmp11) nounwind
unreachable
Modified: llvm/trunk/test/CodeGen/X86/div8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/div8.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/div8.ll (original)
+++ llvm/trunk/test/CodeGen/X86/div8.ll Fri Feb 27 15:17:42 2015
@@ -10,13 +10,13 @@ entry:
%quotient = alloca i8, align 1
store i8 %dividend, i8* %dividend.addr, align 2
store i8 %divisor, i8* %divisor.addr, align 1
- %tmp = load i8* %dividend.addr, align 2
- %tmp1 = load i8* %divisor.addr, align 1
+ %tmp = load i8, i8* %dividend.addr, align 2
+ %tmp1 = load i8, i8* %divisor.addr, align 1
; Insist on i8->i32 zero extension, even though divb demands only i16:
; CHECK: movzbl {{.*}}%eax
; CHECK: divb
%div = udiv i8 %tmp, %tmp1
store i8 %div, i8* %quotient, align 1
- %tmp4 = load i8* %quotient, align 1
+ %tmp4 = load i8, i8* %quotient, align 1
ret i8 %tmp4
}
Modified: llvm/trunk/test/CodeGen/X86/dllimport-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dllimport-x86_64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dllimport-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dllimport-x86_64.ll Fri Feb 27 15:17:42 2015
@@ -35,13 +35,13 @@ define void @use() nounwind {
; available_externally uses go away
; OPT-NOT: call void @inline1()
; OPT-NOT: call void @inline2()
-; OPT-NOT: load i32* @Var2
+; OPT-NOT: load i32, i32* @Var2
; OPT: call void (...)* @dummy(i32 %1, i32 1)
; CHECK-DAG: movq __imp_Var1(%rip), [[R1:%[a-z]{3}]]
; CHECK-DAG: movq __imp_Var2(%rip), [[R2:%[a-z]{3}]]
- %1 = load i32* @Var1
- %2 = load i32* @Var2
+ %1 = load i32, i32* @Var1
+ %2 = load i32, i32* @Var2
call void(...)* @dummy(i32 %1, i32 %2)
ret void
Modified: llvm/trunk/test/CodeGen/X86/dllimport.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dllimport.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dllimport.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dllimport.ll Fri Feb 27 15:17:42 2015
@@ -46,13 +46,13 @@ define void @use() nounwind {
; available_externally uses go away
; OPT-NOT: call void @inline1()
; OPT-NOT: call void @inline2()
-; OPT-NOT: load i32* @Var2
+; OPT-NOT: load i32, i32* @Var2
; OPT: call void (...)* @dummy(i32 %1, i32 1)
; CHECK-DAG: movl __imp__Var1, [[R1:%[a-z]{3}]]
; CHECK-DAG: movl __imp__Var2, [[R2:%[a-z]{3}]]
- %1 = load i32* @Var1
- %2 = load i32* @Var2
+ %1 = load i32, i32* @Var1
+ %2 = load i32, i32* @Var2
call void(...)* @dummy(i32 %1, i32 %2)
ret void
Modified: llvm/trunk/test/CodeGen/X86/dollar-name.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dollar-name.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dollar-name.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dollar-name.ll Fri Feb 27 15:17:42 2015
@@ -8,8 +8,8 @@ define i32 @"$foo"() nounwind {
; CHECK: movl ($bar),
; CHECK: addl ($qux),
; CHECK: calll ($hen)
- %m = load i32* @"$bar"
- %n = load i32* @"$qux"
+ %m = load i32, i32* @"$bar"
+ %n = load i32, i32* @"$qux"
%t = add i32 %m, %n
%u = call i32 @"$hen"(i32 %t)
ret i32 %u
Modified: llvm/trunk/test/CodeGen/X86/dont-trunc-store-double-to-float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dont-trunc-store-double-to-float.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dont-trunc-store-double-to-float.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dont-trunc-store-double-to-float.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ entry-block:
%b = alloca float
store double 3.140000e+00, double* %a
- %0 = load double* %a
+ %0 = load double, double* %a
%1 = fptrunc double %0 to float
Modified: llvm/trunk/test/CodeGen/X86/dynamic-allocas-VLAs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dynamic-allocas-VLAs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dynamic-allocas-VLAs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dynamic-allocas-VLAs.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ define i32 @t1() nounwind uwtable ssp {
entry:
%a = alloca i32, align 4
call void @t1_helper(i32* %a) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
@@ -27,7 +27,7 @@ entry:
%a = alloca i32, align 4
%v = alloca <8 x float>, align 32
call void @t2_helper(i32* %a, <8 x float>* %v) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
@@ -53,7 +53,7 @@ entry:
%a = alloca i32, align 4
%vla = alloca i32, i64 %sz, align 16
call void @t3_helper(i32* %a, i32* %vla) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
@@ -78,7 +78,7 @@ entry:
%v = alloca <8 x float>, align 32
%vla = alloca i32, i64 %sz, align 16
call void @t4_helper(i32* %a, i32* %vla, <8 x float>* %v) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
@@ -108,10 +108,10 @@ define i32 @t5(float* nocapture %f) noun
entry:
%a = alloca i32, align 4
%0 = bitcast float* %f to <8 x float>*
- %1 = load <8 x float>* %0, align 32
+ %1 = load <8 x float>, <8 x float>* %0, align 32
call void @t5_helper1(i32* %a) nounwind
call void @t5_helper2(<8 x float> %1) nounwind
- %2 = load i32* %a, align 4
+ %2 = load i32, i32* %a, align 4
%add = add nsw i32 %2, 13
ret i32 %add
@@ -138,11 +138,11 @@ entry:
; CHECK: _t6
%a = alloca i32, align 4
%0 = bitcast float* %f to <8 x float>*
- %1 = load <8 x float>* %0, align 32
+ %1 = load <8 x float>, <8 x float>* %0, align 32
%vla = alloca i32, i64 %sz, align 16
call void @t6_helper1(i32* %a, i32* %vla) nounwind
call void @t6_helper2(<8 x float> %1) nounwind
- %2 = load i32* %a, align 4
+ %2 = load i32, i32* %a, align 4
%add = add nsw i32 %2, 13
ret i32 %add
}
@@ -162,7 +162,7 @@ entry:
store i32 0, i32* %x, align 32
%0 = zext i32 %size to i64
%vla = alloca i32, i64 %0, align 16
- %1 = load i32* %x, align 32
+ %1 = load i32, i32* %x, align 32
call void @bar(i32 %1, i32* %vla, %struct.struct_t* byval align 8 %arg1)
ret void
@@ -195,7 +195,7 @@ define i32 @t8() nounwind uwtable {
entry:
%a = alloca i32, align 4
call void @t1_helper(i32* %a) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
@@ -213,7 +213,7 @@ entry:
%a = alloca i32, align 4
%vla = alloca i32, i64 %sz, align 16
call void @t3_helper(i32* %a, i32* %vla) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
Modified: llvm/trunk/test/CodeGen/X86/early-ifcvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/early-ifcvt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/early-ifcvt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/early-ifcvt.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ do.body:
%n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.cond ]
%p.addr.0 = phi i32* [ %p, %entry ], [ %incdec.ptr, %do.cond ]
%incdec.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 1
- %0 = load i32* %p.addr.0, align 4
+ %0 = load i32, i32* %p.addr.0, align 4
%cmp = icmp sgt i32 %0, %max.0
br i1 %cmp, label %do.cond, label %if.else
Modified: llvm/trunk/test/CodeGen/X86/emit-big-cst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/emit-big-cst.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/emit-big-cst.ll (original)
+++ llvm/trunk/test/CodeGen/X86/emit-big-cst.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
define void @accessBig(i64* %storage) {
%addr = bitcast i64* %storage to i82*
- %bigLoadedCst = load volatile i82* @bigCst
+ %bigLoadedCst = load volatile i82, i82* @bigCst
%tmp = add i82 %bigLoadedCst, 1
store i82 %tmp, i82* %addr
ret void
Modified: llvm/trunk/test/CodeGen/X86/expand-opaque-const.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/expand-opaque-const.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/expand-opaque-const.ll (original)
+++ llvm/trunk/test/CodeGen/X86/expand-opaque-const.ll Fri Feb 27 15:17:42 2015
@@ -11,11 +11,11 @@ entry:
%op2 = alloca i64
store i64 -6687208052682386272, i64* %op1
store i64 7106745059734980448, i64* %op2
- %tmp1 = load i64* %op1
- %tmp2 = load i64* %op2
+ %tmp1 = load i64, i64* %op1
+ %tmp2 = load i64, i64* %op2
%tmp = xor i64 %tmp2, 7106745059734980448
%tmp3 = lshr i64 %tmp1, %tmp
store i64 %tmp3, i64* %retval
- %tmp4 = load i64* %retval
+ %tmp4 = load i64, i64* %retval
ret i64 %tmp4
}
Modified: llvm/trunk/test/CodeGen/X86/extend.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extend.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extend.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extend.ll Fri Feb 27 15:17:42 2015
@@ -5,13 +5,13 @@
@G2 = internal global i8 0 ; <i8*> [#uses=1]
define i16 @test1() {
- %tmp.0 = load i8* @G1 ; <i8> [#uses=1]
+ %tmp.0 = load i8, i8* @G1 ; <i8> [#uses=1]
%tmp.3 = zext i8 %tmp.0 to i16 ; <i16> [#uses=1]
ret i16 %tmp.3
}
define i16 @test2() {
- %tmp.0 = load i8* @G2 ; <i8> [#uses=1]
+ %tmp.0 = load i8, i8* @G2 ; <i8> [#uses=1]
%tmp.3 = sext i8 %tmp.0 to i16 ; <i16> [#uses=1]
ret i16 %tmp.3
}
Modified: llvm/trunk/test/CodeGen/X86/extract-extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extract-extract.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extract-extract.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extract-extract.ll Fri Feb 27 15:17:42 2015
@@ -12,10 +12,10 @@
define fastcc void @foo(%pp* nocapture byval %p_arg) {
entry:
%tmp2 = getelementptr %pp, %pp* %p_arg, i64 0, i32 0 ; <%cc*> [#uses=
- %tmp3 = load %cc* %tmp2 ; <%cc> [#uses=1]
+ %tmp3 = load %cc, %cc* %tmp2 ; <%cc> [#uses=1]
%tmp34 = extractvalue %cc %tmp3, 0 ; <%crd> [#uses=1]
%tmp345 = extractvalue %crd %tmp34, 0 ; <i64> [#uses=1]
- %.ptr.i = load %cr** undef ; <%cr*> [#uses=0]
+ %.ptr.i = load %cr*, %cr** undef ; <%cr*> [#uses=0]
%tmp15.i = shl i64 %tmp345, 3 ; <i64> [#uses=0]
store %cr* undef, %cr** undef
ret void
Modified: llvm/trunk/test/CodeGen/X86/extractelement-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extractelement-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extractelement-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extractelement-load.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define i32 @t(<2 x i64>* %val) nounwind
; CHECK-NOT: movd
; CHECK: movl 8(
; CHECK-NEXT: ret
- %tmp2 = load <2 x i64>* %val, align 16 ; <<2 x i64>> [#uses=1]
+ %tmp2 = load <2 x i64>, <2 x i64>* %val, align 16 ; <<2 x i64>> [#uses=1]
%tmp3 = bitcast <2 x i64> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp3, i32 2 ; <i32> [#uses=1]
ret i32 %tmp4
@@ -20,7 +20,7 @@ define i32 @t(<2 x i64>* %val) nounwind
define i32 @t2(<8 x i32>* %xp) {
; CHECK-LABEL: t2:
; CHECK: ret
- %x = load <8 x i32>* %xp
+ %x = load <8 x i32>, <8 x i32>* %xp
%Shuff68 = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32
undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
%y = extractelement <8 x i32> %Shuff68, i32 0
@@ -41,7 +41,7 @@ define void @t3() {
; CHECK: movhpd
bb:
- %tmp13 = load <2 x double>* undef, align 1
+ %tmp13 = load <2 x double>, <2 x double>* undef, align 1
%.sroa.3.24.vec.extract = extractelement <2 x double> %tmp13, i32 1
store double %.sroa.3.24.vec.extract, double* undef, align 8
unreachable
@@ -55,7 +55,7 @@ define i64 @t4(<2 x double>* %a) {
; CHECK-LABEL: t4:
; CHECK: mov
; CHECK: ret
- %b = load <2 x double>* %a, align 16
+ %b = load <2 x double>, <2 x double>* %a, align 16
%c = shufflevector <2 x double> %b, <2 x double> %b, <2 x i32> <i32 1, i32 0>
%d = bitcast <2 x double> %c to <2 x i64>
%e = extractelement <2 x i64> %d, i32 1
Modified: llvm/trunk/test/CodeGen/X86/extractps.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extractps.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extractps.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extractps.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
external global float, align 16 ; <float*>:0 [#uses=2]
define internal void @""() nounwind {
- load float* @0, align 16 ; <float>:1 [#uses=1]
+ load float, float* @0, align 16 ; <float>:1 [#uses=1]
insertelement <4 x float> undef, float %1, i32 0 ; <<4 x float>>:2 [#uses=1]
call <4 x float> @llvm.x86.sse.rsqrt.ss( <4 x float> %2 ) ; <<4 x float>>:3 [#uses=1]
extractelement <4 x float> %3, i32 0 ; <float>:4 [#uses=1]
@@ -15,7 +15,7 @@ define internal void @""() nounwind {
ret void
}
define internal void @""() nounwind {
- load float* @0, align 16 ; <float>:1 [#uses=1]
+ load float, float* @0, align 16 ; <float>:1 [#uses=1]
insertelement <4 x float> undef, float %1, i32 1 ; <<4 x float>>:2 [#uses=1]
call <4 x float> @llvm.x86.sse.rsqrt.ss( <4 x float> %2 ) ; <<4 x float>>:3 [#uses=1]
extractelement <4 x float> %3, i32 1 ; <float>:4 [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/f16c-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/f16c-intrinsics.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/f16c-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/f16c-intrinsics.ll Fri Feb 27 15:17:42 2015
@@ -25,7 +25,7 @@ entry:
; CHECK-LABEL: test_x86_vcvtph2ps_256_m:
; CHECK-NOT: vmov
; CHECK: vcvtph2ps (%
- %tmp1 = load <8 x i16>* %a, align 16
+ %tmp1 = load <8 x i16>, <8 x i16>* %a, align 16
%0 = tail call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %tmp1)
ret <8 x float> %0
}
@@ -54,7 +54,7 @@ define <4 x float> @test_x86_vcvtps2ph_1
; CHECK-NOT: vmov
; CHECK: vcvtph2ps (%
- %load = load i64* %ptr
+ %load = load i64, i64* %ptr
%ins1 = insertelement <2 x i64> undef, i64 %load, i32 0
%ins2 = insertelement <2 x i64> %ins1, i64 0, i32 1
%bc = bitcast <2 x i64> %ins2 to <8 x i16>
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-args-fail.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-args-fail.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-args-fail.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-args-fail.ll Fri Feb 27 15:17:42 2015
@@ -17,6 +17,6 @@ entry:
; WIN32: movl (%rcx), %eax
; WIN64: foo
; WIN64: movl (%rdi), %eax
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
ret i32 %0
}
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll Fri Feb 27 15:17:42 2015
@@ -15,10 +15,10 @@ entry:
store i32 %x, i32* %x.addr, align 4
store i32 %y, i32* %y.addr, align 4
store i32 %z, i32* %z.addr, align 4
- %tmp = load i32* %x.addr, align 4
- %tmp1 = load i32* %y.addr, align 4
+ %tmp = load i32, i32* %x.addr, align 4
+ %tmp1 = load i32, i32* %y.addr, align 4
%add = add nsw i32 %tmp, %tmp1
- %tmp2 = load i32* %z.addr, align 4
+ %tmp2 = load i32, i32* %z.addr, align 4
%add3 = add nsw i32 %add, %tmp2
ret i32 %add3
}
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-call-bool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-call-bool.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-call-bool.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-call-bool.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ declare i64 @bar(i1)
define i64 @foo(i8* %arg) {
; CHECK-LABEL: foo:
top:
- %0 = load i8* %arg
+ %0 = load i8, i8* %arg
; CHECK: movb
%1 = trunc i8 %0 to i1
; CHECK: andb $1,
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-fold-mem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-fold-mem.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-fold-mem.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-fold-mem.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ define i64 @fold_load(i64* %a, i64 %b) {
; CHECK-LABEL: fold_load
; CHECK: addq (%rdi), %rsi
; CHECK-NEXT: movq %rsi, %rax
- %1 = load i64* %a, align 8
+ %1 = load i64, i64* %a, align 8
%2 = add i64 %1, %b
ret i64 %2
}
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll Fri Feb 27 15:17:42 2015
@@ -47,7 +47,7 @@ define double @single_to_double_rm(float
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: ret
entry:
- %0 = load float* %x, align 4
+ %0 = load float, float* %x, align 4
%conv = fpext float %0 to double
ret double %conv
}
@@ -59,7 +59,7 @@ define float @double_to_single_rm(double
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; ALL-NEXT: ret
entry:
- %0 = load double* %x, align 8
+ %0 = load double, double* %x, align 8
%conv = fptrunc double %0 to float
ret float %conv
}
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-gep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-gep.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-gep.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-gep.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
; PR3181
define i32 @test1(i32 %t3, i32* %t1) nounwind {
%t9 = getelementptr i32, i32* %t1, i32 %t3 ; <i32*> [#uses=1]
- %t15 = load i32* %t9 ; <i32> [#uses=1]
+ %t15 = load i32, i32* %t9 ; <i32> [#uses=1]
ret i32 %t15
; X32-LABEL: test1:
; X32: movl (%eax,%ecx,4), %eax
@@ -21,7 +21,7 @@ define i32 @test1(i32 %t3, i32* %t1) nou
}
define i32 @test2(i64 %t3, i32* %t1) nounwind {
%t9 = getelementptr i32, i32* %t1, i64 %t3 ; <i32*> [#uses=1]
- %t15 = load i32* %t9 ; <i32> [#uses=1]
+ %t15 = load i32, i32* %t9 ; <i32> [#uses=1]
ret i32 %t15
; X32-LABEL: test2:
; X32: movl (%edx,%ecx,4), %e
@@ -38,7 +38,7 @@ define i32 @test2(i64 %t3, i32* %t1) nou
define i8 @test3(i8* %start) nounwind {
entry:
%A = getelementptr i8, i8* %start, i64 -2 ; <i8*> [#uses=1]
- %B = load i8* %A, align 1 ; <i8> [#uses=1]
+ %B = load i8, i8* %A, align 1 ; <i8> [#uses=1]
ret i8 %B
@@ -59,11 +59,11 @@ entry:
%p.addr = alloca double*, align 8 ; <double**> [#uses=2]
store i64 %x, i64* %x.addr
store double* %p, double** %p.addr
- %tmp = load i64* %x.addr ; <i64> [#uses=1]
+ %tmp = load i64, i64* %x.addr ; <i64> [#uses=1]
%add = add nsw i64 %tmp, 16 ; <i64> [#uses=1]
- %tmp1 = load double** %p.addr ; <double*> [#uses=1]
+ %tmp1 = load double*, double** %p.addr ; <double*> [#uses=1]
%arrayidx = getelementptr inbounds double, double* %tmp1, i64 %add ; <double*> [#uses=1]
- %tmp2 = load double* %arrayidx ; <double> [#uses=1]
+ %tmp2 = load double, double* %arrayidx ; <double> [#uses=1]
ret double %tmp2
; X32-LABEL: test4:
@@ -77,7 +77,7 @@ entry:
define i64 @test5(i8* %A, i32 %I, i64 %B) nounwind {
%v8 = getelementptr i8, i8* %A, i32 %I
%v9 = bitcast i8* %v8 to i64*
- %v10 = load i64* %v9
+ %v10 = load i64, i64* %v9
%v11 = add i64 %B, %v10
ret i64 %v11
; X64-LABEL: test5:
@@ -91,7 +91,7 @@ define i64 @test5(i8* %A, i32 %I, i64 %B
; of their blocks.
define void @test6() {
if.end: ; preds = %if.then, %invoke.cont
- %tmp15 = load i64* undef
+ %tmp15 = load i64, i64* undef
%dec = add i64 %tmp15, 13
store i64 %dec, i64* undef
%call17 = invoke i8* @_ZNK18G__FastAllocString4dataEv()
@@ -119,7 +119,7 @@ define i32 @test7({i32,i32,i32}* %tmp1,
%tmp29 = getelementptr inbounds {i32,i32,i32}, {i32,i32,i32}* %tmp1, i32 0, i32 2
- %tmp30 = load i32* %tmp29, align 4
+ %tmp30 = load i32, i32* %tmp29, align 4
%p2 = getelementptr inbounds {i32,i32,i32}, {i32,i32,i32}* %tmp1, i32 0, i32 2
store i32 4, i32* %p2
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-gv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-gv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-gv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-gv.ll Fri Feb 27 15:17:42 2015
@@ -12,15 +12,15 @@ entry:
%retval = alloca i32 ; <i32*> [#uses=2]
%0 = alloca i32 ; <i32*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %1 = load i8 (...)** @f, align 8 ; <i8 (...)*> [#uses=1]
+ %1 = load i8 (...)*, i8 (...)** @f, align 8 ; <i8 (...)*> [#uses=1]
%2 = icmp ne i8 (...)* %1, @kill ; <i1> [#uses=1]
%3 = zext i1 %2 to i32 ; <i32> [#uses=1]
store i32 %3, i32* %0, align 4
- %4 = load i32* %0, align 4 ; <i32> [#uses=1]
+ %4 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
store i32 %4, i32* %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval1
}
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-i1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-i1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-i1.ll Fri Feb 27 15:17:42 2015
@@ -27,7 +27,7 @@ entry:
; CHECK: movb {{.*}} %al
; CHECK-NEXT: xorb $1, %al
; CHECK-NEXT: testb $1
- %tmp = load i8* %a, align 1
+ %tmp = load i8, i8* %a, align 1
%tobool = trunc i8 %tmp to i1
%tobool2 = xor i1 %tobool, true
br i1 %tobool2, label %if.then, label %if.end
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ define double @int_to_double_rm(i32* %a)
; AVX: vcvtsi2sdl (%rdi), %xmm0, %xmm0
; ALL-NEXT: ret
entry:
- %0 = load i32* %a
+ %0 = load i32, i32* %a
%1 = sitofp i32 %0 to double
ret double %1
}
@@ -39,7 +39,7 @@ define float @int_to_float_rm(i32* %a) {
; AVX: vcvtsi2ssl (%rdi), %xmm0, %xmm0
; ALL-NEXT: ret
entry:
- %0 = load i32* %a
+ %0 = load i32, i32* %a
%1 = sitofp i32 %0 to float
ret float %1
}
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-mem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-mem.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-mem.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-mem.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@
; rdar://6653118
define i32 @loadgv() nounwind {
entry:
- %0 = load i32* @src, align 4
- %1 = load i32* @src, align 4
+ %0 = load i32, i32* @src, align 4
+ %1 = load i32, i32* @src, align 4
%2 = add i32 %0, %1
store i32 %2, i32* @src
ret i32 %2
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-tailcall.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-tailcall.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-tailcall.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-tailcall.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
define i32 @stub(i8* %t0) nounwind {
entry:
- %t1 = load i32* inttoptr (i32 139708680 to i32*) ; <i32> [#uses=1]
+ %t1 = load i32, i32* inttoptr (i32 139708680 to i32*) ; <i32> [#uses=1]
%t2 = bitcast i8* %t0 to i32 (i32)* ; <i32 (i32)*> [#uses=1]
%t3 = call fastcc i32 %t2(i32 %t1) ; <i32> [#uses=1]
ret i32 %t3
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-tls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-tls.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-tls.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-tls.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
@v = thread_local global i32 0
define i32 @f() nounwind {
entry:
- %t = load i32* @v
+ %t = load i32, i32* @v
%s = add i32 %t, 1
ret i32 %s
}
@@ -16,7 +16,7 @@ entry:
@alias = internal alias i32* @v
define i32 @f_alias() nounwind {
entry:
- %t = load i32* @v
+ %t = load i32, i32* @v
%s = add i32 %t, 1
ret i32 %s
}
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-x86-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-x86-64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-x86-64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-x86-64.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ define void @test2(i64 %x) nounwind ssp
entry:
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
- %tmp = load i64* %x.addr, align 8
+ %tmp = load i64, i64* %x.addr, align 8
%cmp = icmp sgt i64 %tmp, 42
br i1 %cmp, label %if.then, label %if.end
@@ -53,7 +53,7 @@ define i64 @test3() nounwind {
define i32 @test4(i64 %idxprom9) nounwind {
%arrayidx10 = getelementptr inbounds [153 x i8], [153 x i8]* @rtx_length, i32 0, i64 %idxprom9
- %tmp11 = load i8* %arrayidx10, align 1
+ %tmp11 = load i8, i8* %arrayidx10, align 1
%conv = zext i8 %tmp11 to i32
ret i32 %conv
@@ -212,7 +212,7 @@ declare void @foo() unnamed_addr ssp ali
; w.r.t. the call.
define i32 @test17(i32 *%P) ssp nounwind {
entry:
- %tmp = load i32* %P
+ %tmp = load i32, i32* %P
%cmp = icmp ne i32 %tmp, 5
call void @foo()
br i1 %cmp, label %if.then, label %if.else
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-x86.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-x86.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
; CHECK: retl
@G = external global float
define float @test0() nounwind {
- %t = load float* @G
+ %t = load float, float* @G
ret float %t
}
@@ -28,7 +28,7 @@ define void @test1({i32, i32, i32, i32}*
; CHECK: retl
@HHH = external global i32
define i32 @test2() nounwind {
- %t = load i32* @HHH
+ %t = load i32, i32* @HHH
ret i32 %t
}
Modified: llvm/trunk/test/CodeGen/X86/fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel.ll Fri Feb 27 15:17:42 2015
@@ -5,9 +5,9 @@
define i32* @foo(i32* %p, i32* %q, i32** %z) nounwind {
entry:
- %r = load i32* %p
- %s = load i32* %q
- %y = load i32** %z
+ %r = load i32, i32* %p
+ %s = load i32, i32* %q
+ %y = load i32*, i32** %z
br label %fast
fast:
@@ -29,8 +29,8 @@ exit:
define void @bar(double* %p, double* %q) nounwind {
entry:
- %r = load double* %p
- %s = load double* %q
+ %r = load double, double* %p
+ %s = load double, double* %q
br label %fast
fast:
@@ -94,7 +94,7 @@ define void @mul_i8(i8 %a, i8* %p) nounw
}
define void @load_store_i1(i1* %p, i1* %q) nounwind {
- %t = load i1* %p
+ %t = load i1, i1* %p
store i1 %t, i1* %q
ret void
}
@@ -102,7 +102,7 @@ define void @load_store_i1(i1* %p, i1* %
@crash_test1x = external global <2 x i32>, align 8
define void @crash_test1() nounwind ssp {
- %tmp = load <2 x i32>* @crash_test1x, align 8
+ %tmp = load <2 x i32>, <2 x i32>* @crash_test1x, align 8
%neg = xor <2 x i32> %tmp, <i32 -1, i32 -1>
ret void
}
@@ -113,7 +113,7 @@ define i64* @life() nounwind {
%a1 = alloca i64*, align 8
%a2 = bitcast i64** %a1 to i8*
call void @llvm.lifetime.start(i64 -1, i8* %a2) nounwind
- %a3 = load i64** %a1, align 8
+ %a3 = load i64*, i64** %a1, align 8
ret i64* %a3
}
Modified: llvm/trunk/test/CodeGen/X86/fastcc-byval.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fastcc-byval.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fastcc-byval.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fastcc-byval.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ define fastcc i32 @bar() nounwind {
%a = getelementptr %struct.MVT, %struct.MVT* %V, i32 0, i32 0
store i32 1, i32* %a
call fastcc void @foo(%struct.MVT* byval %V) nounwind
- %t = load i32* %a
+ %t = load i32, i32* %a
ret i32 %t
}
Modified: llvm/trunk/test/CodeGen/X86/fastcc-sret.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fastcc-sret.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fastcc-sret.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fastcc-sret.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ define void @foo() nounwind {
call fastcc void @bar( %struct.foo* sret %memtmp ) nounwind
%tmp4 = getelementptr %struct.foo, %struct.foo* %memtmp, i32 0, i32 0
%tmp5 = getelementptr [4 x i32], [4 x i32]* %tmp4, i32 0, i32 0
- %tmp6 = load i32* %tmp5
+ %tmp6 = load i32, i32* %tmp5
store i32 %tmp6, i32* @dst
ret void
}
Modified: llvm/trunk/test/CodeGen/X86/fastcc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fastcc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fastcc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fastcc.ll Fri Feb 27 15:17:42 2015
@@ -9,10 +9,10 @@
define i32 @foo() nounwind {
entry:
- %0 = load double* @d, align 8 ; <double> [#uses=1]
- %1 = load double* @c, align 8 ; <double> [#uses=1]
- %2 = load double* @b, align 8 ; <double> [#uses=1]
- %3 = load double* @a, align 8 ; <double> [#uses=1]
+ %0 = load double, double* @d, align 8 ; <double> [#uses=1]
+ %1 = load double, double* @c, align 8 ; <double> [#uses=1]
+ %2 = load double, double* @b, align 8 ; <double> [#uses=1]
+ %3 = load double, double* @a, align 8 ; <double> [#uses=1]
tail call fastcc void @bar( i32 0, i32 1, i32 2, double 1.000000e+00, double %3, double %2, double %1, double %0 ) nounwind
ret i32 0
}
Modified: llvm/trunk/test/CodeGen/X86/fastisel-gep-promote-before-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fastisel-gep-promote-before-add.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fastisel-gep-promote-before-add.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fastisel-gep-promote-before-add.ll Fri Feb 27 15:17:42 2015
@@ -6,13 +6,13 @@ define zeroext i8 @gep_promotion(i8* %pt
entry:
%ptr.addr = alloca i8*, align 8
%add = add i8 64, 64 ; 0x40 + 0x40
- %0 = load i8** %ptr.addr, align 8
+ %0 = load i8*, i8** %ptr.addr, align 8
; CHECK-LABEL: _gep_promotion:
; CHECK: movzbl ({{.*}})
%arrayidx = getelementptr inbounds i8, i8* %0, i8 %add
- %1 = load i8* %arrayidx, align 1
+ %1 = load i8, i8* %arrayidx, align 1
ret i8 %1
}
@@ -22,16 +22,16 @@ entry:
%ptr.addr = alloca i8*, align 8
store i8 %i, i8* %i.addr, align 4
store i8* %ptr, i8** %ptr.addr, align 8
- %0 = load i8* %i.addr, align 4
+ %0 = load i8, i8* %i.addr, align 4
; CHECK-LABEL: _gep_promotion_nonconst:
; CHECK: movzbl ({{.*}})
%xor = xor i8 %0, -128 ; %0 ^ 0x80
%add = add i8 %xor, -127 ; %xor + 0x81
- %1 = load i8** %ptr.addr, align 8
+ %1 = load i8*, i8** %ptr.addr, align 8
%arrayidx = getelementptr inbounds i8, i8* %1, i8 %add
- %2 = load i8* %arrayidx, align 1
+ %2 = load i8, i8* %arrayidx, align 1
ret i8 %2
}
Modified: llvm/trunk/test/CodeGen/X86/fma-do-not-commute.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma-do-not-commute.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma-do-not-commute.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma-do-not-commute.ll Fri Feb 27 15:17:42 2015
@@ -18,8 +18,8 @@ entry:
loop:
%sum0 = phi float [ %fma, %loop ], [ %arg, %entry ]
- %addrVal = load float* %addr, align 4
- %addr2Val = load float* %addr2, align 4
+ %addrVal = load float, float* %addr, align 4
+ %addr2Val = load float, float* %addr2, align 4
%fmul = fmul float %addrVal, %addr2Val
%fma = fadd float %sum0, %fmul
br i1 true, label %exit, label %loop
Modified: llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll Fri Feb 27 15:17:42 2015
@@ -4,14 +4,14 @@
; VFMADD
define < 4 x float > @test_x86_fma_vfmadd_ss_load(< 4 x float > %a0, < 4 x float > %a1, float* %a2) {
; CHECK: vfmaddss (%{{.*}})
- %x = load float *%a2
+ %x = load float , float *%a2
%y = insertelement <4 x float> undef, float %x, i32 0
%res = call < 4 x float > @llvm.x86.fma.vfmadd.ss(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %y)
ret < 4 x float > %res
}
define < 4 x float > @test_x86_fma_vfmadd_ss_load2(< 4 x float > %a0, float* %a1, < 4 x float > %a2) {
; CHECK: vfmaddss %{{.*}}, (%{{.*}})
- %x = load float *%a1
+ %x = load float , float *%a1
%y = insertelement <4 x float> undef, float %x, i32 0
%res = call < 4 x float > @llvm.x86.fma.vfmadd.ss(< 4 x float > %a0, < 4 x float > %y, < 4 x float > %a2)
ret < 4 x float > %res
@@ -21,14 +21,14 @@ declare < 4 x float > @llvm.x86.fma.vfma
define < 2 x double > @test_x86_fma_vfmadd_sd_load(< 2 x double > %a0, < 2 x double > %a1, double* %a2) {
; CHECK: vfmaddsd (%{{.*}})
- %x = load double *%a2
+ %x = load double , double *%a2
%y = insertelement <2 x double> undef, double %x, i32 0
%res = call < 2 x double > @llvm.x86.fma.vfmadd.sd(< 2 x double > %a0, < 2 x double > %a1, < 2 x double > %y)
ret < 2 x double > %res
}
define < 2 x double > @test_x86_fma_vfmadd_sd_load2(< 2 x double > %a0, double* %a1, < 2 x double > %a2) {
; CHECK: vfmaddsd %{{.*}}, (%{{.*}})
- %x = load double *%a1
+ %x = load double , double *%a1
%y = insertelement <2 x double> undef, double %x, i32 0
%res = call < 2 x double > @llvm.x86.fma.vfmadd.sd(< 2 x double > %a0, < 2 x double > %y, < 2 x double > %a2)
ret < 2 x double > %res
@@ -36,13 +36,13 @@ define < 2 x double > @test_x86_fma_vfma
declare < 2 x double > @llvm.x86.fma.vfmadd.sd(< 2 x double >, < 2 x double >, < 2 x double >) nounwind readnone
define < 4 x float > @test_x86_fma_vfmadd_ps_load(< 4 x float > %a0, < 4 x float > %a1, < 4 x float >* %a2) {
; CHECK: vfmaddps (%{{.*}})
- %x = load <4 x float>* %a2
+ %x = load <4 x float>, <4 x float>* %a2
%res = call < 4 x float > @llvm.x86.fma.vfmadd.ps(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %x)
ret < 4 x float > %res
}
define < 4 x float > @test_x86_fma_vfmadd_ps_load2(< 4 x float > %a0, < 4 x float >* %a1, < 4 x float > %a2) {
; CHECK: vfmaddps %{{.*}}, (%{{.*}})
- %x = load <4 x float>* %a1
+ %x = load <4 x float>, <4 x float>* %a1
%res = call < 4 x float > @llvm.x86.fma.vfmadd.ps(< 4 x float > %a0, < 4 x float > %x, < 4 x float > %a2)
ret < 4 x float > %res
}
@@ -52,21 +52,21 @@ declare < 4 x float > @llvm.x86.fma.vfma
define < 4 x float > @test_x86_fma_vfmadd_ps_load3(< 4 x float >* %a0, < 4 x float >* %a1, < 4 x float > %a2) {
; CHECK: vmovaps
; CHECK: vfmaddps %{{.*}}, (%{{.*}})
- %x = load <4 x float>* %a0
- %y = load <4 x float>* %a1
+ %x = load <4 x float>, <4 x float>* %a0
+ %y = load <4 x float>, <4 x float>* %a1
%res = call < 4 x float > @llvm.x86.fma.vfmadd.ps(< 4 x float > %x, < 4 x float > %y, < 4 x float > %a2)
ret < 4 x float > %res
}
define < 2 x double > @test_x86_fma_vfmadd_pd_load(< 2 x double > %a0, < 2 x double > %a1, < 2 x double >* %a2) {
; CHECK: vfmaddpd (%{{.*}})
- %x = load <2 x double>* %a2
+ %x = load <2 x double>, <2 x double>* %a2
%res = call < 2 x double > @llvm.x86.fma.vfmadd.pd(< 2 x double > %a0, < 2 x double > %a1, < 2 x double > %x)
ret < 2 x double > %res
}
define < 2 x double > @test_x86_fma_vfmadd_pd_load2(< 2 x double > %a0, < 2 x double >* %a1, < 2 x double > %a2) {
; CHECK: vfmaddpd %{{.*}}, (%{{.*}})
- %x = load <2 x double>* %a1
+ %x = load <2 x double>, <2 x double>* %a1
%res = call < 2 x double > @llvm.x86.fma.vfmadd.pd(< 2 x double > %a0, < 2 x double > %x, < 2 x double > %a2)
ret < 2 x double > %res
}
@@ -76,8 +76,8 @@ declare < 2 x double > @llvm.x86.fma.vfm
define < 2 x double > @test_x86_fma_vfmadd_pd_load3(< 2 x double >* %a0, < 2 x double >* %a1, < 2 x double > %a2) {
; CHECK: vmovapd
; CHECK: vfmaddpd %{{.*}}, (%{{.*}})
- %x = load <2 x double>* %a0
- %y = load <2 x double>* %a1
+ %x = load <2 x double>, <2 x double>* %a0
+ %y = load <2 x double>, <2 x double>* %a1
%res = call < 2 x double > @llvm.x86.fma.vfmadd.pd(< 2 x double > %x, < 2 x double > %y, < 2 x double > %a2)
ret < 2 x double > %res
}
Modified: llvm/trunk/test/CodeGen/X86/fma_patterns.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma_patterns.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma_patterns.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma_patterns.ll Fri Feb 27 15:17:42 2015
@@ -190,7 +190,7 @@ define float @test_x86_fnmsub_ss(float %
; CHECK_FMA4: vfmaddps %xmm1, (%rdi), %xmm0, %xmm0
; CHECK_FMA4: ret
define <4 x float> @test_x86_fmadd_ps_load(<4 x float>* %a0, <4 x float> %a1, <4 x float> %a2) {
- %x = load <4 x float>* %a0
+ %x = load <4 x float>, <4 x float>* %a0
%y = fmul <4 x float> %x, %a1
%res = fadd <4 x float> %y, %a2
ret <4 x float> %res
@@ -204,7 +204,7 @@ define <4 x float> @test_x86_fmadd_ps_lo
; CHECK_FMA4: vfmsubps %xmm1, (%rdi), %xmm0, %xmm0
; CHECK_FMA4: ret
define <4 x float> @test_x86_fmsub_ps_load(<4 x float>* %a0, <4 x float> %a1, <4 x float> %a2) {
- %x = load <4 x float>* %a0
+ %x = load <4 x float>, <4 x float>* %a0
%y = fmul <4 x float> %x, %a1
%res = fsub <4 x float> %y, %a2
ret <4 x float> %res
Modified: llvm/trunk/test/CodeGen/X86/fmul-zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fmul-zero.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fmul-zero.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fmul-zero.ll Fri Feb 27 15:17:42 2015
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=x86-64 | grep mulps
define void @test14(<4 x float>*) nounwind {
- load <4 x float>* %0, align 1
+ load <4 x float>, <4 x float>* %0, align 1
fmul <4 x float> %2, zeroinitializer
store <4 x float> %3, <4 x float>* %0, align 1
ret void
Modified: llvm/trunk/test/CodeGen/X86/fold-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-add.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-add.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-add.ll Fri Feb 27 15:17:42 2015
@@ -13,12 +13,12 @@ define fastcc i32 @longest_match(i32 %cu
; CHECK: ret
entry:
- %0 = load i32* @prev_length, align 4 ; <i32> [#uses=3]
+ %0 = load i32, i32* @prev_length, align 4 ; <i32> [#uses=3]
%1 = zext i32 %cur_match to i64 ; <i64> [#uses=1]
%2 = sext i32 %0 to i64 ; <i64> [#uses=1]
%.sum3 = add i64 %1, %2 ; <i64> [#uses=1]
%3 = getelementptr [65536 x i8], [65536 x i8]* @window, i64 0, i64 %.sum3 ; <i8*> [#uses=1]
- %4 = load i8* %3, align 1 ; <i8> [#uses=1]
+ %4 = load i8, i8* %3, align 1 ; <i8> [#uses=1]
%5 = icmp eq i8 %4, 0 ; <i1> [#uses=1]
br i1 %5, label %bb5, label %bb23
Modified: llvm/trunk/test/CodeGen/X86/fold-and-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-and-shift.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-and-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-and-shift.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ entry:
%tmp4 = and i32 %tmp2, 1020
%tmp7 = getelementptr i8, i8* %X, i32 %tmp4
%tmp78 = bitcast i8* %tmp7 to i32*
- %tmp9 = load i32* %tmp78
+ %tmp9 = load i32, i32* %tmp78
ret i32 %tmp9
}
@@ -28,7 +28,7 @@ entry:
%tmp4 = and i32 %tmp2, 131070
%tmp7 = getelementptr i16, i16* %X, i32 %tmp4
%tmp78 = bitcast i16* %tmp7 to i32*
- %tmp9 = load i32* %tmp78
+ %tmp9 = load i32, i32* %tmp78
ret i32 %tmp9
}
@@ -46,11 +46,11 @@ define i32 @t3(i16* %i.ptr, i32* %arr) {
; CHECK: ret
entry:
- %i = load i16* %i.ptr
+ %i = load i16, i16* %i.ptr
%i.zext = zext i16 %i to i32
%index = lshr i32 %i.zext, 11
%val.ptr = getelementptr inbounds i32, i32* %arr, i32 %index
- %val = load i32* %val.ptr
+ %val = load i32, i32* %val.ptr
%sum = add i32 %val, %i.zext
ret i32 %sum
}
@@ -65,12 +65,12 @@ define i32 @t4(i16* %i.ptr, i32* %arr) {
; CHECK: ret
entry:
- %i = load i16* %i.ptr
+ %i = load i16, i16* %i.ptr
%i.zext = zext i16 %i to i32
%index = lshr i32 %i.zext, 11
%index.zext = zext i32 %index to i64
%val.ptr = getelementptr inbounds i32, i32* %arr, i64 %index.zext
- %val = load i32* %val.ptr
+ %val = load i32, i32* %val.ptr
%sum.1 = add i32 %val, %i.zext
%sum.2 = add i32 %sum.1, %index
ret i32 %sum.2
Modified: llvm/trunk/test/CodeGen/X86/fold-call-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-call-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-call-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-call-2.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
define i32 @main() nounwind {
entry:
- load void ()** @f, align 8 ; <void ()*>:0 [#uses=1]
+ load void ()*, void ()** @f, align 8 ; <void ()*>:0 [#uses=1]
tail call void %0( ) nounwind
ret i32 0
}
Modified: llvm/trunk/test/CodeGen/X86/fold-call-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-call-3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-call-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-call-3.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
define void @_Z25RawPointerPerformanceTestPvRN5clang6ActionE(i8* %Val, %"struct.clang::Action"* %Actions) nounwind {
entry:
%0 = alloca %"struct.clang::ActionBase::ActionResult<0u>", align 8 ; <%"struct.clang::ActionBase::ActionResult<0u>"*> [#uses=3]
- %1 = load i32* @NumTrials, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* @NumTrials, align 4 ; <i32> [#uses=1]
%2 = icmp eq i32 %1, 0 ; <i1> [#uses=1]
br i1 %2, label %return, label %bb.nph
@@ -25,18 +25,18 @@ bb.nph: ; preds = %entry
bb: ; preds = %bb, %bb.nph
%Trial.01 = phi i32 [ 0, %bb.nph ], [ %12, %bb ] ; <i32> [#uses=1]
%Val_addr.02 = phi i8* [ %Val, %bb.nph ], [ %11, %bb ] ; <i8*> [#uses=1]
- %6 = load i32 (...)*** %3, align 8 ; <i32 (...)**> [#uses=1]
+ %6 = load i32 (...)**, i32 (...)*** %3, align 8 ; <i32 (...)**> [#uses=1]
%7 = getelementptr i32 (...)*, i32 (...)** %6, i64 70 ; <i32 (...)**> [#uses=1]
- %8 = load i32 (...)** %7, align 8 ; <i32 (...)*> [#uses=1]
+ %8 = load i32 (...)*, i32 (...)** %7, align 8 ; <i32 (...)*> [#uses=1]
%9 = bitcast i32 (...)* %8 to { i64, i64 } (%"struct.clang::Action"*, i8*)* ; <{ i64, i64 } (%"struct.clang::Action"*, i8*)*> [#uses=1]
%10 = call { i64, i64 } %9(%"struct.clang::Action"* %Actions, i8* %Val_addr.02) nounwind ; <{ i64, i64 }> [#uses=2]
%mrv_gr = extractvalue { i64, i64 } %10, 0 ; <i64> [#uses=1]
store i64 %mrv_gr, i64* %mrv_gep
%mrv_gr2 = extractvalue { i64, i64 } %10, 1 ; <i64> [#uses=1]
store i64 %mrv_gr2, i64* %4
- %11 = load i8** %5, align 8 ; <i8*> [#uses=1]
+ %11 = load i8*, i8** %5, align 8 ; <i8*> [#uses=1]
%12 = add i32 %Trial.01, 1 ; <i32> [#uses=2]
- %13 = load i32* @NumTrials, align 4 ; <i32> [#uses=1]
+ %13 = load i32, i32* @NumTrials, align 4 ; <i32> [#uses=1]
%14 = icmp ult i32 %12, %13 ; <i1> [#uses=1]
br i1 %14, label %bb, label %return
Modified: llvm/trunk/test/CodeGen/X86/fold-call-oper.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-call-oper.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-call-oper.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-call-oper.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ target datalayout = "e-m:e-i64:64-f80:12
; CHECK: callq *{{.*}}(%rbp)
define void @foldCallOper(i32 (i32*, i32, i32**)* nocapture %p1) #0 {
entry:
- %0 = load i32*** @a, align 8
+ %0 = load i32**, i32*** @a, align 8
br label %for.body.i
for.body.i: ; preds = %for.body.i, %entry
@@ -33,14 +33,14 @@ for.body3.i:
br i1 %tobool.i, label %for.inc8.i, label %if.then.i
if.then.i: ; preds = %for.body3.i
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
store i32 %2, i32* @b, align 4
br label %for.inc8.i
for.inc8.i: ; preds = %if.then.i, %for.body3.i
%lftr.wideiv.i = trunc i64 %indvars.iv.i to i32
%arrayidx4.phi.trans.insert.i = getelementptr inbounds [0 x i32*], [0 x i32*]* undef, i64 0, i64 %indvars.iv.i
- %.pre.i = load i32** %arrayidx4.phi.trans.insert.i, align 8
+ %.pre.i = load i32*, i32** %arrayidx4.phi.trans.insert.i, align 8
%phitmp.i = add i64 %indvars.iv.i, 1
br label %for.body3.i
}
Modified: llvm/trunk/test/CodeGen/X86/fold-call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-call.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-call.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-call.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ define void @test1(i32 %i0, i32 %i1, i32
define void @test2(%struct.X* nocapture %x) {
entry:
%f = getelementptr inbounds %struct.X, %struct.X* %x, i64 0, i32 0
- %0 = load void ()** %f
+ %0 = load void ()*, void ()** %f
store void ()* null, void ()** %f
tail call void %0()
ret void
Modified: llvm/trunk/test/CodeGen/X86/fold-load-unops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-load-unops.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-load-unops.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-load-unops.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define float @rcpss(float* %a) {
; CHECK-LABEL: rcpss:
; CHECK: vrcpss (%rdi), %xmm0, %xmm0
- %ld = load float* %a
+ %ld = load float, float* %a
%ins = insertelement <4 x float> undef, float %ld, i32 0
%res = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %ins)
%ext = extractelement <4 x float> %res, i32 0
@@ -20,7 +20,7 @@ define float @rsqrtss(float* %a) {
; CHECK-LABEL: rsqrtss:
; CHECK: vrsqrtss (%rdi), %xmm0, %xmm0
- %ld = load float* %a
+ %ld = load float, float* %a
%ins = insertelement <4 x float> undef, float %ld, i32 0
%res = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %ins)
%ext = extractelement <4 x float> %res, i32 0
@@ -31,7 +31,7 @@ define float @sqrtss(float* %a) {
; CHECK-LABEL: sqrtss:
; CHECK: vsqrtss (%rdi), %xmm0, %xmm0
- %ld = load float* %a
+ %ld = load float, float* %a
%ins = insertelement <4 x float> undef, float %ld, i32 0
%res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ins)
%ext = extractelement <4 x float> %res, i32 0
@@ -42,7 +42,7 @@ define double @sqrtsd(double* %a) {
; CHECK-LABEL: sqrtsd:
; CHECK: vsqrtsd (%rdi), %xmm0, %xmm0
- %ld = load double* %a
+ %ld = load double, double* %a
%ins = insertelement <2 x double> undef, double %ld, i32 0
%res = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %ins)
%ext = extractelement <2 x double> %res, i32 0
Modified: llvm/trunk/test/CodeGen/X86/fold-load-vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-load-vec.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-load-vec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-load-vec.ll Fri Feb 27 15:17:42 2015
@@ -14,24 +14,24 @@ entry:
store <4 x float>* %source, <4 x float>** %source.addr, align 8
store <2 x float>* %dest, <2 x float>** %dest.addr, align 8
store <2 x float> zeroinitializer, <2 x float>* %tmp, align 8
- %0 = load <4 x float>** %source.addr, align 8
+ %0 = load <4 x float>*, <4 x float>** %source.addr, align 8
%arrayidx = getelementptr inbounds <4 x float>, <4 x float>* %0, i64 0
- %1 = load <4 x float>* %arrayidx, align 16
+ %1 = load <4 x float>, <4 x float>* %arrayidx, align 16
%2 = extractelement <4 x float> %1, i32 0
- %3 = load <2 x float>* %tmp, align 8
+ %3 = load <2 x float>, <2 x float>* %tmp, align 8
%4 = insertelement <2 x float> %3, float %2, i32 1
store <2 x float> %4, <2 x float>* %tmp, align 8
- %5 = load <2 x float>* %tmp, align 8
- %6 = load <2 x float>** %dest.addr, align 8
+ %5 = load <2 x float>, <2 x float>* %tmp, align 8
+ %6 = load <2 x float>*, <2 x float>** %dest.addr, align 8
%arrayidx1 = getelementptr inbounds <2 x float>, <2 x float>* %6, i64 0
store <2 x float> %5, <2 x float>* %arrayidx1, align 8
- %7 = load <2 x float>** %dest.addr, align 8
+ %7 = load <2 x float>*, <2 x float>** %dest.addr, align 8
%arrayidx2 = getelementptr inbounds <2 x float>, <2 x float>* %7, i64 0
- %8 = load <2 x float>* %arrayidx2, align 8
+ %8 = load <2 x float>, <2 x float>* %arrayidx2, align 8
%vecext = extractelement <2 x float> %8, i32 0
- %9 = load <2 x float>** %dest.addr, align 8
+ %9 = load <2 x float>*, <2 x float>** %dest.addr, align 8
%arrayidx3 = getelementptr inbounds <2 x float>, <2 x float>* %9, i64 0
- %10 = load <2 x float>* %arrayidx3, align 8
+ %10 = load <2 x float>, <2 x float>* %arrayidx3, align 8
%vecext4 = extractelement <2 x float> %10, i32 1
call void @ext(float %vecext, float %vecext4)
ret void
Modified: llvm/trunk/test/CodeGen/X86/fold-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-load.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ entry:
cond_true: ; preds = %entry
%new_size.0.i = select i1 false, i32 0, i32 0 ; <i32> [#uses=1]
- %tmp.i = load i32* bitcast (i8* getelementptr (%struct.obstack* @stmt_obstack, i32 0, i32 10) to i32*) ; <i32> [#uses=1]
+ %tmp.i = load i32, i32* bitcast (i8* getelementptr (%struct.obstack* @stmt_obstack, i32 0, i32 10) to i32*) ; <i32> [#uses=1]
%tmp.i.upgrd.1 = trunc i32 %tmp.i to i8 ; <i8> [#uses=1]
%tmp21.i = and i8 %tmp.i.upgrd.1, 1 ; <i8> [#uses=1]
%tmp22.i = icmp eq i8 %tmp21.i, 0 ; <i1> [#uses=1]
@@ -30,7 +30,7 @@ cond_next: ; preds = %entry
define i32 @test2(i16* %P, i16* %Q) nounwind {
- %A = load i16* %P, align 4 ; <i16> [#uses=11]
+ %A = load i16, i16* %P, align 4 ; <i16> [#uses=11]
%C = zext i16 %A to i32 ; <i32> [#uses=1]
%D = and i32 %C, 255 ; <i32> [#uses=1]
br label %L
@@ -54,8 +54,8 @@ define i1 @test3(i32* %P, i32* %Q) nounw
; CHECK: xorl (%e
; CHECK: j
entry:
- %0 = load i32* %P, align 4
- %1 = load i32* %Q, align 4
+ %0 = load i32, i32* %P, align 4
+ %1 = load i32, i32* %Q, align 4
%2 = xor i32 %0, %1
%3 = and i32 %2, 89947
%4 = icmp eq i32 %3, 0
Modified: llvm/trunk/test/CodeGen/X86/fold-mul-lohi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-mul-lohi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-mul-lohi.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-mul-lohi.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ entry:
bb:
%i.019.0 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ]
%tmp2 = getelementptr [1000 x i8], [1000 x i8]* @B, i32 0, i32 %i.019.0
- %tmp3 = load i8* %tmp2, align 4
+ %tmp3 = load i8, i8* %tmp2, align 4
%tmp4 = mul i8 %tmp3, 2
%tmp5 = getelementptr [1000 x i8], [1000 x i8]* @A, i32 0, i32 %i.019.0
store i8 %tmp4, i8* %tmp5, align 4
Modified: llvm/trunk/test/CodeGen/X86/fold-pcmpeqd-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-pcmpeqd-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-pcmpeqd-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-pcmpeqd-2.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@
define void @program_1(%struct._image2d_t* %dest, %struct._image2d_t* %t0, <4 x float> %p0, <4 x float> %p1, <4 x float> %p4, <4 x float> %p5, <4 x float> %p6) nounwind {
entry:
- %tmp3.i = load i32* null ; <i32> [#uses=1]
+ %tmp3.i = load i32, i32* null ; <i32> [#uses=1]
%cmp = icmp slt i32 0, %tmp3.i ; <i1> [#uses=1]
br i1 %cmp, label %forcond, label %ifthen
@@ -28,7 +28,7 @@ ifthen: ; preds = %entry
ret void
forcond: ; preds = %entry
- %tmp3.i536 = load i32* null ; <i32> [#uses=1]
+ %tmp3.i536 = load i32, i32* null ; <i32> [#uses=1]
%cmp12 = icmp slt i32 0, %tmp3.i536 ; <i1> [#uses=1]
br i1 %cmp12, label %forbody, label %afterfor
Modified: llvm/trunk/test/CodeGen/X86/fold-sext-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-sext-trunc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-sext-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-sext-trunc.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@ declare void @func_28(i64, i64)
define void @int322(i32 %foo) nounwind {
entry:
- %val = load i64* getelementptr (%0* bitcast (%struct.S1* @g_10 to %0*), i32 0, i32 0) ; <i64> [#uses=1]
- %0 = load i32* getelementptr (%struct.S1* @g_10, i32 0, i32 1), align 4 ; <i32> [#uses=1]
+ %val = load i64, i64* getelementptr (%0* bitcast (%struct.S1* @g_10 to %0*), i32 0, i32 0) ; <i64> [#uses=1]
+ %0 = load i32, i32* getelementptr (%struct.S1* @g_10, i32 0, i32 1), align 4 ; <i32> [#uses=1]
%1 = sext i32 %0 to i64 ; <i64> [#uses=1]
%tmp4.i = lshr i64 %val, 32 ; <i64> [#uses=1]
%tmp5.i = trunc i64 %tmp4.i to i32 ; <i32> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/fold-tied-op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-tied-op.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-tied-op.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-tied-op.ll Fri Feb 27 15:17:42 2015
@@ -23,20 +23,20 @@ target triple = "i386--netbsd"
; Function Attrs: nounwind uwtable
define i64 @fn1() #0 {
entry:
- %0 = load i32* @a, align 4, !tbaa !1
+ %0 = load i32, i32* @a, align 4, !tbaa !1
%1 = inttoptr i32 %0 to %struct.XXH_state64_t*
%total_len = getelementptr inbounds %struct.XXH_state64_t, %struct.XXH_state64_t* %1, i32 0, i32 0
- %2 = load i32* %total_len, align 4, !tbaa !5
+ %2 = load i32, i32* %total_len, align 4, !tbaa !5
%tobool = icmp eq i32 %2, 0
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
%v3 = getelementptr inbounds %struct.XXH_state64_t, %struct.XXH_state64_t* %1, i32 0, i32 3
- %3 = load i64* %v3, align 4, !tbaa !8
+ %3 = load i64, i64* %v3, align 4, !tbaa !8
%v4 = getelementptr inbounds %struct.XXH_state64_t, %struct.XXH_state64_t* %1, i32 0, i32 4
- %4 = load i64* %v4, align 4, !tbaa !9
+ %4 = load i64, i64* %v4, align 4, !tbaa !9
%v2 = getelementptr inbounds %struct.XXH_state64_t, %struct.XXH_state64_t* %1, i32 0, i32 2
- %5 = load i64* %v2, align 4, !tbaa !10
+ %5 = load i64, i64* %v2, align 4, !tbaa !10
%shl = shl i64 %5, 1
%or = or i64 %shl, %5
%shl2 = shl i64 %3, 2
@@ -54,7 +54,7 @@ if.then:
br label %if.end
if.else: ; preds = %entry
- %6 = load i64* @b, align 8, !tbaa !11
+ %6 = load i64, i64* @b, align 8, !tbaa !11
%xor10 = xor i64 %6, -4417276706812531889
%mul11 = mul nsw i64 %xor10, 400714785074694791
br label %if.end
Modified: llvm/trunk/test/CodeGen/X86/fold-vex.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-vex.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-vex.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-vex.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@
; unless specially configured on some CPUs such as AMD Family 10H.
define <4 x i32> @test1(<4 x i32>* %p0, <4 x i32> %in1) nounwind {
- %in0 = load <4 x i32>* %p0, align 2
+ %in0 = load <4 x i32>, <4 x i32>* %p0, align 2
%a = and <4 x i32> %in0, %in1
ret <4 x i32> %a
Modified: llvm/trunk/test/CodeGen/X86/fold-zext-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-zext-trunc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-zext-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-zext-trunc.ll Fri Feb 27 15:17:42 2015
@@ -12,9 +12,9 @@ define void @foo() nounwind {
; CHECK-NOT: movzbl
; CHECK: calll
entry:
- %tmp17 = load i8* getelementptr inbounds (%struct.S0* @g_98, i32 0, i32 1, i32 0), align 4
+ %tmp17 = load i8, i8* getelementptr inbounds (%struct.S0* @g_98, i32 0, i32 1, i32 0), align 4
%tmp54 = zext i8 %tmp17 to i32
- %foo = load i32* bitcast (i8* getelementptr inbounds (%struct.S0* @g_98, i32 0, i32 1, i32 0) to i32*), align 4
+ %foo = load i32, i32* bitcast (i8* getelementptr inbounds (%struct.S0* @g_98, i32 0, i32 1, i32 0) to i32*), align 4
%conv.i = trunc i32 %foo to i8
tail call void @func_12(i32 %tmp54, i8 zeroext %conv.i) nounwind
ret void
Modified: llvm/trunk/test/CodeGen/X86/force-align-stack-alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/force-align-stack-alloca.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/force-align-stack-alloca.ll (original)
+++ llvm/trunk/test/CodeGen/X86/force-align-stack-alloca.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ target triple = "i386-unknown-linux-gnu"
define i32 @f(i8* %p) nounwind {
entry:
- %0 = load i8* %p
+ %0 = load i8, i8* %p
%conv = sext i8 %0 to i32
ret i32 %conv
}
Modified: llvm/trunk/test/CodeGen/X86/fp-double-rounding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-double-rounding.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-double-rounding.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-double-rounding.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ target triple = "x86_64--"
; UNSAFE-NOT: cvt
define void @double_rounding(fp128* %x, float* %f) {
entry:
- %0 = load fp128* %x, align 16
+ %0 = load fp128, fp128* %x, align 16
%1 = fptrunc fp128 %0 to double
%2 = fptrunc double %1 to float
store float %2, float* %f, align 4
Modified: llvm/trunk/test/CodeGen/X86/fp-load-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-load-trunc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-load-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-load-trunc.ll Fri Feb 27 15:17:42 2015
@@ -23,7 +23,7 @@ define <1 x float> @test1(<1 x double>*
; AVX-NEXT: flds (%esp)
; AVX-NEXT: popl %eax
; AVX-NEXT: retl
- %x = load <1 x double>* %p
+ %x = load <1 x double>, <1 x double>* %p
%y = fptrunc <1 x double> %x to <1 x float>
ret <1 x float> %y
}
@@ -40,7 +40,7 @@ define <2 x float> @test2(<2 x double>*
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: vcvtpd2psx (%eax), %xmm0
; AVX-NEXT: retl
- %x = load <2 x double>* %p
+ %x = load <2 x double>, <2 x double>* %p
%y = fptrunc <2 x double> %x to <2 x float>
ret <2 x float> %y
}
@@ -59,7 +59,7 @@ define <4 x float> @test3(<4 x double>*
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: vcvtpd2psy (%eax), %xmm0
; AVX-NEXT: retl
- %x = load <4 x double>* %p
+ %x = load <4 x double>, <4 x double>* %p
%y = fptrunc <4 x double> %x to <4 x float>
ret <4 x float> %y
}
@@ -83,7 +83,7 @@ define <8 x float> @test4(<8 x double>*
; AVX-NEXT: vcvtpd2psy 32(%eax), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retl
- %x = load <8 x double>* %p
+ %x = load <8 x double>, <8 x double>* %p
%y = fptrunc <8 x double> %x to <8 x float>
ret <8 x float> %y
}
Modified: llvm/trunk/test/CodeGen/X86/fp-stack-O0-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-stack-O0-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-stack-O0-crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-stack-O0-crash.ll Fri Feb 27 15:17:42 2015
@@ -11,14 +11,14 @@ entry:
br i1 false, label %cond.true, label %cond.false
cond.true: ; preds = %entry
- %tmp = load x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
- %tmp1 = load x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
+ %tmp = load x86_fp80, x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
+ %tmp1 = load x86_fp80, x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
%cmp = fcmp oeq x86_fp80 %tmp, %tmp1 ; <i1> [#uses=1]
br i1 %cmp, label %if.then, label %if.end
cond.false: ; preds = %entry
- %tmp2 = load x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
- %tmp3 = load x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
+ %tmp2 = load x86_fp80, x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
+ %tmp3 = load x86_fp80, x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
%cmp4 = fcmp une x86_fp80 %tmp2, %tmp3 ; <i1> [#uses=1]
br i1 %cmp4, label %if.then, label %if.end
Modified: llvm/trunk/test/CodeGen/X86/fp-stack-compare-cmov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-stack-compare-cmov.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-stack-compare-cmov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-stack-compare-cmov.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
define float @foo(float* %col.2.0) {
; CHECK: fucompi
; CHECK: fcmov
- %tmp = load float* %col.2.0
+ %tmp = load float, float* %col.2.0
%tmp16 = fcmp olt float %tmp, 0.000000e+00
%tmp20 = fsub float -0.000000e+00, %tmp
%iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
Modified: llvm/trunk/test/CodeGen/X86/fp-stack-compare.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-stack-compare.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-stack-compare.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-stack-compare.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define float @foo(float* %col.2.0) {
; CHECK-NOT: fucompi
; CHECK: j
; CHECK-NOT: fcmov
- %tmp = load float* %col.2.0
+ %tmp = load float, float* %col.2.0
%tmp16 = fcmp olt float %tmp, 0.000000e+00
%tmp20 = fsub float -0.000000e+00, %tmp
%iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
Modified: llvm/trunk/test/CodeGen/X86/fp-stack-ret.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-stack-ret.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-stack-ret.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-stack-ret.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
; CHECK: fldl
; CHECK-NEXT: ret
define double @test1(double *%P) {
- %A = load double* %P
+ %A = load double, double* %P
ret double %A
}
Modified: llvm/trunk/test/CodeGen/X86/fp-stack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-stack.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-stack.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-stack.ll Fri Feb 27 15:17:42 2015
@@ -5,9 +5,9 @@ target triple = "i386-pc-linux-gnu"
define void @foo() nounwind {
entry:
- %tmp6 = load x86_fp80* undef ; <x86_fp80> [#uses=2]
- %tmp15 = load x86_fp80* undef ; <x86_fp80> [#uses=2]
- %tmp24 = load x86_fp80* undef ; <x86_fp80> [#uses=1]
+ %tmp6 = load x86_fp80, x86_fp80* undef ; <x86_fp80> [#uses=2]
+ %tmp15 = load x86_fp80, x86_fp80* undef ; <x86_fp80> [#uses=2]
+ %tmp24 = load x86_fp80, x86_fp80* undef ; <x86_fp80> [#uses=1]
br i1 undef, label %return, label %bb.nph
bb.nph: ; preds = %entry
Modified: llvm/trunk/test/CodeGen/X86/fp2sint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp2sint.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp2sint.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp2sint.ll Fri Feb 27 15:17:42 2015
@@ -4,10 +4,10 @@
define i32 @main(i32 %argc, i8** %argv) {
cond_false.i.i.i: ; preds = %bb.i5
- %tmp35.i = load double* null, align 8 ; <double> [#uses=1]
+ %tmp35.i = load double, double* null, align 8 ; <double> [#uses=1]
%tmp3536.i = fptosi double %tmp35.i to i32 ; <i32> [#uses=1]
%tmp3536140.i = zext i32 %tmp3536.i to i64 ; <i64> [#uses=1]
- %tmp39.i = load double* null, align 4 ; <double> [#uses=1]
+ %tmp39.i = load double, double* null, align 4 ; <double> [#uses=1]
%tmp3940.i = fptosi double %tmp39.i to i32 ; <i32> [#uses=1]
%tmp3940137.i = zext i32 %tmp3940.i to i64 ; <i64> [#uses=1]
%tmp3940137138.i = shl i64 %tmp3940137.i, 32 ; <i64> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/fp_load_cast_fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp_load_cast_fold.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp_load_cast_fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp_load_cast_fold.ll Fri Feb 27 15:17:42 2015
@@ -1,19 +1,19 @@
; RUN: llc < %s -march=x86 | FileCheck %s
define double @short(i16* %P) {
- %V = load i16* %P ; <i16> [#uses=1]
+ %V = load i16, i16* %P ; <i16> [#uses=1]
%V2 = sitofp i16 %V to double ; <double> [#uses=1]
ret double %V2
}
define double @int(i32* %P) {
- %V = load i32* %P ; <i32> [#uses=1]
+ %V = load i32, i32* %P ; <i32> [#uses=1]
%V2 = sitofp i32 %V to double ; <double> [#uses=1]
ret double %V2
}
define double @long(i64* %P) {
- %V = load i64* %P ; <i64> [#uses=1]
+ %V = load i64, i64* %P ; <i64> [#uses=1]
%V2 = sitofp i64 %V to double ; <double> [#uses=1]
ret double %V2
}
Modified: llvm/trunk/test/CodeGen/X86/fp_load_fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp_load_fold.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp_load_fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp_load_fold.ll Fri Feb 27 15:17:42 2015
@@ -4,37 +4,37 @@
; Test that the load of the memory location is folded into the operation.
define double @test_add(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
+ %Y = load double, double* %P ; <double> [#uses=1]
%R = fadd double %X, %Y ; <double> [#uses=1]
ret double %R
}
define double @test_mul(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
+ %Y = load double, double* %P ; <double> [#uses=1]
%R = fmul double %X, %Y ; <double> [#uses=1]
ret double %R
}
define double @test_sub(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
+ %Y = load double, double* %P ; <double> [#uses=1]
%R = fsub double %X, %Y ; <double> [#uses=1]
ret double %R
}
define double @test_subr(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
+ %Y = load double, double* %P ; <double> [#uses=1]
%R = fsub double %Y, %X ; <double> [#uses=1]
ret double %R
}
define double @test_div(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
+ %Y = load double, double* %P ; <double> [#uses=1]
%R = fdiv double %X, %Y ; <double> [#uses=1]
ret double %R
}
define double @test_divr(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
+ %Y = load double, double* %P ; <double> [#uses=1]
%R = fdiv double %Y, %X ; <double> [#uses=1]
ret double %R
}
Modified: llvm/trunk/test/CodeGen/X86/frameallocate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/frameallocate.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/frameallocate.ll (original)
+++ llvm/trunk/test/CodeGen/X86/frameallocate.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ declare i32 @printf(i8*, ...)
define void @print_framealloc_from_fp(i8* %fp) {
%alloc = call i8* @llvm.framerecover(i8* bitcast (void(i32*, i32*)* @alloc_func to i8*), i8* %fp)
%alloc_i32 = bitcast i8* %alloc to i32*
- %r = load i32* %alloc_i32
+ %r = load i32, i32* %alloc_i32
call i32 (i8*, ...)* @printf(i8* getelementptr ([10 x i8]* @str, i32 0, i32 0), i32 %r)
ret void
}
Modified: llvm/trunk/test/CodeGen/X86/full-lsr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/full-lsr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/full-lsr.ll (original)
+++ llvm/trunk/test/CodeGen/X86/full-lsr.ll Fri Feb 27 15:17:42 2015
@@ -19,17 +19,17 @@ entry:
bb: ; preds = %bb, %entry
%i.03 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=5]
%1 = getelementptr float, float* %A, i32 %i.03 ; <float*> [#uses=1]
- %2 = load float* %1, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %1, align 4 ; <float> [#uses=1]
%3 = getelementptr float, float* %B, i32 %i.03 ; <float*> [#uses=1]
- %4 = load float* %3, align 4 ; <float> [#uses=1]
+ %4 = load float, float* %3, align 4 ; <float> [#uses=1]
%5 = fadd float %2, %4 ; <float> [#uses=1]
%6 = getelementptr float, float* %C, i32 %i.03 ; <float*> [#uses=1]
store float %5, float* %6, align 4
%7 = add i32 %i.03, 10 ; <i32> [#uses=3]
%8 = getelementptr float, float* %A, i32 %7 ; <float*> [#uses=1]
- %9 = load float* %8, align 4 ; <float> [#uses=1]
+ %9 = load float, float* %8, align 4 ; <float> [#uses=1]
%10 = getelementptr float, float* %B, i32 %7 ; <float*> [#uses=1]
- %11 = load float* %10, align 4 ; <float> [#uses=1]
+ %11 = load float, float* %10, align 4 ; <float> [#uses=1]
%12 = fadd float %9, %11 ; <float> [#uses=1]
%13 = getelementptr float, float* %C, i32 %7 ; <float*> [#uses=1]
store float %12, float* %13, align 4
Modified: llvm/trunk/test/CodeGen/X86/gather-addresses.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/gather-addresses.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/gather-addresses.ll (original)
+++ llvm/trunk/test/CodeGen/X86/gather-addresses.ll Fri Feb 27 15:17:42 2015
@@ -35,8 +35,8 @@
; WIN: movhpd (%rcx,%r[[REG4]],8), %xmm1
define <4 x double> @foo(double* %p, <4 x i32>* %i, <4 x i32>* %h) nounwind {
- %a = load <4 x i32>* %i
- %b = load <4 x i32>* %h
+ %a = load <4 x i32>, <4 x i32>* %i
+ %b = load <4 x i32>, <4 x i32>* %h
%j = and <4 x i32> %a, %b
%d0 = extractelement <4 x i32> %j, i32 0
%d1 = extractelement <4 x i32> %j, i32 1
@@ -46,10 +46,10 @@ define <4 x double> @foo(double* %p, <4
%q1 = getelementptr double, double* %p, i32 %d1
%q2 = getelementptr double, double* %p, i32 %d2
%q3 = getelementptr double, double* %p, i32 %d3
- %r0 = load double* %q0
- %r1 = load double* %q1
- %r2 = load double* %q2
- %r3 = load double* %q3
+ %r0 = load double, double* %q0
+ %r1 = load double, double* %q1
+ %r2 = load double, double* %q2
+ %r3 = load double, double* %q3
%v0 = insertelement <4 x double> undef, double %r0, i32 0
%v1 = insertelement <4 x double> %v0, double %r1, i32 1
%v2 = insertelement <4 x double> %v1, double %r2, i32 2
@@ -67,8 +67,8 @@ define <4 x double> @foo(double* %p, <4
; LIN32-DAG: {{(mov|and)}}l 8(%esp),
; LIN32-DAG: {{(mov|and)}}l 12(%esp),
define <4 x i64> @old(double* %p, <4 x i32>* %i, <4 x i32>* %h, i64 %f) nounwind {
- %a = load <4 x i32>* %i
- %b = load <4 x i32>* %h
+ %a = load <4 x i32>, <4 x i32>* %i
+ %b = load <4 x i32>, <4 x i32>* %h
%j = and <4 x i32> %a, %b
%d0 = extractelement <4 x i32> %j, i32 0
%d1 = extractelement <4 x i32> %j, i32 1
Modified: llvm/trunk/test/CodeGen/X86/ghc-cc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ghc-cc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ghc-cc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ghc-cc.ll Fri Feb 27 15:17:42 2015
@@ -32,10 +32,10 @@ entry:
; CHECK-NEXT: movl hp, %edi
; CHECK-NEXT: movl sp, %ebp
; CHECK-NEXT: movl base, %ebx
- %0 = load i32* @r1
- %1 = load i32* @hp
- %2 = load i32* @sp
- %3 = load i32* @base
+ %0 = load i32, i32* @r1
+ %1 = load i32, i32* @hp
+ %2 = load i32, i32* @sp
+ %3 = load i32, i32* @base
; CHECK: jmp bar
tail call ghccc void @bar( i32 %3, i32 %2, i32 %1, i32 %0 ) nounwind
ret void
Modified: llvm/trunk/test/CodeGen/X86/ghc-cc64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ghc-cc64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ghc-cc64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ghc-cc64.ll Fri Feb 27 15:17:42 2015
@@ -57,22 +57,22 @@ entry:
; CHECK-NEXT: movq hp(%rip), %r12
; CHECK-NEXT: movq sp(%rip), %rbp
; CHECK-NEXT: movq base(%rip), %r13
- %0 = load double* @d2
- %1 = load double* @d1
- %2 = load float* @f4
- %3 = load float* @f3
- %4 = load float* @f2
- %5 = load float* @f1
- %6 = load i64* @splim
- %7 = load i64* @r6
- %8 = load i64* @r5
- %9 = load i64* @r4
- %10 = load i64* @r3
- %11 = load i64* @r2
- %12 = load i64* @r1
- %13 = load i64* @hp
- %14 = load i64* @sp
- %15 = load i64* @base
+ %0 = load double, double* @d2
+ %1 = load double, double* @d1
+ %2 = load float, float* @f4
+ %3 = load float, float* @f3
+ %4 = load float, float* @f2
+ %5 = load float, float* @f1
+ %6 = load i64, i64* @splim
+ %7 = load i64, i64* @r6
+ %8 = load i64, i64* @r5
+ %9 = load i64, i64* @r4
+ %10 = load i64, i64* @r3
+ %11 = load i64, i64* @r2
+ %12 = load i64, i64* @r1
+ %13 = load i64, i64* @hp
+ %14 = load i64, i64* @sp
+ %15 = load i64, i64* @base
; CHECK: jmp bar
tail call ghccc void @bar( i64 %15, i64 %14, i64 %13, i64 %12, i64 %11,
i64 %10, i64 %9, i64 %8, i64 %7, i64 %6,
Modified: llvm/trunk/test/CodeGen/X86/gs-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/gs-fold.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/gs-fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/gs-fold.ll Fri Feb 27 15:17:42 2015
@@ -6,9 +6,9 @@ target datalayout = "e-p:64:64:64-i1:8:8
define i32 @test() nounwind uwtable {
entry:
- %0 = load volatile %struct.thread* addrspace(256)* null
+ %0 = load volatile %struct.thread*, %struct.thread* addrspace(256)* null
%c = getelementptr inbounds %struct.thread, %struct.thread* %0, i64 0, i32 2
- %1 = load i32* %c, align 4
+ %1 = load i32, i32* %c, align 4
ret i32 %1
}
Modified: llvm/trunk/test/CodeGen/X86/h-register-addressing-32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/h-register-addressing-32.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/h-register-addressing-32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/h-register-addressing-32.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define double @foo8(double* nocapture in
%t0 = lshr i32 %x, 8
%t1 = and i32 %t0, 255
%t2 = getelementptr double, double* %p, i32 %t1
- %t3 = load double* %t2, align 8
+ %t3 = load double, double* %t2, align 8
ret double %t3
}
; CHECK: foo8:
@@ -16,7 +16,7 @@ define float @foo4(float* nocapture inre
%t0 = lshr i32 %x, 8
%t1 = and i32 %t0, 255
%t2 = getelementptr float, float* %p, i32 %t1
- %t3 = load float* %t2, align 8
+ %t3 = load float, float* %t2, align 8
ret float %t3
}
; CHECK: foo4:
@@ -26,7 +26,7 @@ define i16 @foo2(i16* nocapture inreg %p
%t0 = lshr i32 %x, 8
%t1 = and i32 %t0, 255
%t2 = getelementptr i16, i16* %p, i32 %t1
- %t3 = load i16* %t2, align 8
+ %t3 = load i16, i16* %t2, align 8
ret i16 %t3
}
; CHECK: foo2:
@@ -36,7 +36,7 @@ define i8 @foo1(i8* nocapture inreg %p,
%t0 = lshr i32 %x, 8
%t1 = and i32 %t0, 255
%t2 = getelementptr i8, i8* %p, i32 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: foo1:
@@ -46,7 +46,7 @@ define i8 @bar8(i8* nocapture inreg %p,
%t0 = lshr i32 %x, 5
%t1 = and i32 %t0, 2040
%t2 = getelementptr i8, i8* %p, i32 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: bar8:
@@ -56,7 +56,7 @@ define i8 @bar4(i8* nocapture inreg %p,
%t0 = lshr i32 %x, 6
%t1 = and i32 %t0, 1020
%t2 = getelementptr i8, i8* %p, i32 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: bar4:
@@ -66,7 +66,7 @@ define i8 @bar2(i8* nocapture inreg %p,
%t0 = lshr i32 %x, 7
%t1 = and i32 %t0, 510
%t2 = getelementptr i8, i8* %p, i32 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: bar2:
Modified: llvm/trunk/test/CodeGen/X86/h-register-addressing-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/h-register-addressing-64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/h-register-addressing-64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/h-register-addressing-64.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define double @foo8(double* nocapture in
%t0 = lshr i64 %x, 8
%t1 = and i64 %t0, 255
%t2 = getelementptr double, double* %p, i64 %t1
- %t3 = load double* %t2, align 8
+ %t3 = load double, double* %t2, align 8
ret double %t3
}
; CHECK: foo8:
@@ -16,7 +16,7 @@ define float @foo4(float* nocapture inre
%t0 = lshr i64 %x, 8
%t1 = and i64 %t0, 255
%t2 = getelementptr float, float* %p, i64 %t1
- %t3 = load float* %t2, align 8
+ %t3 = load float, float* %t2, align 8
ret float %t3
}
; CHECK: foo4:
@@ -26,7 +26,7 @@ define i16 @foo2(i16* nocapture inreg %p
%t0 = lshr i64 %x, 8
%t1 = and i64 %t0, 255
%t2 = getelementptr i16, i16* %p, i64 %t1
- %t3 = load i16* %t2, align 8
+ %t3 = load i16, i16* %t2, align 8
ret i16 %t3
}
; CHECK: foo2:
@@ -36,7 +36,7 @@ define i8 @foo1(i8* nocapture inreg %p,
%t0 = lshr i64 %x, 8
%t1 = and i64 %t0, 255
%t2 = getelementptr i8, i8* %p, i64 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: foo1:
@@ -46,7 +46,7 @@ define i8 @bar8(i8* nocapture inreg %p,
%t0 = lshr i64 %x, 5
%t1 = and i64 %t0, 2040
%t2 = getelementptr i8, i8* %p, i64 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: bar8:
@@ -56,7 +56,7 @@ define i8 @bar4(i8* nocapture inreg %p,
%t0 = lshr i64 %x, 6
%t1 = and i64 %t0, 1020
%t2 = getelementptr i8, i8* %p, i64 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: bar4:
@@ -66,7 +66,7 @@ define i8 @bar2(i8* nocapture inreg %p,
%t0 = lshr i64 %x, 7
%t1 = and i64 %t0, 510
%t2 = getelementptr i8, i8* %p, i64 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: bar2:
Modified: llvm/trunk/test/CodeGen/X86/half.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/half.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/half.ll (original)
+++ llvm/trunk/test/CodeGen/X86/half.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ define void @test_load_store(half* %in,
; CHECK-LABEL: test_load_store:
; CHECK: movw (%rdi), [[TMP:%[a-z0-9]+]]
; CHECK: movw [[TMP]], (%rsi)
- %val = load half* %in
+ %val = load half, half* %in
store half %val, half* %out
ret void
}
@@ -13,7 +13,7 @@ define void @test_load_store(half* %in,
define i16 @test_bitcast_from_half(half* %addr) {
; CHECK-LABEL: test_bitcast_from_half:
; CHECK: movzwl (%rdi), %eax
- %val = load half* %addr
+ %val = load half, half* %addr
%val_int = bitcast half %val to i16
ret i16 %val_int
}
@@ -31,7 +31,7 @@ define float @test_extend32(half* %addr)
; CHECK-LIBCALL: jmp __gnu_h2f_ieee
; CHECK-FP16: vcvtph2ps
- %val16 = load half* %addr
+ %val16 = load half, half* %addr
%val32 = fpext half %val16 to float
ret float %val32
}
@@ -43,7 +43,7 @@ define double @test_extend64(half* %addr
; CHECK-LIBCALL: cvtss2sd
; CHECK-FP16: vcvtph2ps
; CHECK-FP16: vcvtss2sd
- %val16 = load half* %addr
+ %val16 = load half, half* %addr
%val32 = fpext half %val16 to double
ret double %val32
}
Modified: llvm/trunk/test/CodeGen/X86/hidden-vis-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/hidden-vis-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/hidden-vis-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/hidden-vis-2.ll Fri Feb 27 15:17:42 2015
@@ -5,6 +5,6 @@
define i32 @t() nounwind readonly {
entry:
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @x, align 4 ; <i32> [#uses=1]
ret i32 %0
}
Modified: llvm/trunk/test/CodeGen/X86/hidden-vis-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/hidden-vis-3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/hidden-vis-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/hidden-vis-3.ll Fri Feb 27 15:17:42 2015
@@ -12,8 +12,8 @@ entry:
; X64: _t:
; X64: movl _y(%rip), %eax
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
- %1 = load i32* @y, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @x, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* @y, align 4 ; <i32> [#uses=1]
%2 = add i32 %1, %0 ; <i32> [#uses=1]
ret i32 %2
}
Modified: llvm/trunk/test/CodeGen/X86/hidden-vis-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/hidden-vis-4.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/hidden-vis-4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/hidden-vis-4.ll Fri Feb 27 15:17:42 2015
@@ -7,6 +7,6 @@ entry:
; CHECK-LABEL: t:
; CHECK: movl _x, %eax
; CHECK: .comm _x,4
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @x, align 4 ; <i32> [#uses=1]
ret i32 %0
}
Modified: llvm/trunk/test/CodeGen/X86/hidden-vis-pic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/hidden-vis-pic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/hidden-vis-pic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/hidden-vis-pic.ll Fri Feb 27 15:17:42 2015
@@ -45,6 +45,6 @@ entry:
br label %return
return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval1
}
Modified: llvm/trunk/test/CodeGen/X86/hipe-cc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/hipe-cc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/hipe-cc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/hipe-cc.ll Fri Feb 27 15:17:42 2015
@@ -53,18 +53,18 @@ entry:
; CHECK-NEXT: movl 12(%esp), %ebp
; CHECK-NEXT: movl 8(%esp), %eax
; CHECK-NEXT: movl 4(%esp), %edx
- %0 = load i32* %hp_var
- %1 = load i32* %p_var
- %2 = load i32* %arg0_var
- %3 = load i32* %arg1_var
- %4 = load i32* %arg2_var
+ %0 = load i32, i32* %hp_var
+ %1 = load i32, i32* %p_var
+ %2 = load i32, i32* %arg0_var
+ %3 = load i32, i32* %arg1_var
+ %4 = load i32, i32* %arg2_var
; CHECK: jmp bar
tail call cc 11 void @bar(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4) nounwind
ret void
}
define cc 11 void @baz() nounwind {
- %tmp_clos = load i32* @clos
+ %tmp_clos = load i32, i32* @clos
%tmp_clos2 = inttoptr i32 %tmp_clos to i32*
%indirect_call = bitcast i32* %tmp_clos2 to void (i32, i32, i32)*
; CHECK: movl $42, %eax
Modified: llvm/trunk/test/CodeGen/X86/hipe-cc64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/hipe-cc64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/hipe-cc64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/hipe-cc64.ll Fri Feb 27 15:17:42 2015
@@ -62,19 +62,19 @@ entry:
; CHECK-NEXT: movq 24(%rsp), %rsi
; CHECK-NEXT: movq 16(%rsp), %rdx
; CHECK-NEXT: movq 8(%rsp), %rcx
- %0 = load i64* %hp_var
- %1 = load i64* %p_var
- %2 = load i64* %arg0_var
- %3 = load i64* %arg1_var
- %4 = load i64* %arg2_var
- %5 = load i64* %arg3_var
+ %0 = load i64, i64* %hp_var
+ %1 = load i64, i64* %p_var
+ %2 = load i64, i64* %arg0_var
+ %3 = load i64, i64* %arg1_var
+ %4 = load i64, i64* %arg2_var
+ %5 = load i64, i64* %arg3_var
; CHECK: jmp bar
tail call cc 11 void @bar(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5) nounwind
ret void
}
define cc 11 void @baz() nounwind {
- %tmp_clos = load i64* @clos
+ %tmp_clos = load i64, i64* @clos
%tmp_clos2 = inttoptr i64 %tmp_clos to i64*
%indirect_call = bitcast i64* %tmp_clos2 to void (i64, i64, i64)*
; CHECK: movl $42, %esi
Modified: llvm/trunk/test/CodeGen/X86/hoist-invariant-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/hoist-invariant-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/hoist-invariant-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/hoist-invariant-load.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ entry:
for.body: ; preds = %for.body, %entry
%i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8, !invariant.load !0
+ %0 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8, !invariant.load !0
%call = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %x, i8* %0)
%inc = add i32 %i.01, 1
%exitcond = icmp eq i32 %inc, 10000
Modified: llvm/trunk/test/CodeGen/X86/i128-mul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/i128-mul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/i128-mul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/i128-mul.ll Fri Feb 27 15:17:42 2015
@@ -27,7 +27,7 @@ for.body:
%carry.013 = phi i64 [ %conv6, %for.body ], [ 0, %entry ]
%i.012 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i64, i64* %x, i64 %i.012
- %0 = load i64* %arrayidx, align 8
+ %0 = load i64, i64* %arrayidx, align 8
%conv2 = zext i64 %0 to i128
%mul = mul i128 %conv2, %conv
%conv3 = zext i64 %carry.013 to i128
Modified: llvm/trunk/test/CodeGen/X86/i128-ret.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/i128-ret.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/i128-ret.ll (original)
+++ llvm/trunk/test/CodeGen/X86/i128-ret.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
; CHECK: movq 8([[A0]]), %rdx
define i128 @test(i128 *%P) {
- %A = load i128* %P
+ %A = load i128, i128* %P
ret i128 %A
}
Modified: llvm/trunk/test/CodeGen/X86/i1narrowfail.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/i1narrowfail.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/i1narrowfail.ll (original)
+++ llvm/trunk/test/CodeGen/X86/i1narrowfail.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
; CHECK-LABEL: @foo
; CHECK: orb $16
define void @foo(i64* %ptr) {
- %r11 = load i64* %ptr, align 8
+ %r11 = load i64, i64* %ptr, align 8
%r12 = or i64 16, %r11
store i64 %r12, i64* %ptr, align 8
ret void
Modified: llvm/trunk/test/CodeGen/X86/i256-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/i256-add.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/i256-add.ll (original)
+++ llvm/trunk/test/CodeGen/X86/i256-add.ll Fri Feb 27 15:17:42 2015
@@ -3,15 +3,15 @@
; RUN: grep sbbl %t | count 7
define void @add(i256* %p, i256* %q) nounwind {
- %a = load i256* %p
- %b = load i256* %q
+ %a = load i256, i256* %p
+ %b = load i256, i256* %q
%c = add i256 %a, %b
store i256 %c, i256* %p
ret void
}
define void @sub(i256* %p, i256* %q) nounwind {
- %a = load i256* %p
- %b = load i256* %q
+ %a = load i256, i256* %p
+ %b = load i256, i256* %q
%c = sub i256 %a, %b
store i256 %c, i256* %p
ret void
Modified: llvm/trunk/test/CodeGen/X86/i2k.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/i2k.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/i2k.ll (original)
+++ llvm/trunk/test/CodeGen/X86/i2k.ll Fri Feb 27 15:17:42 2015
@@ -1,8 +1,8 @@
; RUN: llc < %s -march=x86
define void @foo(i2011* %x, i2011* %y, i2011* %p) nounwind {
- %a = load i2011* %x
- %b = load i2011* %y
+ %a = load i2011, i2011* %x
+ %b = load i2011, i2011* %y
%c = add i2011 %a, %b
store i2011 %c, i2011* %p
ret void
Modified: llvm/trunk/test/CodeGen/X86/i486-fence-loop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/i486-fence-loop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/i486-fence-loop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/i486-fence-loop.ll Fri Feb 27 15:17:42 2015
@@ -16,9 +16,9 @@ entry:
br label %while.body
while.body:
- %0 = load volatile i32* %addr, align 4
+ %0 = load volatile i32, i32* %addr, align 4
fence seq_cst
- %1 = load volatile i32* %addr, align 4
+ %1 = load volatile i32, i32* %addr, align 4
%cmp = icmp sgt i32 %1, %0
br i1 %cmp, label %while.body, label %if.then
Modified: llvm/trunk/test/CodeGen/X86/i64-mem-copy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/i64-mem-copy.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/i64-mem-copy.ll (original)
+++ llvm/trunk/test/CodeGen/X86/i64-mem-copy.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@
define void @foo(i64* %x, i64* %y) nounwind {
entry:
- %tmp1 = load i64* %y, align 8 ; <i64> [#uses=1]
+ %tmp1 = load i64, i64* %y, align 8 ; <i64> [#uses=1]
store i64 %tmp1, i64* %x, align 8
ret void
}
Modified: llvm/trunk/test/CodeGen/X86/inline-asm-fpstack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/inline-asm-fpstack.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/inline-asm-fpstack.ll (original)
+++ llvm/trunk/test/CodeGen/X86/inline-asm-fpstack.ll Fri Feb 27 15:17:42 2015
@@ -169,11 +169,11 @@ entry:
; CHECK: testPR4485
define void @testPR4485(x86_fp80* %a) {
entry:
- %0 = load x86_fp80* %a, align 16
+ %0 = load x86_fp80, x86_fp80* %a, align 16
%1 = fmul x86_fp80 %0, 0xK4006B400000000000000
%2 = fmul x86_fp80 %1, 0xK4012F424000000000000
tail call void asm sideeffect "fistpl $0", "{st},~{st}"(x86_fp80 %2)
- %3 = load x86_fp80* %a, align 16
+ %3 = load x86_fp80, x86_fp80* %a, align 16
%4 = fmul x86_fp80 %3, 0xK4006B400000000000000
%5 = fmul x86_fp80 %4, 0xK4012F424000000000000
tail call void asm sideeffect "fistpl $0", "{st},~{st}"(x86_fp80 %5)
@@ -367,7 +367,7 @@ entry:
; Function Attrs: ssp
define void @test_live_st(i32 %a1) {
entry:
- %0 = load x86_fp80* undef, align 16
+ %0 = load x86_fp80, x86_fp80* undef, align 16
%cond = icmp eq i32 %a1, 1
br i1 %cond, label %sw.bb4.i, label %_Z5tointRKe.exit
Modified: llvm/trunk/test/CodeGen/X86/inline-asm-out-regs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/inline-asm-out-regs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/inline-asm-out-regs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/inline-asm-out-regs.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ entry:
br label %bb1.i
bb1.i: ; preds = %bb6.i.i, %bb1.i, %entry
- %0 = load i32* null, align 8 ; <i32> [#uses=1]
+ %0 = load i32, i32* null, align 8 ; <i32> [#uses=1]
%1 = icmp ugt i32 %0, 1048575 ; <i1> [#uses=1]
br i1 %1, label %bb2.i, label %bb1.i
@@ -19,7 +19,7 @@ bb2.i: ; preds = %bb1.i
; <i32> [#uses=1]
%2 = lshr i32 %asmresult2.i.i, 8 ; <i32> [#uses=1]
%3 = trunc i32 %2 to i8 ; <i8> [#uses=1]
- %4 = load i32* @pcibios_last_bus, align 4 ; <i32> [#uses=1]
+ %4 = load i32, i32* @pcibios_last_bus, align 4 ; <i32> [#uses=1]
%5 = icmp slt i32 %4, 0 ; <i1> [#uses=1]
br i1 %5, label %bb5.i.i, label %bb6.i.i
Modified: llvm/trunk/test/CodeGen/X86/inline-asm-ptr-cast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/inline-asm-ptr-cast.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/inline-asm-ptr-cast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/inline-asm-ptr-cast.ll Fri Feb 27 15:17:42 2015
@@ -16,12 +16,12 @@ entry:
store i64 1, i64* %flags, align 8
store i64 -1, i64* %newflags, align 8
%0 = bitcast i32* %dst to i8*
- %tmp = load i64* %flags, align 8
+ %tmp = load i64, i64* %flags, align 8
%and = and i64 %tmp, 1
%1 = bitcast i32* %src to i8*
- %tmp1 = load i8* %1
+ %tmp1 = load i8, i8* %1
%2 = bitcast i32* %dst to i8*
- %tmp2 = load i8* %2
+ %tmp2 = load i8, i8* %2
call void asm "pushfq \0Aandq $2, (%rsp) \0Aorq $3, (%rsp) \0Apopfq \0Aaddb $4, $1 \0Apushfq \0Apopq $0 \0A", "=*&rm,=*&rm,i,r,r,1,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %newflags, i8* %0, i64 -2, i64 %and, i8 %tmp1, i8 %tmp2) nounwind
ret void
}
Modified: llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign.ll (original)
+++ llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign.ll Fri Feb 27 15:17:42 2015
@@ -11,6 +11,6 @@ entry:
%r = alloca i32, align 16
store i32 -1, i32* %r, align 16
call void asm sideeffect inteldialect "push esi\0A\09xor esi, esi\0A\09mov dword ptr $0, esi\0A\09pop esi", "=*m,~{flags},~{esi},~{esp},~{dirflag},~{fpsr},~{flags}"(i32* %r)
- %0 = load i32* %r, align 16
+ %0 = load i32, i32* %r, align 16
ret i32 %0
}
Modified: llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign2.ll Fri Feb 27 15:17:42 2015
@@ -11,6 +11,6 @@ entry:
%r = alloca i32, align 16
store i32 -1, i32* %r, align 16
call void asm sideeffect "push %esi\0A\09xor %esi, %esi\0A\09mov %esi, $0\0A\09pop %esi", "=*m,~{flags},~{esi},~{esp},~{dirflag},~{fpsr},~{flags}"(i32* %r)
- %0 = load i32* %r, align 16
+ %0 = load i32, i32* %r, align 16
ret i32 %0
}
Modified: llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign3.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ doit:
br label %skip
skip:
- %0 = load i32* %r, align 128
+ %0 = load i32, i32* %r, align 128
ret i32 %0
}
Modified: llvm/trunk/test/CodeGen/X86/inline-asm-tied.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/inline-asm-tied.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/inline-asm-tied.ll (original)
+++ llvm/trunk/test/CodeGen/X86/inline-asm-tied.ll Fri Feb 27 15:17:42 2015
@@ -11,12 +11,12 @@ entry:
%retval = alloca i64 ; <i64*> [#uses=2]
%_data.addr = alloca i64 ; <i64*> [#uses=4]
store i64 %_data, i64* %_data.addr
- %tmp = load i64* %_data.addr ; <i64> [#uses=1]
+ %tmp = load i64, i64* %_data.addr ; <i64> [#uses=1]
%0 = call i64 asm "bswap %eax\0A\09bswap %edx\0A\09xchgl %eax, %edx", "=A,0,~{dirflag},~{fpsr},~{flags}"(i64 %tmp) nounwind ; <i64> [#uses=1]
store i64 %0, i64* %_data.addr
- %tmp1 = load i64* %_data.addr ; <i64> [#uses=1]
+ %tmp1 = load i64, i64* %_data.addr ; <i64> [#uses=1]
store i64 %tmp1, i64* %retval
- %1 = load i64* %retval ; <i64> [#uses=1]
+ %1 = load i64, i64* %retval ; <i64> [#uses=1]
ret i64 %1
}
Modified: llvm/trunk/test/CodeGen/X86/ins_split_regalloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ins_split_regalloc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ins_split_regalloc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ins_split_regalloc.ll Fri Feb 27 15:17:42 2015
@@ -25,7 +25,7 @@ target datalayout = "e-i64:64-f80:128-s:
; CHECK: jmpq *[[F_ADDR_TC]]
define void @test(i32 %a, i32 %b, i32 %c) {
entry:
- %fct_f = load void (i32)** @f, align 8
+ %fct_f = load void (i32)*, void (i32)** @f, align 8
tail call void %fct_f(i32 %a)
tail call void %fct_f(i32 %b)
tail call void %fct_f(i32 %c)
Modified: llvm/trunk/test/CodeGen/X86/ins_subreg_coalesce-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ins_subreg_coalesce-1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ins_subreg_coalesce-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ins_subreg_coalesce-1.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ bb22: ; preds = %bb4
bb4.i: ; preds = %bb22
ret i32 0
walkExprTree.exit: ; preds = %bb22
- %tmp83 = load i16* null, align 4 ; <i16> [#uses=1]
+ %tmp83 = load i16, i16* null, align 4 ; <i16> [#uses=1]
%tmp84 = or i16 %tmp83, 2 ; <i16> [#uses=2]
store i16 %tmp84, i16* null, align 4
%tmp98993 = zext i16 %tmp84 to i32 ; <i32> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/ins_subreg_coalesce-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ins_subreg_coalesce-3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ins_subreg_coalesce-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ins_subreg_coalesce-3.ll Fri Feb 27 15:17:42 2015
@@ -35,7 +35,7 @@ bb428: ; preds = %bb366, %bb304
bb433: ; preds = %bb428
ret void
bb650: ; preds = %bb650, %bb428
- %tmp658 = load i8* null, align 8 ; <i8> [#uses=1]
+ %tmp658 = load i8, i8* null, align 8 ; <i8> [#uses=1]
%tmp659 = icmp eq i8 %tmp658, 0 ; <i1> [#uses=1]
br i1 %tmp659, label %bb650, label %bb662
bb662: ; preds = %bb650
@@ -43,7 +43,7 @@ bb662: ; preds = %bb650
bb688: ; preds = %bb662
ret void
bb761: ; preds = %bb662
- %tmp487248736542 = load i32* null, align 4 ; <i32> [#uses=2]
+ %tmp487248736542 = load i32, i32* null, align 4 ; <i32> [#uses=2]
%tmp487648776541 = and i32 %tmp487248736542, 57344 ; <i32> [#uses=1]
%tmp4881 = icmp eq i32 %tmp487648776541, 8192 ; <i1> [#uses=1]
br i1 %tmp4881, label %bb4884, label %bb4897
@@ -54,10 +54,10 @@ bb4884: ; preds = %bb761
bb4897: ; preds = %bb4884, %bb761
ret void
bb4932: ; preds = %bb4884
- %tmp4933 = load i32* null, align 4 ; <i32> [#uses=1]
+ %tmp4933 = load i32, i32* null, align 4 ; <i32> [#uses=1]
br i1 %foo, label %bb5054, label %bb4940
bb4940: ; preds = %bb4932
- %tmp4943 = load i32* null, align 4 ; <i32> [#uses=2]
+ %tmp4943 = load i32, i32* null, align 4 ; <i32> [#uses=2]
switch i32 %tmp4933, label %bb5054 [
i32 159, label %bb4970
i32 160, label %bb5002
@@ -67,10 +67,10 @@ bb4970: ; preds = %bb4940
%tmp49764977 = and i16 %tmp49746536, 4095 ; <i16> [#uses=1]
%mask498049814982 = zext i16 %tmp49764977 to i64 ; <i64> [#uses=1]
%tmp4984 = getelementptr %struct.FONT_INFO, %struct.FONT_INFO* null, i64 %mask498049814982, i32 5 ; <%struct.rec**> [#uses=1]
- %tmp4985 = load %struct.rec** %tmp4984, align 8 ; <%struct.rec*> [#uses=1]
+ %tmp4985 = load %struct.rec*, %struct.rec** %tmp4984, align 8 ; <%struct.rec*> [#uses=1]
%tmp4988 = getelementptr %struct.rec, %struct.rec* %tmp4985, i64 0, i32 0, i32 3 ; <%struct.THIRD_UNION*> [#uses=1]
%tmp4991 = bitcast %struct.THIRD_UNION* %tmp4988 to i32* ; <i32*> [#uses=1]
- %tmp4992 = load i32* %tmp4991, align 8 ; <i32> [#uses=1]
+ %tmp4992 = load i32, i32* %tmp4991, align 8 ; <i32> [#uses=1]
%tmp49924993 = trunc i32 %tmp4992 to i16 ; <i16> [#uses=1]
%tmp4996 = add i16 %tmp49924993, 0 ; <i16> [#uses=1]
br label %bb5054
@@ -79,10 +79,10 @@ bb5002: ; preds = %bb4940
%tmp50085009 = and i16 %tmp50066537, 4095 ; <i16> [#uses=1]
%mask501250135014 = zext i16 %tmp50085009 to i64 ; <i64> [#uses=1]
%tmp5016 = getelementptr %struct.FONT_INFO, %struct.FONT_INFO* null, i64 %mask501250135014, i32 5 ; <%struct.rec**> [#uses=1]
- %tmp5017 = load %struct.rec** %tmp5016, align 8 ; <%struct.rec*> [#uses=1]
+ %tmp5017 = load %struct.rec*, %struct.rec** %tmp5016, align 8 ; <%struct.rec*> [#uses=1]
%tmp5020 = getelementptr %struct.rec, %struct.rec* %tmp5017, i64 0, i32 0, i32 3 ; <%struct.THIRD_UNION*> [#uses=1]
%tmp5023 = bitcast %struct.THIRD_UNION* %tmp5020 to i32* ; <i32*> [#uses=1]
- %tmp5024 = load i32* %tmp5023, align 8 ; <i32> [#uses=1]
+ %tmp5024 = load i32, i32* %tmp5023, align 8 ; <i32> [#uses=1]
%tmp50245025 = trunc i32 %tmp5024 to i16 ; <i16> [#uses=1]
%tmp5028 = sub i16 %tmp50245025, 0 ; <i16> [#uses=1]
br label %bb5054
Modified: llvm/trunk/test/CodeGen/X86/insertps-O0-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/insertps-O0-bug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/insertps-O0-bug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/insertps-O0-bug.ll Fri Feb 27 15:17:42 2015
@@ -40,11 +40,11 @@ define <4 x float> @test(<4 x float> %a,
; CHECK: insertps $64, [[REG]],
; CHECK: ret
entry:
- %0 = load <4 x float>* %b, align 16
+ %0 = load <4 x float>, <4 x float>* %b, align 16
%1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %0, i32 64)
%2 = alloca <4 x float>, align 16
store <4 x float> %1, <4 x float>* %2, align 16
- %3 = load <4 x float>* %2, align 16
+ %3 = load <4 x float>, <4 x float>* %2, align 16
ret <4 x float> %3
}
Modified: llvm/trunk/test/CodeGen/X86/invalid-shift-immediate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/invalid-shift-immediate.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/invalid-shift-immediate.ll (original)
+++ llvm/trunk/test/CodeGen/X86/invalid-shift-immediate.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ entry:
%x_addr = alloca i32 ; <i32*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store i32 %x, i32* %x_addr
- %tmp = load i32* %x_addr, align 4 ; <i32> [#uses=1]
+ %tmp = load i32, i32* %x_addr, align 4 ; <i32> [#uses=1]
%tmp1 = ashr i32 %tmp, -2 ; <i32> [#uses=1]
%tmp2 = and i32 %tmp1, 1 ; <i32> [#uses=1]
%tmp23 = trunc i32 %tmp2 to i8 ; <i8> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/isel-optnone.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/isel-optnone.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/isel-optnone.ll (original)
+++ llvm/trunk/test/CodeGen/X86/isel-optnone.ll Fri Feb 27 15:17:42 2015
@@ -2,9 +2,9 @@
define i32* @fooOptnone(i32* %p, i32* %q, i32** %z) #0 {
entry:
- %r = load i32* %p
- %s = load i32* %q
- %y = load i32** %z
+ %r = load i32, i32* %p
+ %s = load i32, i32* %q
+ %y = load i32*, i32** %z
%t0 = add i32 %r, %s
%t1 = add i32 %t0, 1
@@ -21,9 +21,9 @@ entry:
define i32* @fooNormal(i32* %p, i32* %q, i32** %z) #1 {
entry:
- %r = load i32* %p
- %s = load i32* %q
- %y = load i32** %z
+ %r = load i32, i32* %p
+ %s = load i32, i32* %q
+ %y = load i32*, i32** %z
%t0 = add i32 %r, %s
%t1 = add i32 %t0, 1
Modified: llvm/trunk/test/CodeGen/X86/isel-sink.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/isel-sink.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/isel-sink.ll (original)
+++ llvm/trunk/test/CodeGen/X86/isel-sink.ll Fri Feb 27 15:17:42 2015
@@ -18,6 +18,6 @@ T:
store i32 4, i32* %P
ret i32 141
F:
- %V = load i32* %P
+ %V = load i32, i32* %P
ret i32 %V
}
Modified: llvm/trunk/test/CodeGen/X86/isel-sink2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/isel-sink2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/isel-sink2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/isel-sink2.ll Fri Feb 27 15:17:42 2015
@@ -5,13 +5,13 @@
define i8 @test(i32 *%P) nounwind {
%Q = getelementptr i32, i32* %P, i32 1
%R = bitcast i32* %Q to i8*
- %S = load i8* %R
+ %S = load i8, i8* %R
%T = icmp eq i8 %S, 0
br i1 %T, label %TB, label %F
TB:
ret i8 4
F:
%U = getelementptr i8, i8* %R, i32 3
- %V = load i8* %U
+ %V = load i8, i8* %U
ret i8 %V
}
Modified: llvm/trunk/test/CodeGen/X86/isel-sink3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/isel-sink3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/isel-sink3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/isel-sink3.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ target triple = "i386-apple-darwin7"
define i32 @bar(i32** %P) nounwind {
entry:
- %0 = load i32** %P, align 4 ; <i32*> [#uses=2]
+ %0 = load i32*, i32** %P, align 4 ; <i32*> [#uses=2]
%1 = getelementptr i32, i32* %0, i32 1 ; <i32*> [#uses=1]
%2 = icmp ugt i32* %1, inttoptr (i64 1233 to i32*) ; <i1> [#uses=1]
br i1 %2, label %bb1, label %bb
@@ -22,6 +22,6 @@ bb: ; preds = %entry
bb1: ; preds = %entry, %bb
%3 = getelementptr i32, i32* %1, i32 1 ; <i32*> [#uses=1]
- %4 = load i32* %3, align 4 ; <i32> [#uses=1]
+ %4 = load i32, i32* %3, align 4 ; <i32> [#uses=1]
ret i32 %4
}
Modified: llvm/trunk/test/CodeGen/X86/jump_sign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/jump_sign.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/jump_sign.ll (original)
+++ llvm/trunk/test/CodeGen/X86/jump_sign.ll Fri Feb 27 15:17:42 2015
@@ -164,7 +164,7 @@ entry:
; PR://13046
define void @func_o() nounwind uwtable {
entry:
- %0 = load i16* undef, align 2
+ %0 = load i16, i16* undef, align 2
br i1 undef, label %if.then.i, label %if.end.i
if.then.i: ; preds = %entry
@@ -238,7 +238,7 @@ entry:
; CHECK: j
; CHECK-NOT: sub
; CHECK: ret
- %0 = load i32* %offset, align 8
+ %0 = load i32, i32* %offset, align 8
%cmp = icmp slt i32 %0, %size
br i1 %cmp, label %return, label %if.end
@@ -287,10 +287,10 @@ entry:
; CHECK: andb
; CHECK: j
; CHECK: ret
- %0 = load i32* @b, align 4
+ %0 = load i32, i32* @b, align 4
%cmp = icmp ult i32 %0, %p1
%conv = zext i1 %cmp to i32
- %1 = load i32* @a, align 4
+ %1 = load i32, i32* @a, align 4
%and = and i32 %conv, %1
%conv1 = trunc i32 %and to i8
%2 = urem i8 %conv1, 3
Modified: llvm/trunk/test/CodeGen/X86/large-constants.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/large-constants.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/large-constants.ll (original)
+++ llvm/trunk/test/CodeGen/X86/large-constants.ll Fri Feb 27 15:17:42 2015
@@ -40,10 +40,10 @@ fail:
define void @constant_expressions() {
entry:
- %0 = load i64* inttoptr (i64 add (i64 51250129900, i64 0) to i64*)
- %1 = load i64* inttoptr (i64 add (i64 51250129900, i64 8) to i64*)
- %2 = load i64* inttoptr (i64 add (i64 51250129900, i64 16) to i64*)
- %3 = load i64* inttoptr (i64 add (i64 51250129900, i64 24) to i64*)
+ %0 = load i64, i64* inttoptr (i64 add (i64 51250129900, i64 0) to i64*)
+ %1 = load i64, i64* inttoptr (i64 add (i64 51250129900, i64 8) to i64*)
+ %2 = load i64, i64* inttoptr (i64 add (i64 51250129900, i64 16) to i64*)
+ %3 = load i64, i64* inttoptr (i64 add (i64 51250129900, i64 24) to i64*)
%4 = add i64 %0, %1
%5 = add i64 %2, %3
%6 = add i64 %4, %5
@@ -54,10 +54,10 @@ entry:
define void @constant_expressions2() {
entry:
- %0 = load i64* inttoptr (i64 51250129900 to i64*)
- %1 = load i64* inttoptr (i64 51250129908 to i64*)
- %2 = load i64* inttoptr (i64 51250129916 to i64*)
- %3 = load i64* inttoptr (i64 51250129924 to i64*)
+ %0 = load i64, i64* inttoptr (i64 51250129900 to i64*)
+ %1 = load i64, i64* inttoptr (i64 51250129908 to i64*)
+ %2 = load i64, i64* inttoptr (i64 51250129916 to i64*)
+ %3 = load i64, i64* inttoptr (i64 51250129924 to i64*)
%4 = add i64 %0, %1
%5 = add i64 %2, %3
%6 = add i64 %4, %5
Modified: llvm/trunk/test/CodeGen/X86/ldzero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ldzero.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ldzero.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ldzero.ll Fri Feb 27 15:17:42 2015
@@ -11,15 +11,15 @@ entry:
%d = alloca double, align 8 ; <double*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store double 0.000000e+00, double* %d, align 8
- %tmp1 = load double* %d, align 8 ; <double> [#uses=1]
+ %tmp1 = load double, double* %d, align 8 ; <double> [#uses=1]
%tmp12 = fpext double %tmp1 to x86_fp80 ; <x86_fp80> [#uses=1]
store x86_fp80 %tmp12, x86_fp80* %tmp, align 16
- %tmp3 = load x86_fp80* %tmp, align 16 ; <x86_fp80> [#uses=1]
+ %tmp3 = load x86_fp80, x86_fp80* %tmp, align 16 ; <x86_fp80> [#uses=1]
store x86_fp80 %tmp3, x86_fp80* %retval, align 16
br label %return
return: ; preds = %entry
- %retval4 = load x86_fp80* %retval ; <x86_fp80> [#uses=1]
+ %retval4 = load x86_fp80, x86_fp80* %retval ; <x86_fp80> [#uses=1]
ret x86_fp80 %retval4
}
@@ -30,14 +30,14 @@ entry:
%ld = alloca x86_fp80, align 16 ; <x86_fp80*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store x86_fp80 0xK00000000000000000000, x86_fp80* %ld, align 16
- %tmp1 = load x86_fp80* %ld, align 16 ; <x86_fp80> [#uses=1]
+ %tmp1 = load x86_fp80, x86_fp80* %ld, align 16 ; <x86_fp80> [#uses=1]
%tmp12 = fptrunc x86_fp80 %tmp1 to double ; <double> [#uses=1]
store double %tmp12, double* %tmp, align 8
- %tmp3 = load double* %tmp, align 8 ; <double> [#uses=1]
+ %tmp3 = load double, double* %tmp, align 8 ; <double> [#uses=1]
store double %tmp3, double* %retval, align 8
br label %return
return: ; preds = %entry
- %retval4 = load double* %retval ; <double> [#uses=1]
+ %retval4 = load double, double* %retval ; <double> [#uses=1]
ret double %retval4
}
Modified: llvm/trunk/test/CodeGen/X86/lea-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea-5.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea-5.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lea-5.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ while.cond:
; CHECK: leaq -40(%rsp,%r{{[^,]*}},4), %rax
; X32: leal -40(%rsp,%r{{[^,]*}},4), %eax
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp eq i32 %0, 0
%inc = add nsw i32 %d.addr.0, 1
@@ -45,7 +45,7 @@ while.cond:
; CHECK: leaq (%rsp,%r{{[^,]*}},4), %rax
; X32: leal (%rsp,%r{{[^,]*}},4), %eax
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp eq i32 %0, 0
%inc = add nsw i32 %d.addr.0, 1
Modified: llvm/trunk/test/CodeGen/X86/lea-recursion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea-recursion.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea-recursion.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lea-recursion.ll Fri Feb 27 15:17:42 2015
@@ -13,32 +13,32 @@
define void @foo() {
entry:
- %tmp4 = load i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 0) ; <i32> [#uses=1]
- %tmp8 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 0) ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 0) ; <i32> [#uses=1]
+ %tmp8 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 0) ; <i32> [#uses=1]
%tmp9 = add i32 %tmp4, 1 ; <i32> [#uses=1]
%tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=2]
store i32 %tmp10, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 1)
- %tmp8.1 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 1) ; <i32> [#uses=1]
+ %tmp8.1 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 1) ; <i32> [#uses=1]
%tmp9.1 = add i32 %tmp10, 1 ; <i32> [#uses=1]
%tmp10.1 = add i32 %tmp9.1, %tmp8.1 ; <i32> [#uses=2]
store i32 %tmp10.1, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 2)
- %tmp8.2 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 2) ; <i32> [#uses=1]
+ %tmp8.2 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 2) ; <i32> [#uses=1]
%tmp9.2 = add i32 %tmp10.1, 1 ; <i32> [#uses=1]
%tmp10.2 = add i32 %tmp9.2, %tmp8.2 ; <i32> [#uses=2]
store i32 %tmp10.2, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 3)
- %tmp8.3 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 3) ; <i32> [#uses=1]
+ %tmp8.3 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 3) ; <i32> [#uses=1]
%tmp9.3 = add i32 %tmp10.2, 1 ; <i32> [#uses=1]
%tmp10.3 = add i32 %tmp9.3, %tmp8.3 ; <i32> [#uses=2]
store i32 %tmp10.3, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 4)
- %tmp8.4 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 4) ; <i32> [#uses=1]
+ %tmp8.4 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 4) ; <i32> [#uses=1]
%tmp9.4 = add i32 %tmp10.3, 1 ; <i32> [#uses=1]
%tmp10.4 = add i32 %tmp9.4, %tmp8.4 ; <i32> [#uses=2]
store i32 %tmp10.4, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 5)
- %tmp8.5 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 5) ; <i32> [#uses=1]
+ %tmp8.5 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 5) ; <i32> [#uses=1]
%tmp9.5 = add i32 %tmp10.4, 1 ; <i32> [#uses=1]
%tmp10.5 = add i32 %tmp9.5, %tmp8.5 ; <i32> [#uses=2]
store i32 %tmp10.5, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 6)
- %tmp8.6 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 6) ; <i32> [#uses=1]
+ %tmp8.6 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 6) ; <i32> [#uses=1]
%tmp9.6 = add i32 %tmp10.5, 1 ; <i32> [#uses=1]
%tmp10.6 = add i32 %tmp9.6, %tmp8.6 ; <i32> [#uses=1]
store i32 %tmp10.6, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 7)
Modified: llvm/trunk/test/CodeGen/X86/legalize-shift-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/legalize-shift-64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/legalize-shift-64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/legalize-shift-64.ll Fri Feb 27 15:17:42 2015
@@ -71,7 +71,7 @@ define i32 @test6() {
%t = alloca i64, align 8
store i32 1, i32* %x, align 4
store i64 1, i64* %t, align 8 ;; DEAD
- %load = load i32* %x, align 4
+ %load = load i32, i32* %x, align 4
%shl = shl i32 %load, 8
%add = add i32 %shl, -224
%sh_prom = zext i32 %add to i64
Modified: llvm/trunk/test/CodeGen/X86/licm-nested.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/licm-nested.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/licm-nested.ll (original)
+++ llvm/trunk/test/CodeGen/X86/licm-nested.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ entry:
while.cond.preheader: ; preds = %entry
%arrayidx = getelementptr inbounds i8*, i8** %argv, i64 1 ; <i8**> [#uses=1]
- %tmp2 = load i8** %arrayidx ; <i8*> [#uses=1]
+ %tmp2 = load i8*, i8** %arrayidx ; <i8*> [#uses=1]
%call = tail call i32 @atoi(i8* %tmp2) nounwind ; <i32> [#uses=2]
%tobool51 = icmp eq i32 %call, 0 ; <i1> [#uses=1]
br i1 %tobool51, label %while.end, label %bb.nph53
@@ -50,7 +50,7 @@ for.body15:
%tmp73 = shl i64 %indvar57, 1 ; <i64> [#uses=1]
%add = add i64 %tmp73, 4 ; <i64> [#uses=2]
%arrayidx17 = getelementptr [8193 x i8], [8193 x i8]* @main.flags, i64 0, i64 %tmp68 ; <i8*> [#uses=1]
- %tmp18 = load i8* %arrayidx17 ; <i8> [#uses=1]
+ %tmp18 = load i8, i8* %arrayidx17 ; <i8> [#uses=1]
%tobool19 = icmp eq i8 %tmp18, 0 ; <i1> [#uses=1]
br i1 %tobool19, label %for.inc35, label %if.then
Modified: llvm/trunk/test/CodeGen/X86/liveness-local-regalloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/liveness-local-regalloc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/liveness-local-regalloc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/liveness-local-regalloc.ll Fri Feb 27 15:17:42 2015
@@ -71,7 +71,7 @@ BB:
%A2 = alloca <2 x i8>
%A1 = alloca i1
%A = alloca i32
- %L = load i8* %0
+ %L = load i8, i8* %0
store i8 -37, i8* %0
%E = extractelement <4 x i64> zeroinitializer, i32 2
%Shuff = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
Modified: llvm/trunk/test/CodeGen/X86/load-slice.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/load-slice.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/load-slice.ll (original)
+++ llvm/trunk/test/CodeGen/X86/load-slice.ll Fri Feb 27 15:17:42 2015
@@ -48,7 +48,7 @@ define void @t1(%class.Complex* nocaptur
entry:
%arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
%tmp = bitcast %class.Complex* %arrayidx to i64*
- %tmp1 = load i64* %tmp, align 8
+ %tmp1 = load i64, i64* %tmp, align 8
%t0.sroa.0.0.extract.trunc = trunc i64 %tmp1 to i32
%tmp2 = bitcast i32 %t0.sroa.0.0.extract.trunc to float
%t0.sroa.2.0.extract.shift = lshr i64 %tmp1, 32
@@ -57,11 +57,11 @@ entry:
%add = add i64 %out_start, 8
%arrayidx2 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add
%i.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 0
- %tmp4 = load float* %i.i, align 4
+ %tmp4 = load float, float* %i.i, align 4
%add.i = fadd float %tmp4, %tmp2
%retval.sroa.0.0.vec.insert.i = insertelement <2 x float> undef, float %add.i, i32 0
%r.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 1
- %tmp5 = load float* %r.i, align 4
+ %tmp5 = load float, float* %r.i, align 4
%add5.i = fadd float %tmp5, %tmp3
%retval.sroa.0.4.vec.insert.i = insertelement <2 x float> %retval.sroa.0.0.vec.insert.i, float %add5.i, i32 1
%ref.tmp.sroa.0.0.cast = bitcast %class.Complex* %arrayidx to <2 x float>*
@@ -102,7 +102,7 @@ declare void @llvm.lifetime.end(i64, i8*
define i32 @t2(%class.Complex* nocapture %out, i64 %out_start) {
%arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
%bitcast = bitcast %class.Complex* %arrayidx to i64*
- %chunk64 = load i64* %bitcast, align 8
+ %chunk64 = load i64, i64* %bitcast, align 8
%slice32_low = trunc i64 %chunk64 to i32
%shift48 = lshr i64 %chunk64, 48
%slice32_high = trunc i64 %shift48 to i32
@@ -127,7 +127,7 @@ define i32 @t2(%class.Complex* nocapture
define i32 @t3(%class.Complex* nocapture %out, i64 %out_start) {
%arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
%bitcast = bitcast %class.Complex* %arrayidx to i64*
- %chunk64 = load i64* %bitcast, align 8
+ %chunk64 = load i64, i64* %bitcast, align 8
%slice32_low = trunc i64 %chunk64 to i32
%shift48 = lshr i64 %chunk64, 48
%slice32_high = trunc i64 %shift48 to i32
Modified: llvm/trunk/test/CodeGen/X86/longlong-deadload.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/longlong-deadload.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/longlong-deadload.ll (original)
+++ llvm/trunk/test/CodeGen/X86/longlong-deadload.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define void @test(i64* %P) nounwind {
; CHECK: movl 4(%esp), %[[REGISTER:.*]]
; CHECK-NOT: 4(%[[REGISTER]])
; CHECK: ret
- %tmp1 = load i64* %P, align 8 ; <i64> [#uses=1]
+ %tmp1 = load i64, i64* %P, align 8 ; <i64> [#uses=1]
%tmp2 = xor i64 %tmp1, 1 ; <i64> [#uses=1]
store i64 %tmp2, i64* %P, align 8
ret void
Modified: llvm/trunk/test/CodeGen/X86/loop-strength-reduce4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/loop-strength-reduce4.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/loop-strength-reduce4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/loop-strength-reduce4.ll Fri Feb 27 15:17:42 2015
@@ -27,30 +27,30 @@ bb: ; preds = %bb, %entry
%t.063.0 = phi i32 [ 0, %entry ], [ %tmp47, %bb ] ; <i32> [#uses=1]
%j.065.0 = shl i32 %indvar, 2 ; <i32> [#uses=4]
%tmp3 = getelementptr [0 x i32], [0 x i32]* @state, i32 0, i32 %j.065.0 ; <i32*> [#uses=2]
- %tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* %tmp3, align 4 ; <i32> [#uses=1]
%tmp6 = getelementptr [0 x i32], [0 x i32]* @S, i32 0, i32 %t.063.0 ; <i32*> [#uses=1]
- %tmp7 = load i32* %tmp6, align 4 ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* %tmp6, align 4 ; <i32> [#uses=1]
%tmp8 = xor i32 %tmp7, %tmp4 ; <i32> [#uses=2]
store i32 %tmp8, i32* %tmp3, align 4
%tmp1378 = or i32 %j.065.0, 1 ; <i32> [#uses=1]
%tmp16 = getelementptr [0 x i32], [0 x i32]* @state, i32 0, i32 %tmp1378 ; <i32*> [#uses=2]
- %tmp17 = load i32* %tmp16, align 4 ; <i32> [#uses=1]
+ %tmp17 = load i32, i32* %tmp16, align 4 ; <i32> [#uses=1]
%tmp19 = getelementptr [0 x i32], [0 x i32]* @S, i32 0, i32 %tmp8 ; <i32*> [#uses=1]
- %tmp20 = load i32* %tmp19, align 4 ; <i32> [#uses=1]
+ %tmp20 = load i32, i32* %tmp19, align 4 ; <i32> [#uses=1]
%tmp21 = xor i32 %tmp20, %tmp17 ; <i32> [#uses=2]
store i32 %tmp21, i32* %tmp16, align 4
%tmp2680 = or i32 %j.065.0, 2 ; <i32> [#uses=1]
%tmp29 = getelementptr [0 x i32], [0 x i32]* @state, i32 0, i32 %tmp2680 ; <i32*> [#uses=2]
- %tmp30 = load i32* %tmp29, align 4 ; <i32> [#uses=1]
+ %tmp30 = load i32, i32* %tmp29, align 4 ; <i32> [#uses=1]
%tmp32 = getelementptr [0 x i32], [0 x i32]* @S, i32 0, i32 %tmp21 ; <i32*> [#uses=1]
- %tmp33 = load i32* %tmp32, align 4 ; <i32> [#uses=1]
+ %tmp33 = load i32, i32* %tmp32, align 4 ; <i32> [#uses=1]
%tmp34 = xor i32 %tmp33, %tmp30 ; <i32> [#uses=2]
store i32 %tmp34, i32* %tmp29, align 4
%tmp3982 = or i32 %j.065.0, 3 ; <i32> [#uses=1]
%tmp42 = getelementptr [0 x i32], [0 x i32]* @state, i32 0, i32 %tmp3982 ; <i32*> [#uses=2]
- %tmp43 = load i32* %tmp42, align 4 ; <i32> [#uses=1]
+ %tmp43 = load i32, i32* %tmp42, align 4 ; <i32> [#uses=1]
%tmp45 = getelementptr [0 x i32], [0 x i32]* @S, i32 0, i32 %tmp34 ; <i32*> [#uses=1]
- %tmp46 = load i32* %tmp45, align 4 ; <i32> [#uses=1]
+ %tmp46 = load i32, i32* %tmp45, align 4 ; <i32> [#uses=1]
%tmp47 = xor i32 %tmp46, %tmp43 ; <i32> [#uses=3]
store i32 %tmp47, i32* %tmp42, align 4
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
Modified: llvm/trunk/test/CodeGen/X86/loop-strength-reduce7.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/loop-strength-reduce7.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/loop-strength-reduce7.ll (original)
+++ llvm/trunk/test/CodeGen/X86/loop-strength-reduce7.ll Fri Feb 27 15:17:42 2015
@@ -28,7 +28,7 @@ bb29.i38: ; preds = %bb33.i47, %bb28.i3
%indvar32.i = phi i32 [ %indvar.next33.i, %bb33.i47 ], [ 0, %bb28.i37 ] ; <i32> [#uses=2]
%sfb.314.i = add i32 %indvar32.i, 0 ; <i32> [#uses=3]
%1 = getelementptr [4 x [21 x double]], [4 x [21 x double]]* null, i32 0, i32 %0, i32 %sfb.314.i ; <double*> [#uses=1]
- %2 = load double* %1, align 8 ; <double> [#uses=0]
+ %2 = load double, double* %1, align 8 ; <double> [#uses=0]
br i1 false, label %bb30.i41, label %bb33.i47
bb30.i41: ; preds = %bb29.i38
Modified: llvm/trunk/test/CodeGen/X86/loop-strength-reduce8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/loop-strength-reduce8.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/loop-strength-reduce8.ll (original)
+++ llvm/trunk/test/CodeGen/X86/loop-strength-reduce8.ll Fri Feb 27 15:17:42 2015
@@ -54,8 +54,8 @@ entry:
call void @llvm.va_start(i8* %p1)
%0 = call fastcc %struct.tree_node* @make_node(i32 %code) nounwind ; <%struct.tree_node*> [#uses=2]
%1 = getelementptr [256 x i32], [256 x i32]* @tree_code_length, i32 0, i32 %code ; <i32*> [#uses=1]
- %2 = load i32* %1, align 4 ; <i32> [#uses=2]
- %3 = load i32* @lineno, align 4 ; <i32> [#uses=1]
+ %2 = load i32, i32* %1, align 4 ; <i32> [#uses=2]
+ %3 = load i32, i32* @lineno, align 4 ; <i32> [#uses=1]
%4 = bitcast %struct.tree_node* %0 to %struct.tree_exp* ; <%struct.tree_exp*> [#uses=2]
%5 = getelementptr %struct.tree_exp, %struct.tree_exp* %4, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 %3, i32* %5, align 4
@@ -64,11 +64,11 @@ entry:
bb: ; preds = %bb, %entry
%i.01 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ] ; <i32> [#uses=2]
- %7 = load i8** %p, align 4 ; <i8*> [#uses=2]
+ %7 = load i8*, i8** %p, align 4 ; <i8*> [#uses=2]
%8 = getelementptr i8, i8* %7, i32 4 ; <i8*> [#uses=1]
store i8* %8, i8** %p, align 4
%9 = bitcast i8* %7 to %struct.tree_node** ; <%struct.tree_node**> [#uses=1]
- %10 = load %struct.tree_node** %9, align 4 ; <%struct.tree_node*> [#uses=1]
+ %10 = load %struct.tree_node*, %struct.tree_node** %9, align 4 ; <%struct.tree_node*> [#uses=1]
%11 = getelementptr %struct.tree_exp, %struct.tree_exp* %4, i32 0, i32 2, i32 %i.01 ; <%struct.tree_node**> [#uses=1]
store %struct.tree_node* %10, %struct.tree_node** %11, align 4
%indvar.next = add i32 %i.01, 1 ; <i32> [#uses=2]
Modified: llvm/trunk/test/CodeGen/X86/lsr-delayed-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lsr-delayed-fold.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lsr-delayed-fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lsr-delayed-fold.ll Fri Feb 27 15:17:42 2015
@@ -42,7 +42,7 @@ for.cond:
lbl_264: ; preds = %if.end, %lbl_264.preheader
%g_263.tmp.0 = phi i8 [ %g_263.tmp.1, %for.cond ] ; <i8> [#uses=1]
- %tmp7 = load i16* undef ; <i16> [#uses=1]
+ %tmp7 = load i16, i16* undef ; <i16> [#uses=1]
%conv8 = trunc i16 %tmp7 to i8 ; <i8> [#uses=1]
%mul.i = mul i8 %p_95.addr.0, %p_95.addr.0 ; <i8> [#uses=1]
%mul.i18 = mul i8 %mul.i, %conv8 ; <i8> [#uses=1]
@@ -99,7 +99,7 @@ lor.lhs.false:
%add112 = trunc i64 %tmp45 to i32 ; <i32> [#uses=1]
%add118 = trunc i64 %tmp47 to i32 ; <i32> [#uses=1]
%tmp10 = getelementptr %struct.Bu, %struct.Bu* %bu, i64 %indvar, i32 2 ; <i32*> [#uses=1]
- %tmp11 = load i32* %tmp10 ; <i32> [#uses=0]
+ %tmp11 = load i32, i32* %tmp10 ; <i32> [#uses=0]
tail call void undef(i32 %add22)
tail call void undef(i32 %add28)
tail call void undef(i32 %add34)
Modified: llvm/trunk/test/CodeGen/X86/lsr-i386.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lsr-i386.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lsr-i386.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lsr-i386.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ entry:
bb1: ; preds = %bb6, %bb
%indvar11 = phi i32 [ %indvar.next12, %bb6 ], [ 0, %entry ] ; <i32> [#uses=2]
%tmp21 = add i32 %indvar11, 1 ; <i32> [#uses=1]
- %t = load i32* getelementptr inbounds (%struct.anon* @mp2grad_, i32 0, i32 1)
+ %t = load i32, i32* getelementptr inbounds (%struct.anon* @mp2grad_, i32 0, i32 1)
%tmp15 = mul i32 %n, %t ; <i32> [#uses=1]
%tmp16 = add i32 %tmp21, %tmp15 ; <i32> [#uses=1]
%tmp17 = shl i32 %tmp16, 3 ; <i32> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/lsr-loop-exit-cond.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lsr-loop-exit-cond.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lsr-loop-exit-cond.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lsr-loop-exit-cond.ll Fri Feb 27 15:17:42 2015
@@ -17,9 +17,9 @@
define void @t(i8* nocapture %in, i8* nocapture %out, i32* nocapture %rk, i32 %r) nounwind {
entry:
- %0 = load i32* %rk, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %rk, align 4 ; <i32> [#uses=1]
%1 = getelementptr i32, i32* %rk, i64 1 ; <i32*> [#uses=1]
- %2 = load i32* %1, align 4 ; <i32> [#uses=1]
+ %2 = load i32, i32* %1, align 4 ; <i32> [#uses=1]
%tmp15 = add i32 %r, -1 ; <i32> [#uses=1]
%tmp.16 = zext i32 %tmp15 to i64 ; <i64> [#uses=2]
br label %bb
@@ -33,36 +33,36 @@ bb: ; preds = %bb1, %entry
%3 = lshr i32 %s0.0, 24 ; <i32> [#uses=1]
%4 = zext i32 %3 to i64 ; <i64> [#uses=1]
%5 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %4 ; <i32*> [#uses=1]
- %6 = load i32* %5, align 4 ; <i32> [#uses=1]
+ %6 = load i32, i32* %5, align 4 ; <i32> [#uses=1]
%7 = lshr i32 %s1.0, 16 ; <i32> [#uses=1]
%8 = and i32 %7, 255 ; <i32> [#uses=1]
%9 = zext i32 %8 to i64 ; <i64> [#uses=1]
%10 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %9 ; <i32*> [#uses=1]
- %11 = load i32* %10, align 4 ; <i32> [#uses=1]
+ %11 = load i32, i32* %10, align 4 ; <i32> [#uses=1]
%ctg2.sum2728 = or i64 %tmp18, 8 ; <i64> [#uses=1]
%12 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2728 ; <i8*> [#uses=1]
%13 = bitcast i8* %12 to i32* ; <i32*> [#uses=1]
- %14 = load i32* %13, align 4 ; <i32> [#uses=1]
+ %14 = load i32, i32* %13, align 4 ; <i32> [#uses=1]
%15 = xor i32 %11, %6 ; <i32> [#uses=1]
%16 = xor i32 %15, %14 ; <i32> [#uses=3]
%17 = lshr i32 %s1.0, 24 ; <i32> [#uses=1]
%18 = zext i32 %17 to i64 ; <i64> [#uses=1]
%19 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %18 ; <i32*> [#uses=1]
- %20 = load i32* %19, align 4 ; <i32> [#uses=1]
+ %20 = load i32, i32* %19, align 4 ; <i32> [#uses=1]
%21 = and i32 %s0.0, 255 ; <i32> [#uses=1]
%22 = zext i32 %21 to i64 ; <i64> [#uses=1]
%23 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %22 ; <i32*> [#uses=1]
- %24 = load i32* %23, align 4 ; <i32> [#uses=1]
+ %24 = load i32, i32* %23, align 4 ; <i32> [#uses=1]
%ctg2.sum2930 = or i64 %tmp18, 12 ; <i64> [#uses=1]
%25 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2930 ; <i8*> [#uses=1]
%26 = bitcast i8* %25 to i32* ; <i32*> [#uses=1]
- %27 = load i32* %26, align 4 ; <i32> [#uses=1]
+ %27 = load i32, i32* %26, align 4 ; <i32> [#uses=1]
%28 = xor i32 %24, %20 ; <i32> [#uses=1]
%29 = xor i32 %28, %27 ; <i32> [#uses=4]
%30 = lshr i32 %16, 24 ; <i32> [#uses=1]
%31 = zext i32 %30 to i64 ; <i64> [#uses=1]
%32 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %31 ; <i32*> [#uses=1]
- %33 = load i32* %32, align 4 ; <i32> [#uses=2]
+ %33 = load i32, i32* %32, align 4 ; <i32> [#uses=2]
%exitcond = icmp eq i64 %indvar, %tmp.16 ; <i1> [#uses=1]
br i1 %exitcond, label %bb2, label %bb1
@@ -74,22 +74,22 @@ bb1: ; preds = %bb
%37 = and i32 %36, 255 ; <i32> [#uses=1]
%38 = zext i32 %37 to i64 ; <i64> [#uses=1]
%39 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %38 ; <i32*> [#uses=1]
- %40 = load i32* %39, align 4 ; <i32> [#uses=1]
- %41 = load i32* %35, align 4 ; <i32> [#uses=1]
+ %40 = load i32, i32* %39, align 4 ; <i32> [#uses=1]
+ %41 = load i32, i32* %35, align 4 ; <i32> [#uses=1]
%42 = xor i32 %40, %33 ; <i32> [#uses=1]
%43 = xor i32 %42, %41 ; <i32> [#uses=1]
%44 = lshr i32 %29, 24 ; <i32> [#uses=1]
%45 = zext i32 %44 to i64 ; <i64> [#uses=1]
%46 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %45 ; <i32*> [#uses=1]
- %47 = load i32* %46, align 4 ; <i32> [#uses=1]
+ %47 = load i32, i32* %46, align 4 ; <i32> [#uses=1]
%48 = and i32 %16, 255 ; <i32> [#uses=1]
%49 = zext i32 %48 to i64 ; <i64> [#uses=1]
%50 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %49 ; <i32*> [#uses=1]
- %51 = load i32* %50, align 4 ; <i32> [#uses=1]
+ %51 = load i32, i32* %50, align 4 ; <i32> [#uses=1]
%ctg2.sum32 = add i64 %tmp18, 20 ; <i64> [#uses=1]
%52 = getelementptr i8, i8* %rk26, i64 %ctg2.sum32 ; <i8*> [#uses=1]
%53 = bitcast i8* %52 to i32* ; <i32*> [#uses=1]
- %54 = load i32* %53, align 4 ; <i32> [#uses=1]
+ %54 = load i32, i32* %53, align 4 ; <i32> [#uses=1]
%55 = xor i32 %51, %47 ; <i32> [#uses=1]
%56 = xor i32 %55, %54 ; <i32> [#uses=1]
%indvar.next = add i64 %indvar, 1 ; <i64> [#uses=1]
@@ -105,26 +105,26 @@ bb2: ; preds = %bb
%60 = and i32 %59, 255 ; <i32> [#uses=1]
%61 = zext i32 %60 to i64 ; <i64> [#uses=1]
%62 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %61 ; <i32*> [#uses=1]
- %63 = load i32* %62, align 4 ; <i32> [#uses=1]
+ %63 = load i32, i32* %62, align 4 ; <i32> [#uses=1]
%64 = and i32 %63, 16711680 ; <i32> [#uses=1]
%65 = or i32 %64, %58 ; <i32> [#uses=1]
- %66 = load i32* %57, align 4 ; <i32> [#uses=1]
+ %66 = load i32, i32* %57, align 4 ; <i32> [#uses=1]
%67 = xor i32 %65, %66 ; <i32> [#uses=2]
%68 = lshr i32 %29, 8 ; <i32> [#uses=1]
%69 = zext i32 %68 to i64 ; <i64> [#uses=1]
%70 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %69 ; <i32*> [#uses=1]
- %71 = load i32* %70, align 4 ; <i32> [#uses=1]
+ %71 = load i32, i32* %70, align 4 ; <i32> [#uses=1]
%72 = and i32 %71, -16777216 ; <i32> [#uses=1]
%73 = and i32 %16, 255 ; <i32> [#uses=1]
%74 = zext i32 %73 to i64 ; <i64> [#uses=1]
%75 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %74 ; <i32*> [#uses=1]
- %76 = load i32* %75, align 4 ; <i32> [#uses=1]
+ %76 = load i32, i32* %75, align 4 ; <i32> [#uses=1]
%77 = and i32 %76, 16711680 ; <i32> [#uses=1]
%78 = or i32 %77, %72 ; <i32> [#uses=1]
%ctg2.sum25 = add i64 %tmp10, 20 ; <i64> [#uses=1]
%79 = getelementptr i8, i8* %rk26, i64 %ctg2.sum25 ; <i8*> [#uses=1]
%80 = bitcast i8* %79 to i32* ; <i32*> [#uses=1]
- %81 = load i32* %80, align 4 ; <i32> [#uses=1]
+ %81 = load i32, i32* %80, align 4 ; <i32> [#uses=1]
%82 = xor i32 %78, %81 ; <i32> [#uses=2]
%83 = lshr i32 %67, 24 ; <i32> [#uses=1]
%84 = trunc i32 %83 to i8 ; <i8> [#uses=1]
@@ -176,7 +176,7 @@ for.body:
%bi.06 = phi i32 [ 0, %for.body.lr.ph ], [ %i.addr.0.bi.0, %for.body ]
%b.05 = phi i32 [ 0, %for.body.lr.ph ], [ %.b.0, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %1 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp ugt i32 %1, %b.05
%.b.0 = select i1 %cmp1, i32 %1, i32 %b.05
%2 = trunc i64 %indvars.iv to i32
Modified: llvm/trunk/test/CodeGen/X86/lsr-normalization.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lsr-normalization.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lsr-normalization.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lsr-normalization.ll Fri Feb 27 15:17:42 2015
@@ -39,7 +39,7 @@ bb8:
bb10: ; preds = %bb8, %bb
%tmp11 = bitcast i8* %tmp5 to %0* ; <%0*> [#uses=1]
call void @_ZNSt15_List_node_base4hookEPS_(%0* %tmp11, %0* %tmp) nounwind
- %tmp12 = load %0** %tmp3 ; <%0*> [#uses=3]
+ %tmp12 = load %0*, %0** %tmp3 ; <%0*> [#uses=3]
%tmp13 = icmp eq %0* %tmp12, %tmp ; <i1> [#uses=1]
br i1 %tmp13, label %bb14, label %bb16
@@ -51,7 +51,7 @@ bb16:
%tmp17 = phi i64 [ %tmp22, %bb16 ], [ 0, %bb10 ] ; <i64> [#uses=1]
%tmp18 = phi %0* [ %tmp20, %bb16 ], [ %tmp12, %bb10 ] ; <%0*> [#uses=1]
%tmp19 = getelementptr inbounds %0, %0* %tmp18, i64 0, i32 0 ; <%0**> [#uses=1]
- %tmp20 = load %0** %tmp19 ; <%0*> [#uses=2]
+ %tmp20 = load %0*, %0** %tmp19 ; <%0*> [#uses=2]
%tmp21 = icmp eq %0* %tmp20, %tmp ; <i1> [#uses=1]
%tmp22 = add i64 %tmp17, 1 ; <i64> [#uses=2]
br i1 %tmp21, label %bb23, label %bb16
@@ -64,7 +64,7 @@ bb25:
%tmp26 = phi i64 [ %tmp31, %bb25 ], [ 0, %bb23 ] ; <i64> [#uses=1]
%tmp27 = phi %0* [ %tmp29, %bb25 ], [ %tmp12, %bb23 ] ; <%0*> [#uses=1]
%tmp28 = getelementptr inbounds %0, %0* %tmp27, i64 0, i32 0 ; <%0**> [#uses=1]
- %tmp29 = load %0** %tmp28 ; <%0*> [#uses=2]
+ %tmp29 = load %0*, %0** %tmp28 ; <%0*> [#uses=2]
%tmp30 = icmp eq %0* %tmp29, %tmp ; <i1> [#uses=1]
%tmp31 = add i64 %tmp26, 1 ; <i64> [#uses=2]
br i1 %tmp30, label %bb32, label %bb25
@@ -75,14 +75,14 @@ bb32:
br label %bb35
bb35: ; preds = %bb32, %bb14
- %tmp36 = load %0** %tmp3 ; <%0*> [#uses=2]
+ %tmp36 = load %0*, %0** %tmp3 ; <%0*> [#uses=2]
%tmp37 = icmp eq %0* %tmp36, %tmp ; <i1> [#uses=1]
br i1 %tmp37, label %bb44, label %bb38
bb38: ; preds = %bb38, %bb35
%tmp39 = phi %0* [ %tmp41, %bb38 ], [ %tmp36, %bb35 ] ; <%0*> [#uses=2]
%tmp40 = getelementptr inbounds %0, %0* %tmp39, i64 0, i32 0 ; <%0**> [#uses=1]
- %tmp41 = load %0** %tmp40 ; <%0*> [#uses=2]
+ %tmp41 = load %0*, %0** %tmp40 ; <%0*> [#uses=2]
%tmp42 = bitcast %0* %tmp39 to i8* ; <i8*> [#uses=1]
call void @_ZdlPv(i8* %tmp42) nounwind
%tmp43 = icmp eq %0* %tmp41, %tmp ; <i1> [#uses=1]
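For pointer-typed results, as in the %0** loads above, the explicit parameter is itself a pointer type, so the instruction reads load <pointee-ptr>, <pointee-ptr>* <ptr>. A small sketch assuming a hypothetical recursive %node type:

  %node = type { %node* }

  define %node* @next(%node** %pp) {
  entry:
    ; the first parameter (%node*) is the loaded value's type;
    ; the second (%node**) is still the pointer operand's type
    %n = load %node*, %node** %pp
    ret %node* %n
  }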
Modified: llvm/trunk/test/CodeGen/X86/lsr-redundant-addressing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lsr-redundant-addressing.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lsr-redundant-addressing.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lsr-redundant-addressing.ll Fri Feb 27 15:17:42 2015
@@ -23,7 +23,7 @@ bb38:
%tmp39 = phi i64 [ %tmp201, %bb200 ], [ 0, %bb ]
%tmp40 = sub i64 0, %tmp39
%tmp47 = getelementptr [5 x %0], [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 0
- %tmp34 = load i32* %tmp47, align 16
+ %tmp34 = load i32, i32* %tmp47, align 16
%tmp203 = icmp slt i32 %tmp34, 12
br i1 %tmp203, label %bb215, label %bb200
@@ -39,13 +39,13 @@ bb215:
store i32 %tmp216, i32* %tmp47, align 16
%tmp217 = sext i32 %tmp216 to i64
%tmp218 = getelementptr inbounds [13 x %1], [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 0
- %tmp219 = load i32* %tmp218, align 8
+ %tmp219 = load i32, i32* %tmp218, align 8
store i32 %tmp219, i32* %tmp48, align 4
%tmp220 = getelementptr inbounds [13 x %1], [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 1
- %tmp221 = load i32* %tmp220, align 4
+ %tmp221 = load i32, i32* %tmp220, align 4
store i32 %tmp221, i32* %tmp49, align 4
%tmp222 = getelementptr inbounds [13 x %1], [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 2
- %tmp223 = load i32* %tmp222, align 8
+ %tmp223 = load i32, i32* %tmp222, align 8
store i32 %tmp223, i32* %tmp50, align 4
ret void
}
Modified: llvm/trunk/test/CodeGen/X86/lsr-reuse-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lsr-reuse-trunc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lsr-reuse-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lsr-reuse-trunc.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@
define void @vvfloorf(float* nocapture %y, float* nocapture %x, i32* nocapture %n) nounwind {
entry:
- %0 = load i32* %n, align 4
+ %0 = load i32, i32* %n, align 4
%1 = icmp sgt i32 %0, 0
br i1 %1, label %bb, label %return
@@ -25,7 +25,7 @@ bb:
%scevgep9 = bitcast float* %scevgep to <4 x float>*
%scevgep10 = getelementptr float, float* %x, i64 %tmp
%scevgep1011 = bitcast float* %scevgep10 to <4 x float>*
- %2 = load <4 x float>* %scevgep1011, align 16
+ %2 = load <4 x float>, <4 x float>* %scevgep1011, align 16
%3 = bitcast <4 x float> %2 to <4 x i32>
%4 = and <4 x i32> %3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
%5 = bitcast <4 x i32> %4 to <4 x float>
@@ -48,7 +48,7 @@ bb:
store <4 x float> %19, <4 x float>* %scevgep9, align 16
%tmp12 = add i64 %tmp, 4
%tmp13 = trunc i64 %tmp12 to i32
- %20 = load i32* %n, align 4
+ %20 = load i32, i32* %n, align 4
%21 = icmp sgt i32 %20, %tmp13
%indvar.next = add i64 %indvar, 1
br i1 %21, label %bb, label %return
Modified: llvm/trunk/test/CodeGen/X86/lsr-reuse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lsr-reuse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lsr-reuse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lsr-reuse.ll Fri Feb 27 15:17:42 2015
@@ -28,8 +28,8 @@ loop:
%Ai = getelementptr inbounds double, double* %A, i64 %i
%Bi = getelementptr inbounds double, double* %B, i64 %i
%Ci = getelementptr inbounds double, double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
+ %t1 = load double, double* %Bi
+ %t2 = load double, double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%i.next = add nsw i64 %i, 1
@@ -73,16 +73,16 @@ loop:
%Ai = getelementptr inbounds double, double* %A, i64 %i
%Bi = getelementptr inbounds double, double* %B, i64 %i
%Ci = getelementptr inbounds double, double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
+ %t1 = load double, double* %Bi
+ %t2 = load double, double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%j = add i64 %i, 256
%Aj = getelementptr inbounds double, double* %A, i64 %j
%Bj = getelementptr inbounds double, double* %B, i64 %j
%Cj = getelementptr inbounds double, double* %C, i64 %j
- %t3 = load double* %Bj
- %t4 = load double* %Cj
+ %t3 = load double, double* %Bj
+ %t4 = load double, double* %Cj
%o = fdiv double %t3, %t4
store double %o, double* %Aj
%i.next = add nsw i64 %i, 1
@@ -119,16 +119,16 @@ loop:
%Ai = getelementptr inbounds double, double* %A, i64 %i
%Bi = getelementptr inbounds double, double* %B, i64 %i
%Ci = getelementptr inbounds double, double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
+ %t1 = load double, double* %Bi
+ %t2 = load double, double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%j = sub i64 %i, 256
%Aj = getelementptr inbounds double, double* %A, i64 %j
%Bj = getelementptr inbounds double, double* %B, i64 %j
%Cj = getelementptr inbounds double, double* %C, i64 %j
- %t3 = load double* %Bj
- %t4 = load double* %Cj
+ %t3 = load double, double* %Bj
+ %t4 = load double, double* %Cj
%o = fdiv double %t3, %t4
store double %o, double* %Aj
%i.next = add nsw i64 %i, 1
@@ -165,16 +165,16 @@ loop:
%Ak = getelementptr inbounds double, double* %A, i64 %k
%Bk = getelementptr inbounds double, double* %B, i64 %k
%Ck = getelementptr inbounds double, double* %C, i64 %k
- %t1 = load double* %Bk
- %t2 = load double* %Ck
+ %t1 = load double, double* %Bk
+ %t2 = load double, double* %Ck
%m = fmul double %t1, %t2
store double %m, double* %Ak
%j = sub i64 %i, 256
%Aj = getelementptr inbounds double, double* %A, i64 %j
%Bj = getelementptr inbounds double, double* %B, i64 %j
%Cj = getelementptr inbounds double, double* %C, i64 %j
- %t3 = load double* %Bj
- %t4 = load double* %Cj
+ %t3 = load double, double* %Bj
+ %t4 = load double, double* %Cj
%o = fdiv double %t3, %t4
store double %o, double* %Aj
%i.next = add nsw i64 %i, 1
@@ -208,8 +208,8 @@ loop:
%Ai = getelementptr inbounds double, double* %A, i64 %i
%Bi = getelementptr inbounds double, double* %B, i64 %i
%Ci = getelementptr inbounds double, double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
+ %t1 = load double, double* %Bi
+ %t2 = load double, double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%i.next = add nsw i64 %i, 1
@@ -243,8 +243,8 @@ loop:
%Ai = getelementptr inbounds double, double* %A, i64 %i
%Bi = getelementptr inbounds double, double* %B, i64 %i
%Ci = getelementptr inbounds double, double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
+ %t1 = load double, double* %Bi
+ %t2 = load double, double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%i.next = add nsw i64 %i, 1
@@ -281,17 +281,17 @@ loop:
%i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
%i5 = add i64 %i, 5
%Ai = getelementptr double, double* %A, i64 %i5
- %t2 = load double* %Ai
+ %t2 = load double, double* %Ai
%Bi = getelementptr double, double* %B, i64 %i5
- %t4 = load double* %Bi
+ %t4 = load double, double* %Bi
%t5 = fadd double %t2, %t4
%Ci = getelementptr double, double* %C, i64 %i5
store double %t5, double* %Ci
%i10 = add i64 %i, 10
%Ai10 = getelementptr double, double* %A, i64 %i10
- %t9 = load double* %Ai10
+ %t9 = load double, double* %Ai10
%Bi10 = getelementptr double, double* %B, i64 %i10
- %t11 = load double* %Bi10
+ %t11 = load double, double* %Bi10
%t12 = fsub double %t9, %t11
%Ci10 = getelementptr double, double* %C, i64 %i10
store double %t12, double* %Ci10
@@ -328,17 +328,17 @@ loop:
%i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
%i5 = add i64 %i, 5
%Ai = getelementptr double, double* %A, i64 %i5
- %t2 = load double* %Ai
+ %t2 = load double, double* %Ai
%Bi = getelementptr double, double* %B, i64 %i5
- %t4 = load double* %Bi
+ %t4 = load double, double* %Bi
%t5 = fadd double %t2, %t4
%Ci = getelementptr double, double* %C, i64 %i5
store double %t5, double* %Ci
%i10 = add i64 %i, 10
%Ai10 = getelementptr double, double* %A, i64 %i10
- %t9 = load double* %Ai10
+ %t9 = load double, double* %Ai10
%Bi10 = getelementptr double, double* %B, i64 %i10
- %t11 = load double* %Bi10
+ %t11 = load double, double* %Bi10
%t12 = fsub double %t9, %t11
%Ci10 = getelementptr double, double* %C, i64 %i10
store double %t12, double* %Ci10
@@ -375,8 +375,8 @@ loop:
%Ai = getelementptr inbounds double, double* %A, i64 %i
%Bi = getelementptr inbounds double, double* %B, i64 %i
%Ci = getelementptr inbounds double, double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
+ %t1 = load double, double* %Bi
+ %t2 = load double, double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%i.next = add nsw i64 %i, 1
@@ -414,7 +414,7 @@ bb:
%indvar16 = phi i64 [ 0, %bb.nph14 ], [ %indvar.next17, %bb3 ] ; <i64> [#uses=3]
%s.113 = phi i32 [ 0, %bb.nph14 ], [ %s.0.lcssa, %bb3 ] ; <i32> [#uses=2]
%scevgep2526 = getelementptr [123123 x %struct.anon], [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 0 ; <i32*> [#uses=1]
- %1 = load i32* %scevgep2526, align 4 ; <i32> [#uses=2]
+ %1 = load i32, i32* %scevgep2526, align 4 ; <i32> [#uses=2]
%2 = icmp sgt i32 %1, 0 ; <i1> [#uses=1]
br i1 %2, label %bb.nph, label %bb3
@@ -426,7 +426,7 @@ bb1:
%indvar = phi i64 [ 0, %bb.nph ], [ %tmp19, %bb1 ] ; <i64> [#uses=2]
%s.07 = phi i32 [ %s.113, %bb.nph ], [ %4, %bb1 ] ; <i32> [#uses=1]
%c.08 = getelementptr [123123 x %struct.anon], [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 1, i64 %indvar ; <i32*> [#uses=1]
- %3 = load i32* %c.08, align 4 ; <i32> [#uses=1]
+ %3 = load i32, i32* %c.08, align 4 ; <i32> [#uses=1]
%4 = add nsw i32 %3, %s.07 ; <i32> [#uses=2]
%tmp19 = add i64 %indvar, 1 ; <i64> [#uses=2]
%5 = icmp sgt i64 %tmp23, %tmp19 ; <i1> [#uses=1]
@@ -493,7 +493,7 @@ define void @test(float* %arg, i64 %arg1
bb:
%t = alloca float, align 4 ; <float*> [#uses=3]
%t7 = alloca float, align 4 ; <float*> [#uses=2]
- %t8 = load float* %arg3 ; <float> [#uses=8]
+ %t8 = load float, float* %arg3 ; <float> [#uses=8]
%t9 = ptrtoint float* %arg to i64 ; <i64> [#uses=1]
%t10 = ptrtoint float* %arg4 to i64 ; <i64> [#uses=1]
%t11 = xor i64 %t10, %t9 ; <i64> [#uses=1]
@@ -507,7 +507,7 @@ bb:
br i1 %t18, label %bb19, label %bb213
bb19: ; preds = %bb
- %t20 = load float* %arg2 ; <float> [#uses=1]
+ %t20 = load float, float* %arg2 ; <float> [#uses=1]
br label %bb21
bb21: ; preds = %bb32, %bb19
@@ -526,7 +526,7 @@ bb28:
br i1 %t31, label %bb37, label %bb32
bb32: ; preds = %bb28
- %t33 = load float* %t26 ; <float> [#uses=1]
+ %t33 = load float, float* %t26 ; <float> [#uses=1]
%t34 = fmul float %t23, %t33 ; <float> [#uses=1]
store float %t34, float* %t25
%t35 = fadd float %t23, %t8 ; <float> [#uses=1]
@@ -604,10 +604,10 @@ bb68:
%t95 = bitcast float* %t94 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t96 = mul i64 %t69, -16 ; <i64> [#uses=1]
%t97 = add i64 %t67, %t96 ; <i64> [#uses=2]
- %t98 = load <4 x float>* %t77 ; <<4 x float>> [#uses=1]
- %t99 = load <4 x float>* %t81 ; <<4 x float>> [#uses=1]
- %t100 = load <4 x float>* %t84 ; <<4 x float>> [#uses=1]
- %t101 = load <4 x float>* %t87 ; <<4 x float>> [#uses=1]
+ %t98 = load <4 x float>, <4 x float>* %t77 ; <<4 x float>> [#uses=1]
+ %t99 = load <4 x float>, <4 x float>* %t81 ; <<4 x float>> [#uses=1]
+ %t100 = load <4 x float>, <4 x float>* %t84 ; <<4 x float>> [#uses=1]
+ %t101 = load <4 x float>, <4 x float>* %t87 ; <<4 x float>> [#uses=1]
%t102 = fmul <4 x float> %t98, %t71 ; <<4 x float>> [#uses=1]
%t103 = fadd <4 x float> %t71, %t55 ; <<4 x float>> [#uses=2]
%t104 = fmul <4 x float> %t99, %t73 ; <<4 x float>> [#uses=1]
@@ -644,7 +644,7 @@ bb122:
%t123 = add i64 %t22, -1 ; <i64> [#uses=1]
%t124 = getelementptr inbounds float, float* %arg, i64 %t123 ; <float*> [#uses=1]
%t125 = bitcast float* %t124 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %t126 = load <4 x float>* %t125 ; <<4 x float>> [#uses=1]
+ %t126 = load <4 x float>, <4 x float>* %t125 ; <<4 x float>> [#uses=1]
%t127 = add i64 %t22, 16 ; <i64> [#uses=1]
%t128 = add i64 %t22, 3 ; <i64> [#uses=1]
%t129 = add i64 %t22, 7 ; <i64> [#uses=1]
@@ -692,10 +692,10 @@ bb137:
%t169 = bitcast float* %t168 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t170 = mul i64 %t138, -16 ; <i64> [#uses=1]
%t171 = add i64 %t136, %t170 ; <i64> [#uses=2]
- %t172 = load <4 x float>* %t148 ; <<4 x float>> [#uses=2]
- %t173 = load <4 x float>* %t151 ; <<4 x float>> [#uses=2]
- %t174 = load <4 x float>* %t154 ; <<4 x float>> [#uses=2]
- %t175 = load <4 x float>* %t157 ; <<4 x float>> [#uses=2]
+ %t172 = load <4 x float>, <4 x float>* %t148 ; <<4 x float>> [#uses=2]
+ %t173 = load <4 x float>, <4 x float>* %t151 ; <<4 x float>> [#uses=2]
+ %t174 = load <4 x float>, <4 x float>* %t154 ; <<4 x float>> [#uses=2]
+ %t175 = load <4 x float>, <4 x float>* %t157 ; <<4 x float>> [#uses=2]
%t176 = shufflevector <4 x float> %t143, <4 x float> %t172, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
%t177 = shufflevector <4 x float> %t176, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1]
%t178 = shufflevector <4 x float> %t172, <4 x float> %t173, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
@@ -734,7 +734,7 @@ bb201:
%t203 = phi float [ %t208, %bb201 ], [ %t199, %bb194 ] ; <float> [#uses=2]
%t204 = getelementptr float, float* %t198, i64 %t202 ; <float*> [#uses=1]
%t205 = getelementptr float, float* %t197, i64 %t202 ; <float*> [#uses=1]
- %t206 = load float* %t204 ; <float> [#uses=1]
+ %t206 = load float, float* %t204 ; <float> [#uses=1]
%t207 = fmul float %t203, %t206 ; <float> [#uses=1]
store float %t207, float* %t205
%t208 = fadd float %t203, %t8 ; <float> [#uses=2]
Modified: llvm/trunk/test/CodeGen/X86/lsr-static-addr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lsr-static-addr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lsr-static-addr.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lsr-static-addr.ll Fri Feb 27 15:17:42 2015
@@ -30,7 +30,7 @@ entry:
for.body:
%i.06 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr [0 x double], [0 x double]* @A, i64 0, i64 %i.06
- %tmp3 = load double* %arrayidx, align 8
+ %tmp3 = load double, double* %arrayidx, align 8
%mul = fmul double %tmp3, 2.300000e+00
store double %mul, double* %arrayidx, align 8
%inc = add nsw i64 %i.06, 1
Modified: llvm/trunk/test/CodeGen/X86/lsr-wrap.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lsr-wrap.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lsr-wrap.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lsr-wrap.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ bb:
%indvar = phi i16 [ 0, %entry ], [ %indvar.next, %bb ] ; <i16> [#uses=2]
%tmp = sub i16 0, %indvar ; <i16> [#uses=1]
%tmp27 = trunc i16 %tmp to i8 ; <i8> [#uses=1]
- %tmp1 = load i32* @g_19, align 4 ; <i32> [#uses=2]
+ %tmp1 = load i32, i32* @g_19, align 4 ; <i32> [#uses=2]
%tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
store i32 %tmp2, i32* @g_19, align 4
%tmp3 = trunc i32 %tmp1 to i8 ; <i8> [#uses=1]
Modified: llvm/trunk/test/CodeGen/X86/lzcnt-tzcnt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lzcnt-tzcnt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lzcnt-tzcnt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lzcnt-tzcnt.ll Fri Feb 27 15:17:42 2015
@@ -106,7 +106,7 @@ define i64 @test9_ctlz(i64 %v) {
define i16 @test10_ctlz(i16* %ptr) {
- %v = load i16* %ptr
+ %v = load i16, i16* %ptr
%cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
%tobool = icmp eq i16 %v, 0
%cond = select i1 %tobool, i16 16, i16 %cnt
@@ -119,7 +119,7 @@ define i16 @test10_ctlz(i16* %ptr) {
define i32 @test11_ctlz(i32* %ptr) {
- %v = load i32* %ptr
+ %v = load i32, i32* %ptr
%cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
%tobool = icmp eq i32 %v, 0
%cond = select i1 %tobool, i32 32, i32 %cnt
@@ -132,7 +132,7 @@ define i32 @test11_ctlz(i32* %ptr) {
define i64 @test12_ctlz(i64* %ptr) {
- %v = load i64* %ptr
+ %v = load i64, i64* %ptr
%cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
%tobool = icmp eq i64 %v, 0
%cond = select i1 %tobool, i64 64, i64 %cnt
@@ -145,7 +145,7 @@ define i64 @test12_ctlz(i64* %ptr) {
define i16 @test13_ctlz(i16* %ptr) {
- %v = load i16* %ptr
+ %v = load i16, i16* %ptr
%cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
%tobool = icmp eq i16 0, %v
%cond = select i1 %tobool, i16 16, i16 %cnt
@@ -158,7 +158,7 @@ define i16 @test13_ctlz(i16* %ptr) {
define i32 @test14_ctlz(i32* %ptr) {
- %v = load i32* %ptr
+ %v = load i32, i32* %ptr
%cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
%tobool = icmp eq i32 0, %v
%cond = select i1 %tobool, i32 32, i32 %cnt
@@ -171,7 +171,7 @@ define i32 @test14_ctlz(i32* %ptr) {
define i64 @test15_ctlz(i64* %ptr) {
- %v = load i64* %ptr
+ %v = load i64, i64* %ptr
%cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
%tobool = icmp eq i64 0, %v
%cond = select i1 %tobool, i64 64, i64 %cnt
@@ -184,7 +184,7 @@ define i64 @test15_ctlz(i64* %ptr) {
define i16 @test16_ctlz(i16* %ptr) {
- %v = load i16* %ptr
+ %v = load i16, i16* %ptr
%cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
%tobool = icmp eq i16 0, %v
%cond = select i1 %tobool, i16 %cnt, i16 16
@@ -197,7 +197,7 @@ define i16 @test16_ctlz(i16* %ptr) {
define i32 @test17_ctlz(i32* %ptr) {
- %v = load i32* %ptr
+ %v = load i32, i32* %ptr
%cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
%tobool = icmp eq i32 0, %v
%cond = select i1 %tobool, i32 %cnt, i32 32
@@ -210,7 +210,7 @@ define i32 @test17_ctlz(i32* %ptr) {
define i64 @test18_ctlz(i64* %ptr) {
- %v = load i64* %ptr
+ %v = load i64, i64* %ptr
%cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
%tobool = icmp eq i64 0, %v
%cond = select i1 %tobool, i64 %cnt, i64 64
@@ -322,7 +322,7 @@ define i64 @test9_cttz(i64 %v) {
define i16 @test10_cttz(i16* %ptr) {
- %v = load i16* %ptr
+ %v = load i16, i16* %ptr
%cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
%tobool = icmp eq i16 %v, 0
%cond = select i1 %tobool, i16 16, i16 %cnt
@@ -335,7 +335,7 @@ define i16 @test10_cttz(i16* %ptr) {
define i32 @test11_cttz(i32* %ptr) {
- %v = load i32* %ptr
+ %v = load i32, i32* %ptr
%cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
%tobool = icmp eq i32 %v, 0
%cond = select i1 %tobool, i32 32, i32 %cnt
@@ -348,7 +348,7 @@ define i32 @test11_cttz(i32* %ptr) {
define i64 @test12_cttz(i64* %ptr) {
- %v = load i64* %ptr
+ %v = load i64, i64* %ptr
%cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
%tobool = icmp eq i64 %v, 0
%cond = select i1 %tobool, i64 64, i64 %cnt
@@ -361,7 +361,7 @@ define i64 @test12_cttz(i64* %ptr) {
define i16 @test13_cttz(i16* %ptr) {
- %v = load i16* %ptr
+ %v = load i16, i16* %ptr
%cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
%tobool = icmp eq i16 0, %v
%cond = select i1 %tobool, i16 16, i16 %cnt
@@ -374,7 +374,7 @@ define i16 @test13_cttz(i16* %ptr) {
define i32 @test14_cttz(i32* %ptr) {
- %v = load i32* %ptr
+ %v = load i32, i32* %ptr
%cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
%tobool = icmp eq i32 0, %v
%cond = select i1 %tobool, i32 32, i32 %cnt
@@ -387,7 +387,7 @@ define i32 @test14_cttz(i32* %ptr) {
define i64 @test15_cttz(i64* %ptr) {
- %v = load i64* %ptr
+ %v = load i64, i64* %ptr
%cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
%tobool = icmp eq i64 0, %v
%cond = select i1 %tobool, i64 64, i64 %cnt
@@ -400,7 +400,7 @@ define i64 @test15_cttz(i64* %ptr) {
define i16 @test16_cttz(i16* %ptr) {
- %v = load i16* %ptr
+ %v = load i16, i16* %ptr
%cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
%tobool = icmp eq i16 0, %v
%cond = select i1 %tobool, i16 %cnt, i16 16
@@ -413,7 +413,7 @@ define i16 @test16_cttz(i16* %ptr) {
define i32 @test17_cttz(i32* %ptr) {
- %v = load i32* %ptr
+ %v = load i32, i32* %ptr
%cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
%tobool = icmp eq i32 0, %v
%cond = select i1 %tobool, i32 %cnt, i32 32
@@ -426,7 +426,7 @@ define i32 @test17_cttz(i32* %ptr) {
define i64 @test18_cttz(i64* %ptr) {
- %v = load i64* %ptr
+ %v = load i64, i64* %ptr
%cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
%tobool = icmp eq i64 0, %v
%cond = select i1 %tobool, i64 %cnt, i64 64
Modified: llvm/trunk/test/CodeGen/X86/machine-cse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/machine-cse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/machine-cse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/machine-cse.ll Fri Feb 27 15:17:42 2015
@@ -147,7 +147,7 @@ define i32 @t2() {
br i1 %c, label %a, label %b
a:
- %l = load i32* @t2_global
+ %l = load i32, i32* @t2_global
ret i32 %l
b:
Modified: llvm/trunk/test/CodeGen/X86/masked-iv-safe.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked-iv-safe.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked-iv-safe.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked-iv-safe.ll Fri Feb 27 15:17:42 2015
@@ -16,16 +16,16 @@ loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -49,16 +49,16 @@ loop:
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -83,17 +83,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -118,17 +118,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -152,16 +152,16 @@ loop:
%indvar = phi i64 [ 18446744073709551615, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -185,16 +185,16 @@ loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fdiv double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -219,17 +219,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fdiv double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -254,17 +254,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fdiv double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
Modified: llvm/trunk/test/CodeGen/X86/masked-iv-unsafe.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked-iv-unsafe.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked-iv-unsafe.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked-iv-unsafe.ll Fri Feb 27 15:17:42 2015
@@ -14,16 +14,16 @@ loop:
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -42,16 +42,16 @@ loop:
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -71,17 +71,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -101,17 +101,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -130,16 +130,16 @@ loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -158,16 +158,16 @@ loop:
%indvar = phi i64 [ %n, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -187,17 +187,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -217,17 +217,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -246,16 +246,16 @@ loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -274,16 +274,16 @@ loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 3
@@ -302,16 +302,16 @@ loop:
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 3
@@ -331,17 +331,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 3
@@ -361,17 +361,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 3
Modified: llvm/trunk/test/CodeGen/X86/mcinst-lowering.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mcinst-lowering.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mcinst-lowering.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mcinst-lowering.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ target triple = "x86_64-apple-darwin10.0
define i32 @f0(i32* nocapture %x) nounwind readonly ssp {
entry:
- %tmp1 = load i32* %x ; <i32> [#uses=2]
+ %tmp1 = load i32, i32* %x ; <i32> [#uses=2]
%tobool = icmp eq i32 %tmp1, 0 ; <i1> [#uses=1]
br i1 %tobool, label %if.end, label %return
Modified: llvm/trunk/test/CodeGen/X86/mem-intrin-base-reg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mem-intrin-base-reg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mem-intrin-base-reg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mem-intrin-base-reg.ll Fri Feb 27 15:17:42 2015
@@ -25,8 +25,8 @@ no_vectors:
spill_vectors:
%vp1 = getelementptr <4 x i32>, <4 x i32>* %vp0, i32 1
- %v0 = load <4 x i32>* %vp0
- %v1 = load <4 x i32>* %vp1
+ %v0 = load <4 x i32>, <4 x i32>* %vp0
+ %v1 = load <4 x i32>, <4 x i32>* %vp1
%vicmp = icmp slt <4 x i32> %v0, %v1
%icmp = extractelement <4 x i1> %vicmp, i32 0
call void @escape_vla_and_icmp(i8* null, i1 zeroext %icmp)
@@ -50,8 +50,8 @@ no_vectors:
spill_vectors:
%vp1 = getelementptr <4 x i32>, <4 x i32>* %vp0, i32 1
- %v0 = load <4 x i32>* %vp0
- %v1 = load <4 x i32>* %vp1
+ %v0 = load <4 x i32>, <4 x i32>* %vp0
+ %v1 = load <4 x i32>, <4 x i32>* %vp1
%vicmp = icmp slt <4 x i32> %v0, %v1
%icmp = extractelement <4 x i1> %vicmp, i32 0
%vla = alloca i8, i32 %n
@@ -78,8 +78,8 @@ no_vectors:
spill_vectors:
%vp1 = getelementptr <4 x i32>, <4 x i32>* %vp0, i32 1
- %v0 = load <4 x i32>* %vp0
- %v1 = load <4 x i32>* %vp1
+ %v0 = load <4 x i32>, <4 x i32>* %vp0
+ %v1 = load <4 x i32>, <4 x i32>* %vp1
%vicmp = icmp slt <4 x i32> %v0, %v1
%icmp = extractelement <4 x i1> %vicmp, i32 0
%vla = alloca i8, i32 %n
Modified: llvm/trunk/test/CodeGen/X86/mem-promote-integers.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mem-promote-integers.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mem-promote-integers.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mem-promote-integers.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
; RUN: llc -march=x86-64 < %s > /dev/null
define <1 x i8> @test_1xi8(<1 x i8> %x, <1 x i8>* %b) {
- %bb = load <1 x i8>* %b
+ %bb = load <1 x i8>, <1 x i8>* %b
%tt = xor <1 x i8> %x, %bb
store <1 x i8> %tt, <1 x i8>* %b
br label %next
@@ -16,7 +16,7 @@ next:
define <1 x i16> @test_1xi16(<1 x i16> %x, <1 x i16>* %b) {
- %bb = load <1 x i16>* %b
+ %bb = load <1 x i16>, <1 x i16>* %b
%tt = xor <1 x i16> %x, %bb
store <1 x i16> %tt, <1 x i16>* %b
br label %next
@@ -27,7 +27,7 @@ next:
define <1 x i32> @test_1xi32(<1 x i32> %x, <1 x i32>* %b) {
- %bb = load <1 x i32>* %b
+ %bb = load <1 x i32>, <1 x i32>* %b
%tt = xor <1 x i32> %x, %bb
store <1 x i32> %tt, <1 x i32>* %b
br label %next
@@ -38,7 +38,7 @@ next:
define <1 x i64> @test_1xi64(<1 x i64> %x, <1 x i64>* %b) {
- %bb = load <1 x i64>* %b
+ %bb = load <1 x i64>, <1 x i64>* %b
%tt = xor <1 x i64> %x, %bb
store <1 x i64> %tt, <1 x i64>* %b
br label %next
@@ -49,7 +49,7 @@ next:
define <1 x i128> @test_1xi128(<1 x i128> %x, <1 x i128>* %b) {
- %bb = load <1 x i128>* %b
+ %bb = load <1 x i128>, <1 x i128>* %b
%tt = xor <1 x i128> %x, %bb
store <1 x i128> %tt, <1 x i128>* %b
br label %next
@@ -60,7 +60,7 @@ next:
define <1 x i256> @test_1xi256(<1 x i256> %x, <1 x i256>* %b) {
- %bb = load <1 x i256>* %b
+ %bb = load <1 x i256>, <1 x i256>* %b
%tt = xor <1 x i256> %x, %bb
store <1 x i256> %tt, <1 x i256>* %b
br label %next
@@ -71,7 +71,7 @@ next:
define <1 x i512> @test_1xi512(<1 x i512> %x, <1 x i512>* %b) {
- %bb = load <1 x i512>* %b
+ %bb = load <1 x i512>, <1 x i512>* %b
%tt = xor <1 x i512> %x, %bb
store <1 x i512> %tt, <1 x i512>* %b
br label %next
@@ -82,7 +82,7 @@ next:
define <2 x i8> @test_2xi8(<2 x i8> %x, <2 x i8>* %b) {
- %bb = load <2 x i8>* %b
+ %bb = load <2 x i8>, <2 x i8>* %b
%tt = xor <2 x i8> %x, %bb
store <2 x i8> %tt, <2 x i8>* %b
br label %next
@@ -93,7 +93,7 @@ next:
define <2 x i16> @test_2xi16(<2 x i16> %x, <2 x i16>* %b) {
- %bb = load <2 x i16>* %b
+ %bb = load <2 x i16>, <2 x i16>* %b
%tt = xor <2 x i16> %x, %bb
store <2 x i16> %tt, <2 x i16>* %b
br label %next
@@ -104,7 +104,7 @@ next:
define <2 x i32> @test_2xi32(<2 x i32> %x, <2 x i32>* %b) {
- %bb = load <2 x i32>* %b
+ %bb = load <2 x i32>, <2 x i32>* %b
%tt = xor <2 x i32> %x, %bb
store <2 x i32> %tt, <2 x i32>* %b
br label %next
@@ -115,7 +115,7 @@ next:
define <2 x i64> @test_2xi64(<2 x i64> %x, <2 x i64>* %b) {
- %bb = load <2 x i64>* %b
+ %bb = load <2 x i64>, <2 x i64>* %b
%tt = xor <2 x i64> %x, %bb
store <2 x i64> %tt, <2 x i64>* %b
br label %next
@@ -126,7 +126,7 @@ next:
define <2 x i128> @test_2xi128(<2 x i128> %x, <2 x i128>* %b) {
- %bb = load <2 x i128>* %b
+ %bb = load <2 x i128>, <2 x i128>* %b
%tt = xor <2 x i128> %x, %bb
store <2 x i128> %tt, <2 x i128>* %b
br label %next
@@ -137,7 +137,7 @@ next:
define <2 x i256> @test_2xi256(<2 x i256> %x, <2 x i256>* %b) {
- %bb = load <2 x i256>* %b
+ %bb = load <2 x i256>, <2 x i256>* %b
%tt = xor <2 x i256> %x, %bb
store <2 x i256> %tt, <2 x i256>* %b
br label %next
@@ -148,7 +148,7 @@ next:
define <2 x i512> @test_2xi512(<2 x i512> %x, <2 x i512>* %b) {
- %bb = load <2 x i512>* %b
+ %bb = load <2 x i512>, <2 x i512>* %b
%tt = xor <2 x i512> %x, %bb
store <2 x i512> %tt, <2 x i512>* %b
br label %next
@@ -159,7 +159,7 @@ next:
define <3 x i8> @test_3xi8(<3 x i8> %x, <3 x i8>* %b) {
- %bb = load <3 x i8>* %b
+ %bb = load <3 x i8>, <3 x i8>* %b
%tt = xor <3 x i8> %x, %bb
store <3 x i8> %tt, <3 x i8>* %b
br label %next
@@ -170,7 +170,7 @@ next:
define <3 x i16> @test_3xi16(<3 x i16> %x, <3 x i16>* %b) {
- %bb = load <3 x i16>* %b
+ %bb = load <3 x i16>, <3 x i16>* %b
%tt = xor <3 x i16> %x, %bb
store <3 x i16> %tt, <3 x i16>* %b
br label %next
@@ -181,7 +181,7 @@ next:
define <3 x i32> @test_3xi32(<3 x i32> %x, <3 x i32>* %b) {
- %bb = load <3 x i32>* %b
+ %bb = load <3 x i32>, <3 x i32>* %b
%tt = xor <3 x i32> %x, %bb
store <3 x i32> %tt, <3 x i32>* %b
br label %next
@@ -192,7 +192,7 @@ next:
define <3 x i64> @test_3xi64(<3 x i64> %x, <3 x i64>* %b) {
- %bb = load <3 x i64>* %b
+ %bb = load <3 x i64>, <3 x i64>* %b
%tt = xor <3 x i64> %x, %bb
store <3 x i64> %tt, <3 x i64>* %b
br label %next
@@ -203,7 +203,7 @@ next:
define <3 x i128> @test_3xi128(<3 x i128> %x, <3 x i128>* %b) {
- %bb = load <3 x i128>* %b
+ %bb = load <3 x i128>, <3 x i128>* %b
%tt = xor <3 x i128> %x, %bb
store <3 x i128> %tt, <3 x i128>* %b
br label %next
@@ -214,7 +214,7 @@ next:
define <3 x i256> @test_3xi256(<3 x i256> %x, <3 x i256>* %b) {
- %bb = load <3 x i256>* %b
+ %bb = load <3 x i256>, <3 x i256>* %b
%tt = xor <3 x i256> %x, %bb
store <3 x i256> %tt, <3 x i256>* %b
br label %next
@@ -225,7 +225,7 @@ next:
define <3 x i512> @test_3xi512(<3 x i512> %x, <3 x i512>* %b) {
- %bb = load <3 x i512>* %b
+ %bb = load <3 x i512>, <3 x i512>* %b
%tt = xor <3 x i512> %x, %bb
store <3 x i512> %tt, <3 x i512>* %b
br label %next
@@ -236,7 +236,7 @@ next:
define <4 x i8> @test_4xi8(<4 x i8> %x, <4 x i8>* %b) {
- %bb = load <4 x i8>* %b
+ %bb = load <4 x i8>, <4 x i8>* %b
%tt = xor <4 x i8> %x, %bb
store <4 x i8> %tt, <4 x i8>* %b
br label %next
@@ -247,7 +247,7 @@ next:
define <4 x i16> @test_4xi16(<4 x i16> %x, <4 x i16>* %b) {
- %bb = load <4 x i16>* %b
+ %bb = load <4 x i16>, <4 x i16>* %b
%tt = xor <4 x i16> %x, %bb
store <4 x i16> %tt, <4 x i16>* %b
br label %next
@@ -258,7 +258,7 @@ next:
define <4 x i32> @test_4xi32(<4 x i32> %x, <4 x i32>* %b) {
- %bb = load <4 x i32>* %b
+ %bb = load <4 x i32>, <4 x i32>* %b
%tt = xor <4 x i32> %x, %bb
store <4 x i32> %tt, <4 x i32>* %b
br label %next
@@ -269,7 +269,7 @@ next:
define <4 x i64> @test_4xi64(<4 x i64> %x, <4 x i64>* %b) {
- %bb = load <4 x i64>* %b
+ %bb = load <4 x i64>, <4 x i64>* %b
%tt = xor <4 x i64> %x, %bb
store <4 x i64> %tt, <4 x i64>* %b
br label %next
@@ -280,7 +280,7 @@ next:
define <4 x i128> @test_4xi128(<4 x i128> %x, <4 x i128>* %b) {
- %bb = load <4 x i128>* %b
+ %bb = load <4 x i128>, <4 x i128>* %b
%tt = xor <4 x i128> %x, %bb
store <4 x i128> %tt, <4 x i128>* %b
br label %next
@@ -291,7 +291,7 @@ next:
define <4 x i256> @test_4xi256(<4 x i256> %x, <4 x i256>* %b) {
- %bb = load <4 x i256>* %b
+ %bb = load <4 x i256>, <4 x i256>* %b
%tt = xor <4 x i256> %x, %bb
store <4 x i256> %tt, <4 x i256>* %b
br label %next
@@ -302,7 +302,7 @@ next:
define <4 x i512> @test_4xi512(<4 x i512> %x, <4 x i512>* %b) {
- %bb = load <4 x i512>* %b
+ %bb = load <4 x i512>, <4 x i512>* %b
%tt = xor <4 x i512> %x, %bb
store <4 x i512> %tt, <4 x i512>* %b
br label %next
@@ -313,7 +313,7 @@ next:
define <5 x i8> @test_5xi8(<5 x i8> %x, <5 x i8>* %b) {
- %bb = load <5 x i8>* %b
+ %bb = load <5 x i8>, <5 x i8>* %b
%tt = xor <5 x i8> %x, %bb
store <5 x i8> %tt, <5 x i8>* %b
br label %next
@@ -324,7 +324,7 @@ next:
define <5 x i16> @test_5xi16(<5 x i16> %x, <5 x i16>* %b) {
- %bb = load <5 x i16>* %b
+ %bb = load <5 x i16>, <5 x i16>* %b
%tt = xor <5 x i16> %x, %bb
store <5 x i16> %tt, <5 x i16>* %b
br label %next
@@ -335,7 +335,7 @@ next:
define <5 x i32> @test_5xi32(<5 x i32> %x, <5 x i32>* %b) {
- %bb = load <5 x i32>* %b
+ %bb = load <5 x i32>, <5 x i32>* %b
%tt = xor <5 x i32> %x, %bb
store <5 x i32> %tt, <5 x i32>* %b
br label %next
@@ -346,7 +346,7 @@ next:
define <5 x i64> @test_5xi64(<5 x i64> %x, <5 x i64>* %b) {
- %bb = load <5 x i64>* %b
+ %bb = load <5 x i64>, <5 x i64>* %b
%tt = xor <5 x i64> %x, %bb
store <5 x i64> %tt, <5 x i64>* %b
br label %next
@@ -357,7 +357,7 @@ next:
define <5 x i128> @test_5xi128(<5 x i128> %x, <5 x i128>* %b) {
- %bb = load <5 x i128>* %b
+ %bb = load <5 x i128>, <5 x i128>* %b
%tt = xor <5 x i128> %x, %bb
store <5 x i128> %tt, <5 x i128>* %b
br label %next
@@ -368,7 +368,7 @@ next:
define <5 x i256> @test_5xi256(<5 x i256> %x, <5 x i256>* %b) {
- %bb = load <5 x i256>* %b
+ %bb = load <5 x i256>, <5 x i256>* %b
%tt = xor <5 x i256> %x, %bb
store <5 x i256> %tt, <5 x i256>* %b
br label %next
@@ -379,7 +379,7 @@ next:
define <5 x i512> @test_5xi512(<5 x i512> %x, <5 x i512>* %b) {
- %bb = load <5 x i512>* %b
+ %bb = load <5 x i512>, <5 x i512>* %b
%tt = xor <5 x i512> %x, %bb
store <5 x i512> %tt, <5 x i512>* %b
br label %next
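The same rule covers the vector and wide-integer loads exercised above: the explicit type parameter matches the loaded value exactly, however wide or exotic. A sketch in the shape of the tests in this file, with a hypothetical <4 x i32> buffer:

  define <4 x i32> @xor_in_place(<4 x i32> %x, <4 x i32>* %b) {
    ; explicit <4 x i32> result type, mirroring the updated tests
    %v = load <4 x i32>, <4 x i32>* %b
    %r = xor <4 x i32> %x, %v
    store <4 x i32> %r, <4 x i32>* %b
    ret <4 x i32> %r
  }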
Modified: llvm/trunk/test/CodeGen/X86/misaligned-memset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/misaligned-memset.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/misaligned-memset.ll (original)
+++ llvm/trunk/test/CodeGen/X86/misaligned-memset.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ entry:
%retval = alloca i32, align 4
store i32 0, i32* %retval
call void @llvm.memset.p0i8.i64(i8* bitcast (i64* getelementptr inbounds ([3 x i64]* @a, i32 0, i64 1) to i8*), i8 0, i64 16, i32 1, i1 false)
- %0 = load i32* %retval
+ %0 = load i32, i32* %retval
ret i32 %0
}
Modified: llvm/trunk/test/CodeGen/X86/misched-aa-colored.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/misched-aa-colored.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/misched-aa-colored.ll (original)
+++ llvm/trunk/test/CodeGen/X86/misched-aa-colored.ll Fri Feb 27 15:17:42 2015
@@ -156,9 +156,9 @@ entry:
%Op.i = alloca %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083", align 8
%0 = bitcast %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199"* %ref.tmp.i to i8*
%retval.sroa.0.0.idx.i36 = getelementptr inbounds %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199", %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199"* %ref.tmp.i, i64 0, i32 1, i32 0, i32 0
- %retval.sroa.0.0.copyload.i37 = load i32* %retval.sroa.0.0.idx.i36, align 8
+ %retval.sroa.0.0.copyload.i37 = load i32, i32* %retval.sroa.0.0.idx.i36, align 8
call void @llvm.lifetime.end(i64 24, i8* %0) #1
- %agg.tmp8.sroa.2.0.copyload = load i32* undef, align 8
+ %agg.tmp8.sroa.2.0.copyload = load i32, i32* undef, align 8
%1 = bitcast %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* %Op.i to i8*
call void @llvm.lifetime.start(i64 16, i8* %1) #1
%2 = getelementptr %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083", %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* %Op.i, i64 0, i32 1
Modified: llvm/trunk/test/CodeGen/X86/misched-aa-mmos.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/misched-aa-mmos.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/misched-aa-mmos.ll (original)
+++ llvm/trunk/test/CodeGen/X86/misched-aa-mmos.ll Fri Feb 27 15:17:42 2015
@@ -20,11 +20,11 @@ entry:
cond.end.i:
%significand.i18.i = getelementptr inbounds %c1, %c1* %temp_rhs, i64 0, i32 1
%exponent.i = getelementptr inbounds %c1, %c1* %temp_rhs, i64 0, i32 2
- %0 = load i16* %exponent.i, align 8
+ %0 = load i16, i16* %exponent.i, align 8
%sub.i = add i16 %0, -1
store i16 %sub.i, i16* %exponent.i, align 8
%parts.i.i = bitcast %u1* %significand.i18.i to i64**
- %1 = load i64** %parts.i.i, align 8
+ %1 = load i64*, i64** %parts.i.i, align 8
%call5.i = call zeroext i1 @bar(i64* %1, i32 undef) #1
unreachable
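When the loaded value is itself a pointer, as in %1 above, the explicit first operand is the pointee type (i64*) while the pointer operand keeps its full two-level type (i64**). A minimal sketch with a hypothetical function, not taken from this commit:

define i64* @load_ptr_sketch(i64** %pp) {
  ; the explicit type is what the load produces: one pointer level below the operand's type
  %p = load i64*, i64** %pp, align 8
  ret i64* %p
}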
Modified: llvm/trunk/test/CodeGen/X86/misched-balance.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/misched-balance.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/misched-balance.ll (original)
+++ llvm/trunk/test/CodeGen/X86/misched-balance.ll Fri Feb 27 15:17:42 2015
@@ -48,62 +48,62 @@ entry:
; CHECK-LABEL: %end
for.body:
%indvars.iv42.i = phi i64 [ %indvars.iv.next43.i, %for.body ], [ 0, %entry ]
- %tmp57 = load i32* %tmp56, align 4
+ %tmp57 = load i32, i32* %tmp56, align 4
%arrayidx12.us.i61 = getelementptr inbounds i32, i32* %pre, i64 %indvars.iv42.i
- %tmp58 = load i32* %arrayidx12.us.i61, align 4
+ %tmp58 = load i32, i32* %arrayidx12.us.i61, align 4
%mul.us.i = mul nsw i32 %tmp58, %tmp57
%arrayidx8.us.i.1 = getelementptr inbounds i32, i32* %tmp56, i64 1
- %tmp59 = load i32* %arrayidx8.us.i.1, align 4
+ %tmp59 = load i32, i32* %arrayidx8.us.i.1, align 4
%arrayidx12.us.i61.1 = getelementptr inbounds i32, i32* %pre94, i64 %indvars.iv42.i
- %tmp60 = load i32* %arrayidx12.us.i61.1, align 4
+ %tmp60 = load i32, i32* %arrayidx12.us.i61.1, align 4
%mul.us.i.1 = mul nsw i32 %tmp60, %tmp59
%add.us.i.1 = add nsw i32 %mul.us.i.1, %mul.us.i
%arrayidx8.us.i.2 = getelementptr inbounds i32, i32* %tmp56, i64 2
- %tmp61 = load i32* %arrayidx8.us.i.2, align 4
+ %tmp61 = load i32, i32* %arrayidx8.us.i.2, align 4
%arrayidx12.us.i61.2 = getelementptr inbounds i32, i32* %pre95, i64 %indvars.iv42.i
- %tmp62 = load i32* %arrayidx12.us.i61.2, align 4
+ %tmp62 = load i32, i32* %arrayidx12.us.i61.2, align 4
%mul.us.i.2 = mul nsw i32 %tmp62, %tmp61
%add.us.i.2 = add nsw i32 %mul.us.i.2, %add.us.i.1
%arrayidx8.us.i.3 = getelementptr inbounds i32, i32* %tmp56, i64 3
- %tmp63 = load i32* %arrayidx8.us.i.3, align 4
+ %tmp63 = load i32, i32* %arrayidx8.us.i.3, align 4
%arrayidx12.us.i61.3 = getelementptr inbounds i32, i32* %pre96, i64 %indvars.iv42.i
- %tmp64 = load i32* %arrayidx12.us.i61.3, align 4
+ %tmp64 = load i32, i32* %arrayidx12.us.i61.3, align 4
%mul.us.i.3 = mul nsw i32 %tmp64, %tmp63
%add.us.i.3 = add nsw i32 %mul.us.i.3, %add.us.i.2
%arrayidx8.us.i.4 = getelementptr inbounds i32, i32* %tmp56, i64 4
- %tmp65 = load i32* %arrayidx8.us.i.4, align 4
+ %tmp65 = load i32, i32* %arrayidx8.us.i.4, align 4
%arrayidx12.us.i61.4 = getelementptr inbounds i32, i32* %pre97, i64 %indvars.iv42.i
- %tmp66 = load i32* %arrayidx12.us.i61.4, align 4
+ %tmp66 = load i32, i32* %arrayidx12.us.i61.4, align 4
%mul.us.i.4 = mul nsw i32 %tmp66, %tmp65
%add.us.i.4 = add nsw i32 %mul.us.i.4, %add.us.i.3
%arrayidx8.us.i.5 = getelementptr inbounds i32, i32* %tmp56, i64 5
- %tmp67 = load i32* %arrayidx8.us.i.5, align 4
+ %tmp67 = load i32, i32* %arrayidx8.us.i.5, align 4
%arrayidx12.us.i61.5 = getelementptr inbounds i32, i32* %pre98, i64 %indvars.iv42.i
- %tmp68 = load i32* %arrayidx12.us.i61.5, align 4
+ %tmp68 = load i32, i32* %arrayidx12.us.i61.5, align 4
%mul.us.i.5 = mul nsw i32 %tmp68, %tmp67
%add.us.i.5 = add nsw i32 %mul.us.i.5, %add.us.i.4
%arrayidx8.us.i.6 = getelementptr inbounds i32, i32* %tmp56, i64 6
- %tmp69 = load i32* %arrayidx8.us.i.6, align 4
+ %tmp69 = load i32, i32* %arrayidx8.us.i.6, align 4
%arrayidx12.us.i61.6 = getelementptr inbounds i32, i32* %pre99, i64 %indvars.iv42.i
- %tmp70 = load i32* %arrayidx12.us.i61.6, align 4
+ %tmp70 = load i32, i32* %arrayidx12.us.i61.6, align 4
%mul.us.i.6 = mul nsw i32 %tmp70, %tmp69
%add.us.i.6 = add nsw i32 %mul.us.i.6, %add.us.i.5
%arrayidx8.us.i.7 = getelementptr inbounds i32, i32* %tmp56, i64 7
- %tmp71 = load i32* %arrayidx8.us.i.7, align 4
+ %tmp71 = load i32, i32* %arrayidx8.us.i.7, align 4
%arrayidx12.us.i61.7 = getelementptr inbounds i32, i32* %pre100, i64 %indvars.iv42.i
- %tmp72 = load i32* %arrayidx12.us.i61.7, align 4
+ %tmp72 = load i32, i32* %arrayidx12.us.i61.7, align 4
%mul.us.i.7 = mul nsw i32 %tmp72, %tmp71
%add.us.i.7 = add nsw i32 %mul.us.i.7, %add.us.i.6
%arrayidx8.us.i.8 = getelementptr inbounds i32, i32* %tmp56, i64 8
- %tmp73 = load i32* %arrayidx8.us.i.8, align 4
+ %tmp73 = load i32, i32* %arrayidx8.us.i.8, align 4
%arrayidx12.us.i61.8 = getelementptr inbounds i32, i32* %pre101, i64 %indvars.iv42.i
- %tmp74 = load i32* %arrayidx12.us.i61.8, align 4
+ %tmp74 = load i32, i32* %arrayidx12.us.i61.8, align 4
%mul.us.i.8 = mul nsw i32 %tmp74, %tmp73
%add.us.i.8 = add nsw i32 %mul.us.i.8, %add.us.i.7
%arrayidx8.us.i.9 = getelementptr inbounds i32, i32* %tmp56, i64 9
- %tmp75 = load i32* %arrayidx8.us.i.9, align 4
+ %tmp75 = load i32, i32* %arrayidx8.us.i.9, align 4
%arrayidx12.us.i61.9 = getelementptr inbounds i32, i32* %pre102, i64 %indvars.iv42.i
- %tmp76 = load i32* %arrayidx12.us.i61.9, align 4
+ %tmp76 = load i32, i32* %arrayidx12.us.i61.9, align 4
%mul.us.i.9 = mul nsw i32 %tmp76, %tmp75
%add.us.i.9 = add nsw i32 %mul.us.i.9, %add.us.i.8
%arrayidx16.us.i = getelementptr inbounds i32, i32* %tmp55, i64 %indvars.iv42.i
@@ -159,46 +159,46 @@ entry:
br label %for.body
for.body:
%indvars.iv42.i = phi i64 [ %indvars.iv.next43.i, %for.body ], [ 0, %entry ]
- %tmp57 = load i32* %tmp56, align 4
+ %tmp57 = load i32, i32* %tmp56, align 4
%arrayidx12.us.i61 = getelementptr inbounds i32, i32* %pre, i64 %indvars.iv42.i
- %tmp58 = load i32* %arrayidx12.us.i61, align 4
+ %tmp58 = load i32, i32* %arrayidx12.us.i61, align 4
%arrayidx8.us.i.1 = getelementptr inbounds i32, i32* %tmp56, i64 1
- %tmp59 = load i32* %arrayidx8.us.i.1, align 4
+ %tmp59 = load i32, i32* %arrayidx8.us.i.1, align 4
%arrayidx12.us.i61.1 = getelementptr inbounds i32, i32* %pre94, i64 %indvars.iv42.i
- %tmp60 = load i32* %arrayidx12.us.i61.1, align 4
+ %tmp60 = load i32, i32* %arrayidx12.us.i61.1, align 4
%arrayidx8.us.i.2 = getelementptr inbounds i32, i32* %tmp56, i64 2
- %tmp61 = load i32* %arrayidx8.us.i.2, align 4
+ %tmp61 = load i32, i32* %arrayidx8.us.i.2, align 4
%arrayidx12.us.i61.2 = getelementptr inbounds i32, i32* %pre95, i64 %indvars.iv42.i
- %tmp62 = load i32* %arrayidx12.us.i61.2, align 4
+ %tmp62 = load i32, i32* %arrayidx12.us.i61.2, align 4
%arrayidx8.us.i.3 = getelementptr inbounds i32, i32* %tmp56, i64 3
- %tmp63 = load i32* %arrayidx8.us.i.3, align 4
+ %tmp63 = load i32, i32* %arrayidx8.us.i.3, align 4
%arrayidx12.us.i61.3 = getelementptr inbounds i32, i32* %pre96, i64 %indvars.iv42.i
- %tmp64 = load i32* %arrayidx12.us.i61.3, align 4
+ %tmp64 = load i32, i32* %arrayidx12.us.i61.3, align 4
%arrayidx8.us.i.4 = getelementptr inbounds i32, i32* %tmp56, i64 4
- %tmp65 = load i32* %arrayidx8.us.i.4, align 4
+ %tmp65 = load i32, i32* %arrayidx8.us.i.4, align 4
%arrayidx12.us.i61.4 = getelementptr inbounds i32, i32* %pre97, i64 %indvars.iv42.i
- %tmp66 = load i32* %arrayidx12.us.i61.4, align 4
+ %tmp66 = load i32, i32* %arrayidx12.us.i61.4, align 4
%arrayidx8.us.i.5 = getelementptr inbounds i32, i32* %tmp56, i64 5
- %tmp67 = load i32* %arrayidx8.us.i.5, align 4
+ %tmp67 = load i32, i32* %arrayidx8.us.i.5, align 4
%arrayidx12.us.i61.5 = getelementptr inbounds i32, i32* %pre98, i64 %indvars.iv42.i
- %tmp68 = load i32* %arrayidx12.us.i61.5, align 4
+ %tmp68 = load i32, i32* %arrayidx12.us.i61.5, align 4
%arrayidx8.us.i.6 = getelementptr inbounds i32, i32* %tmp56, i64 6
- %tmp69 = load i32* %arrayidx8.us.i.6, align 4
+ %tmp69 = load i32, i32* %arrayidx8.us.i.6, align 4
%arrayidx12.us.i61.6 = getelementptr inbounds i32, i32* %pre99, i64 %indvars.iv42.i
- %tmp70 = load i32* %arrayidx12.us.i61.6, align 4
+ %tmp70 = load i32, i32* %arrayidx12.us.i61.6, align 4
%mul.us.i = mul nsw i32 %tmp58, %tmp57
%arrayidx8.us.i.7 = getelementptr inbounds i32, i32* %tmp56, i64 7
- %tmp71 = load i32* %arrayidx8.us.i.7, align 4
+ %tmp71 = load i32, i32* %arrayidx8.us.i.7, align 4
%arrayidx12.us.i61.7 = getelementptr inbounds i32, i32* %pre100, i64 %indvars.iv42.i
- %tmp72 = load i32* %arrayidx12.us.i61.7, align 4
+ %tmp72 = load i32, i32* %arrayidx12.us.i61.7, align 4
%arrayidx8.us.i.8 = getelementptr inbounds i32, i32* %tmp56, i64 8
- %tmp73 = load i32* %arrayidx8.us.i.8, align 4
+ %tmp73 = load i32, i32* %arrayidx8.us.i.8, align 4
%arrayidx12.us.i61.8 = getelementptr inbounds i32, i32* %pre101, i64 %indvars.iv42.i
- %tmp74 = load i32* %arrayidx12.us.i61.8, align 4
+ %tmp74 = load i32, i32* %arrayidx12.us.i61.8, align 4
%arrayidx8.us.i.9 = getelementptr inbounds i32, i32* %tmp56, i64 9
- %tmp75 = load i32* %arrayidx8.us.i.9, align 4
+ %tmp75 = load i32, i32* %arrayidx8.us.i.9, align 4
%arrayidx12.us.i61.9 = getelementptr inbounds i32, i32* %pre102, i64 %indvars.iv42.i
- %tmp76 = load i32* %arrayidx12.us.i61.9, align 4
+ %tmp76 = load i32, i32* %arrayidx12.us.i61.9, align 4
%mul.us.i.1 = mul nsw i32 %tmp60, %tmp59
%add.us.i.1 = add nsw i32 %mul.us.i.1, %mul.us.i
%mul.us.i.2 = mul nsw i32 %tmp62, %tmp61
@@ -243,20 +243,20 @@ end:
@d = external global i32, align 4
define i32 @encpc1() nounwind {
entry:
- %l1 = load i32* @a, align 16
+ %l1 = load i32, i32* @a, align 16
%conv = shl i32 %l1, 8
%s5 = lshr i32 %l1, 8
%add = or i32 %conv, %s5
store i32 %add, i32* @b
- %l6 = load i32* @a
- %l7 = load i32* @c
+ %l6 = load i32, i32* @a
+ %l7 = load i32, i32* @c
%add.i = add i32 %l7, %l6
%idxprom.i = zext i32 %l7 to i64
%arrayidx.i = getelementptr inbounds i32, i32* @d, i64 %idxprom.i
- %l8 = load i32* %arrayidx.i
+ %l8 = load i32, i32* %arrayidx.i
store i32 346, i32* @c
store i32 20021, i32* @d
- %l9 = load i32* @a
+ %l9 = load i32, i32* @a
store i32 %l8, i32* @a
store i32 %l9, i32* @b
store i32 %add.i, i32* @c
Modified: llvm/trunk/test/CodeGen/X86/misched-code-difference-with-debug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/misched-code-difference-with-debug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/misched-code-difference-with-debug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/misched-code-difference-with-debug.ll Fri Feb 27 15:17:42 2015
@@ -32,10 +32,10 @@ declare i32 @test_function(%class.C*, i8
define void @test_without_debug() {
entry:
%c = alloca %class.C, align 1
- %0 = load i8* @argc, align 1
+ %0 = load i8, i8* @argc, align 1
%conv = sext i8 %0 to i32
%call = call i32 (%class.C*, i8, i8, i8, ...)* @test_function(%class.C* %c, i8 signext 0, i8 signext %0, i8 signext 0, i32 %conv)
- %1 = load i8* @argc, align 1
+ %1 = load i8, i8* @argc, align 1
%call2 = call i32 (%class.C*, i8, i8, i8, ...)* @test_function(%class.C* %c, i8 signext 0, i8 signext %1, i8 signext 0, i32 %conv)
ret void
}
@@ -46,12 +46,12 @@ entry:
define void @test_with_debug() {
entry:
%c = alloca %class.C, align 1
- %0 = load i8* @argc, align 1
+ %0 = load i8, i8* @argc, align 1
tail call void @llvm.dbg.value(metadata i8 %0, i64 0, metadata !19, metadata !29)
%conv = sext i8 %0 to i32
tail call void @llvm.dbg.value(metadata %class.C* %c, i64 0, metadata !18, metadata !29)
%call = call i32 (%class.C*, i8, i8, i8, ...)* @test_function(%class.C* %c, i8 signext 0, i8 signext %0, i8 signext 0, i32 %conv)
- %1 = load i8* @argc, align 1
+ %1 = load i8, i8* @argc, align 1
call void @llvm.dbg.value(metadata %class.C* %c, i64 0, metadata !18, metadata !29)
%call2 = call i32 (%class.C*, i8, i8, i8, ...)* @test_function(%class.C* %c, i8 signext 0, i8 signext %1, i8 signext 0, i32 %conv)
ret void
Modified: llvm/trunk/test/CodeGen/X86/misched-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/misched-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/misched-crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/misched-crash.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ entry:
%cmp = icmp ult i64 %_x1, %_x2
%cond = select i1 %cmp, i64 %_x1, i64 %_x2
%cond10 = select i1 %cmp, i64 %_x2, i64 %_x1
- %0 = load i64* null, align 8
+ %0 = load i64, i64* null, align 8
%cmp16 = icmp ult i64 %cond, %0
%cmp23 = icmp ugt i64 %cond10, 0
br i1 %cmp16, label %land.lhs.true21, label %return
@@ -27,7 +27,7 @@ if.then24:
for.body34.i: ; preds = %for.inc39.i, %if.then24
%index.178.i = phi i64 [ %add21.i, %if.then24 ], [ %inc41.i, %for.inc39.i ]
%arrayidx35.i = getelementptr inbounds i8, i8* %plane, i64 %index.178.i
- %1 = load i8* %arrayidx35.i, align 1
+ %1 = load i8, i8* %arrayidx35.i, align 1
%tobool36.i = icmp eq i8 %1, 0
br i1 %tobool36.i, label %for.inc39.i, label %return
Modified: llvm/trunk/test/CodeGen/X86/misched-fusion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/misched-fusion.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/misched-fusion.ll (original)
+++ llvm/trunk/test/CodeGen/X86/misched-fusion.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ loop:
loop1:
%cond = icmp eq i32* %var, null
- %next.load = load i32** %next.ptr
+ %next.load = load i32*, i32** %next.ptr
br i1 %cond, label %loop, label %loop2
loop2: ; preds = %loop1
@@ -42,8 +42,8 @@ loop:
loop1:
%var2 = sub i32 %var, 1
%cond = icmp eq i32 %var2, 0
- %next.load = load i32** %next.ptr
- %next.var = load i32* %next.load
+ %next.load = load i32*, i32** %next.ptr
+ %next.var = load i32, i32* %next.load
br i1 %cond, label %loop, label %loop2
loop2:
@@ -70,8 +70,8 @@ loop2a:
loop1: ; preds = %loop2a, %loop2b
%var2 = sub i32 %var, 1
%cond = icmp slt i32 %var2, 0
- %next.load = load i32** %next.ptr
- %next.var = load i32* %next.load
+ %next.load = load i32*, i32** %next.ptr
+ %next.var = load i32, i32* %next.load
br i1 %cond, label %loop2a, label %loop2b
loop2b: ; preds = %loop1
@@ -97,8 +97,8 @@ loop2a:
loop1: ; preds = %loop2a, %loop2b
%var2 = sub i32 %var, 1
%cond = icmp ult i32 %var2, %n
- %next.load = load i32** %next.ptr
- %next.var = load i32* %next.load
+ %next.load = load i32*, i32** %next.ptr
+ %next.var = load i32, i32* %next.load
br i1 %cond, label %loop2a, label %loop2b
loop2b: ; preds = %loop1
Modified: llvm/trunk/test/CodeGen/X86/misched-matmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/misched-matmul.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/misched-matmul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/misched-matmul.ll Fri Feb 27 15:17:42 2015
@@ -15,86 +15,86 @@
define void @wrap_mul4(double* nocapture %Out, [4 x double]* nocapture %A, [4 x double]* nocapture %B) #0 {
entry:
%arrayidx1.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 0
- %0 = load double* %arrayidx1.i, align 8
+ %0 = load double, double* %arrayidx1.i, align 8
%arrayidx3.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 0
- %1 = load double* %arrayidx3.i, align 8
+ %1 = load double, double* %arrayidx3.i, align 8
%mul.i = fmul double %0, %1
%arrayidx5.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 1
- %2 = load double* %arrayidx5.i, align 8
+ %2 = load double, double* %arrayidx5.i, align 8
%arrayidx7.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 0
- %3 = load double* %arrayidx7.i, align 8
+ %3 = load double, double* %arrayidx7.i, align 8
%mul8.i = fmul double %2, %3
%add.i = fadd double %mul.i, %mul8.i
%arrayidx10.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 2
- %4 = load double* %arrayidx10.i, align 8
+ %4 = load double, double* %arrayidx10.i, align 8
%arrayidx12.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 2, i64 0
- %5 = load double* %arrayidx12.i, align 8
+ %5 = load double, double* %arrayidx12.i, align 8
%mul13.i = fmul double %4, %5
%add14.i = fadd double %add.i, %mul13.i
%arrayidx16.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 3
- %6 = load double* %arrayidx16.i, align 8
+ %6 = load double, double* %arrayidx16.i, align 8
%arrayidx18.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 3, i64 0
- %7 = load double* %arrayidx18.i, align 8
+ %7 = load double, double* %arrayidx18.i, align 8
%mul19.i = fmul double %6, %7
%add20.i = fadd double %add14.i, %mul19.i
%arrayidx25.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 1
- %8 = load double* %arrayidx25.i, align 8
+ %8 = load double, double* %arrayidx25.i, align 8
%mul26.i = fmul double %0, %8
%arrayidx30.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 1
- %9 = load double* %arrayidx30.i, align 8
+ %9 = load double, double* %arrayidx30.i, align 8
%mul31.i = fmul double %2, %9
%add32.i = fadd double %mul26.i, %mul31.i
%arrayidx36.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 2, i64 1
- %10 = load double* %arrayidx36.i, align 8
+ %10 = load double, double* %arrayidx36.i, align 8
%mul37.i = fmul double %4, %10
%add38.i = fadd double %add32.i, %mul37.i
%arrayidx42.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 3, i64 1
- %11 = load double* %arrayidx42.i, align 8
+ %11 = load double, double* %arrayidx42.i, align 8
%mul43.i = fmul double %6, %11
%add44.i = fadd double %add38.i, %mul43.i
%arrayidx49.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 2
- %12 = load double* %arrayidx49.i, align 8
+ %12 = load double, double* %arrayidx49.i, align 8
%mul50.i = fmul double %0, %12
%arrayidx54.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 2
- %13 = load double* %arrayidx54.i, align 8
+ %13 = load double, double* %arrayidx54.i, align 8
%mul55.i = fmul double %2, %13
%add56.i = fadd double %mul50.i, %mul55.i
%arrayidx60.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 2, i64 2
- %14 = load double* %arrayidx60.i, align 8
+ %14 = load double, double* %arrayidx60.i, align 8
%mul61.i = fmul double %4, %14
%add62.i = fadd double %add56.i, %mul61.i
%arrayidx66.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 3, i64 2
- %15 = load double* %arrayidx66.i, align 8
+ %15 = load double, double* %arrayidx66.i, align 8
%mul67.i = fmul double %6, %15
%add68.i = fadd double %add62.i, %mul67.i
%arrayidx73.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 3
- %16 = load double* %arrayidx73.i, align 8
+ %16 = load double, double* %arrayidx73.i, align 8
%mul74.i = fmul double %0, %16
%arrayidx78.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 3
- %17 = load double* %arrayidx78.i, align 8
+ %17 = load double, double* %arrayidx78.i, align 8
%mul79.i = fmul double %2, %17
%add80.i = fadd double %mul74.i, %mul79.i
%arrayidx84.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 2, i64 3
- %18 = load double* %arrayidx84.i, align 8
+ %18 = load double, double* %arrayidx84.i, align 8
%mul85.i = fmul double %4, %18
%add86.i = fadd double %add80.i, %mul85.i
%arrayidx90.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 3, i64 3
- %19 = load double* %arrayidx90.i, align 8
+ %19 = load double, double* %arrayidx90.i, align 8
%mul91.i = fmul double %6, %19
%add92.i = fadd double %add86.i, %mul91.i
%arrayidx95.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 1, i64 0
- %20 = load double* %arrayidx95.i, align 8
+ %20 = load double, double* %arrayidx95.i, align 8
%mul98.i = fmul double %1, %20
%arrayidx100.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 1, i64 1
- %21 = load double* %arrayidx100.i, align 8
+ %21 = load double, double* %arrayidx100.i, align 8
%mul103.i = fmul double %3, %21
%add104.i = fadd double %mul98.i, %mul103.i
%arrayidx106.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 1, i64 2
- %22 = load double* %arrayidx106.i, align 8
+ %22 = load double, double* %arrayidx106.i, align 8
%mul109.i = fmul double %5, %22
%add110.i = fadd double %add104.i, %mul109.i
%arrayidx112.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 1, i64 3
- %23 = load double* %arrayidx112.i, align 8
+ %23 = load double, double* %arrayidx112.i, align 8
%mul115.i = fmul double %7, %23
%add116.i = fadd double %add110.i, %mul115.i
%mul122.i = fmul double %8, %20
@@ -119,18 +119,18 @@ entry:
%mul187.i = fmul double %19, %23
%add188.i = fadd double %add182.i, %mul187.i
%arrayidx191.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 2, i64 0
- %24 = load double* %arrayidx191.i, align 8
+ %24 = load double, double* %arrayidx191.i, align 8
%mul194.i = fmul double %1, %24
%arrayidx196.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 2, i64 1
- %25 = load double* %arrayidx196.i, align 8
+ %25 = load double, double* %arrayidx196.i, align 8
%mul199.i = fmul double %3, %25
%add200.i = fadd double %mul194.i, %mul199.i
%arrayidx202.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 2, i64 2
- %26 = load double* %arrayidx202.i, align 8
+ %26 = load double, double* %arrayidx202.i, align 8
%mul205.i = fmul double %5, %26
%add206.i = fadd double %add200.i, %mul205.i
%arrayidx208.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 2, i64 3
- %27 = load double* %arrayidx208.i, align 8
+ %27 = load double, double* %arrayidx208.i, align 8
%mul211.i = fmul double %7, %27
%add212.i = fadd double %add206.i, %mul211.i
%mul218.i = fmul double %8, %24
@@ -155,18 +155,18 @@ entry:
%mul283.i = fmul double %19, %27
%add284.i = fadd double %add278.i, %mul283.i
%arrayidx287.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 3, i64 0
- %28 = load double* %arrayidx287.i, align 8
+ %28 = load double, double* %arrayidx287.i, align 8
%mul290.i = fmul double %1, %28
%arrayidx292.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 3, i64 1
- %29 = load double* %arrayidx292.i, align 8
+ %29 = load double, double* %arrayidx292.i, align 8
%mul295.i = fmul double %3, %29
%add296.i = fadd double %mul290.i, %mul295.i
%arrayidx298.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 3, i64 2
- %30 = load double* %arrayidx298.i, align 8
+ %30 = load double, double* %arrayidx298.i, align 8
%mul301.i = fmul double %5, %30
%add302.i = fadd double %add296.i, %mul301.i
%arrayidx304.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 3, i64 3
- %31 = load double* %arrayidx304.i, align 8
+ %31 = load double, double* %arrayidx304.i, align 8
%mul307.i = fmul double %7, %31
%add308.i = fadd double %add302.i, %mul307.i
%mul314.i = fmul double %8, %28
Modified: llvm/trunk/test/CodeGen/X86/misched-matrix.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/misched-matrix.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/misched-matrix.ll (original)
+++ llvm/trunk/test/CodeGen/X86/misched-matrix.ll Fri Feb 27 15:17:42 2015
@@ -94,57 +94,57 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx8 = getelementptr inbounds [4 x i32], [4 x i32]* %m1, i64 %indvars.iv, i64 0
- %tmp = load i32* %arrayidx8, align 4
+ %tmp = load i32, i32* %arrayidx8, align 4
%arrayidx12 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 0, i64 0
- %tmp1 = load i32* %arrayidx12, align 4
+ %tmp1 = load i32, i32* %arrayidx12, align 4
%arrayidx8.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m1, i64 %indvars.iv, i64 1
- %tmp2 = load i32* %arrayidx8.1, align 4
+ %tmp2 = load i32, i32* %arrayidx8.1, align 4
%arrayidx12.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 1, i64 0
- %tmp3 = load i32* %arrayidx12.1, align 4
+ %tmp3 = load i32, i32* %arrayidx12.1, align 4
%arrayidx8.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m1, i64 %indvars.iv, i64 2
- %tmp4 = load i32* %arrayidx8.2, align 4
+ %tmp4 = load i32, i32* %arrayidx8.2, align 4
%arrayidx12.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 2, i64 0
- %tmp5 = load i32* %arrayidx12.2, align 4
+ %tmp5 = load i32, i32* %arrayidx12.2, align 4
%arrayidx8.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m1, i64 %indvars.iv, i64 3
- %tmp6 = load i32* %arrayidx8.3, align 4
+ %tmp6 = load i32, i32* %arrayidx8.3, align 4
%arrayidx12.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 3, i64 0
- %tmp8 = load i32* %arrayidx8, align 4
+ %tmp8 = load i32, i32* %arrayidx8, align 4
%arrayidx12.137 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 0, i64 1
- %tmp9 = load i32* %arrayidx12.137, align 4
- %tmp10 = load i32* %arrayidx8.1, align 4
+ %tmp9 = load i32, i32* %arrayidx12.137, align 4
+ %tmp10 = load i32, i32* %arrayidx8.1, align 4
%arrayidx12.1.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 1, i64 1
- %tmp11 = load i32* %arrayidx12.1.1, align 4
- %tmp12 = load i32* %arrayidx8.2, align 4
+ %tmp11 = load i32, i32* %arrayidx12.1.1, align 4
+ %tmp12 = load i32, i32* %arrayidx8.2, align 4
%arrayidx12.2.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 2, i64 1
- %tmp13 = load i32* %arrayidx12.2.1, align 4
- %tmp14 = load i32* %arrayidx8.3, align 4
+ %tmp13 = load i32, i32* %arrayidx12.2.1, align 4
+ %tmp14 = load i32, i32* %arrayidx8.3, align 4
%arrayidx12.3.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 3, i64 1
- %tmp15 = load i32* %arrayidx12.3.1, align 4
- %tmp16 = load i32* %arrayidx8, align 4
+ %tmp15 = load i32, i32* %arrayidx12.3.1, align 4
+ %tmp16 = load i32, i32* %arrayidx8, align 4
%arrayidx12.239 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 0, i64 2
- %tmp17 = load i32* %arrayidx12.239, align 4
- %tmp18 = load i32* %arrayidx8.1, align 4
+ %tmp17 = load i32, i32* %arrayidx12.239, align 4
+ %tmp18 = load i32, i32* %arrayidx8.1, align 4
%arrayidx12.1.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 1, i64 2
- %tmp19 = load i32* %arrayidx12.1.2, align 4
- %tmp20 = load i32* %arrayidx8.2, align 4
+ %tmp19 = load i32, i32* %arrayidx12.1.2, align 4
+ %tmp20 = load i32, i32* %arrayidx8.2, align 4
%arrayidx12.2.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 2, i64 2
- %tmp21 = load i32* %arrayidx12.2.2, align 4
- %tmp22 = load i32* %arrayidx8.3, align 4
+ %tmp21 = load i32, i32* %arrayidx12.2.2, align 4
+ %tmp22 = load i32, i32* %arrayidx8.3, align 4
%arrayidx12.3.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 3, i64 2
- %tmp23 = load i32* %arrayidx12.3.2, align 4
- %tmp24 = load i32* %arrayidx8, align 4
+ %tmp23 = load i32, i32* %arrayidx12.3.2, align 4
+ %tmp24 = load i32, i32* %arrayidx8, align 4
%arrayidx12.341 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 0, i64 3
- %tmp25 = load i32* %arrayidx12.341, align 4
- %tmp26 = load i32* %arrayidx8.1, align 4
+ %tmp25 = load i32, i32* %arrayidx12.341, align 4
+ %tmp26 = load i32, i32* %arrayidx8.1, align 4
%arrayidx12.1.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 1, i64 3
- %tmp27 = load i32* %arrayidx12.1.3, align 4
- %tmp28 = load i32* %arrayidx8.2, align 4
+ %tmp27 = load i32, i32* %arrayidx12.1.3, align 4
+ %tmp28 = load i32, i32* %arrayidx8.2, align 4
%arrayidx12.2.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 2, i64 3
- %tmp29 = load i32* %arrayidx12.2.3, align 4
- %tmp30 = load i32* %arrayidx8.3, align 4
+ %tmp29 = load i32, i32* %arrayidx12.2.3, align 4
+ %tmp30 = load i32, i32* %arrayidx8.3, align 4
%arrayidx12.3.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 3, i64 3
- %tmp31 = load i32* %arrayidx12.3.3, align 4
- %tmp7 = load i32* %arrayidx12.3, align 4
+ %tmp31 = load i32, i32* %arrayidx12.3.3, align 4
+ %tmp7 = load i32, i32* %arrayidx12.3, align 4
%mul = mul nsw i32 %tmp1, %tmp
%mul.1 = mul nsw i32 %tmp3, %tmp2
%mul.2 = mul nsw i32 %tmp5, %tmp4
Modified: llvm/trunk/test/CodeGen/X86/misched-new.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/misched-new.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/misched-new.ll (original)
+++ llvm/trunk/test/CodeGen/X86/misched-new.ll Fri Feb 27 15:17:42 2015
@@ -90,12 +90,12 @@ define void @hasundef() unnamed_addr uwt
; TOPDOWN: movzbl %al
; TOPDOWN: ret
define void @testSubregTracking() nounwind uwtable ssp align 2 {
- %tmp = load i8* undef, align 1
+ %tmp = load i8, i8* undef, align 1
%tmp6 = sub i8 0, %tmp
- %tmp7 = load i8* undef, align 1
+ %tmp7 = load i8, i8* undef, align 1
%tmp8 = udiv i8 %tmp6, %tmp7
%tmp9 = zext i8 %tmp8 to i64
- %tmp10 = load i8* undef, align 1
+ %tmp10 = load i8, i8* undef, align 1
%tmp11 = zext i8 %tmp10 to i64
%tmp12 = mul i64 %tmp11, %tmp9
%tmp13 = urem i8 %tmp6, %tmp7
Modified: llvm/trunk/test/CodeGen/X86/mmx-arg-passing-x86-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-arg-passing-x86-64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-arg-passing-x86-64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-arg-passing-x86-64.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ define void @t3() nounwind {
; X86-64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-64-NEXT: movb $1, %al
; X86-64-NEXT: jmp _pass_v8qi ## TAILCALL
- %tmp3 = load <8 x i8>* @g_v8qi, align 8
+ %tmp3 = load <8 x i8>, <8 x i8>* @g_v8qi, align 8
%tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
%tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind
ret void
Modified: llvm/trunk/test/CodeGen/X86/mmx-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-arith.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-arith.ll Fri Feb 27 15:17:42 2015
@@ -8,48 +8,48 @@
; X64-LABEL: test0
define void @test0(x86_mmx* %A, x86_mmx* %B) {
entry:
- %tmp1 = load x86_mmx* %A
- %tmp3 = load x86_mmx* %B
+ %tmp1 = load x86_mmx, x86_mmx* %A
+ %tmp3 = load x86_mmx, x86_mmx* %B
%tmp1a = bitcast x86_mmx %tmp1 to <8 x i8>
%tmp3a = bitcast x86_mmx %tmp3 to <8 x i8>
%tmp4 = add <8 x i8> %tmp1a, %tmp3a
%tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx
store x86_mmx %tmp4a, x86_mmx* %A
- %tmp7 = load x86_mmx* %B
+ %tmp7 = load x86_mmx, x86_mmx* %B
%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b(x86_mmx %tmp4a, x86_mmx %tmp7)
store x86_mmx %tmp12, x86_mmx* %A
- %tmp16 = load x86_mmx* %B
+ %tmp16 = load x86_mmx, x86_mmx* %B
%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %tmp12, x86_mmx %tmp16)
store x86_mmx %tmp21, x86_mmx* %A
- %tmp27 = load x86_mmx* %B
+ %tmp27 = load x86_mmx, x86_mmx* %B
%tmp21a = bitcast x86_mmx %tmp21 to <8 x i8>
%tmp27a = bitcast x86_mmx %tmp27 to <8 x i8>
%tmp28 = sub <8 x i8> %tmp21a, %tmp27a
%tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx
store x86_mmx %tmp28a, x86_mmx* %A
- %tmp31 = load x86_mmx* %B
+ %tmp31 = load x86_mmx, x86_mmx* %B
%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx %tmp28a, x86_mmx %tmp31)
store x86_mmx %tmp36, x86_mmx* %A
- %tmp40 = load x86_mmx* %B
+ %tmp40 = load x86_mmx, x86_mmx* %B
%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx %tmp36, x86_mmx %tmp40)
store x86_mmx %tmp45, x86_mmx* %A
- %tmp51 = load x86_mmx* %B
+ %tmp51 = load x86_mmx, x86_mmx* %B
%tmp45a = bitcast x86_mmx %tmp45 to <8 x i8>
%tmp51a = bitcast x86_mmx %tmp51 to <8 x i8>
%tmp52 = mul <8 x i8> %tmp45a, %tmp51a
%tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx
store x86_mmx %tmp52a, x86_mmx* %A
- %tmp57 = load x86_mmx* %B
+ %tmp57 = load x86_mmx, x86_mmx* %B
%tmp57a = bitcast x86_mmx %tmp57 to <8 x i8>
%tmp58 = and <8 x i8> %tmp52, %tmp57a
%tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx
store x86_mmx %tmp58a, x86_mmx* %A
- %tmp63 = load x86_mmx* %B
+ %tmp63 = load x86_mmx, x86_mmx* %B
%tmp63a = bitcast x86_mmx %tmp63 to <8 x i8>
%tmp64 = or <8 x i8> %tmp58, %tmp63a
%tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
store x86_mmx %tmp64a, x86_mmx* %A
- %tmp69 = load x86_mmx* %B
+ %tmp69 = load x86_mmx, x86_mmx* %B
%tmp69a = bitcast x86_mmx %tmp69 to <8 x i8>
%tmp64b = bitcast x86_mmx %tmp64a to <8 x i8>
%tmp70 = xor <8 x i8> %tmp64b, %tmp69a
@@ -63,37 +63,37 @@ entry:
; X64-LABEL: test1
define void @test1(x86_mmx* %A, x86_mmx* %B) {
entry:
- %tmp1 = load x86_mmx* %A
- %tmp3 = load x86_mmx* %B
+ %tmp1 = load x86_mmx, x86_mmx* %A
+ %tmp3 = load x86_mmx, x86_mmx* %B
%tmp1a = bitcast x86_mmx %tmp1 to <2 x i32>
%tmp3a = bitcast x86_mmx %tmp3 to <2 x i32>
%tmp4 = add <2 x i32> %tmp1a, %tmp3a
%tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx
store x86_mmx %tmp4a, x86_mmx* %A
- %tmp9 = load x86_mmx* %B
+ %tmp9 = load x86_mmx, x86_mmx* %B
%tmp9a = bitcast x86_mmx %tmp9 to <2 x i32>
%tmp10 = sub <2 x i32> %tmp4, %tmp9a
%tmp10a = bitcast <2 x i32> %tmp4 to x86_mmx
store x86_mmx %tmp10a, x86_mmx* %A
- %tmp15 = load x86_mmx* %B
+ %tmp15 = load x86_mmx, x86_mmx* %B
%tmp10b = bitcast x86_mmx %tmp10a to <2 x i32>
%tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
%tmp16 = mul <2 x i32> %tmp10b, %tmp15a
%tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
store x86_mmx %tmp16a, x86_mmx* %A
- %tmp21 = load x86_mmx* %B
+ %tmp21 = load x86_mmx, x86_mmx* %B
%tmp16b = bitcast x86_mmx %tmp16a to <2 x i32>
%tmp21a = bitcast x86_mmx %tmp21 to <2 x i32>
%tmp22 = and <2 x i32> %tmp16b, %tmp21a
%tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx
store x86_mmx %tmp22a, x86_mmx* %A
- %tmp27 = load x86_mmx* %B
+ %tmp27 = load x86_mmx, x86_mmx* %B
%tmp22b = bitcast x86_mmx %tmp22a to <2 x i32>
%tmp27a = bitcast x86_mmx %tmp27 to <2 x i32>
%tmp28 = or <2 x i32> %tmp22b, %tmp27a
%tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx
store x86_mmx %tmp28a, x86_mmx* %A
- %tmp33 = load x86_mmx* %B
+ %tmp33 = load x86_mmx, x86_mmx* %B
%tmp28b = bitcast x86_mmx %tmp28a to <2 x i32>
%tmp33a = bitcast x86_mmx %tmp33 to <2 x i32>
%tmp34 = xor <2 x i32> %tmp28b, %tmp33a
@@ -107,57 +107,57 @@ entry:
; X64-LABEL: test2
define void @test2(x86_mmx* %A, x86_mmx* %B) {
entry:
- %tmp1 = load x86_mmx* %A
- %tmp3 = load x86_mmx* %B
+ %tmp1 = load x86_mmx, x86_mmx* %A
+ %tmp3 = load x86_mmx, x86_mmx* %B
%tmp1a = bitcast x86_mmx %tmp1 to <4 x i16>
%tmp3a = bitcast x86_mmx %tmp3 to <4 x i16>
%tmp4 = add <4 x i16> %tmp1a, %tmp3a
%tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx
store x86_mmx %tmp4a, x86_mmx* %A
- %tmp7 = load x86_mmx* %B
+ %tmp7 = load x86_mmx, x86_mmx* %B
%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w(x86_mmx %tmp4a, x86_mmx %tmp7)
store x86_mmx %tmp12, x86_mmx* %A
- %tmp16 = load x86_mmx* %B
+ %tmp16 = load x86_mmx, x86_mmx* %B
%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp12, x86_mmx %tmp16)
store x86_mmx %tmp21, x86_mmx* %A
- %tmp27 = load x86_mmx* %B
+ %tmp27 = load x86_mmx, x86_mmx* %B
%tmp21a = bitcast x86_mmx %tmp21 to <4 x i16>
%tmp27a = bitcast x86_mmx %tmp27 to <4 x i16>
%tmp28 = sub <4 x i16> %tmp21a, %tmp27a
%tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx
store x86_mmx %tmp28a, x86_mmx* %A
- %tmp31 = load x86_mmx* %B
+ %tmp31 = load x86_mmx, x86_mmx* %B
%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx %tmp28a, x86_mmx %tmp31)
store x86_mmx %tmp36, x86_mmx* %A
- %tmp40 = load x86_mmx* %B
+ %tmp40 = load x86_mmx, x86_mmx* %B
%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx %tmp36, x86_mmx %tmp40)
store x86_mmx %tmp45, x86_mmx* %A
- %tmp51 = load x86_mmx* %B
+ %tmp51 = load x86_mmx, x86_mmx* %B
%tmp45a = bitcast x86_mmx %tmp45 to <4 x i16>
%tmp51a = bitcast x86_mmx %tmp51 to <4 x i16>
%tmp52 = mul <4 x i16> %tmp45a, %tmp51a
%tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx
store x86_mmx %tmp52a, x86_mmx* %A
- %tmp55 = load x86_mmx* %B
+ %tmp55 = load x86_mmx, x86_mmx* %B
%tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx %tmp52a, x86_mmx %tmp55)
store x86_mmx %tmp60, x86_mmx* %A
- %tmp64 = load x86_mmx* %B
+ %tmp64 = load x86_mmx, x86_mmx* %B
%tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx %tmp60, x86_mmx %tmp64)
%tmp70 = bitcast x86_mmx %tmp69 to x86_mmx
store x86_mmx %tmp70, x86_mmx* %A
- %tmp75 = load x86_mmx* %B
+ %tmp75 = load x86_mmx, x86_mmx* %B
%tmp70a = bitcast x86_mmx %tmp70 to <4 x i16>
%tmp75a = bitcast x86_mmx %tmp75 to <4 x i16>
%tmp76 = and <4 x i16> %tmp70a, %tmp75a
%tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx
store x86_mmx %tmp76a, x86_mmx* %A
- %tmp81 = load x86_mmx* %B
+ %tmp81 = load x86_mmx, x86_mmx* %B
%tmp76b = bitcast x86_mmx %tmp76a to <4 x i16>
%tmp81a = bitcast x86_mmx %tmp81 to <4 x i16>
%tmp82 = or <4 x i16> %tmp76b, %tmp81a
%tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx
store x86_mmx %tmp82a, x86_mmx* %A
- %tmp87 = load x86_mmx* %B
+ %tmp87 = load x86_mmx, x86_mmx* %B
%tmp82b = bitcast x86_mmx %tmp82a to <4 x i16>
%tmp87a = bitcast x86_mmx %tmp87 to <4 x i16>
%tmp88 = xor <4 x i16> %tmp82b, %tmp87a
@@ -179,9 +179,9 @@ bb26:
%i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ]
%sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
%tmp13 = getelementptr <1 x i64>, <1 x i64>* %b, i32 %i.037.0
- %tmp14 = load <1 x i64>* %tmp13
+ %tmp14 = load <1 x i64>, <1 x i64>* %tmp13
%tmp18 = getelementptr <1 x i64>, <1 x i64>* %a, i32 %i.037.0
- %tmp19 = load <1 x i64>* %tmp18
+ %tmp19 = load <1 x i64>, <1 x i64>* %tmp18
%tmp21 = add <1 x i64> %tmp19, %tmp14
%tmp22 = add <1 x i64> %tmp21, %sum.035.0
%tmp25 = add i32 %i.037.0, 1
Modified: llvm/trunk/test/CodeGen/X86/mmx-bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-bitcast.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-bitcast.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ define i64 @t0(x86_mmx* %p) {
; CHECK-NEXT: paddq %mm0, %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
- %t = load x86_mmx* %p
+ %t = load x86_mmx, x86_mmx* %p
%u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
%s = bitcast x86_mmx %u to i64
ret i64 %s
@@ -20,7 +20,7 @@ define i64 @t1(x86_mmx* %p) {
; CHECK-NEXT: paddd %mm0, %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
- %t = load x86_mmx* %p
+ %t = load x86_mmx, x86_mmx* %p
%u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t)
%s = bitcast x86_mmx %u to i64
ret i64 %s
@@ -33,7 +33,7 @@ define i64 @t2(x86_mmx* %p) {
; CHECK-NEXT: paddw %mm0, %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
- %t = load x86_mmx* %p
+ %t = load x86_mmx, x86_mmx* %p
%u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %t)
%s = bitcast x86_mmx %u to i64
ret i64 %s
@@ -46,7 +46,7 @@ define i64 @t3(x86_mmx* %p) {
; CHECK-NEXT: paddb %mm0, %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
- %t = load x86_mmx* %p
+ %t = load x86_mmx, x86_mmx* %p
%u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %t)
%s = bitcast x86_mmx %u to i64
ret i64 %s
Modified: llvm/trunk/test/CodeGen/X86/mmx-copy-gprs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-copy-gprs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-copy-gprs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-copy-gprs.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@
define void @foo(<1 x i64>* %x, <1 x i64>* %y) nounwind {
entry:
- %tmp1 = load <1 x i64>* %y, align 8 ; <<1 x i64>> [#uses=1]
+ %tmp1 = load <1 x i64>, <1 x i64>* %y, align 8 ; <<1 x i64>> [#uses=1]
store <1 x i64> %tmp1, <1 x i64>* %x, align 8
ret void
}
Modified: llvm/trunk/test/CodeGen/X86/mmx-fold-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-fold-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-fold-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-fold-load.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@ define i64 @t0(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -26,8 +26,8 @@ define i64 @t1(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -43,8 +43,8 @@ define i64 @t2(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -60,8 +60,8 @@ define i64 @t3(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -77,8 +77,8 @@ define i64 @t4(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -94,8 +94,8 @@ define i64 @t5(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -111,8 +111,8 @@ define i64 @t6(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -128,8 +128,8 @@ define i64 @t7(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -144,7 +144,7 @@ define i64 @tt0(x86_mmx %t, x86_mmx* %q)
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -161,7 +161,7 @@ define i64 @tt1(x86_mmx %t, x86_mmx* %q)
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -177,7 +177,7 @@ define i64 @tt2(x86_mmx %t, x86_mmx* %q)
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -193,7 +193,7 @@ define i64 @tt3(x86_mmx %t, x86_mmx* %q)
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -209,7 +209,7 @@ define i64 @tt4(x86_mmx %t, x86_mmx* %q)
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -225,7 +225,7 @@ define i64 @tt5(x86_mmx %t, x86_mmx* %q)
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -241,7 +241,7 @@ define i64 @tt6(x86_mmx %t, x86_mmx* %q)
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -257,7 +257,7 @@ define i64 @tt7(x86_mmx %t, x86_mmx* %q)
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -273,7 +273,7 @@ define i64 @tt8(x86_mmx %t, x86_mmx* %q)
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
Modified: llvm/trunk/test/CodeGen/X86/movbe.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/movbe.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/movbe.ll (original)
+++ llvm/trunk/test/CodeGen/X86/movbe.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ define void @test1(i16* nocapture %x, i1
}
define i16 @test2(i16* %x) nounwind {
- %load = load i16* %x, align 2
+ %load = load i16, i16* %x, align 2
%bswap = call i16 @llvm.bswap.i16(i16 %load)
ret i16 %bswap
; CHECK-LABEL: test2:
@@ -36,7 +36,7 @@ define void @test3(i32* nocapture %x, i3
}
define i32 @test4(i32* %x) nounwind {
- %load = load i32* %x, align 4
+ %load = load i32, i32* %x, align 4
%bswap = call i32 @llvm.bswap.i32(i32 %load)
ret i32 %bswap
; CHECK-LABEL: test4:
@@ -56,7 +56,7 @@ define void @test5(i64* %x, i64 %y) noun
}
define i64 @test6(i64* %x) nounwind {
- %load = load i64* %x, align 8
+ %load = load i64, i64* %x, align 8
%bswap = call i64 @llvm.bswap.i64(i64 %load)
ret i64 %bswap
; CHECK-LABEL: test6:
Modified: llvm/trunk/test/CodeGen/X86/movfs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/movfs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/movfs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/movfs.ll Fri Feb 27 15:17:42 2015
@@ -2,7 +2,7 @@
define i32 @foo() nounwind readonly {
entry:
- %tmp = load i32* addrspace(257)* getelementptr (i32* addrspace(257)* inttoptr (i32 72 to i32* addrspace(257)*), i32 31) ; <i32*> [#uses=1]
- %tmp1 = load i32* %tmp ; <i32> [#uses=1]
+ %tmp = load i32*, i32* addrspace(257)* getelementptr (i32* addrspace(257)* inttoptr (i32 72 to i32* addrspace(257)*), i32 31) ; <i32*> [#uses=1]
+ %tmp1 = load i32, i32* %tmp ; <i32> [#uses=1]
ret i32 %tmp1
}
Modified: llvm/trunk/test/CodeGen/X86/movgs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/movgs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/movgs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/movgs.ll Fri Feb 27 15:17:42 2015
@@ -15,8 +15,8 @@ define i32 @test1() nounwind readonly {
; X64-NEXT: movl (%rax), %eax
; X64-NEXT: retq
entry:
- %tmp = load i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31) ; <i32*> [#uses=1]
- %tmp1 = load i32* %tmp ; <i32> [#uses=1]
+ %tmp = load i32*, i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31) ; <i32*> [#uses=1]
+ %tmp1 = load i32, i32* %tmp ; <i32> [#uses=1]
ret i32 %tmp1
}
@@ -39,7 +39,7 @@ define i64 @test2(void (i8*)* addrspace(
; X64-NEXT: {{(addq.*%rsp|popq)}}
; X64-NEXT: retq
entry:
- %tmp9 = load void (i8*)* addrspace(256)* %tmp8, align 8
+ %tmp9 = load void (i8*)*, void (i8*)* addrspace(256)* %tmp8, align 8
tail call void %tmp9(i8* undef) nounwind optsize
ret i64 0
}
@@ -56,7 +56,7 @@ define <2 x i64> @pmovsxwd_1(i64 addrspa
; X64-NEXT: pmovsxwd %gs:(%{{(rcx|rdi)}}), %xmm0
; X64-NEXT: retq
entry:
- %0 = load i64 addrspace(256)* %p
+ %0 = load i64, i64 addrspace(256)* %p
%tmp2 = insertelement <2 x i64> zeroinitializer, i64 %0, i32 0
%1 = bitcast <2 x i64> %tmp2 to <8 x i16>
%2 = tail call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1) nounwind readnone
@@ -83,10 +83,10 @@ define i32 @test_no_cse() nounwind reado
; X64-NEXT: addl (%rcx), %eax
; X64-NEXT: retq
entry:
- %tmp = load i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31) ; <i32*> [#uses=1]
- %tmp1 = load i32* %tmp ; <i32> [#uses=1]
- %tmp2 = load i32* addrspace(257)* getelementptr (i32* addrspace(257)* inttoptr (i32 72 to i32* addrspace(257)*), i32 31) ; <i32*> [#uses=1]
- %tmp3 = load i32* %tmp2 ; <i32> [#uses=1]
+ %tmp = load i32*, i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31) ; <i32*> [#uses=1]
+ %tmp1 = load i32, i32* %tmp ; <i32> [#uses=1]
+ %tmp2 = load i32*, i32* addrspace(257)* getelementptr (i32* addrspace(257)* inttoptr (i32 72 to i32* addrspace(257)*), i32 31) ; <i32*> [#uses=1]
+ %tmp3 = load i32, i32* %tmp2 ; <i32> [#uses=1]
%tmp4 = add i32 %tmp1, %tmp3
ret i32 %tmp4
}
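Address-space-qualified loads in the hunks above follow the same pattern: only the loaded type is added, and the addrspace qualifier stays attached to the pointer operand. A minimal sketch, hypothetical and not part of this commit:

define i32 @load_gs_sketch(i32 addrspace(256)* %p) {
  ; explicit loaded type first; addrspace(256) remains part of the operand's pointer type
  %v = load i32, i32 addrspace(256)* %p
  ret i32 %v
}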
Modified: llvm/trunk/test/CodeGen/X86/movmsk.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/movmsk.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/movmsk.ll (original)
+++ llvm/trunk/test/CodeGen/X86/movmsk.ll Fri Feb 27 15:17:42 2015
@@ -105,7 +105,7 @@ entry:
%0 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %x) nounwind
%idxprom = sext i32 %0 to i64
%arrayidx = getelementptr inbounds i32, i32* %indexTable, i64 %idxprom
- %1 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx, align 4
ret i32 %1
}
@@ -118,7 +118,7 @@ entry:
%1 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %0) nounwind
%idxprom = sext i32 %1 to i64
%arrayidx = getelementptr inbounds i32, i32* %indexTable, i64 %idxprom
- %2 = load i32* %arrayidx, align 4
+ %2 = load i32, i32* %arrayidx, align 4
ret i32 %2
}
Modified: llvm/trunk/test/CodeGen/X86/movtopush.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/movtopush.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/movtopush.ll (original)
+++ llvm/trunk/test/CodeGen/X86/movtopush.ll Fri Feb 27 15:17:42 2015
@@ -196,7 +196,7 @@ bb:
; NORMAL-NEXT: addl $16, %esp
define void @test7(i32* %ptr) optsize {
entry:
- %val = load i32* %ptr
+ %val = load i32, i32* %ptr
call void @good(i32 1, i32 2, i32 %val, i32 4)
ret void
}
@@ -263,7 +263,7 @@ entry:
define void @test10() optsize {
%stack_fptr = alloca void (i32, i32, i32, i32)*
store void (i32, i32, i32, i32)* @good, void (i32, i32, i32, i32)** %stack_fptr
- %good_ptr = load volatile void (i32, i32, i32, i32)** %stack_fptr
+ %good_ptr = load volatile void (i32, i32, i32, i32)*, void (i32, i32, i32, i32)** %stack_fptr
call void asm sideeffect "nop", "~{ax},~{bx},~{cx},~{dx},~{bp},~{si},~{di}"()
call void (i32, i32, i32, i32)* %good_ptr(i32 1, i32 2, i32 3, i32 4)
ret void
@@ -282,7 +282,7 @@ define void @test10() optsize {
; NORMAL-NEXT: addl $16, %esp
@the_global = external global i32
define void @test11() optsize {
- %myload = load i32* @the_global
+ %myload = load i32, i32* @the_global
store i32 42, i32* @the_global
call void @good(i32 %myload, i32 2, i32 3, i32 4)
ret void
Modified: llvm/trunk/test/CodeGen/X86/ms-inline-asm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ms-inline-asm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ms-inline-asm.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ms-inline-asm.ll Fri Feb 27 15:17:42 2015
@@ -50,7 +50,7 @@ entry:
store i32 2, i32* %b, align 4
call void asm sideeffect inteldialect "lea ebx, foo\0A\09mov eax, [ebx].0\0A\09mov [ebx].4, ecx", "~{eax},~{dirflag},~{fpsr},~{flags}"() nounwind
%b1 = getelementptr inbounds %struct.t18_type, %struct.t18_type* %foo, i32 0, i32 1
- %0 = load i32* %b1, align 4
+ %0 = load i32, i32* %b1, align 4
ret i32 %0
; CHECK: t18
; CHECK: {{## InlineAsm Start|#APP}}
@@ -87,7 +87,7 @@ entry:
%res = alloca i32*, align 4
call void asm sideeffect inteldialect "lea edi, dword ptr $0", "*m,~{edi},~{dirflag},~{fpsr},~{flags}"([2 x i32]* @results) nounwind
call void asm sideeffect inteldialect "mov dword ptr $0, edi", "=*m,~{dirflag},~{fpsr},~{flags}"(i32** %res) nounwind
- %0 = load i32** %res, align 4
+ %0 = load i32*, i32** %res, align 4
ret i32* %0
; CHECK-LABEL: t30:
; CHECK: {{## InlineAsm Start|#APP}}
@@ -111,7 +111,7 @@ entry:
%val = alloca i32, align 64
store i32 -1, i32* %val, align 64
call void asm sideeffect inteldialect "mov dword ptr $0, esp", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* %val)
- %sp = load i32* %val, align 64
+ %sp = load i32, i32* %val, align 64
ret i32 %sp
; CHECK-LABEL: t31:
; CHECK: pushl %ebp
Modified: llvm/trunk/test/CodeGen/X86/mul128_sext_loop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mul128_sext_loop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mul128_sext_loop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mul128_sext_loop.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ define void @test(i64* nocapture %arr, i
%carry.02 = phi i128 [ 0, %.lr.ph ], [ %10, %3 ]
%i.01 = phi i64 [ 0, %.lr.ph ], [ %11, %3 ]
%4 = getelementptr inbounds i64, i64* %arr, i64 %i.01
- %5 = load i64* %4, align 8
+ %5 = load i64, i64* %4, align 8
%6 = sext i64 %5 to i128
%7 = mul nsw i128 %6, %2
%8 = add nsw i128 %7, %carry.02
Modified: llvm/trunk/test/CodeGen/X86/muloti.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/muloti.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/muloti.ll (original)
+++ llvm/trunk/test/CodeGen/X86/muloti.ll Fri Feb 27 15:17:42 2015
@@ -45,17 +45,17 @@ entry:
store i64 %a.coerce0, i64* %1
%2 = getelementptr %0, %0* %0, i32 0, i32 1
store i64 %a.coerce1, i64* %2
- %a = load i128* %coerce, align 16
+ %a = load i128, i128* %coerce, align 16
store i128 %a, i128* %a.addr, align 16
%3 = bitcast i128* %coerce1 to %0*
%4 = getelementptr %0, %0* %3, i32 0, i32 0
store i64 %b.coerce0, i64* %4
%5 = getelementptr %0, %0* %3, i32 0, i32 1
store i64 %b.coerce1, i64* %5
- %b = load i128* %coerce1, align 16
+ %b = load i128, i128* %coerce1, align 16
store i128 %b, i128* %b.addr, align 16
- %tmp = load i128* %a.addr, align 16
- %tmp2 = load i128* %b.addr, align 16
+ %tmp = load i128, i128* %a.addr, align 16
+ %tmp2 = load i128, i128* %b.addr, align 16
%6 = call %1 @llvm.umul.with.overflow.i128(i128 %tmp, i128 %tmp2)
; CHECK: cmov
; CHECK: divti3
@@ -70,7 +70,7 @@ overflow:
nooverflow: ; preds = %entry
store i128 %7, i128* %retval
%9 = bitcast i128* %retval to %0*
- %10 = load %0* %9, align 1
+ %10 = load %0, %0* %9, align 1
ret %0 %10
}
Modified: llvm/trunk/test/CodeGen/X86/mult-alt-generic-i686.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mult-alt-generic-i686.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mult-alt-generic-i686.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mult-alt-generic-i686.ll Fri Feb 27 15:17:42 2015
@@ -33,10 +33,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,<r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r<,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -48,10 +48,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,>r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r>,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -63,7 +63,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -120,10 +120,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,imr,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
@@ -137,10 +137,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,X,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
@@ -165,7 +165,7 @@ entry:
define void @multi_m() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*m|r,m|r,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
@@ -190,10 +190,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|<r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r<,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -205,10 +205,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|>r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r>,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -220,7 +220,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|m,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -277,10 +277,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|imr,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
@@ -294,10 +294,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|X,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
Modified: llvm/trunk/test/CodeGen/X86/mult-alt-generic-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mult-alt-generic-x86_64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mult-alt-generic-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mult-alt-generic-x86_64.ll Fri Feb 27 15:17:42 2015
@@ -33,10 +33,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,<r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r<,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -48,10 +48,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,>r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r>,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -63,7 +63,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -120,10 +120,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,imr,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
@@ -137,10 +137,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,X,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
@@ -165,7 +165,7 @@ entry:
define void @multi_m() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*m|r,m|r,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
@@ -190,10 +190,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|<r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r<,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -205,10 +205,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|>r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r>,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -220,7 +220,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|m,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -277,10 +277,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|imr,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
@@ -294,10 +294,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|X,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
Modified: llvm/trunk/test/CodeGen/X86/mult-alt-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mult-alt-x86.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mult-alt-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mult-alt-x86.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ target triple = "i686-pc-win32"
define void @single_R() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "=R,R,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -19,7 +19,7 @@ entry:
define void @single_q() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "=q,q,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -27,7 +27,7 @@ entry:
define void @single_Q() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "=Q,Q,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -35,7 +35,7 @@ entry:
define void @single_a() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "={ax},{ax},~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -43,7 +43,7 @@ entry:
define void @single_b() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "={bx},{bx},~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -51,7 +51,7 @@ entry:
define void @single_c() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "={cx},{cx},~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -59,7 +59,7 @@ entry:
define void @single_d() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "={dx},{dx},~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -67,7 +67,7 @@ entry:
define void @single_S() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "={si},{si},~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -75,7 +75,7 @@ entry:
define void @single_D() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "={di},{di},~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -83,7 +83,7 @@ entry:
define void @single_A() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "=A,A,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -106,7 +106,7 @@ entry:
define void @single_y() nounwind {
entry:
- %tmp = load double* @din1, align 8
+ %tmp = load double, double* @din1, align 8
%0 = call double asm "foo $1,$0", "=y,y,~{dirflag},~{fpsr},~{flags}"(double %tmp) nounwind
store double %0, double* @dout0, align 8
ret void
@@ -114,7 +114,7 @@ entry:
define void @single_x() nounwind {
entry:
- %tmp = load double* @din1, align 8
+ %tmp = load double, double* @din1, align 8
%0 = call double asm "foo $1,$0", "=x,x,~{dirflag},~{fpsr},~{flags}"(double %tmp) nounwind
store double %0, double* @dout0, align 8
ret void
@@ -191,70 +191,70 @@ entry:
define void @multi_R() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|R|m,r|R|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_q() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|q|m,r|q|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_Q() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|Q|m,r|Q|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_a() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|{ax}|m,r|{ax}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_b() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|{bx}|m,r|{bx}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_c() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|{cx}|m,r|{cx}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_d() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|{dx}|m,r|{dx},~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_S() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|{si}|m,r|{si}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_D() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|{di}|m,r|{di}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_A() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|A|m,r|A|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
@@ -276,14 +276,14 @@ entry:
define void @multi_y() nounwind {
entry:
- %tmp = load double* @din1, align 8
+ %tmp = load double, double* @din1, align 8
call void asm "foo $1,$0", "=*r|y|m,r|y|m,~{dirflag},~{fpsr},~{flags}"(double* @dout0, double %tmp) nounwind
ret void
}
define void @multi_x() nounwind {
entry:
- %tmp = load double* @din1, align 8
+ %tmp = load double, double* @din1, align 8
call void asm "foo $1,$0", "=*r|x|m,r|x|m,~{dirflag},~{fpsr},~{flags}"(double* @dout0, double %tmp) nounwind
ret void
}
Modified: llvm/trunk/test/CodeGen/X86/multiple-loop-post-inc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/multiple-loop-post-inc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/multiple-loop-post-inc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/multiple-loop-post-inc.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ define void @foo(float* %I, i64 %IS, flo
entry:
%times4 = alloca float, align 4 ; <float*> [#uses=3]
%timesN = alloca float, align 4 ; <float*> [#uses=2]
- %0 = load float* %Step, align 4 ; <float> [#uses=8]
+ %0 = load float, float* %Step, align 4 ; <float> [#uses=8]
%1 = ptrtoint float* %I to i64 ; <i64> [#uses=1]
%2 = ptrtoint float* %O to i64 ; <i64> [#uses=1]
%tmp = xor i64 %2, %1 ; <i64> [#uses=1]
@@ -34,11 +34,11 @@ entry:
br i1 %9, label %bb, label %return
bb: ; preds = %entry
- %10 = load float* %Start, align 4 ; <float> [#uses=1]
+ %10 = load float, float* %Start, align 4 ; <float> [#uses=1]
br label %bb2
bb1: ; preds = %bb3
- %11 = load float* %I_addr.0, align 4 ; <float> [#uses=1]
+ %11 = load float, float* %I_addr.0, align 4 ; <float> [#uses=1]
%12 = fmul float %11, %x.0 ; <float> [#uses=1]
store float %12, float* %O_addr.0, align 4
%13 = fadd float %x.0, %0 ; <float> [#uses=1]
@@ -127,10 +127,10 @@ bb5:
%scevgep130131 = bitcast float* %scevgep130 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp132 = mul i64 %indvar102, -16 ; <i64> [#uses=1]
%tmp136 = add i64 %tmp135, %tmp132 ; <i64> [#uses=2]
- %36 = load <4 x float>* %scevgep106107, align 16 ; <<4 x float>> [#uses=1]
- %37 = load <4 x float>* %scevgep113114, align 16 ; <<4 x float>> [#uses=1]
- %38 = load <4 x float>* %scevgep117118, align 16 ; <<4 x float>> [#uses=1]
- %39 = load <4 x float>* %scevgep121122, align 16 ; <<4 x float>> [#uses=1]
+ %36 = load <4 x float>, <4 x float>* %scevgep106107, align 16 ; <<4 x float>> [#uses=1]
+ %37 = load <4 x float>, <4 x float>* %scevgep113114, align 16 ; <<4 x float>> [#uses=1]
+ %38 = load <4 x float>, <4 x float>* %scevgep117118, align 16 ; <<4 x float>> [#uses=1]
+ %39 = load <4 x float>, <4 x float>* %scevgep121122, align 16 ; <<4 x float>> [#uses=1]
%40 = fmul <4 x float> %36, %vX0.039 ; <<4 x float>> [#uses=1]
%41 = fadd <4 x float> %vX0.039, %asmtmp.i18 ; <<4 x float>> [#uses=2]
%42 = fmul <4 x float> %37, %vX1.036 ; <<4 x float>> [#uses=1]
@@ -168,7 +168,7 @@ bb.nph:
%I_addr.0.sum = add i64 %14, -1 ; <i64> [#uses=1]
%49 = getelementptr inbounds float, float* %I, i64 %I_addr.0.sum ; <float*> [#uses=1]
%50 = bitcast float* %49 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %51 = load <4 x float>* %50, align 16 ; <<4 x float>> [#uses=1]
+ %51 = load <4 x float>, <4 x float>* %50, align 16 ; <<4 x float>> [#uses=1]
%tmp54 = add i64 %14, 16 ; <i64> [#uses=1]
%tmp56 = add i64 %14, 3 ; <i64> [#uses=1]
%tmp60 = add i64 %14, 7 ; <i64> [#uses=1]
@@ -216,10 +216,10 @@ bb9:
%scevgep8687 = bitcast float* %scevgep86 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp88 = mul i64 %indvar, -16 ; <i64> [#uses=1]
%tmp92 = add i64 %tmp91, %tmp88 ; <i64> [#uses=2]
- %52 = load <4 x float>* %scevgep5859, align 16 ; <<4 x float>> [#uses=2]
- %53 = load <4 x float>* %scevgep6263, align 16 ; <<4 x float>> [#uses=2]
- %54 = load <4 x float>* %scevgep6667, align 16 ; <<4 x float>> [#uses=2]
- %55 = load <4 x float>* %scevgep7071, align 16 ; <<4 x float>> [#uses=2]
+ %52 = load <4 x float>, <4 x float>* %scevgep5859, align 16 ; <<4 x float>> [#uses=2]
+ %53 = load <4 x float>, <4 x float>* %scevgep6263, align 16 ; <<4 x float>> [#uses=2]
+ %54 = load <4 x float>, <4 x float>* %scevgep6667, align 16 ; <<4 x float>> [#uses=2]
+ %55 = load <4 x float>, <4 x float>* %scevgep7071, align 16 ; <<4 x float>> [#uses=2]
%56 = shufflevector <4 x float> %vI0.019, <4 x float> %52, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
%57 = shufflevector <4 x float> %56, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1]
%58 = shufflevector <4 x float> %52, <4 x float> %53, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
@@ -263,7 +263,7 @@ bb12:
%x.130 = phi float [ %77, %bb12 ], [ %73, %bb11 ] ; <float> [#uses=2]
%I_addr.433 = getelementptr float, float* %I_addr.2, i64 %indvar94 ; <float*> [#uses=1]
%O_addr.432 = getelementptr float, float* %O_addr.2, i64 %indvar94 ; <float*> [#uses=1]
- %75 = load float* %I_addr.433, align 4 ; <float> [#uses=1]
+ %75 = load float, float* %I_addr.433, align 4 ; <float> [#uses=1]
%76 = fmul float %75, %x.130 ; <float> [#uses=1]
store float %76, float* %O_addr.432, align 4
%77 = fadd float %x.130, %0 ; <float> [#uses=2]
@@ -293,7 +293,7 @@ outer:
inner: ; preds = %bb0, %if.end275
%i8 = phi i32 [ %a, %outer ], [ %indvar.next159, %bb0 ] ; <i32> [#uses=2]
- %t338 = load i32* undef ; <i32> [#uses=1]
+ %t338 = load i32, i32* undef ; <i32> [#uses=1]
%t191 = mul i32 %i8, %t338 ; <i32> [#uses=1]
%t179 = add i32 %i6, %t191 ; <i32> [#uses=1]
br label %bb0
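The vector loads in multiple-loop-post-inc.ll show the same shape: the element count and element type appear twice, once as the result type and once inside the pointer type. Sketch (illustrative only):

define <4 x float> @load_vec(<4 x float>* %p) {
entry:
  %v = load <4 x float>, <4 x float>* %p, align 16
  ret <4 x float> %v
}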
Modified: llvm/trunk/test/CodeGen/X86/mulx32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mulx32.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mulx32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mulx32.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ define i64 @f1(i32 %a, i32 %b) {
}
define i64 @f2(i32 %a, i32* %p) {
- %b = load i32* %p
+ %b = load i32, i32* %p
%x = zext i32 %a to i64
%y = zext i32 %b to i64
%r = mul i64 %x, %y
Modified: llvm/trunk/test/CodeGen/X86/mulx64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mulx64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mulx64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mulx64.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ define i128 @f1(i64 %a, i64 %b) {
}
define i128 @f2(i64 %a, i64* %p) {
- %b = load i64* %p
+ %b = load i64, i64* %p
%x = zext i64 %a to i128
%y = zext i64 %b to i128
%r = mul i128 %x, %y
Modified: llvm/trunk/test/CodeGen/X86/musttail-indirect.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/musttail-indirect.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/musttail-indirect.ll (original)
+++ llvm/trunk/test/CodeGen/X86/musttail-indirect.ll Fri Feb 27 15:17:42 2015
@@ -31,8 +31,8 @@
define x86_thiscallcc i32 @f_thunk(%struct.B* %this, i32) {
entry:
%1 = bitcast %struct.B* %this to i32 (%struct.B*, i32)***
- %vtable = load i32 (%struct.B*, i32)*** %1
- %2 = load i32 (%struct.B*, i32)** %vtable
+ %vtable = load i32 (%struct.B*, i32)**, i32 (%struct.B*, i32)*** %1
+ %2 = load i32 (%struct.B*, i32)*, i32 (%struct.B*, i32)** %vtable
%3 = musttail call x86_thiscallcc i32 %2(%struct.B* %this, i32 %0)
ret i32 %3
}
@@ -45,9 +45,9 @@ entry:
define x86_thiscallcc i32 @g_thunk(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca) {
entry:
%1 = bitcast %struct.B* %this to i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)***
- %vtable = load i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*** %1
+ %vtable = load i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)**, i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*** %1
%vfn = getelementptr inbounds i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*, i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vtable, i32 1
- %2 = load i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vfn
+ %2 = load i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*, i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vfn
%3 = musttail call x86_thiscallcc i32 %2(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca %0)
ret i32 %3
}
@@ -59,9 +59,9 @@ entry:
define x86_thiscallcc void @h_thunk(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca) {
entry:
%1 = bitcast %struct.B* %this to void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)***
- %vtable = load void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*** %1
+ %vtable = load void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)**, void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*** %1
%vfn = getelementptr inbounds void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*, void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vtable, i32 2
- %2 = load void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vfn
+ %2 = load void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*, void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vfn
musttail call x86_thiscallcc void %2(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca %0)
ret void
}
@@ -73,9 +73,9 @@ entry:
define x86_thiscallcc %struct.A* @i_thunk(%struct.B* %this, <{ %struct.A*, %struct.A, i32, %struct.A }>* inalloca) {
entry:
%1 = bitcast %struct.B* %this to %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)***
- %vtable = load %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)*** %1
+ %vtable = load %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)**, %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)*** %1
%vfn = getelementptr inbounds %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)*, %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)** %vtable, i32 3
- %2 = load %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)** %vfn
+ %2 = load %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)*, %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)** %vfn
%3 = musttail call x86_thiscallcc %struct.A* %2(%struct.B* %this, <{ %struct.A*, %struct.A, i32, %struct.A }>* inalloca %0)
ret %struct.A* %3
}
@@ -86,9 +86,9 @@ entry:
define x86_thiscallcc void @j_thunk(%struct.A* noalias sret %agg.result, %struct.B* %this, i32) {
entry:
%1 = bitcast %struct.B* %this to void (%struct.A*, %struct.B*, i32)***
- %vtable = load void (%struct.A*, %struct.B*, i32)*** %1
+ %vtable = load void (%struct.A*, %struct.B*, i32)**, void (%struct.A*, %struct.B*, i32)*** %1
%vfn = getelementptr inbounds void (%struct.A*, %struct.B*, i32)*, void (%struct.A*, %struct.B*, i32)** %vtable, i32 4
- %2 = load void (%struct.A*, %struct.B*, i32)** %vfn
+ %2 = load void (%struct.A*, %struct.B*, i32)*, void (%struct.A*, %struct.B*, i32)** %vfn
musttail call x86_thiscallcc void %2(%struct.A* sret %agg.result, %struct.B* %this, i32 %0)
ret void
}
@@ -100,11 +100,11 @@ entry:
define x86_stdcallcc i32 @stdcall_thunk(<{ %struct.B*, %struct.A }>* inalloca) {
entry:
%this_ptr = getelementptr inbounds <{ %struct.B*, %struct.A }>, <{ %struct.B*, %struct.A }>* %0, i32 0, i32 0
- %this = load %struct.B** %this_ptr
+ %this = load %struct.B*, %struct.B** %this_ptr
%1 = bitcast %struct.B* %this to i32 (<{ %struct.B*, %struct.A }>*)***
- %vtable = load i32 (<{ %struct.B*, %struct.A }>*)*** %1
+ %vtable = load i32 (<{ %struct.B*, %struct.A }>*)**, i32 (<{ %struct.B*, %struct.A }>*)*** %1
%vfn = getelementptr inbounds i32 (<{ %struct.B*, %struct.A }>*)*, i32 (<{ %struct.B*, %struct.A }>*)** %vtable, i32 1
- %2 = load i32 (<{ %struct.B*, %struct.A }>*)** %vfn
+ %2 = load i32 (<{ %struct.B*, %struct.A }>*)*, i32 (<{ %struct.B*, %struct.A }>*)** %vfn
%3 = musttail call x86_stdcallcc i32 %2(<{ %struct.B*, %struct.A }>* inalloca %0)
ret i32 %3
}
@@ -116,9 +116,9 @@ entry:
define x86_fastcallcc i32 @fastcall_thunk(%struct.B* inreg %this, <{ %struct.A }>* inalloca) {
entry:
%1 = bitcast %struct.B* %this to i32 (%struct.B*, <{ %struct.A }>*)***
- %vtable = load i32 (%struct.B*, <{ %struct.A }>*)*** %1
+ %vtable = load i32 (%struct.B*, <{ %struct.A }>*)**, i32 (%struct.B*, <{ %struct.A }>*)*** %1
%vfn = getelementptr inbounds i32 (%struct.B*, <{ %struct.A }>*)*, i32 (%struct.B*, <{ %struct.A }>*)** %vtable, i32 1
- %2 = load i32 (%struct.B*, <{ %struct.A }>*)** %vfn
+ %2 = load i32 (%struct.B*, <{ %struct.A }>*)*, i32 (%struct.B*, <{ %struct.A }>*)** %vfn
%3 = musttail call x86_fastcallcc i32 %2(%struct.B* inreg %this, <{ %struct.A }>* inalloca %0)
ret i32 %3
}
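Function-pointer loads, as in the thunks above, are where the new spelling gets verbose, but the rule is unchanged: the first type is the value being loaded, and the second is that type with one more *. Sketch (hypothetical vtable layout and names):

define i32 @call_slot0(i32 (i8*)** %vtable, i8* %obj) {
entry:
  ; load slot 0 of the table: the loaded type i32 (i8*)* is repeated,
  ; with an extra *, as the type of the pointer operand
  %fn = load i32 (i8*)*, i32 (i8*)** %vtable
  %r = call i32 %fn(i8* %obj)
  ret i32 %r
}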
Modified: llvm/trunk/test/CodeGen/X86/musttail-varargs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/musttail-varargs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/musttail-varargs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/musttail-varargs.ll Fri Feb 27 15:17:42 2015
@@ -107,19 +107,19 @@ define void @g_thunk(i8* %fptr_i8, ...)
define void @h_thunk(%struct.Foo* %this, ...) {
%cond_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 0
- %cond = load i1* %cond_p
+ %cond = load i1, i1* %cond_p
br i1 %cond, label %then, label %else
then:
%a_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 1
- %a_i8 = load i8** %a_p
+ %a_i8 = load i8*, i8** %a_p
%a = bitcast i8* %a_i8 to void (%struct.Foo*, ...)*
musttail call void (%struct.Foo*, ...)* %a(%struct.Foo* %this, ...)
ret void
else:
%b_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 2
- %b_i8 = load i8** %b_p
+ %b_i8 = load i8*, i8** %b_p
%b = bitcast i8* %b_i8 to void (%struct.Foo*, ...)*
store i32 42, i32* @g
musttail call void (%struct.Foo*, ...)* %b(%struct.Foo* %this, ...)