[llvm-commits] [llvm] r47432 - /llvm/trunk/test/CodeGen/X86/

Tanya Lattner tonic at nondot.org
Wed Feb 20 23:42:27 PST 2008


Author: tbrethou
Date: Thu Feb 21 01:42:26 2008
New Revision: 47432

URL: http://llvm.org/viewvc/llvm-project?rev=47432&view=rev
Log:
Remove llvm-upgrade and update tests.
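
For reference, every test in this commit follows the same pattern: the RUN line drops the llvm-upgrade stage, and the IR itself is rewritten in native LLVM 2.x syntax (sized integer types i1/i8/i16/i32/i64 in place of bool/sbyte/short/int/long, @-prefixed globals and functions in place of %-prefixed ones, no "implementation" marker, explicit cast instructions such as bitcast/zext/trunc/fptrunc/fpext instead of the old generic cast, and zext'ed gep.upgrd temporaries where getelementptr indices were widened). A minimal before/after sketch of the transformation, using a hypothetical test that is not one of the files touched here:

; Before (LLVM 1.x syntax, upgraded at test time):
; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
int %add1(int %x) {
	%y = add int %x, 1		; <int> [#uses=1]
	ret int %y
}

; After (native LLVM 2.x syntax):
; RUN: llvm-as < %s | llc -march=x86
define i32 @add1(i32 %x) {
	%y = add i32 %x, 1		; <i32> [#uses=1]
	ret i32 %y
}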

Modified:
    llvm/trunk/test/CodeGen/X86/2006-08-07-CycleInDAG.ll
    llvm/trunk/test/CodeGen/X86/2006-08-16-CycleInDAG.ll
    llvm/trunk/test/CodeGen/X86/2006-08-21-ExtraMovInst.ll
    llvm/trunk/test/CodeGen/X86/2006-09-01-CycleInDAG.ll
    llvm/trunk/test/CodeGen/X86/2006-10-02-BoolRetCrash.ll
    llvm/trunk/test/CodeGen/X86/2006-10-07-ScalarSSEMiscompile.ll
    llvm/trunk/test/CodeGen/X86/2006-10-12-CycleInDAG.ll
    llvm/trunk/test/CodeGen/X86/2006-10-13-CycleInDAG.ll
    llvm/trunk/test/CodeGen/X86/2006-10-19-SwitchUnnecessaryBranching.ll
    llvm/trunk/test/CodeGen/X86/2006-11-17-IllegalMove.ll
    llvm/trunk/test/CodeGen/X86/2006-11-27-SelectLegalize.ll
    llvm/trunk/test/CodeGen/X86/2006-11-28-Memcpy.ll
    llvm/trunk/test/CodeGen/X86/2006-12-19-IntelSyntax.ll
    llvm/trunk/test/CodeGen/X86/2007-05-05-VecCastExpand.ll
    llvm/trunk/test/CodeGen/X86/and-or-fold.ll
    llvm/trunk/test/CodeGen/X86/asm-global-imm.ll
    llvm/trunk/test/CodeGen/X86/bitcast.ll
    llvm/trunk/test/CodeGen/X86/bswap.ll
    llvm/trunk/test/CodeGen/X86/cmp-test.ll
    llvm/trunk/test/CodeGen/X86/commute-two-addr.ll
    llvm/trunk/test/CodeGen/X86/compare-add.ll
    llvm/trunk/test/CodeGen/X86/compare_folding.llx
    llvm/trunk/test/CodeGen/X86/darwin-no-dead-strip.ll
    llvm/trunk/test/CodeGen/X86/div_const.ll
    llvm/trunk/test/CodeGen/X86/extend.ll
    llvm/trunk/test/CodeGen/X86/extern_weak.ll
    llvm/trunk/test/CodeGen/X86/fast-cc-callee-pops.ll
    llvm/trunk/test/CodeGen/X86/fast-cc-merge-stack-adj.ll
    llvm/trunk/test/CodeGen/X86/fastcall-correct-mangling.ll
    llvm/trunk/test/CodeGen/X86/fildll.ll
    llvm/trunk/test/CodeGen/X86/fold-load.ll
    llvm/trunk/test/CodeGen/X86/fp-immediate-shorten.ll
    llvm/trunk/test/CodeGen/X86/fp-stack-compare.ll
    llvm/trunk/test/CodeGen/X86/fp_constant_op.llx
    llvm/trunk/test/CodeGen/X86/fp_load_cast_fold.llx
    llvm/trunk/test/CodeGen/X86/fp_load_fold.llx
    llvm/trunk/test/CodeGen/X86/imul-lea.ll
    llvm/trunk/test/CodeGen/X86/isnan.llx
    llvm/trunk/test/CodeGen/X86/jump_sign.ll
    llvm/trunk/test/CodeGen/X86/lea-2.ll
    llvm/trunk/test/CodeGen/X86/lea.ll
    llvm/trunk/test/CodeGen/X86/loop-hoist.ll
    llvm/trunk/test/CodeGen/X86/loop-strength-reduce.ll
    llvm/trunk/test/CodeGen/X86/loop-strength-reduce2.ll
    llvm/trunk/test/CodeGen/X86/mul-shift-reassoc.ll
    llvm/trunk/test/CodeGen/X86/negative_zero.ll
    llvm/trunk/test/CodeGen/X86/or-branch.ll
    llvm/trunk/test/CodeGen/X86/overlap-shift.ll
    llvm/trunk/test/CodeGen/X86/packed_struct.ll
    llvm/trunk/test/CodeGen/X86/regpressure.ll
    llvm/trunk/test/CodeGen/X86/rem.ll
    llvm/trunk/test/CodeGen/X86/rotate.ll
    llvm/trunk/test/CodeGen/X86/scalar_sse_minmax.ll
    llvm/trunk/test/CodeGen/X86/setuge.ll
    llvm/trunk/test/CodeGen/X86/shift-coalesce.ll
    llvm/trunk/test/CodeGen/X86/shift-double.llx
    llvm/trunk/test/CodeGen/X86/shift-folding.ll
    llvm/trunk/test/CodeGen/X86/shift-one.ll
    llvm/trunk/test/CodeGen/X86/sse-load-ret.ll
    llvm/trunk/test/CodeGen/X86/store-fp-constant.ll
    llvm/trunk/test/CodeGen/X86/store-global-address.ll
    llvm/trunk/test/CodeGen/X86/store_op_load_fold.ll
    llvm/trunk/test/CodeGen/X86/vec_call.ll
    llvm/trunk/test/CodeGen/X86/vec_clear.ll
    llvm/trunk/test/CodeGen/X86/vec_extract.ll
    llvm/trunk/test/CodeGen/X86/vec_ins_extract.ll
    llvm/trunk/test/CodeGen/X86/vec_select.ll
    llvm/trunk/test/CodeGen/X86/vec_set-3.ll
    llvm/trunk/test/CodeGen/X86/vec_set-4.ll
    llvm/trunk/test/CodeGen/X86/vec_set-5.ll
    llvm/trunk/test/CodeGen/X86/vec_set-6.ll
    llvm/trunk/test/CodeGen/X86/vec_set.ll
    llvm/trunk/test/CodeGen/X86/vec_shuffle-10.ll
    llvm/trunk/test/CodeGen/X86/vec_shuffle-2.ll
    llvm/trunk/test/CodeGen/X86/vec_shuffle-3.ll
    llvm/trunk/test/CodeGen/X86/vec_shuffle-4.ll
    llvm/trunk/test/CodeGen/X86/vec_shuffle-5.ll
    llvm/trunk/test/CodeGen/X86/vec_shuffle-6.ll
    llvm/trunk/test/CodeGen/X86/vec_shuffle-7.ll
    llvm/trunk/test/CodeGen/X86/vec_shuffle-8.ll
    llvm/trunk/test/CodeGen/X86/vec_shuffle-9.ll
    llvm/trunk/test/CodeGen/X86/vec_shuffle.ll
    llvm/trunk/test/CodeGen/X86/vec_splat-2.ll
    llvm/trunk/test/CodeGen/X86/vec_splat.ll
    llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll
    llvm/trunk/test/CodeGen/X86/vec_zero.ll
    llvm/trunk/test/CodeGen/X86/vector.ll
    llvm/trunk/test/CodeGen/X86/weak.ll
    llvm/trunk/test/CodeGen/X86/x86-64-asm.ll
    llvm/trunk/test/CodeGen/X86/xmm-r64.ll

Modified: llvm/trunk/test/CodeGen/X86/2006-08-07-CycleInDAG.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-08-07-CycleInDAG.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-08-07-CycleInDAG.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-08-07-CycleInDAG.ll Thu Feb 21 01:42:26 2008
@@ -1,34 +1,31 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2
+	%struct.foo = type opaque
 
-%struct.foo = type opaque
+define fastcc i32 @test(%struct.foo* %v, %struct.foo* %vi) {
+	br i1 false, label %ilog2.exit, label %cond_true.i
 
-implementation
+cond_true.i:		; preds = %0
+	ret i32 0
 
-fastcc int %test(%struct.foo* %v, %struct.foo* %vi) {
-	br bool false, label %ilog2.exit, label %cond_true.i
-
-cond_true.i:		; preds = %entry
-	ret int 0
-
-ilog2.exit:		; preds = %entry
-	%tmp24.i = load int* null		; <int> [#uses=1]
-	%tmp13.i12.i = tail call double %ldexp( double 0.000000e+00, int 0 )		; <double> [#uses=1]
-	%tmp13.i13.i = cast double %tmp13.i12.i to float		; <float> [#uses=1]
-	%tmp11.s = load int* null		; <int> [#uses=1]
-	%tmp11.i = cast int %tmp11.s to uint		; <uint> [#uses=1]
-	%n.i = cast int %tmp24.i to uint		; <uint> [#uses=1]
-	%tmp13.i7 = mul uint %tmp11.i, %n.i		; <uint> [#uses=1]
-	%tmp.i8 = tail call sbyte* %calloc( uint %tmp13.i7, uint 4 )		; <sbyte*> [#uses=0]
-	br bool false, label %bb224.preheader.i, label %bb.i
+ilog2.exit:		; preds = %0
+	%tmp24.i = load i32* null		; <i32> [#uses=1]
+	%tmp13.i12.i = tail call double @ldexp( double 0.000000e+00, i32 0 )		; <double> [#uses=1]
+	%tmp13.i13.i = fptrunc double %tmp13.i12.i to float		; <float> [#uses=1]
+	%tmp11.s = load i32* null		; <i32> [#uses=1]
+	%tmp11.i = bitcast i32 %tmp11.s to i32		; <i32> [#uses=1]
+	%n.i = bitcast i32 %tmp24.i to i32		; <i32> [#uses=1]
+	%tmp13.i7 = mul i32 %tmp11.i, %n.i		; <i32> [#uses=1]
+	%tmp.i8 = tail call i8* @calloc( i32 %tmp13.i7, i32 4 )		; <i8*> [#uses=0]
+	br i1 false, label %bb224.preheader.i, label %bb.i
 
 bb.i:		; preds = %ilog2.exit
-	ret int 0
+	ret i32 0
 
 bb224.preheader.i:		; preds = %ilog2.exit
-	%tmp165.i = cast float %tmp13.i13.i to double		; <double> [#uses=0]
-	ret int 0
+	%tmp165.i = fpext float %tmp13.i13.i to double		; <double> [#uses=0]
+	ret i32 0
 }
 
-declare sbyte* %calloc(uint, uint)
+declare i8* @calloc(i32, i32)
 
-declare double %ldexp(double, int)
+declare double @ldexp(double, i32)

Modified: llvm/trunk/test/CodeGen/X86/2006-08-16-CycleInDAG.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-08-16-CycleInDAG.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-08-16-CycleInDAG.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-08-16-CycleInDAG.ll Thu Feb 21 01:42:26 2008
@@ -1,23 +1,23 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86
+	%struct.expr = type { %struct.rtx_def*, i32, %struct.expr*, %struct.occr*, %struct.occr*, %struct.rtx_def* }
+	%struct.hash_table = type { %struct.expr**, i32, i32, i32 }
+	%struct.occr = type { %struct.occr*, %struct.rtx_def*, i8, i8 }
+	%struct.rtx_def = type { i16, i8, i8, %struct.u }
+	%struct.u = type { [1 x i64] }
 
-	%struct.expr = type { %struct.rtx_def*, int, %struct.expr*, %struct.occr*, %struct.occr*, %struct.rtx_def* }
-	%struct.hash_table = type { %struct.expr**, uint, uint, int }
-	%struct.occr = type { %struct.occr*, %struct.rtx_def*, sbyte, sbyte }
-	%struct.rtx_def = type { ushort, ubyte, ubyte, %struct.u }
-	%struct.u = type { [1 x long] }
+define void @test() {
+	%tmp = load i32* null		; <i32> [#uses=1]
+	%tmp8 = call i32 @hash_rtx( )		; <i32> [#uses=1]
+	%tmp11 = urem i32 %tmp8, %tmp		; <i32> [#uses=1]
+	br i1 false, label %cond_next, label %return
 
-void %test() {
-	%tmp = load uint* null		; <uint> [#uses=1]
-	%tmp8 = call uint %hash_rtx( )		; <uint> [#uses=1]
-	%tmp11 = rem uint %tmp8, %tmp		; <uint> [#uses=1]
-	br bool false, label %cond_next, label %return
-
-cond_next:		; preds = %entry
-	%tmp17 = getelementptr %struct.expr** null, uint %tmp11		; <%struct.expr**> [#uses=0]
+cond_next:		; preds = %0
+	%gep.upgrd.1 = zext i32 %tmp11 to i64		; <i64> [#uses=1]
+	%tmp17 = getelementptr %struct.expr** null, i64 %gep.upgrd.1		; <%struct.expr**> [#uses=0]
 	ret void
 
-return:		; preds = %entry
+return:		; preds = %0
 	ret void
 }
 
-declare uint %hash_rtx()
+declare i32 @hash_rtx()

Modified: llvm/trunk/test/CodeGen/X86/2006-08-21-ExtraMovInst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-08-21-ExtraMovInst.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-08-21-ExtraMovInst.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-08-21-ExtraMovInst.ll Thu Feb 21 01:42:26 2008
@@ -1,16 +1,17 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=i386 | \
+; RUN: llvm-as < %s | llc -march=x86 -mcpu=i386 | \
 ; RUN:    not grep {movl %eax, %edx}
 
-int %foo(int %t, int %C) {
+define i32 @foo(i32 %t, i32 %C) {
 entry:
         br label %cond_true
 
 cond_true:              ; preds = %cond_true, %entry
-        %t_addr.0.0 = phi int [ %t, %entry ], [ %tmp7, %cond_true ]             ; <int> [#uses=2]
-        %tmp7 = add int %t_addr.0.0, 1  ; <int> [#uses=1]
-        %tmp = setgt int %C, 39         ; <bool> [#uses=1]
-        br bool %tmp, label %bb12, label %cond_true
+        %t_addr.0.0 = phi i32 [ %t, %entry ], [ %tmp7, %cond_true ]             ; <i32> [#uses=2]
+        %tmp7 = add i32 %t_addr.0.0, 1          ; <i32> [#uses=1]
+        %tmp = icmp sgt i32 %C, 39              ; <i1> [#uses=1]
+        br i1 %tmp, label %bb12, label %cond_true
 
 bb12:           ; preds = %cond_true
-        ret int %t_addr.0.0
+        ret i32 %t_addr.0.0
 }
+

Modified: llvm/trunk/test/CodeGen/X86/2006-09-01-CycleInDAG.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-09-01-CycleInDAG.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-09-01-CycleInDAG.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-09-01-CycleInDAG.ll Thu Feb 21 01:42:26 2008
@@ -1,113 +1,108 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
-
-target endian = little
-target pointersize = 32
+; RUN: llvm-as < %s | llc -march=x86
+target datalayout = "e-p:32:32"
 target triple = "i686-apple-darwin8"
-	%struct.CUMULATIVE_ARGS = type { int, int, int, int, int, int, int, int, int, int, int, int }
-	%struct.FILE = type { ubyte*, int, int, short, short, %struct.__sbuf, int, sbyte*, int (sbyte*)*, int (sbyte*, sbyte*, int)*, long (sbyte*, long, int)*, int (sbyte*, sbyte*, int)*, %struct.__sbuf, %struct.__sFILEX*, int, [3 x ubyte], [1 x ubyte], %struct.__sbuf, int, long }
-	%struct.VEC_edge = type { uint, uint, [1 x %struct.edge_def*] }
-	%struct.VEC_tree = type { uint, uint, [1 x %struct.tree_node*] }
+	%struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+	%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
+	%struct.VEC_edge = type { i32, i32, [1 x %struct.edge_def*] }
+	%struct.VEC_tree = type { i32, i32, [1 x %struct.tree_node*] }
 	%struct.__sFILEX = type opaque
-	%struct.__sbuf = type { ubyte*, int }
-	%struct._obstack_chunk = type { sbyte*, %struct._obstack_chunk*, [4 x sbyte] }
-	%struct._var_map = type { %struct.partition_def*, int*, int*, %struct.tree_node**, uint, uint, int* }
-	%struct.basic_block_def = type { %struct.rtx_def*, %struct.rtx_def*, %struct.tree_node*, %struct.VEC_edge*, %struct.VEC_edge*, %struct.bitmap_head_def*, %struct.bitmap_head_def*, sbyte*, %struct.loop*, [2 x %struct.et_node*], %struct.basic_block_def*, %struct.basic_block_def*, %struct.reorder_block_def*, %struct.bb_ann_d*, long, int, int, int, int }
-	%struct.bb_ann_d = type { %struct.tree_node*, ubyte, %struct.edge_prediction* }
-	%struct.bitmap_element_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, uint, [4 x uint] }
-	%struct.bitmap_head_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, uint, %struct.bitmap_obstack* }
-	%struct.bitmap_iterator = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, uint, uint }
+	%struct.__sbuf = type { i8*, i32 }
+	%struct._obstack_chunk = type { i8*, %struct._obstack_chunk*, [4 x i8] }
+	%struct._var_map = type { %struct.partition_def*, i32*, i32*, %struct.tree_node**, i32, i32, i32* }
+	%struct.basic_block_def = type { %struct.rtx_def*, %struct.rtx_def*, %struct.tree_node*, %struct.VEC_edge*, %struct.VEC_edge*, %struct.bitmap_head_def*, %struct.bitmap_head_def*, i8*, %struct.loop*, [2 x %struct.et_node*], %struct.basic_block_def*, %struct.basic_block_def*, %struct.reorder_block_def*, %struct.bb_ann_d*, i64, i32, i32, i32, i32 }
+	%struct.bb_ann_d = type { %struct.tree_node*, i8, %struct.edge_prediction* }
+	%struct.bitmap_element_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, [4 x i32] }
+	%struct.bitmap_head_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, %struct.bitmap_obstack* }
+	%struct.bitmap_iterator = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, i32 }
 	%struct.bitmap_obstack = type { %struct.bitmap_element_def*, %struct.bitmap_head_def*, %struct.obstack }
 	%struct.block_stmt_iterator = type { %struct.tree_stmt_iterator, %struct.basic_block_def* }
-	%struct.coalesce_list_d = type { %struct._var_map*, %struct.partition_pair_d**, bool }
+	%struct.coalesce_list_d = type { %struct._var_map*, %struct.partition_pair_d**, i1 }
 	%struct.conflict_graph_def = type opaque
 	%struct.dataflow_d = type { %struct.varray_head_tag*, [2 x %struct.tree_node*] }
 	%struct.def_operand_ptr = type { %struct.tree_node** }
-	%struct.def_optype_d = type { uint, [1 x %struct.def_operand_ptr] }
+	%struct.def_optype_d = type { i32, [1 x %struct.def_operand_ptr] }
 	%struct.die_struct = type opaque
-	%struct.edge_def = type { %struct.basic_block_def*, %struct.basic_block_def*, %struct.edge_def_insns, sbyte*, %struct.location_t*, int, int, long, uint }
+	%struct.edge_def = type { %struct.basic_block_def*, %struct.basic_block_def*, %struct.edge_def_insns, i8*, %struct.__sbuf*, i32, i32, i64, i32 }
 	%struct.edge_def_insns = type { %struct.rtx_def* }
-	%struct.edge_iterator = type { uint, %struct.VEC_edge** }
-	%struct.edge_prediction = type { %struct.edge_prediction*, %struct.edge_def*, uint, int }
+	%struct.edge_iterator = type { i32, %struct.VEC_edge** }
+	%struct.edge_prediction = type { %struct.edge_prediction*, %struct.edge_def*, i32, i32 }
 	%struct.eh_status = type opaque
 	%struct.elt_list = type opaque
-	%struct.emit_status = type { int, int, %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack*, int, %struct.location_t, int, ubyte*, %struct.rtx_def** }
+	%struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack*, i32, %struct.__sbuf, i32, i8*, %struct.rtx_def** }
 	%struct.et_node = type opaque
-	%struct.expr_status = type { int, int, int, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
-	%struct.function = type { %struct.eh_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.function*, int, int, int, int, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, %struct.initial_value_struct*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, ubyte, int, long, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.varray_head_tag*, %struct.temp_slot*, int, %struct.var_refs_queue*, int, int, %struct.rtvec_def*, %struct.tree_node*, int, int, int, %struct.machine_function*, uint, uint, bool, bool, %struct.language_function*, %struct.rtx_def*, uint, int, int, int, %struct.location_t, %struct.varray_head_tag*, %struct.tree_node*, ubyte, ubyte, ubyte }
-	%struct.ht_identifier = type { ubyte*, uint, uint }
+	%struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
+	%struct.function = type { %struct.eh_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, %struct.initial_value_struct*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i8, i32, i64, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.varray_head_tag*, %struct.temp_slot*, i32, %struct.var_refs_queue*, i32, i32, %struct.rtvec_def*, %struct.tree_node*, i32, i32, i32, %struct.machine_function*, i32, i32, i1, i1, %struct.language_function*, %struct.rtx_def*, i32, i32, i32, i32, %struct.__sbuf, %struct.varray_head_tag*, %struct.tree_node*, i8, i8, i8 }
+	%struct.ht_identifier = type { i8*, i32, i32 }
 	%struct.initial_value_struct = type opaque
 	%struct.lang_decl = type opaque
 	%struct.lang_type = type opaque
 	%struct.language_function = type opaque
-	%struct.location_t = type { sbyte*, int }
+	%struct.location_t = type { i8*, i32 }
 	%struct.loop = type opaque
-	%struct.machine_function = type { int, uint, sbyte*, int, int }
-	%struct.obstack = type { int, %struct._obstack_chunk*, sbyte*, sbyte*, sbyte*, int, int, %struct._obstack_chunk* (sbyte*, int)*, void (sbyte*, %struct._obstack_chunk*)*, sbyte*, ubyte }
-	%struct.partition_def = type { int, [1 x %struct.partition_elem] }
-	%struct.partition_elem = type { int, %struct.partition_elem*, uint }
-	%struct.partition_pair_d = type { int, int, int, %struct.partition_pair_d* }
-	%struct.phi_arg_d = type { %struct.tree_node*, bool }
+	%struct.machine_function = type { i32, i32, i8*, i32, i32 }
+	%struct.obstack = type { i32, %struct._obstack_chunk*, i8*, i8*, i8*, i32, i32, %struct._obstack_chunk* (i8*, i32)*, void (i8*, %struct._obstack_chunk*)*, i8*, i8 }
+	%struct.partition_def = type { i32, [1 x %struct.partition_elem] }
+	%struct.partition_elem = type { i32, %struct.partition_elem*, i32 }
+	%struct.partition_pair_d = type { i32, i32, i32, %struct.partition_pair_d* }
+	%struct.phi_arg_d = type { %struct.tree_node*, i1 }
 	%struct.pointer_set_t = type opaque
-	%struct.ptr_info_def = type { ubyte, %struct.bitmap_head_def*, %struct.tree_node* }
+	%struct.ptr_info_def = type { i8, %struct.bitmap_head_def*, %struct.tree_node* }
 	%struct.real_value = type opaque
 	%struct.reg_info_def = type opaque
-	%struct.reorder_block_def = type { %struct.rtx_def*, %struct.rtx_def*, %struct.basic_block_def*, %struct.basic_block_def*, %struct.basic_block_def*, int, int, int }
+	%struct.reorder_block_def = type { %struct.rtx_def*, %struct.rtx_def*, %struct.basic_block_def*, %struct.basic_block_def*, %struct.basic_block_def*, i32, i32, i32 }
 	%struct.rtvec_def = type opaque
 	%struct.rtx_def = type opaque
 	%struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack* }
-	%struct.simple_bitmap_def = type { uint, uint, uint, [1 x ulong] }
-	%struct.ssa_op_iter = type { int, int, int, int, int, int, int, int, int, int, int, int, int, int, %struct.stmt_operands_d*, bool }
-	%struct.stmt_ann_d = type { %struct.tree_ann_common_d, ubyte, %struct.basic_block_def*, %struct.stmt_operands_d, %struct.dataflow_d*, %struct.bitmap_head_def*, uint }
+	%struct.simple_bitmap_def = type { i32, i32, i32, [1 x i64] }
+	%struct.ssa_op_iter = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.stmt_operands_d*, i1 }
+	%struct.stmt_ann_d = type { %struct.tree_ann_common_d, i8, %struct.basic_block_def*, %struct.stmt_operands_d, %struct.dataflow_d*, %struct.bitmap_head_def*, i32 }
 	%struct.stmt_operands_d = type { %struct.def_optype_d*, %struct.def_optype_d*, %struct.v_may_def_optype_d*, %struct.vuse_optype_d*, %struct.v_may_def_optype_d* }
 	%struct.temp_slot = type opaque
-	%struct.tree_ann_common_d = type { uint, sbyte*, %struct.tree_node* }
+	%struct.tree_ann_common_d = type { i32, i8*, %struct.tree_node* }
 	%struct.tree_ann_d = type { %struct.stmt_ann_d }
 	%struct.tree_binfo = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.VEC_tree*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.VEC_tree }
-	%struct.tree_block = type { %struct.tree_common, ubyte, [3 x ubyte], %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node* }
-	%struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %struct.tree_ann_d*, ubyte, ubyte, ubyte, ubyte, ubyte }
+	%struct.tree_block = type { %struct.tree_common, i8, [3 x i8], %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node* }
+	%struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %struct.tree_ann_d*, i8, i8, i8, i8, i8 }
 	%struct.tree_complex = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node* }
-	%struct.tree_decl = type { %struct.tree_common, %struct.location_t, uint, %struct.tree_node*, ubyte, ubyte, ubyte, ubyte, ubyte, ubyte, ubyte, uint, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, int, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, long, %struct.lang_decl* }
-	%struct.tree_decl_u1 = type { long }
-	%struct.tree_decl_u1_a = type { uint }
+	%struct.tree_decl = type { %struct.tree_common, %struct.__sbuf, i32, %struct.tree_node*, i8, i8, i8, i8, i8, i8, i8, i32, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, i32, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
+	%struct.tree_decl_u1 = type { i64 }
+	%struct.tree_decl_u1_a = type { i32 }
 	%struct.tree_decl_u2 = type { %struct.function* }
-	%struct.tree_exp = type { %struct.tree_common, %struct.location_t*, int, %struct.tree_node*, [1 x %struct.tree_node*] }
+	%struct.tree_exp = type { %struct.tree_common, %struct.__sbuf*, i32, %struct.tree_node*, [1 x %struct.tree_node*] }
 	%struct.tree_identifier = type { %struct.tree_common, %struct.ht_identifier }
 	%struct.tree_int_cst = type { %struct.tree_common, %struct.tree_int_cst_lowhi }
-	%struct.tree_int_cst_lowhi = type { ulong, long }
+	%struct.tree_int_cst_lowhi = type { i64, i64 }
 	%struct.tree_list = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node* }
-	%struct.tree_live_info_d = type { %struct._var_map*, %struct.bitmap_head_def*, %struct.bitmap_head_def**, int, %struct.bitmap_head_def** }
+	%struct.tree_live_info_d = type { %struct._var_map*, %struct.bitmap_head_def*, %struct.bitmap_head_def**, i32, %struct.bitmap_head_def** }
 	%struct.tree_node = type { %struct.tree_decl }
-	%struct.tree_partition_associator_d = type { %struct.varray_head_tag*, %struct.varray_head_tag*, int*, int*, int, int, %struct._var_map* }
-	%struct.tree_phi_node = type { %struct.tree_common, %struct.tree_node*, int, int, int, %struct.basic_block_def*, %struct.dataflow_d*, [1 x %struct.phi_arg_d] }
+	%struct.tree_partition_associator_d = type { %struct.varray_head_tag*, %struct.varray_head_tag*, i32*, i32*, i32, i32, %struct._var_map* }
+	%struct.tree_phi_node = type { %struct.tree_common, %struct.tree_node*, i32, i32, i32, %struct.basic_block_def*, %struct.dataflow_d*, [1 x %struct.phi_arg_d] }
 	%struct.tree_real_cst = type { %struct.tree_common, %struct.real_value* }
-	%struct.tree_ssa_name = type { %struct.tree_common, %struct.tree_node*, uint, %struct.ptr_info_def*, %struct.tree_node*, sbyte* }
+	%struct.tree_ssa_name = type { %struct.tree_common, %struct.tree_node*, i32, %struct.ptr_info_def*, %struct.tree_node*, i8* }
 	%struct.tree_statement_list = type { %struct.tree_common, %struct.tree_statement_list_node*, %struct.tree_statement_list_node* }
 	%struct.tree_statement_list_node = type { %struct.tree_statement_list_node*, %struct.tree_statement_list_node*, %struct.tree_node* }
 	%struct.tree_stmt_iterator = type { %struct.tree_statement_list_node*, %struct.tree_node* }
-	%struct.tree_string = type { %struct.tree_common, int, [1 x sbyte] }
-	%struct.tree_type = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, uint, ushort, ubyte, ubyte, uint, %struct.tree_node*, %struct.tree_node*, %struct.tree_type_symtab, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, long, %struct.lang_type* }
-	%struct.tree_type_symtab = type { int }
-	%struct.tree_value_handle = type { %struct.tree_common, %struct.value_set*, uint }
-	%struct.tree_vec = type { %struct.tree_common, int, [1 x %struct.tree_node*] }
+	%struct.tree_string = type { %struct.tree_common, i32, [1 x i8] }
+	%struct.tree_type = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i32, i16, i8, i8, i32, %struct.tree_node*, %struct.tree_node*, %struct.tree_decl_u1_a, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_type* }
+	%struct.tree_type_symtab = type { i32 }
+	%struct.tree_value_handle = type { %struct.tree_common, %struct.value_set*, i32 }
+	%struct.tree_vec = type { %struct.tree_common, i32, [1 x %struct.tree_node*] }
 	%struct.tree_vector = type { %struct.tree_common, %struct.tree_node* }
 	%struct.use_operand_ptr = type { %struct.tree_node** }
-	%struct.use_optype_d = type { uint, [1 x %struct.def_operand_ptr] }
+	%struct.use_optype_d = type { i32, [1 x %struct.def_operand_ptr] }
 	%struct.v_def_use_operand_type_t = type { %struct.tree_node*, %struct.tree_node* }
-	%struct.v_may_def_optype_d = type { uint, [1 x %struct.v_def_use_operand_type_t] }
-	%struct.v_must_def_optype_d = type { uint, [1 x %struct.v_def_use_operand_type_t] }
+	%struct.v_may_def_optype_d = type { i32, [1 x %struct.v_def_use_operand_type_t] }
+	%struct.v_must_def_optype_d = type { i32, [1 x %struct.v_def_use_operand_type_t] }
 	%struct.value_set = type opaque
-	%struct.var_ann_d = type { %struct.tree_ann_common_d, ubyte, ubyte, %struct.tree_node*, %struct.varray_head_tag*, uint, uint, uint, %struct.tree_node*, %struct.tree_node* }
-	%struct.var_refs_queue = type { %struct.rtx_def*, uint, int, %struct.var_refs_queue* }
+	%struct.var_ann_d = type { %struct.tree_ann_common_d, i8, i8, %struct.tree_node*, %struct.varray_head_tag*, i32, i32, i32, %struct.tree_node*, %struct.tree_node* }
+	%struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
 	%struct.varasm_status = type opaque
-	%struct.varray_data = type { [1 x long] }
-	%struct.varray_head_tag = type { uint, uint, uint, sbyte*, %struct.varray_data }
-	%struct.vuse_optype_d = type { uint, [1 x %struct.tree_node*] }
-%basic_block_info = external global %struct.varray_head_tag*		; <%struct.varray_head_tag**> [#uses=1]
-
-implementation   ; Functions:
-
+	%struct.varray_data = type { [1 x i64] }
+	%struct.varray_head_tag = type { i32, i32, i32, i8*, %struct.varray_data }
+	%struct.vuse_optype_d = type { i32, [1 x %struct.tree_node*] }
+@basic_block_info = external global %struct.varray_head_tag*		; <%struct.varray_head_tag**> [#uses=1]
 
-void %calculate_live_on_entry_cond_true3632(%struct.varray_head_tag* %stack3023.6, uint* %tmp3629, %struct.VEC_edge*** %tmp3397.out) {
+define void @calculate_live_on_entry_cond_true3632(%struct.varray_head_tag* %stack3023.6, i32* %tmp3629, %struct.VEC_edge*** %tmp3397.out) {
 newFuncRoot:
 	br label %cond_true3632
 
@@ -116,20 +111,21 @@
 	ret void
 
 cond_true3632:		; preds = %newFuncRoot
-	%tmp3378 = load uint* %tmp3629		; <uint> [#uses=1]
-	%tmp3379 = add uint %tmp3378, 4294967295		; <uint> [#uses=1]
-	%tmp3381 = getelementptr %struct.varray_head_tag* %stack3023.6, int 0, uint 4		; <%struct.varray_data*> [#uses=1]
-	%tmp3382 = cast %struct.varray_data* %tmp3381 to [1 x int]*		; <[1 x int]*> [#uses=1]
-	%tmp3383 = getelementptr [1 x int]* %tmp3382, int 0, uint %tmp3379		; <int*> [#uses=1]
-	%tmp3384 = load int* %tmp3383		; <int> [#uses=1]
-	%tmp3387 = load uint* %tmp3629		; <uint> [#uses=1]
-	%tmp3388 = add uint %tmp3387, 4294967295		; <uint> [#uses=1]
-	store uint %tmp3388, uint* %tmp3629
-	%tmp3391 = load %struct.varray_head_tag** %basic_block_info		; <%struct.varray_head_tag*> [#uses=1]
-	%tmp3393 = getelementptr %struct.varray_head_tag* %tmp3391, int 0, uint 4		; <%struct.varray_data*> [#uses=1]
-	%tmp3394 = cast %struct.varray_data* %tmp3393 to [1 x %struct.basic_block_def*]*		; <[1 x %struct.basic_block_def*]*> [#uses=1]
-	%tmp3395 = getelementptr [1 x %struct.basic_block_def*]* %tmp3394, int 0, int %tmp3384		; <%struct.basic_block_def**> [#uses=1]
+	%tmp3378 = load i32* %tmp3629		; <i32> [#uses=1]
+	%tmp3379 = add i32 %tmp3378, -1		; <i32> [#uses=1]
+	%tmp3381 = getelementptr %struct.varray_head_tag* %stack3023.6, i32 0, i32 4		; <%struct.varray_data*> [#uses=1]
+	%tmp3382 = bitcast %struct.varray_data* %tmp3381 to [1 x i32]*		; <[1 x i32]*> [#uses=1]
+	%gep.upgrd.1 = zext i32 %tmp3379 to i64		; <i64> [#uses=1]
+	%tmp3383 = getelementptr [1 x i32]* %tmp3382, i32 0, i64 %gep.upgrd.1		; <i32*> [#uses=1]
+	%tmp3384 = load i32* %tmp3383		; <i32> [#uses=1]
+	%tmp3387 = load i32* %tmp3629		; <i32> [#uses=1]
+	%tmp3388 = add i32 %tmp3387, -1		; <i32> [#uses=1]
+	store i32 %tmp3388, i32* %tmp3629
+	%tmp3391 = load %struct.varray_head_tag** @basic_block_info		; <%struct.varray_head_tag*> [#uses=1]
+	%tmp3393 = getelementptr %struct.varray_head_tag* %tmp3391, i32 0, i32 4		; <%struct.varray_data*> [#uses=1]
+	%tmp3394 = bitcast %struct.varray_data* %tmp3393 to [1 x %struct.basic_block_def*]*		; <[1 x %struct.basic_block_def*]*> [#uses=1]
+	%tmp3395 = getelementptr [1 x %struct.basic_block_def*]* %tmp3394, i32 0, i32 %tmp3384		; <%struct.basic_block_def**> [#uses=1]
 	%tmp3396 = load %struct.basic_block_def** %tmp3395		; <%struct.basic_block_def*> [#uses=1]
-	%tmp3397 = getelementptr %struct.basic_block_def* %tmp3396, int 0, uint 3		; <%struct.VEC_edge**> [#uses=1]
+	%tmp3397 = getelementptr %struct.basic_block_def* %tmp3396, i32 0, i32 3		; <%struct.VEC_edge**> [#uses=1]
 	br label %bb3502.exitStub
 }

Modified: llvm/trunk/test/CodeGen/X86/2006-10-02-BoolRetCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-10-02-BoolRetCrash.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-10-02-BoolRetCrash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-10-02-BoolRetCrash.ll Thu Feb 21 01:42:26 2008
@@ -1,6 +1,7 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc 
+; RUN: llvm-as < %s | llc 
 ; PR933
 
-fastcc bool %test() {
-	ret bool true
+define fastcc i1 @test() {
+        ret i1 true
 }
+

Modified: llvm/trunk/test/CodeGen/X86/2006-10-07-ScalarSSEMiscompile.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-10-07-ScalarSSEMiscompile.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-10-07-ScalarSSEMiscompile.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-10-07-ScalarSSEMiscompile.ll Thu Feb 21 01:42:26 2008
@@ -1,17 +1,15 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=sse | grep movaps
+; RUN: llvm-as < %s | llc -march=x86 -mattr=sse | grep movaps
 ; Test that the load is NOT folded into the intrinsic, which would zero the top
 ; elts of the loaded vector.
 
-target endian = little
-target pointersize = 32
+target datalayout = "e-p:32:32"
 target triple = "i686-apple-darwin8.7.2"
 
-implementation   ; Functions:
-
-<4 x float> %test(<4 x float> %A, <4 x float>* %B) {
-	%BV = load <4 x float>* %B
-	%tmp28 = tail call <4 x float> %llvm.x86.sse.sub.ss( <4 x float> %A, <4 x float> %BV)
-	ret <4 x float> %tmp28
+define <4 x float> @test(<4 x float> %A, <4 x float>* %B) {
+        %BV = load <4 x float>* %B              ; <<4 x float>> [#uses=1]
+        %tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %A, <4 x float> %BV )       ; <<4 x float>> [#uses=1]
+        ret <4 x float> %tmp28
 }
 
-declare <4 x float> %llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
+

Modified: llvm/trunk/test/CodeGen/X86/2006-10-12-CycleInDAG.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-10-12-CycleInDAG.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-10-12-CycleInDAG.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-10-12-CycleInDAG.ll Thu Feb 21 01:42:26 2008
@@ -1,41 +1,41 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86
 	%struct.function = type opaque
 	%struct.lang_decl = type opaque
-	%struct.location_t = type { sbyte*, int }
+	%struct.location_t = type { i8*, i32 }
 	%struct.rtx_def = type opaque
-	%struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %union.tree_ann_d*, ubyte, ubyte, ubyte, ubyte, ubyte }
-	%struct.tree_decl = type { %struct.tree_common, %struct.location_t, uint, %struct.tree_node*, ubyte, ubyte, ubyte, ubyte, ubyte, ubyte, ubyte, ubyte, uint, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, int, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, long, %struct.lang_decl* }
-	%struct.tree_decl_u1 = type { long }
+	%struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %union.tree_ann_d*, i8, i8, i8, i8, i8 }
+	%struct.tree_decl = type { %struct.tree_common, %struct.location_t, i32, %struct.tree_node*, i8, i8, i8, i8, i8, i8, i8, i8, i32, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, i32, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
+	%struct.tree_decl_u1 = type { i64 }
 	%struct.tree_decl_u2 = type { %struct.function* }
 	%struct.tree_node = type { %struct.tree_decl }
 	%union.tree_ann_d = type opaque
 
-void %check_format_arg() {
-	br bool false, label %cond_next196, label %bb12.preheader
+define void @check_format_arg() {
+	br i1 false, label %cond_next196, label %bb12.preheader
 
-bb12.preheader:
+bb12.preheader:		; preds = %0
 	ret void
 
-cond_next196:
-	br bool false, label %cond_next330, label %cond_true304
+cond_next196:		; preds = %0
+	br i1 false, label %cond_next330, label %cond_true304
 
-cond_true304:
+cond_true304:		; preds = %cond_next196
 	ret void
 
-cond_next330:
-	br bool false, label %cond_next472, label %bb441
+cond_next330:		; preds = %cond_next196
+	br i1 false, label %cond_next472, label %bb441
 
-bb441:
+bb441:		; preds = %cond_next330
 	ret void
 
-cond_next472:
-	%tmp490 = load %struct.tree_node** null
-	%tmp492 = getelementptr %struct.tree_node* %tmp490, int 0, uint 0, uint 0, uint 3
-	%tmp492 = cast ubyte* %tmp492 to uint*
-	%tmp493 = load uint* %tmp492
-	%tmp495 = cast uint %tmp493 to ubyte
-	%tmp496 = seteq ubyte %tmp495, 11
-	%tmp496 = cast bool %tmp496 to sbyte
-	store sbyte %tmp496, sbyte* null
+cond_next472:		; preds = %cond_next330
+	%tmp490 = load %struct.tree_node** null		; <%struct.tree_node*> [#uses=1]
+	%tmp492 = getelementptr %struct.tree_node* %tmp490, i32 0, i32 0, i32 0, i32 3		; <i8*> [#uses=1]
+	%tmp492.upgrd.1 = bitcast i8* %tmp492 to i32*		; <i32*> [#uses=1]
+	%tmp493 = load i32* %tmp492.upgrd.1		; <i32> [#uses=1]
+	%tmp495 = trunc i32 %tmp493 to i8		; <i8> [#uses=1]
+	%tmp496 = icmp eq i8 %tmp495, 11		; <i1> [#uses=1]
+	%tmp496.upgrd.2 = zext i1 %tmp496 to i8		; <i8> [#uses=1]
+	store i8 %tmp496.upgrd.2, i8* null
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/X86/2006-10-13-CycleInDAG.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-10-13-CycleInDAG.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-10-13-CycleInDAG.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-10-13-CycleInDAG.ll Thu Feb 21 01:42:26 2008
@@ -1,20 +1,19 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86
+@str = external global [18 x i8]		; <[18 x i8]*> [#uses=1]
 
-%str = external global [18 x sbyte]
-
-void %test() {
+define void @test() {
 bb.i:
-	%tmp.i660 = load <4 x float>* null
-	call void (int, ...)* %printf( int 0, sbyte* getelementptr ([18 x sbyte]* %str, int 0, uint 0), double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00 )
-	%tmp152.i = load <4 x uint>* null
-	%tmp156.i = cast <4 x uint> %tmp152.i to <4 x int>
-	%tmp175.i = cast <4 x float> %tmp.i660 to <4 x int>
-	%tmp176.i = xor <4 x int> %tmp156.i, < int -1, int -1, int -1, int -1 >
-	%tmp177.i = and <4 x int> %tmp176.i, %tmp175.i
-	%tmp190.i = or <4 x int> %tmp177.i, zeroinitializer
-	%tmp191.i = cast <4 x int> %tmp190.i to <4 x float>
+	%tmp.i660 = load <4 x float>* null		; <<4 x float>> [#uses=1]
+	call void (i32, ...)* @printf( i32 0, i8* getelementptr ([18 x i8]* @str, i32 0, i64 0), double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00 )
+	%tmp152.i = load <4 x i32>* null		; <<4 x i32>> [#uses=1]
+	%tmp156.i = bitcast <4 x i32> %tmp152.i to <4 x i32>		; <<4 x i32>> [#uses=1]
+	%tmp175.i = bitcast <4 x float> %tmp.i660 to <4 x i32>		; <<4 x i32>> [#uses=1]
+	%tmp176.i = xor <4 x i32> %tmp156.i, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
+	%tmp177.i = and <4 x i32> %tmp176.i, %tmp175.i		; <<4 x i32>> [#uses=1]
+	%tmp190.i = or <4 x i32> %tmp177.i, zeroinitializer		; <<4 x i32>> [#uses=1]
+	%tmp191.i = bitcast <4 x i32> %tmp190.i to <4 x float>		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp191.i, <4 x float>* null
 	ret void
 }
 
-declare void %printf(int, ...)
+declare void @printf(i32, ...)

Modified: llvm/trunk/test/CodeGen/X86/2006-10-19-SwitchUnnecessaryBranching.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-10-19-SwitchUnnecessaryBranching.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-10-19-SwitchUnnecessaryBranching.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-10-19-SwitchUnnecessaryBranching.ll Thu Feb 21 01:42:26 2008
@@ -1,28 +1,26 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | %prcontext je 1 | \
+; RUN: llvm-as < %s | llc -march=x86 | %prcontext je 1 | \
 ; RUN:   grep BB1_1:
 
-%str = internal constant [14 x sbyte] c"Hello world!\0A\00"		; <[14 x sbyte]*> [#uses=1]
-%str = internal constant [13 x sbyte] c"Blah world!\0A\00"		; <[13 x sbyte]*> [#uses=1]
+@str = internal constant [14 x i8] c"Hello world!\0A\00"		; <[14 x i8]*> [#uses=1]
+@str.upgrd.1 = internal constant [13 x i8] c"Blah world!\0A\00"		; <[13 x i8]*> [#uses=1]
 
-implementation   ; Functions:
-
-int %main(int %argc, sbyte** %argv) {
+define i32 @main(i32 %argc, i8** %argv) {
 entry:
-	switch int %argc, label %UnifiedReturnBlock [
-		 int 1, label %bb
-		 int 2, label %bb2
+	switch i32 %argc, label %UnifiedReturnBlock [
+		 i32 1, label %bb
+		 i32 2, label %bb2
 	]
 
 bb:		; preds = %entry
-	%tmp1 = tail call int (sbyte*, ...)* %printf( sbyte* getelementptr ([14 x sbyte]* %str, int 0, uint 0) )		; <int> [#uses=0]
-	ret int 0
+	%tmp1 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([14 x i8]* @str, i32 0, i64 0) )		; <i32> [#uses=0]
+	ret i32 0
 
 bb2:		; preds = %entry
-	%tmp4 = tail call int (sbyte*, ...)* %printf( sbyte* getelementptr ([13 x sbyte]* %str, int 0, uint 0) )		; <int> [#uses=0]
-	ret int 0
+	%tmp4 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([13 x i8]* @str.upgrd.1, i32 0, i64 0) )		; <i32> [#uses=0]
+	ret i32 0
 
 UnifiedReturnBlock:		; preds = %entry
-	ret int 0
+	ret i32 0
 }
 
-declare int %printf(sbyte*, ...)
+declare i32 @printf(i8*, ...)

Modified: llvm/trunk/test/CodeGen/X86/2006-11-17-IllegalMove.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-11-17-IllegalMove.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-11-17-IllegalMove.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-11-17-IllegalMove.ll Thu Feb 21 01:42:26 2008
@@ -1,38 +1,39 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86-64 | \
+; RUN: llvm-as < %s | llc -march=x86-64 | \
 ; RUN:   grep movb | count 2
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86-64 | \
+; RUN: llvm-as < %s | llc -march=x86-64 | \
 ; RUN:   grep movzbw
 
-void %handle_vector_size_attribute() {
+
+define void @handle_vector_size_attribute() {
 entry:
-	%tmp69 = load uint* null		; <uint> [#uses=1]
-	switch uint %tmp69, label %bb84 [
-		 uint 2, label %bb77
-		 uint 1, label %bb77
+	%tmp69 = load i32* null		; <i32> [#uses=1]
+	switch i32 %tmp69, label %bb84 [
+		 i32 2, label %bb77
+		 i32 1, label %bb77
 	]
 
 bb77:		; preds = %entry, %entry
-	%tmp99 = udiv ulong 0, 0		; <ulong> [#uses=1]
-	%tmp = load ubyte* null		; <ubyte> [#uses=1]
-	%tmp114 = seteq ulong 0, 0		; <bool> [#uses=1]
-	br bool %tmp114, label %cond_true115, label %cond_next136
+	%tmp99 = udiv i64 0, 0		; <i64> [#uses=1]
+	%tmp = load i8* null		; <i8> [#uses=1]
+	%tmp114 = icmp eq i64 0, 0		; <i1> [#uses=1]
+	br i1 %tmp114, label %cond_true115, label %cond_next136
 
 bb84:		; preds = %entry
 	ret void
 
 cond_true115:		; preds = %bb77
-	%tmp118 = load ubyte* null		; <ubyte> [#uses=1]
-	br bool false, label %cond_next129, label %cond_true120
+	%tmp118 = load i8* null		; <i8> [#uses=1]
+	br i1 false, label %cond_next129, label %cond_true120
 
 cond_true120:		; preds = %cond_true115
-	%tmp127 = udiv ubyte %tmp, %tmp118		; <ubyte> [#uses=1]
-	%tmp127 = cast ubyte %tmp127 to ulong		; <ulong> [#uses=1]
+	%tmp127 = udiv i8 %tmp, %tmp118		; <i8> [#uses=1]
+	%tmp127.upgrd.1 = zext i8 %tmp127 to i64		; <i64> [#uses=1]
 	br label %cond_next129
 
 cond_next129:		; preds = %cond_true120, %cond_true115
-	%iftmp.30.0 = phi ulong [ %tmp127, %cond_true120 ], [ 0, %cond_true115 ]		; <ulong> [#uses=1]
-	%tmp132 = seteq ulong %iftmp.30.0, %tmp99		; <bool> [#uses=1]
-	br bool %tmp132, label %cond_false148, label %cond_next136
+	%iftmp.30.0 = phi i64 [ %tmp127.upgrd.1, %cond_true120 ], [ 0, %cond_true115 ]		; <i64> [#uses=1]
+	%tmp132 = icmp eq i64 %iftmp.30.0, %tmp99		; <i1> [#uses=1]
+	br i1 %tmp132, label %cond_false148, label %cond_next136
 
 cond_next136:		; preds = %cond_next129, %bb77
 	ret void

Modified: llvm/trunk/test/CodeGen/X86/2006-11-27-SelectLegalize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-11-27-SelectLegalize.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-11-27-SelectLegalize.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-11-27-SelectLegalize.ll Thu Feb 21 01:42:26 2008
@@ -1,8 +1,9 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep test.*1
+; RUN: llvm-as < %s | llc -march=x86 | grep test.*1
 ; PR1016
 
-int %test(int %A, int %B, int %C) {
-	%a = trunc int %A to bool
-	%D = select bool %a, int %B, int %C
-	ret int %D
+define i32 @test(i32 %A, i32 %B, i32 %C) {
+        %a = trunc i32 %A to i1         ; <i1> [#uses=1]
+        %D = select i1 %a, i32 %B, i32 %C               ; <i32> [#uses=1]
+        ret i32 %D
 }
+

Modified: llvm/trunk/test/CodeGen/X86/2006-11-28-Memcpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-11-28-Memcpy.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-11-28-Memcpy.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-11-28-Memcpy.ll Thu Feb 21 01:42:26 2008
@@ -1,35 +1,36 @@
 ; PR1022, PR1023
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | \
+; RUN: llvm-as < %s | llc -march=x86 | \
 ; RUN:   grep 3721182122 | count 2
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | \
+; RUN: llvm-as < %s | llc -march=x86 | \
 ; RUN:   grep -E {movl	_?bytes2} | count 1
 
-%fmt = constant [4 x sbyte] c"%x\0A\00"
-%bytes = constant [4 x sbyte] c"\AA\BB\CC\DD"
-%bytes2 = global [4 x sbyte] c"\AA\BB\CC\DD"
+@fmt = constant [4 x i8] c"%x\0A\00"            ; <[4 x i8]*> [#uses=2]
+@bytes = constant [4 x i8] c"\AA\BB\CC\DD"              ; <[4 x i8]*> [#uses=1]
+@bytes2 = global [4 x i8] c"\AA\BB\CC\DD"               ; <[4 x i8]*> [#uses=1]
 
-
-int %test1() {
-        %y = alloca uint
-        %c = cast uint* %y to sbyte*
-        %z = getelementptr [4 x sbyte]* %bytes, int 0, int 0
-        call void %llvm.memcpy.i32( sbyte* %c, sbyte* %z, uint 4, uint 1 )
-        %r = load uint* %y
-        %t = cast [4 x sbyte]* %fmt to sbyte*
-        %tmp = call int (sbyte*, ...)* %printf( sbyte* %t, uint %r )
-        ret int 0
+define i32 @test1() {
+        %y = alloca i32         ; <i32*> [#uses=2]
+        %c = bitcast i32* %y to i8*             ; <i8*> [#uses=1]
+        %z = getelementptr [4 x i8]* @bytes, i32 0, i32 0               ; <i8*> [#uses=1]
+        call void @llvm.memcpy.i32( i8* %c, i8* %z, i32 4, i32 1 )
+        %r = load i32* %y               ; <i32> [#uses=1]
+        %t = bitcast [4 x i8]* @fmt to i8*              ; <i8*> [#uses=1]
+        %tmp = call i32 (i8*, ...)* @printf( i8* %t, i32 %r )           ; <i32> [#uses=0]
+        ret i32 0
 }
 
-void %test2() {
-        %y = alloca uint
-        %c = cast uint* %y to sbyte*
-        %z = getelementptr [4 x sbyte]* %bytes2, int 0, int 0
-        call void %llvm.memcpy.i32( sbyte* %c, sbyte* %z, uint 4, uint 1 )
-        %r = load uint* %y
-        %t = cast [4 x sbyte]* %fmt to sbyte*
-        %tmp = call int (sbyte*, ...)* %printf( sbyte* %t, uint %r )
+define void @test2() {
+        %y = alloca i32         ; <i32*> [#uses=2]
+        %c = bitcast i32* %y to i8*             ; <i8*> [#uses=1]
+        %z = getelementptr [4 x i8]* @bytes2, i32 0, i32 0              ; <i8*> [#uses=1]
+        call void @llvm.memcpy.i32( i8* %c, i8* %z, i32 4, i32 1 )
+        %r = load i32* %y               ; <i32> [#uses=1]
+        %t = bitcast [4 x i8]* @fmt to i8*              ; <i8*> [#uses=1]
+        %tmp = call i32 (i8*, ...)* @printf( i8* %t, i32 %r )           ; <i32> [#uses=0]
         ret void
 }
 
-declare void %llvm.memcpy.i32(sbyte*, sbyte*, uint, uint)
-declare int %printf(sbyte*, ...)
+declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
+
+declare i32 @printf(i8*, ...)
+

Modified: llvm/trunk/test/CodeGen/X86/2006-12-19-IntelSyntax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-12-19-IntelSyntax.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-12-19-IntelSyntax.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-12-19-IntelSyntax.ll Thu Feb 21 01:42:26 2008
@@ -1,91 +1,86 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel
 ; PR1061
-
 target datalayout = "e-p:32:32"
-target endian = little
-target pointersize = 32
 target triple = "i686-pc-linux-gnu"
 
-implementation   ; Functions:
-
-void %bar(uint %n) {
+define void @bar(i32 %n) {
 entry:
-	switch uint %n, label %bb12 [
-		 uint 1, label %bb
-		 uint 2, label %bb6
-		 uint 4, label %bb7
-		 uint 5, label %bb8
-		 uint 6, label %bb10
-		 uint 7, label %bb1
-		 uint 8, label %bb3
-		 uint 9, label %bb4
-		 uint 10, label %bb9
-		 uint 11, label %bb2
-		 uint 12, label %bb5
-		 uint 13, label %bb11
+	switch i32 %n, label %bb12 [
+		 i32 1, label %bb
+		 i32 2, label %bb6
+		 i32 4, label %bb7
+		 i32 5, label %bb8
+		 i32 6, label %bb10
+		 i32 7, label %bb1
+		 i32 8, label %bb3
+		 i32 9, label %bb4
+		 i32 10, label %bb9
+		 i32 11, label %bb2
+		 i32 12, label %bb5
+		 i32 13, label %bb11
 	]
 
 bb:		; preds = %entry
-	call void (...)* %foo1( )
+	call void (...)* @foo1( )
 	ret void
 
 bb1:		; preds = %entry
-	call void (...)* %foo2( )
+	call void (...)* @foo2( )
 	ret void
 
 bb2:		; preds = %entry
-	call void (...)* %foo6( )
+	call void (...)* @foo6( )
 	ret void
 
 bb3:		; preds = %entry
-	call void (...)* %foo3( )
+	call void (...)* @foo3( )
 	ret void
 
 bb4:		; preds = %entry
-	call void (...)* %foo4( )
+	call void (...)* @foo4( )
 	ret void
 
 bb5:		; preds = %entry
-	call void (...)* %foo5( )
+	call void (...)* @foo5( )
 	ret void
 
 bb6:		; preds = %entry
-	call void (...)* %foo1( )
+	call void (...)* @foo1( )
 	ret void
 
 bb7:		; preds = %entry
-	call void (...)* %foo2( )
+	call void (...)* @foo2( )
 	ret void
 
 bb8:		; preds = %entry
-	call void (...)* %foo6( )
+	call void (...)* @foo6( )
 	ret void
 
 bb9:		; preds = %entry
-	call void (...)* %foo3( )
+	call void (...)* @foo3( )
 	ret void
 
 bb10:		; preds = %entry
-	call void (...)* %foo4( )
+	call void (...)* @foo4( )
 	ret void
 
 bb11:		; preds = %entry
-	call void (...)* %foo5( )
+	call void (...)* @foo5( )
 	ret void
 
 bb12:		; preds = %entry
-	call void (...)* %foo6( )
+	call void (...)* @foo6( )
 	ret void
 }
 
-declare void %foo1(...)
+declare void @foo1(...)
 
-declare void %foo2(...)
+declare void @foo2(...)
 
-declare void %foo6(...)
+declare void @foo6(...)
 
-declare void %foo3(...)
+declare void @foo3(...)
 
-declare void %foo4(...)
+declare void @foo4(...)
 
-declare void %foo5(...)
+declare void @foo5(...)

Modified: llvm/trunk/test/CodeGen/X86/2007-05-05-VecCastExpand.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2007-05-05-VecCastExpand.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/2007-05-05-VecCastExpand.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2007-05-05-VecCastExpand.ll Thu Feb 21 01:42:26 2008
@@ -1,21 +1,21 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=i386 -mattr=+sse
+; RUN: llvm-as < %s | llc -march=x86 -mcpu=i386 -mattr=+sse
 ; PR1371
 
-%str = external global [18 x sbyte]
+@str = external global [18 x i8]		; <[18 x i8]*> [#uses=1]
 
-void %test() {
+define void @test() {
 bb.i:
-	%tmp.i660 = load <4 x float>* null
-	call void (int, ...)* %printf( int 0, sbyte* getelementptr ([18 x sbyte]* %str, int 0, uint 0), double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00 )
-	%tmp152.i = load <4 x uint>* null
-	%tmp156.i = cast <4 x uint> %tmp152.i to <4 x int>
-	%tmp175.i = cast <4 x float> %tmp.i660 to <4 x int>
-	%tmp176.i = xor <4 x int> %tmp156.i, < int -1, int -1, int -1, int -1 >
-	%tmp177.i = and <4 x int> %tmp176.i, %tmp175.i
-	%tmp190.i = or <4 x int> %tmp177.i, zeroinitializer
-	%tmp191.i = cast <4 x int> %tmp190.i to <4 x float>
+	%tmp.i660 = load <4 x float>* null		; <<4 x float>> [#uses=1]
+	call void (i32, ...)* @printf( i32 0, i8* getelementptr ([18 x i8]* @str, i32 0, i64 0), double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00 )
+	%tmp152.i = load <4 x i32>* null		; <<4 x i32>> [#uses=1]
+	%tmp156.i = bitcast <4 x i32> %tmp152.i to <4 x i32>		; <<4 x i32>> [#uses=1]
+	%tmp175.i = bitcast <4 x float> %tmp.i660 to <4 x i32>		; <<4 x i32>> [#uses=1]
+	%tmp176.i = xor <4 x i32> %tmp156.i, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
+	%tmp177.i = and <4 x i32> %tmp176.i, %tmp175.i		; <<4 x i32>> [#uses=1]
+	%tmp190.i = or <4 x i32> %tmp177.i, zeroinitializer		; <<4 x i32>> [#uses=1]
+	%tmp191.i = bitcast <4 x i32> %tmp190.i to <4 x float>		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp191.i, <4 x float>* null
 	ret void
 }
 
-declare void %printf(int, ...)
+declare void @printf(i32, ...)

Modified: llvm/trunk/test/CodeGen/X86/and-or-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/and-or-fold.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/and-or-fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/and-or-fold.ll Thu Feb 21 01:42:26 2008
@@ -1,13 +1,14 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep and | count 1
+; RUN: llvm-as < %s | llc -march=x86 | grep and | count 1
 
 ; The dag combiner should fold together (x&127)|(y&16711680) -> (x|y)&c1
 ; in this case.
-uint %test6(uint %x, ushort %y) {
-        %tmp1 = cast ushort %y to uint
-        %tmp2 = and uint %tmp1, 127             ; <uint> [#uses=1]
-        %tmp4 = shl uint %x, ubyte 16           ; <uint> [#uses=1]
-        %tmp5 = and uint %tmp4, 16711680                ; <uint> [#uses=1]
-        %tmp6 = or uint %tmp2, %tmp5            ; <uint> [#uses=1]
-        ret uint %tmp6
+
+define i32 @test6(i32 %x, i16 %y) {
+        %tmp1 = zext i16 %y to i32              ; <i32> [#uses=1]
+        %tmp2 = and i32 %tmp1, 127              ; <i32> [#uses=1]
+        %tmp4 = shl i32 %x, 16          ; <i32> [#uses=1]
+        %tmp5 = and i32 %tmp4, 16711680         ; <i32> [#uses=1]
+        %tmp6 = or i32 %tmp2, %tmp5             ; <i32> [#uses=1]
+        ret i32 %tmp6
 }
 

Modified: llvm/trunk/test/CodeGen/X86/asm-global-imm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/asm-global-imm.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/asm-global-imm.ll (original)
+++ llvm/trunk/test/CodeGen/X86/asm-global-imm.ll Thu Feb 21 01:42:26 2008
@@ -1,31 +1,23 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -relocation-model=static | \
+; RUN: llvm-as < %s | llc -march=x86 -relocation-model=static | \
 ; RUN:   grep {test1 \$_GV}
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -relocation-model=static | \
+; RUN: llvm-as < %s | llc -march=x86 -relocation-model=static | \
 ; RUN:   grep {test2 _GV}
 ; PR882
 
 target datalayout = "e-p:32:32"
-target endian = little
-target pointersize = 32
 target triple = "i686-apple-darwin9.0.0d2"
-%GV = weak global int 0         ; <int*> [#uses=2]
-%str = external global [12 x sbyte]             ; <[12 x sbyte]*> [#uses=1]
+@GV = weak global i32 0		; <i32*> [#uses=2]
+@str = external global [12 x i8]		; <[12 x i8]*> [#uses=1]
 
-implementation   ; Functions:
-
-void %foo() {
+define void @foo() {
 entry:
-        tail call void asm sideeffect "test1 $0", "i,~{dirflag},~{fpsr},~{flags}"( int* %GV )
-        tail call void asm sideeffect "test2 ${0:c}", "i,~{dirflag},~{fpsr},~{flags}"( int* %GV )
-        ret void
+	tail call void asm sideeffect "test1 $0", "i,~{dirflag},~{fpsr},~{flags}"( i32* @GV )
+	tail call void asm sideeffect "test2 ${0:c}", "i,~{dirflag},~{fpsr},~{flags}"( i32* @GV )
+	ret void
 }
 
-
-void %unknown_bootoption() {
+define void @unknown_bootoption() {
 entry:
-        call void asm sideeffect "ud2\0A\09.word ${0:c}\0A\09.long ${1:c}\0A",
-"i,i,~{dirflag},~{fpsr},~{flags}"( int 235, sbyte* getelementptr ([12 x sbyte]*
-%str, int 0, uint 0) )
-        ret void
+	call void asm sideeffect "ud2\0A\09.word ${0:c}\0A\09.long ${1:c}\0A", "i,i,~{dirflag},~{fpsr},~{flags}"( i32 235, i8* getelementptr ([12 x i8]* @str, i32 0, i64 0) )
+	ret void
 }
-

Modified: llvm/trunk/test/CodeGen/X86/bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bitcast.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bitcast.ll Thu Feb 21 01:42:26 2008
@@ -1,23 +1,24 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86-64
+; RUN: llvm-as < %s | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86-64
 ; PR1033
 
-long %test1(double %t) {
-  %u = bitcast double %t to long
-  ret long %u
+define i64 @test1(double %t) {
+        %u = bitcast double %t to i64           ; <i64> [#uses=1]
+        ret i64 %u
 }
 
-double %test2(long %t) {
-  %u = bitcast long %t to double
-  ret double %u
+define double @test2(i64 %t) {
+        %u = bitcast i64 %t to double           ; <double> [#uses=1]
+        ret double %u
 }
 
-int %test3(float %t) {
-  %u = bitcast float %t to int
-  ret int %u
+define i32 @test3(float %t) {
+        %u = bitcast float %t to i32            ; <i32> [#uses=1]
+        ret i32 %u
 }
 
-float %test4(int %t) {
-  %u = bitcast int %t to float
-  ret float %u
+define float @test4(i32 %t) {
+        %u = bitcast i32 %t to float            ; <float> [#uses=1]
+        ret float %u
 }
+

Modified: llvm/trunk/test/CodeGen/X86/bswap.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bswap.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/bswap.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bswap.ll Thu Feb 21 01:42:26 2008
@@ -1,24 +1,27 @@
 ; bswap should be constant folded when it is passed a constant argument
 
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | \
+; RUN: llvm-as < %s | llc -march=x86 | \
 ; RUN:   grep bswapl | count 3
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep rolw | count 1
+; RUN: llvm-as < %s | llc -march=x86 | grep rolw | count 1
 
-declare ushort %llvm.bswap.i16(ushort)
-declare uint %llvm.bswap.i32(uint)
-declare ulong %llvm.bswap.i64(ulong)
-
-ushort %W(ushort %A) {
-	%Z = call ushort %llvm.bswap.i16(ushort %A)
-	ret ushort %Z
+declare i16 @llvm.bswap.i16(i16)
+
+declare i32 @llvm.bswap.i32(i32)
+
+declare i64 @llvm.bswap.i64(i64)
+
+define i16 @W(i16 %A) {
+        %Z = call i16 @llvm.bswap.i16( i16 %A )         ; <i16> [#uses=1]
+        ret i16 %Z
 }
 
-uint %X(uint %A) {
-	%Z = call uint %llvm.bswap.i32(uint %A)
-	ret uint %Z
+define i32 @X(i32 %A) {
+        %Z = call i32 @llvm.bswap.i32( i32 %A )         ; <i32> [#uses=1]
+        ret i32 %Z
 }
 
-ulong %Y(ulong %A) {
-	%Z = call ulong %llvm.bswap.i64(ulong %A)
-	ret ulong %Z
+define i64 @Y(i64 %A) {
+        %Z = call i64 @llvm.bswap.i64( i64 %A )         ; <i64> [#uses=1]
+        ret i64 %Z
 }
+

Modified: llvm/trunk/test/CodeGen/X86/cmp-test.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cmp-test.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/cmp-test.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cmp-test.ll Thu Feb 21 01:42:26 2008
@@ -1,27 +1,27 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep cmp | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep test | count 1
+; RUN: llvm-as < %s | llc -march=x86 | grep cmp | count 1
+; RUN: llvm-as < %s | llc -march=x86 | grep test | count 1
 
-int %f1(int %X, int* %y) {
-	%tmp = load int* %y
-	%tmp = seteq int %tmp, 0
-	br bool %tmp, label %ReturnBlock, label %cond_true
+define i32 @f1(i32 %X, i32* %y) {
+	%tmp = load i32* %y		; <i32> [#uses=1]
+	%tmp.upgrd.1 = icmp eq i32 %tmp, 0		; <i1> [#uses=1]
+	br i1 %tmp.upgrd.1, label %ReturnBlock, label %cond_true
 
-cond_true:
-	ret int 1
+cond_true:		; preds = %0
+	ret i32 1
 
-ReturnBlock:
-	ret int 0
+ReturnBlock:		; preds = %0
+	ret i32 0
 }
 
-int %f2(int %X, int* %y) {
-	%tmp = load int* %y
-        %tmp1 = shl int %tmp, ubyte 3
-	%tmp1 = seteq int %tmp1, 0
-	br bool %tmp1, label %ReturnBlock, label %cond_true
+define i32 @f2(i32 %X, i32* %y) {
+	%tmp = load i32* %y		; <i32> [#uses=1]
+	%tmp1 = shl i32 %tmp, 3		; <i32> [#uses=1]
+	%tmp1.upgrd.2 = icmp eq i32 %tmp1, 0		; <i1> [#uses=1]
+	br i1 %tmp1.upgrd.2, label %ReturnBlock, label %cond_true
 
-cond_true:
-	ret int 1
+cond_true:		; preds = %0
+	ret i32 1
 
-ReturnBlock:
-	ret int 0
+ReturnBlock:		; preds = %0
+	ret i32 0
 }

Modified: llvm/trunk/test/CodeGen/X86/commute-two-addr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-two-addr.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-two-addr.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-two-addr.ll Thu Feb 21 01:42:26 2008
@@ -2,24 +2,24 @@
 ; insertion of register-register copies.
 
 ; Make sure there are only 3 mov's for each testcase
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
 ; RUN:   grep {\\\<mov\\\>} | count 6
 
 
 target triple = "i686-pc-linux-gnu"
+@G = external global i32                ; <i32*> [#uses=2]
 
-%G = external global int
+declare void @ext(i32)
 
-declare void %ext(int)
-
-int %add_test(int %X, int %Y) {
-	%Z = add int %X, %Y      ;; Last use of Y, but not of X.
-	store int %Z, int* %G
-	ret int %X
+define i32 @add_test(i32 %X, i32 %Y) {
+        %Z = add i32 %X, %Y             ; <i32> [#uses=1]
+        store i32 %Z, i32* @G
+        ret i32 %X
 }
 
-int %xor_test(int %X, int %Y) {
-	%Z = xor int %X, %Y      ;; Last use of Y, but not of X.
-	store int %Z, int* %G
-	ret int %X
+define i32 @xor_test(i32 %X, i32 %Y) {
+        %Z = xor i32 %X, %Y             ; <i32> [#uses=1]
+        store i32 %Z, i32* @G
+        ret i32 %X
 }
+

Modified: llvm/trunk/test/CodeGen/X86/compare-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/compare-add.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/compare-add.ll (original)
+++ llvm/trunk/test/CodeGen/X86/compare-add.ll Thu Feb 21 01:42:26 2008
@@ -1,7 +1,8 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep add
-bool %X(int %X) {
-        %Y = add int %X, 14
-        %Z = setne int %Y, 12345
-        ret bool %Z
+; RUN: llvm-as < %s | llc -march=x86 | not grep add
+
+define i1 @X(i32 %X) {
+        %Y = add i32 %X, 14             ; <i32> [#uses=1]
+        %Z = icmp ne i32 %Y, 12345              ; <i1> [#uses=1]
+        ret i1 %Z
 }
 

Modified: llvm/trunk/test/CodeGen/X86/compare_folding.llx
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/compare_folding.llx?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/compare_folding.llx (original)
+++ llvm/trunk/test/CodeGen/X86/compare_folding.llx Thu Feb 21 01:42:26 2008
@@ -1,10 +1,11 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=yonah | \
+; RUN: llvm-as < %s | llc -march=x86 -mcpu=yonah | \
 ; RUN:   grep movsd | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=yonah | \
+; RUN: llvm-as < %s | llc -march=x86 -mcpu=yonah | \
 ; RUN:   grep ucomisd
-declare bool %llvm.isunordered.f64(double,double)
+declare i1 @llvm.isunordered.f64(double, double)
 
-bool %test1(double %X, double %Y) {  ;; Returns isunordered(X,Y)
-	%COM = call bool %llvm.isunordered.f64(double %X, double %Y)
-	ret bool %COM
+define i1 @test1(double %X, double %Y) {
+        %COM = fcmp uno double %X, %Y           ; <i1> [#uses=1]
+        ret i1 %COM
 }
+

Modified: llvm/trunk/test/CodeGen/X86/darwin-no-dead-strip.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/darwin-no-dead-strip.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/darwin-no-dead-strip.ll (original)
+++ llvm/trunk/test/CodeGen/X86/darwin-no-dead-strip.ll Thu Feb 21 01:42:26 2008
@@ -1,7 +1,7 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc | grep no_dead_strip
+; RUN: llvm-as < %s | llc | grep no_dead_strip
 
-target endian = little
-target pointersize = 32
+target datalayout = "e-p:32:32"
 target triple = "i686-apple-darwin8.7.2"
-%x = weak global int 0          ; <int*> [#uses=1]
-%llvm.used = appending global [1 x sbyte*] [ sbyte* cast (int* %x to sbyte*) ]
+@x = weak global i32 0          ; <i32*> [#uses=1]
+@llvm.used = appending global [1 x i8*] [ i8* bitcast (i32* @x to i8*) ]                ; <[1 x i8*]*> [#uses=0]
+

Modified: llvm/trunk/test/CodeGen/X86/div_const.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/div_const.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/div_const.ll (original)
+++ llvm/trunk/test/CodeGen/X86/div_const.ll Thu Feb 21 01:42:26 2008
@@ -1,7 +1,7 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep 365384439
+; RUN: llvm-as < %s | llc -march=x86 | grep 365384439
 
-uint %f9188_mul365384439_shift27(uint %A) {
-        %tmp1 = div uint %A, 1577682821         ; <uint> [#uses=1]
-        ret uint %tmp1
+define i32 @f9188_mul365384439_shift27(i32 %A) {
+        %tmp1 = udiv i32 %A, 1577682821         ; <i32> [#uses=1]
+        ret i32 %tmp1
 }
 

Modified: llvm/trunk/test/CodeGen/X86/extend.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extend.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/extend.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extend.ll Thu Feb 21 01:42:26 2008
@@ -1,19 +1,18 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | grep movzx | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | grep movsx | count 1
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | grep movzx | count 1
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | grep movsx | count 1
 
-%G1 = internal global ubyte 0		; <ubyte*> [#uses=1]
-%G2 = internal global sbyte 0		; <sbyte*> [#uses=1]
+@G1 = internal global i8 0              ; <i8*> [#uses=1]
+@G2 = internal global i8 0              ; <i8*> [#uses=1]
 
-implementation   ; Functions:
-
-short %test1() {  ;; one zext
-	%tmp.0 = load ubyte* %G1		; <ubyte> [#uses=1]
-	%tmp.3 = cast ubyte %tmp.0 to short		; <short> [#uses=1]
-	ret short %tmp.3
+define i16 @test1() {
+        %tmp.0 = load i8* @G1           ; <i8> [#uses=1]
+        %tmp.3 = zext i8 %tmp.0 to i16          ; <i16> [#uses=1]
+        ret i16 %tmp.3
 }
 
-short %test2() {  ;; one sext
-	%tmp.0 = load sbyte* %G2		; <sbyte> [#uses=1]
-	%tmp.3 = cast sbyte %tmp.0 to short		; <short> [#uses=1]
-	ret short %tmp.3
+define i16 @test2() {
+        %tmp.0 = load i8* @G2           ; <i8> [#uses=1]
+        %tmp.3 = sext i8 %tmp.0 to i16          ; <i16> [#uses=1]
+        ret i16 %tmp.3
 }
+

Modified: llvm/trunk/test/CodeGen/X86/extern_weak.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extern_weak.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/extern_weak.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extern_weak.ll Thu Feb 21 01:42:26 2008
@@ -1,11 +1,13 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=i686-apple-darwin | grep weak_reference | count 2
+; RUN: llvm-as < %s | llc -mtriple=i686-apple-darwin | grep weak_reference | count 2
 
-%Y = global int (sbyte*)* %X
-declare extern_weak int %X(sbyte*)
+@Y = global i32 (i8*)* @X               ; <i32 (i8*)**> [#uses=0]
 
-void %bar() {
-	tail call void (...)* %foo( )
-	ret void
+declare extern_weak i32 @X(i8*)
+
+define void @bar() {
+        tail call void (...)* @foo( )
+        ret void
 }
 
-declare extern_weak void %foo(...)
+declare extern_weak void @foo(...)
+

Modified: llvm/trunk/test/CodeGen/X86/fast-cc-callee-pops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-cc-callee-pops.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-cc-callee-pops.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-cc-callee-pops.ll Thu Feb 21 01:42:26 2008
@@ -1,8 +1,8 @@
-; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llvm-as < %s | \
 ; RUN:   llc -march=x86 -x86-asm-syntax=intel -mcpu=yonah | grep {ret	20}
 
 ; Check that a fastcc function pops its stack variables before returning.
 
-x86_fastcallcc void %func(long %X, long %Y, float %G, double %Z) {
-	ret void
+define x86_fastcallcc void @func(i64 %X, i64 %Y, float %G, double %Z) {
+        ret void
 }

Modified: llvm/trunk/test/CodeGen/X86/fast-cc-merge-stack-adj.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-cc-merge-stack-adj.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-cc-merge-stack-adj.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-cc-merge-stack-adj.ll Thu Feb 21 01:42:26 2008
@@ -1,12 +1,13 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
 ; RUN:   grep {add	ESP, 8}
 
 target triple = "i686-pc-linux-gnu"
 
-declare x86_fastcallcc void %func(int *%X, long %Y)
+declare x86_fastcallcc void @func(i32*, i64)
 
-x86_fastcallcc void %caller(int, long) {
-	%X = alloca int
-	call x86_fastcallcc void %func(int* %X, long 0)   ;; not a tail call
-	ret void
+define x86_fastcallcc void @caller(i32, i64) {
+        %X = alloca i32         ; <i32*> [#uses=1]
+        call x86_fastcallcc void @func( i32* %X, i64 0 )
+        ret void
 }
+

Modified: llvm/trunk/test/CodeGen/X86/fastcall-correct-mangling.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fastcall-correct-mangling.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/fastcall-correct-mangling.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fastcall-correct-mangling.ll Thu Feb 21 01:42:26 2008
@@ -1,8 +1,9 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mtriple=mingw32 | \
+; RUN: llvm-as < %s | llc -march=x86 -mtriple=mingw32 | \
 ; RUN:   grep {@12}
 
 ; Check that a fastcall function gets correct mangling
 
-x86_fastcallcc void %func(long %X, ubyte %Y, ubyte %G, ushort %Z) {
-	ret void
+define x86_fastcallcc void @func(i64 %X, i8 %Y, i8 %G, i16 %Z) {
+        ret void
 }
+

Modified: llvm/trunk/test/CodeGen/X86/fildll.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fildll.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/fildll.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fildll.ll Thu Feb 21 01:42:26 2008
@@ -1,11 +1,12 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=att -mattr=-sse2 | grep fildll | count 2
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=att -mattr=-sse2 | grep fildll | count 2
 
-fastcc double %sint64_to_fp(long %X) {
-	%R = cast long %X to double
-	ret double %R
+define fastcc double @sint64_to_fp(i64 %X) {
+        %R = sitofp i64 %X to double            ; <double> [#uses=1]
+        ret double %R
 }
 
-fastcc double %uint64_to_fp(ulong %X) {
-	%R = cast ulong %X to double
-	ret double %R
+define fastcc double @uint64_to_fp(i64 %X) {
+        %R = uitofp i64 %X to double            ; <double> [#uses=1]
+        ret double %R
 }
+

Modified: llvm/trunk/test/CodeGen/X86/fold-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-load.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-load.ll Thu Feb 21 01:42:26 2008
@@ -1,27 +1,25 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc
-	%struct._obstack_chunk = type { sbyte*, %struct._obstack_chunk*, [4 x sbyte] }
-	%struct.obstack = type { int, %struct._obstack_chunk*, sbyte*, sbyte*, sbyte*, int, int, %struct._obstack_chunk* (...)*, void (...)*, sbyte*, ubyte }
-%stmt_obstack = external global %struct.obstack		; <%struct.obstack*> [#uses=1]
+; RUN: llvm-as < %s | llc
+	%struct._obstack_chunk = type { i8*, %struct._obstack_chunk*, [4 x i8] }
+	%struct.obstack = type { i32, %struct._obstack_chunk*, i8*, i8*, i8*, i32, i32, %struct._obstack_chunk* (...)*, void (...)*, i8*, i8 }
+@stmt_obstack = external global %struct.obstack		; <%struct.obstack*> [#uses=1]
 
-implementation   ; Functions:
-
-void %expand_start_bindings() {
+define void @expand_start_bindings() {
 entry:
-	br bool false, label %cond_true, label %cond_next
+	br i1 false, label %cond_true, label %cond_next
 
 cond_true:		; preds = %entry
-	%new_size.0.i = select bool false, int 0, int 0		; <int> [#uses=1]
-	%tmp.i = load uint* cast (ubyte* getelementptr (%struct.obstack* %stmt_obstack, int 0, uint 10) to uint*)		; <uint> [#uses=1]
-	%tmp.i = cast uint %tmp.i to ubyte		; <ubyte> [#uses=1]
-	%tmp21.i = and ubyte %tmp.i, 1		; <ubyte> [#uses=1]
-	%tmp22.i = seteq ubyte %tmp21.i, 0		; <bool> [#uses=1]
-	br bool %tmp22.i, label %cond_false30.i, label %cond_true23.i
+	%new_size.0.i = select i1 false, i32 0, i32 0		; <i32> [#uses=1]
+	%tmp.i = load i32* bitcast (i8* getelementptr (%struct.obstack* @stmt_obstack, i32 0, i32 10) to i32*)		; <i32> [#uses=1]
+	%tmp.i.upgrd.1 = trunc i32 %tmp.i to i8		; <i8> [#uses=1]
+	%tmp21.i = and i8 %tmp.i.upgrd.1, 1		; <i8> [#uses=1]
+	%tmp22.i = icmp eq i8 %tmp21.i, 0		; <i1> [#uses=1]
+	br i1 %tmp22.i, label %cond_false30.i, label %cond_true23.i
 
 cond_true23.i:		; preds = %cond_true
 	ret void
 
 cond_false30.i:		; preds = %cond_true
-	%tmp35.i = tail call %struct._obstack_chunk* null( int %new_size.0.i )		; <%struct._obstack_chunk*> [#uses=0]
+	%tmp35.i = tail call %struct._obstack_chunk* null( i32 %new_size.0.i )		; <%struct._obstack_chunk*> [#uses=0]
 	ret void
 
 cond_next:		; preds = %entry

Modified: llvm/trunk/test/CodeGen/X86/fp-immediate-shorten.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-immediate-shorten.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-immediate-shorten.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-immediate-shorten.ll Thu Feb 21 01:42:26 2008
@@ -1,6 +1,9 @@
 ;; Test that this FP immediate is stored in the constant pool as a float.
 
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=-sse2,-sse3 | \
+; RUN: llvm-as < %s | llc -march=x86 -mattr=-sse2,-sse3 | \
 ; RUN:   grep {.long.1123418112}
 
-double %D() { ret double 123.0 }
+define double @D() {
+        ret double 1.230000e+02
+}
+

Modified: llvm/trunk/test/CodeGen/X86/fp-stack-compare.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-stack-compare.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-stack-compare.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-stack-compare.ll Thu Feb 21 01:42:26 2008
@@ -1,12 +1,12 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=i386 | \
+; RUN: llvm-as < %s | llc -march=x86 -mcpu=i386 | \
 ; RUN:   grep {fucomi.*st.\[12\]}
 ; PR1012
 
-float %foo(float *%col.2.0) {
+define float @foo(float* %col.2.0) {
         %tmp = load float* %col.2.0             ; <float> [#uses=3]
-        %tmp16 = setlt float %tmp, 0.000000e+00         ; <bool> [#uses=1]
+        %tmp16 = fcmp olt float %tmp, 0.000000e+00              ; <i1> [#uses=1]
         %tmp20 = sub float -0.000000e+00, %tmp          ; <float> [#uses=1]
-        %iftmp.2.0 = select bool %tmp16, float %tmp20, float %tmp
-	ret float %iftmp.2.0
+        %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp         ; <float> [#uses=1]
+        ret float %iftmp.2.0
 }
 

Modified: llvm/trunk/test/CodeGen/X86/fp_constant_op.llx
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp_constant_op.llx?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp_constant_op.llx (original)
+++ llvm/trunk/test/CodeGen/X86/fp_constant_op.llx Thu Feb 21 01:42:26 2008
@@ -1,35 +1,35 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel -mcpu=i486 | \
-; RUN:   grep {fadd\\|fsub\\|fdiv\\|fmul} | not grep -i ST 
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel -mcpu=i486 | \
+; RUN:   grep {fadd\\|fsub\\|fdiv\\|fmul} | not grep -i ST
 
 ; Test that the load of the constant is folded into the operation.
 
-double %foo_add(double %P) {
-        %tmp.1 = add double %P, 0x405EC00000000000
-        ret double %tmp.1
-}
 
-double %foo_mul(double %P) {
-        %tmp.1 = mul double %P, 0x405EC00000000000
-        ret double %tmp.1
+define double @foo_add(double %P) {
+	%tmp.1 = add double %P, 1.230000e+02		; <double> [#uses=1]
+	ret double %tmp.1
 }
 
-double %foo_sub(double %P) {
-        %tmp.1 = sub double %P, 0x405EC00000000000
-        ret double %tmp.1
+define double @foo_mul(double %P) {
+	%tmp.1 = mul double %P, 1.230000e+02		; <double> [#uses=1]
+	ret double %tmp.1
 }
 
-double %foo_subr(double %P) {
-        %tmp.1 = sub double 0x405EC00000000000, %P
-        ret double %tmp.1
+define double @foo_sub(double %P) {
+	%tmp.1 = sub double %P, 1.230000e+02		; <double> [#uses=1]
+	ret double %tmp.1
 }
 
-double %foo_div(double %P) {
-        %tmp.1 = div double %P, 0x405EC00000000000
-        ret double %tmp.1
+define double @foo_subr(double %P) {
+	%tmp.1 = sub double 1.230000e+02, %P		; <double> [#uses=1]
+	ret double %tmp.1
 }
 
-double %foo_divr(double %P) {
-        %tmp.1 = div double 0x405EC00000000000, %P
-        ret double %tmp.1
+define double @foo_div(double %P) {
+	%tmp.1 = fdiv double %P, 1.230000e+02		; <double> [#uses=1]
+	ret double %tmp.1
 }
 
+define double @foo_divr(double %P) {
+	%tmp.1 = fdiv double 1.230000e+02, %P		; <double> [#uses=1]
+	ret double %tmp.1
+}

Modified: llvm/trunk/test/CodeGen/X86/fp_load_cast_fold.llx
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp_load_cast_fold.llx?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp_load_cast_fold.llx (original)
+++ llvm/trunk/test/CodeGen/X86/fp_load_cast_fold.llx Thu Feb 21 01:42:26 2008
@@ -1,17 +1,20 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep fild | not grep ESP
-double %short(short* %P) {
-	%V = load short* %P
-	%V2 = cast short %V to double
-	ret double %V2
+; RUN: llvm-as < %s | llc -march=x86 | grep fild | not grep ESP
+
+define double @short(i16* %P) {
+        %V = load i16* %P               ; <i16> [#uses=1]
+        %V2 = sitofp i16 %V to double           ; <double> [#uses=1]
+        ret double %V2
 }
-double %int(int* %P) {
-	%V = load int* %P
-	%V2 = cast int %V to double
-	ret double %V2
+
+define double @int(i32* %P) {
+        %V = load i32* %P               ; <i32> [#uses=1]
+        %V2 = sitofp i32 %V to double           ; <double> [#uses=1]
+        ret double %V2
 }
-double %long(long* %P) {
-	%V = load long* %P
-	%V2 = cast long %V to double
-	ret double %V2
+
+define double @long(i64* %P) {
+        %V = load i64* %P               ; <i64> [#uses=1]
+        %V2 = sitofp i64 %V to double           ; <double> [#uses=1]
+        ret double %V2
 }
 

Modified: llvm/trunk/test/CodeGen/X86/fp_load_fold.llx
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp_load_fold.llx?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp_load_fold.llx (original)
+++ llvm/trunk/test/CodeGen/X86/fp_load_fold.llx Thu Feb 21 01:42:26 2008
@@ -1,41 +1,40 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
 ; RUN:   grep -i ST | not grep {fadd\\|fsub\\|fdiv\\|fmul}
 
 ; Test that the load of the memory location is folded into the operation.
 
-
-double %test_add(double %X, double *%P) {
-	%Y = load double* %P
-        %R = add double %X, %Y
-        ret double %R
+define double @test_add(double %X, double* %P) {
+	%Y = load double* %P		; <double> [#uses=1]
+	%R = add double %X, %Y		; <double> [#uses=1]
+	ret double %R
 }
 
-double %test_mul(double %X, double *%P) {
-	%Y = load double* %P
-        %R = mul double %X, %Y
-        ret double %R
+define double @test_mul(double %X, double* %P) {
+	%Y = load double* %P		; <double> [#uses=1]
+	%R = mul double %X, %Y		; <double> [#uses=1]
+	ret double %R
 }
 
-double %test_sub(double %X, double *%P) {
-	%Y = load double* %P
-        %R = sub double %X, %Y
-        ret double %R
+define double @test_sub(double %X, double* %P) {
+	%Y = load double* %P		; <double> [#uses=1]
+	%R = sub double %X, %Y		; <double> [#uses=1]
+	ret double %R
 }
 
-double %test_subr(double %X, double *%P) {
-	%Y = load double* %P
-        %R = sub double %Y, %X
-        ret double %R
+define double @test_subr(double %X, double* %P) {
+	%Y = load double* %P		; <double> [#uses=1]
+	%R = sub double %Y, %X		; <double> [#uses=1]
+	ret double %R
 }
 
-double %test_div(double %X, double *%P) {
-	%Y = load double* %P
-        %R = div double %X, %Y
-        ret double %R
+define double @test_div(double %X, double* %P) {
+	%Y = load double* %P		; <double> [#uses=1]
+	%R = fdiv double %X, %Y		; <double> [#uses=1]
+	ret double %R
 }
 
-double %test_divr(double %X, double *%P) {
-	%Y = load double* %P
-        %R = div double %Y, %X
-        ret double %R
+define double @test_divr(double %X, double* %P) {
+	%Y = load double* %P		; <double> [#uses=1]
+	%R = fdiv double %Y, %X		; <double> [#uses=1]
+	ret double %R
 }

Modified: llvm/trunk/test/CodeGen/X86/imul-lea.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/imul-lea.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/imul-lea.ll (original)
+++ llvm/trunk/test/CodeGen/X86/imul-lea.ll Thu Feb 21 01:42:26 2008
@@ -1,8 +1,10 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep lea
+; RUN: llvm-as < %s | llc -march=x86 | grep lea
 
-declare int %foo()
-int %test() {
-	%tmp.0 = tail call int %foo( )		; <int> [#uses=1]
-	%tmp.1 = mul int %tmp.0, 9		; <int> [#uses=1]
-	ret int %tmp.1
+declare i32 @foo()
+
+define i32 @test() {
+        %tmp.0 = tail call i32 @foo( )          ; <i32> [#uses=1]
+        %tmp.1 = mul i32 %tmp.0, 9              ; <i32> [#uses=1]
+        ret i32 %tmp.1
 }
+

Modified: llvm/trunk/test/CodeGen/X86/isnan.llx
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/isnan.llx?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/isnan.llx (original)
+++ llvm/trunk/test/CodeGen/X86/isnan.llx Thu Feb 21 01:42:26 2008
@@ -1,7 +1,9 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep call
-declare bool %llvm.isunordered.f64(double)
+; RUN: llvm-as < %s | llc -march=x86 | not grep call
 
-bool %test_isnan(double %X) {
-	%R = call bool %llvm.isunordered.f64(double %X, double %X)
-	ret bool %R
+declare i1 @llvm.isunordered.f64(double)
+
+define i1 @test_isnan(double %X) {
+        %R = fcmp uno double %X, %X             ; <i1> [#uses=1]
+        ret i1 %R
 }
+

Modified: llvm/trunk/test/CodeGen/X86/jump_sign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/jump_sign.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/jump_sign.ll (original)
+++ llvm/trunk/test/CodeGen/X86/jump_sign.ll Thu Feb 21 01:42:26 2008
@@ -1,20 +1,20 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep jns
-int %f(int %X) {
+; RUN: llvm-as < %s | llc -march=x86 | grep jns
+
+define i32 @f(i32 %X) {
 entry:
-        %tmp1 = add int %X, 1           ; <int> [#uses=1]
-        %tmp = setlt int %tmp1, 0               ; <bool> [#uses=1]
-        br bool %tmp, label %cond_true, label %cond_next
+	%tmp1 = add i32 %X, 1		; <i32> [#uses=1]
+	%tmp = icmp slt i32 %tmp1, 0		; <i1> [#uses=1]
+	br i1 %tmp, label %cond_true, label %cond_next
 
-cond_true:              ; preds = %entry
-        %tmp2 = tail call int (...)* %bar( )            ; <int> [#uses=0]
-        br label %cond_next
+cond_true:		; preds = %entry
+	%tmp2 = tail call i32 (...)* @bar( )		; <i32> [#uses=0]
+	br label %cond_next
 
-cond_next:              ; preds = %entry, %cond_true
-        %tmp3 = tail call int (...)* %baz( )            ; <int> [#uses=0]
-        ret int undef
+cond_next:		; preds = %cond_true, %entry
+	%tmp3 = tail call i32 (...)* @baz( )		; <i32> [#uses=0]
+	ret i32 undef
 }
 
-declare int %bar(...)
-
-declare int %baz(...)
+declare i32 @bar(...)
 
+declare i32 @baz(...)

Modified: llvm/trunk/test/CodeGen/X86/lea-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea-2.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lea-2.ll Thu Feb 21 01:42:26 2008
@@ -1,12 +1,13 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
 ; RUN:   grep {lea	EAX, DWORD PTR \\\[... + 4\\*... - 5\\\]}
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
 ; RUN:   not grep add
 
-int %test1(int %A, int %B) {
-        %tmp1 = shl int %A, ubyte 2             ; <int> [#uses=1]
-        %tmp3 = add int %B, -5          ; <int> [#uses=1]
-        %tmp4 = add int %tmp3, %tmp1            ; <int> [#uses=1]
-        ret int %tmp4
+define i32 @test1(i32 %A, i32 %B) {
+        %tmp1 = shl i32 %A, 2           ; <i32> [#uses=1]
+        %tmp3 = add i32 %B, -5          ; <i32> [#uses=1]
+        %tmp4 = add i32 %tmp3, %tmp1            ; <i32> [#uses=1]
+        ret i32 %tmp4
 }
 
+

Modified: llvm/trunk/test/CodeGen/X86/lea.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lea.ll Thu Feb 21 01:42:26 2008
@@ -1,7 +1,9 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep orl
-int %test(int %x) {
-	%tmp1 = shl int %x, ubyte 3
-	%tmp2 = add int %tmp1, 7
-	ret int %tmp2
+; RUN: llvm-as < %s | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86 | not grep orl
+
+define i32 @test(i32 %x) {
+        %tmp1 = shl i32 %x, 3           ; <i32> [#uses=1]
+        %tmp2 = add i32 %tmp1, 7                ; <i32> [#uses=1]
+        ret i32 %tmp2
 }
+

Modified: llvm/trunk/test/CodeGen/X86/loop-hoist.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/loop-hoist.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/loop-hoist.ll (original)
+++ llvm/trunk/test/CodeGen/X86/loop-hoist.ll Thu Feb 21 01:42:26 2008
@@ -1,29 +1,26 @@
-; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llvm-as < %s | \
 ; RUN:   llc -relocation-model=dynamic-no-pic -mtriple=i686-apple-darwin8.7.2 |\
 ; RUN:   grep L_Arr.non_lazy_ptr
-; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llvm-as < %s | \
 ; RUN:   llc -relocation-model=dynamic-no-pic -mtriple=i686-apple-darwin8.7.2 |\
 ; RUN:   %prcontext L_Arr.non_lazy_ptr 1 | grep {4(%esp)}
 
-%Arr = external global [0 x int]                ; <[0 x int]*> [#uses=2]
+@Arr = external global [0 x i32]		; <[0 x i32]*> [#uses=1]
 
-implementation   ; Functions:
-
-void %foo(int %N.in) {
+define void @foo(i32 %N.in) {
 entry:
-        %N = cast int %N.in to uint                ; <uint> [#uses=1]
-        br label %cond_true
+	%N = bitcast i32 %N.in to i32		; <i32> [#uses=1]
+	br label %cond_true
 
-cond_true:              ; preds = %cond_true, %entry
-        %indvar = phi uint [ 0, %entry ], [ %indvar.next, %cond_true ]          ; <uint> [#uses=3]
-        %i.0.0 = cast uint %indvar to int               ; <int> [#uses=1]
-        %tmp = getelementptr [0 x int]* %Arr, int 0, int %i.0.0
-        store int %i.0.0, int* %tmp
-        %indvar.next = add uint %indvar, 1              ; <uint> [#uses=2]
-        %exitcond = seteq uint %indvar.next, %N         ; <bool> [#uses=1]
-        br bool %exitcond, label %return, label %cond_true
+cond_true:		; preds = %cond_true, %entry
+	%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %cond_true ]		; <i32> [#uses=2]
+	%i.0.0 = bitcast i32 %indvar to i32		; <i32> [#uses=2]
+	%tmp = getelementptr [0 x i32]* @Arr, i32 0, i32 %i.0.0		; <i32*> [#uses=1]
+	store i32 %i.0.0, i32* %tmp
+	%indvar.next = add i32 %indvar, 1		; <i32> [#uses=2]
+	%exitcond = icmp eq i32 %indvar.next, %N		; <i1> [#uses=1]
+	br i1 %exitcond, label %return, label %cond_true
 
-return:         ; preds = %cond_true, %entry
-        ret void
+return:		; preds = %cond_true
+	ret void
 }
-

Modified: llvm/trunk/test/CodeGen/X86/loop-strength-reduce.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/loop-strength-reduce.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/loop-strength-reduce.ll (original)
+++ llvm/trunk/test/CodeGen/X86/loop-strength-reduce.ll Thu Feb 21 01:42:26 2008
@@ -1,29 +1,29 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | \
+; RUN: llvm-as < %s | llc -march=x86 | \
 ; RUN:   grep {A(} | count 1
 ;
 ; Make sure the common loop invariant _A(reg) is hoisted up to preheader.
 
-%A = internal global [16 x [16 x int]] zeroinitializer, align 32
+@A = internal global [16 x [16 x i32]] zeroinitializer, align 32		; <[16 x [16 x i32]]*> [#uses=2]
 
-void %test(int %row, int %N.in) {
+define void @test(i32 %row, i32 %N.in) {
 entry:
-	%N = cast int %N.in to uint
-	%tmp5 = setgt int %N.in, 0
-	br bool %tmp5, label %cond_true, label %return
+	%N = bitcast i32 %N.in to i32		; <i32> [#uses=1]
+	%tmp5 = icmp sgt i32 %N.in, 0		; <i1> [#uses=1]
+	br i1 %tmp5, label %cond_true, label %return
 
-cond_true:
-	%indvar = phi uint [ 0, %entry ], [ %indvar.next, %cond_true ]
-	%i.0.0 = cast uint %indvar to int
-	%tmp2 = add int %i.0.0, 1
-	%tmp = getelementptr [16 x [16 x int]]* %A, int 0, int %row, int %tmp2
-	store int 4, int* %tmp
-	%tmp5 = add int %i.0.0, 2
-	%tmp7 = getelementptr [16 x [16 x int]]* %A, int 0, int %row, int %tmp5
-	store int 5, int* %tmp7
-	%indvar.next = add uint %indvar, 1
-	%exitcond = seteq uint %indvar.next, %N
-	br bool %exitcond, label %return, label %cond_true
+cond_true:		; preds = %cond_true, %entry
+	%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %cond_true ]		; <i32> [#uses=2]
+	%i.0.0 = bitcast i32 %indvar to i32		; <i32> [#uses=2]
+	%tmp2 = add i32 %i.0.0, 1		; <i32> [#uses=1]
+	%tmp = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp2		; <i32*> [#uses=1]
+	store i32 4, i32* %tmp
+	%tmp5.upgrd.1 = add i32 %i.0.0, 2		; <i32> [#uses=1]
+	%tmp7 = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp5.upgrd.1		; <i32*> [#uses=1]
+	store i32 5, i32* %tmp7
+	%indvar.next = add i32 %indvar, 1		; <i32> [#uses=2]
+	%exitcond = icmp eq i32 %indvar.next, %N		; <i1> [#uses=1]
+	br i1 %exitcond, label %return, label %cond_true
 
-return:
+return:		; preds = %cond_true, %entry
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/X86/loop-strength-reduce2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/loop-strength-reduce2.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/loop-strength-reduce2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/loop-strength-reduce2.ll Thu Feb 21 01:42:26 2008
@@ -1,29 +1,29 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=i686-apple-darwin -relocation-model=pic | grep '\$pb' | grep mov
+; RUN: llvm-as < %s | llc -mtriple=i686-apple-darwin -relocation-model=pic | grep '\$pb' | grep mov
 ;
 ; Make sure the PIC label flags2-"L1$pb" is not moved up to the preheader.
 
-%flags2 = internal global [8193 x sbyte] zeroinitializer, align 32
+@flags2 = internal global [8193 x i8] zeroinitializer, align 32		; <[8193 x i8]*> [#uses=1]
 
-void %test(int %k, int %i) {
+define void @test(i32 %k, i32 %i) {
 entry:
-	%i = bitcast int %i to uint
-	%k_addr.012 = shl int %i, ubyte 1
-	%tmp14 = setgt int %k_addr.012, 8192
-	br bool %tmp14, label %return, label %bb
+	%k_addr.012 = shl i32 %i, 1		; <i32> [#uses=1]
+	%tmp14 = icmp sgt i32 %k_addr.012, 8192		; <i1> [#uses=1]
+	br i1 %tmp14, label %return, label %bb
 
-bb:
-	%indvar = phi uint [ 0, %entry ], [ %indvar.next, %bb ]
-	%tmp. = shl uint %i, ubyte 1
-	%tmp.15 = mul uint %indvar, %i
-	%tmp.16 = add uint %tmp.15, %tmp.
-	%k_addr.0.0 = bitcast uint %tmp.16 to int
-	%tmp = getelementptr [8193 x sbyte]* %flags2, int 0, uint %tmp.16
-	store sbyte 0, sbyte* %tmp
-	%k_addr.0 = add int %k_addr.0.0, %i
-	%tmp = setgt int %k_addr.0, 8192
-	%indvar.next = add uint %indvar, 1
-	br bool %tmp, label %return, label %bb
+bb:		; preds = %bb, %entry
+	%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ]		; <i32> [#uses=2]
+	%tmp. = shl i32 %i, 1		; <i32> [#uses=1]
+	%tmp.15 = mul i32 %indvar, %i		; <i32> [#uses=1]
+	%tmp.16 = add i32 %tmp.15, %tmp.		; <i32> [#uses=2]
+	%k_addr.0.0 = bitcast i32 %tmp.16 to i32		; <i32> [#uses=1]
+	%gep.upgrd.1 = zext i32 %tmp.16 to i64		; <i64> [#uses=1]
+	%tmp = getelementptr [8193 x i8]* @flags2, i32 0, i64 %gep.upgrd.1		; <i8*> [#uses=1]
+	store i8 0, i8* %tmp
+	%k_addr.0 = add i32 %k_addr.0.0, %i		; <i32> [#uses=1]
+	%tmp.upgrd.2 = icmp sgt i32 %k_addr.0, 8192		; <i1> [#uses=1]
+	%indvar.next = add i32 %indvar, 1		; <i32> [#uses=1]
+	br i1 %tmp.upgrd.2, label %return, label %bb
 
-return:
+return:		; preds = %bb, %entry
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/X86/mul-shift-reassoc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mul-shift-reassoc.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/mul-shift-reassoc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mul-shift-reassoc.ll Thu Feb 21 01:42:26 2008
@@ -1,12 +1,12 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep lea
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep add
+; RUN: llvm-as < %s | llc -march=x86 | grep lea
+; RUN: llvm-as < %s | llc -march=x86 | not grep add
 
-int %test(int %X, int %Y) {
+define i32 @test(i32 %X, i32 %Y) {
 	; Push the shl through the mul to allow an LEA to be formed, instead
         ; of using a shift and add separately.
-        %tmp.2 = shl int %X, ubyte 1
-        %tmp.3 = mul int %tmp.2, %Y
-        %tmp.5 = add int %tmp.3, %Y
-        ret int %tmp.5
+        %tmp.2 = shl i32 %X, 1          ; <i32> [#uses=1]
+        %tmp.3 = mul i32 %tmp.2, %Y             ; <i32> [#uses=1]
+        %tmp.5 = add i32 %tmp.3, %Y             ; <i32> [#uses=1]
+        ret i32 %tmp.5
 }
 

Modified: llvm/trunk/test/CodeGen/X86/negative_zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/negative_zero.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/negative_zero.ll (original)
+++ llvm/trunk/test/CodeGen/X86/negative_zero.ll Thu Feb 21 01:42:26 2008
@@ -1,6 +1,6 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=-sse2,-sse3 | grep fchs
+; RUN: llvm-as < %s | llc -march=x86 -mattr=-sse2,-sse3 | grep fchs
 
 
-double %T() {
+define double @T() {
 	ret double -1.0   ;; codegen as fld1/fchs, not as a load from cst pool
 }

Modified: llvm/trunk/test/CodeGen/X86/or-branch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/or-branch.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/or-branch.ll (original)
+++ llvm/trunk/test/CodeGen/X86/or-branch.ll Thu Feb 21 01:42:26 2008
@@ -1,19 +1,19 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep set
+; RUN: llvm-as < %s | llc -march=x86 | not grep set
 
-void %foo(int %X, int %Y, int %Z) {
+define void @foo(i32 %X, i32 %Y, i32 %Z) {
 entry:
-	%tmp = tail call int (...)* %bar( )		; <int> [#uses=0]
-	%tmp = seteq int %X, 0		; <bool> [#uses=1]
-	%tmp3 = setlt int %Y, 5		; <bool> [#uses=1]
-	%tmp4 = or bool %tmp3, %tmp		; <bool> [#uses=1]
-	br bool %tmp4, label %cond_true, label %UnifiedReturnBlock
+	%tmp = tail call i32 (...)* @bar( )		; <i32> [#uses=0]
+	%tmp.upgrd.1 = icmp eq i32 %X, 0		; <i1> [#uses=1]
+	%tmp3 = icmp slt i32 %Y, 5		; <i1> [#uses=1]
+	%tmp4 = or i1 %tmp3, %tmp.upgrd.1		; <i1> [#uses=1]
+	br i1 %tmp4, label %cond_true, label %UnifiedReturnBlock
 
 cond_true:		; preds = %entry
-	%tmp5 = tail call int (...)* %bar( )		; <int> [#uses=0]
+	%tmp5 = tail call i32 (...)* @bar( )		; <i32> [#uses=0]
 	ret void
 
 UnifiedReturnBlock:		; preds = %entry
 	ret void
 }
 
-declare int %bar(...)
+declare i32 @bar(...)

Modified: llvm/trunk/test/CodeGen/X86/overlap-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/overlap-shift.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/overlap-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/overlap-shift.ll Thu Feb 21 01:42:26 2008
@@ -6,13 +6,14 @@
 
 ; Check that the shift gets turned into an LEA.
 
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
 ; RUN:   not grep {mov E.X, E.X}
 
-%G = external global int
+@G = external global i32                ; <i32*> [#uses=1]
 
-int %test1(int %X) {
-	%Z = shl int %X, ubyte 2
-	volatile store int %Z, int* %G
-	ret int %X
+define i32 @test1(i32 %X) {
+        %Z = shl i32 %X, 2              ; <i32> [#uses=1]
+        volatile store i32 %Z, i32* @G
+        ret i32 %X
 }
+

Modified: llvm/trunk/test/CodeGen/X86/packed_struct.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/packed_struct.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/packed_struct.ll (original)
+++ llvm/trunk/test/CodeGen/X86/packed_struct.ll Thu Feb 21 01:42:26 2008
@@ -1,38 +1,33 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep foos+5
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep foos+1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep foos+9
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep bara+19
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep bara+4
+; RUN: llvm-as < %s | llc -march=x86 | grep foos+5
+; RUN: llvm-as < %s | llc -march=x86 | grep foos+1
+; RUN: llvm-as < %s | llc -march=x86 | grep foos+9
+; RUN: llvm-as < %s | llc -march=x86 | grep bara+19
+; RUN: llvm-as < %s | llc -march=x86 | grep bara+4
 
 ; make sure we compute the correct offset for a packed structure
 
 ;Note: codegen for this could change rendering the above checks wrong
 
-; ModuleID = 'foo.c'
 target datalayout = "e-p:32:32"
-target endian = little
-target pointersize = 32
 target triple = "i686-pc-linux-gnu"
-	%struct.anon = type <{ sbyte, int, int, int }>
-%foos = external global %struct.anon
-%bara = weak global [4 x <{ int, sbyte }>] zeroinitializer
+	%struct.anon = type <{ i8, i32, i32, i32 }>
+@foos = external global %struct.anon		; <%struct.anon*> [#uses=3]
+@bara = weak global [4 x <{ i32, i8 }>] zeroinitializer		; <[4 x <{ i32, i8 }>]*> [#uses=2]
 
-implementation   ; Functions:
-
-int %foo() {
+define i32 @foo() {
 entry:
-	%tmp = load int*  getelementptr (%struct.anon* %foos, int 0, uint 1)
-	%tmp3 = load int* getelementptr (%struct.anon* %foos, int 0, uint 2)
-	%tmp6 = load int* getelementptr (%struct.anon* %foos, int 0, uint 3)
-	%tmp4 = add int %tmp3, %tmp
-	%tmp7 = add int %tmp4, %tmp6
-	ret int %tmp7
+	%tmp = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 1)		; <i32> [#uses=1]
+	%tmp3 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 2)		; <i32> [#uses=1]
+	%tmp6 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 3)		; <i32> [#uses=1]
+	%tmp4 = add i32 %tmp3, %tmp		; <i32> [#uses=1]
+	%tmp7 = add i32 %tmp4, %tmp6		; <i32> [#uses=1]
+	ret i32 %tmp7
 }
 
-sbyte %bar() {
+define i8 @bar() {
 entry:
-	%tmp = load sbyte* getelementptr([4 x <{ int, sbyte }>]* %bara, int 0, int 0, uint 1 )
-	%tmp4 = load sbyte* getelementptr ([4 x <{ int, sbyte }>]* %bara, int 0, int 3, uint 1)
-	%tmp5 = add sbyte %tmp4, %tmp
-	ret sbyte %tmp5
+	%tmp = load i8* getelementptr ([4 x <{ i32, i8 }>]* @bara, i32 0, i32 0, i32 1)		; <i8> [#uses=1]
+	%tmp4 = load i8* getelementptr ([4 x <{ i32, i8 }>]* @bara, i32 0, i32 3, i32 1)		; <i8> [#uses=1]
+	%tmp5 = add i8 %tmp4, %tmp		; <i8> [#uses=1]
+	ret i8 %tmp5
 }

Modified: llvm/trunk/test/CodeGen/X86/regpressure.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/regpressure.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/regpressure.ll (original)
+++ llvm/trunk/test/CodeGen/X86/regpressure.ll Thu Feb 21 01:42:26 2008
@@ -1,118 +1,114 @@
 ;; Both functions in this testcase should codegen to the same function, and
 ;; neither of them should require spilling anything to the stack.
 
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -stats |& \
+; RUN: llvm-as < %s | llc -march=x86 -stats |& \
 ; RUN:   not grep {Number of register spills}
 
 ;; This can be compiled to use three registers if the loads are not
 ;; folded into the multiplies, 2 registers otherwise.
-int %regpressure1(int* %P) {
-	%A = load int* %P
-	%Bp = getelementptr int* %P, int 1
-	%B = load int* %Bp
-	%s1 = mul int %A, %B
-	%Cp = getelementptr int* %P, int 2
-	%C = load int* %Cp
-	%s2 = mul int %s1, %C
-	%Dp = getelementptr int* %P, int 3
-	%D = load int* %Dp
-	%s3 = mul int %s2, %D
-	%Ep = getelementptr int* %P, int 4
-	%E = load int* %Ep
-	%s4 = mul int %s3, %E
-	%Fp = getelementptr int* %P, int 5
-	%F = load int* %Fp
-	%s5 = mul int %s4, %F
-	%Gp = getelementptr int* %P, int 6
-	%G = load int* %Gp
-	%s6 = mul int %s5, %G
-	%Hp = getelementptr int* %P, int 7
-	%H = load int* %Hp
-	%s7 = mul int %s6, %H
-	%Ip = getelementptr int* %P, int 8
-	%I = load int* %Ip
-	%s8 = mul int %s7, %I
-	%Jp = getelementptr int* %P, int 9
-	%J = load int* %Jp
-	%s9 = mul int %s8, %J
-	ret int %s9
-}
 
-;; This testcase should produce identical code to the test above.
-int %regpressure2(int* %P) {
-	%A = load int* %P
-	%Bp = getelementptr int* %P, int 1
-	%B = load int* %Bp
-	%Cp = getelementptr int* %P, int 2
-	%C = load int* %Cp
-	%Dp = getelementptr int* %P, int 3
-	%D = load int* %Dp
-	%Ep = getelementptr int* %P, int 4
-	%E = load int* %Ep
-	%Fp = getelementptr int* %P, int 5
-	%F = load int* %Fp
-	%Gp = getelementptr int* %P, int 6
-	%G = load int* %Gp
-	%Hp = getelementptr int* %P, int 7
-	%H = load int* %Hp
-	%Ip = getelementptr int* %P, int 8
-	%I = load int* %Ip
-	%Jp = getelementptr int* %P, int 9
-	%J = load int* %Jp
-	%s1 = mul int %A, %B
-	%s2 = mul int %s1, %C
-	%s3 = mul int %s2, %D
-	%s4 = mul int %s3, %E
-	%s5 = mul int %s4, %F
-	%s6 = mul int %s5, %G
-	%s7 = mul int %s6, %H
-	%s8 = mul int %s7, %I
-	%s9 = mul int %s8, %J
-	ret int %s9
+define i32 @regpressure1(i32* %P) {
+	%A = load i32* %P		; <i32> [#uses=1]
+	%Bp = getelementptr i32* %P, i32 1		; <i32*> [#uses=1]
+	%B = load i32* %Bp		; <i32> [#uses=1]
+	%s1 = mul i32 %A, %B		; <i32> [#uses=1]
+	%Cp = getelementptr i32* %P, i32 2		; <i32*> [#uses=1]
+	%C = load i32* %Cp		; <i32> [#uses=1]
+	%s2 = mul i32 %s1, %C		; <i32> [#uses=1]
+	%Dp = getelementptr i32* %P, i32 3		; <i32*> [#uses=1]
+	%D = load i32* %Dp		; <i32> [#uses=1]
+	%s3 = mul i32 %s2, %D		; <i32> [#uses=1]
+	%Ep = getelementptr i32* %P, i32 4		; <i32*> [#uses=1]
+	%E = load i32* %Ep		; <i32> [#uses=1]
+	%s4 = mul i32 %s3, %E		; <i32> [#uses=1]
+	%Fp = getelementptr i32* %P, i32 5		; <i32*> [#uses=1]
+	%F = load i32* %Fp		; <i32> [#uses=1]
+	%s5 = mul i32 %s4, %F		; <i32> [#uses=1]
+	%Gp = getelementptr i32* %P, i32 6		; <i32*> [#uses=1]
+	%G = load i32* %Gp		; <i32> [#uses=1]
+	%s6 = mul i32 %s5, %G		; <i32> [#uses=1]
+	%Hp = getelementptr i32* %P, i32 7		; <i32*> [#uses=1]
+	%H = load i32* %Hp		; <i32> [#uses=1]
+	%s7 = mul i32 %s6, %H		; <i32> [#uses=1]
+	%Ip = getelementptr i32* %P, i32 8		; <i32*> [#uses=1]
+	%I = load i32* %Ip		; <i32> [#uses=1]
+	%s8 = mul i32 %s7, %I		; <i32> [#uses=1]
+	%Jp = getelementptr i32* %P, i32 9		; <i32*> [#uses=1]
+	%J = load i32* %Jp		; <i32> [#uses=1]
+	%s9 = mul i32 %s8, %J		; <i32> [#uses=1]
+	ret i32 %s9
 }
 
-;; adds should be the same as muls.
-int %regpressure3(short* %P, bool %Cond, int* %Other) {
-	%A = load short* %P
-	%Bp = getelementptr short* %P, int 1
-	%B = load short* %Bp
-	%Cp = getelementptr short* %P, int 2
-	%C = load short* %Cp
-	%Dp = getelementptr short* %P, int 3
-	%D = load short* %Dp
-	%Ep = getelementptr short* %P, int 4
-	%E = load short* %Ep
-	%Fp = getelementptr short* %P, int 5
-	%F = load short* %Fp
-	%Gp = getelementptr short* %P, int 6
-	%G = load short* %Gp
-	%Hp = getelementptr short* %P, int 7
-	%H = load short* %Hp
-	%Ip = getelementptr short* %P, int 8
-	%I = load short* %Ip
-	%Jp = getelementptr short* %P, int 9
-	%J = load short* %Jp
-
-        ;; These casts prevent folding the loads into the adds.
-	%A = cast short %A to int
-	%B = cast short %B to int
-	%D = cast short %D to int
-	%C = cast short %C to int
-	%E = cast short %E to int
-	%F = cast short %F to int
-	%G = cast short %G to int
-	%H = cast short %H to int
-	%I = cast short %I to int
-	%J = cast short %J to int
-	%s1 = add int %A, %B
-	%s2 = add int %C, %s1
-	%s3 = add int %D, %s2
-	%s4 = add int %E, %s3
-	%s5 = add int %F, %s4
-	%s6 = add int %G, %s5
-	%s7 = add int %H, %s6
-	%s8 = add int %I, %s7
-	%s9 = add int %J, %s8
-	ret int %s9
+define i32 @regpressure2(i32* %P) {
+	%A = load i32* %P		; <i32> [#uses=1]
+	%Bp = getelementptr i32* %P, i32 1		; <i32*> [#uses=1]
+	%B = load i32* %Bp		; <i32> [#uses=1]
+	%Cp = getelementptr i32* %P, i32 2		; <i32*> [#uses=1]
+	%C = load i32* %Cp		; <i32> [#uses=1]
+	%Dp = getelementptr i32* %P, i32 3		; <i32*> [#uses=1]
+	%D = load i32* %Dp		; <i32> [#uses=1]
+	%Ep = getelementptr i32* %P, i32 4		; <i32*> [#uses=1]
+	%E = load i32* %Ep		; <i32> [#uses=1]
+	%Fp = getelementptr i32* %P, i32 5		; <i32*> [#uses=1]
+	%F = load i32* %Fp		; <i32> [#uses=1]
+	%Gp = getelementptr i32* %P, i32 6		; <i32*> [#uses=1]
+	%G = load i32* %Gp		; <i32> [#uses=1]
+	%Hp = getelementptr i32* %P, i32 7		; <i32*> [#uses=1]
+	%H = load i32* %Hp		; <i32> [#uses=1]
+	%Ip = getelementptr i32* %P, i32 8		; <i32*> [#uses=1]
+	%I = load i32* %Ip		; <i32> [#uses=1]
+	%Jp = getelementptr i32* %P, i32 9		; <i32*> [#uses=1]
+	%J = load i32* %Jp		; <i32> [#uses=1]
+	%s1 = mul i32 %A, %B		; <i32> [#uses=1]
+	%s2 = mul i32 %s1, %C		; <i32> [#uses=1]
+	%s3 = mul i32 %s2, %D		; <i32> [#uses=1]
+	%s4 = mul i32 %s3, %E		; <i32> [#uses=1]
+	%s5 = mul i32 %s4, %F		; <i32> [#uses=1]
+	%s6 = mul i32 %s5, %G		; <i32> [#uses=1]
+	%s7 = mul i32 %s6, %H		; <i32> [#uses=1]
+	%s8 = mul i32 %s7, %I		; <i32> [#uses=1]
+	%s9 = mul i32 %s8, %J		; <i32> [#uses=1]
+	ret i32 %s9
 }
 
+define i32 @regpressure3(i16* %P, i1 %Cond, i32* %Other) {
+	%A = load i16* %P		; <i16> [#uses=1]
+	%Bp = getelementptr i16* %P, i32 1		; <i16*> [#uses=1]
+	%B = load i16* %Bp		; <i16> [#uses=1]
+	%Cp = getelementptr i16* %P, i32 2		; <i16*> [#uses=1]
+	%C = load i16* %Cp		; <i16> [#uses=1]
+	%Dp = getelementptr i16* %P, i32 3		; <i16*> [#uses=1]
+	%D = load i16* %Dp		; <i16> [#uses=1]
+	%Ep = getelementptr i16* %P, i32 4		; <i16*> [#uses=1]
+	%E = load i16* %Ep		; <i16> [#uses=1]
+	%Fp = getelementptr i16* %P, i32 5		; <i16*> [#uses=1]
+	%F = load i16* %Fp		; <i16> [#uses=1]
+	%Gp = getelementptr i16* %P, i32 6		; <i16*> [#uses=1]
+	%G = load i16* %Gp		; <i16> [#uses=1]
+	%Hp = getelementptr i16* %P, i32 7		; <i16*> [#uses=1]
+	%H = load i16* %Hp		; <i16> [#uses=1]
+	%Ip = getelementptr i16* %P, i32 8		; <i16*> [#uses=1]
+	%I = load i16* %Ip		; <i16> [#uses=1]
+	%Jp = getelementptr i16* %P, i32 9		; <i16*> [#uses=1]
+	%J = load i16* %Jp		; <i16> [#uses=1]
+	%A.upgrd.1 = sext i16 %A to i32		; <i32> [#uses=1]
+	%B.upgrd.2 = sext i16 %B to i32		; <i32> [#uses=1]
+	%D.upgrd.3 = sext i16 %D to i32		; <i32> [#uses=1]
+	%C.upgrd.4 = sext i16 %C to i32		; <i32> [#uses=1]
+	%E.upgrd.5 = sext i16 %E to i32		; <i32> [#uses=1]
+	%F.upgrd.6 = sext i16 %F to i32		; <i32> [#uses=1]
+	%G.upgrd.7 = sext i16 %G to i32		; <i32> [#uses=1]
+	%H.upgrd.8 = sext i16 %H to i32		; <i32> [#uses=1]
+	%I.upgrd.9 = sext i16 %I to i32		; <i32> [#uses=1]
+	%J.upgrd.10 = sext i16 %J to i32		; <i32> [#uses=1]
+	%s1 = add i32 %A.upgrd.1, %B.upgrd.2		; <i32> [#uses=1]
+	%s2 = add i32 %C.upgrd.4, %s1		; <i32> [#uses=1]
+	%s3 = add i32 %D.upgrd.3, %s2		; <i32> [#uses=1]
+	%s4 = add i32 %E.upgrd.5, %s3		; <i32> [#uses=1]
+	%s5 = add i32 %F.upgrd.6, %s4		; <i32> [#uses=1]
+	%s6 = add i32 %G.upgrd.7, %s5		; <i32> [#uses=1]
+	%s7 = add i32 %H.upgrd.8, %s6		; <i32> [#uses=1]
+	%s8 = add i32 %I.upgrd.9, %s7		; <i32> [#uses=1]
+	%s9 = add i32 %J.upgrd.10, %s8		; <i32> [#uses=1]
+	ret i32 %s9
+}

Modified: llvm/trunk/test/CodeGen/X86/rem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/rem.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/rem.ll (original)
+++ llvm/trunk/test/CodeGen/X86/rem.ll Thu Feb 21 01:42:26 2008
@@ -1,22 +1,22 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep div
+; RUN: llvm-as < %s | llc -march=x86 | not grep div
 
-int %test1(int %X) {
-        %tmp1 = rem int %X, 255
-        ret int %tmp1
+define i32 @test1(i32 %X) {
+        %tmp1 = srem i32 %X, 255                ; <i32> [#uses=1]
+        ret i32 %tmp1
 }
 
-int %test2(int %X) {
-        %tmp1 = rem int %X, 256 
-        ret int %tmp1
+define i32 @test2(i32 %X) {
+        %tmp1 = srem i32 %X, 256                ; <i32> [#uses=1]
+        ret i32 %tmp1
 }
 
-uint %test3(uint %X) {
-        %tmp1 = rem uint %X, 255
-        ret uint %tmp1
+define i32 @test3(i32 %X) {
+        %tmp1 = urem i32 %X, 255                ; <i32> [#uses=1]
+        ret i32 %tmp1
 }
 
-uint %test4(uint %X) {
-        %tmp1 = rem uint %X, 256  ; just an and
-        ret uint %tmp1
+define i32 @test4(i32 %X) {
+        %tmp1 = urem i32 %X, 256                ; <i32> [#uses=1]
+        ret i32 %tmp1
 }
 

Modified: llvm/trunk/test/CodeGen/X86/rotate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/rotate.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/rotate.ll (original)
+++ llvm/trunk/test/CodeGen/X86/rotate.ll Thu Feb 21 01:42:26 2008
@@ -1,92 +1,100 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
 ; RUN:   grep {ro\[rl\]} | count 12
 
-uint %rotl32(uint %A, ubyte %Amt) {
-	%B = shl uint %A, ubyte %Amt
-	%Amt2 = sub ubyte 32, %Amt
-	%C = shr uint %A, ubyte %Amt2
-	%D = or uint %B, %C
-	ret uint %D
-}
-
-uint %rotr32(uint %A, ubyte %Amt) {
-	%B = shr uint %A, ubyte %Amt
-	%Amt2 = sub ubyte 32, %Amt
-	%C = shl uint %A, ubyte %Amt2
-	%D = or uint %B, %C
-	ret uint %D
-}
-
-uint %rotli32(uint %A) {
-	%B = shl uint %A, ubyte 5
-	%C = shr uint %A, ubyte 27
-	%D = or uint %B, %C
-	ret uint %D
-}
-
-uint %rotri32(uint %A) {
-	%B = shr uint %A, ubyte 5
-	%C = shl uint %A, ubyte 27
-	%D = or uint %B, %C
-	ret uint %D
-}
-
-ushort %rotl16(ushort %A, ubyte %Amt) {
-	%B = shl ushort %A, ubyte %Amt
-	%Amt2 = sub ubyte 16, %Amt
-	%C = shr ushort %A, ubyte %Amt2
-	%D = or ushort %B, %C
-	ret ushort %D
-}
-
-ushort %rotr16(ushort %A, ubyte %Amt) {
-	%B = shr ushort %A, ubyte %Amt
-	%Amt2 = sub ubyte 16, %Amt
-	%C = shl ushort %A, ubyte %Amt2
-	%D = or ushort %B, %C
-	ret ushort %D
-}
-
-ushort %rotli16(ushort %A) {
-	%B = shl ushort %A, ubyte 5
-	%C = shr ushort %A, ubyte 11
-	%D = or ushort %B, %C
-	ret ushort %D
-}
-
-ushort %rotri16(ushort %A) {
-	%B = shr ushort %A, ubyte 5
-	%C = shl ushort %A, ubyte 11
-	%D = or ushort %B, %C
-	ret ushort %D
-}
-
-ubyte %rotl8(ubyte %A, ubyte %Amt) {
-	%B = shl ubyte %A, ubyte %Amt
-	%Amt2 = sub ubyte 8, %Amt
-	%C = shr ubyte %A, ubyte %Amt2
-	%D = or ubyte %B, %C
-	ret ubyte %D
-}
-
-ubyte %rotr8(ubyte %A, ubyte %Amt) {
-	%B = shr ubyte %A, ubyte %Amt
-	%Amt2 = sub ubyte 8, %Amt
-	%C = shl ubyte %A, ubyte %Amt2
-	%D = or ubyte %B, %C
-	ret ubyte %D
-}
-
-ubyte %rotli8(ubyte %A) {
-	%B = shl ubyte %A, ubyte 5
-	%C = shr ubyte %A, ubyte 3
-	%D = or ubyte %B, %C
-	ret ubyte %D
-}
-
-ubyte %rotri8(ubyte %A) {
-	%B = shr ubyte %A, ubyte 5
-	%C = shl ubyte %A, ubyte 3
-	%D = or ubyte %B, %C
-	ret ubyte %D
+define i32 @rotl32(i32 %A, i8 %Amt) {
+	%shift.upgrd.1 = zext i8 %Amt to i32		; <i32> [#uses=1]
+	%B = shl i32 %A, %shift.upgrd.1		; <i32> [#uses=1]
+	%Amt2 = sub i8 32, %Amt		; <i8> [#uses=1]
+	%shift.upgrd.2 = zext i8 %Amt2 to i32		; <i32> [#uses=1]
+	%C = lshr i32 %A, %shift.upgrd.2		; <i32> [#uses=1]
+	%D = or i32 %B, %C		; <i32> [#uses=1]
+	ret i32 %D
+}
+
+define i32 @rotr32(i32 %A, i8 %Amt) {
+	%shift.upgrd.3 = zext i8 %Amt to i32		; <i32> [#uses=1]
+	%B = lshr i32 %A, %shift.upgrd.3		; <i32> [#uses=1]
+	%Amt2 = sub i8 32, %Amt		; <i8> [#uses=1]
+	%shift.upgrd.4 = zext i8 %Amt2 to i32		; <i32> [#uses=1]
+	%C = shl i32 %A, %shift.upgrd.4		; <i32> [#uses=1]
+	%D = or i32 %B, %C		; <i32> [#uses=1]
+	ret i32 %D
+}
+
+define i32 @rotli32(i32 %A) {
+	%B = shl i32 %A, 5		; <i32> [#uses=1]
+	%C = lshr i32 %A, 27		; <i32> [#uses=1]
+	%D = or i32 %B, %C		; <i32> [#uses=1]
+	ret i32 %D
+}
+
+define i32 @rotri32(i32 %A) {
+	%B = lshr i32 %A, 5		; <i32> [#uses=1]
+	%C = shl i32 %A, 27		; <i32> [#uses=1]
+	%D = or i32 %B, %C		; <i32> [#uses=1]
+	ret i32 %D
+}
+
+define i16 @rotl16(i16 %A, i8 %Amt) {
+	%shift.upgrd.5 = zext i8 %Amt to i16		; <i16> [#uses=1]
+	%B = shl i16 %A, %shift.upgrd.5		; <i16> [#uses=1]
+	%Amt2 = sub i8 16, %Amt		; <i8> [#uses=1]
+	%shift.upgrd.6 = zext i8 %Amt2 to i16		; <i16> [#uses=1]
+	%C = lshr i16 %A, %shift.upgrd.6		; <i16> [#uses=1]
+	%D = or i16 %B, %C		; <i16> [#uses=1]
+	ret i16 %D
+}
+
+define i16 @rotr16(i16 %A, i8 %Amt) {
+	%shift.upgrd.7 = zext i8 %Amt to i16		; <i16> [#uses=1]
+	%B = lshr i16 %A, %shift.upgrd.7		; <i16> [#uses=1]
+	%Amt2 = sub i8 16, %Amt		; <i8> [#uses=1]
+	%shift.upgrd.8 = zext i8 %Amt2 to i16		; <i16> [#uses=1]
+	%C = shl i16 %A, %shift.upgrd.8		; <i16> [#uses=1]
+	%D = or i16 %B, %C		; <i16> [#uses=1]
+	ret i16 %D
+}
+
+define i16 @rotli16(i16 %A) {
+	%B = shl i16 %A, 5		; <i16> [#uses=1]
+	%C = lshr i16 %A, 11		; <i16> [#uses=1]
+	%D = or i16 %B, %C		; <i16> [#uses=1]
+	ret i16 %D
+}
+
+define i16 @rotri16(i16 %A) {
+	%B = lshr i16 %A, 5		; <i16> [#uses=1]
+	%C = shl i16 %A, 11		; <i16> [#uses=1]
+	%D = or i16 %B, %C		; <i16> [#uses=1]
+	ret i16 %D
+}
+
+define i8 @rotl8(i8 %A, i8 %Amt) {
+	%B = shl i8 %A, %Amt		; <i8> [#uses=1]
+	%Amt2 = sub i8 8, %Amt		; <i8> [#uses=1]
+	%C = lshr i8 %A, %Amt2		; <i8> [#uses=1]
+	%D = or i8 %B, %C		; <i8> [#uses=1]
+	ret i8 %D
+}
+
+define i8 @rotr8(i8 %A, i8 %Amt) {
+	%B = lshr i8 %A, %Amt		; <i8> [#uses=1]
+	%Amt2 = sub i8 8, %Amt		; <i8> [#uses=1]
+	%C = shl i8 %A, %Amt2		; <i8> [#uses=1]
+	%D = or i8 %B, %C		; <i8> [#uses=1]
+	ret i8 %D
+}
+
+define i8 @rotli8(i8 %A) {
+	%B = shl i8 %A, 5		; <i8> [#uses=1]
+	%C = lshr i8 %A, 3		; <i8> [#uses=1]
+	%D = or i8 %B, %C		; <i8> [#uses=1]
+	ret i8 %D
+}
+
+define i8 @rotri8(i8 %A) {
+	%B = lshr i8 %A, 5		; <i8> [#uses=1]
+	%C = shl i8 %A, 3		; <i8> [#uses=1]
+	%D = or i8 %B, %C		; <i8> [#uses=1]
+	ret i8 %D
 }

Modified: llvm/trunk/test/CodeGen/X86/scalar_sse_minmax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/scalar_sse_minmax.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/scalar_sse_minmax.ll (original)
+++ llvm/trunk/test/CodeGen/X86/scalar_sse_minmax.ll Thu Feb 21 01:42:26 2008
@@ -1,44 +1,44 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse1,+sse2 | \
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse1,+sse2 | \
 ; RUN:   grep mins | count 3
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse1,+sse2 | \
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse1,+sse2 | \
 ; RUN:   grep maxs | count 2
 
-declare bool %llvm.isunordered.f64( double %x, double %y )
-declare bool %llvm.isunordered.f32( float %x, float %y )
+declare i1 @llvm.isunordered.f64(double, double)
 
-implementation
+declare i1 @llvm.isunordered.f32(float, float)
 
-float %min1(float %x, float %y) {
-        %tmp = setlt float %x, %y               ; <bool> [#uses=1]
-        %retval = select bool %tmp, float %x, float %y          ; <float> [#uses=1]
-        ret float %retval
+define float @min1(float %x, float %y) {
+	%tmp = fcmp olt float %x, %y		; <i1> [#uses=1]
+	%retval = select i1 %tmp, float %x, float %y		; <float> [#uses=1]
+	ret float %retval
 }
-double %min2(double %x, double %y) {
-        %tmp = setlt double %x, %y
-        %retval = select bool %tmp, double %x, double %y
-        ret double %retval
+
+define double @min2(double %x, double %y) {
+	%tmp = fcmp olt double %x, %y		; <i1> [#uses=1]
+	%retval = select i1 %tmp, double %x, double %y		; <double> [#uses=1]
+	ret double %retval
 }
 
-float %max1(float %x, float %y) {
-        %tmp = setge float %x, %y               ; <bool> [#uses=1]
-        %tmp2 = tail call bool %llvm.isunordered.f32( float %x, float %y )
-        %tmp3 = or bool %tmp2, %tmp             ; <bool> [#uses=1]
-        %retval = select bool %tmp3, float %x, float %y         
-        ret float %retval
+define float @max1(float %x, float %y) {
+	%tmp = fcmp oge float %x, %y		; <i1> [#uses=1]
+	%tmp2 = fcmp uno float %x, %y		; <i1> [#uses=1]
+	%tmp3 = or i1 %tmp2, %tmp		; <i1> [#uses=1]
+	%retval = select i1 %tmp3, float %x, float %y		; <float> [#uses=1]
+	ret float %retval
 }
 
-double %max2(double %x, double %y) {
-        %tmp = setge double %x, %y               ; <bool> [#uses=1]
-        %tmp2 = tail call bool %llvm.isunordered.f64( double %x, double %y )
-        %tmp3 = or bool %tmp2, %tmp             ; <bool> [#uses=1]
-        %retval = select bool %tmp3, double %x, double %y
-        ret double %retval
+define double @max2(double %x, double %y) {
+	%tmp = fcmp oge double %x, %y		; <i1> [#uses=1]
+	%tmp2 = fcmp uno double %x, %y		; <i1> [#uses=1]
+	%tmp3 = or i1 %tmp2, %tmp		; <i1> [#uses=1]
+	%retval = select i1 %tmp3, double %x, double %y		; <double> [#uses=1]
+	ret double %retval
 }
 
-<4 x float> %min3(float %tmp37) {
-        %tmp375 = insertelement <4 x float> undef, float %tmp37, uint 0
-        %tmp48 = tail call <4 x float> %llvm.x86.sse.min.ss( <4 x float> %tmp375, <4 x float> < float 6.553500e+04, float undef, float undef, float undef > )
+define <4 x float> @min3(float %tmp37) {
+	%tmp375 = insertelement <4 x float> undef, float %tmp37, i32 0		; <<4 x float>> [#uses=1]
+	%tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp375, <4 x float> < float 6.553500e+04, float undef, float undef, float undef > )		; <<4 x float>> [#uses=1]
 	ret <4 x float> %tmp48
 }
 
-declare <4 x float> %llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)

Modified: llvm/trunk/test/CodeGen/X86/setuge.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/setuge.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/setuge.ll (original)
+++ llvm/trunk/test/CodeGen/X86/setuge.ll Thu Feb 21 01:42:26 2008
@@ -1,12 +1,13 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep set
+; RUN: llvm-as < %s | llc -march=x86 | not grep set
 
-declare bool %llvm.isunordered.f32(float, float)
+declare i1 @llvm.isunordered.f32(float, float)
 
-float %cmp(float %A, float %B, float %C, float %D) {
+define float @cmp(float %A, float %B, float %C, float %D) {
 entry:
-	%tmp.1 = call bool %llvm.isunordered.f32(float %A, float %B)
-	%tmp.2 = setge float %A, %B
-	%tmp.3 = or bool %tmp.1, %tmp.2
-	%tmp.4 = select bool %tmp.3, float %C, float %D
-	ret float %tmp.4
+        %tmp.1 = fcmp uno float %A, %B          ; <i1> [#uses=1]
+        %tmp.2 = fcmp oge float %A, %B          ; <i1> [#uses=1]
+        %tmp.3 = or i1 %tmp.1, %tmp.2           ; <i1> [#uses=1]
+        %tmp.4 = select i1 %tmp.3, float %C, float %D           ; <float> [#uses=1]
+        ret float %tmp.4
 }
+

Modified: llvm/trunk/test/CodeGen/X86/shift-coalesce.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-coalesce.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-coalesce.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-coalesce.ll Thu Feb 21 01:42:26 2008
@@ -1,13 +1,15 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
 ; RUN:   grep {shld.*CL}
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
 ; RUN:   not grep {mov CL, BL}
 
 ; PR687
 
-ulong %foo(ulong %x, long* %X) {
-	%tmp.1 = load long* %X		; <long> [#uses=1]
-	%tmp.3 = cast long %tmp.1 to ubyte		; <ubyte> [#uses=1]
-	%tmp.4 = shl ulong %x, ubyte %tmp.3		; <ulong> [#uses=1]
-	ret ulong %tmp.4
+define i64 @foo(i64 %x, i64* %X) {
+        %tmp.1 = load i64* %X           ; <i64> [#uses=1]
+        %tmp.3 = trunc i64 %tmp.1 to i8         ; <i8> [#uses=1]
+        %shift.upgrd.1 = zext i8 %tmp.3 to i64          ; <i64> [#uses=1]
+        %tmp.4 = shl i64 %x, %shift.upgrd.1             ; <i64> [#uses=1]
+        ret i64 %tmp.4
 }
+

Modified: llvm/trunk/test/CodeGen/X86/shift-double.llx
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-double.llx?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-double.llx (original)
+++ llvm/trunk/test/CodeGen/X86/shift-double.llx Thu Feb 21 01:42:26 2008
@@ -1,31 +1,41 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
 ; RUN:   grep {sh\[lr\]d} | count 5
 
-long %test1(long %X, ubyte %C) {
-	%Y = shl long %X, ubyte %C
-	ret long %Y
+define i64 @test1(i64 %X, i8 %C) {
+        %shift.upgrd.1 = zext i8 %C to i64              ; <i64> [#uses=1]
+        %Y = shl i64 %X, %shift.upgrd.1         ; <i64> [#uses=1]
+        ret i64 %Y
 }
-long %test2(long %X, ubyte %C) {
-	%Y = shr long %X, ubyte %C
-	ret long %Y
+
+define i64 @test2(i64 %X, i8 %C) {
+        %shift.upgrd.2 = zext i8 %C to i64              ; <i64> [#uses=1]
+        %Y = ashr i64 %X, %shift.upgrd.2                ; <i64> [#uses=1]
+        ret i64 %Y
 }
-ulong %test3(ulong %X, ubyte %C) {
-	%Y = shr ulong %X, ubyte %C
-	ret ulong %Y
+
+define i64 @test3(i64 %X, i8 %C) {
+        %shift.upgrd.3 = zext i8 %C to i64              ; <i64> [#uses=1]
+        %Y = lshr i64 %X, %shift.upgrd.3                ; <i64> [#uses=1]
+        ret i64 %Y
 }
 
-uint %test4(uint %A, uint %B, ubyte %C) {
-	%X = shl uint %A, ubyte %C
-	%Cv = sub ubyte 32, %C
-	%Y = shr uint %B, ubyte %Cv
-	%Z = or uint %Y, %X
-	ret uint %Z
+define i32 @test4(i32 %A, i32 %B, i8 %C) {
+        %shift.upgrd.4 = zext i8 %C to i32              ; <i32> [#uses=1]
+        %X = shl i32 %A, %shift.upgrd.4         ; <i32> [#uses=1]
+        %Cv = sub i8 32, %C             ; <i8> [#uses=1]
+        %shift.upgrd.5 = zext i8 %Cv to i32             ; <i32> [#uses=1]
+        %Y = lshr i32 %B, %shift.upgrd.5                ; <i32> [#uses=1]
+        %Z = or i32 %Y, %X              ; <i32> [#uses=1]
+        ret i32 %Z
 }
 
-ushort %test5(ushort %A, ushort %B, ubyte %C) {
-	%X = shl ushort %A, ubyte %C
-	%Cv = sub ubyte 16, %C
-	%Y = shr ushort %B, ubyte %Cv
-	%Z = or ushort %Y, %X
-	ret ushort %Z
+define i16 @test5(i16 %A, i16 %B, i8 %C) {
+        %shift.upgrd.6 = zext i8 %C to i16              ; <i16> [#uses=1]
+        %X = shl i16 %A, %shift.upgrd.6         ; <i16> [#uses=1]
+        %Cv = sub i8 16, %C             ; <i8> [#uses=1]
+        %shift.upgrd.7 = zext i8 %Cv to i16             ; <i16> [#uses=1]
+        %Y = lshr i16 %B, %shift.upgrd.7                ; <i16> [#uses=1]
+        %Z = or i16 %Y, %X              ; <i16> [#uses=1]
+        ret i16 %Z
 }
+

Modified: llvm/trunk/test/CodeGen/X86/shift-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-folding.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-folding.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-folding.ll Thu Feb 21 01:42:26 2008
@@ -1,20 +1,23 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | \
+; RUN: llvm-as < %s | llc -march=x86 | \
 ; RUN:   grep {s\[ah\]\[rl\]l} | count 1
 
-int* %test1(int *%P, uint %X) {
-	%Y = shr uint %X, ubyte 2
-	%P2 = getelementptr int* %P, uint %Y
-	ret int* %P2
+define i32* @test1(i32* %P, i32 %X) {
+        %Y = lshr i32 %X, 2             ; <i32> [#uses=1]
+        %gep.upgrd.1 = zext i32 %Y to i64               ; <i64> [#uses=1]
+        %P2 = getelementptr i32* %P, i64 %gep.upgrd.1           ; <i32*> [#uses=1]
+        ret i32* %P2
 }
 
-int* %test2(int *%P, uint %X) {
-	%Y = shl uint %X, ubyte 2
-	%P2 = getelementptr int* %P, uint %Y
-	ret int* %P2
+define i32* @test2(i32* %P, i32 %X) {
+        %Y = shl i32 %X, 2              ; <i32> [#uses=1]
+        %gep.upgrd.2 = zext i32 %Y to i64               ; <i64> [#uses=1]
+        %P2 = getelementptr i32* %P, i64 %gep.upgrd.2           ; <i32*> [#uses=1]
+        ret i32* %P2
 }
 
-int* %test3(int *%P, int %X) {
-	%Y = shr int %X, ubyte 2
-	%P2 = getelementptr int* %P, int %Y
-	ret int* %P2
+define i32* @test3(i32* %P, i32 %X) {
+        %Y = ashr i32 %X, 2             ; <i32> [#uses=1]
+        %P2 = getelementptr i32* %P, i32 %Y             ; <i32*> [#uses=1]
+        ret i32* %P2
 }
+

Modified: llvm/trunk/test/CodeGen/X86/shift-one.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-one.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-one.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-one.ll Thu Feb 21 01:42:26 2008
@@ -1,9 +1,10 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep leal
+; RUN: llvm-as < %s | llc -march=x86 | not grep leal
 
-%x = external global int
+ at x = external global i32                ; <i32*> [#uses=1]
 
-int %test() {
-	%tmp.0 = load int* %x
-	%tmp.1 = shl int %tmp.0, ubyte 1
-	ret int %tmp.1
+define i32 @test() {
+        %tmp.0 = load i32* @x           ; <i32> [#uses=1]
+        %tmp.1 = shl i32 %tmp.0, 1              ; <i32> [#uses=1]
+        ret i32 %tmp.1
 }
+

Modified: llvm/trunk/test/CodeGen/X86/sse-load-ret.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-load-ret.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-load-ret.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-load-ret.ll Thu Feb 21 01:42:26 2008
@@ -1,17 +1,18 @@
-; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llvm-as < %s | \
 ; RUN:   llc -march=x86 -mcpu=yonah | not grep movss
-; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llvm-as < %s | \
 ; RUN:   llc -march=x86 -mcpu=yonah | not grep xmm
 
-double %test1(double *%P) {
-	%X = load double* %P
-	ret double %X
+define double @test1(double* %P) {
+        %X = load double* %P            ; <double> [#uses=1]
+        ret double %X
 }
 
-double %test2() {
-	ret double 1234.56
+define double @test2() {
+        ret double 1.234560e+03
 }
 
+
 ; FIXME: Todo
 ;double %test3(bool %B) {
 ;	%C = select bool %B, double 123.412, double 523.01123123

Modified: llvm/trunk/test/CodeGen/X86/store-fp-constant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/store-fp-constant.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/store-fp-constant.ll (original)
+++ llvm/trunk/test/CodeGen/X86/store-fp-constant.ll Thu Feb 21 01:42:26 2008
@@ -1,20 +1,19 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep rodata
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep literal
+; RUN: llvm-as < %s | llc -march=x86 | not grep rodata
+; RUN: llvm-as < %s | llc -march=x86 | not grep literal
 ;
 ; Check that no FP constants in this testcase ends up in the 
 ; constant pool.
-%G = external global float 
 
+ at G = external global float              ; <float*> [#uses=1]
 
-declare void %extfloat(float %F)
-declare void %extdouble(double)
+declare void @extfloat(float)
 
-implementation
+declare void @extdouble(double)
 
-void %testfloatstore() {
-        call void %extfloat(float 0x40934999A0000000)
-        call void %extdouble(double 0x409349A631F8A090)
-	store float 0x402A064C20000000, float* %G
+define void @testfloatstore() {
+        call void @extfloat( float 0x40934999A0000000 )
+        call void @extdouble( double 0x409349A631F8A090 )
+        store float 0x402A064C20000000, float* @G
         ret void
 }
 

Modified: llvm/trunk/test/CodeGen/X86/store-global-address.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/store-global-address.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/store-global-address.ll (original)
+++ llvm/trunk/test/CodeGen/X86/store-global-address.ll Thu Feb 21 01:42:26 2008
@@ -1,9 +1,10 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep movl | count 1
+; RUN: llvm-as < %s | llc -march=x86 | grep movl | count 1
 
-%dst = global int 0
-%ptr = global int* null
+ at dst = global i32 0             ; <i32*> [#uses=1]
+ at ptr = global i32* null         ; <i32**> [#uses=1]
 
-void %test() {
-	store int* %dst, int** %ptr
-	ret void
+define void @test() {
+        store i32* @dst, i32** @ptr
+        ret void
 }
+

Modified: llvm/trunk/test/CodeGen/X86/store_op_load_fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/store_op_load_fold.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/store_op_load_fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/store_op_load_fold.ll Thu Feb 21 01:42:26 2008
@@ -1,12 +1,13 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep mov
+; RUN: llvm-as < %s | llc -march=x86 | not grep mov
 ;
 ; Test the add and load are folded into the store instruction.
 
-%X = internal global short 0
+ at X = internal global i16 0              ; <i16*> [#uses=2]
 
-void %foo() {
-	%tmp.0 = load short* %X
-	%tmp.3 = add short %tmp.0, 329
-	store short %tmp.3, short* %X
-	ret void
+define void @foo() {
+        %tmp.0 = load i16* @X           ; <i16> [#uses=1]
+        %tmp.3 = add i16 %tmp.0, 329            ; <i16> [#uses=1]
+        store i16 %tmp.3, i16* @X
+        ret void
 }
+

Modified: llvm/trunk/test/CodeGen/X86/vec_call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_call.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_call.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_call.ll Thu Feb 21 01:42:26 2008
@@ -1,11 +1,13 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin8 | \
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin8 | \
 ; RUN:   grep {subl.*60}
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin8 | \
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin8 | \
 ; RUN:   grep {movaps.*32}
 
-void %test() {
-	tail call void %xx( int 1, int 2, int 3, int 4, int 5, int 6, int 7, <2 x long> cast (<4 x int> < int 4, int 3, int 2, int 1 > to <2 x long>), <2 x long> cast (<4 x int> < int 8, int 7, int 6, int 5 > to <2 x long>), <2 x long> cast (<4 x int> < int 6, int 4, int 2, int 0 > to <2 x long>), <2 x long> cast (<4 x int> < int 8, int 4, int 2, int 1 > to <2 x long>), <2 x long> cast (<4 x int> < int 0, int 1, int 3, int 9 > to <2 x long>) )
-	ret void
+
+define void @test() {
+        tail call void @xx( i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <2 x i64> bitcast (<4 x i32> < i32 4, i32 3, i32 2, i32 1 > to <2 x i64>), <2 x i64> bitcast (<4 x i32> < i32 8, i32 7, i32 6, i32 5 > to <2 x i64>), <2 x i64> bitcast (<4 x i32> < i32 6, i32 4, i32 2, i32 0 > to <2 x i64>), <2 x i64> bitcast (<4 x i32> < i32 8, i32 4, i32 2, i32 1 > to <2 x i64>), <2 x i64> bitcast (<4 x i32> < i32 0, i32 1, i32 3, i32 9 > to <2 x i64>) )
+        ret void
 }
 
-declare void %xx(int, int, int, int, int, int, int, <2 x long>, <2 x long>, <2 x long>, <2 x long>, <2 x long>)
+declare void @xx(i32, i32, i32, i32, i32, i32, i32, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>)
+

Modified: llvm/trunk/test/CodeGen/X86/vec_clear.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_clear.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_clear.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_clear.ll Thu Feb 21 01:42:26 2008
@@ -1,8 +1,10 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 | not grep and
-<4 x float> %test(<4 x float>* %v1) {
-	%tmp = load <4 x float>* %v1
-	%tmp15 = cast <4 x float> %tmp to <2 x long>
-	%tmp24 = and <2 x long> %tmp15, cast (<4 x int> < int 0, int 0, int -1, int -1 > to <2 x long>)
-	%tmp31 = cast <2 x long> %tmp24 to <4 x float>
-	ret <4 x float> %tmp31
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | not grep and
+
+define <4 x float> @test(<4 x float>* %v1) {
+        %tmp = load <4 x float>* %v1            ; <<4 x float>> [#uses=1]
+        %tmp15 = bitcast <4 x float> %tmp to <2 x i64>          ; <<2 x i64>> [#uses=1]
+        %tmp24 = and <2 x i64> %tmp15, bitcast (<4 x i32> < i32 0, i32 0, i32 -1, i32 -1 > to <2 x i64>)              ; <<2 x i64>> [#uses=1]
+        %tmp31 = bitcast <2 x i64> %tmp24 to <4 x float>                ; <<4 x float>> [#uses=1]
+        ret <4 x float> %tmp31
 }
+

Modified: llvm/trunk/test/CodeGen/X86/vec_extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_extract.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_extract.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_extract.ll Thu Feb 21 01:42:26 2008
@@ -1,36 +1,36 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 -o %t -f
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -o %t -f
 ; RUN: grep movss    %t | count 3
 ; RUN: grep movhlps  %t | count 1
 ; RUN: grep pshufd   %t | count 1
 ; RUN: grep unpckhpd %t | count 1
 
-void %test1(<4 x float>* %F, float* %f) {
-	%tmp = load <4 x float>* %F
-	%tmp7 = add <4 x float> %tmp, %tmp
-	%tmp2 = extractelement <4 x float> %tmp7, uint 0
+define void @test1(<4 x float>* %F, float* %f) {
+	%tmp = load <4 x float>* %F		; <<4 x float>> [#uses=2]
+	%tmp7 = add <4 x float> %tmp, %tmp		; <<4 x float>> [#uses=1]
+	%tmp2 = extractelement <4 x float> %tmp7, i32 0		; <float> [#uses=1]
 	store float %tmp2, float* %f
 	ret void
 }
 
-float %test2(<4 x float>* %F, float* %f) {
-	%tmp = load <4 x float>* %F
-	%tmp7 = add <4 x float> %tmp, %tmp
-	%tmp2 = extractelement <4 x float> %tmp7, uint 2
-        ret float %tmp2
+define float @test2(<4 x float>* %F, float* %f) {
+	%tmp = load <4 x float>* %F		; <<4 x float>> [#uses=2]
+	%tmp7 = add <4 x float> %tmp, %tmp		; <<4 x float>> [#uses=1]
+	%tmp2 = extractelement <4 x float> %tmp7, i32 2		; <float> [#uses=1]
+	ret float %tmp2
 }
 
-void %test3(float* %R, <4 x float>* %P1) {
-	%X = load <4 x float>* %P1
-	%tmp = extractelement <4 x float> %X, uint 3
+define void @test3(float* %R, <4 x float>* %P1) {
+	%X = load <4 x float>* %P1		; <<4 x float>> [#uses=1]
+	%tmp = extractelement <4 x float> %X, i32 3		; <float> [#uses=1]
 	store float %tmp, float* %R
 	ret void
 }
 
-double %test4(double %A) {
-        %tmp1 = call <2 x double> %foo()
-        %tmp2 = extractelement <2 x double> %tmp1, uint 1
-        %tmp3 = add double %tmp2, %A
-        ret double %tmp3
+define double @test4(double %A) {
+	%tmp1 = call <2 x double> @foo( )		; <<2 x double>> [#uses=1]
+	%tmp2 = extractelement <2 x double> %tmp1, i32 1		; <double> [#uses=1]
+	%tmp3 = add double %tmp2, %A		; <double> [#uses=1]
+	ret double %tmp3
 }
 
-declare <2 x double> %foo()
+declare <2 x double> @foo()

Modified: llvm/trunk/test/CodeGen/X86/vec_ins_extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_ins_extract.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_ins_extract.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_ins_extract.ll Thu Feb 21 01:42:26 2008
@@ -1,50 +1,50 @@
-; RUN: llvm-upgrade < %s | llvm-as | opt -scalarrepl -instcombine | \
+; RUN: llvm-as < %s | opt -scalarrepl -instcombine | \
 ; RUN:   llc -march=x86 -mcpu=yonah | not grep sub.*esp
 
-; This checks that various insert/extract idiom work without going to the 
+; This checks that various insert/extract idiom work without going to the
 ; stack.
 
-void %test(<4 x float>* %F, float %f) {
+define void @test(<4 x float>* %F, float %f) {
 entry:
 	%tmp = load <4 x float>* %F		; <<4 x float>> [#uses=2]
 	%tmp3 = add <4 x float> %tmp, %tmp		; <<4 x float>> [#uses=1]
-	%tmp10 = insertelement <4 x float> %tmp3, float %f, uint 0		; <<4 x float>> [#uses=2]
+	%tmp10 = insertelement <4 x float> %tmp3, float %f, i32 0		; <<4 x float>> [#uses=2]
 	%tmp6 = add <4 x float> %tmp10, %tmp10		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp6, <4 x float>* %F
 	ret void
 }
 
-void %test2(<4 x float>* %F, float %f) {
+define void @test2(<4 x float>* %F, float %f) {
 entry:
 	%G = alloca <4 x float>, align 16		; <<4 x float>*> [#uses=3]
 	%tmp = load <4 x float>* %F		; <<4 x float>> [#uses=2]
 	%tmp3 = add <4 x float> %tmp, %tmp		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp3, <4 x float>* %G
-	%tmp = getelementptr <4 x float>* %G, int 0, int 2		; <float*> [#uses=1]
-	store float %f, float* %tmp
+	%tmp.upgrd.1 = getelementptr <4 x float>* %G, i32 0, i32 2		; <float*> [#uses=1]
+	store float %f, float* %tmp.upgrd.1
 	%tmp4 = load <4 x float>* %G		; <<4 x float>> [#uses=2]
 	%tmp6 = add <4 x float> %tmp4, %tmp4		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp6, <4 x float>* %F
 	ret void
 }
 
-void %test3(<4 x float>* %F, float* %f) {
+define void @test3(<4 x float>* %F, float* %f) {
 entry:
 	%G = alloca <4 x float>, align 16		; <<4 x float>*> [#uses=2]
 	%tmp = load <4 x float>* %F		; <<4 x float>> [#uses=2]
 	%tmp3 = add <4 x float> %tmp, %tmp		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp3, <4 x float>* %G
-	%tmp = getelementptr <4 x float>* %G, int 0, int 2		; <float*> [#uses=1]
-	%tmp = load float* %tmp		; <float> [#uses=1]
-	store float %tmp, float* %f
+	%tmp.upgrd.2 = getelementptr <4 x float>* %G, i32 0, i32 2		; <float*> [#uses=1]
+	%tmp.upgrd.3 = load float* %tmp.upgrd.2		; <float> [#uses=1]
+	store float %tmp.upgrd.3, float* %f
 	ret void
 }
 
-void %test4(<4 x float>* %F, float* %f) {
+define void @test4(<4 x float>* %F, float* %f) {
 entry:
 	%tmp = load <4 x float>* %F		; <<4 x float>> [#uses=2]
-	%tmp5.lhs = extractelement <4 x float> %tmp, uint 0		; <float> [#uses=1]
-	%tmp5.rhs = extractelement <4 x float> %tmp, uint 0		; <float> [#uses=1]
+	%tmp5.lhs = extractelement <4 x float> %tmp, i32 0		; <float> [#uses=1]
+	%tmp5.rhs = extractelement <4 x float> %tmp, i32 0		; <float> [#uses=1]
 	%tmp5 = add float %tmp5.lhs, %tmp5.rhs		; <float> [#uses=1]
 	store float %tmp5, float* %f
 	ret void

Modified: llvm/trunk/test/CodeGen/X86/vec_select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_select.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_select.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_select.ll Thu Feb 21 01:42:26 2008
@@ -1,11 +1,12 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse
 
-void %test(int %C, <4 x float>* %A, <4 x float>* %B) {
-	%tmp = load <4 x float>* %A
-	%tmp3 = load <4 x float>* %B
-	%tmp9 = mul <4 x float> %tmp3, %tmp3
-	%tmp = seteq int %C, 0
-	%iftmp.38.0 = select bool %tmp, <4 x float> %tmp9, <4 x float> %tmp
-	store <4 x float> %iftmp.38.0, <4 x float>* %A
-	ret void
+define void @test(i32 %C, <4 x float>* %A, <4 x float>* %B) {
+        %tmp = load <4 x float>* %A             ; <<4 x float>> [#uses=1]
+        %tmp3 = load <4 x float>* %B            ; <<4 x float>> [#uses=2]
+        %tmp9 = mul <4 x float> %tmp3, %tmp3            ; <<4 x float>> [#uses=1]
+        %tmp.upgrd.1 = icmp eq i32 %C, 0                ; <i1> [#uses=1]
+        %iftmp.38.0 = select i1 %tmp.upgrd.1, <4 x float> %tmp9, <4 x float> %tmp               ; <<4 x float>> [#uses=1]
+        store <4 x float> %iftmp.38.0, <4 x float>* %A
+        ret void
 }
+

Modified: llvm/trunk/test/CodeGen/X86/vec_set-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-3.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-3.ll Thu Feb 21 01:42:26 2008
@@ -1,17 +1,18 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 -o %t -f
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -o %t -f
 ; RUN: grep shufps %t | count 1
 ; RUN: grep pshufd %t | count 1
 
-<4 x float> %test(float %a) {
-	%tmp = insertelement <4 x float> zeroinitializer, float %a, uint 1
-	%tmp5 = insertelement <4 x float> %tmp, float 0.000000e+00, uint 2
-	%tmp6 = insertelement <4 x float> %tmp5, float 0.000000e+00, uint 3
-	ret <4 x float> %tmp6
+define <4 x float> @test(float %a) {
+        %tmp = insertelement <4 x float> zeroinitializer, float %a, i32 1               ; <<4 x float>> [#uses=1]
+        %tmp5 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 2               ; <<4 x float>> [#uses=1]
+        %tmp6 = insertelement <4 x float> %tmp5, float 0.000000e+00, i32 3              ; <<4 x float>> [#uses=1]
+        ret <4 x float> %tmp6
 }
 
-<2 x long> %test2(int %a) {
-	%tmp7 = insertelement <4 x int> zeroinitializer, int %a, uint 2
-	%tmp9 = insertelement <4 x int> %tmp7, int 0, uint 3
-	%tmp10 = cast <4 x int> %tmp9 to <2 x long>
-	ret <2 x long> %tmp10
+define <2 x i64> @test2(i32 %a) {
+        %tmp7 = insertelement <4 x i32> zeroinitializer, i32 %a, i32 2          ; <<4 x i32>> [#uses=1]
+        %tmp9 = insertelement <4 x i32> %tmp7, i32 0, i32 3             ; <<4 x i32>> [#uses=1]
+        %tmp10 = bitcast <4 x i32> %tmp9 to <2 x i64>           ; <<2 x i64>> [#uses=1]
+        ret <2 x i64> %tmp10
 }
+

Modified: llvm/trunk/test/CodeGen/X86/vec_set-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-4.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-4.ll Thu Feb 21 01:42:26 2008
@@ -1,24 +1,24 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 | grep pinsrw | count 2
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | grep pinsrw | count 2
 
-<2 x long> %test(short %a) {
+define <2 x i64> @test(i16 %a) {
 entry:
-	%tmp10 = insertelement <8 x short> zeroinitializer, short %a, uint 3		; <<8 x short>> [#uses=1]
-	%tmp12 = insertelement <8 x short> %tmp10, short 0, uint 4		; <<8 x short>> [#uses=1]
-	%tmp14 = insertelement <8 x short> %tmp12, short 0, uint 5		; <<8 x short>> [#uses=1]
-	%tmp16 = insertelement <8 x short> %tmp14, short 0, uint 6		; <<8 x short>> [#uses=1]
-	%tmp18 = insertelement <8 x short> %tmp16, short 0, uint 7		; <<8 x short>> [#uses=1]
-	%tmp19 = cast <8 x short> %tmp18 to <2 x long>		; <<2 x long>> [#uses=1]
-	ret <2 x long> %tmp19
+	%tmp10 = insertelement <8 x i16> zeroinitializer, i16 %a, i32 3		; <<8 x i16>> [#uses=1]
+	%tmp12 = insertelement <8 x i16> %tmp10, i16 0, i32 4		; <<8 x i16>> [#uses=1]
+	%tmp14 = insertelement <8 x i16> %tmp12, i16 0, i32 5		; <<8 x i16>> [#uses=1]
+	%tmp16 = insertelement <8 x i16> %tmp14, i16 0, i32 6		; <<8 x i16>> [#uses=1]
+	%tmp18 = insertelement <8 x i16> %tmp16, i16 0, i32 7		; <<8 x i16>> [#uses=1]
+	%tmp19 = bitcast <8 x i16> %tmp18 to <2 x i64>		; <<2 x i64>> [#uses=1]
+	ret <2 x i64> %tmp19
 }
 
-<2 x long> %test2(sbyte %a) {
+define <2 x i64> @test2(i8 %a) {
 entry:
-	%tmp24 = insertelement <16 x sbyte> zeroinitializer, sbyte %a, uint 10
-	%tmp26 = insertelement <16 x sbyte> %tmp24, sbyte 0, uint 11
-	%tmp28 = insertelement <16 x sbyte> %tmp26, sbyte 0, uint 12
-	%tmp30 = insertelement <16 x sbyte> %tmp28, sbyte 0, uint 13
-	%tmp32 = insertelement <16 x sbyte> %tmp30, sbyte 0, uint 14
-	%tmp34 = insertelement <16 x sbyte> %tmp32, sbyte 0, uint 15
-	%tmp35 = cast <16 x sbyte> %tmp34 to <2 x long>
-	ret <2 x long> %tmp35
+	%tmp24 = insertelement <16 x i8> zeroinitializer, i8 %a, i32 10		; <<16 x i8>> [#uses=1]
+	%tmp26 = insertelement <16 x i8> %tmp24, i8 0, i32 11		; <<16 x i8>> [#uses=1]
+	%tmp28 = insertelement <16 x i8> %tmp26, i8 0, i32 12		; <<16 x i8>> [#uses=1]
+	%tmp30 = insertelement <16 x i8> %tmp28, i8 0, i32 13		; <<16 x i8>> [#uses=1]
+	%tmp32 = insertelement <16 x i8> %tmp30, i8 0, i32 14		; <<16 x i8>> [#uses=1]
+	%tmp34 = insertelement <16 x i8> %tmp32, i8 0, i32 15		; <<16 x i8>> [#uses=1]
+	%tmp35 = bitcast <16 x i8> %tmp34 to <2 x i64>		; <<2 x i64>> [#uses=1]
+	ret <2 x i64> %tmp35
 }

Modified: llvm/trunk/test/CodeGen/X86/vec_set-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-5.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-5.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-5.ll Thu Feb 21 01:42:26 2008
@@ -1,30 +1,30 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 -o %t -f
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -o %t -f
 ; RUN: grep movlhps   %t | count 1
 ; RUN: grep unpcklps  %t | count 1
 ; RUN: grep punpckldq %t | count 1
 ; RUN: grep movq      %t | count 1
 
-<4 x float> %test1(float %a, float %b) {
-	%tmp = insertelement <4 x float> zeroinitializer, float %a, uint 0
-	%tmp6 = insertelement <4 x float> %tmp, float 0.000000e+00, uint 1
-	%tmp8 = insertelement <4 x float> %tmp6, float %b, uint 2
-	%tmp9 = insertelement <4 x float> %tmp8, float 0.000000e+00, uint 3
+define <4 x float> @test1(float %a, float %b) {
+	%tmp = insertelement <4 x float> zeroinitializer, float %a, i32 0		; <<4 x float>> [#uses=1]
+	%tmp6 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1		; <<4 x float>> [#uses=1]
+	%tmp8 = insertelement <4 x float> %tmp6, float %b, i32 2		; <<4 x float>> [#uses=1]
+	%tmp9 = insertelement <4 x float> %tmp8, float 0.000000e+00, i32 3		; <<4 x float>> [#uses=1]
 	ret <4 x float> %tmp9
 }
 
-<4 x float> %test2(float %a, float %b) {
-	%tmp = insertelement <4 x float> zeroinitializer, float %a, uint 0
-	%tmp7 = insertelement <4 x float> %tmp, float %b, uint 1
-	%tmp8 = insertelement <4 x float> %tmp7, float 0.000000e+00, uint 2
-	%tmp9 = insertelement <4 x float> %tmp8, float 0.000000e+00, uint 3
+define <4 x float> @test2(float %a, float %b) {
+	%tmp = insertelement <4 x float> zeroinitializer, float %a, i32 0		; <<4 x float>> [#uses=1]
+	%tmp7 = insertelement <4 x float> %tmp, float %b, i32 1		; <<4 x float>> [#uses=1]
+	%tmp8 = insertelement <4 x float> %tmp7, float 0.000000e+00, i32 2		; <<4 x float>> [#uses=1]
+	%tmp9 = insertelement <4 x float> %tmp8, float 0.000000e+00, i32 3		; <<4 x float>> [#uses=1]
 	ret <4 x float> %tmp9
 }
 
-<2 x long> %test3(int %a, int %b) {
-	%tmp = insertelement <4 x int> zeroinitializer, int %a, uint 0
-	%tmp6 = insertelement <4 x int> %tmp, int %b, uint 1
-	%tmp8 = insertelement <4 x int> %tmp6, int 0, uint 2
-	%tmp10 = insertelement <4 x int> %tmp8, int 0, uint 3
-	%tmp11 = cast <4 x int> %tmp10 to <2 x long>
-	ret <2 x long> %tmp11
+define <2 x i64> @test3(i32 %a, i32 %b) {
+	%tmp = insertelement <4 x i32> zeroinitializer, i32 %a, i32 0		; <<4 x i32>> [#uses=1]
+	%tmp6 = insertelement <4 x i32> %tmp, i32 %b, i32 1		; <<4 x i32>> [#uses=1]
+	%tmp8 = insertelement <4 x i32> %tmp6, i32 0, i32 2		; <<4 x i32>> [#uses=1]
+	%tmp10 = insertelement <4 x i32> %tmp8, i32 0, i32 3		; <<4 x i32>> [#uses=1]
+	%tmp11 = bitcast <4 x i32> %tmp10 to <2 x i64>		; <<2 x i64>> [#uses=1]
+	ret <2 x i64> %tmp11
 }

Modified: llvm/trunk/test/CodeGen/X86/vec_set-6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-6.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-6.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-6.ll Thu Feb 21 01:42:26 2008
@@ -1,10 +1,11 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 -o %t -f
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -o %t -f
 ; RUN: grep unpcklps %t | count 1
 ; RUN: grep shufps   %t | count 1
 
-<4 x float> %test(float %a, float %b, float %c) {
-	%tmp = insertelement <4 x float> zeroinitializer, float %a, uint 1
-	%tmp8 = insertelement <4 x float> %tmp, float %b, uint 2
-	%tmp10 = insertelement <4 x float> %tmp8, float %c, uint 3
-	ret <4 x float> %tmp10
+define <4 x float> @test(float %a, float %b, float %c) {
+        %tmp = insertelement <4 x float> zeroinitializer, float %a, i32 1               ; <<4 x float>> [#uses=1]
+        %tmp8 = insertelement <4 x float> %tmp, float %b, i32 2         ; <<4 x float>> [#uses=1]
+        %tmp10 = insertelement <4 x float> %tmp8, float %c, i32 3               ; <<4 x float>> [#uses=1]
+        ret <4 x float> %tmp10
 }
+

Modified: llvm/trunk/test/CodeGen/X86/vec_set.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set.ll Thu Feb 21 01:42:26 2008
@@ -1,14 +1,15 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 | grep punpckl | count 7
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | grep punpckl | count 7
 
-void %test(<8 x short>* %b, short %a0, short %a1, short %a2, short %a3, short %a4, short %a5, short %a6, short %a7) {
-	%tmp = insertelement <8 x short> zeroinitializer, short %a0, uint 0
-	%tmp2 = insertelement <8 x short> %tmp, short %a1, uint 1
-	%tmp4 = insertelement <8 x short> %tmp2, short %a2, uint 2
-	%tmp6 = insertelement <8 x short> %tmp4, short %a3, uint 3
-	%tmp8 = insertelement <8 x short> %tmp6, short %a4, uint 4
-	%tmp10 = insertelement <8 x short> %tmp8, short %a5, uint 5
-	%tmp12 = insertelement <8 x short> %tmp10, short %a6, uint 6
-	%tmp14 = insertelement <8 x short> %tmp12, short %a7, uint 7
-	store <8 x short> %tmp14, <8 x short>* %b
-	ret void
+define void @test(<8 x i16>* %b, i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) {
+        %tmp = insertelement <8 x i16> zeroinitializer, i16 %a0, i32 0          ; <<8 x i16>> [#uses=1]
+        %tmp2 = insertelement <8 x i16> %tmp, i16 %a1, i32 1            ; <<8 x i16>> [#uses=1]
+        %tmp4 = insertelement <8 x i16> %tmp2, i16 %a2, i32 2           ; <<8 x i16>> [#uses=1]
+        %tmp6 = insertelement <8 x i16> %tmp4, i16 %a3, i32 3           ; <<8 x i16>> [#uses=1]
+        %tmp8 = insertelement <8 x i16> %tmp6, i16 %a4, i32 4           ; <<8 x i16>> [#uses=1]
+        %tmp10 = insertelement <8 x i16> %tmp8, i16 %a5, i32 5          ; <<8 x i16>> [#uses=1]
+        %tmp12 = insertelement <8 x i16> %tmp10, i16 %a6, i32 6         ; <<8 x i16>> [#uses=1]
+        %tmp14 = insertelement <8 x i16> %tmp12, i16 %a7, i32 7         ; <<8 x i16>> [#uses=1]
+        store <8 x i16> %tmp14, <8 x i16>* %b
+        ret void
 }
+

Modified: llvm/trunk/test/CodeGen/X86/vec_shuffle-10.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shuffle-10.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shuffle-10.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shuffle-10.ll Thu Feb 21 01:42:26 2008
@@ -1,27 +1,27 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 | \
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | \
 ; RUN:   grep unpcklps | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 | \
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | \
 ; RUN:   grep unpckhps | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 | \
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | \
 ; RUN:   not grep {sub.*esp}
 
-void %test(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B) {
-        %tmp = load <4 x float>* %B             ; <<4 x float>> [#uses=2]
-        %tmp3 = load <4 x float>* %A            ; <<4 x float>> [#uses=2]
-        %tmp = extractelement <4 x float> %tmp3, uint 0         ; <float> [#uses=1]
-        %tmp7 = extractelement <4 x float> %tmp, uint 0         ; <float> [#uses=1]
-        %tmp8 = extractelement <4 x float> %tmp3, uint 1                ; <float> [#uses=1]
-        %tmp9 = extractelement <4 x float> %tmp, uint 1         ; <float> [#uses=1]
-        %tmp10 = insertelement <4 x float> undef, float %tmp, uint 0            ; <<4 x float>> [#uses=1]
-        %tmp11 = insertelement <4 x float> %tmp10, float %tmp7, uint 1          ; <<4 x float>> [#uses=1]
-        %tmp12 = insertelement <4 x float> %tmp11, float %tmp8, uint 2          ; <<4 x float>> [#uses=1]
-        %tmp13 = insertelement <4 x float> %tmp12, float %tmp9, uint 3          ; <<4 x float>> [#uses=1]
-        store <4 x float> %tmp13, <4 x float>* %res
-        ret void
+define void @test(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B) {
+	%tmp = load <4 x float>* %B		; <<4 x float>> [#uses=2]
+	%tmp3 = load <4 x float>* %A		; <<4 x float>> [#uses=2]
+	%tmp.upgrd.1 = extractelement <4 x float> %tmp3, i32 0		; <float> [#uses=1]
+	%tmp7 = extractelement <4 x float> %tmp, i32 0		; <float> [#uses=1]
+	%tmp8 = extractelement <4 x float> %tmp3, i32 1		; <float> [#uses=1]
+	%tmp9 = extractelement <4 x float> %tmp, i32 1		; <float> [#uses=1]
+	%tmp10 = insertelement <4 x float> undef, float %tmp.upgrd.1, i32 0		; <<4 x float>> [#uses=1]
+	%tmp11 = insertelement <4 x float> %tmp10, float %tmp7, i32 1		; <<4 x float>> [#uses=1]
+	%tmp12 = insertelement <4 x float> %tmp11, float %tmp8, i32 2		; <<4 x float>> [#uses=1]
+	%tmp13 = insertelement <4 x float> %tmp12, float %tmp9, i32 3		; <<4 x float>> [#uses=1]
+	store <4 x float> %tmp13, <4 x float>* %res
+	ret void
 }
 
-void %test2(<4 x float> %X, <4 x float>* %res) {
-	%tmp5 = shufflevector <4 x float> %X, <4 x float> undef, <4 x uint> < uint 2, uint 6, uint 3, uint 7 >
+define void @test2(<4 x float> %X, <4 x float>* %res) {
+	%tmp5 = shufflevector <4 x float> %X, <4 x float> undef, <4 x i32> < i32 2, i32 6, i32 3, i32 7 >		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp5, <4 x float>* %res
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/X86/vec_shuffle-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shuffle-2.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shuffle-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shuffle-2.ll Thu Feb 21 01:42:26 2008
@@ -1,47 +1,47 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 -o %t -f
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -o %t -f
 ; RUN: grep pshufhw %t | count 1
 ; RUN: grep pshuflw %t | count 1
 ; RUN: grep movhps  %t | count 1
 
-void %test1(<2 x long>* %res, <2 x long>* %A) {
-	%tmp = load <2 x long>* %A
-	%tmp = cast <2 x long> %tmp to <8 x short>
-	%tmp0 = extractelement <8 x short> %tmp, uint 0
-	%tmp1 = extractelement <8 x short> %tmp, uint 1
-	%tmp2 = extractelement <8 x short> %tmp, uint 2
-	%tmp3 = extractelement <8 x short> %tmp, uint 3
-	%tmp4 = extractelement <8 x short> %tmp, uint 4
-	%tmp5 = extractelement <8 x short> %tmp, uint 5
-	%tmp6 = extractelement <8 x short> %tmp, uint 6
-	%tmp7 = extractelement <8 x short> %tmp, uint 7
-	%tmp8 = insertelement <8 x short> undef, short %tmp2, uint 0
-	%tmp9 = insertelement <8 x short> %tmp8, short %tmp1, uint 1
-	%tmp10 = insertelement <8 x short> %tmp9, short %tmp0, uint 2
-	%tmp11 = insertelement <8 x short> %tmp10, short %tmp3, uint 3
-	%tmp12 = insertelement <8 x short> %tmp11, short %tmp6, uint 4
-	%tmp13 = insertelement <8 x short> %tmp12, short %tmp5, uint 5
-	%tmp14 = insertelement <8 x short> %tmp13, short %tmp4, uint 6
-	%tmp15 = insertelement <8 x short> %tmp14, short %tmp7, uint 7
-	%tmp15 = cast <8 x short> %tmp15 to <2 x long>
-	store <2 x long> %tmp15, <2 x long>* %res
+define void @test1(<2 x i64>* %res, <2 x i64>* %A) {
+	%tmp = load <2 x i64>* %A		; <<2 x i64>> [#uses=1]
+	%tmp.upgrd.1 = bitcast <2 x i64> %tmp to <8 x i16>		; <<8 x i16>> [#uses=8]
+	%tmp0 = extractelement <8 x i16> %tmp.upgrd.1, i32 0		; <i16> [#uses=1]
+	%tmp1 = extractelement <8 x i16> %tmp.upgrd.1, i32 1		; <i16> [#uses=1]
+	%tmp2 = extractelement <8 x i16> %tmp.upgrd.1, i32 2		; <i16> [#uses=1]
+	%tmp3 = extractelement <8 x i16> %tmp.upgrd.1, i32 3		; <i16> [#uses=1]
+	%tmp4 = extractelement <8 x i16> %tmp.upgrd.1, i32 4		; <i16> [#uses=1]
+	%tmp5 = extractelement <8 x i16> %tmp.upgrd.1, i32 5		; <i16> [#uses=1]
+	%tmp6 = extractelement <8 x i16> %tmp.upgrd.1, i32 6		; <i16> [#uses=1]
+	%tmp7 = extractelement <8 x i16> %tmp.upgrd.1, i32 7		; <i16> [#uses=1]
+	%tmp8 = insertelement <8 x i16> undef, i16 %tmp2, i32 0		; <<8 x i16>> [#uses=1]
+	%tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 1		; <<8 x i16>> [#uses=1]
+	%tmp10 = insertelement <8 x i16> %tmp9, i16 %tmp0, i32 2		; <<8 x i16>> [#uses=1]
+	%tmp11 = insertelement <8 x i16> %tmp10, i16 %tmp3, i32 3		; <<8 x i16>> [#uses=1]
+	%tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp6, i32 4		; <<8 x i16>> [#uses=1]
+	%tmp13 = insertelement <8 x i16> %tmp12, i16 %tmp5, i32 5		; <<8 x i16>> [#uses=1]
+	%tmp14 = insertelement <8 x i16> %tmp13, i16 %tmp4, i32 6		; <<8 x i16>> [#uses=1]
+	%tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 7		; <<8 x i16>> [#uses=1]
+	%tmp15.upgrd.2 = bitcast <8 x i16> %tmp15 to <2 x i64>		; <<2 x i64>> [#uses=1]
+	store <2 x i64> %tmp15.upgrd.2, <2 x i64>* %res
 	ret void
 }
 
-void %test2(<4 x float>* %r, <2 x int>* %A) {
-	%tmp = load <4 x float>* %r
-	%tmp = cast <2 x int>* %A to double*
-	%tmp = load double* %tmp
-	%tmp = insertelement <2 x double> undef, double %tmp, uint 0
-	%tmp5 = insertelement <2 x double> %tmp, double undef, uint 1
-	%tmp6 = cast <2 x double> %tmp5 to <4 x float>
-	%tmp = extractelement <4 x float> %tmp, uint 0
-	%tmp7 = extractelement <4 x float> %tmp, uint 1
-	%tmp8 = extractelement <4 x float> %tmp6, uint 0
-	%tmp9 = extractelement <4 x float> %tmp6, uint 1
-	%tmp10 = insertelement <4 x float> undef, float %tmp, uint 0
-	%tmp11 = insertelement <4 x float> %tmp10, float %tmp7, uint 1
-	%tmp12 = insertelement <4 x float> %tmp11, float %tmp8, uint 2
-	%tmp13 = insertelement <4 x float> %tmp12, float %tmp9, uint 3
+define void @test2(<4 x float>* %r, <2 x i32>* %A) {
+	%tmp = load <4 x float>* %r		; <<4 x float>> [#uses=2]
+	%tmp.upgrd.3 = bitcast <2 x i32>* %A to double*		; <double*> [#uses=1]
+	%tmp.upgrd.4 = load double* %tmp.upgrd.3		; <double> [#uses=1]
+	%tmp.upgrd.5 = insertelement <2 x double> undef, double %tmp.upgrd.4, i32 0		; <<2 x double>> [#uses=1]
+	%tmp5 = insertelement <2 x double> %tmp.upgrd.5, double undef, i32 1		; <<2 x double>> [#uses=1]
+	%tmp6 = bitcast <2 x double> %tmp5 to <4 x float>		; <<4 x float>> [#uses=2]
+	%tmp.upgrd.6 = extractelement <4 x float> %tmp, i32 0		; <float> [#uses=1]
+	%tmp7 = extractelement <4 x float> %tmp, i32 1		; <float> [#uses=1]
+	%tmp8 = extractelement <4 x float> %tmp6, i32 0		; <float> [#uses=1]
+	%tmp9 = extractelement <4 x float> %tmp6, i32 1		; <float> [#uses=1]
+	%tmp10 = insertelement <4 x float> undef, float %tmp.upgrd.6, i32 0		; <<4 x float>> [#uses=1]
+	%tmp11 = insertelement <4 x float> %tmp10, float %tmp7, i32 1		; <<4 x float>> [#uses=1]
+	%tmp12 = insertelement <4 x float> %tmp11, float %tmp8, i32 2		; <<4 x float>> [#uses=1]
+	%tmp13 = insertelement <4 x float> %tmp12, float %tmp9, i32 3		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp13, <4 x float>* %r
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/X86/vec_shuffle-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shuffle-3.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shuffle-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shuffle-3.ll Thu Feb 21 01:42:26 2008
@@ -1,20 +1,21 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 -o %t -f
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -o %t -f
 ; RUN: grep movlhps %t | count 1
 ; RUN: grep movhlps %t | count 1
 
-<4 x float> %test1(<4 x float>* %x, <4 x float>* %y) {
-	%tmp = load <4 x float>* %y
-	%tmp5 = load <4 x float>* %x
-	%tmp9 = add <4 x float> %tmp5, %tmp
-	%tmp21 = sub <4 x float> %tmp5, %tmp
-	%tmp27 = shufflevector <4 x float> %tmp9, <4 x float> %tmp21, <4 x uint> < uint 0, uint 1, uint 4, uint 5 >
-	ret <4 x float> %tmp27
+define <4 x float> @test1(<4 x float>* %x, <4 x float>* %y) {
+        %tmp = load <4 x float>* %y             ; <<4 x float>> [#uses=2]
+        %tmp5 = load <4 x float>* %x            ; <<4 x float>> [#uses=2]
+        %tmp9 = add <4 x float> %tmp5, %tmp             ; <<4 x float>> [#uses=1]
+        %tmp21 = sub <4 x float> %tmp5, %tmp            ; <<4 x float>> [#uses=1]
+        %tmp27 = shufflevector <4 x float> %tmp9, <4 x float> %tmp21, <4 x i32> < i32 0, i32 1, i32 4, i32 5 >                ; <<4 x float>> [#uses=1]
+        ret <4 x float> %tmp27
 }
 
-<4 x float> %movhl(<4 x float>* %x, <4 x float>* %y) {
+define <4 x float> @movhl(<4 x float>* %x, <4 x float>* %y) {
 entry:
-	%tmp = load <4 x float>* %y
-	%tmp3 = load <4 x float>* %x
-	%tmp4 = shufflevector <4 x float> %tmp3, <4 x float> %tmp, <4 x uint> < uint 2, uint 3, uint 6, uint 7 >
-	ret <4 x float> %tmp4
+        %tmp = load <4 x float>* %y             ; <<4 x float>> [#uses=1]
+        %tmp3 = load <4 x float>* %x            ; <<4 x float>> [#uses=1]
+        %tmp4 = shufflevector <4 x float> %tmp3, <4 x float> %tmp, <4 x i32> < i32 2, i32 3, i32 6, i32 7 >           ; <<4 x float>> [#uses=1]
+        ret <4 x float> %tmp4
 }
+

Modified: llvm/trunk/test/CodeGen/X86/vec_shuffle-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shuffle-4.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shuffle-4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shuffle-4.ll Thu Feb 21 01:42:26 2008
@@ -1,10 +1,12 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 > %t
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 > %t
 ; RUN: grep shuf %t | count 2
 ; RUN: not grep unpck %t
-void %test(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B, <4 x float>* %C) {
-	%tmp3 = load <4 x float>* %B
-	%tmp5 = load <4 x float>* %C
-	%tmp11 = shufflevector <4 x float> %tmp3, <4 x float> %tmp5, <4 x uint> < uint 1, uint 4, uint 1, uint 5 >
-	store <4 x float> %tmp11, <4 x float>* %res
-	ret void
+
+define void @test(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B, <4 x float>* %C) {
+        %tmp3 = load <4 x float>* %B            ; <<4 x float>> [#uses=1]
+        %tmp5 = load <4 x float>* %C            ; <<4 x float>> [#uses=1]
+        %tmp11 = shufflevector <4 x float> %tmp3, <4 x float> %tmp5, <4 x i32> < i32 1, i32 4, i32 1, i32 5 >         ; <<4 x float>> [#uses=1]
+        store <4 x float> %tmp11, <4 x float>* %res
+        ret void
 }
+

Modified: llvm/trunk/test/CodeGen/X86/vec_shuffle-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shuffle-5.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shuffle-5.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shuffle-5.ll Thu Feb 21 01:42:26 2008
@@ -1,13 +1,13 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 -o %t  -f
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -o %t  -f
 ; RUN: grep movhlps %t | count 1
 ; RUN: grep shufps  %t | count 1
 
-void %test() {
-	%tmp1 = load <4 x float>* null
-	%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, <4 x uint> < uint 0, uint 1, uint 6, uint 7 >
-	%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x uint> < uint 2, uint 3, uint 6, uint 7 >
-	%tmp4 = add <4 x float> %tmp2, %tmp3
-	store <4 x float> %tmp4, <4 x float>* null
-	ret void
+define void @test() {
+        %tmp1 = load <4 x float>* null          ; <<4 x float>> [#uses=2]
+        %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 0, i32 1, i32 6, i32 7 >             ; <<4 x float>> [#uses=1]
+        %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 2, i32 3, i32 6, i32 7 >                ; <<4 x float>> [#uses=1]
+        %tmp4 = add <4 x float> %tmp2, %tmp3            ; <<4 x float>> [#uses=1]
+        store <4 x float> %tmp4, <4 x float>* null
+        ret void
 }
 

Modified: llvm/trunk/test/CodeGen/X86/vec_shuffle-6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shuffle-6.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shuffle-6.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shuffle-6.ll Thu Feb 21 01:42:26 2008
@@ -1,43 +1,42 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 -o %t -f
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -o %t -f
 ; RUN: grep movapd %t | count 1
 ; RUN: grep movaps %t | count 1
 ; RUN: grep movups %t | count 2
 
 target triple = "i686-apple-darwin"
+@x = global [4 x i32] [ i32 1, i32 2, i32 3, i32 4 ]		; <[4 x i32]*> [#uses=4]
 
-%x = global [4 x int] [ int 1, int 2, int 3, int 4 ]
-
-<2 x long> %test1() {
-	%tmp = load int* getelementptr ([4 x int]* %x, int 0, int 0)
-	%tmp3 = load int* getelementptr ([4 x int]* %x, int 0, int 1)
-	%tmp5 = load int* getelementptr ([4 x int]* %x, int 0, int 2)
-	%tmp7 = load int* getelementptr ([4 x int]* %x, int 0, int 3)
-	%tmp = insertelement <4 x int> undef, int %tmp, uint 0
-	%tmp13 = insertelement <4 x int> %tmp, int %tmp3, uint 1
-	%tmp14 = insertelement <4 x int> %tmp13, int %tmp5, uint 2
-	%tmp15 = insertelement <4 x int> %tmp14, int %tmp7, uint 3
-	%tmp16 = cast <4 x int> %tmp15 to <2 x long>
-	ret <2 x long> %tmp16
+define <2 x i64> @test1() {
+	%tmp = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 0)		; <i32> [#uses=1]
+	%tmp3 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 1)		; <i32> [#uses=1]
+	%tmp5 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 2)		; <i32> [#uses=1]
+	%tmp7 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 3)		; <i32> [#uses=1]
+	%tmp.upgrd.1 = insertelement <4 x i32> undef, i32 %tmp, i32 0		; <<4 x i32>> [#uses=1]
+	%tmp13 = insertelement <4 x i32> %tmp.upgrd.1, i32 %tmp3, i32 1		; <<4 x i32>> [#uses=1]
+	%tmp14 = insertelement <4 x i32> %tmp13, i32 %tmp5, i32 2		; <<4 x i32>> [#uses=1]
+	%tmp15 = insertelement <4 x i32> %tmp14, i32 %tmp7, i32 3		; <<4 x i32>> [#uses=1]
+	%tmp16 = bitcast <4 x i32> %tmp15 to <2 x i64>		; <<2 x i64>> [#uses=1]
+	ret <2 x i64> %tmp16
 }
 
-<4 x float> %test2(int %dummy, float %a, float %b, float %c, float %d) {
-	%tmp = insertelement <4 x float> undef, float %a, uint 0
-	%tmp11 = insertelement <4 x float> %tmp, float %b, uint 1
-	%tmp12 = insertelement <4 x float> %tmp11, float %c, uint 2
-	%tmp13 = insertelement <4 x float> %tmp12, float %d, uint 3
+define <4 x float> @test2(i32 %dummy, float %a, float %b, float %c, float %d) {
+	%tmp = insertelement <4 x float> undef, float %a, i32 0		; <<4 x float>> [#uses=1]
+	%tmp11 = insertelement <4 x float> %tmp, float %b, i32 1		; <<4 x float>> [#uses=1]
+	%tmp12 = insertelement <4 x float> %tmp11, float %c, i32 2		; <<4 x float>> [#uses=1]
+	%tmp13 = insertelement <4 x float> %tmp12, float %d, i32 3		; <<4 x float>> [#uses=1]
 	ret <4 x float> %tmp13
 }
 
-<4 x float> %test3(float %a, float %b, float %c, float %d) {
-	%tmp = insertelement <4 x float> undef, float %a, uint 0
-	%tmp11 = insertelement <4 x float> %tmp, float %b, uint 1
-	%tmp12 = insertelement <4 x float> %tmp11, float %c, uint 2
-	%tmp13 = insertelement <4 x float> %tmp12, float %d, uint 3
+define <4 x float> @test3(float %a, float %b, float %c, float %d) {
+	%tmp = insertelement <4 x float> undef, float %a, i32 0		; <<4 x float>> [#uses=1]
+	%tmp11 = insertelement <4 x float> %tmp, float %b, i32 1		; <<4 x float>> [#uses=1]
+	%tmp12 = insertelement <4 x float> %tmp11, float %c, i32 2		; <<4 x float>> [#uses=1]
+	%tmp13 = insertelement <4 x float> %tmp12, float %d, i32 3		; <<4 x float>> [#uses=1]
 	ret <4 x float> %tmp13
 }
 
-<2 x double> %test4(double %a, double %b) {
-	%tmp = insertelement <2 x double> undef, double %a, uint 0
-	%tmp7 = insertelement <2 x double> %tmp, double %b, uint 1
+define <2 x double> @test4(double %a, double %b) {
+	%tmp = insertelement <2 x double> undef, double %a, i32 0		; <<2 x double>> [#uses=1]
+	%tmp7 = insertelement <2 x double> %tmp, double %b, i32 1		; <<2 x double>> [#uses=1]
 	ret <2 x double> %tmp7
 }

Modified: llvm/trunk/test/CodeGen/X86/vec_shuffle-7.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shuffle-7.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shuffle-7.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shuffle-7.ll Thu Feb 21 01:42:26 2008
@@ -1,10 +1,11 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 -o %t  -f
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -o %t  -f
 ; RUN: grep xorps %t | count 1
 ; RUN: not grep shufps %t
 
-void %test() {
-	cast <4 x int> zeroinitializer to <4 x float>
-	shufflevector <4 x float> %0, <4 x float> zeroinitializer, <4 x uint> zeroinitializer
-	store <4 x float> %1, <4 x float>* null
-	unreachable
+define void @test() {
+        bitcast <4 x i32> zeroinitializer to <4 x float>                ; <<4 x float>>:1 [#uses=1]
+        shufflevector <4 x float> %1, <4 x float> zeroinitializer, <4 x i32> zeroinitializer         ; <<4 x float>>:2 [#uses=1]
+        store <4 x float> %2, <4 x float>* null
+        unreachable
 }
+

Modified: llvm/trunk/test/CodeGen/X86/vec_shuffle-8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shuffle-8.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shuffle-8.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shuffle-8.ll Thu Feb 21 01:42:26 2008
@@ -1,9 +1,10 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 | \
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | \
 ; RUN:   not grep shufps
 
-void %test(<4 x float>* %res, <4 x float>* %A) {
-	%tmp1 = load <4 x float>* %A
-	%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x uint> < uint 0, uint 5, uint 6, uint 7 >
-	store <4 x float> %tmp2, <4 x float>* %res
-	ret void
+define void @test(<4 x float>* %res, <4 x float>* %A) {
+        %tmp1 = load <4 x float>* %A            ; <<4 x float>> [#uses=1]
+        %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> < i32 0, i32 5, i32 6, i32 7 >          ; <<4 x float>> [#uses=1]
+        store <4 x float> %tmp2, <4 x float>* %res
+        ret void
 }
+

Modified: llvm/trunk/test/CodeGen/X86/vec_shuffle-9.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shuffle-9.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shuffle-9.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shuffle-9.ll Thu Feb 21 01:42:26 2008
@@ -1,20 +1,20 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 -o %t -f
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -o %t -f
 ; RUN: grep punpck %t | count 2
 ; RUN: not grep pextrw %t
 
-<4 x int> %test(sbyte** %ptr) {
+define <4 x i32> @test(i8** %ptr) {
 entry:
-	%tmp = load sbyte** %ptr
-	%tmp = cast sbyte* %tmp to float*
-	%tmp = load float* %tmp
-	%tmp = insertelement <4 x float> undef, float %tmp, uint 0
-	%tmp9 = insertelement <4 x float> %tmp, float 0.000000e+00, uint 1
-	%tmp10 = insertelement <4 x float> %tmp9, float 0.000000e+00, uint 2
-	%tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, uint 3
-	%tmp21 = cast <4 x float> %tmp11 to <16 x sbyte>
-	%tmp22 = shufflevector <16 x sbyte> %tmp21, <16 x sbyte> zeroinitializer, <16 x uint> < uint 0, uint 16, uint 1, uint 17, uint 2, uint 18, uint 3, uint 19, uint 4, uint 20, uint 5, uint 21, uint 6, uint 22, uint 7, uint 23 >
-	%tmp31 = cast <16 x sbyte> %tmp22 to <8 x short>
-	%tmp = shufflevector <8 x short> zeroinitializer, <8 x short> %tmp31, <8 x uint> < uint 0, uint 8, uint 1, uint 9, uint 2, uint 10, uint 3, uint 11 >
-	%tmp36 = cast <8 x short> %tmp to <4 x int>
-	ret <4 x int> %tmp36
+	%tmp = load i8** %ptr		; <i8*> [#uses=1]
+	%tmp.upgrd.1 = bitcast i8* %tmp to float*		; <float*> [#uses=1]
+	%tmp.upgrd.2 = load float* %tmp.upgrd.1		; <float> [#uses=1]
+	%tmp.upgrd.3 = insertelement <4 x float> undef, float %tmp.upgrd.2, i32 0		; <<4 x float>> [#uses=1]
+	%tmp9 = insertelement <4 x float> %tmp.upgrd.3, float 0.000000e+00, i32 1		; <<4 x float>> [#uses=1]
+	%tmp10 = insertelement <4 x float> %tmp9, float 0.000000e+00, i32 2		; <<4 x float>> [#uses=1]
+	%tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 3		; <<4 x float>> [#uses=1]
+	%tmp21 = bitcast <4 x float> %tmp11 to <16 x i8>		; <<16 x i8>> [#uses=1]
+	%tmp22 = shufflevector <16 x i8> %tmp21, <16 x i8> zeroinitializer, <16 x i32> < i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23 >		; <<16 x i8>> [#uses=1]
+	%tmp31 = bitcast <16 x i8> %tmp22 to <8 x i16>		; <<8 x i16>> [#uses=1]
+	%tmp.upgrd.4 = shufflevector <8 x i16> zeroinitializer, <8 x i16> %tmp31, <8 x i32> < i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11 >		; <<8 x i16>> [#uses=1]
+	%tmp36 = bitcast <8 x i16> %tmp.upgrd.4 to <4 x i32>		; <<4 x i32>> [#uses=1]
+	ret <4 x i32> %tmp36
 }

Modified: llvm/trunk/test/CodeGen/X86/vec_shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shuffle.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shuffle.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shuffle.ll Thu Feb 21 01:42:26 2008
@@ -1,44 +1,44 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 -o %t -f
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -o %t -f
 ; RUN: grep shufp   %t | count 1
 ; RUN: grep movupd  %t | count 1
 ; RUN: grep pshufhw %t | count 1
 
-void %test_v4sf(<4 x float>* %P, float %X, float %Y) {
-	%tmp = insertelement <4 x float> zeroinitializer, float %X, uint 0
-	%tmp2 = insertelement <4 x float> %tmp, float %X, uint 1
-	%tmp4 = insertelement <4 x float> %tmp2, float %Y, uint 2
-	%tmp6 = insertelement <4 x float> %tmp4, float %Y, uint 3
+define void @test_v4sf(<4 x float>* %P, float %X, float %Y) {
+	%tmp = insertelement <4 x float> zeroinitializer, float %X, i32 0		; <<4 x float>> [#uses=1]
+	%tmp2 = insertelement <4 x float> %tmp, float %X, i32 1		; <<4 x float>> [#uses=1]
+	%tmp4 = insertelement <4 x float> %tmp2, float %Y, i32 2		; <<4 x float>> [#uses=1]
+	%tmp6 = insertelement <4 x float> %tmp4, float %Y, i32 3		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp6, <4 x float>* %P
 	ret void
 }
 
-void %test_v2sd(<2 x double>* %P, double %X, double %Y) {
-	%tmp = insertelement <2 x double> zeroinitializer, double %X, uint 0
-	%tmp2 = insertelement <2 x double> %tmp, double %Y, uint 1
+define void @test_v2sd(<2 x double>* %P, double %X, double %Y) {
+	%tmp = insertelement <2 x double> zeroinitializer, double %X, i32 0		; <<2 x double>> [#uses=1]
+	%tmp2 = insertelement <2 x double> %tmp, double %Y, i32 1		; <<2 x double>> [#uses=1]
 	store <2 x double> %tmp2, <2 x double>* %P
 	ret void
 }
 
-void %test_v8i16(<2 x long>* %res, <2 x long>* %A) {
-	%tmp = load <2 x long>* %A
-	%tmp = cast <2 x long> %tmp to <8 x short>
-	%tmp = extractelement <8 x short> %tmp, uint 0
-	%tmp1 = extractelement <8 x short> %tmp, uint 1
-	%tmp2 = extractelement <8 x short> %tmp, uint 2
-	%tmp3 = extractelement <8 x short> %tmp, uint 3
-	%tmp4 = extractelement <8 x short> %tmp, uint 6
-	%tmp5 = extractelement <8 x short> %tmp, uint 5
-	%tmp6 = extractelement <8 x short> %tmp, uint 4
-	%tmp7 = extractelement <8 x short> %tmp, uint 7
-	%tmp8 = insertelement <8 x short> undef, short %tmp, uint 0
-	%tmp9 = insertelement <8 x short> %tmp8, short %tmp1, uint 1
-	%tmp10 = insertelement <8 x short> %tmp9, short %tmp2, uint 2
-	%tmp11 = insertelement <8 x short> %tmp10, short %tmp3, uint 3
-	%tmp12 = insertelement <8 x short> %tmp11, short %tmp4, uint 4
-	%tmp13 = insertelement <8 x short> %tmp12, short %tmp5, uint 5
-	%tmp14 = insertelement <8 x short> %tmp13, short %tmp6, uint 6
-	%tmp15 = insertelement <8 x short> %tmp14, short %tmp7, uint 7
-	%tmp15 = cast <8 x short> %tmp15 to <2 x long>
-	store <2 x long> %tmp15, <2 x long>* %res
+define void @test_v8i16(<2 x i64>* %res, <2 x i64>* %A) {
+	%tmp = load <2 x i64>* %A		; <<2 x i64>> [#uses=1]
+	%tmp.upgrd.1 = bitcast <2 x i64> %tmp to <8 x i16>		; <<8 x i16>> [#uses=8]
+	%tmp.upgrd.2 = extractelement <8 x i16> %tmp.upgrd.1, i32 0		; <i16> [#uses=1]
+	%tmp1 = extractelement <8 x i16> %tmp.upgrd.1, i32 1		; <i16> [#uses=1]
+	%tmp2 = extractelement <8 x i16> %tmp.upgrd.1, i32 2		; <i16> [#uses=1]
+	%tmp3 = extractelement <8 x i16> %tmp.upgrd.1, i32 3		; <i16> [#uses=1]
+	%tmp4 = extractelement <8 x i16> %tmp.upgrd.1, i32 6		; <i16> [#uses=1]
+	%tmp5 = extractelement <8 x i16> %tmp.upgrd.1, i32 5		; <i16> [#uses=1]
+	%tmp6 = extractelement <8 x i16> %tmp.upgrd.1, i32 4		; <i16> [#uses=1]
+	%tmp7 = extractelement <8 x i16> %tmp.upgrd.1, i32 7		; <i16> [#uses=1]
+	%tmp8 = insertelement <8 x i16> undef, i16 %tmp.upgrd.2, i32 0		; <<8 x i16>> [#uses=1]
+	%tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 1		; <<8 x i16>> [#uses=1]
+	%tmp10 = insertelement <8 x i16> %tmp9, i16 %tmp2, i32 2		; <<8 x i16>> [#uses=1]
+	%tmp11 = insertelement <8 x i16> %tmp10, i16 %tmp3, i32 3		; <<8 x i16>> [#uses=1]
+	%tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp4, i32 4		; <<8 x i16>> [#uses=1]
+	%tmp13 = insertelement <8 x i16> %tmp12, i16 %tmp5, i32 5		; <<8 x i16>> [#uses=1]
+	%tmp14 = insertelement <8 x i16> %tmp13, i16 %tmp6, i32 6		; <<8 x i16>> [#uses=1]
+	%tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 7		; <<8 x i16>> [#uses=1]
+	%tmp15.upgrd.3 = bitcast <8 x i16> %tmp15 to <2 x i64>		; <<2 x i64>> [#uses=1]
+	store <2 x i64> %tmp15.upgrd.3, <2 x i64>* %res
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/X86/vec_splat-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_splat-2.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_splat-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_splat-2.ll Thu Feb 21 01:42:26 2008
@@ -1,26 +1,26 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 | grep pshufd | count 1
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | grep pshufd | count 1
 
-void %test(<2 x long>* %P, sbyte %x) {
-	%tmp = insertelement <16 x sbyte> zeroinitializer, sbyte %x, uint 0		; <<16 x sbyte>> [#uses=1]
-	%tmp36 = insertelement <16 x sbyte> %tmp, sbyte %x, uint 1
-	%tmp38 = insertelement <16 x sbyte> %tmp36, sbyte %x, uint 2
-	%tmp40 = insertelement <16 x sbyte> %tmp38, sbyte %x, uint 3
-	%tmp42 = insertelement <16 x sbyte> %tmp40, sbyte %x, uint 4
-	%tmp44 = insertelement <16 x sbyte> %tmp42, sbyte %x, uint 5
-	%tmp46 = insertelement <16 x sbyte> %tmp44, sbyte %x, uint 6
-	%tmp48 = insertelement <16 x sbyte> %tmp46, sbyte %x, uint 7
-	%tmp50 = insertelement <16 x sbyte> %tmp48, sbyte %x, uint 8
-	%tmp52 = insertelement <16 x sbyte> %tmp50, sbyte %x, uint 9
-	%tmp54 = insertelement <16 x sbyte> %tmp52, sbyte %x, uint 10
-	%tmp56 = insertelement <16 x sbyte> %tmp54, sbyte %x, uint 11
-	%tmp58 = insertelement <16 x sbyte> %tmp56, sbyte %x, uint 12
-	%tmp60 = insertelement <16 x sbyte> %tmp58, sbyte %x, uint 13
-	%tmp62 = insertelement <16 x sbyte> %tmp60, sbyte %x, uint 14
-	%tmp64 = insertelement <16 x sbyte> %tmp62, sbyte %x, uint 15
-	%tmp68 = load <2 x long>* %P
-	%tmp71 = cast <2 x long> %tmp68 to <16 x sbyte>
-	%tmp73 = add <16 x sbyte> %tmp71, %tmp64
-	%tmp73 = cast <16 x sbyte> %tmp73 to <2 x long>
-	store <2 x long> %tmp73, <2 x long>* %P
+define void @test(<2 x i64>* %P, i8 %x) {
+	%tmp = insertelement <16 x i8> zeroinitializer, i8 %x, i32 0		; <<16 x i8>> [#uses=1]
+	%tmp36 = insertelement <16 x i8> %tmp, i8 %x, i32 1		; <<16 x i8>> [#uses=1]
+	%tmp38 = insertelement <16 x i8> %tmp36, i8 %x, i32 2		; <<16 x i8>> [#uses=1]
+	%tmp40 = insertelement <16 x i8> %tmp38, i8 %x, i32 3		; <<16 x i8>> [#uses=1]
+	%tmp42 = insertelement <16 x i8> %tmp40, i8 %x, i32 4		; <<16 x i8>> [#uses=1]
+	%tmp44 = insertelement <16 x i8> %tmp42, i8 %x, i32 5		; <<16 x i8>> [#uses=1]
+	%tmp46 = insertelement <16 x i8> %tmp44, i8 %x, i32 6		; <<16 x i8>> [#uses=1]
+	%tmp48 = insertelement <16 x i8> %tmp46, i8 %x, i32 7		; <<16 x i8>> [#uses=1]
+	%tmp50 = insertelement <16 x i8> %tmp48, i8 %x, i32 8		; <<16 x i8>> [#uses=1]
+	%tmp52 = insertelement <16 x i8> %tmp50, i8 %x, i32 9		; <<16 x i8>> [#uses=1]
+	%tmp54 = insertelement <16 x i8> %tmp52, i8 %x, i32 10		; <<16 x i8>> [#uses=1]
+	%tmp56 = insertelement <16 x i8> %tmp54, i8 %x, i32 11		; <<16 x i8>> [#uses=1]
+	%tmp58 = insertelement <16 x i8> %tmp56, i8 %x, i32 12		; <<16 x i8>> [#uses=1]
+	%tmp60 = insertelement <16 x i8> %tmp58, i8 %x, i32 13		; <<16 x i8>> [#uses=1]
+	%tmp62 = insertelement <16 x i8> %tmp60, i8 %x, i32 14		; <<16 x i8>> [#uses=1]
+	%tmp64 = insertelement <16 x i8> %tmp62, i8 %x, i32 15		; <<16 x i8>> [#uses=1]
+	%tmp68 = load <2 x i64>* %P		; <<2 x i64>> [#uses=1]
+	%tmp71 = bitcast <2 x i64> %tmp68 to <16 x i8>		; <<16 x i8>> [#uses=1]
+	%tmp73 = add <16 x i8> %tmp71, %tmp64		; <<16 x i8>> [#uses=1]
+	%tmp73.upgrd.1 = bitcast <16 x i8> %tmp73 to <2 x i64>		; <<2 x i64>> [#uses=1]
+	store <2 x i64> %tmp73.upgrd.1, <2 x i64>* %P
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/X86/vec_splat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_splat.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_splat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_splat.ll Thu Feb 21 01:42:26 2008
@@ -1,22 +1,22 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 | grep shufps 
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse3 | grep movddup
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | grep shufps               
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse3 | grep movddup
 
-void %test_v4sf(<4 x float>* %P, <4 x float>* %Q, float %X) {
-	%tmp = insertelement <4 x float> zeroinitializer, float %X, uint 0
-	%tmp2 = insertelement <4 x float> %tmp, float %X, uint 1
-	%tmp4 = insertelement <4 x float> %tmp2, float %X, uint 2
-	%tmp6 = insertelement <4 x float> %tmp4, float %X, uint 3
-	%tmp8 = load <4 x float>* %Q
-	%tmp10 = mul <4 x float> %tmp8, %tmp6
+define void @test_v4sf(<4 x float>* %P, <4 x float>* %Q, float %X) {
+	%tmp = insertelement <4 x float> zeroinitializer, float %X, i32 0		; <<4 x float>> [#uses=1]
+	%tmp2 = insertelement <4 x float> %tmp, float %X, i32 1		; <<4 x float>> [#uses=1]
+	%tmp4 = insertelement <4 x float> %tmp2, float %X, i32 2		; <<4 x float>> [#uses=1]
+	%tmp6 = insertelement <4 x float> %tmp4, float %X, i32 3		; <<4 x float>> [#uses=1]
+	%tmp8 = load <4 x float>* %Q		; <<4 x float>> [#uses=1]
+	%tmp10 = mul <4 x float> %tmp8, %tmp6		; <<4 x float>> [#uses=1]
 	store <4 x float> %tmp10, <4 x float>* %P
 	ret void
 }
 
-void %test_v2sd(<2 x double>* %P, <2 x double>* %Q, double %X) {
-	%tmp = insertelement <2 x double> zeroinitializer, double %X, uint 0
-	%tmp2 = insertelement <2 x double> %tmp, double %X, uint 1
-	%tmp4 = load <2 x double>* %Q
-	%tmp6 = mul <2 x double> %tmp4, %tmp2
+define void @test_v2sd(<2 x double>* %P, <2 x double>* %Q, double %X) {
+	%tmp = insertelement <2 x double> zeroinitializer, double %X, i32 0		; <<2 x double>> [#uses=1]
+	%tmp2 = insertelement <2 x double> %tmp, double %X, i32 1		; <<2 x double>> [#uses=1]
+	%tmp4 = load <2 x double>* %Q		; <<2 x double>> [#uses=1]
+	%tmp6 = mul <2 x double> %tmp4, %tmp2		; <<2 x double>> [#uses=1]
 	store <2 x double> %tmp6, <2 x double>* %P
 	ret void
 }

Modified: llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll Thu Feb 21 01:42:26 2008
@@ -1,47 +1,41 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse,+sse2 -o %t -f
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse,+sse2 -o %t -f
 ; RUN: grep minss %t | grep CPI | count 2
 ; RUN: grep CPI   %t | not grep movss
 
-target endian = little
-target pointersize = 32
+target datalayout = "e-p:32:32"
 target triple = "i686-apple-darwin8.7.2"
 
-implementation   ; Functions:
-
-ushort %test1(float %f) {
-        %tmp = insertelement <4 x float> undef, float %f, uint 0                ; <<4 x float>> [#uses=1]
-        %tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, uint 1             ; <<4 x float>> [#uses=1]
-        %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, uint 2           ; <<4 x float>> [#uses=1]
-        %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, uint 3           ; <<4 x float>> [#uses=1]
-        %tmp28 = tail call <4 x float> %llvm.x86.sse.sub.ss( <4 x float> %tmp12, <4 x float> < float 1.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )               ; <<4 x float>> [#uses=1]
-        %tmp37 = tail call <4 x float> %llvm.x86.sse.mul.ss( <4 x float> %tmp28, <4 x float> < float 5.000000e-01, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )               ; <<4 x float>> [#uses=1]
-        %tmp48 = tail call <4 x float> %llvm.x86.sse.min.ss( <4 x float> %tmp37, <4 x float> < float 6.553500e+04, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )               ; <<4 x float>> [#uses=1]
-        %tmp59 = tail call <4 x float> %llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> zeroinitializer )          ; <<4 x float>> [#uses=1]
-        %tmp = tail call int %llvm.x86.sse.cvttss2si( <4 x float> %tmp59 )              ; <int> [#uses=1]
-        %tmp69 = cast int %tmp to ushort                ; <ushort> [#uses=1]
-        ret ushort %tmp69
+define i16 @test1(float %f) {
+	%tmp = insertelement <4 x float> undef, float %f, i32 0		; <<4 x float>> [#uses=1]
+	%tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1		; <<4 x float>> [#uses=1]
+	%tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2		; <<4 x float>> [#uses=1]
+	%tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3		; <<4 x float>> [#uses=1]
+	%tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %tmp12, <4 x float> < float 1.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )		; <<4 x float>> [#uses=1]
+	%tmp37 = tail call <4 x float> @llvm.x86.sse.mul.ss( <4 x float> %tmp28, <4 x float> < float 5.000000e-01, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )		; <<4 x float>> [#uses=1]
+	%tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp37, <4 x float> < float 6.553500e+04, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > )		; <<4 x float>> [#uses=1]
+	%tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> zeroinitializer )		; <<4 x float>> [#uses=1]
+	%tmp.upgrd.1 = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 )		; <i32> [#uses=1]
+	%tmp69 = trunc i32 %tmp.upgrd.1 to i16		; <i16> [#uses=1]
+	ret i16 %tmp69
 }
 
-ushort %test2(float %f) {
-        %tmp28 = sub float %f, 1.000000e+00             ; <float> [#uses=1]
-        %tmp37 = mul float %tmp28, 5.000000e-01         ; <float> [#uses=1]
-        %tmp375 = insertelement <4 x float> undef, float %tmp37, uint 0         ; <<4 x float>> [#uses=1]
-        %tmp48 = tail call <4 x float> %llvm.x86.sse.min.ss( <4 x float> %tmp375, <4 x float> < float 6.553500e+04, float undef, float undef, float undef > )           ; <<4 x float>> [#uses=1]
-        %tmp59 = tail call <4 x float> %llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> < float 0.000000e+00, float undef, float undef, float undef > )            ; <<4 x float>> [#uses=1]
-        %tmp = tail call int %llvm.x86.sse.cvttss2si( <4 x float> %tmp59 )              ; <int> [#uses=1]
-        %tmp69 = cast int %tmp to ushort                ; <ushort> [#uses=1]
-        ret ushort %tmp69
+define i16 @test2(float %f) {
+	%tmp28 = sub float %f, 1.000000e+00		; <float> [#uses=1]
+	%tmp37 = mul float %tmp28, 5.000000e-01		; <float> [#uses=1]
+	%tmp375 = insertelement <4 x float> undef, float %tmp37, i32 0		; <<4 x float>> [#uses=1]
+	%tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp375, <4 x float> < float 6.553500e+04, float undef, float undef, float undef > )		; <<4 x float>> [#uses=1]
+	%tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> < float 0.000000e+00, float undef, float undef, float undef > )		; <<4 x float>> [#uses=1]
+	%tmp = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 )		; <i32> [#uses=1]
+	%tmp69 = trunc i32 %tmp to i16		; <i16> [#uses=1]
+	ret i16 %tmp69
 }
 
+declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
 
-declare <4 x float> %llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
-
-declare <4 x float> %llvm.x86.sse.mul.ss(<4 x float>, <4 x float>)
-
-declare <4 x float> %llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
-
-declare <4 x float> %llvm.x86.sse.max.ss(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>)
 
-declare int %llvm.x86.sse.cvttss2si(<4 x float>)
+declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
 
+declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>)
 
+declare i32 @llvm.x86.sse.cvttss2si(<4 x float>)

Modified: llvm/trunk/test/CodeGen/X86/vec_zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_zero.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_zero.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_zero.ll Thu Feb 21 01:42:26 2008
@@ -1,15 +1,16 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 | grep xorps | count 2
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | grep xorps | count 2
 
-void %foo(<4 x float> *%P) {
-	%T = load <4 x float> * %P
-	%S = add <4 x float> zeroinitializer, %T
-	store <4 x float> %S, <4 x float>* %P
-	ret void
+define void @foo(<4 x float>* %P) {
+        %T = load <4 x float>* %P               ; <<4 x float>> [#uses=1]
+        %S = add <4 x float> zeroinitializer, %T                ; <<4 x float>> [#uses=1]
+        store <4 x float> %S, <4 x float>* %P
+        ret void
 }
 
-void %bar(<4 x int> *%P) {
-	%T = load <4 x int> * %P
-	%S = add <4 x int> zeroinitializer, %T
-	store <4 x int> %S, <4 x int>* %P
-	ret void
+define void @bar(<4 x i32>* %P) {
+        %T = load <4 x i32>* %P         ; <<4 x i32>> [#uses=1]
+        %S = add <4 x i32> zeroinitializer, %T          ; <<4 x i32>> [#uses=1]
+        store <4 x i32> %S, <4 x i32>* %P
+        ret void
 }
+

Modified: llvm/trunk/test/CodeGen/X86/vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector.ll Thu Feb 21 01:42:26 2008
@@ -1,156 +1,155 @@
 ; Test that vectors are scalarized/lowered correctly.
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=i386
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=yonah
+; RUN: llvm-as < %s | llc -march=x86 -mcpu=i386
+; RUN: llvm-as < %s | llc -march=x86 -mcpu=yonah
 
+%d8 = type <8 x double>
 %f1 = type <1 x float>
 %f2 = type <2 x float>
 %f4 = type <4 x float>
-%i4 = type <4 x int>
 %f8 = type <8 x float>
-%d8 = type <8 x double>
+%i4 = type <4 x i32>
 
-implementation
 
 ;;; TEST HANDLING OF VARIOUS VECTOR SIZES
 
-void %test_f1(%f1 *%P, %f1* %Q, %f1 *%S) {
-  %p = load %f1 *%P
-  %q = load %f1* %Q
-  %R = add %f1 %p, %q
-  store %f1 %R, %f1 *%S
-  ret void
-}
-
-void %test_f2(%f2 *%P, %f2* %Q, %f2 *%S) {
-  %p = load %f2* %P
-  %q = load %f2* %Q
-  %R = add %f2 %p, %q
-  store %f2 %R, %f2 *%S
-  ret void
-}
-
-void %test_f4(%f4 *%P, %f4* %Q, %f4 *%S) {
-  %p = load %f4* %P
-  %q = load %f4* %Q
-  %R = add %f4 %p, %q
-  store %f4 %R, %f4 *%S
-  ret void
-}
-
-void %test_f8(%f8 *%P, %f8* %Q, %f8 *%S) {
-  %p = load %f8* %P
-  %q = load %f8* %Q
-  %R = add %f8 %p, %q
-  store %f8 %R, %f8 *%S
-  ret void
-}
-
-void %test_fmul(%f8 *%P, %f8* %Q, %f8 *%S) {
-  %p = load %f8* %P
-  %q = load %f8* %Q
-  %R = mul %f8 %p, %q
-  store %f8 %R, %f8 *%S
-  ret void
-}
-
-void %test_div(%f8 *%P, %f8* %Q, %f8 *%S) {
-  %p = load %f8* %P
-  %q = load %f8* %Q
-  %R = div %f8 %p, %q
-  store %f8 %R, %f8 *%S
-  ret void
+define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
+        %p = load %f1* %P               ; <%f1> [#uses=1]
+        %q = load %f1* %Q               ; <%f1> [#uses=1]
+        %R = add %f1 %p, %q             ; <%f1> [#uses=1]
+        store %f1 %R, %f1* %S
+        ret void
 }
 
-;;; TEST VECTOR CONSTRUCTS
+define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
+        %p = load %f2* %P               ; <%f2> [#uses=1]
+        %q = load %f2* %Q               ; <%f2> [#uses=1]
+        %R = add %f2 %p, %q             ; <%f2> [#uses=1]
+        store %f2 %R, %f2* %S
+        ret void
+}
+
+define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
+        %p = load %f4* %P               ; <%f4> [#uses=1]
+        %q = load %f4* %Q               ; <%f4> [#uses=1]
+        %R = add %f4 %p, %q             ; <%f4> [#uses=1]
+        store %f4 %R, %f4* %S
+        ret void
+}
+
+define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
+        %p = load %f8* %P               ; <%f8> [#uses=1]
+        %q = load %f8* %Q               ; <%f8> [#uses=1]
+        %R = add %f8 %p, %q             ; <%f8> [#uses=1]
+        store %f8 %R, %f8* %S
+        ret void
+}
 
-void %test_cst(%f4 *%P, %f4 *%S) {
-  %p = load %f4* %P
-  %R = add %f4 %p, <float 0x3FB99999A0000000, float 1.0, float 2.0, float 4.5>
-  store %f4 %R, %f4 *%S
-  ret void
+define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) {
+        %p = load %f8* %P               ; <%f8> [#uses=1]
+        %q = load %f8* %Q               ; <%f8> [#uses=1]
+        %R = mul %f8 %p, %q             ; <%f8> [#uses=1]
+        store %f8 %R, %f8* %S
+        ret void
 }
 
-void %test_zero(%f4 *%P, %f4 *%S) {
-  %p = load %f4* %P
-  %R = add %f4 %p, zeroinitializer
-  store %f4 %R, %f4 *%S
-  ret void
+define void @test_div(%f8* %P, %f8* %Q, %f8* %S) {
+        %p = load %f8* %P               ; <%f8> [#uses=1]
+        %q = load %f8* %Q               ; <%f8> [#uses=1]
+        %R = fdiv %f8 %p, %q            ; <%f8> [#uses=1]
+        store %f8 %R, %f8* %S
+        ret void
 }
 
-void %test_undef(%f4 *%P, %f4 *%S) {
-  %p = load %f4* %P
-  %R = add %f4 %p, undef
-  store %f4 %R, %f4 *%S
-  ret void
+;;; TEST VECTOR CONSTRUCTS
+
+define void @test_cst(%f4* %P, %f4* %S) {
+        %p = load %f4* %P               ; <%f4> [#uses=1]
+        %R = add %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float 2.000000e+00, float 4.500000e+00 >             ; <%f4> [#uses=1]
+        store %f4 %R, %f4* %S
+        ret void
 }
 
-void %test_constant_insert(%f4 *%S) {
-  %R = insertelement %f4 zeroinitializer, float 10.0, uint 0
-  store %f4 %R, %f4 *%S
-  ret void
+define void @test_zero(%f4* %P, %f4* %S) {
+        %p = load %f4* %P               ; <%f4> [#uses=1]
+        %R = add %f4 %p, zeroinitializer                ; <%f4> [#uses=1]
+        store %f4 %R, %f4* %S
+        ret void
 }
 
-void %test_variable_buildvector(float %F, %f4 *%S) {
-  %R = insertelement %f4 zeroinitializer, float %F, uint 0
-  store %f4 %R, %f4 *%S
-  ret void
+define void @test_undef(%f4* %P, %f4* %S) {
+        %p = load %f4* %P               ; <%f4> [#uses=1]
+        %R = add %f4 %p, undef          ; <%f4> [#uses=1]
+        store %f4 %R, %f4* %S
+        ret void
 }
 
-void %test_scalar_to_vector(float %F, %f4 *%S) {
-  %R = insertelement %f4 undef, float %F, uint 0   ;; R = scalar_to_vector F
-  store %f4 %R, %f4 *%S
-  ret void
+define void @test_constant_insert(%f4* %S) {
+        %R = insertelement %f4 zeroinitializer, float 1.000000e+01, i32 0               ; <%f4> [#uses=1]
+        store %f4 %R, %f4* %S
+        ret void
 }
 
-float %test_extract_elt(%f8 *%P) {
-  %p = load %f8* %P
-  %R = extractelement %f8 %p, uint 3
-  ret float %R
+define void @test_variable_buildvector(float %F, %f4* %S) {
+        %R = insertelement %f4 zeroinitializer, float %F, i32 0         ; <%f4> [#uses=1]
+        store %f4 %R, %f4* %S
+        ret void
 }
 
-double %test_extract_elt2(%d8 *%P) {
-  %p = load %d8* %P
-  %R = extractelement %d8 %p, uint 3
-  ret double %R
+define void @test_scalar_to_vector(float %F, %f4* %S) {
+        %R = insertelement %f4 undef, float %F, i32 0           ; <%f4> [#uses=1]
+        store %f4 %R, %f4* %S
+        ret void
 }
 
-void %test_cast_1(<4 x float>* %b, <4 x int>* %a) {
-  %tmp = load <4 x float>* %b
-  %tmp2 = add <4 x float> %tmp, <float 1.0, float 2.0, float 3.0, float 4.0>
-  %tmp3 = cast <4 x float> %tmp2 to <4 x int>
-  %tmp4 = add <4 x int> %tmp3, <int 1, int 2, int 3, int 4>
-  store <4 x int> %tmp4, <4 x int>* %a
-  ret void
+define float @test_extract_elt(%f8* %P) {
+        %p = load %f8* %P               ; <%f8> [#uses=1]
+        %R = extractelement %f8 %p, i32 3               ; <float> [#uses=1]
+        ret float %R
+}
+
+define double @test_extract_elt2(%d8* %P) {
+        %p = load %d8* %P               ; <%d8> [#uses=1]
+        %R = extractelement %d8 %p, i32 3               ; <double> [#uses=1]
+        ret double %R
+}
+
+define void @test_cast_1(%f4* %b, %i4* %a) {
+        %tmp = load %f4* %b             ; <%f4> [#uses=1]
+        %tmp2 = add %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 >              ; <%f4> [#uses=1]
+        %tmp3 = bitcast %f4 %tmp2 to %i4                ; <%i4> [#uses=1]
+        %tmp4 = add %i4 %tmp3, < i32 1, i32 2, i32 3, i32 4 >           ; <%i4> [#uses=1]
+        store %i4 %tmp4, %i4* %a
+        ret void
 }
 
-void %test_cast_2(<8 x float>* %a, <8 x int>* %b) {
-  %T = load <8 x float>* %a
-  %T2 = cast <8 x float> %T to <8 x int>
-  store <8 x int> %T2, <8 x int>* %b
-  ret void
+define void @test_cast_2(%f8* %a, <8 x i32>* %b) {
+        %T = load %f8* %a               ; <%f8> [#uses=1]
+        %T2 = bitcast %f8 %T to <8 x i32>               ; <<8 x i32>> [#uses=1]
+        store <8 x i32> %T2, <8 x i32>* %b
+        ret void
 }
 
 
 ;;; TEST IMPORTANT IDIOMS
 
-void %splat(%f4* %P, %f4* %Q, float %X) {
-        %tmp = insertelement %f4 undef, float %X, uint 0
-        %tmp2 = insertelement %f4 %tmp, float %X, uint 1
-        %tmp4 = insertelement %f4 %tmp2, float %X, uint 2
-        %tmp6 = insertelement %f4 %tmp4, float %X, uint 3
-	%q = load %f4* %Q
-	%R = add %f4 %q, %tmp6
+define void @splat(%f4* %P, %f4* %Q, float %X) {
+        %tmp = insertelement %f4 undef, float %X, i32 0         ; <%f4> [#uses=1]
+        %tmp2 = insertelement %f4 %tmp, float %X, i32 1         ; <%f4> [#uses=1]
+        %tmp4 = insertelement %f4 %tmp2, float %X, i32 2                ; <%f4> [#uses=1]
+        %tmp6 = insertelement %f4 %tmp4, float %X, i32 3                ; <%f4> [#uses=1]
+        %q = load %f4* %Q               ; <%f4> [#uses=1]
+        %R = add %f4 %q, %tmp6          ; <%f4> [#uses=1]
         store %f4 %R, %f4* %P
         ret void
 }
 
-void %splat_i4(%i4* %P, %i4* %Q, int %X) {
-        %tmp = insertelement %i4 undef, int %X, uint 0
-        %tmp2 = insertelement %i4 %tmp, int %X, uint 1
-        %tmp4 = insertelement %i4 %tmp2, int %X, uint 2
-        %tmp6 = insertelement %i4 %tmp4, int %X, uint 3
-	%q = load %i4* %Q
-	%R = add %i4 %q, %tmp6
+define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) {
+        %tmp = insertelement %i4 undef, i32 %X, i32 0           ; <%i4> [#uses=1]
+        %tmp2 = insertelement %i4 %tmp, i32 %X, i32 1           ; <%i4> [#uses=1]
+        %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2          ; <%i4> [#uses=1]
+        %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3          ; <%i4> [#uses=1]
+        %q = load %i4* %Q               ; <%i4> [#uses=1]
+        %R = add %i4 %q, %tmp6          ; <%i4> [#uses=1]
         store %i4 %R, %i4* %P
         ret void
 }

Modified: llvm/trunk/test/CodeGen/X86/weak.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/weak.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/weak.ll (original)
+++ llvm/trunk/test/CodeGen/X86/weak.ll Thu Feb 21 01:42:26 2008
@@ -1,3 +1,4 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
-%a = extern_weak global int
-%b = global int* %a
+; RUN: llvm-as < %s | llc -march=x86
+@a = extern_weak global i32             ; <i32*> [#uses=1]
+@b = global i32* @a             ; <i32**> [#uses=0]
+

Modified: llvm/trunk/test/CodeGen/X86/x86-64-asm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/x86-64-asm.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/x86-64-asm.ll (original)
+++ llvm/trunk/test/CodeGen/X86/x86-64-asm.ll Thu Feb 21 01:42:26 2008
@@ -1,15 +1,12 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc
+; RUN: llvm-as < %s | llc
 ; PR1029
 
 target datalayout = "e-p:64:64"
-target endian = little
-target pointersize = 64
 target triple = "x86_64-unknown-linux-gnu"
 
-implementation   ; Functions:
-
-void %frame_dummy() {
+define void @frame_dummy() {
 entry:
-	%tmp1 = tail call void (sbyte*)* (void (sbyte*)*)* asm "", "=r,0,~{dirflag},~{fpsr},~{flags}"( void (sbyte*)* null )		; <void (sbyte*)*> [#uses=0]
-	ret void
+        %tmp1 = tail call void (i8*)* (void (i8*)*)* asm "", "=r,0,~{dirflag},~{fpsr},~{flags}"( void (i8*)* null )           ; <void (i8*)*> [#uses=0]
+        ret void
 }
+

Modified: llvm/trunk/test/CodeGen/X86/xmm-r64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/xmm-r64.ll?rev=47432&r1=47431&r2=47432&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/X86/xmm-r64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/xmm-r64.ll Thu Feb 21 01:42:26 2008
@@ -1,11 +1,12 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86-64
+; RUN: llvm-as < %s | llc -march=x86-64
 
-<4 x int> %test() {
-	%tmp1039 = call <4 x int> %llvm.x86.sse2.psll.d( <4 x int> zeroinitializer, <4 x int> zeroinitializer )		; <<4 x int>> [#uses=1]
-	%tmp1040 = cast <4 x int> %tmp1039 to <2 x long>		; <<2 x long>> [#uses=1]
-	%tmp1048 = add <2 x long> %tmp1040, zeroinitializer		; <<2 x long>> [#uses=1]
-	%tmp1048 = cast <2 x long> %tmp1048 to <4 x int>		; <<4 x int>> [#uses=1]
-	ret <4 x int>  %tmp1048
+define <4 x i32> @test() {
+        %tmp1039 = call <4 x i32> @llvm.x86.sse2.psll.d( <4 x i32> zeroinitializer, <4 x i32> zeroinitializer )               ; <<4 x i32>> [#uses=1]
+        %tmp1040 = bitcast <4 x i32> %tmp1039 to <2 x i64>              ; <<2 x i64>> [#uses=1]
+        %tmp1048 = add <2 x i64> %tmp1040, zeroinitializer              ; <<2 x i64>> [#uses=1]
+        %tmp1048.upgrd.1 = bitcast <2 x i64> %tmp1048 to <4 x i32>              ; <<4 x i32>> [#uses=1]
+        ret <4 x i32> %tmp1048.upgrd.1
 }
 
-declare <4 x int> %llvm.x86.sse2.psll.d(<4 x int>, <4 x int>)
+declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>)
+




