[llvm-branch-commits] [llvm-branch] r172541 [7/8] - in /llvm/branches/AMDILBackend: ./ autoconf/ bindings/ocaml/executionengine/ bindings/ocaml/llvm/ bindings/ocaml/target/ cmake/ cmake/modules/ cmake/platforms/ docs/ docs/CommandGuide/ docs/_themes/ docs/_themes/llvm-theme/ docs/_themes/llvm-theme/static/ docs/llvm-theme/ docs/llvm-theme/static/ docs/tutorial/ examples/ExceptionDemo/ examples/Fibonacci/ examples/Kaleidoscope/Chapter4/ examples/Kaleidoscope/Chapter5/ examples/Kaleidoscope/Chapter6/ examples/Kaleidoscope/Chapt...

Richard Relph Richard.Relph at amd.com
Tue Jan 15 09:16:26 PST 2013


Removed: llvm/branches/AMDILBackend/test/CodeGen/X86/stack-protector-linux.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/stack-protector-linux.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/stack-protector-linux.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/stack-protector-linux.ll (removed)
@@ -1,28 +0,0 @@
-; RUN: llc -mtriple=i386-pc-linux-gnu < %s -o - | grep %gs:
-; RUN: llc -mtriple=x86_64-pc-linux-gnu < %s -o - | grep %fs:
-; RUN: llc -code-model=kernel -mtriple=x86_64-pc-linux-gnu < %s -o - | grep %gs:
-; RUN: llc -mtriple=x86_64-apple-darwin < %s -o - | grep "__stack_chk_guard"
-; RUN: llc -mtriple=x86_64-apple-darwin < %s -o - | grep "__stack_chk_fail"
-
-@"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00"		; <[11 x i8]*> [#uses=1]
-
-define void @test(i8* %a) nounwind ssp {
-entry:
-	%a_addr = alloca i8*		; <i8**> [#uses=2]
-	%buf = alloca [8 x i8]		; <[8 x i8]*> [#uses=2]
-	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	store i8* %a, i8** %a_addr
-	%buf1 = bitcast [8 x i8]* %buf to i8*		; <i8*> [#uses=1]
-	%0 = load i8** %a_addr, align 4		; <i8*> [#uses=1]
-	%1 = call i8* @strcpy(i8* %buf1, i8* %0) nounwind		; <i8*> [#uses=0]
-	%buf2 = bitcast [8 x i8]* %buf to i8*		; <i8*> [#uses=1]
-	%2 = call i32 (i8*, ...)* @printf(i8* getelementptr ([11 x i8]* @"\01LC", i32 0, i32 0), i8* %buf2) nounwind		; <i32> [#uses=0]
-	br label %return
-
-return:		; preds = %entry
-	ret void
-}
-
-declare i8* @strcpy(i8*, i8*) nounwind
-
-declare i32 @printf(i8*, ...) nounwind

Modified: llvm/branches/AMDILBackend/test/CodeGen/X86/tailcall-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/tailcall-64.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/tailcall-64.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/tailcall-64.ll Tue Jan 15 11:16:16 2013
@@ -1,6 +1,4 @@
-; RUN: llc < %s | FileCheck %s
-target datalayout = "e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-f128:128:128-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-apple-darwin11.4.0"
+; RUN: llc -mtriple=x86_64-apple-macosx -mcpu=core2 < %s | FileCheck %s
 
 declare i64 @testi()
 
@@ -93,4 +91,67 @@
   ret { i64, i64 } %mrv7
 }
 
+; Check that we can fold an indexed load into a tail call instruction.
+; CHECK: fold_indexed_load
+; CHECK: leaq (%rsi,%rsi,4), %[[RAX:r..]]
+; CHECK: jmpq *16(%{{r..}},%[[RAX]],8)  # TAILCALL
+%struct.funcs = type { i32 (i8*, i32*, i32)*, i32 (i8*)*, i32 (i8*)*, i32 (i8*, i32)*, i32 }
+@func_table = external global [0 x %struct.funcs]
+define void @fold_indexed_load(i8* %mbstr, i64 %idxprom) nounwind uwtable ssp {
+entry:
+  %dsplen = getelementptr inbounds [0 x %struct.funcs]* @func_table, i64 0, i64 %idxprom, i32 2
+  %x1 = load i32 (i8*)** %dsplen, align 8
+  %call = tail call i32 %x1(i8* %mbstr) nounwind
+  ret void
+}
+
+; <rdar://problem/12282281> Fold an indexed load into the tail call instruction.
+; Calling a varargs function with 6 arguments requires 7 registers (%al is the
+; vector count for varargs functions). This leaves %r11 as the only available
+; scratch register.
+;
+; It is not possible to fold an indexed load into TCRETURNmi64 in that case.
+;
+; typedef int (*funcptr)(void*, ...);
+; extern const funcptr funcs[];
+; int f(int n) {
+;   return funcs[n](0, 0, 0, 0, 0, 0);
+; }
+;
+; CHECK: rdar12282281
+; CHECK: jmpq *%r11 # TAILCALL
+@funcs = external constant [0 x i32 (i8*, ...)*]
+
+define i32 @rdar12282281(i32 %n) nounwind uwtable ssp {
+entry:
+  %idxprom = sext i32 %n to i64
+  %arrayidx = getelementptr inbounds [0 x i32 (i8*, ...)*]* @funcs, i64 0, i64 %idxprom
+  %0 = load i32 (i8*, ...)** %arrayidx, align 8
+  %call = tail call i32 (i8*, ...)* %0(i8* null, i32 0, i32 0, i32 0, i32 0, i32 0) nounwind
+  ret i32 %call
+}
+
+define x86_fp80 @fp80_call(x86_fp80 %x) nounwind  {
+entry:
+; CHECK: fp80_call:
+; CHECK: jmp _fp80_callee
+  %call = tail call x86_fp80 @fp80_callee(x86_fp80 %x) nounwind
+  ret x86_fp80 %call
+}
+
+declare x86_fp80 @fp80_callee(x86_fp80)
+
+; rdar://12229511
+define x86_fp80 @trunc_fp80(x86_fp80 %x) nounwind  {
+entry:
+; CHECK: trunc_fp80
+; CHECK: callq _trunc
+; CHECK-NOT: jmp _trunc
+; CHECK: ret
+  %conv = fptrunc x86_fp80 %x to double
+  %call = tail call double @trunc(double %conv) nounwind readnone
+  %conv1 = fpext double %call to x86_fp80
+  ret x86_fp80 %conv1
+}
 
+declare double @trunc(double) nounwind readnone
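
For reference, the fold_indexed_load test added above corresponds roughly to the following C (a sketch only; the field names are invented, and just the struct layout and the third-slot indirect call are taken from the IR):

    struct funcs {
        int (*parse)(void *, int *, int);  /* hypothetical field names */
        int (*init)(void *);
        int (*display)(void *);            /* third slot: offset 16 */
        int (*set)(void *, int);
        int flags;                         /* trailing i32 pads the struct to 40 bytes */
    };
    extern struct funcs func_table[];

    void fold_indexed_load(void *mbstr, long idx) {
        /* the 40-byte stride gives the leaq (%rsi,%rsi,4) / scale-8 addressing in the
           CHECK lines; the loaded pointer is only used by the call, so the load can
           fold into jmpq *16(...). */
        func_table[idx].display(mbstr);
    }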

Modified: llvm/branches/AMDILBackend/test/CodeGen/X86/targetLoweringGeneric.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/targetLoweringGeneric.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/targetLoweringGeneric.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/targetLoweringGeneric.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=i386-apple-darwin9 -fast-isel=false -O0 < %s | FileCheck %s
+; RUN: llc -mtriple=i386-apple-darwin9 -mcpu=corei7 -fast-isel=false -O0 < %s | FileCheck %s
 
 ; Gather non-machine specific tests for the transformations in
 ; CodeGen/SelectionDAG/TargetLowering.  Currently, these

Modified: llvm/branches/AMDILBackend/test/CodeGen/X86/tls-pic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/tls-pic.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/tls-pic.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/tls-pic.ll Tue Jan 15 11:16:16 2013
@@ -76,12 +76,12 @@
 
 ; X32:    f5:
 ; X32:      leal {{[jk]}}@TLSLDM(%ebx)
-; X32-NEXT: calll ___tls_get_addr@PLT
-; X32-NEXT: movl {{[jk]}}@DTPOFF(%eax)
-; X32-NEXT: addl {{[jk]}}@DTPOFF(%eax)
+; X32: calll ___tls_get_addr@PLT
+; X32: movl {{[jk]}}@DTPOFF(%e
+; X32: addl {{[jk]}}@DTPOFF(%e
 
 ; X64:    f5:
 ; X64:      leaq {{[jk]}}@TLSLD(%rip), %rdi
-; X64-NEXT: callq	__tls_get_addr@PLT
-; X64-NEXT: movl {{[jk]}}@DTPOFF(%rax)
-; X64-NEXT: addl {{[jk]}}@DTPOFF(%rax)
+; X64: callq	__tls_get_addr@PLT
+; X64: movl {{[jk]}}@DTPOFF(%r
+; X64: addl {{[jk]}}@DTPOFF(%r
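
In C terms, the f5 function whose checks are relaxed above is roughly the following (a sketch; only the variable names j and k come from the CHECK patterns, the rest is assumed):

    static __thread int j, k;   /* local-dynamic TLS when built as PIC */

    int f5(void) {
        /* one __tls_get_addr call fetches the module's TLS block, then j and k
           are read via their DTPOFF offsets from the returned base */
        return j + k;
    }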

Modified: llvm/branches/AMDILBackend/test/CodeGen/X86/trunc-ext-ld-st.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/trunc-ext-ld-st.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/trunc-ext-ld-st.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/trunc-ext-ld-st.ll Tue Jan 15 11:16:16 2013
@@ -2,8 +2,7 @@
 
 ;CHECK: load_2_i8
 ; A single 16-bit load
-;CHECK: movzwl
-;CHECK: pshufb
+;CHECK: pmovzxbq
 ;CHECK: paddq
 ;CHECK: pshufb
 ; A single 16-bit store
@@ -19,8 +18,7 @@
 
 ;CHECK: load_2_i16
 ; Read 32-bits
-;CHECK: movd
-;CHECK: pshufb
+;CHECK: pmovzxwq
 ;CHECK: paddq
 ;CHECK: pshufb
 ;CHECK: movd
@@ -33,7 +31,7 @@
 } 
 
 ;CHECK: load_2_i32
-;CHECK: pshufd
+;CHECK: pmovzxdq
 ;CHECK: paddq
 ;CHECK: pshufd
 ;CHECK: ret
@@ -45,8 +43,7 @@
 } 
 
 ;CHECK: load_4_i8
-;CHECK: movd
-;CHECK: pshufb
+;CHECK: pmovzxbd
 ;CHECK: paddd
 ;CHECK: pshufb
 ;CHECK: ret
@@ -58,7 +55,7 @@
 } 
 
 ;CHECK: load_4_i16
-;CHECK: punpcklwd
+;CHECK: pmovzxwd
 ;CHECK: paddd
 ;CHECK: pshufb
 ;CHECK: ret
@@ -70,7 +67,7 @@
 } 
 
 ;CHECK: load_8_i8
-;CHECK: punpcklbw
+;CHECK: pmovzxbw
 ;CHECK: paddw
 ;CHECK: pshufb
 ;CHECK: ret

Removed: llvm/branches/AMDILBackend/test/CodeGen/X86/unreachable-stack-protector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/unreachable-stack-protector.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/unreachable-stack-protector.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/unreachable-stack-protector.ll (removed)
@@ -1,19 +0,0 @@
-; RUN: llc < %s -disable-cgp-delete-dead-blocks | FileCheck %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-apple-darwin10.0.0"
-
-declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readnone
-
-define void @test5() nounwind optsize noinline ssp {
-entry:
-; CHECK: movq ___stack_chk_guard@GOTPCREL(%rip)
-  %buf = alloca [64 x i8], align 16
-  %0 = call i64 @llvm.objectsize.i64(i8* undef, i1 false)
-  br i1 false, label %if.end, label %if.then
-
-if.then:                                          ; preds = %entry
-  unreachable
-
-if.end:                                           ; preds = %entry
-  ret void
-}

Modified: llvm/branches/AMDILBackend/test/CodeGen/X86/vec_compare-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/vec_compare-2.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/vec_compare-2.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/vec_compare-2.ll Tue Jan 15 11:16:16 2013
@@ -10,8 +10,7 @@
 entry:
 ; CHECK: cfi_def_cfa_offset
 ; CHECK-NOT: set
-; CHECK: punpcklwd
-; CHECK: pshufd
+; CHECK: pmovzxwq
 ; CHECK: pshufb
   %shr.i = ashr <4 x i32> zeroinitializer, <i32 3, i32 3, i32 3, i32 3> ; <<4 x i32>> [#uses=1]
   %cmp318.i = sext <4 x i1> zeroinitializer to <4 x i32> ; <<4 x i32>> [#uses=1]

Modified: llvm/branches/AMDILBackend/test/CodeGen/X86/vec_fpext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/vec_fpext.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/vec_fpext.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/vec_fpext.ll Tue Jan 15 11:16:16 2013
@@ -1,14 +1,38 @@
 ; RUN: llc < %s -march=x86 -mattr=+sse41,-avx | FileCheck %s
+; RUN: llc < %s -march=x86 -mattr=+avx | FileCheck --check-prefix=AVX %s
 
 ; PR11674
 define void @fpext_frommem(<2 x float>* %in, <2 x double>* %out) {
 entry:
-; TODO: We should be able to generate cvtps2pd for the load.
-; For now, just check that we generate something sane.
-; CHECK: cvtss2sd
-; CHECK: cvtss2sd
+; CHECK: cvtps2pd (%{{.+}}), %xmm{{[0-9]+}}
+; AVX: vcvtps2pd (%{{.+}}), %xmm{{[0-9]+}}
   %0 = load <2 x float>* %in, align 8
   %1 = fpext <2 x float> %0 to <2 x double>
   store <2 x double> %1, <2 x double>* %out, align 1
   ret void
 }
+
+define void @fpext_frommem4(<4 x float>* %in, <4 x double>* %out) {
+entry:
+; CHECK: cvtps2pd (%{{.+}}), %xmm{{[0-9]+}}
+; CHECK: cvtps2pd 8(%{{.+}}), %xmm{{[0-9]+}}
+; AVX: vcvtps2pd (%{{.+}}), %ymm{{[0-9]+}}
+  %0 = load <4 x float>* %in
+  %1 = fpext <4 x float> %0 to <4 x double>
+  store <4 x double> %1, <4 x double>* %out, align 1
+  ret void
+}
+
+define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) {
+entry:
+; CHECK: cvtps2pd (%{{.+}}), %xmm{{[0-9]+}}
+; CHECK: cvtps2pd 8(%{{.+}}), %xmm{{[0-9]+}}
+; CHECK: cvtps2pd 16(%{{.+}}), %xmm{{[0-9]+}}
+; CHECK: cvtps2pd 24(%{{.+}}), %xmm{{[0-9]+}}
+; AVX: vcvtps2pd (%{{.+}}), %ymm{{[0-9]+}}
+; AVX: vcvtps2pd 16(%{{.+}}), %ymm{{[0-9]+}}
+  %0 = load <8 x float>* %in
+  %1 = fpext <8 x float> %0 to <8 x double>
+  store <8 x double> %1, <8 x double>* %out, align 1
+  ret void
+}
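
The new fpext_frommem4/fpext_frommem8 tests are the vector form of plain float-to-double widening; a C sketch of the 4-element case (assuming the data stays in memory, as in the IR):

    void fpext_frommem4(const float *in, double *out) {
        /* <4 x float> -> <4 x double>: SSE4.1 needs two cvtps2pd (two doubles per
           xmm register); AVX converts all four at once with vcvtps2pd into a ymm */
        for (int i = 0; i < 4; ++i)
            out[i] = (double)in[i];
    }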

Modified: llvm/branches/AMDILBackend/test/CodeGen/X86/vec_shuffle-26.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/vec_shuffle-26.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/vec_shuffle-26.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/vec_shuffle-26.ll Tue Jan 15 11:16:16 2013
@@ -1,6 +1,5 @@
-; RUN: llc < %s -march=x86 -mattr=sse41 -o %t
-; RUN: grep unpcklps %t | count 1
-; RUN: grep unpckhps %t | count 3
+; RUN: llc < %s -march=x86 -mcpu=generic -mattr=sse41 | FileCheck %s
+; RUN: llc < %s -march=x86 -mcpu=atom | FileCheck -check-prefix=ATOM %s
 
 ; Transpose example using the more generic vector shuffle. Return float8
 ; instead of float16
@@ -14,6 +13,17 @@
 
 define <8 x float> @__transpose2(<4 x float> %p0, <4 x float> %p1, <4 x float> %p2, <4 x float> %p3) nounwind {
 entry:
+; CHECK: transpose2
+; CHECK: unpckhps
+; CHECK: unpckhps
+; CHECK: unpcklps
+; CHECK: unpckhps
+; Different instruction order for Atom.
+; ATOM: transpose2
+; ATOM: unpckhps
+; ATOM: unpckhps
+; ATOM: unpckhps
+; ATOM: unpcklps
 	%unpcklps = shufflevector <4 x float> %p0, <4 x float> %p2, <4 x i32> < i32 0, i32 4, i32 1, i32 5 >		; <<4 x float>> [#uses=2]
 	%unpckhps = shufflevector <4 x float> %p0, <4 x float> %p2, <4 x i32> < i32 2, i32 6, i32 3, i32 7 >		; <<4 x float>> [#uses=2]
 	%unpcklps8 = shufflevector <4 x float> %p1, <4 x float> %p3, <4 x i32> < i32 0, i32 4, i32 1, i32 5 >		; <<4 x float>> [#uses=2]
@@ -27,3 +37,32 @@
 ;       %r3 = shufflevector <8 x float> %r1,  <8 x float> %r2,  <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15 >; 
 	ret <8 x float> %r2
 }
+
+define <2 x i64> @lo_hi_shift(float* nocapture %x, float* nocapture %y) nounwind {
+entry:
+; movhps should happen before extractps to assure it gets the correct value.
+; CHECK: lo_hi_shift
+; CHECK: movhps ([[BASEREG:%[a-z]+]]),
+; CHECK: extractps ${{[0-9]+}}, %xmm{{[0-9]+}}, {{[0-9]*}}([[BASEREG]])
+; CHECK: extractps ${{[0-9]+}}, %xmm{{[0-9]+}}, {{[0-9]*}}([[BASEREG]])
+; ATOM: lo_hi_shift
+; ATOM: movhps ([[BASEREG:%[a-z]+]]),
+; ATOM: movd %xmm{{[0-9]+}}, {{[0-9]*}}([[BASEREG]])
+; ATOM: movd %xmm{{[0-9]+}}, {{[0-9]*}}([[BASEREG]])
+  %v.i = bitcast float* %y to <4 x float>*
+  %0 = load <4 x float>* %v.i, align 1
+  %1 = bitcast float* %x to <1 x i64>*
+  %.val = load <1 x i64>* %1, align 1
+  %2 = bitcast <1 x i64> %.val to <2 x float>
+  %shuffle.i = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+  %shuffle1.i = shufflevector <4 x float> %0, <4 x float> %shuffle.i, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+  %cast.i = bitcast <4 x float> %0 to <2 x i64>
+  %extract.i = extractelement <2 x i64> %cast.i, i32 1
+  %3 = bitcast float* %x to i64*
+  store i64 %extract.i, i64* %3, align 4
+  %4 = bitcast <4 x float> %0 to <16 x i8>
+  %5 = bitcast <4 x float> %shuffle1.i to <16 x i8>
+  %palignr = shufflevector <16 x i8> %5, <16 x i8> %4, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+  %6 = bitcast <16 x i8> %palignr to <2 x i64>
+  ret <2 x i64> %6
+}

Modified: llvm/branches/AMDILBackend/test/CodeGen/X86/vec_shuffle-30.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/vec_shuffle-30.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/vec_shuffle-30.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/vec_shuffle-30.ll Tue Jan 15 11:16:16 2013
@@ -1,21 +1,25 @@
-; RUN: llc < %s -march=x86 -mattr=sse41 -o %t
-; RUN: grep pshufhw %t | grep -- -95 | count 1
-; RUN: grep shufps %t | count 1
-; RUN: not grep pslldq %t
+; RUN: llc < %s -march=x86 -mattr=+avx | FileCheck %s
 
+; CHECK: test
 ; Test case when creating pshufhw, we incorrectly set the higher order bit
 ; for an undef,
 define void @test(<8 x i16>* %dest, <8 x i16> %in) nounwind {
 entry:
+; CHECK-NOT: vmovaps
+; CHECK: vmovlpd
+; CHECK: vpshufhw        $-95
   %0 = load <8 x i16>* %dest
   %1 = shufflevector <8 x i16> %0, <8 x i16> %in, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 13, i32 undef, i32 14, i32 14>
   store <8 x i16> %1, <8 x i16>* %dest
   ret void
-}                              
+}
 
+; CHECK: test2
 ; A test case where we shouldn't generate a punpckldq but a pshufd and a pslldq
 define void @test2(<4 x i32>* %dest, <4 x i32> %in) nounwind {
 entry:
+; CHECK-NOT: pslldq
+; CHECK: shufps
   %0 = shufflevector <4 x i32> %in, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> < i32 undef, i32 5, i32 undef, i32 2>
   store <4 x i32> %0, <4 x i32>* %dest
   ret void

Modified: llvm/branches/AMDILBackend/test/CodeGen/X86/vec_ss_load_fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/vec_ss_load_fold.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/vec_ss_load_fold.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/vec_ss_load_fold.ll Tue Jan 15 11:16:16 2013
@@ -70,3 +70,17 @@
 ; CHECK: call
 ; CHECK: roundss $4, %xmm{{.*}}, %xmm0
 }
+
+; PR13576 
+define  <2 x double> @test5() nounwind uwtable readnone noinline {
+entry:
+  %0 = tail call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> <double
+4.569870e+02, double 1.233210e+02>, i32 128) nounwind readnone
+  ret <2 x double> %0
+; CHECK: test5:
+; CHECK: mov
+; CHECK: mov
+; CHECK: cvtsi2sd
+}
+
+declare <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double>, i32) nounwind readnone

Modified: llvm/branches/AMDILBackend/test/CodeGen/X86/widen_cast-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/widen_cast-1.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/widen_cast-1.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/widen_cast-1.ll Tue Jan 15 11:16:16 2013
@@ -1,5 +1,5 @@
 ; RUN: llc -march=x86 -mcpu=generic -mattr=+sse42 < %s | FileCheck %s
-; RUN: llc -march=x86 -mcpu=atom -mattr=+sse42 < %s | FileCheck -check-prefix=ATOM %s
+; RUN: llc -march=x86 -mcpu=atom < %s | FileCheck -check-prefix=ATOM %s
 
 ; CHECK: paddd
 ; CHECK: movl

Modified: llvm/branches/AMDILBackend/test/CodeGen/X86/widen_load-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/widen_load-1.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/widen_load-1.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/widen_load-1.ll Tue Jan 15 11:16:16 2013
@@ -1,12 +1,17 @@
-; RUN: llc %s -o - -march=x86-64 -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+; RUN: llc %s -o - -march=x86-64 -mattr=-avx -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=SSE
+; RUN: llc %s -o - -march=x86-64 -mattr=+avx -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=AVX
 ; PR4891
 ; PR5626
 
 ; This load should be before the call, not after.
 
-; CHECK: movaps    compl+128(%rip), %xmm0
-; CHECK: movaps  %xmm0, (%rsp)
-; CHECK: callq   killcommon
+; SSE: movaps    compl+128(%rip), %xmm0
+; SSE: movaps  %xmm0, (%rsp)
+; SSE: callq   killcommon
+
+; AVX: vmovapd    compl+128(%rip), %xmm0
+; AVX: vmovapd  %xmm0, (%rsp)
+; AVX: callq   killcommon
 
 @compl = linkonce global [20 x i64] zeroinitializer, align 64 ; <[20 x i64]*> [#uses=1]
 

Modified: llvm/branches/AMDILBackend/test/CodeGen/X86/widen_load-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/CodeGen/X86/widen_load-2.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/CodeGen/X86/widen_load-2.ll (original)
+++ llvm/branches/AMDILBackend/test/CodeGen/X86/widen_load-2.ll Tue Jan 15 11:16:16 2013
@@ -170,7 +170,7 @@
 ; CHECK: rot
 %i8vec3pack = type { <3 x i8>, i8 }
 define %i8vec3pack  @rot() nounwind {
-; CHECK: movd {{-?[0-9]+}}(%rsp), {{%xmm[0-9]}}
+; CHECK: pmovzxbd {{-?[0-9]+}}(%rsp), {{%xmm[0-9]}}
 entry:
   %X = alloca %i8vec3pack, align 4
   %rot = alloca %i8vec3pack, align 4

Removed: llvm/branches/AMDILBackend/test/DebugInfo/2010-04-13-PubType.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/DebugInfo/2010-04-13-PubType.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/DebugInfo/2010-04-13-PubType.ll (original)
+++ llvm/branches/AMDILBackend/test/DebugInfo/2010-04-13-PubType.ll (removed)
@@ -1,47 +0,0 @@
-; RUN: llc -O0 -asm-verbose < %s > %t
-; RUN: grep "External Name" %t | grep -v X
-; RUN: grep "External Name" %t | grep Y | count 1
-; Test to check type with no definition is listed in pubtypes section.
-%struct.X = type opaque
-%struct.Y = type { i32 }
-
-define i32 @foo(%struct.X* %x, %struct.Y* %y) nounwind ssp {
-entry:
-  %x_addr = alloca %struct.X*                     ; <%struct.X**> [#uses=1]
-  %y_addr = alloca %struct.Y*                     ; <%struct.Y**> [#uses=1]
-  %retval = alloca i32                            ; <i32*> [#uses=2]
-  %0 = alloca i32                                 ; <i32*> [#uses=2]
-  %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
-  call void @llvm.dbg.declare(metadata !{%struct.X** %x_addr}, metadata !0), !dbg !13
-  store %struct.X* %x, %struct.X** %x_addr
-  call void @llvm.dbg.declare(metadata !{%struct.Y** %y_addr}, metadata !14), !dbg !13
-  store %struct.Y* %y, %struct.Y** %y_addr
-  store i32 0, i32* %0, align 4, !dbg !13
-  %1 = load i32* %0, align 4, !dbg !13            ; <i32> [#uses=1]
-  store i32 %1, i32* %retval, align 4, !dbg !13
-  br label %return, !dbg !13
-
-return:                                           ; preds = %entry
-  %retval1 = load i32* %retval, !dbg !13          ; <i32> [#uses=1]
-  ret i32 %retval1, !dbg !15
-}
-
-declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
-
-!0 = metadata !{i32 524545, metadata !1, metadata !"x", metadata !2, i32 7, metadata !7} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{i32 524334, i32 0, metadata !2, metadata !"foo", metadata !"foo", metadata !"foo", metadata !2, i32 7, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i1 false} ; [ DW_TAG_subprogram ]
-!2 = metadata !{i32 524329, metadata !"a.c", metadata !"/tmp/", metadata !3} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 524305, i32 0, i32 1, metadata !"a.c", metadata !"/tmp/", metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, i1 false, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{i32 524309, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !5, i32 0, null} ; [ DW_TAG_subroutine_type ]
-!5 = metadata !{metadata !6, metadata !7, metadata !9}
-!6 = metadata !{i32 524324, metadata !2, metadata !"int", metadata !2, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!7 = metadata !{i32 524303, metadata !2, metadata !"", metadata !2, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !8} ; [ DW_TAG_pointer_type ]
-!8 = metadata !{i32 524307, metadata !2, metadata !"X", metadata !2, i32 3, i64 0, i64 0, i64 0, i32 4, null, null, i32 0, null} ; [ DW_TAG_structure_type ]
-!9 = metadata !{i32 524303, metadata !2, metadata !"", metadata !2, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !10} ; [ DW_TAG_pointer_type ]
-!10 = metadata !{i32 524307, metadata !2, metadata !"Y", metadata !2, i32 4, i64 32, i64 32, i64 0, i32 0, null, metadata !11, i32 0, null} ; [ DW_TAG_structure_type ]
-!11 = metadata !{metadata !12}
-!12 = metadata !{i32 524301, metadata !10, metadata !"x", metadata !2, i32 5, i64 32, i64 32, i64 0, i32 0, metadata !6} ; [ DW_TAG_member ]
-!13 = metadata !{i32 7, i32 0, metadata !1, null}
-!14 = metadata !{i32 524545, metadata !1, metadata !"y", metadata !2, i32 7, metadata !9} ; [ DW_TAG_arg_variable ]
-!15 = metadata !{i32 7, i32 0, metadata !16, null}
-!16 = metadata !{i32 524299, metadata !1, i32 7, i32 0} ; [ DW_TAG_lexical_block ]

Modified: llvm/branches/AMDILBackend/test/DebugInfo/X86/DW_AT_byte_size.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/DebugInfo/X86/DW_AT_byte_size.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/DebugInfo/X86/DW_AT_byte_size.ll (original)
+++ llvm/branches/AMDILBackend/test/DebugInfo/X86/DW_AT_byte_size.ll Tue Jan 15 11:16:16 2013
@@ -4,7 +4,8 @@
 ; Checks that we don't emit a size for a pointer type.
 ; CHECK: DW_TAG_pointer_type
 ; CHECK-NEXT: DW_AT_type
-; CHECK-NOT-NEXT: DW_AT_byte_size
+; CHECK-NOT: DW_AT_byte_size
+; CHECK: .debug_info contents
 
 %struct.A = type { i32 }
 

Modified: llvm/branches/AMDILBackend/test/DebugInfo/X86/concrete_out_of_line.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/DebugInfo/X86/concrete_out_of_line.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/DebugInfo/X86/concrete_out_of_line.ll (original)
+++ llvm/branches/AMDILBackend/test/DebugInfo/X86/concrete_out_of_line.ll Tue Jan 15 11:16:16 2013
@@ -7,16 +7,15 @@
 ; first check that we have a TAG_subprogram at a given offset and it has
 ; AT_inline.
 
-; CHECK: 0x00000134:   DW_TAG_subprogram [18]
-; CHECK-NEXT:     DW_AT_MIPS_linkage_name
+; CHECK: 0x0000011e:   DW_TAG_subprogram [18]
 ; CHECK-NEXT:     DW_AT_specification
 ; CHECK-NEXT:     DW_AT_inline
 
 
 ; and then that a TAG_subprogram refers to it with AT_abstract_origin.
 
-; CHECK: 0x00000184:   DW_TAG_subprogram [20]
-; CHECK-NEXT: DW_AT_abstract_origin [DW_FORM_ref4]    (cu + 0x0134 => {0x00000134})
+; CHECK: 0x0000015f:   DW_TAG_subprogram [20]
+; CHECK-NEXT: DW_AT_abstract_origin [DW_FORM_ref4]    (cu + 0x011e => {0x0000011e})
 
 define i32 @_ZN17nsAutoRefCnt7ReleaseEv() {
 entry:

Modified: llvm/branches/AMDILBackend/test/DebugInfo/X86/enum-fwd-decl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/DebugInfo/X86/enum-fwd-decl.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/DebugInfo/X86/enum-fwd-decl.ll (original)
+++ llvm/branches/AMDILBackend/test/DebugInfo/X86/enum-fwd-decl.ll Tue Jan 15 11:16:16 2013
@@ -5,16 +5,14 @@
 
 !llvm.dbg.cu = !{!0}
 
-!0 = metadata !{i32 786449, i32 0, i32 4, metadata !"foo.cpp", metadata !"/Users/echristo/tmp", metadata !"clang version 3.2 (trunk 157772) (llvm/trunk 157761)", i1 true, i1 false, metadata !"", i32 0, metadata !1, metadata !6, metadata !6, metadata !7} ; [ DW_TAG_compile_unit ]
+!0 = metadata !{i32 786449, i32 0, i32 4, metadata !"foo.cpp", metadata !"/tmp", metadata !"clang version 3.2 (trunk 165274) (llvm/trunk 165272)", i1 true, i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3} ; [ DW_TAG_compile_unit ] [/tmp/foo.cpp] [DW_LANG_C_plus_plus]
 !1 = metadata !{metadata !2}
-!2 = metadata !{metadata !3}
-!3 = metadata !{i32 786436, null, metadata !"E", metadata !4, i32 1, i64 16, i64 16, i32 0, i32 4, null, metadata !5, i32 0, i32 0} ; [ DW_TAG_enumeration_type ]
-!4 = metadata !{i32 786473, metadata !"foo.cpp", metadata !"/Users/echristo/tmp", null} ; [ DW_TAG_file_type ]
-!5 = metadata !{i32 0}
-!6 = metadata !{metadata !5}
-!7 = metadata !{metadata !8}
-!8 = metadata !{metadata !9}
-!9 = metadata !{i32 786484, i32 0, null, metadata !"e", metadata !"e", metadata !"", metadata !4, i32 2, metadata !3, i32 0, i32 1, i16* @e} ; [ DW_TAG_variable ]
+!2 = metadata !{i32 0}
+!3 = metadata !{metadata !4}
+!4 = metadata !{metadata !5}
+!5 = metadata !{i32 786484, i32 0, null, metadata !"e", metadata !"e", metadata !"", metadata !6, i32 2, metadata !7, i32 0, i32 1, i16* @e} ; [ DW_TAG_variable ] [e] [line 2] [def]
+!6 = metadata !{i32 786473, metadata !"foo.cpp", metadata !"/tmp", null} ; [ DW_TAG_file_type ]
+!7 = metadata !{i32 786436, null, metadata !"E", metadata !6, i32 1, i64 16, i64 16, i32 0, i32 4, null, null, i32 0} ; [ DW_TAG_enumeration_type ] [E] [line 1, size 16, align 16, offset 0] [fwd] [from ]
 
 ; CHECK: DW_TAG_enumeration_type
 ; CHECK-NEXT: DW_AT_name

Removed: llvm/branches/AMDILBackend/test/DebugInfo/X86/pr13303.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/DebugInfo/X86/pr13303.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/DebugInfo/X86/pr13303.ll (original)
+++ llvm/branches/AMDILBackend/test/DebugInfo/X86/pr13303.ll (removed)
@@ -1,28 +0,0 @@
-; RUN: llc %s -o %t -filetype=obj -mtriple=x86_64-unknown-linux-gnu
-; RUN: llvm-dwarfdump %t | FileCheck %s
-; PR13303
-
-; Check that the prologue ends with is_stmt here.
-; CHECK: 0x0000000000000000 {{.*}} is_stmt
-
-define i32 @main() nounwind uwtable {
-entry:
-  %retval = alloca i32, align 4
-  store i32 0, i32* %retval
-  ret i32 0, !dbg !10
-}
-
-!llvm.dbg.cu = !{!0}
-
-!0 = metadata !{i32 786449, i32 0, i32 12, metadata !"PR13303.c", metadata !"/home/probinson", metadata !"clang version 3.2 (trunk 160143)", i1 true, i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1} ; [ DW_TAG_compile_unit ] [/home/probinson/PR13303.c] [DW_LANG_C99]
-!1 = metadata !{metadata !2}
-!2 = metadata !{i32 0}
-!3 = metadata !{metadata !4}
-!4 = metadata !{metadata !5}
-!5 = metadata !{i32 786478, i32 0, metadata !6, metadata !"main", metadata !"main", metadata !"", metadata !6, i32 1, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, i32 ()* @main, null, null, metadata !1, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [main]
-!6 = metadata !{i32 786473, metadata !"PR13303.c", metadata !"/home/probinson", null} ; [ DW_TAG_file_type ]
-!7 = metadata !{i32 786453, i32 0, metadata !"", i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, i32 0} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!8 = metadata !{metadata !9}
-!9 = metadata !{i32 786468, null, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!10 = metadata !{i32 1, i32 14, metadata !11, null}
-!11 = metadata !{i32 786443, metadata !5, i32 1, i32 12, metadata !6, i32 0} ; [ DW_TAG_lexical_block ] [/home/probinson/PR13303.c]

Modified: llvm/branches/AMDILBackend/test/DebugInfo/X86/stringpool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/DebugInfo/X86/stringpool.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/DebugInfo/X86/stringpool.ll (original)
+++ llvm/branches/AMDILBackend/test/DebugInfo/X86/stringpool.ll Tue Jan 15 11:16:16 2013
@@ -16,8 +16,8 @@
 
 ; Verify that we refer to 'yyyy' with a relocation.
 ; LINUX:      .long   .Lstring3               # DW_AT_name
-; LINUX-NEXT: .long   39                      # DW_AT_type
-; LINUX-NEXT: .byte   1                       # DW_AT_external
+; LINUX-NEXT: .long   38                      # DW_AT_type
+; LINUX-NEXT:                                 # DW_AT_external
 ; LINUX-NEXT: .byte   1                       # DW_AT_decl_file
 ; LINUX-NEXT: .byte   1                       # DW_AT_decl_line
 ; LINUX-NEXT: .byte   9                       # DW_AT_location

Modified: llvm/branches/AMDILBackend/test/DebugInfo/bug_null_debuginfo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/DebugInfo/bug_null_debuginfo.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/DebugInfo/bug_null_debuginfo.ll (original)
+++ llvm/branches/AMDILBackend/test/DebugInfo/bug_null_debuginfo.ll Tue Jan 15 11:16:16 2013
@@ -1,5 +1,4 @@
-; RUN: llc
-
+; RUN: llc < %s
 
 !llvm.dbg.cu = !{!0}
 

Modified: llvm/branches/AMDILBackend/test/DebugInfo/dwarfdump-test.test
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/DebugInfo/dwarfdump-test.test?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/DebugInfo/dwarfdump-test.test (original)
+++ llvm/branches/AMDILBackend/test/DebugInfo/dwarfdump-test.test Tue Jan 15 11:16:16 2013
@@ -17,6 +17,8 @@
 RUN: llvm-dwarfdump %p/Inputs/dwarfdump-test4.elf-x86-64 \
 RUN:   --address=0x55c --functions \
 RUN:   | FileCheck %s -check-prefix MANY_SEQ_IN_LINE_TABLE
+RUN: llvm-dwarfdump %p/Inputs/dwarfdump-test4.elf-x86-64 \
+RUN:   | FileCheck %s -check-prefix DEBUG_RANGES
 
 MAIN: main
 MAIN-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test.cc:16:10
@@ -44,3 +46,11 @@
 
 MANY_SEQ_IN_LINE_TABLE: _Z1cv
 MANY_SEQ_IN_LINE_TABLE-NEXT: /tmp/dbginfo/sequences{{[/\\]}}c.cc:2:0
+
+DEBUG_RANGES:      .debug_ranges contents:
+DEBUG_RANGES-NEXT: 00000000 000000000000055c 0000000000000567
+DEBUG_RANGES-NEXT: 00000000 0000000000000567 000000000000056d
+DEBUG_RANGES-NEXT: 00000000 <End of list>
+DEBUG_RANGES-NEXT: 00000030 0000000000000570 000000000000057b
+DEBUG_RANGES-NEXT: 00000030 0000000000000567 000000000000056d
+DEBUG_RANGES-NEXT: 00000030 <End of list>

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/2002-12-16-ArgTest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/2002-12-16-ArgTest.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/2002-12-16-ArgTest.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/2002-12-16-ArgTest.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,5 @@
 ; RUN: %lli %s > /dev/null
+; XFAIL: arm
 
 @.LC0 = internal global [10 x i8] c"argc: %d\0A\00"		; <[10 x i8]*> [#uses=1]
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2002-12-16-ArgTest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2002-12-16-ArgTest.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2002-12-16-ArgTest.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2002-12-16-ArgTest.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 @.LC0 = internal global [10 x i8] c"argc: %d\0A\00"		; <[10 x i8]*> [#uses=1]
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-04-ArgumentBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-04-ArgumentBug.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-04-ArgumentBug.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-04-ArgumentBug.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @foo(i32 %X, i32 %Y, double %A) {
 	%cond212 = fcmp une double %A, 1.000000e+00		; <i1> [#uses=1]

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-04-LoopTest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-04-LoopTest.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-04-LoopTest.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-04-LoopTest.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @main() {
 	call i32 @mylog( i32 4 )		; <i32>:1 [#uses=0]

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-04-PhiTest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-04-PhiTest.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-04-PhiTest.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-04-PhiTest.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @main() {
 ; <label>:0

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-09-SARTest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-09-SARTest.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-09-SARTest.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-09-SARTest.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 ; We were accidentally inverting the signedness of right shifts.  Whoops.
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-10-FUCOM.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-10-FUCOM.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-10-FUCOM.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-10-FUCOM.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @main() {
 	%X = fadd double 0.000000e+00, 1.000000e+00		; <double> [#uses=1]

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-15-AlignmentTest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-15-AlignmentTest.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-15-AlignmentTest.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-01-15-AlignmentTest.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @bar(i8* %X) {
         ; pointer should be 4 byte aligned!

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-05-06-LivenessClobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-05-06-LivenessClobber.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-05-06-LivenessClobber.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-05-06-LivenessClobber.ll Tue Jan 15 11:16:16 2013
@@ -1,6 +1,6 @@
 ; This testcase should return with an exit code of 1.
 ;
-; RUN: not %lli -use-mcjit %s
+; RUN: not %lli -mtriple=%mcjit_triple -use-mcjit %s
 
 @test = global i64 0		; <i64*> [#uses=1]
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-05-07-ArgumentTest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-05-07-ArgumentTest.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-05-07-ArgumentTest.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-05-07-ArgumentTest.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s test
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s test
 
 declare i32 @puts(i8*)
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-05-11-PHIRegAllocBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-05-11-PHIRegAllocBug.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-05-11-PHIRegAllocBug.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-05-11-PHIRegAllocBug.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 target datalayout = "e-p:32:32"
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-06-04-bzip2-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-06-04-bzip2-bug.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-06-04-bzip2-bug.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-06-04-bzip2-bug.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 ; Testcase distilled from 256.bzip2.
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-06-05-PHIBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-06-05-PHIBug.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-06-05-PHIBug.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-06-05-PHIBug.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 ; Testcase distilled from 256.bzip2.
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-08-15-AllocaAssertion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-08-15-AllocaAssertion.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-08-15-AllocaAssertion.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-08-15-AllocaAssertion.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 ; This testcase failed to work because two variable sized allocas confused the
 ; local register allocator.

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-08-21-EnvironmentTest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-08-21-EnvironmentTest.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-08-21-EnvironmentTest.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-08-21-EnvironmentTest.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 ;
 ; Regression Test: EnvironmentTest.ll

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-08-23-RegisterAllocatePhysReg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-08-23-RegisterAllocatePhysReg.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-08-23-RegisterAllocatePhysReg.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-08-23-RegisterAllocatePhysReg.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 ; This testcase exposes a bug in the local register allocator where it runs out
 ; of registers (due to too many overlapping live ranges), but then attempts to

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-10-18-PHINode-ConstantExpr-CondCode-Failure.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-10-18-PHINode-ConstantExpr-CondCode-Failure.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-10-18-PHINode-ConstantExpr-CondCode-Failure.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2003-10-18-PHINode-ConstantExpr-CondCode-Failure.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 @A = global i32 0		; <i32*> [#uses=1]
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2005-12-02-TailCallBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2005-12-02-TailCallBug.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2005-12-02-TailCallBug.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2005-12-02-TailCallBug.ll Tue Jan 15 11:16:16 2013
@@ -1,5 +1,5 @@
 ; PR672
-; RUN: %lli -use-mcjit %s
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s
 ; XFAIL: mcjit-ia32
 
 define i32 @main() {

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2007-12-10-APIntLoadStore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2007-12-10-APIntLoadStore.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2007-12-10-APIntLoadStore.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2007-12-10-APIntLoadStore.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit -force-interpreter %s
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit -force-interpreter %s
 ; PR1836
 
 define i32 @main() {

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2008-06-05-APInt-OverAShr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2008-06-05-APInt-OverAShr.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2008-06-05-APInt-OverAShr.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2008-06-05-APInt-OverAShr.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit -force-interpreter=true %s | grep 1
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit -force-interpreter=true %s | grep 1
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
 target triple = "i686-pc-linux-gnu"

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2010-01-15-UndefValue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2010-01-15-UndefValue.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2010-01-15-UndefValue.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/2010-01-15-UndefValue.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit -force-interpreter=true %s
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit -force-interpreter=true %s > /dev/null
 
 define i32 @main() {
        %a = add i32 0, undef

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/fpbitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/fpbitcast.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/fpbitcast.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/fpbitcast.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit -force-interpreter=true %s | grep 40091eb8
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit -force-interpreter=true %s | grep 40091eb8
 ;
 define i32 @test(double %x) {
 entry:

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/hello.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/hello.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/hello.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/hello.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 @.LC0 = internal global [12 x i8] c"Hello World\00"		; <[12 x i8]*> [#uses=1]
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/hello2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/hello2.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/hello2.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/hello2.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 @X = global i32 7		; <i32*> [#uses=0]
 @msg = internal global [13 x i8] c"Hello World\0A\00"		; <[13 x i8]*> [#uses=1]

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/lit.local.cfg
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/lit.local.cfg?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/lit.local.cfg (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/lit.local.cfg Tue Jan 15 11:16:16 2013
@@ -8,13 +8,17 @@
 root = getRoot(config)
 
 targets = set(root.targets_to_build.split())
-if ('X86' in targets) | ('ARM' in targets) | ('Mips' in targets):
+if ('X86' in targets) | ('ARM' in targets) | ('Mips' in targets) | \
+   ('PowerPC' in targets):
     config.unsupported = False
 else:
     config.unsupported = True
 
-if root.host_arch not in ['x86', 'x86_64', 'ARM', 'Mips']:
+if root.host_arch not in ['x86', 'x86_64', 'ARM', 'Mips', 'PowerPC']:
     config.unsupported = True
 
-if root.host_os in ['Win32', 'Cygwin', 'MingW', 'Windows', 'Darwin']:
+if root.host_os in ['Darwin']:
+    config.unsupported = True
+
+if 'powerpc' in root.target_triple and not 'powerpc64' in root.target_triple:
     config.unsupported = True

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/simplesttest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/simplesttest.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/simplesttest.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/simplesttest.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @main() {
 	ret i32 0

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/simpletest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/simpletest.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/simpletest.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/simpletest.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @bar() {
 	ret i32 0

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/stubs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/stubs.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/stubs.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/stubs.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit -disable-lazy-compilation=false %s
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit -disable-lazy-compilation=false %s
 
 define i32 @main() nounwind {
 entry:

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-arith.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-arith.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-arith.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @main() {
 	%A = add i8 0, 12		; <i8> [#uses=1]

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-branch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-branch.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-branch.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-branch.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 ; test unconditional branch
 define i32 @main() {

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-call-no-external-funcs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-call-no-external-funcs.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-call-no-external-funcs.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-call-no-external-funcs.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @_Z14func_exit_codev() nounwind uwtable {
 entry:

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-call.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-call.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-call.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 declare void @exit(i32)
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-cast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-cast.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-cast.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-cast.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @foo() {
 	ret i32 0

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-common-symbols.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-common-symbols.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-common-symbols.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-common-symbols.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit -O0 -disable-lazy-compilation=false %s
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit -O0 -disable-lazy-compilation=false %s
 
 ; The intention of this test is to verify that symbols mapped to COMMON in ELF
 ; work as expected.

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-constantexpr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-constantexpr.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-constantexpr.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-constantexpr.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 ; This tests to make sure that we can evaluate weird constant expressions
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-fp-no-external-funcs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-fp-no-external-funcs.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-fp-no-external-funcs.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-fp-no-external-funcs.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define double @test(double* %DP, double %Arg) {
 	%D = load double* %DP		; <double> [#uses=1]

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-fp.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-fp.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-fp.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define double @test(double* %DP, double %Arg) {
 	%D = load double* %DP		; <double> [#uses=1]

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-global-init-nonzero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-global-init-nonzero.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-global-init-nonzero.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-global-init-nonzero.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 @count = global i32 1, align 4
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-global.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-global.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-global.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-global.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 @count = global i32 0, align 4
 

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-loadstore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-loadstore.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-loadstore.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-loadstore.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define void @test(i8* %P, i16* %P.upgrd.1, i32* %P.upgrd.2, i64* %P.upgrd.3) {
 	%V = load i8* %P		; <i8> [#uses=1]

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-local.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-local.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-local.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-local.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @main() nounwind uwtable {
 entry:

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-logical.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-logical.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-logical.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-logical.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @main() {
 	%A = and i8 4, 8		; <i8> [#uses=2]

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-loop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-loop.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-loop.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-loop.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @main() {
 ; <label>:0

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-phi.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-phi.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-phi.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 ; test phi node
 @Y = global i32 6		; <i32*> [#uses=1]

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-ret.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-ret.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-ret.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-ret.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 ; test return instructions
 define void @test1() {

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-return.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-return.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-return.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-return.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @main() nounwind uwtable {
 entry:

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-setcond-fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-setcond-fp.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-setcond-fp.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-setcond-fp.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 
 define i32 @main() {

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-setcond-int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-setcond-int.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-setcond-int.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-setcond-int.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @main() {
 	%int1 = add i32 0, 0		; <i32> [#uses=6]

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-shift.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-shift.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/MCJIT/test-shift.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: %lli -use-mcjit %s > /dev/null
+; RUN: %lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
 
 define i32 @main() {
 	%shamt = add i8 0, 1		; <i8> [#uses=8]

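Note on the MCJIT RUN-line updates above: every one of these tests switches from letting lli infer a target to passing -mtriple=%mcjit_triple explicitly. %mcjit_triple is a lit substitution defined in the top-level test configuration; a minimal sketch of how such a substitution can be wired up is shown below. The exact definition used on this branch may differ, so treat the names and the Windows special case as assumptions rather than the committed lit.cfg.

    # lit.cfg sketch (assumption): expose the default target triple as
    # %mcjit_triple, appending an ELF suffix for Windows-style triples so
    # MCJIT always produces an object format it can load back in.
    import re
    mcjit_triple = config.target_triple
    if re.search(r'cygwin|mingw32|win32', mcjit_triple):
        mcjit_triple += '-elf'
    config.substitutions.append(('%mcjit_triple', mcjit_triple))

With that substitution in place, a RUN line such as "%lli -mtriple=%mcjit_triple -use-mcjit %s" expands to an lli invocation pinned to the host triple instead of whatever default lli would otherwise pick.
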
Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/lit.local.cfg
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/lit.local.cfg?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/lit.local.cfg (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/lit.local.cfg Tue Jan 15 11:16:16 2013
@@ -1 +1,12 @@
 config.suffixes = ['.ll', '.c', '.cpp']
+
+def getRoot(config):
+    if not config.parent:
+        return config
+    return getRoot(config.parent)
+
+root = getRoot(config)
+
+if root.host_arch in ['PowerPC']:
+    config.unsupported = True
+

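Note on the lit.local.cfg change above: the added helper walks up the chained configs to the root and marks the entire ExecutionEngine test directory unsupported when the host architecture is PowerPC. The recursion is small enough to check in isolation; the sketch below uses a hypothetical Cfg class as a stand-in for lit's chained config objects, purely for illustration.

    # Standalone sketch of the getRoot() helper added above; Cfg is a
    # hypothetical stand-in for lit's config objects, which chain via .parent.
    class Cfg(object):
        def __init__(self, parent=None, host_arch=None):
            self.parent = parent
            self.host_arch = host_arch

    def getRoot(config):
        if not config.parent:
            return config
        return getRoot(config.parent)

    root = Cfg(host_arch='PowerPC')
    leaf = Cfg(parent=Cfg(parent=root))
    assert getRoot(leaf) is root
    assert getRoot(leaf).host_arch in ['PowerPC']
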
Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/test-fp-no-external-funcs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/test-fp-no-external-funcs.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/test-fp-no-external-funcs.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/test-fp-no-external-funcs.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,5 @@
 ; RUN: %lli  %s > /dev/null
+; XFAIL: arm
 
 define double @test(double* %DP, double %Arg) {
 	%D = load double* %DP		; <double> [#uses=1]

Modified: llvm/branches/AMDILBackend/test/ExecutionEngine/test-fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/ExecutionEngine/test-fp.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/ExecutionEngine/test-fp.ll (original)
+++ llvm/branches/AMDILBackend/test/ExecutionEngine/test-fp.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,5 @@
 ; RUN: %lli %s > /dev/null
+; XFAIL: arm
 
 define double @test(double* %DP, double %Arg) {
 	%D = load double* %DP		; <double> [#uses=1]

Modified: llvm/branches/AMDILBackend/test/Feature/linker_private_linkages.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Feature/linker_private_linkages.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Feature/linker_private_linkages.ll (original)
+++ llvm/branches/AMDILBackend/test/Feature/linker_private_linkages.ll Tue Jan 15 11:16:16 2013
@@ -4,4 +4,3 @@
 
 @foo = linker_private hidden global i32 0
 @bar = linker_private_weak hidden global i32 0
- at qux = linker_private_weak_def_auto global i32 0

Modified: llvm/branches/AMDILBackend/test/Instrumentation/AddressSanitizer/basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Instrumentation/AddressSanitizer/basic.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Instrumentation/AddressSanitizer/basic.ll (original)
+++ llvm/branches/AMDILBackend/test/Instrumentation/AddressSanitizer/basic.ll Tue Jan 15 11:16:16 2013
@@ -23,15 +23,14 @@
 ; CHECK:   icmp sge i8 %{{.*}}, %[[LOAD_SHADOW]]
 ; CHECK:   br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
 ;
-; The actual load comes next because ASan adds the crash block
-; to the end of the function.
-; CHECK:   %tmp1 = load i32* %a
-; CHECK:   ret i32 %tmp1
-
 ; The crash block reports the error.
 ; CHECK:   call void @__asan_report_load4(i64 %[[LOAD_ADDR]])
 ; CHECK:   unreachable
 ;
+; The actual load.
+; CHECK:   %tmp1 = load i32* %a
+; CHECK:   ret i32 %tmp1
+
 
 
 entry:
@@ -57,17 +56,36 @@
 ; CHECK:   icmp sge i8 %{{.*}}, %[[STORE_SHADOW]]
 ; CHECK:   br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
 ;
-; The actual load comes next because ASan adds the crash block
-; to the end of the function.
-; CHECK:   store i32 42, i32* %a
-; CHECK:   ret void
-;
 ; The crash block reports the error.
 ; CHECK:   call void @__asan_report_store4(i64 %[[STORE_ADDR]])
 ; CHECK:   unreachable
 ;
+; The actual store.
+; CHECK:   store i32 42, i32* %a
+; CHECK:   ret void
+;
 
 entry:
   store i32 42, i32* %a
   ret void
 }
+
+; Check that asan leaves just one alloca.
+
+declare void @alloca_test_use([10 x i8]*)
+define void @alloca_test() address_safety {
+entry:
+  %x = alloca [10 x i8], align 1
+  %y = alloca [10 x i8], align 1
+  %z = alloca [10 x i8], align 1
+  call void @alloca_test_use([10 x i8]* %x)
+  call void @alloca_test_use([10 x i8]* %y)
+  call void @alloca_test_use([10 x i8]* %z)
+  ret void
+}
+
+; CHECK: define void @alloca_test()
+; CHECK: = alloca
+; CHECK-NOT: = alloca
+; CHECK: ret void
+

Modified: llvm/branches/AMDILBackend/test/Instrumentation/AddressSanitizer/instrument_global.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Instrumentation/AddressSanitizer/instrument_global.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Instrumentation/AddressSanitizer/instrument_global.ll (original)
+++ llvm/branches/AMDILBackend/test/Instrumentation/AddressSanitizer/instrument_global.ll Tue Jan 15 11:16:16 2013
@@ -6,8 +6,8 @@
 ; If a global is present, __asan_[un]register_globals should be called from
 ; module ctor/dtor
 
-; CHECK: llvm.global_dtors
 ; CHECK: llvm.global_ctors
+; CHECK: llvm.global_dtors
 
 ; CHECK: define internal void @asan.module_ctor
 ; CHECK-NOT: ret

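Note on the atomic.ll updates below: the only change in each CHECK line is the final argument of the __tsan_atomic* call. The instrumentation appears to have switched from LLVM's internal AtomicOrdering numbering (1, 4, 8, 32 in the old CHECK lines) to the C++11 memory_order enumeration. The mapping, read straight off the new CHECK lines, is summarized in the sketch below; the integer values come from the diff itself, while the memory_order interpretation is an assumption.

    # Ordering constants now expected in the __tsan_atomic* CHECK lines.
    # Keys are the LLVM IR orderings used by these tests; values are the
    # integers the instrumented calls pass (matching C++11 memory_order).
    tsan_memory_order = {
        'unordered': 0,  # lowered as relaxed
        'monotonic': 0,  # relaxed
        'acquire':   2,
        'release':   3,
        'acq_rel':   4,
        'seq_cst':   5,
    }
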
Modified: llvm/branches/AMDILBackend/test/Instrumentation/ThreadSanitizer/atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Instrumentation/ThreadSanitizer/atomic.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Instrumentation/ThreadSanitizer/atomic.ll (original)
+++ llvm/branches/AMDILBackend/test/Instrumentation/ThreadSanitizer/atomic.ll Tue Jan 15 11:16:16 2013
@@ -8,7 +8,7 @@
   ret i8 %0
 }
 ; CHECK: atomic8_load_unordered
-; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 1)
+; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 0)
 
 define i8 @atomic8_load_monotonic(i8* %a) nounwind uwtable {
 entry:
@@ -16,7 +16,7 @@
   ret i8 %0
 }
 ; CHECK: atomic8_load_monotonic
-; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 1)
+; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 0)
 
 define i8 @atomic8_load_acquire(i8* %a) nounwind uwtable {
 entry:
@@ -24,7 +24,7 @@
   ret i8 %0
 }
 ; CHECK: atomic8_load_acquire
-; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 4)
+; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 2)
 
 define i8 @atomic8_load_seq_cst(i8* %a) nounwind uwtable {
 entry:
@@ -32,7 +32,7 @@
   ret i8 %0
 }
 ; CHECK: atomic8_load_seq_cst
-; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 32)
+; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 5)
 
 define void @atomic8_store_unordered(i8* %a) nounwind uwtable {
 entry:
@@ -40,7 +40,7 @@
   ret void
 }
 ; CHECK: atomic8_store_unordered
-; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 1)
+; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 0)
 
 define void @atomic8_store_monotonic(i8* %a) nounwind uwtable {
 entry:
@@ -48,7 +48,7 @@
   ret void
 }
 ; CHECK: atomic8_store_monotonic
-; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 1)
+; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 0)
 
 define void @atomic8_store_release(i8* %a) nounwind uwtable {
 entry:
@@ -56,7 +56,7 @@
   ret void
 }
 ; CHECK: atomic8_store_release
-; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 8)
+; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 3)
 
 define void @atomic8_store_seq_cst(i8* %a) nounwind uwtable {
 entry:
@@ -64,7 +64,287 @@
   ret void
 }
 ; CHECK: atomic8_store_seq_cst
-; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 32)
+; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 5)
+
+define void @atomic8_xchg_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_xchg_monotonic
+; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 0)
+
+define void @atomic8_add_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw add i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_add_monotonic
+; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 0)
+
+define void @atomic8_sub_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_sub_monotonic
+; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 0)
+
+define void @atomic8_and_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw and i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_and_monotonic
+; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 0)
+
+define void @atomic8_or_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw or i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_or_monotonic
+; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 0)
+
+define void @atomic8_xor_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_xor_monotonic
+; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 0)
+
+define void @atomic8_xchg_acquire(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i8* %a, i8 0 acquire
+  ret void
+}
+; CHECK: atomic8_xchg_acquire
+; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 2)
+
+define void @atomic8_add_acquire(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw add i8* %a, i8 0 acquire
+  ret void
+}
+; CHECK: atomic8_add_acquire
+; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 2)
+
+define void @atomic8_sub_acquire(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i8* %a, i8 0 acquire
+  ret void
+}
+; CHECK: atomic8_sub_acquire
+; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 2)
+
+define void @atomic8_and_acquire(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw and i8* %a, i8 0 acquire
+  ret void
+}
+; CHECK: atomic8_and_acquire
+; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 2)
+
+define void @atomic8_or_acquire(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw or i8* %a, i8 0 acquire
+  ret void
+}
+; CHECK: atomic8_or_acquire
+; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 2)
+
+define void @atomic8_xor_acquire(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i8* %a, i8 0 acquire
+  ret void
+}
+; CHECK: atomic8_xor_acquire
+; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 2)
+
+define void @atomic8_xchg_release(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i8* %a, i8 0 release
+  ret void
+}
+; CHECK: atomic8_xchg_release
+; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 3)
+
+define void @atomic8_add_release(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw add i8* %a, i8 0 release
+  ret void
+}
+; CHECK: atomic8_add_release
+; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 3)
+
+define void @atomic8_sub_release(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i8* %a, i8 0 release
+  ret void
+}
+; CHECK: atomic8_sub_release
+; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 3)
+
+define void @atomic8_and_release(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw and i8* %a, i8 0 release
+  ret void
+}
+; CHECK: atomic8_and_release
+; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 3)
+
+define void @atomic8_or_release(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw or i8* %a, i8 0 release
+  ret void
+}
+; CHECK: atomic8_or_release
+; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 3)
+
+define void @atomic8_xor_release(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i8* %a, i8 0 release
+  ret void
+}
+; CHECK: atomic8_xor_release
+; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 3)
+
+define void @atomic8_xchg_acq_rel(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i8* %a, i8 0 acq_rel
+  ret void
+}
+; CHECK: atomic8_xchg_acq_rel
+; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 4)
+
+define void @atomic8_add_acq_rel(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw add i8* %a, i8 0 acq_rel
+  ret void
+}
+; CHECK: atomic8_add_acq_rel
+; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 4)
+
+define void @atomic8_sub_acq_rel(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i8* %a, i8 0 acq_rel
+  ret void
+}
+; CHECK: atomic8_sub_acq_rel
+; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 4)
+
+define void @atomic8_and_acq_rel(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw and i8* %a, i8 0 acq_rel
+  ret void
+}
+; CHECK: atomic8_and_acq_rel
+; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 4)
+
+define void @atomic8_or_acq_rel(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw or i8* %a, i8 0 acq_rel
+  ret void
+}
+; CHECK: atomic8_or_acq_rel
+; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 4)
+
+define void @atomic8_xor_acq_rel(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i8* %a, i8 0 acq_rel
+  ret void
+}
+; CHECK: atomic8_xor_acq_rel
+; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 4)
+
+define void @atomic8_xchg_seq_cst(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i8* %a, i8 0 seq_cst
+  ret void
+}
+; CHECK: atomic8_xchg_seq_cst
+; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 5)
+
+define void @atomic8_add_seq_cst(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw add i8* %a, i8 0 seq_cst
+  ret void
+}
+; CHECK: atomic8_add_seq_cst
+; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 5)
+
+define void @atomic8_sub_seq_cst(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i8* %a, i8 0 seq_cst
+  ret void
+}
+; CHECK: atomic8_sub_seq_cst
+; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 5)
+
+define void @atomic8_and_seq_cst(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw and i8* %a, i8 0 seq_cst
+  ret void
+}
+; CHECK: atomic8_and_seq_cst
+; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 5)
+
+define void @atomic8_or_seq_cst(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw or i8* %a, i8 0 seq_cst
+  ret void
+}
+; CHECK: atomic8_or_seq_cst
+; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 5)
+
+define void @atomic8_xor_seq_cst(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i8* %a, i8 0 seq_cst
+  ret void
+}
+; CHECK: atomic8_xor_seq_cst
+; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 5)
+
+define void @atomic8_cas_monotonic(i8* %a) nounwind uwtable {
+entry:
+  cmpxchg i8* %a, i8 0, i8 1 monotonic
+  ret void
+}
+; CHECK: atomic8_cas_monotonic
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 0)
+
+define void @atomic8_cas_acquire(i8* %a) nounwind uwtable {
+entry:
+  cmpxchg i8* %a, i8 0, i8 1 acquire
+  ret void
+}
+; CHECK: atomic8_cas_acquire
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 2)
+
+define void @atomic8_cas_release(i8* %a) nounwind uwtable {
+entry:
+  cmpxchg i8* %a, i8 0, i8 1 release
+  ret void
+}
+; CHECK: atomic8_cas_release
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 3)
+
+define void @atomic8_cas_acq_rel(i8* %a) nounwind uwtable {
+entry:
+  cmpxchg i8* %a, i8 0, i8 1 acq_rel
+  ret void
+}
+; CHECK: atomic8_cas_acq_rel
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 4)
+
+define void @atomic8_cas_seq_cst(i8* %a) nounwind uwtable {
+entry:
+  cmpxchg i8* %a, i8 0, i8 1 seq_cst
+  ret void
+}
+; CHECK: atomic8_cas_seq_cst
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 5)
 
 define i16 @atomic16_load_unordered(i16* %a) nounwind uwtable {
 entry:
@@ -72,7 +352,7 @@
   ret i16 %0
 }
 ; CHECK: atomic16_load_unordered
-; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 1)
+; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 0)
 
 define i16 @atomic16_load_monotonic(i16* %a) nounwind uwtable {
 entry:
@@ -80,7 +360,7 @@
   ret i16 %0
 }
 ; CHECK: atomic16_load_monotonic
-; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 1)
+; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 0)
 
 define i16 @atomic16_load_acquire(i16* %a) nounwind uwtable {
 entry:
@@ -88,7 +368,7 @@
   ret i16 %0
 }
 ; CHECK: atomic16_load_acquire
-; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 4)
+; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 2)
 
 define i16 @atomic16_load_seq_cst(i16* %a) nounwind uwtable {
 entry:
@@ -96,7 +376,7 @@
   ret i16 %0
 }
 ; CHECK: atomic16_load_seq_cst
-; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 32)
+; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 5)
 
 define void @atomic16_store_unordered(i16* %a) nounwind uwtable {
 entry:
@@ -104,7 +384,7 @@
   ret void
 }
 ; CHECK: atomic16_store_unordered
-; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 1)
+; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 0)
 
 define void @atomic16_store_monotonic(i16* %a) nounwind uwtable {
 entry:
@@ -112,7 +392,7 @@
   ret void
 }
 ; CHECK: atomic16_store_monotonic
-; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 1)
+; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 0)
 
 define void @atomic16_store_release(i16* %a) nounwind uwtable {
 entry:
@@ -120,7 +400,7 @@
   ret void
 }
 ; CHECK: atomic16_store_release
-; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 8)
+; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 3)
 
 define void @atomic16_store_seq_cst(i16* %a) nounwind uwtable {
 entry:
@@ -128,196 +408,1380 @@
   ret void
 }
 ; CHECK: atomic16_store_seq_cst
-; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 32)
+; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 5)
 
-define i32 @atomic32_load_unordered(i32* %a) nounwind uwtable {
+define void @atomic16_xchg_monotonic(i16* %a) nounwind uwtable {
 entry:
-  %0 = load atomic i32* %a unordered, align 4
-  ret i32 %0
+  atomicrmw xchg i16* %a, i16 0 monotonic
+  ret void
 }
-; CHECK: atomic32_load_unordered
-; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 1)
+; CHECK: atomic16_xchg_monotonic
+; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 0)
 
-define i32 @atomic32_load_monotonic(i32* %a) nounwind uwtable {
+define void @atomic16_add_monotonic(i16* %a) nounwind uwtable {
 entry:
-  %0 = load atomic i32* %a monotonic, align 4
-  ret i32 %0
+  atomicrmw add i16* %a, i16 0 monotonic
+  ret void
 }
-; CHECK: atomic32_load_monotonic
-; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 1)
+; CHECK: atomic16_add_monotonic
+; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 0)
 
-define i32 @atomic32_load_acquire(i32* %a) nounwind uwtable {
+define void @atomic16_sub_monotonic(i16* %a) nounwind uwtable {
 entry:
-  %0 = load atomic i32* %a acquire, align 4
-  ret i32 %0
+  atomicrmw sub i16* %a, i16 0 monotonic
+  ret void
 }
-; CHECK: atomic32_load_acquire
-; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 4)
+; CHECK: atomic16_sub_monotonic
+; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 0)
 
-define i32 @atomic32_load_seq_cst(i32* %a) nounwind uwtable {
+define void @atomic16_and_monotonic(i16* %a) nounwind uwtable {
 entry:
-  %0 = load atomic i32* %a seq_cst, align 4
-  ret i32 %0
+  atomicrmw and i16* %a, i16 0 monotonic
+  ret void
 }
-; CHECK: atomic32_load_seq_cst
-; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 32)
+; CHECK: atomic16_and_monotonic
+; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 0)
 
-define void @atomic32_store_unordered(i32* %a) nounwind uwtable {
+define void @atomic16_or_monotonic(i16* %a) nounwind uwtable {
 entry:
-  store atomic i32 0, i32* %a unordered, align 4
+  atomicrmw or i16* %a, i16 0 monotonic
   ret void
 }
-; CHECK: atomic32_store_unordered
-; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 1)
+; CHECK: atomic16_or_monotonic
+; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 0)
 
-define void @atomic32_store_monotonic(i32* %a) nounwind uwtable {
+define void @atomic16_xor_monotonic(i16* %a) nounwind uwtable {
 entry:
-  store atomic i32 0, i32* %a monotonic, align 4
+  atomicrmw xor i16* %a, i16 0 monotonic
   ret void
 }
-; CHECK: atomic32_store_monotonic
-; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 1)
+; CHECK: atomic16_xor_monotonic
+; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 0)
 
-define void @atomic32_store_release(i32* %a) nounwind uwtable {
+define void @atomic16_xchg_acquire(i16* %a) nounwind uwtable {
 entry:
-  store atomic i32 0, i32* %a release, align 4
+  atomicrmw xchg i16* %a, i16 0 acquire
   ret void
 }
-; CHECK: atomic32_store_release
-; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 8)
+; CHECK: atomic16_xchg_acquire
+; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 2)
 
-define void @atomic32_store_seq_cst(i32* %a) nounwind uwtable {
+define void @atomic16_add_acquire(i16* %a) nounwind uwtable {
 entry:
-  store atomic i32 0, i32* %a seq_cst, align 4
+  atomicrmw add i16* %a, i16 0 acquire
   ret void
 }
-; CHECK: atomic32_store_seq_cst
-; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 32)
+; CHECK: atomic16_add_acquire
+; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 2)
 
-define i64 @atomic64_load_unordered(i64* %a) nounwind uwtable {
+define void @atomic16_sub_acquire(i16* %a) nounwind uwtable {
 entry:
-  %0 = load atomic i64* %a unordered, align 8
-  ret i64 %0
+  atomicrmw sub i16* %a, i16 0 acquire
+  ret void
 }
-; CHECK: atomic64_load_unordered
-; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 1)
+; CHECK: atomic16_sub_acquire
+; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 2)
 
-define i64 @atomic64_load_monotonic(i64* %a) nounwind uwtable {
+define void @atomic16_and_acquire(i16* %a) nounwind uwtable {
 entry:
-  %0 = load atomic i64* %a monotonic, align 8
-  ret i64 %0
+  atomicrmw and i16* %a, i16 0 acquire
+  ret void
 }
-; CHECK: atomic64_load_monotonic
-; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 1)
+; CHECK: atomic16_and_acquire
+; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 2)
 
-define i64 @atomic64_load_acquire(i64* %a) nounwind uwtable {
+define void @atomic16_or_acquire(i16* %a) nounwind uwtable {
 entry:
-  %0 = load atomic i64* %a acquire, align 8
-  ret i64 %0
+  atomicrmw or i16* %a, i16 0 acquire
+  ret void
 }
-; CHECK: atomic64_load_acquire
-; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 4)
+; CHECK: atomic16_or_acquire
+; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 2)
 
-define i64 @atomic64_load_seq_cst(i64* %a) nounwind uwtable {
+define void @atomic16_xor_acquire(i16* %a) nounwind uwtable {
 entry:
-  %0 = load atomic i64* %a seq_cst, align 8
-  ret i64 %0
+  atomicrmw xor i16* %a, i16 0 acquire
+  ret void
 }
-; CHECK: atomic64_load_seq_cst
-; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 32)
+; CHECK: atomic16_xor_acquire
+; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 2)
 
-define void @atomic64_store_unordered(i64* %a) nounwind uwtable {
+define void @atomic16_xchg_release(i16* %a) nounwind uwtable {
 entry:
-  store atomic i64 0, i64* %a unordered, align 8
+  atomicrmw xchg i16* %a, i16 0 release
   ret void
 }
-; CHECK: atomic64_store_unordered
-; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 1)
+; CHECK: atomic16_xchg_release
+; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 3)
 
-define void @atomic64_store_monotonic(i64* %a) nounwind uwtable {
+define void @atomic16_add_release(i16* %a) nounwind uwtable {
 entry:
-  store atomic i64 0, i64* %a monotonic, align 8
+  atomicrmw add i16* %a, i16 0 release
   ret void
 }
-; CHECK: atomic64_store_monotonic
-; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 1)
+; CHECK: atomic16_add_release
+; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 3)
 
-define void @atomic64_store_release(i64* %a) nounwind uwtable {
+define void @atomic16_sub_release(i16* %a) nounwind uwtable {
 entry:
-  store atomic i64 0, i64* %a release, align 8
+  atomicrmw sub i16* %a, i16 0 release
   ret void
 }
-; CHECK: atomic64_store_release
-; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 8)
+; CHECK: atomic16_sub_release
+; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 3)
 
-define void @atomic64_store_seq_cst(i64* %a) nounwind uwtable {
+define void @atomic16_and_release(i16* %a) nounwind uwtable {
 entry:
-  store atomic i64 0, i64* %a seq_cst, align 8
+  atomicrmw and i16* %a, i16 0 release
   ret void
 }
-; CHECK: atomic64_store_seq_cst
-; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 32)
+; CHECK: atomic16_and_release
+; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 3)
 
-define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable {
+define void @atomic16_or_release(i16* %a) nounwind uwtable {
 entry:
-  %0 = load atomic i128* %a unordered, align 16
-  ret i128 %0
+  atomicrmw or i16* %a, i16 0 release
+  ret void
 }
-; CHECK: atomic128_load_unordered
-; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 1)
+; CHECK: atomic16_or_release
+; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 3)
 
-define i128 @atomic128_load_monotonic(i128* %a) nounwind uwtable {
+define void @atomic16_xor_release(i16* %a) nounwind uwtable {
 entry:
-  %0 = load atomic i128* %a monotonic, align 16
-  ret i128 %0
+  atomicrmw xor i16* %a, i16 0 release
+  ret void
 }
-; CHECK: atomic128_load_monotonic
-; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 1)
+; CHECK: atomic16_xor_release
+; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 3)
 
-define i128 @atomic128_load_acquire(i128* %a) nounwind uwtable {
+define void @atomic16_xchg_acq_rel(i16* %a) nounwind uwtable {
 entry:
-  %0 = load atomic i128* %a acquire, align 16
-  ret i128 %0
+  atomicrmw xchg i16* %a, i16 0 acq_rel
+  ret void
 }
-; CHECK: atomic128_load_acquire
-; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 4)
+; CHECK: atomic16_xchg_acq_rel
+; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 4)
 
-define i128 @atomic128_load_seq_cst(i128* %a) nounwind uwtable {
+define void @atomic16_add_acq_rel(i16* %a) nounwind uwtable {
 entry:
-  %0 = load atomic i128* %a seq_cst, align 16
-  ret i128 %0
+  atomicrmw add i16* %a, i16 0 acq_rel
+  ret void
 }
-; CHECK: atomic128_load_seq_cst
-; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 32)
+; CHECK: atomic16_add_acq_rel
+; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 4)
 
-define void @atomic128_store_unordered(i128* %a) nounwind uwtable {
+define void @atomic16_sub_acq_rel(i16* %a) nounwind uwtable {
 entry:
-  store atomic i128 0, i128* %a unordered, align 16
+  atomicrmw sub i16* %a, i16 0 acq_rel
   ret void
 }
-; CHECK: atomic128_store_unordered
-; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 1)
+; CHECK: atomic16_sub_acq_rel
+; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 4)
 
-define void @atomic128_store_monotonic(i128* %a) nounwind uwtable {
+define void @atomic16_and_acq_rel(i16* %a) nounwind uwtable {
 entry:
-  store atomic i128 0, i128* %a monotonic, align 16
+  atomicrmw and i16* %a, i16 0 acq_rel
   ret void
 }
-; CHECK: atomic128_store_monotonic
-; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 1)
+; CHECK: atomic16_and_acq_rel
+; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 4)
 
-define void @atomic128_store_release(i128* %a) nounwind uwtable {
+define void @atomic16_or_acq_rel(i16* %a) nounwind uwtable {
 entry:
-  store atomic i128 0, i128* %a release, align 16
+  atomicrmw or i16* %a, i16 0 acq_rel
   ret void
 }
-; CHECK: atomic128_store_release
-; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 8)
+; CHECK: atomic16_or_acq_rel
+; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 4)
 
-define void @atomic128_store_seq_cst(i128* %a) nounwind uwtable {
+define void @atomic16_xor_acq_rel(i16* %a) nounwind uwtable {
 entry:
-  store atomic i128 0, i128* %a seq_cst, align 16
+  atomicrmw xor i16* %a, i16 0 acq_rel
   ret void
 }
-; CHECK: atomic128_store_seq_cst
-; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 32)
+; CHECK: atomic16_xor_acq_rel
+; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 4)
+
+define void @atomic16_xchg_seq_cst(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i16* %a, i16 0 seq_cst
+  ret void
+}
+; CHECK: atomic16_xchg_seq_cst
+; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 5)
+
+define void @atomic16_add_seq_cst(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw add i16* %a, i16 0 seq_cst
+  ret void
+}
+; CHECK: atomic16_add_seq_cst
+; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 5)
+
+define void @atomic16_sub_seq_cst(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i16* %a, i16 0 seq_cst
+  ret void
+}
+; CHECK: atomic16_sub_seq_cst
+; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 5)
+
+define void @atomic16_and_seq_cst(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw and i16* %a, i16 0 seq_cst
+  ret void
+}
+; CHECK: atomic16_and_seq_cst
+; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 5)
+
+define void @atomic16_or_seq_cst(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw or i16* %a, i16 0 seq_cst
+  ret void
+}
+; CHECK: atomic16_or_seq_cst
+; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 5)
+
+define void @atomic16_xor_seq_cst(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i16* %a, i16 0 seq_cst
+  ret void
+}
+; CHECK: atomic16_xor_seq_cst
+; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 5)
+
+define void @atomic16_cas_monotonic(i16* %a) nounwind uwtable {
+entry:
+  cmpxchg i16* %a, i16 0, i16 1 monotonic
+  ret void
+}
+; CHECK: atomic16_cas_monotonic
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 0)
+
+define void @atomic16_cas_acquire(i16* %a) nounwind uwtable {
+entry:
+  cmpxchg i16* %a, i16 0, i16 1 acquire
+  ret void
+}
+; CHECK: atomic16_cas_acquire
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 2)
+
+define void @atomic16_cas_release(i16* %a) nounwind uwtable {
+entry:
+  cmpxchg i16* %a, i16 0, i16 1 release
+  ret void
+}
+; CHECK: atomic16_cas_release
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 3)
+
+define void @atomic16_cas_acq_rel(i16* %a) nounwind uwtable {
+entry:
+  cmpxchg i16* %a, i16 0, i16 1 acq_rel
+  ret void
+}
+; CHECK: atomic16_cas_acq_rel
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 4)
+
+define void @atomic16_cas_seq_cst(i16* %a) nounwind uwtable {
+entry:
+  cmpxchg i16* %a, i16 0, i16 1 seq_cst
+  ret void
+}
+; CHECK: atomic16_cas_seq_cst
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 5)
+
+define i32 @atomic32_load_unordered(i32* %a) nounwind uwtable {
+entry:
+  %0 = load atomic i32* %a unordered, align 4
+  ret i32 %0
+}
+; CHECK: atomic32_load_unordered
+; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 0)
+
+define i32 @atomic32_load_monotonic(i32* %a) nounwind uwtable {
+entry:
+  %0 = load atomic i32* %a monotonic, align 4
+  ret i32 %0
+}
+; CHECK: atomic32_load_monotonic
+; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 0)
+
+define i32 @atomic32_load_acquire(i32* %a) nounwind uwtable {
+entry:
+  %0 = load atomic i32* %a acquire, align 4
+  ret i32 %0
+}
+; CHECK: atomic32_load_acquire
+; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 2)
+
+define i32 @atomic32_load_seq_cst(i32* %a) nounwind uwtable {
+entry:
+  %0 = load atomic i32* %a seq_cst, align 4
+  ret i32 %0
+}
+; CHECK: atomic32_load_seq_cst
+; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 5)
+
+define void @atomic32_store_unordered(i32* %a) nounwind uwtable {
+entry:
+  store atomic i32 0, i32* %a unordered, align 4
+  ret void
+}
+; CHECK: atomic32_store_unordered
+; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 0)
+
+define void @atomic32_store_monotonic(i32* %a) nounwind uwtable {
+entry:
+  store atomic i32 0, i32* %a monotonic, align 4
+  ret void
+}
+; CHECK: atomic32_store_monotonic
+; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 0)
+
+define void @atomic32_store_release(i32* %a) nounwind uwtable {
+entry:
+  store atomic i32 0, i32* %a release, align 4
+  ret void
+}
+; CHECK: atomic32_store_release
+; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 3)
+
+define void @atomic32_store_seq_cst(i32* %a) nounwind uwtable {
+entry:
+  store atomic i32 0, i32* %a seq_cst, align 4
+  ret void
+}
+; CHECK: atomic32_store_seq_cst
+; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 5)
+
+define void @atomic32_xchg_monotonic(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i32* %a, i32 0 monotonic
+  ret void
+}
+; CHECK: atomic32_xchg_monotonic
+; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 0)
+
+define void @atomic32_add_monotonic(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw add i32* %a, i32 0 monotonic
+  ret void
+}
+; CHECK: atomic32_add_monotonic
+; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 0)
+
+define void @atomic32_sub_monotonic(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i32* %a, i32 0 monotonic
+  ret void
+}
+; CHECK: atomic32_sub_monotonic
+; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 0)
+
+define void @atomic32_and_monotonic(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw and i32* %a, i32 0 monotonic
+  ret void
+}
+; CHECK: atomic32_and_monotonic
+; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 0)
+
+define void @atomic32_or_monotonic(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw or i32* %a, i32 0 monotonic
+  ret void
+}
+; CHECK: atomic32_or_monotonic
+; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 0)
+
+define void @atomic32_xor_monotonic(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i32* %a, i32 0 monotonic
+  ret void
+}
+; CHECK: atomic32_xor_monotonic
+; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 0)
+
+define void @atomic32_xchg_acquire(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i32* %a, i32 0 acquire
+  ret void
+}
+; CHECK: atomic32_xchg_acquire
+; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 2)
+
+define void @atomic32_add_acquire(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw add i32* %a, i32 0 acquire
+  ret void
+}
+; CHECK: atomic32_add_acquire
+; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 2)
+
+define void @atomic32_sub_acquire(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i32* %a, i32 0 acquire
+  ret void
+}
+; CHECK: atomic32_sub_acquire
+; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 2)
+
+define void @atomic32_and_acquire(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw and i32* %a, i32 0 acquire
+  ret void
+}
+; CHECK: atomic32_and_acquire
+; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 2)
+
+define void @atomic32_or_acquire(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw or i32* %a, i32 0 acquire
+  ret void
+}
+; CHECK: atomic32_or_acquire
+; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 2)
+
+define void @atomic32_xor_acquire(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i32* %a, i32 0 acquire
+  ret void
+}
+; CHECK: atomic32_xor_acquire
+; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 2)
+
+define void @atomic32_xchg_release(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i32* %a, i32 0 release
+  ret void
+}
+; CHECK: atomic32_xchg_release
+; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 3)
+
+define void @atomic32_add_release(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw add i32* %a, i32 0 release
+  ret void
+}
+; CHECK: atomic32_add_release
+; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 3)
+
+define void @atomic32_sub_release(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i32* %a, i32 0 release
+  ret void
+}
+; CHECK: atomic32_sub_release
+; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 3)
+
+define void @atomic32_and_release(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw and i32* %a, i32 0 release
+  ret void
+}
+; CHECK: atomic32_and_release
+; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 3)
+
+define void @atomic32_or_release(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw or i32* %a, i32 0 release
+  ret void
+}
+; CHECK: atomic32_or_release
+; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 3)
+
+define void @atomic32_xor_release(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i32* %a, i32 0 release
+  ret void
+}
+; CHECK: atomic32_xor_release
+; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 3)
+
+define void @atomic32_xchg_acq_rel(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i32* %a, i32 0 acq_rel
+  ret void
+}
+; CHECK: atomic32_xchg_acq_rel
+; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 4)
+
+define void @atomic32_add_acq_rel(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw add i32* %a, i32 0 acq_rel
+  ret void
+}
+; CHECK: atomic32_add_acq_rel
+; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 4)
+
+define void @atomic32_sub_acq_rel(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i32* %a, i32 0 acq_rel
+  ret void
+}
+; CHECK: atomic32_sub_acq_rel
+; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 4)
+
+define void @atomic32_and_acq_rel(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw and i32* %a, i32 0 acq_rel
+  ret void
+}
+; CHECK: atomic32_and_acq_rel
+; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 4)
+
+define void @atomic32_or_acq_rel(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw or i32* %a, i32 0 acq_rel
+  ret void
+}
+; CHECK: atomic32_or_acq_rel
+; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 4)
+
+define void @atomic32_xor_acq_rel(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i32* %a, i32 0 acq_rel
+  ret void
+}
+; CHECK: atomic32_xor_acq_rel
+; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 4)
+
+define void @atomic32_xchg_seq_cst(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i32* %a, i32 0 seq_cst
+  ret void
+}
+; CHECK: atomic32_xchg_seq_cst
+; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 5)
+
+define void @atomic32_add_seq_cst(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw add i32* %a, i32 0 seq_cst
+  ret void
+}
+; CHECK: atomic32_add_seq_cst
+; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 5)
+
+define void @atomic32_sub_seq_cst(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i32* %a, i32 0 seq_cst
+  ret void
+}
+; CHECK: atomic32_sub_seq_cst
+; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 5)
+
+define void @atomic32_and_seq_cst(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw and i32* %a, i32 0 seq_cst
+  ret void
+}
+; CHECK: atomic32_and_seq_cst
+; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 5)
+
+define void @atomic32_or_seq_cst(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw or i32* %a, i32 0 seq_cst
+  ret void
+}
+; CHECK: atomic32_or_seq_cst
+; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 5)
+
+define void @atomic32_xor_seq_cst(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i32* %a, i32 0 seq_cst
+  ret void
+}
+; CHECK: atomic32_xor_seq_cst
+; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 5)
+
+define void @atomic32_cas_monotonic(i32* %a) nounwind uwtable {
+entry:
+  cmpxchg i32* %a, i32 0, i32 1 monotonic
+  ret void
+}
+; CHECK: atomic32_cas_monotonic
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 0)
+
+define void @atomic32_cas_acquire(i32* %a) nounwind uwtable {
+entry:
+  cmpxchg i32* %a, i32 0, i32 1 acquire
+  ret void
+}
+; CHECK: atomic32_cas_acquire
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 2)
+
+define void @atomic32_cas_release(i32* %a) nounwind uwtable {
+entry:
+  cmpxchg i32* %a, i32 0, i32 1 release
+  ret void
+}
+; CHECK: atomic32_cas_release
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 3)
+
+define void @atomic32_cas_acq_rel(i32* %a) nounwind uwtable {
+entry:
+  cmpxchg i32* %a, i32 0, i32 1 acq_rel
+  ret void
+}
+; CHECK: atomic32_cas_acq_rel
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 4)
+
+define void @atomic32_cas_seq_cst(i32* %a) nounwind uwtable {
+entry:
+  cmpxchg i32* %a, i32 0, i32 1 seq_cst
+  ret void
+}
+; CHECK: atomic32_cas_seq_cst
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 5)
+
+define i64 @atomic64_load_unordered(i64* %a) nounwind uwtable {
+entry:
+  %0 = load atomic i64* %a unordered, align 8
+  ret i64 %0
+}
+; CHECK: atomic64_load_unordered
+; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 0)
+
+define i64 @atomic64_load_monotonic(i64* %a) nounwind uwtable {
+entry:
+  %0 = load atomic i64* %a monotonic, align 8
+  ret i64 %0
+}
+; CHECK: atomic64_load_monotonic
+; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 0)
+
+define i64 @atomic64_load_acquire(i64* %a) nounwind uwtable {
+entry:
+  %0 = load atomic i64* %a acquire, align 8
+  ret i64 %0
+}
+; CHECK: atomic64_load_acquire
+; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 2)
+
+define i64 @atomic64_load_seq_cst(i64* %a) nounwind uwtable {
+entry:
+  %0 = load atomic i64* %a seq_cst, align 8
+  ret i64 %0
+}
+; CHECK: atomic64_load_seq_cst
+; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 5)
+
+define void @atomic64_store_unordered(i64* %a) nounwind uwtable {
+entry:
+  store atomic i64 0, i64* %a unordered, align 8
+  ret void
+}
+; CHECK: atomic64_store_unordered
+; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 0)
+
+define void @atomic64_store_monotonic(i64* %a) nounwind uwtable {
+entry:
+  store atomic i64 0, i64* %a monotonic, align 8
+  ret void
+}
+; CHECK: atomic64_store_monotonic
+; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 0)
+
+define void @atomic64_store_release(i64* %a) nounwind uwtable {
+entry:
+  store atomic i64 0, i64* %a release, align 8
+  ret void
+}
+; CHECK: atomic64_store_release
+; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 3)
+
+define void @atomic64_store_seq_cst(i64* %a) nounwind uwtable {
+entry:
+  store atomic i64 0, i64* %a seq_cst, align 8
+  ret void
+}
+; CHECK: atomic64_store_seq_cst
+; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 5)
+
+define void @atomic64_xchg_monotonic(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i64* %a, i64 0 monotonic
+  ret void
+}
+; CHECK: atomic64_xchg_monotonic
+; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 0)
+
+define void @atomic64_add_monotonic(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw add i64* %a, i64 0 monotonic
+  ret void
+}
+; CHECK: atomic64_add_monotonic
+; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 0)
+
+define void @atomic64_sub_monotonic(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i64* %a, i64 0 monotonic
+  ret void
+}
+; CHECK: atomic64_sub_monotonic
+; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 0)
+
+define void @atomic64_and_monotonic(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw and i64* %a, i64 0 monotonic
+  ret void
+}
+; CHECK: atomic64_and_monotonic
+; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 0)
+
+define void @atomic64_or_monotonic(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw or i64* %a, i64 0 monotonic
+  ret void
+}
+; CHECK: atomic64_or_monotonic
+; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 0)
+
+define void @atomic64_xor_monotonic(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i64* %a, i64 0 monotonic
+  ret void
+}
+; CHECK: atomic64_xor_monotonic
+; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 0)
+
+define void @atomic64_xchg_acquire(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i64* %a, i64 0 acquire
+  ret void
+}
+; CHECK: atomic64_xchg_acquire
+; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 2)
+
+define void @atomic64_add_acquire(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw add i64* %a, i64 0 acquire
+  ret void
+}
+; CHECK: atomic64_add_acquire
+; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 2)
+
+define void @atomic64_sub_acquire(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i64* %a, i64 0 acquire
+  ret void
+}
+; CHECK: atomic64_sub_acquire
+; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 2)
+
+define void @atomic64_and_acquire(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw and i64* %a, i64 0 acquire
+  ret void
+}
+; CHECK: atomic64_and_acquire
+; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 2)
+
+define void @atomic64_or_acquire(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw or i64* %a, i64 0 acquire
+  ret void
+}
+; CHECK: atomic64_or_acquire
+; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 2)
+
+define void @atomic64_xor_acquire(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i64* %a, i64 0 acquire
+  ret void
+}
+; CHECK: atomic64_xor_acquire
+; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 2)
+
+define void @atomic64_xchg_release(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i64* %a, i64 0 release
+  ret void
+}
+; CHECK: atomic64_xchg_release
+; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 3)
+
+define void @atomic64_add_release(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw add i64* %a, i64 0 release
+  ret void
+}
+; CHECK: atomic64_add_release
+; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 3)
+
+define void @atomic64_sub_release(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i64* %a, i64 0 release
+  ret void
+}
+; CHECK: atomic64_sub_release
+; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 3)
+
+define void @atomic64_and_release(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw and i64* %a, i64 0 release
+  ret void
+}
+; CHECK: atomic64_and_release
+; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 3)
+
+define void @atomic64_or_release(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw or i64* %a, i64 0 release
+  ret void
+}
+; CHECK: atomic64_or_release
+; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 3)
+
+define void @atomic64_xor_release(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i64* %a, i64 0 release
+  ret void
+}
+; CHECK: atomic64_xor_release
+; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 3)
+
+define void @atomic64_xchg_acq_rel(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i64* %a, i64 0 acq_rel
+  ret void
+}
+; CHECK: atomic64_xchg_acq_rel
+; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 4)
+
+define void @atomic64_add_acq_rel(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw add i64* %a, i64 0 acq_rel
+  ret void
+}
+; CHECK: atomic64_add_acq_rel
+; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 4)
+
+define void @atomic64_sub_acq_rel(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i64* %a, i64 0 acq_rel
+  ret void
+}
+; CHECK: atomic64_sub_acq_rel
+; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 4)
+
+define void @atomic64_and_acq_rel(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw and i64* %a, i64 0 acq_rel
+  ret void
+}
+; CHECK: atomic64_and_acq_rel
+; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 4)
+
+define void @atomic64_or_acq_rel(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw or i64* %a, i64 0 acq_rel
+  ret void
+}
+; CHECK: atomic64_or_acq_rel
+; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 4)
+
+define void @atomic64_xor_acq_rel(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i64* %a, i64 0 acq_rel
+  ret void
+}
+; CHECK: atomic64_xor_acq_rel
+; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 4)
+
+define void @atomic64_xchg_seq_cst(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i64* %a, i64 0 seq_cst
+  ret void
+}
+; CHECK: atomic64_xchg_seq_cst
+; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 5)
+
+define void @atomic64_add_seq_cst(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw add i64* %a, i64 0 seq_cst
+  ret void
+}
+; CHECK: atomic64_add_seq_cst
+; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 5)
+
+define void @atomic64_sub_seq_cst(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i64* %a, i64 0 seq_cst
+  ret void
+}
+; CHECK: atomic64_sub_seq_cst
+; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 5)
+
+define void @atomic64_and_seq_cst(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw and i64* %a, i64 0 seq_cst
+  ret void
+}
+; CHECK: atomic64_and_seq_cst
+; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 5)
+
+define void @atomic64_or_seq_cst(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw or i64* %a, i64 0 seq_cst
+  ret void
+}
+; CHECK: atomic64_or_seq_cst
+; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 5)
+
+define void @atomic64_xor_seq_cst(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i64* %a, i64 0 seq_cst
+  ret void
+}
+; CHECK: atomic64_xor_seq_cst
+; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 5)
+
+define void @atomic64_cas_monotonic(i64* %a) nounwind uwtable {
+entry:
+  cmpxchg i64* %a, i64 0, i64 1 monotonic
+  ret void
+}
+; CHECK: atomic64_cas_monotonic
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 0)
+
+define void @atomic64_cas_acquire(i64* %a) nounwind uwtable {
+entry:
+  cmpxchg i64* %a, i64 0, i64 1 acquire
+  ret void
+}
+; CHECK: atomic64_cas_acquire
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 2)
+
+define void @atomic64_cas_release(i64* %a) nounwind uwtable {
+entry:
+  cmpxchg i64* %a, i64 0, i64 1 release
+  ret void
+}
+; CHECK: atomic64_cas_release
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 3)
+
+define void @atomic64_cas_acq_rel(i64* %a) nounwind uwtable {
+entry:
+  cmpxchg i64* %a, i64 0, i64 1 acq_rel
+  ret void
+}
+; CHECK: atomic64_cas_acq_rel
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 4)
+
+define void @atomic64_cas_seq_cst(i64* %a) nounwind uwtable {
+entry:
+  cmpxchg i64* %a, i64 0, i64 1 seq_cst
+  ret void
+}
+; CHECK: atomic64_cas_seq_cst
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 5)
+
+define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable {
+entry:
+  %0 = load atomic i128* %a unordered, align 16
+  ret i128 %0
+}
+; CHECK: atomic128_load_unordered
+; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 0)
+
+define i128 @atomic128_load_monotonic(i128* %a) nounwind uwtable {
+entry:
+  %0 = load atomic i128* %a monotonic, align 16
+  ret i128 %0
+}
+; CHECK: atomic128_load_monotonic
+; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 0)
+
+define i128 @atomic128_load_acquire(i128* %a) nounwind uwtable {
+entry:
+  %0 = load atomic i128* %a acquire, align 16
+  ret i128 %0
+}
+; CHECK: atomic128_load_acquire
+; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 2)
+
+define i128 @atomic128_load_seq_cst(i128* %a) nounwind uwtable {
+entry:
+  %0 = load atomic i128* %a seq_cst, align 16
+  ret i128 %0
+}
+; CHECK: atomic128_load_seq_cst
+; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 5)
+
+define void @atomic128_store_unordered(i128* %a) nounwind uwtable {
+entry:
+  store atomic i128 0, i128* %a unordered, align 16
+  ret void
+}
+; CHECK: atomic128_store_unordered
+; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 0)
+
+define void @atomic128_store_monotonic(i128* %a) nounwind uwtable {
+entry:
+  store atomic i128 0, i128* %a monotonic, align 16
+  ret void
+}
+; CHECK: atomic128_store_monotonic
+; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 0)
+
+define void @atomic128_store_release(i128* %a) nounwind uwtable {
+entry:
+  store atomic i128 0, i128* %a release, align 16
+  ret void
+}
+; CHECK: atomic128_store_release
+; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 3)
+
+define void @atomic128_store_seq_cst(i128* %a) nounwind uwtable {
+entry:
+  store atomic i128 0, i128* %a seq_cst, align 16
+  ret void
+}
+; CHECK: atomic128_store_seq_cst
+; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 5)
+
+define void @atomic128_xchg_monotonic(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i128* %a, i128 0 monotonic
+  ret void
+}
+; CHECK: atomic128_xchg_monotonic
+; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 0)
+
+define void @atomic128_add_monotonic(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw add i128* %a, i128 0 monotonic
+  ret void
+}
+; CHECK: atomic128_add_monotonic
+; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 0)
+
+define void @atomic128_sub_monotonic(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i128* %a, i128 0 monotonic
+  ret void
+}
+; CHECK: atomic128_sub_monotonic
+; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 0)
+
+define void @atomic128_and_monotonic(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw and i128* %a, i128 0 monotonic
+  ret void
+}
+; CHECK: atomic128_and_monotonic
+; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 0)
+
+define void @atomic128_or_monotonic(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw or i128* %a, i128 0 monotonic
+  ret void
+}
+; CHECK: atomic128_or_monotonic
+; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 0)
+
+define void @atomic128_xor_monotonic(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i128* %a, i128 0 monotonic
+  ret void
+}
+; CHECK: atomic128_xor_monotonic
+; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 0)
+
+define void @atomic128_xchg_acquire(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i128* %a, i128 0 acquire
+  ret void
+}
+; CHECK: atomic128_xchg_acquire
+; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 2)
+
+define void @atomic128_add_acquire(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw add i128* %a, i128 0 acquire
+  ret void
+}
+; CHECK: atomic128_add_acquire
+; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 2)
+
+define void @atomic128_sub_acquire(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i128* %a, i128 0 acquire
+  ret void
+}
+; CHECK: atomic128_sub_acquire
+; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 2)
+
+define void @atomic128_and_acquire(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw and i128* %a, i128 0 acquire
+  ret void
+}
+; CHECK: atomic128_and_acquire
+; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 2)
+
+define void @atomic128_or_acquire(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw or i128* %a, i128 0 acquire
+  ret void
+}
+; CHECK: atomic128_or_acquire
+; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 2)
+
+define void @atomic128_xor_acquire(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i128* %a, i128 0 acquire
+  ret void
+}
+; CHECK: atomic128_xor_acquire
+; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 2)
+
+define void @atomic128_xchg_release(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i128* %a, i128 0 release
+  ret void
+}
+; CHECK: atomic128_xchg_release
+; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 3)
+
+define void @atomic128_add_release(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw add i128* %a, i128 0 release
+  ret void
+}
+; CHECK: atomic128_add_release
+; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 3)
+
+define void @atomic128_sub_release(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i128* %a, i128 0 release
+  ret void
+}
+; CHECK: atomic128_sub_release
+; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 3)
+
+define void @atomic128_and_release(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw and i128* %a, i128 0 release
+  ret void
+}
+; CHECK: atomic128_and_release
+; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 3)
+
+define void @atomic128_or_release(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw or i128* %a, i128 0 release
+  ret void
+}
+; CHECK: atomic128_or_release
+; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 3)
+
+define void @atomic128_xor_release(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i128* %a, i128 0 release
+  ret void
+}
+; CHECK: atomic128_xor_release
+; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 3)
+
+define void @atomic128_xchg_acq_rel(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i128* %a, i128 0 acq_rel
+  ret void
+}
+; CHECK: atomic128_xchg_acq_rel
+; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 4)
+
+define void @atomic128_add_acq_rel(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw add i128* %a, i128 0 acq_rel
+  ret void
+}
+; CHECK: atomic128_add_acq_rel
+; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 4)
+
+define void @atomic128_sub_acq_rel(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i128* %a, i128 0 acq_rel
+  ret void
+}
+; CHECK: atomic128_sub_acq_rel
+; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 4)
+
+define void @atomic128_and_acq_rel(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw and i128* %a, i128 0 acq_rel
+  ret void
+}
+; CHECK: atomic128_and_acq_rel
+; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 4)
+
+define void @atomic128_or_acq_rel(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw or i128* %a, i128 0 acq_rel
+  ret void
+}
+; CHECK: atomic128_or_acq_rel
+; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 4)
+
+define void @atomic128_xor_acq_rel(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i128* %a, i128 0 acq_rel
+  ret void
+}
+; CHECK: atomic128_xor_acq_rel
+; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 4)
+
+define void @atomic128_xchg_seq_cst(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i128* %a, i128 0 seq_cst
+  ret void
+}
+; CHECK: atomic128_xchg_seq_cst
+; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 5)
+
+define void @atomic128_add_seq_cst(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw add i128* %a, i128 0 seq_cst
+  ret void
+}
+; CHECK: atomic128_add_seq_cst
+; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 5)
+
+define void @atomic128_sub_seq_cst(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i128* %a, i128 0 seq_cst
+  ret void
+}
+; CHECK: atomic128_sub_seq_cst
+; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 5)
+
+define void @atomic128_and_seq_cst(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw and i128* %a, i128 0 seq_cst
+  ret void
+}
+; CHECK: atomic128_and_seq_cst
+; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 5)
+
+define void @atomic128_or_seq_cst(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw or i128* %a, i128 0 seq_cst
+  ret void
+}
+; CHECK: atomic128_or_seq_cst
+; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 5)
+
+define void @atomic128_xor_seq_cst(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i128* %a, i128 0 seq_cst
+  ret void
+}
+; CHECK: atomic128_xor_seq_cst
+; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 5)
+
+define void @atomic128_cas_monotonic(i128* %a) nounwind uwtable {
+entry:
+  cmpxchg i128* %a, i128 0, i128 1 monotonic
+  ret void
+}
+; CHECK: atomic128_cas_monotonic
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 0)
+
+define void @atomic128_cas_acquire(i128* %a) nounwind uwtable {
+entry:
+  cmpxchg i128* %a, i128 0, i128 1 acquire
+  ret void
+}
+; CHECK: atomic128_cas_acquire
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 2)
+
+define void @atomic128_cas_release(i128* %a) nounwind uwtable {
+entry:
+  cmpxchg i128* %a, i128 0, i128 1 release
+  ret void
+}
+; CHECK: atomic128_cas_release
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 3)
+
+define void @atomic128_cas_acq_rel(i128* %a) nounwind uwtable {
+entry:
+  cmpxchg i128* %a, i128 0, i128 1 acq_rel
+  ret void
+}
+; CHECK: atomic128_cas_acq_rel
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 4)
+
+define void @atomic128_cas_seq_cst(i128* %a) nounwind uwtable {
+entry:
+  cmpxchg i128* %a, i128 0, i128 1 seq_cst
+  ret void
+}
+; CHECK: atomic128_cas_seq_cst
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 5)
+
+define void @atomic_signal_fence_acquire() nounwind uwtable {
+entry:
+  fence singlethread acquire
+  ret void
+}
+; CHECK: atomic_signal_fence_acquire
+; CHECK: call void @__tsan_atomic_signal_fence(i32 2)
+
+define void @atomic_thread_fence_acquire() nounwind uwtable {
+entry:
+  fence  acquire
+  ret void
+}
+; CHECK: atomic_thread_fence_acquire
+; CHECK: call void @__tsan_atomic_thread_fence(i32 2)
+
+define void @atomic_signal_fence_release() nounwind uwtable {
+entry:
+  fence singlethread release
+  ret void
+}
+; CHECK: atomic_signal_fence_release
+; CHECK: call void @__tsan_atomic_signal_fence(i32 3)
+
+define void @atomic_thread_fence_release() nounwind uwtable {
+entry:
+  fence  release
+  ret void
+}
+; CHECK: atomic_thread_fence_release
+; CHECK: call void @__tsan_atomic_thread_fence(i32 3)
+
+define void @atomic_signal_fence_acq_rel() nounwind uwtable {
+entry:
+  fence singlethread acq_rel
+  ret void
+}
+; CHECK: atomic_signal_fence_acq_rel
+; CHECK: call void @__tsan_atomic_signal_fence(i32 4)
+
+define void @atomic_thread_fence_acq_rel() nounwind uwtable {
+entry:
+  fence  acq_rel
+  ret void
+}
+; CHECK: atomic_thread_fence_acq_rel
+; CHECK: call void @__tsan_atomic_thread_fence(i32 4)
+
+define void @atomic_signal_fence_seq_cst() nounwind uwtable {
+entry:
+  fence singlethread seq_cst
+  ret void
+}
+; CHECK: atomic_signal_fence_seq_cst
+; CHECK: call void @__tsan_atomic_signal_fence(i32 5)
+
+define void @atomic_thread_fence_seq_cst() nounwind uwtable {
+entry:
+  fence  seq_cst
+  ret void
+}
+; CHECK: atomic_thread_fence_seq_cst
+; CHECK: call void @__tsan_atomic_thread_fence(i32 5)

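For reference, the integer that each __tsan_atomic* call above receives as its final argument encodes the LLVM atomic ordering using the usual C/C++11 memory_order numbering, with both 'unordered' and 'monotonic' lowered to relaxed. A minimal sketch of that mapping, inferred from the CHECK lines and using assumed names (the exact typedef in the TSan runtime may differ):

    typedef enum {
      mo_relaxed = 0,  /* 'unordered' and 'monotonic' both become 0 */
      mo_consume = 1,  /* not exercised by this test */
      mo_acquire = 2,
      mo_release = 3,
      mo_acq_rel = 4,
      mo_seq_cst = 5
    } tsan_memory_order;

The same constants are passed to __tsan_atomic_signal_fence and __tsan_atomic_thread_fence in the fence tests at the end of the file.
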
Modified: llvm/branches/AMDILBackend/test/MC/ARM/arm-arithmetic-aliases.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/ARM/arm-arithmetic-aliases.s?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/ARM/arm-arithmetic-aliases.s (original)
+++ llvm/branches/AMDILBackend/test/MC/ARM/arm-arithmetic-aliases.s Tue Jan 15 11:16:16 2013
@@ -124,3 +124,7 @@
 @ CHECK: bicseq r2, r2, #6              @ encoding: [0x06,0x20,0xd2,0x03]
 @ CHECK: bicseq r2, r2, r3              @ encoding: [0x03,0x20,0xd2,0x01]
 @ CHECK: bicseq r2, r2, r3              @ encoding: [0x03,0x20,0xd2,0x01]
+
+add r0, pc, #123
+
+@ CHECK: adr	r0, #123                @ encoding: [0x7b,0x00,0x8f,0xe2]

Modified: llvm/branches/AMDILBackend/test/MC/ARM/basic-thumb-instructions.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/ARM/basic-thumb-instructions.s?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/ARM/basic-thumb-instructions.s (original)
+++ llvm/branches/AMDILBackend/test/MC/ARM/basic-thumb-instructions.s Tue Jan 15 11:16:16 2013
@@ -259,8 +259,8 @@
 
 @ CHECK: ldr	r1, _foo                @ encoding: [A,0x49]
              @   fixup A - offset: 0, value: _foo, kind: fixup_arm_thumb_cp
-@ CHECK: ldr     r3, #604                @ encoding: [0x97,0x4b]
-@ CHECK: ldr     r3, #368                @ encoding: [0x5c,0x4b]
+@ CHECK: ldr     r3, [pc, #604]         @ encoding: [0x97,0x4b]
+@ CHECK: ldr     r3, [pc, #368]         @ encoding: [0x5c,0x4b]
 
 @------------------------------------------------------------------------------
 @ LDR (register)

Modified: llvm/branches/AMDILBackend/test/MC/ARM/diagnostics.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/ARM/diagnostics.s?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/ARM/diagnostics.s (original)
+++ llvm/branches/AMDILBackend/test/MC/ARM/diagnostics.s Tue Jan 15 11:16:16 2013
@@ -47,7 +47,47 @@
 @ CHECK-ERRORS: error: immediate shift value out of range
 @ CHECK-ERRORS:         adc r4, r5, r6, ror #32
 
+        @ Out of range shift immediate values for load/store.
+        str r1, [r2, r3, lsl #invalid]
+        ldr r4, [r5], r6, lsl #-1
+        pld r4, [r5, r6, lsl #32]
+        str r4, [r5], r6, lsr #-1
+        ldr r4, [r5, r6, lsr #33]
+        pld r4, [r5, r6, asr #-1]
+        str r4, [r5, r6, asr #33]
+        ldr r4, [r5, r6, ror #-1]
+        pld r4, [r5, r6, ror #32]
+        pld r4, [r5, r6, rrx #0]
 
+@ CHECK-ERRORS: error: shift amount must be an immediate
+@ CHECK-ERRORS:         str r1, [r2, r3, lsl #invalid]
+@ CHECK-ERRORS:                              ^
+@ CHECK-ERRORS: error: immediate shift value out of range
+@ CHECK-ERRORS:         ldr r4, [r5], r6, lsl #-1
+@ CHECK-ERRORS:                              ^
+@ CHECK-ERRORS: error: immediate shift value out of range
+@ CHECK-ERRORS:         pld r4, [r5, r6, lsl #32]
+@ CHECK-ERRORS:                              ^
+@ CHECK-ERRORS: error: immediate shift value out of range
+@ CHECK-ERRORS:         str r4, [r5], r6, lsr #-1
+@ CHECK-ERRORS:                              ^
+@ CHECK-ERRORS: error: immediate shift value out of range
+@ CHECK-ERRORS:         ldr r4, [r5, r6, lsr #33]
+@ CHECK-ERRORS:                              ^
+@ CHECK-ERRORS: error: immediate shift value out of range
+@ CHECK-ERRORS:         pld r4, [r5, r6, asr #-1]
+@ CHECK-ERRORS:                              ^
+@ CHECK-ERRORS: error: immediate shift value out of range
+@ CHECK-ERRORS:         str r4, [r5, r6, asr #33]
+@ CHECK-ERRORS:                              ^
+@ CHECK-ERRORS: error: immediate shift value out of range
+@ CHECK-ERRORS:         ldr r4, [r5, r6, ror #-1]
+@ CHECK-ERRORS:                              ^
+@ CHECK-ERRORS: error: immediate shift value out of range
+@ CHECK-ERRORS:         pld r4, [r5, r6, ror #32]
+@ CHECK-ERRORS: error: ']' expected
+@ CHECK-ERRORS:         pld r4, [r5, r6, rrx #0]
+        
         @ Out of range 16-bit immediate on BKPT
         bkpt #65536
 
@@ -321,3 +361,13 @@
 @ CHECK-ERRORS: error: invalid operand for instruction
 @ CHECK-ERRORS:         cps f,#1
 @ CHECK-ERRORS:               ^
+
+        @ Bad operands for msr
+        msr #0, #0
+        msr foo, #0
+@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS:         msr #0, #0
+@ CHECK-ERRORS:             ^
+@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS:         msr foo, #0
+@ CHECK-ERRORS:             ^

Modified: llvm/branches/AMDILBackend/test/MC/AsmParser/directive_lcomm.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/AsmParser/directive_lcomm.s?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/AsmParser/directive_lcomm.s (original)
+++ llvm/branches/AMDILBackend/test/MC/AsmParser/directive_lcomm.s Tue Jan 15 11:16:16 2013
@@ -1,9 +1,14 @@
 # RUN: llvm-mc -triple i386-apple-darwin10 %s | FileCheck %s
+# RUN: llvm-mc -triple i386-pc-mingw32 %s | FileCheck %s
+# RUN: not llvm-mc -triple i386-linux-gnu %s 2>&1 | FileCheck %s -check-prefix=ERROR
 
 # CHECK: TEST0:
-# CHECK: .zerofill __DATA,__bss,a,7,4
-# CHECK: .zerofill __DATA,__bss,b,8
-# CHECK: .zerofill __DATA,__bss,c,0
+# CHECK: .lcomm a,7,4
+# CHECK: .lcomm b,8
+# CHECK: .lcomm c,0
+
+# ELF doesn't like alignment on .lcomm.
+# ERROR: alignment not supported on this target
 TEST0:  
         .lcomm a, 8-1, 4
         .lcomm b,8

Modified: llvm/branches/AMDILBackend/test/MC/AsmParser/labels.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/AsmParser/labels.s?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/AsmParser/labels.s (original)
+++ llvm/branches/AMDILBackend/test/MC/AsmParser/labels.s Tue Jan 15 11:16:16 2013
@@ -41,7 +41,7 @@
 // CHECK: .comm "a 6",1
         .comm "a 6", 1
 
-// CHECK: .zerofill __DATA,__bss,"a 7",1,0
+// CHECK: .lcomm "a 7",1
         .lcomm "a 7", 1
 
 // FIXME: We don't bother to support .lsym.

Modified: llvm/branches/AMDILBackend/test/MC/AsmParser/macro-args.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/AsmParser/macro-args.s?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/AsmParser/macro-args.s (original)
+++ llvm/branches/AMDILBackend/test/MC/AsmParser/macro-args.s Tue Jan 15 11:16:16 2013
@@ -4,10 +4,18 @@
     movl   \var at GOTOFF(%ebx),\re2g
 .endm
 
+.macro GET_DEFAULT var, re2g=%ebx, re3g=%ecx
+movl 2(\re2g, \re3g, 2), \var
+.endm
+
+GET         is_sse, %eax
+// CHECK: movl  is_sse at GOTOFF(%ebx), %eax
 
-GET    is_sse, %eax
+GET_DEFAULT %ebx, , %edx
+// CHECK: movl  2(%ebx,%edx,2), %ebx
 
-// CHECK: movl	is_sse at GOTOFF(%ebx), %eax
+GET_DEFAULT %ebx, %edx
+// CHECK: movl  2(%edx,%ecx,2), %ebx
 
 .macro bar
     .long $n

Modified: llvm/branches/AMDILBackend/test/MC/AsmParser/macro-rept-err1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/AsmParser/macro-rept-err1.s?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/AsmParser/macro-rept-err1.s (original)
+++ llvm/branches/AMDILBackend/test/MC/AsmParser/macro-rept-err1.s Tue Jan 15 11:16:16 2013
@@ -3,4 +3,4 @@
 
 .endr
 
-// CHECK: unexpected '.endr' directive, no current .rept
+// CHECK: unmatched '.endr' directive

Modified: llvm/branches/AMDILBackend/test/MC/AsmParser/macros.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/AsmParser/macros.s?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/AsmParser/macros.s (original)
+++ llvm/branches/AMDILBackend/test/MC/AsmParser/macros.s Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -triple x86_64-apple-darwin10 %s 2> %t.err | FileCheck %s
+// RUN: not llvm-mc -triple i386-unknown-unknown %s 2> %t.err | FileCheck %s
 // RUN: FileCheck --check-prefix=CHECK-ERRORS %s < %t.err
 
 .macro .test0
@@ -28,12 +28,66 @@
 .globl "$0 $1 $2 $$3 $n"
 .endmacro
 
-// CHECK: .globl	"1 23  $3 2"
-test3 1,2 3
+// CHECK: .globl	"1 (23)  $3 2"
+test3 1, (2 3)
+
+// CHECK: .globl "1 2  $3 2"
+test3 1 2
 
 .macro test4
 .globl "$0 -- $1"
 .endmacro
 
-// CHECK: .globl	"ab)(,) -- (cd)"
-test4 a b)(,),(cd)
+// CHECK: .globl  "(ab)(,)) -- (cd)"
+test4 (a b)(,)),(cd)
+
+// CHECK: .globl  "(ab)(,)) -- (cd)"
+test4 (a b)(,)),(cd)
+
+.macro test5 _a
+.globl "\_a"
+.endm
+
+// CHECK: .globl zed1
+test5 zed1
+
+.macro test6 $a
+.globl "\$a"
+.endm
+
+// CHECK: .globl zed2
+test6 zed2
+
+.macro test7 .a
+.globl "\.a"
+.endm
+
+// CHECK: .globl zed3
+test7 zed3
+
+.macro test8 _a, _b, _c
+.globl "\_a,\_b,\_c"
+.endmacro
+
+.macro test9 _a _b _c
+.globl "\_a \_b \_c"
+.endmacro
+
+// CHECK: .globl  "a,b,c"
+test8 a, b, c
+// CHECK: .globl  "%1,%2,%3"
+test8 %1 %2 %3 #a comment
+// CHECK: .globl "x-y,z,1"
+test8 x - y z 1
+// CHECK: .globl  "1 2 3"
+test9 1, 2,3
+
+test8 1,2 3
+// CHECK-ERRORS: error: macro argument '_c' is missing
+// CHECK-ERRORS-NEXT: test8 1,2 3
+// CHECK-ERRORS-NEXT:           ^
+
+test8 1 2, 3
+// CHECK-ERRORS: error: expected ' ' for macro argument separator
+// CHECK-ERRORS-NEXT:test8 1 2, 3
+// CHECK-ERRORS-NEXT:         ^

Removed: llvm/branches/AMDILBackend/test/MC/COFF/global_ctors.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/COFF/global_ctors.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/COFF/global_ctors.ll (original)
+++ llvm/branches/AMDILBackend/test/MC/COFF/global_ctors.ll (removed)
@@ -1,28 +0,0 @@
-; Test that global ctors are emitted into the proper COFF section for the
-; target. Mingw uses .ctors, whereas MSVC uses .CRT$XC*.
-; RUN: llc < %s -mtriple i686-pc-win32 | FileCheck %s --check-prefix WIN32 
-; RUN: llc < %s -mtriple x86_64-pc-win32 | FileCheck %s --check-prefix WIN32 
-; RUN: llc < %s -mtriple i686-pc-mingw32 | FileCheck %s --check-prefix MINGW32 
-; RUN: llc < %s -mtriple x86_64-pc-mingw32 | FileCheck %s --check-prefix MINGW32 
-
- at .str = private unnamed_addr constant [13 x i8] c"constructing\00", align 1
- at .str2 = private unnamed_addr constant [5 x i8] c"main\00", align 1
-
- at llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @a_global_ctor }]
-
-declare i32 @puts(i8*)
-
-define void @a_global_ctor() nounwind {
-  %1 = call i32 @puts(i8* getelementptr inbounds ([13 x i8]* @.str, i32 0, i32 0))
-  ret void
-}
-
-define i32 @main() nounwind {
-  %1 = call i32 @puts(i8* getelementptr inbounds ([5 x i8]* @.str2, i32 0, i32 0))
-  ret i32 0
-}
-
-; WIN32: .section .CRT$XCU,"r"
-; WIN32: a_global_ctor
-; MINGW32: .section .ctors,"w"
-; MINGW32: a_global_ctor

Modified: llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/invalid-VLD1DUPq8_UPD-arm.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/invalid-VLD1DUPq8_UPD-arm.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/invalid-VLD1DUPq8_UPD-arm.txt (original)
+++ llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/invalid-VLD1DUPq8_UPD-arm.txt Tue Jan 15 11:16:16 2013
@@ -1,5 +1,4 @@
-# RUN: llvm-mc --disassemble %s -triple=armv7-unknown-unknwon -mcpu=cortex-a8 2>&1 | grep "invalid instruction encoding"
-# XFAIL: *
+# RUN: llvm-mc --disassemble %s -triple=armv7-unknown-unknwon -mcpu=cortex-a8 2>&1 | FileCheck %s
 
 # Opcode=737 Name=VLD1DUPq8_UPD Format=ARM_FORMAT_NLdSt(30)
 #  31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0 
@@ -9,3 +8,4 @@
 # 
 # 'a' == 1 and data_size == 8 is invalid
 0x3d 0x3c 0xa0 0xf4
+# CHECK: invalid instruction encoding

Modified: llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/neon.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/neon.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/neon.txt (original)
+++ llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/neon.txt Tue Jan 15 11:16:16 2013
@@ -1931,14 +1931,6 @@
 # CHECK: vmov.f32	d0, #1.600000e+01
 # CHECK: vmov.f32	q0, #1.600000e+01
 
-# rdar://10798451
-0xe7 0xf9 0x32 0x1d
-# CHECK vld2.8	{d17[], d19[]}, [r7, :16], r2
-0xe7 0xf9 0x3d 0x1d
-# CHECK vld2.8	{d17[], d19[]}, [r7, :16]!
-0xe7 0xf9 0x3f 0x1d
-# CHECK vld2.8	{d17[], d19[]}, [r7, :16]
-
 # rdar://11034702
 0x0d 0x87 0x04 0xf4
 # CHECK: vst1.8	{d8}, [r4]!            

Modified: llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/neont2.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/neont2.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/neont2.txt (original)
+++ llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/neont2.txt Tue Jan 15 11:16:16 2013
@@ -2042,3 +2042,13 @@
 # CHECK: vld2.16	{d0[], d2[]}, [r3], r4  
 0xa3 0xf9 0xa4 0x0d
 # CHECK: vld2.32	{d0[], d2[]}, [r3], r4  
+
+
+# rdar://10798451
+0xe7 0xf9 0x32 0x1d
+# CHECK: vld2.8	{d17[], d19[]}, [r7, :16], r2
+0xe7 0xf9 0x3d 0x1d
+# CHECK: vld2.8	{d17[], d19[]}, [r7, :16]!
+0xe7 0xf9 0x3f 0x1d
+# CHECK: vld2.8	{d17[], d19[]}, [r7, :16]
+

Modified: llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb-printf.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb-printf.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb-printf.txt (original)
+++ llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb-printf.txt Tue Jan 15 11:16:16 2013
@@ -7,17 +7,17 @@
 # CHECK-NEXT:	add	r3, sp, #20
 # CHECK-NEXT:	ldr	r5, [r3], #4
 # CHECK-NEXT:	str	r3, [sp]
-# CHECK-NEXT:	ldr	r3, #52
+# CHECK-NEXT:	ldr	r3, [pc, #52]
 # CHECK-NEXT:	add	r3, pc
 # CHECK-NEXT:	ldr	r0, [r3]
 # CHECK-NEXT:	ldr	r4, [r0]
-# CHECK-NEXT:	ldr	r0, #48
+# CHECK-NEXT:	ldr	r0, [pc, #48]
 # CHECK-NEXT:	add	r0, pc
 # CHECK-NEXT:	ldr	r0, [r0]
 # CHECK-NEXT:	ldr	r0, [r0]
 # CHECK-NEXT:	blx	#191548
 # CHECK-NEXT:	cbnz	r0, #6
-# CHECK-NEXT:	ldr	r1, #40
+# CHECK-NEXT:	ldr	r1, [pc, #40]
 # CHECK-NEXT:	add	r1, pc
 # CHECK-NEXT:	ldr	r1, [r1]
 # CHECK-NEXT:	b	#0

Modified: llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb-tests.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb-tests.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb-tests.txt (original)
+++ llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb-tests.txt Tue Jan 15 11:16:16 2013
@@ -30,7 +30,7 @@
 # CHECK:	ldm	r0!, {r1}
 0x02 0xc8
 
-# CHECK:	ldr	r5, #432
+# CHECK:	ldr	r5, [pc, #432]
 0x6c 0x4d
 
 # CHECK:	str	r0, [r3]

Modified: llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb1.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb1.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb1.txt (original)
+++ llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb1.txt Tue Jan 15 11:16:16 2013
@@ -160,6 +160,7 @@
 # CHECK: ldr r1, [sp]
 # CHECK: ldr r2, [sp, #24]
 # CHECK: ldr r3, [sp, #1020]
+# CHECK: ldr r1, [pc, #12]
 
 
 0x29 0x68
@@ -168,6 +169,7 @@
 0x00 0x99
 0x06 0x9a
 0xff 0x9b
+0x03 0x49
 
 #------------------------------------------------------------------------------
 # LDR (register)

Modified: llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb2.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb2.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb2.txt (original)
+++ llvm/branches/AMDILBackend/test/MC/Disassembler/ARM/thumb2.txt Tue Jan 15 11:16:16 2013
@@ -169,6 +169,9 @@
 
 0x13 0xf5 0xce 0xa9
 
+# CHECK: b.w   #208962
+
+0x33 0xf0 0x21 0xb8 # rdar://12585795
 
 #------------------------------------------------------------------------------
 # BFC

Modified: llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64.txt (original)
+++ llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64.txt Tue Jan 15 11:16:16 2013
@@ -3,7 +3,7 @@
 # CHECK: daddiu $11, $26, 31949
 0x67 0x4b 0x7c 0xcd
 
-# CHECK: daddu $26, $at, $11
+# CHECK: daddu $26, $1, $11
 0x00 0x2b 0xd0 0x2d
 
 # CHECK: ddiv $zero, $26, $22
@@ -30,10 +30,10 @@
 # CHECK: dsllv $gp, $27, $24
 0x03 0x1b 0xe0 0x14
 
-# CHECK: dsra $at, $at, 30
+# CHECK: dsra $1, $1, 30
 0x00 0x01 0x0f 0xbb
 
-# CHECK: dsrav $at, $at, $fp
+# CHECK: dsrav $1, $1, $fp
 0x03 0xc1 0x08 0x17
 
 # CHECK: dsrl $10, $gp, 24
@@ -45,10 +45,10 @@
 # CHECK: dsubu $gp, $27, $24
 0x03 0x78 0xe0 0x2f
 
-# CHECK: lw $27, -15155($at)
+# CHECK: lw $27, -15155($1)
 0x8c 0x3b 0xc4 0xcd
 
-# CHECK: lui $at, 1
+# CHECK: lui $1, 1
 0x3c 0x01 0x00 0x01
 
 # CHECK: lwu $3, -1746($3)
@@ -57,7 +57,7 @@
 # CHECK: lui $ra, 1
 0x3c 0x1f 0x00 0x01
 
-# CHECK: sw $26, -15159($at)
+# CHECK: sw $26, -15159($1)
 0xac 0x3a 0xc4 0xc9
 
 # CHECK: ld $26, 3958($zero)

Modified: llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64_le.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64_le.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64_le.txt (original)
+++ llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64_le.txt Tue Jan 15 11:16:16 2013
@@ -3,7 +3,7 @@
 # CHECK: daddiu $11, $26, 31949
 0xcd 0x7c 0x4b 0x67
 
-# CHECK: daddu $26, $at, $11
+# CHECK: daddu $26, $1, $11
 0x2d 0xd0 0x2b 0x00
 
 # CHECK: ddiv $zero, $26, $22
@@ -30,10 +30,10 @@
 # CHECK: dsllv $gp, $27, $24
 0x14 0xe0 0x1b 0x03
 
-# CHECK: dsra $at, $at, 30
+# CHECK: dsra $1, $1, 30
 0xbb 0x0f 0x01 0x00
 
-# CHECK: dsrav $at, $at, $fp
+# CHECK: dsrav $1, $1, $fp
 0x17 0x08 0xc1 0x03
 
 # CHECK: dsrl $10, $gp, 24
@@ -45,10 +45,10 @@
 # CHECK: dsubu $gp, $27, $24
 0x2f 0xe0 0x78 0x03
 
-# CHECK: lw $27, -15155($at)
+# CHECK: lw $27, -15155($1)
 0xcd 0xc4 0x3b 0x8c
 
-# CHECK: lui $at, 1
+# CHECK: lui $1, 1
 0x01 0x00 0x01 0x3c
 
 # CHECK: lwu $3, -1746($3)
@@ -57,7 +57,7 @@
 # CHECK: lui $ra, 1
 0x01 0x00 0x1f 0x3c
 
-# CHECK: sw $26, -15159($at)
+# CHECK: sw $26, -15159($1)
 0xc9 0xc4 0x3a 0xac
 
 # CHECK: ld $26, 3958($zero)

Modified: llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64r2.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64r2.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64r2.txt (original)
+++ llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64r2.txt Tue Jan 15 11:16:16 2013
@@ -3,7 +3,7 @@
 # CHECK: daddiu $11, $26, 31949
 0x67 0x4b 0x7c 0xcd
 
-# CHECK: daddu $26, $at, $11
+# CHECK: daddu $26, $1, $11
 0x00 0x2b 0xd0 0x2d
 
 # CHECK: ddiv $zero, $26, $22
@@ -30,10 +30,10 @@
 # CHECK: dsllv $gp, $27, $24
 0x03 0x1b 0xe0 0x14
 
-# CHECK: dsra $at, $at, 30
+# CHECK: dsra $1, $1, 30
 0x00 0x01 0x0f 0xbb
 
-# CHECK: dsrav $at, $at, $fp
+# CHECK: dsrav $1, $1, $fp
 0x03 0xc1 0x08 0x17
 
 # CHECK: dsrl $10, $gp, 24
@@ -45,10 +45,10 @@
 # CHECK: dsubu $gp, $27, $24
 0x03 0x78 0xe0 0x2f
 
-# CHECK: lw $27, -15155($at)
+# CHECK: lw $27, -15155($1)
 0x8c 0x3b 0xc4 0xcd
 
-# CHECK: lui $at, 1
+# CHECK: lui $1, 1
 0x3c 0x01 0x00 0x01
 
 # CHECK: lwu $3, -1746($3)
@@ -57,7 +57,7 @@
 # CHECK: lui $ra, 1
 0x3c 0x1f 0x00 0x01
 
-# CHECK: sw $26, -15159($at)
+# CHECK: sw $26, -15159($1)
 0xac 0x3a 0xc4 0xc9
 
 # CHECK: ld $26, 3958($zero)

Modified: llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64r2_le.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64r2_le.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64r2_le.txt (original)
+++ llvm/branches/AMDILBackend/test/MC/Disassembler/Mips/mips64r2_le.txt Tue Jan 15 11:16:16 2013
@@ -3,7 +3,7 @@
 # CHECK: daddiu $11, $26, 31949
 0xcd 0x7c 0x4b 0x67
 
-# CHECK: daddu $26, $at, $11
+# CHECK: daddu $26, $1, $11
 0x2d 0xd0 0x2b 0x00
 
 # CHECK: ddiv $zero, $26, $22
@@ -30,10 +30,10 @@
 # CHECK: dsllv $gp, $27, $24
 0x14 0xe0 0x1b 0x03
 
-# CHECK: dsra $at, $at, 30
+# CHECK: dsra $1, $1, 30
 0xbb 0x0f 0x01 0x00
 
-# CHECK: dsrav $at, $at, $fp
+# CHECK: dsrav $1, $1, $fp
 0x17 0x08 0xc1 0x03
 
 # CHECK: dsrl $10, $gp, 24
@@ -45,10 +45,10 @@
 # CHECK: dsubu $gp, $27, $24
 0x2f 0xe0 0x78 0x03
 
-# CHECK: lw $27, -15155($at)
+# CHECK: lw $27, -15155($1)
 0xcd 0xc4 0x3b 0x8c
 
-# CHECK: lui $at, 1
+# CHECK: lui $1, 1
 0x01 0x00 0x01 0x3c
 
 # CHECK: lwu $3, -1746($3)
@@ -57,7 +57,7 @@
 # CHECK: lui $ra, 1
 0x01 0x00 0x1f 0x3c
 
-# CHECK: sw $26, -15159($at)
+# CHECK: sw $26, -15159($1)
 0xc9 0xc4 0x3a 0xac
 
 # CHECK: ld $26, 3958($zero)

Modified: llvm/branches/AMDILBackend/test/MC/MachO/lit.local.cfg
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/MachO/lit.local.cfg?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/MachO/lit.local.cfg (original)
+++ llvm/branches/AMDILBackend/test/MC/MachO/lit.local.cfg Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-config.suffixes = ['.s']
+config.suffixes = ['.s', '.ll']
 
 targets = set(config.root.targets_to_build.split())
 if not 'X86' in targets:

Modified: llvm/branches/AMDILBackend/test/MC/Mips/elf-N64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Mips/elf-N64.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Mips/elf-N64.ll (original)
+++ llvm/branches/AMDILBackend/test/MC/Mips/elf-N64.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: llc -filetype=obj -march=mips64el -mcpu=mips64 %s -o - | elf-dump --dump-section-data  | FileCheck %s
+; RUN: llc -filetype=obj -march=mips64el -mcpu=mips64 -disable-mips-delay-filler %s -o - | elf-dump --dump-section-data  | FileCheck %s
 
 ; Check for N64 relocation production.
 ;

Modified: llvm/branches/AMDILBackend/test/MC/Mips/higher_highest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Mips/higher_highest.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Mips/higher_highest.ll (original)
+++ llvm/branches/AMDILBackend/test/MC/Mips/higher_highest.ll Tue Jan 15 11:16:16 2013
@@ -1,5 +1,8 @@
-; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64  -force-mips-long-branch -filetype=obj < %s -o - | elf-dump --dump-section-data | FileCheck %s
-
+; DISABLE: llc -march=mips64el -mcpu=mips64 -mattr=n64  -force-mips-long-branch -filetype=obj < %s -o - | elf-dump --dump-section-data | FileCheck %s
+; RUN: false
+; XFAIL: *
+; Disabled because currently we don't have a way to generate these relocations.
+;
 ; Check that the R_MIPS_HIGHER and R_MIPS_HIGHEST relocations were created.
 
 ; CHECK:     ('r_type', 0x1d)

Modified: llvm/branches/AMDILBackend/test/MC/Mips/mips64shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Mips/mips64shift.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Mips/mips64shift.ll (original)
+++ llvm/branches/AMDILBackend/test/MC/Mips/mips64shift.ll Tue Jan 15 11:16:16 2013
@@ -1,5 +1,8 @@
-; RUN: llc -march=mips64el -filetype=obj -mcpu=mips64r2 %s -o - | llvm-objdump -disassemble -triple mips64el - | FileCheck %s
+; RUN: llc -march=mips64el -filetype=obj -mcpu=mips64r2 -disable-mips-delay-filler %s -o - \
+; RUN: | llvm-objdump -disassemble -triple mips64el - | FileCheck %s 
 
+; RUN: llc -march=mips64el -filetype=obj -mcpu=mips64r2 %s -o - \
+; RUN: | llvm-objdump -disassemble -triple mips64el - | FileCheck %s 
 
 define i64 @f3(i64 %a0) nounwind readnone {
 entry:

Modified: llvm/branches/AMDILBackend/test/MC/Mips/multi-64bit-func.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Mips/multi-64bit-func.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Mips/multi-64bit-func.ll (original)
+++ llvm/branches/AMDILBackend/test/MC/Mips/multi-64bit-func.ll Tue Jan 15 11:16:16 2013
@@ -1,8 +1,8 @@
 ; There is no real check here. If the test doesn't 
 ; assert it passes.
-; RUN: llc -march=mips64el -filetype=obj -mcpu=mips64r2 < %s 
+; RUN: llc -march=mips64el -filetype=obj -mcpu=mips64r2 -disable-mips-delay-filler < %s 
 ; Run it again without extra nop in delay slot
-; RUN: llc -march=mips64el -filetype=obj -mcpu=mips64r2 -enable-mips-delay-filler < %s 
+; RUN: llc -march=mips64el -filetype=obj -mcpu=mips64r2 < %s 
 
 define i32 @bosco1(i32 %x) nounwind readnone {
 entry:

Modified: llvm/branches/AMDILBackend/test/MC/Mips/sext_64_32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/Mips/sext_64_32.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/Mips/sext_64_32.ll (original)
+++ llvm/branches/AMDILBackend/test/MC/Mips/sext_64_32.ll Tue Jan 15 11:16:16 2013
@@ -2,7 +2,7 @@
 
 ; Sign extend from 32 to 64 was creating nonsense opcodes
 
-; CHECK: sll ${{[0-9]+}}, ${{[0-9]+}}, 0
+; CHECK: sll ${{[a-z0-9]+}}, ${{[a-z0-9]+}}, 0
 
 define i64 @foo(i32 %ival) nounwind readnone {
 entry:
@@ -10,7 +10,7 @@
   ret i64 %conv
 }
 
-; CHECK: dsll32 ${{[0-9]+}}, ${{[0-9]+}}, 0
+; CHECK: dsll32 ${{[a-z0-9]+}}, ${{[a-z0-9]+}}, 0
 
 define i64 @foo_2(i32 %ival_2) nounwind readnone {
 entry:

Modified: llvm/branches/AMDILBackend/test/MC/X86/intel-syntax-2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/X86/intel-syntax-2.s?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/X86/intel-syntax-2.s (original)
+++ llvm/branches/AMDILBackend/test/MC/X86/intel-syntax-2.s Tue Jan 15 11:16:16 2013
@@ -1,7 +1,9 @@
-// RUN: llvm-mc -triple x86_64-unknown-unknown  %s | FileCheck %s
+// RUN: llvm-mc -triple x86_64-unknown-unknown -x86-asm-syntax=att %s | FileCheck %s
 
 	.intel_syntax
 _test:
 // CHECK:	movl	$257, -4(%rsp)
 	mov	DWORD PTR [RSP - 4], 257
-
+    .att_syntax
+// CHECK:	movl	$257, -4(%rsp)
+    movl $257, -4(%rsp)

Modified: llvm/branches/AMDILBackend/test/MC/X86/x86-64.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/MC/X86/x86-64.s?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/MC/X86/x86-64.s (original)
+++ llvm/branches/AMDILBackend/test/MC/X86/x86-64.s Tue Jan 15 11:16:16 2013
@@ -1164,6 +1164,10 @@
 // CHECK: encoding: [0x66,0x48,0x0f,0x6e,0xc7]
 	movd %rdi,%xmm0
 
+// CHECK: movd  %xmm0, %rax
+// CHECK: encoding: [0x66,0x48,0x0f,0x7e,0xc0]
+        movd  %xmm0, %rax
+
 // CHECK: movntil %eax, (%rdi)
 // CHECK: encoding: [0x0f,0xc3,0x07]
 // CHECK: movntil

Modified: llvm/branches/AMDILBackend/test/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Makefile?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Makefile (original)
+++ llvm/branches/AMDILBackend/test/Makefile Tue Jan 15 11:16:16 2013
@@ -29,11 +29,6 @@
 LIT_ARGS := -s -v
 endif
 
-# -jN causes crash on Cygwin's python.
-ifneq (,$(filter $(HOST_OS),Cygwin))
-  LIT_ARGS += -j1
-endif
-
 ifdef TESTSUITE
 LIT_TESTSUITE := $(TESTSUITE)
 CLEANED_TESTSUITE := $(patsubst %/,%,$(TESTSUITE))
@@ -122,6 +117,16 @@
 ENABLE_ASSERTIONS=1
 endif
 
+# Derive whether or not LTO is enabled by checking the extra options.
+LTO_IS_ENABLED := 0
+ifneq ($(findstring -flto,$(CompileCommonOpts)),)
+LTO_IS_ENABLED := 1
+else
+ifneq ($(findstring -O4,$(CompileCommonOpts)),)
+LTO_IS_ENABLED := 1
+endif
+endif
+
 lit.site.cfg: FORCE
 	@echo "Making LLVM 'lit.site.cfg' file..."
 	@$(ECHOPATH) s=@TARGET_TRIPLE@=$(TARGET_TRIPLE)=g > lit.tmp
@@ -131,9 +136,10 @@
 	@$(ECHOPATH) s=@SHLIBDIR@=$(SharedLibDir)=g >> lit.tmp
 	@$(ECHOPATH) s=@SHLIBEXT@=$(SHLIBEXT)=g >> lit.tmp
 	@$(ECHOPATH) s=@PYTHON_EXECUTABLE@=python=g >> lit.tmp
-	@$(ECHOPATH) s, at OCAMLOPT@,$(OCAMLOPT) -cc \\\\\"$(CXX_FOR_OCAMLOPT)\\\\\" -I $(LibDir)/ocaml,g >> lit.tmp
+	@$(ECHOPATH) s=@OCAMLOPT@=$(OCAMLOPT) -cc $(subst *,'\\\"',*$(subst =,"\\=",$(CXX_FOR_OCAMLOPT))*) -I $(LibDir)/ocaml=g >> lit.tmp
 	@$(ECHOPATH) s=@ENABLE_SHARED@=$(ENABLE_SHARED)=g >> lit.tmp
 	@$(ECHOPATH) s=@ENABLE_ASSERTIONS@=$(ENABLE_ASSERTIONS)=g >> lit.tmp
+	@$(ECHOPATH) s=@LTO_IS_ENABLED@=$(LTO_IS_ENABLED)=g >> lit.tmp
 	@$(ECHOPATH) s=@TARGETS_TO_BUILD@=$(TARGETS_TO_BUILD)=g >> lit.tmp
 	@$(ECHOPATH) s=@LLVM_BINDINGS@=$(BINDINGS_TO_BUILD)=g >> lit.tmp
 	@$(ECHOPATH) s=@HOST_OS@=$(HOST_OS)=g >> lit.tmp

Modified: llvm/branches/AMDILBackend/test/Object/nm-shared-object.test
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Object/nm-shared-object.test?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Object/nm-shared-object.test (original)
+++ llvm/branches/AMDILBackend/test/Object/nm-shared-object.test Tue Jan 15 11:16:16 2013
@@ -1,15 +1,23 @@
 RUN: llvm-nm -D %p/Inputs/shared-object-test.elf-i386 \
-RUN:         | FileCheck %s -check-prefix ELF
+RUN:         | FileCheck %s -check-prefix ELF-32
 RUN: llvm-nm -D %p/Inputs/shared-object-test.elf-x86-64 \
-RUN:         | FileCheck %s -check-prefix ELF
+RUN:         | FileCheck %s -check-prefix ELF-64
 
 ; Note: tls_sym should be 'D' (not '?'), but TLS is not
 ; yet recognized by ObjectFile.
 
-ELF: {{[0-9a-f]+}} A __bss_start
-ELF: {{[0-9a-f]+}} A _edata
-ELF: {{[0-9a-f]+}} A _end
-ELF: {{[0-9a-f]+}} B common_sym
-ELF: {{[0-9a-f]+}} D defined_sym
-ELF: {{[0-9a-f]+}} T global_func
-ELF:               ? tls_sym
+ELF-32: 0012c8 A __bss_start
+ELF-32: 0012c8 A _edata
+ELF-32: 0012cc A _end
+ELF-32: 0012c8 B common_sym
+ELF-32: 0012c4 D defined_sym
+ELF-32: 0001f0 T global_func
+ELF-32:        ? tls_sym
+
+ELF-64: 200454 A __bss_start
+ELF-64: 200454 A _edata
+ELF-64: 200458 A _end
+ELF-64: 200454 B common_sym
+ELF-64: 200450 D defined_sym
+ELF-64: 0002f0 T global_func
+ELF-64:        ? tls_sym

Modified: llvm/branches/AMDILBackend/test/Object/objdump-relocations.test
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Object/objdump-relocations.test?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Object/objdump-relocations.test (original)
+++ llvm/branches/AMDILBackend/test/Object/objdump-relocations.test Tue Jan 15 11:16:16 2013
@@ -9,6 +9,9 @@
 RUN: llvm-objdump -r %p/Inputs/trivial-object-test.elf-hexagon \
 RUN:              | FileCheck %s -check-prefix ELF-hexagon
 
+RUN: llvm-objdump -r %p/Inputs/relocations.elf-x86-64 \
+RUN:              | FileCheck %s -check-prefix ELF-complex-x86-64
+
 COFF-i386: .text
 COFF-i386: IMAGE_REL_I386_DIR32 L_.str
 COFF-i386: IMAGE_REL_I386_REL32 _puts
@@ -36,3 +39,13 @@
 ELF-hexagon: R_HEX_LO16 puts
 ELF-hexagon: R_HEX_B15_PCREL testf
 ELF-hexagon: R_HEX_B22_PCREL puts
+
+ELF-complex-x86-64: .text
+ELF-complex-x86-64-NEXT: R_X86_64_8 .data-4
+ELF-complex-x86-64-NEXT: R_X86_64_16 .data-4
+ELF-complex-x86-64-NEXT: R_X86_64_32 .data-4
+ELF-complex-x86-64-NEXT: R_X86_64_32S .data-4
+ELF-complex-x86-64-NEXT: R_X86_64_64 .data-4
+ELF-complex-x86-64-NEXT: R_X86_64_PC32 .data-4-P
+ELF-complex-x86-64-NEXT: R_X86_64_32 .data+0
+ELF-complex-x86-64-NEXT: R_X86_64_32 .data+4

Modified: llvm/branches/AMDILBackend/test/Object/objdump-symbol-table.test
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Object/objdump-symbol-table.test?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Object/objdump-symbol-table.test (original)
+++ llvm/branches/AMDILBackend/test/Object/objdump-symbol-table.test Tue Jan 15 11:16:16 2013
@@ -4,6 +4,8 @@
 RUN:              | FileCheck %s -check-prefix ELF-i386
 RUN: llvm-objdump -t %p/Inputs/trivial-object-test.macho-i386 \
 RUN:              | FileCheck %s -check-prefix macho-i386
+RUN: llvm-objdump -t %p/Inputs/shared-object-test.elf-i386 \
+RUN:              | FileCheck %s -check-prefix ELF-shared
 
 COFF-i386: file format
 COFF-i386: SYMBOL TABLE:
@@ -31,3 +33,9 @@
 macho-i386: 00000000 g     F __TEXT,__text  00000024 _main
 macho-i386: 00000000         *UND*  00000000 _SomeOtherFunction
 macho-i386: 00000000         *UND*  00000000 _puts
+
+ELF-shared: shared-object-test.elf-i386:     file format
+ELF-shared: SYMBOL TABLE:
+ELF-shared: 00000200 l     F .text 00000003 local_func
+ELF-shared: 000012c4 g       .data 00000004 defined_sym
+ELF-shared: 000001f0 g     F .text 00000003 global_func

Modified: llvm/branches/AMDILBackend/test/Other/extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Other/extract.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Other/extract.ll (original)
+++ llvm/branches/AMDILBackend/test/Other/extract.ll Tue Jan 15 11:16:16 2013
@@ -7,18 +7,19 @@
 ; llvm-extract uses lazy bitcode loading, so make sure it correctly reads
 ; from bitcode files in addition to assembly files.
 
-; CHECK: define void @foo() {
+; CHECK: define hidden void @foo() {
 ; CHECK:   ret void
 ; CHECK: }
 
-; The linkonce_odr linkage for foo() should be changed to external linkage.
-; DELETE: declare void @foo()
+; The private linkage for foo() should be changed to external linkage and
+; hidden visibility added.
+; DELETE: declare hidden void @foo()
 ; DELETE: define void @bar() {
 ; DELETE:   call void @foo()
 ; DELETE:   ret void
 ; DELETE: }
 
-define linkonce_odr void @foo() {
+define private void @foo() {
   ret void
 }
 define void @bar() {

Modified: llvm/branches/AMDILBackend/test/Other/lint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Other/lint.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Other/lint.ll (original)
+++ llvm/branches/AMDILBackend/test/Other/lint.ll Tue Jan 15 11:16:16 2013
@@ -9,8 +9,11 @@
 declare void @one_arg(i32)
 
 @CG = constant i32 7
+ at E = external global i8
 
 define i32 @foo() noreturn {
+  %buf = alloca i8
+  %buf2 = alloca {i8, i8}, align 2
 ; CHECK: Caller and callee calling convention differ
   call void @bar()
 ; CHECK: Null pointer dereference
@@ -26,8 +29,10 @@
 ; CHECK: Address one pointer dereference
   store i32 0, i32* inttoptr (i64 1 to i32*)
 ; CHECK: Memory reference address is misaligned
-  %x = inttoptr i32 1 to i32*
-  load i32* %x, align 4
+  store i8 0, i8* %buf, align 2
+; CHECK: Memory reference address is misaligned
+  %gep = getelementptr {i8, i8}* %buf2, i32 0, i32 1
+  store i8 0, i8* %gep, align 2
 ; CHECK: Division by zero
   %sd = sdiv i32 2, 0
 ; CHECK: Division by zero
@@ -75,6 +80,18 @@
 ; CHECK: Write to read-only memory
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* bitcast (i32* @CG to i8*), i8* bitcast (i32* @CG to i8*), i64 1, i32 1, i1 0)
 
+; CHECK: Undefined behavior: Buffer overflow
+  %wider = bitcast i8* %buf to i16*
+  store i16 0, i16* %wider
+; CHECK: Undefined behavior: Buffer overflow
+  %inner = getelementptr {i8, i8}* %buf2, i32 0, i32 1
+  %wider2 = bitcast i8* %inner to i16*
+  store i16 0, i16* %wider2
+; CHECK: Undefined behavior: Buffer overflow
+  %before = getelementptr i8* %buf, i32 -1
+  %wider3 = bitcast i8* %before to i16*
+  store i16 0, i16* %wider3
+
   br label %next
 
 next:
@@ -84,6 +101,10 @@
   ret i32 0
 
 foo:
+; CHECK-NOT: Undefined behavior: Buffer overflow
+; CHECK-NOT: Memory reference address is misaligned
+  %e = bitcast i8* @E to i64*
+  store i64 0, i64* %e
   %z = add i32 0, 0
 ; CHECK: unreachable immediately preceded by instruction without side effects
   unreachable

Modified: llvm/branches/AMDILBackend/test/Other/lit.local.cfg
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Other/lit.local.cfg?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Other/lit.local.cfg (original)
+++ llvm/branches/AMDILBackend/test/Other/lit.local.cfg Tue Jan 15 11:16:16 2013
@@ -1 +1 @@
-config.suffixes = ['.ll', '.c', '.cpp']
+config.suffixes = ['.ll', '.c', '.cpp', '.txt']

Modified: llvm/branches/AMDILBackend/test/TableGen/if.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/TableGen/if.td?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/TableGen/if.td (original)
+++ llvm/branches/AMDILBackend/test/TableGen/if.td Tue Jan 15 11:16:16 2013
@@ -3,15 +3,59 @@
 
 // Support for an `!if' operator as part of a `let' statement.
 // CHECK:      class C
-// CHECK-NEXT: bits<16> n = { ?, ?, ?, ?, ?, ?, ?, !if({ C:x{2} }, 0, 1), !if({ C:x{2} }, 1, 1), !if({ C:x{2} }, 0, 0), !if({ C:x{1} }, C:y{3}, 0), !if({ C:x{1} }, C:y{2}, 1), !if({ C:x{0} }, C:y{3}, C:z), !if({ C:x{0} }, C:y{2}, C:y{2}), !if({ C:x{0} }, C:y{1}, C:y{1}), !if({ C:x{0} }, C:y{0}, C:y{0}) };
+// CHECK-NEXT: bits<16> n = { ?, ?, ?, ?, !if({ C:y{3} }, 1, !if({ C:y{2} }, { C:x{0} }, !if({ C:y{1} }, { C:x{1} }, !if({ C:y{0} }, { C:x{2} }, ?)))){0}, !if({ C:x{2} }, { C:y{3}, C:y{2} }, !if({ C:x{1} }, { C:y{2}, C:y{1} }, !if({ C:x{0} }, { C:y{1}, C:y{0} }, ?))){1}, !if({ C:x{2} }, { C:y{3}, C:y{2} }, !if({ C:x{1} }, { C:y{2}, C:y{1} }, !if({ C:x{0} }, { C:y{1}, C:y{0} }, ?))){0}, !if({ C:x{2} }, 2, 6){2}, !if({ C:x{2} }, 2, 6){1}, !if({ C:x{2} }, 2, 6){0}, !if({ C:x{1} }, { C:y{3}, C:y{2} }, { 0, 1 }){1}, !if({ C:x{1} }, { C:y{3}, C:y{2} }, { 0, 1 }){0}, !if({ C:x{0} }, { C:y{3}, C:y{2}, C:y{1}, C:y{0} }, { C:z, C:y{2}, C:y{1}, C:y{0} }){3}, !if({ C:x{0} }, { C:y{3}, C:y{2}, C:y{1}, C:y{0} }, { C:z, C:y{2}, C:y{1}, C:y{0} }){2}, !if({ C:x{0} }, { C:y{3}, C:y{2}, C:y{1}, C:y{0} }, { C:z, C:y{2}, C:y{1}, C:y{0} }){1}, !if({ C:x{0} }, { C:y{3}, C:y{2}, C:y{1}, C:y{0} }, { C:z, C:y{2}, C:y{1}, C:y{0} }){0} };
 class C<bits<3> x, bits<4> y, bit z> {
   bits<16> n;
 
+  let n{11}  = !if(y{3}, 1,
+               !if(y{2}, x{0},
+               !if(y{1}, x{1},
+               !if(y{0}, x{2}, ?))));
+  let n{10-9}= !if(x{2}, y{3-2},
+               !if(x{1}, y{2-1},
+               !if(x{0}, y{1-0}, ?)));
   let n{8-6} = !if(x{2}, 0b010, 0b110);
   let n{5-4} = !if(x{1}, y{3-2}, {0, 1});
   let n{3-0} = !if(x{0}, y{3-0}, {z, y{2}, y{1}, y{0}});
 }
 
+def C1 : C<{1, 0, 1}, {0, 1, 0, 1}, 0>;
+def C2 : C<{0, 1, 0}, {1, 0, 1, 0}, 1>;
+def C3 : C<{0, 0, 0}, {1, 0, 1, 0}, 0>;
+def C4 : C<{0, 0, 0}, {0, 0, 0, 0}, 0>;
+
+// CHECK: def C1
+// CHECK-NEXT: bits<16> n = { ?, ?, ?, ?, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1 };
+// CHECK: def C2
+// CHECK-NEXT: bits<16> n = { ?, ?, ?, ?, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0 };
+// CHECK: def C3
+// CHECK-NEXT: bits<16> n = { ?, ?, ?, ?, 1, ?, ?, 1, 1, 0, 0, 1, 0, 0, 1, 0 };
+// CHECK: def C4
+// CHECK-NEXT: bits<16> n = { ?, ?, ?, ?, ?, ?, ?, 1, 1, 0, 0, 1, 0, 0, 0, 0 };
+
+class S<int s> {
+  bits<2> val = !if(!eq(s, 8),  {0, 0},
+                !if(!eq(s, 16), 0b01,
+                !if(!eq(s, 32), 2,
+                !if(!eq(s, 64), {1, 1}, ?))));
+}
+
+def D8  : S<8>;
+def D16 : S<16>;
+def D32 : S<32>;
+def D64 : S<64>;
+def D128: S<128>;
+// CHECK: def D128
+// CHECK-NEXT: bits<2> val = { ?, ? };
+// CHECK: def D16
+// CHECK-NEXT: bits<2> val = { 0, 1 };
+// CHECK: def D32
+// CHECK-NEXT: bits<2> val = { 1, 0 };
+// CHECK: def D64
+// CHECK-NEXT: bits<2> val = { 1, 1 };
+// CHECK: def D8
+// CHECK-NEXT: bits<2> val = { 0, 0 };
+
 // CHECK:      def One
 // CHECK-NEXT: list<int> first = [1, 2, 3];
 // CHECK-NEXT: list<int> rest = [1, 2, 3];

Modified: llvm/branches/AMDILBackend/test/Transforms/BBVectorize/cycle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/BBVectorize/cycle.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/BBVectorize/cycle.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/BBVectorize/cycle.ll Tue Jan 15 11:16:16 2013
@@ -107,6 +107,6 @@
   ret void
 ; CHECK: @test1
 ; CHECK: go:
-; CHECK-NEXT: %conv.v.i0.1 = insertelement <2 x i32> undef, i32 %n.0, i32 0
+; CHECK: %conv.v.i0.1 = insertelement <2 x i32> undef, i32 %n.0, i32 0
 ; FIXME: When tree pruning is deterministic, include the entire output.
 }

Modified: llvm/branches/AMDILBackend/test/Transforms/BBVectorize/lit.local.cfg
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/BBVectorize/lit.local.cfg?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/BBVectorize/lit.local.cfg (original)
+++ llvm/branches/AMDILBackend/test/Transforms/BBVectorize/lit.local.cfg Tue Jan 15 11:16:16 2013
@@ -1 +1,6 @@
 config.suffixes = ['.ll', '.c', '.cpp']
+
+targets = set(config.root.targets_to_build.split())
+if not 'X86' in targets:
+    config.unsupported = True
+

Modified: llvm/branches/AMDILBackend/test/Transforms/BBVectorize/loop1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/BBVectorize/loop1.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/BBVectorize/loop1.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/BBVectorize/loop1.ll Tue Jan 15 11:16:16 2013
@@ -42,8 +42,8 @@
 ; CHECK: %mul = fmul double %0, %0
 ; CHECK: %mul3 = fmul double %0, %1
 ; CHECK: %add = fadd double %mul, %mul3
-; CHECK: %add4.v.i1.1 = insertelement <2 x double> undef, double %1, i32 0
 ; CHECK: %mul8 = fmul double %1, %1
+; CHECK: %add4.v.i1.1 = insertelement <2 x double> undef, double %1, i32 0
 ; CHECK: %add4.v.i1.2 = insertelement <2 x double> %add4.v.i1.1, double %0, i32 1
 ; CHECK: %add4 = fadd <2 x double> %add4.v.i1.2, %add4.v.i1.2
 ; CHECK: %add5.v.i1.1 = insertelement <2 x double> undef, double %0, i32 0

Modified: llvm/branches/AMDILBackend/test/Transforms/BBVectorize/search-limit.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/BBVectorize/search-limit.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/BBVectorize/search-limit.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/BBVectorize/search-limit.ll Tue Jan 15 11:16:16 2013
@@ -7,8 +7,8 @@
 ; CHECK-SL4: @test1
 ; CHECK-SL4-NOT: <2 x double>
 ; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
 	%X1 = fsub double %A1, %B1
 	%X2 = fsub double %A2, %B2

Modified: llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-int.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-int.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-int.ll Tue Jan 15 11:16:16 2013
@@ -17,8 +17,8 @@
 	ret double %R
 ; CHECK: @test1
 ; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
 ; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
 ; CHECK: %Y1.v.i2.1 = insertelement <2 x double> undef, double %C1, i32 0
@@ -43,8 +43,8 @@
 	ret double %R
 ; CHECK: @test2
 ; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
 ; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
 ; CHECK: %Y1 = call <2 x double> @llvm.cos.v2f64(<2 x double> %X1)
@@ -68,8 +68,8 @@
 	ret double %R
 ; CHECK: @test3
 ; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
 ; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
 ; CHECK: %Y1 = call <2 x double> @llvm.powi.v2f64(<2 x double> %X1, i32 %P)

Modified: llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-ldstr-ptrs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-ldstr-ptrs.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-ldstr-ptrs.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-ldstr-ptrs.ll Tue Jan 15 11:16:16 2013
@@ -2,6 +2,9 @@
 ; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -instcombine -gvn -S | FileCheck %s
 ; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-aligned-only -instcombine -gvn -S | FileCheck %s -check-prefix=CHECK-AO
 
+; FIXME: re-enable this once pointer vectors work properly
+; XFAIL: *
+
 ; Simple 3-pair chain also with loads and stores (using ptrs and gep)
 define double @test1(i64* %a, i64* %b, i64* %c) nounwind uwtable readonly {
 entry:
@@ -79,3 +82,53 @@
 ; CHECK-AO-NOT: <2 x
 }
 
+; Simple 3-pair chain with loads and stores (using ptrs and gep)
+; using pointer vectors.
+define void @test3(<2 x i64*>* %a, <2 x i64*>* %b, <2 x i64*>* %c) nounwind uwtable readonly {
+entry:
+  %i0 = load <2 x i64*>* %a, align 8
+  %i1 = load <2 x i64*>* %b, align 8
+  %arrayidx3 = getelementptr inbounds <2 x i64*>* %a, i64 1
+  %i3 = load <2 x i64*>* %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds <2 x i64*>* %b, i64 1
+  %i4 = load <2 x i64*>* %arrayidx4, align 8
+  %j1 = extractelement <2 x i64*> %i1, i32 0
+  %j4 = extractelement <2 x i64*> %i4, i32 0
+  %o1 = load i64* %j1, align 8
+  %o4 = load i64* %j4, align 8
+  %j0 = extractelement <2 x i64*> %i0, i32 0
+  %j3 = extractelement <2 x i64*> %i3, i32 0
+  %ptr0 = getelementptr inbounds i64* %j0, i64 %o1
+  %ptr3 = getelementptr inbounds i64* %j3, i64 %o4
+  %qtr0 = insertelement <2 x i64*> undef, i64* %ptr0, i32 0
+  %rtr0 = insertelement <2 x i64*> %qtr0, i64* %ptr0, i32 1
+  %qtr3 = insertelement <2 x i64*> undef, i64* %ptr3, i32 0
+  %rtr3 = insertelement <2 x i64*> %qtr3, i64* %ptr3, i32 1
+  store <2 x i64*> %rtr0, <2 x i64*>* %c, align 8
+  %arrayidx5 = getelementptr inbounds <2 x i64*>* %c, i64 1
+  store <2 x i64*> %rtr3, <2 x i64*>* %arrayidx5, align 8
+  ret void
+; CHECK: @test3
+; CHECK: %i0.v.i0 = bitcast <2 x i64*>* %a to <4 x i64*>*
+; CHECK: %i1 = load <2 x i64*>* %b, align 8
+; CHECK: %i0 = load <4 x i64*>* %i0.v.i0, align 8
+; CHECK: %arrayidx4 = getelementptr inbounds <2 x i64*>* %b, i64 1
+; CHECK: %i4 = load <2 x i64*>* %arrayidx4, align 8
+; CHECK: %j1 = extractelement <2 x i64*> %i1, i32 0
+; CHECK: %j4 = extractelement <2 x i64*> %i4, i32 0
+; CHECK: %o1 = load i64* %j1, align 8
+; CHECK: %o4 = load i64* %j4, align 8
+; CHECK: %ptr0.v.i1.1 = insertelement <2 x i64> undef, i64 %o1, i32 0
+; CHECK: %ptr0.v.i1.2 = insertelement <2 x i64> %ptr0.v.i1.1, i64 %o4, i32 1
+; CHECK: %ptr0.v.i0 = shufflevector <4 x i64*> %i0, <4 x i64*> undef, <2 x i32> <i32 0, i32 2>
+; CHECK: %ptr0 = getelementptr inbounds <2 x i64*> %ptr0.v.i0, <2 x i64> %ptr0.v.i1.2
+; CHECK: %rtr0 = shufflevector <2 x i64*> %ptr0, <2 x i64*> undef, <2 x i32> zeroinitializer
+; CHECK: %rtr3 = shufflevector <2 x i64*> %ptr0, <2 x i64*> undef, <2 x i32> <i32 1, i32 1>
+; CHECK: %0 = bitcast <2 x i64*>* %c to <4 x i64*>*
+; CHECK: %1 = shufflevector <2 x i64*> %rtr0, <2 x i64*> %rtr3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK: store <4 x i64*> %1, <4 x i64*>* %0, align 8
+; CHECK: ret void
+; CHECK-AO: @test3
+; CHECK-AO-NOT: <4 x
+}
+

Modified: llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-ldstr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-ldstr.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-ldstr.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-ldstr.ll Tue Jan 15 11:16:16 2013
@@ -94,13 +94,13 @@
 ; CHECK-AO: @test3
 ; CHECK-AO: %i0 = load double* %a, align 8
 ; CHECK-AO: %i1 = load double* %b, align 8
-; CHECK-AO: %mul.v.i1.1 = insertelement <2 x double> undef, double %i1, i32 0
-; CHECK-AO: %mul.v.i0.1 = insertelement <2 x double> undef, double %i0, i32 0
 ; CHECK-AO: %arrayidx3 = getelementptr inbounds double* %a, i64 1
 ; CHECK-AO: %i3 = load double* %arrayidx3, align 8
 ; CHECK-AO: %arrayidx4 = getelementptr inbounds double* %b, i64 1
 ; CHECK-AO: %i4 = load double* %arrayidx4, align 8
+; CHECK-AO: %mul.v.i1.1 = insertelement <2 x double> undef, double %i1, i32 0
 ; CHECK-AO: %mul.v.i1.2 = insertelement <2 x double> %mul.v.i1.1, double %i4, i32 1
+; CHECK-AO: %mul.v.i0.1 = insertelement <2 x double> undef, double %i0, i32 0
 ; CHECK-AO: %mul.v.i0.2 = insertelement <2 x double> %mul.v.i0.1, double %i3, i32 1
 ; CHECK-AO: %mul = fmul <2 x double> %mul.v.i0.2, %mul.v.i1.2
 ; CHECK-AO: %mulf = fptrunc <2 x double> %mul to <2 x float>
@@ -108,3 +108,63 @@
 ; CHECK-AO: store <2 x float> %mulf, <2 x float>* %0, align 8
 ; CHECK-AO: ret void
 }
+
+; Simple 3-pair chain with loads and stores (unreachable)
+define void @test4(i1 %bool, double* %a, double* %b, double* %c) nounwind uwtable readonly {
+entry:
+  br i1 %bool, label %if.then1, label %if.end
+
+if.then1:
+  unreachable
+  br label %if.then
+
+if.then:
+  %i0 = load double* %a, align 8
+  %i1 = load double* %b, align 8
+  %mul = fmul double %i0, %i1
+  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %i3 = load double* %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %i4 = load double* %arrayidx4, align 8
+  %mul5 = fmul double %i3, %i4
+  store double %mul, double* %c, align 8
+  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  store double %mul5, double* %arrayidx5, align 8
+  br label %if.end
+
+if.end:
+  ret void
+; CHECK: @test4
+; CHECK-NOT: <2 x double>
+; CHECK-AO: @test4
+; CHECK-AO-NOT: <2 x double>
+}
+
+; Simple 3-pair chain with loads and stores
+define void @test5(double* %a, double* %b, double* %c) nounwind uwtable readonly {
+entry:
+  %i0 = load double* %a, align 8
+  %i1 = load double* %b, align 8
+  %mul = fmul double %i0, %i1
+  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %i3 = load double* %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %i4 = load double* %arrayidx4, align 8
+  %mul5 = fmul double %i3, %i4
+  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  store double %mul5, double* %arrayidx5, align 8
+  store double %mul, double* %c, align 4
+  ret void
+; CHECK: @test5
+; CHECK: %i0.v.i0 = bitcast double* %a to <2 x double>*
+; CHECK: %i1.v.i0 = bitcast double* %b to <2 x double>*
+; CHECK: %i0 = load <2 x double>* %i0.v.i0, align 8
+; CHECK: %i1 = load <2 x double>* %i1.v.i0, align 8
+; CHECK: %mul = fmul <2 x double> %i0, %i1
+; CHECK: %0 = bitcast double* %c to <2 x double>*
+; CHECK: store <2 x double> %mul, <2 x double>* %0, align 4
+; CHECK: ret void
+; CHECK-AO: @test5
+; CHECK-AO-NOT: <2 x double>
+}
+

Modified: llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-sel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-sel.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-sel.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple-sel.ll Tue Jan 15 11:16:16 2013
@@ -6,8 +6,8 @@
 define double @test1(double %A1, double %A2, double %B1, double %B2, i1 %C1, i1 %C2) {
 ; CHECK: @test1
 ; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
 	%X1 = fsub double %A1, %B1
 	%X2 = fsub double %A2, %B2
@@ -33,8 +33,8 @@
 ; CHECK: @test2
 ; CHECK-NB: @test2
 ; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
 	%X1 = fsub double %A1, %B1
 	%X2 = fsub double %A2, %B2

Modified: llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/BBVectorize/simple.ll Tue Jan 15 11:16:16 2013
@@ -5,8 +5,8 @@
 define double @test1(double %A1, double %A2, double %B1, double %B2) {
 ; CHECK: @test1
 ; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
 	%X1 = fsub double %A1, %B1
 	%X2 = fsub double %A2, %B2
@@ -29,8 +29,8 @@
 define double @test2(double %A1, double %A2, double %B1, double %B2) {
 ; CHECK: @test2
 ; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
 	%X1 = fsub double %A1, %B1
 	%X2 = fsub double %A2, %B2
@@ -40,12 +40,13 @@
 ; CHECK: %Y1 = fmul <2 x double> %X1, %X1.v.i0.2
 	%Z1 = fadd double %Y2, %B1
 	%Z2 = fadd double %Y1, %B2
-; CHECK: %Z1.v.i0 = shufflevector <2 x double> %Y1, <2 x double> undef, <2 x i32> <i32 1, i32 0>
-; CHECK: %Z1 = fadd <2 x double> %Z1.v.i0, %X1.v.i1.2
+; CHECK: %Z1.v.i1.1 = insertelement <2 x double> undef, double %B2, i32 0
+; CHECK: %Z1.v.i1.2 = insertelement <2 x double> %Z1.v.i1.1, double %B1, i32 1
+; CHECK: %Z2 = fadd <2 x double> %Y1, %Z1.v.i1.2
 	%R  = fmul double %Z1, %Z2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: %Z2.v.r1 = extractelement <2 x double> %Z2, i32 0
+; CHECK: %Z2.v.r2 = extractelement <2 x double> %Z2, i32 1
+; CHECK: %R = fmul double %Z2.v.r2, %Z2.v.r1
 	ret double %R
 ; CHECK: ret double %R
 }
@@ -54,8 +55,8 @@
 define double @test3(double %A1, double %A2, double %B1, double %B2) {
 ; CHECK: @test3
 ; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
 	%X1 = fsub double %A1, %B1
 	%X2 = fsub double %A2, %B2
@@ -79,8 +80,8 @@
 define double @test4(double %A1, double %A2, double %B1, double %B2) {
 ; CHECK: @test4
 ; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
 ; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
 	%X1 = fsub double %A1, %B1
 	%X2 = fsub double %A2, %B2
@@ -148,4 +149,51 @@
 ; CHECK: ret <8 x i8> %R
 }
 
+; Basic depth-3 chain (flipped order)
+define double @test7(double %A1, double %A2, double %B1, double %B2) {
+; CHECK: @test7
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+	%X1 = fsub double %A1, %B1
+	%X2 = fsub double %A2, %B2
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+	%Y1 = fmul double %X1, %A1
+	%Y2 = fmul double %X2, %A2
+; CHECK: %Y1 = fmul <2 x double> %X1, %X1.v.i0.2
+	%Z2 = fadd double %Y2, %B2
+	%Z1 = fadd double %Y1, %B1
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+	%R  = fmul double %Z1, %Z2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+	ret double %R
+; CHECK: ret double %R
+}
+
+; Basic depth-3 chain (subclass data)
+define i64 @test8(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
+; CHECK: @test8
+; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
+	%X1 = sub nsw i64 %A1, %B1
+	%X2 = sub i64 %A2, %B2
+; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
+	%Y1 = mul i64 %X1, %A1
+	%Y2 = mul i64 %X2, %A2
+; CHECK: %Y1 = mul <2 x i64> %X1, %X1.v.i0.2
+	%Z1 = add i64 %Y1, %B1
+	%Z2 = add i64 %Y2, %B2
+; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
+	%R  = mul i64 %Z1, %Z2
+; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
+; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
+	ret i64 %R
+; CHECK: ret i64 %R
+}
 

Modified: llvm/branches/AMDILBackend/test/Transforms/CodeGenPrepare/basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/CodeGenPrepare/basic.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/CodeGenPrepare/basic.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/CodeGenPrepare/basic.ll Tue Jan 15 11:16:16 2013
@@ -5,7 +5,7 @@
 
 ; CHECK: @test1
 ; objectsize should fold to a constant, which causes the branch to fold to an
-; uncond branch.
+; uncond branch. Next, we fold the control flow altogether.
 ; rdar://8785296
 define i32 @test1(i8* %ptr) nounwind ssp noredzone align 2 {
 entry:
@@ -13,8 +13,8 @@
   %1 = icmp ugt i64 %0, 3
   br i1 %1, label %T, label %trap
 
-; CHECK: entry:
-; CHECK-NEXT: br label %T
+; CHECK: T:
+; CHECK-NOT: br label %
 
 trap:                                             ; preds = %0, %entry
   tail call void @llvm.trap() noreturn nounwind

Modified: llvm/branches/AMDILBackend/test/Transforms/ConstProp/loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/ConstProp/loads.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/ConstProp/loads.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/ConstProp/loads.ll Tue Jan 15 11:16:16 2013
@@ -1,17 +1,24 @@
-; RUN: opt < %s -instcombine -S | FileCheck %s 
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+; RUN: opt < %s -default-data-layout="e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64" -instcombine -S | FileCheck %s --check-prefix=LE
+; RUN: opt < %s -default-data-layout="E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64" -instcombine -S | FileCheck %s --check-prefix=BE
 
+; {{ 0xDEADBEEF, 0xBA }, 0xCAFEBABE}
 @g1 = constant {{i32,i8},i32} {{i32,i8} { i32 -559038737, i8 186 }, i32 -889275714 }
 @g2 = constant double 1.0
+; { 0x7B, 0x06B1BFF8 }
 @g3 = constant {i64, i64} { i64 123, i64 112312312 }
 
 ; Simple load
 define i32 @test1() {
   %r = load i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0)
   ret i32 %r
-; CHECK: @test1
-; CHECK: ret i32 -559038737
+
+; 0xDEADBEEF
+; LE: @test1
+; LE: ret i32 -559038737
+
+; 0xDEADBEEF
+; BE: @test1
+; BE: ret i32 -559038737
 }
 
 ; PR3152
@@ -20,8 +27,13 @@
   %r = load i16* bitcast(i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16*)
   ret i16 %r
 
-; CHECK: @test2
-; CHECK: ret i16 -16657 
+; 0xBEEF
+; LE: @test2
+; LE: ret i16 -16657
+
+; 0xDEAD
+; BE: @test2
+; BE: ret i16 -8531
 }
 
 ; Load of second 16 bits of 32-bit value.
@@ -29,16 +41,27 @@
   %r = load i16* getelementptr(i16* bitcast(i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16*), i32 1)
   ret i16 %r
 
-; CHECK: @test3
-; CHECK: ret i16 -8531
+; 0xDEAD
+; LE: @test3
+; LE: ret i16 -8531
+
+; 0xBEEF
+; BE: @test3
+; BE: ret i16 -16657
 }
 
 ; Load of 8 bit field + tail padding.
 define i16 @test4() {
   %r = load i16* getelementptr(i16* bitcast(i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16*), i32 2)
   ret i16 %r
-; CHECK: @test4
-; CHECK: ret i16 186
+
+; 0x00BA
+; LE: @test4
+; LE: ret i16 186
+
+; 0xBA00
+; BE: @test4
+; BE: ret i16 -17920
 }
 
 ; Load of double bits.
@@ -46,8 +69,13 @@
   %r = load i64* bitcast(double* @g2 to i64*)
   ret i64 %r
 
-; CHECK: @test6
-; CHECK: ret i64 4607182418800017408
+; 0x3FF_0000000000000
+; LE: @test6
+; LE: ret i64 4607182418800017408
+
+; 0x3FF_0000000000000
+; BE: @test6
+; BE: ret i64 4607182418800017408
 }
 
 ; Load of double bits.
@@ -55,8 +83,13 @@
   %r = load i16* bitcast(double* @g2 to i16*)
   ret i16 %r
 
-; CHECK: @test7
-; CHECK: ret i16 0
+; 0x0000
+; LE: @test7
+; LE: ret i16 0
+
+; 0x3FF0
+; BE: @test7
+; BE: ret i16 16368
 }
 
 ; Double load.
@@ -64,8 +97,11 @@
   %r = load double* bitcast({{i32,i8},i32}* @g1 to double*)
   ret double %r
 
-; CHECK: @test8
-; CHECK: ret double 0xBADEADBEEF
+; LE: @test8
+; LE: ret double 0xBADEADBEEF
+
+; BE: @test8
+; BE: ret double 0xDEADBEEFBA000000
 }
 
 
@@ -74,8 +110,13 @@
   %r = load i128* bitcast({i64, i64}* @g3 to i128*)
   ret i128 %r
 
-; CHECK: @test9
-; CHECK: ret i128 2071796475790618158476296315
+; 0x00000000_06B1BFF8_00000000_0000007B
+; LE: @test9
+; LE: ret i128 2071796475790618158476296315
+
+; 0x00000000_0000007B_00000000_06B1BFF8
+; BE: @test9
+; BE: ret i128 2268949521066387161080
 }
 
 ; vector load.
@@ -83,21 +124,30 @@
   %r = load <2 x i64>* bitcast({i64, i64}* @g3 to <2 x i64>*)
   ret <2 x i64> %r
 
-; CHECK: @test10
-; CHECK: ret <2 x i64> <i64 123, i64 112312312>
+; LE: @test10
+; LE: ret <2 x i64> <i64 123, i64 112312312>
+
+; BE: @test10
+; BE: ret <2 x i64> <i64 123, i64 112312312>
 }
 
 
 ; PR5287
+; { 0xA1, 0x08 }
 @g4 = internal constant { i8, i8 } { i8 -95, i8 8 }
 
 define i16 @test11() nounwind {
 entry:
   %a = load i16* bitcast ({ i8, i8 }* @g4 to i16*)
   ret i16 %a
-  
-; CHECK: @test11
-; CHECK: ret i16 2209
+
+; 0x08A1
+; LE: @test11
+; LE: ret i16 2209
+
+; 0xA108
+; BE: @test11
+; BE: ret i16 -24312
 }
 
 
@@ -107,8 +157,14 @@
 define i16 @test12() {
   %a = load i16* getelementptr inbounds ([3 x i16]* bitcast ([6 x i8]* @test12g to [3 x i16]*), i32 0, i64 1) 
   ret i16 %a
-; CHECK: @test12
-; CHECK: ret i16 98
+
+; 0x0062
+; LE: @test12
+; LE: ret i16 98
+
+; 0x6200
+; BE: @test12
+; BE: ret i16 25088
 }
 
 
@@ -117,8 +173,12 @@
 define i1 @test13() {
   %A = load i1* bitcast (i8* @g5 to i1*)
   ret i1 %A
-; CHECK: @test13
-; CHECK: ret i1 false
+
+; LE: @test13
+; LE: ret i1 false
+
+; BE: @test13
+; BE: ret i1 false
 }
 
 @g6 = constant [2 x i8*] [i8* inttoptr (i64 1 to i8*), i8* inttoptr (i64 2 to i8*)]
@@ -126,14 +186,22 @@
 entry:
   %tmp = load i64* bitcast ([2 x i8*]* @g6 to i64*)
   ret i64 %tmp
-; CHECK: @test14
-; CHECK: ret i64 1
+
+; LE: @test14
+; LE: ret i64 1
+
+; BE: @test14
+; BE: ret i64 1
 }
 
 define i64 @test15() nounwind {
 entry:
   %tmp = load i64* bitcast (i8** getelementptr inbounds ([2 x i8*]* @g6, i32 0, i64 1) to i64*)
   ret i64 %tmp
-; CHECK: @test15
-; CHECK: ret i64 2
+
+; LE: @test15
+; LE: ret i64 2
+
+; BE: @test15
+; BE: ret i64 2
 }

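As a quick cross-check on the LE/BE expectations in the loads.ll hunk above (a stand-alone Python sketch, not part of the patch), the @test2 and @test3 values follow from packing @g1's leading i32 (-559038737, i.e. 0xDEADBEEF) under each byte order and reinterpreting two-byte slices as a signed i16:

    import struct

    def i16_at(value_i32, offset, byte_order):
        # Lay out the i32 with the given byte order ('<' = LE, '>' = BE) and
        # reinterpret the two bytes starting at 'offset' as a signed i16.
        raw = struct.pack(byte_order + "i", value_i32)
        return struct.unpack(byte_order + "h", raw[offset:offset + 2])[0]

    assert i16_at(-559038737, 0, "<") == -16657   # @test2, LE: 0xBEEF
    assert i16_at(-559038737, 0, ">") == -8531    # @test2, BE: 0xDEAD
    assert i16_at(-559038737, 2, "<") == -8531    # @test3, LE: 0xDEAD
    assert i16_at(-559038737, 2, ">") == -16657   # @test3, BE: 0xBEEF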
Modified: llvm/branches/AMDILBackend/test/Transforms/CorrelatedValuePropagation/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/CorrelatedValuePropagation/crash.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/CorrelatedValuePropagation/crash.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/CorrelatedValuePropagation/crash.ll Tue Jan 15 11:16:16 2013
@@ -35,3 +35,28 @@
 func_29.exit:
   ret void
 }
+
+; PR13972
+define void @test3() nounwind {
+for.body:
+  br label %return
+
+for.cond.i:                                       ; preds = %if.else.i, %for.body.i
+  %e.2.i = phi i32 [ %e.2.i, %if.else.i ], [ -8, %for.body.i ]
+  br i1 undef, label %return, label %for.body.i
+
+for.body.i:                                       ; preds = %for.cond.i
+  switch i32 %e.2.i, label %for.cond3.i [
+    i32 -3, label %if.else.i
+    i32 0, label %for.cond.i
+  ]
+
+for.cond3.i:                                      ; preds = %for.cond3.i, %for.body.i
+  br label %for.cond3.i
+
+if.else.i:                                        ; preds = %for.body.i
+  br label %for.cond.i
+
+return:                                           ; preds = %for.cond.i, %for.body
+  ret void
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/DeadStoreElimination/simple.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/DeadStoreElimination/simple.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/DeadStoreElimination/simple.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/DeadStoreElimination/simple.ll Tue Jan 15 11:16:16 2013
@@ -291,3 +291,36 @@
   %call = call i8* @strdup(i8* %arrayidx) nounwind
   ret i8* %call
 }
+
+; Make sure a same-sized store to a later element is deleted
+; CHECK: @test24
+; CHECK-NOT: store i32 0
+; CHECK-NOT: store i32 0
+; CHECK: store i32 %b
+; CHECK: store i32 %c
+; CHECK: ret void
+define void @test24([2 x i32]* %a, i32 %b, i32 %c) nounwind {
+  %1 = getelementptr inbounds [2 x i32]* %a, i64 0, i64 0
+  store i32 0, i32* %1, align 4
+  %2 = getelementptr inbounds [2 x i32]* %a, i64 0, i64 1
+  store i32 0, i32* %2, align 4
+  %3 = getelementptr inbounds [2 x i32]* %a, i64 0, i64 0
+  store i32 %b, i32* %3, align 4
+  %4 = getelementptr inbounds [2 x i32]* %a, i64 0, i64 1
+  store i32 %c, i32* %4, align 4
+  ret void
+}
+
+; Check another case like PR13547 where strdup is not like malloc.
+; CHECK: @test25
+; CHECK: load i8
+; CHECK: store i8 0
+; CHECK: store i8 %tmp
+define i8* @test25(i8* %p) nounwind {
+  %p.4 = getelementptr i8* %p, i64 4
+  %tmp = load i8* %p.4, align 1
+  store i8 0, i8* %p.4, align 1
+  %q = call i8* @strdup(i8* %p) nounwind optsize
+  store i8 %tmp, i8* %p.4, align 1
+  ret i8* %q
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/GVN/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/GVN/crash.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/GVN/crash.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/GVN/crash.ll Tue Jan 15 11:16:16 2013
@@ -163,3 +163,39 @@
   ret i8 %1
 }
 
+
+; Test that a GEP in an unreachable block with the following form doesn't crash
+; GVN:
+;
+;    %x = gep %some.type %x, ...
+
+%struct.type = type { i64, i32, i32 }
+
+define fastcc void @func() nounwind uwtable ssp align 2 {
+entry:
+  br label %reachable.bb
+
+;; Unreachable code.
+
+unreachable.bb:
+  %gep.val = getelementptr inbounds %struct.type* %gep.val, i64 1
+  br i1 undef, label %u2.bb, label %u1.bb
+
+u1.bb:
+  %tmp1 = getelementptr inbounds %struct.type* %gep.val, i64 0, i32 0
+  store i64 -1, i64* %tmp1, align 8
+  br label %unreachable.bb
+
+u2.bb:
+  %0 = load i32* undef, align 4
+  %conv.i.i.i.i.i = zext i32 %0 to i64
+  br label %u2.bb
+
+;; Reachable code.
+
+reachable.bb:
+  br label %r1.bb
+
+r1.bb:
+  br label %u2.bb
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/GVN/rle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/GVN/rle.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/GVN/rle.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/GVN/rle.ll Tue Jan 15 11:16:16 2013
@@ -1,7 +1,5 @@
-; RUN: opt < %s -basicaa -gvn -S -die | FileCheck %s
-
-; 32-bit little endian target.
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
+; RUN: opt < %s -default-data-layout="e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -basicaa -gvn -S -die | FileCheck %s
+; RUN: opt < %s -default-data-layout="E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-n32"      -basicaa -gvn -S -die | FileCheck %s
 
 ;; Trivial RLE test.
 define i32 @test0(i32 %V, i32* %P) {
@@ -318,7 +316,7 @@
   %P4 = getelementptr i8* %P3, i32 2
   br i1 %cond, label %T, label %F
 T:
-  store i32 42, i32* %P
+  store i32 57005, i32* %P
   br label %Cont
   
 F:
@@ -620,7 +618,7 @@
 ; CHECK-NOT: load
 ; CHECK: load i16*
 ; CHECK-NOT: load
-; CHECK-ret i32
+; CHECK: ret i32
 }
 
 define i32 @test_widening2() nounwind ssp noredzone {
@@ -644,7 +642,7 @@
 ; CHECK-NOT: load
 ; CHECK: load i32*
 ; CHECK-NOT: load
-; CHECK-ret i32
+; CHECK: ret i32
 }
 
 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind

Modified: llvm/branches/AMDILBackend/test/Transforms/GlobalOpt/load-store-global.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/GlobalOpt/load-store-global.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/GlobalOpt/load-store-global.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/GlobalOpt/load-store-global.ll Tue Jan 15 11:16:16 2013
@@ -1,15 +1,38 @@
-; RUN: opt < %s -globalopt -S | not grep G
+; RUN: opt < %s -globalopt -S | FileCheck %s
 
 @G = internal global i32 17             ; <i32*> [#uses=3]
+; CHECK-NOT: @G
 
 define void @foo() {
         %V = load i32* @G               ; <i32> [#uses=1]
         store i32 %V, i32* @G
         ret void
+; CHECK: @foo
+; CHECK-NEXT: ret void
 }
 
 define i32 @bar() {
         %X = load i32* @G               ; <i32> [#uses=1]
         ret i32 %X
+; CHECK: @bar
+; CHECK-NEXT: ret i32 17
+}
+
+@a = internal global i64* null, align 8
+; CHECK-NOT: @a
+
+; PR13968
+define void @qux() nounwind {
+  %b = bitcast i64** @a to i8*
+  %g = getelementptr i64** @a, i32 1
+  %cmp = icmp ne i8* null, %b
+  %cmp2 = icmp eq i8* null, %b
+  %cmp3 = icmp eq i64** null, %g
+  store i64* inttoptr (i64 1 to i64*), i64** @a, align 8
+  %l = load i64** @a, align 8
+  ret void
+; CHECK: @qux
+; CHECK-NOT: store
+; CHECK-NOT: load
 }
 

Modified: llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/2004-04-05-InvokeCastCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/2004-04-05-InvokeCastCrash.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/2004-04-05-InvokeCastCrash.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/2004-04-05-InvokeCastCrash.ll Tue Jan 15 11:16:16 2013
@@ -39,11 +39,11 @@
 	%"struct.llvm::SymbolTable" = type opaque
 	%"struct.llvm::SymbolTableListTraits<llvm::Argument,llvm::Function,llvm::Function,llvm::ilist_traits<llvm::Argument> >" = type { %"struct.llvm::Function"*, %"struct.llvm::Function"* }
 	%"struct.llvm::SymbolTableListTraits<llvm::Instruction,llvm::BasicBlock,llvm::Function,llvm::ilist_traits<llvm::Instruction> >" = type { %"struct.llvm::Function"*, %"struct.llvm::BasicBlock"* }
-	%"struct.llvm::TargetData" = type { %"struct.llvm::FunctionPass", i1, i8, i8, i8, i8, i8, i8, i8, i8 }
+	%"struct.llvm::DataLayout" = type { %"struct.llvm::FunctionPass", i1, i8, i8, i8, i8, i8, i8, i8, i8 }
 	%"struct.llvm::TargetFrameInfo" = type { i32 (...)**, i32, i32, i32 }
 	%"struct.llvm::TargetInstrDescriptor" = type { i8*, i32, i32, i32, i1, i32, i32, i32, i32, i32, i32*, i32* }
 	%"struct.llvm::TargetInstrInfo" = type { i32 (...)**, %"struct.llvm::TargetInstrDescriptor"*, i32, i32 }
-	%"struct.llvm::TargetMachine" = type { i32 (...)**, %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >", %"struct.llvm::TargetData", %"struct.llvm::IntrinsicLowering"* }
+	%"struct.llvm::TargetMachine" = type { i32 (...)**, %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >", %"struct.llvm::DataLayout", %"struct.llvm::IntrinsicLowering"* }
 	%"struct.llvm::TargetRegClassInfo" = type { i32 (...)**, i32, i32, i32 }
 	%"struct.llvm::TargetRegInfo" = type { i32 (...)**, %"struct.std::vector<const llvm::TargetRegClassInfo*,std::allocator<const llvm::TargetRegClassInfo*> >", %"struct.llvm::TargetMachine"* }
 	%"struct.llvm::Type" = type { %"struct.llvm::Value", i32, i32, i1, i32, %"struct.llvm::Type"*, %"struct.std::vector<llvm::PATypeHandle,std::allocator<llvm::PATypeHandle> >" }

Modified: llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/crash.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/crash.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/crash.ll Tue Jan 15 11:16:16 2013
@@ -87,3 +87,47 @@
 main.f.exit:                                      ; preds = %"3.i"
   unreachable
 }
+
+
+; PR13967
+
+define void @f() nounwind ssp {
+bb:
+  br label %bb4
+
+bb4:
+  %tmp = phi i64 [ %tmp5, %bb7 ], [ undef, %bb ]
+  %tmp5 = add nsw i64 %tmp, 1
+  %extract.t1 = trunc i64 %tmp5 to i32
+  br i1 false, label %bb6, label %bb7
+
+bb6:
+  br label %bb7
+
+bb7:
+  %.off0 = phi i32 [ undef, %bb6 ], [ %extract.t1, %bb4 ]
+  %tmp8 = icmp eq i32 %.off0, 0
+  br i1 %tmp8, label %bb9, label %bb4
+
+bb9:
+  ret void
+}
+
+; PR12536
+define void @fn1() noreturn nounwind {
+entry:
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.end, %entry
+  %b.0 = phi i32 [ undef, %entry ], [ %conv, %for.end ]
+  br label %for.cond1
+
+for.cond1:                                        ; preds = %for.cond1, %for.cond
+  %c.0 = phi i32 [ %b.0, %for.cond1 ], [ 0, %for.cond ]
+  br i1 undef, label %for.cond1, label %for.end
+
+for.end:                                          ; preds = %for.cond1
+  %cmp2 = icmp slt i32 %c.0, 1
+  %conv = zext i1 %cmp2 to i32
+  br label %for.cond
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/eliminate-comparison.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/eliminate-comparison.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/eliminate-comparison.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/eliminate-comparison.ll Tue Jan 15 11:16:16 2013
@@ -106,3 +106,106 @@
 return:
   ret void
 }
+
+; PR14432
+; Indvars should not turn the second loop into an infinite one.
+
+; CHECK: @func_11
+; CHECK: %tmp5 = icmp slt i32 %__key6.0, 10
+; CHECK-NOT: br i1 true, label %noassert68, label %unrolledend
+
+define i32 @func_11() nounwind uwtable {
+entry:
+  br label %forcond
+
+forcond:                                          ; preds = %noassert, %entry
+  %__key6.0 = phi i32 [ 2, %entry ], [ %tmp37, %noassert ]
+  %tmp5 = icmp slt i32 %__key6.0, 10
+  br i1 %tmp5, label %noassert, label %forcond38.preheader
+
+forcond38.preheader:                              ; preds = %forcond
+  br label %forcond38
+
+noassert:                                         ; preds = %forbody
+  %tmp13 = sdiv i32 -32768, %__key6.0
+  %tmp2936 = shl i32 %tmp13, 24
+  %sext23 = shl i32 %tmp13, 24
+  %tmp32 = icmp eq i32 %tmp2936, %sext23
+  %tmp37 = add i32 %__key6.0, 1
+  br i1 %tmp32, label %forcond, label %assert33
+
+assert33:                                         ; preds = %noassert
+  tail call void @llvm.trap()
+  unreachable
+
+forcond38:                                        ; preds = %noassert68, %forcond38.preheader
+  %__key8.0 = phi i32 [ %tmp81, %noassert68 ], [ 2, %forcond38.preheader ]
+  %tmp46 = icmp slt i32 %__key8.0, 10
+  br i1 %tmp46, label %noassert68, label %unrolledend
+
+noassert68:                                       ; preds = %forbody39
+  %tmp57 = sdiv i32 -32768, %__key8.0
+  %sext34 = shl i32 %tmp57, 16
+  %sext21 = shl i32 %tmp57, 16
+  %tmp76 = icmp eq i32 %sext34, %sext21
+  %tmp81 = add i32 %__key8.0, 1
+  br i1 %tmp76, label %forcond38, label %assert77
+
+assert77:                                         ; preds = %noassert68
+  tail call void @llvm.trap()
+  unreachable
+
+unrolledend:                                      ; preds = %forcond38
+  ret i32 0
+}
+
+declare void @llvm.trap() noreturn nounwind
+
+; In this case the second loop has only a single iteration, so fold the header away
+; CHECK: @func_12
+; CHECK: %tmp5 = icmp slt i32 %__key6.0, 10
+; CHECK: br i1 true, label %noassert68, label %unrolledend
+define i32 @func_12() nounwind uwtable {
+entry:
+  br label %forcond
+
+forcond:                                          ; preds = %noassert, %entry
+  %__key6.0 = phi i32 [ 2, %entry ], [ %tmp37, %noassert ]
+  %tmp5 = icmp slt i32 %__key6.0, 10
+  br i1 %tmp5, label %noassert, label %forcond38.preheader
+
+forcond38.preheader:                              ; preds = %forcond
+  br label %forcond38
+
+noassert:                                         ; preds = %forbody
+  %tmp13 = sdiv i32 -32768, %__key6.0
+  %tmp2936 = shl i32 %tmp13, 24
+  %sext23 = shl i32 %tmp13, 24
+  %tmp32 = icmp eq i32 %tmp2936, %sext23
+  %tmp37 = add i32 %__key6.0, 1
+  br i1 %tmp32, label %forcond, label %assert33
+
+assert33:                                         ; preds = %noassert
+  tail call void @llvm.trap()
+  unreachable
+
+forcond38:                                        ; preds = %noassert68, %forcond38.preheader
+  %__key8.0 = phi i32 [ %tmp81, %noassert68 ], [ 2, %forcond38.preheader ]
+  %tmp46 = icmp slt i32 %__key8.0, 10
+  br i1 %tmp46, label %noassert68, label %unrolledend
+
+noassert68:                                       ; preds = %forbody39
+  %tmp57 = sdiv i32 -32768, %__key8.0
+  %sext34 = shl i32 %tmp57, 16
+  %sext21 = shl i32 %tmp57, 16
+  %tmp76 = icmp ne i32 %sext34, %sext21
+  %tmp81 = add i32 %__key8.0, 1
+  br i1 %tmp76, label %forcond38, label %assert77
+
+assert77:                                         ; preds = %noassert68
+  tail call void @llvm.trap()
+  unreachable
+
+unrolledend:                                      ; preds = %forcond38
+  ret i32 0
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/no-iv-rewrite.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/no-iv-rewrite.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/no-iv-rewrite.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/IndVarSimplify/no-iv-rewrite.ll Tue Jan 15 11:16:16 2013
@@ -199,7 +199,6 @@
 ; back to the loop iv.
 ;
 ; CHECK: loop:
-; CHECK: phi i32
 ; CHECK-NOT: phi
 ; CHECK: exit:
 loop:

Modified: llvm/branches/AMDILBackend/test/Transforms/Inline/always-inline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/Inline/always-inline.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/Inline/always-inline.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/Inline/always-inline.ll Tue Jan 15 11:16:16 2013
@@ -33,7 +33,6 @@
 ;
 ; CHECK: @outer2
 ; CHECK-NOT: call void @inner2
-; CHECK alloca i32, i32 %N
 ; CHECK-NOT: call void @inner2
 ; CHECK: ret void
 

Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/2012-07-25-LoadPart.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/2012-07-25-LoadPart.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/2012-07-25-LoadPart.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/2012-07-25-LoadPart.ll Tue Jan 15 11:16:16 2013
@@ -1,12 +1,14 @@
-; RUN: opt < %s -instcombine -S | FileCheck %s
+; RUN: opt < %s -default-data-layout="e-p:32:32:32" -instcombine -S | FileCheck %s --check-prefix=LE
+; RUN: opt < %s -default-data-layout="E-p:32:32:32" -instcombine -S | FileCheck %s --check-prefix=BE
 ; PR13442
 
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
-
 @test = constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
 
 define i64 @foo() {
   %ret = load i64* bitcast (i8* getelementptr (i8* bitcast ([4 x i32]* @test to i8*), i64 2) to i64*), align 1
   ret i64 %ret
-  ; CHECK: ret i64 844424930263040
+  ; 0x00030000_00020000 in [01 00/00 00 02 00 00 00 03 00/00 00 04 00 00 00]
+  ; LE: ret i64 844424930263040
+  ; 0x00010000_00020000 in [00 00/00 01 00 00 00 02 00 00/00 03 00 00 00 04]
+  ; BE: ret i64 281474976841728
 }

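The misaligned i64 load in the 2012-07-25-LoadPart.ll hunk above can be cross-checked the same way: lay out the four i32 constants of @test under each byte order and reinterpret the eight bytes starting at byte offset 2 as an i64. A stand-alone Python sketch (again, not part of the patch):

    import struct

    def i64_at_offset2(byte_order):
        # @test = [i32 1, i32 2, i32 3, i32 4], laid out with the given byte
        # order; the folded load reads 8 bytes starting at byte offset 2.
        raw = struct.pack(byte_order + "4i", 1, 2, 3, 4)
        return struct.unpack(byte_order + "q", raw[2:10])[0]

    assert i64_at_offset2("<") == 844424930263040    # LE: 0x0003000000020000
    assert i64_at_offset2(">") == 281474976841728    # BE: 0x0001000000020000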
Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/align-addr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/align-addr.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/align-addr.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/align-addr.ll Tue Jan 15 11:16:16 2013
@@ -58,3 +58,19 @@
   store double %n, double* %p
   ret double %t
 }
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+
+declare void @use(i8*)
+
+%struct.s = type { i32, i32, i32, i32 }
+
+define void @test3(%struct.s* sret %a4) {
+; Check that the alignment is bumped up to the alignment of the sret type.
+; CHECK: @test3
+  %a4.cast = bitcast %struct.s* %a4 to i8*
+  call void @llvm.memset.p0i8.i64(i8* %a4.cast, i8 0, i64 16, i32 1, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i64(i8* %a4.cast, i8 0, i64 16, i32 4, i1 false)
+  call void @use(i8* %a4.cast)
+  ret void
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/alloca.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/alloca.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/alloca.ll Tue Jan 15 11:16:16 2013
@@ -94,3 +94,19 @@
   tail call void @f(i32* %b)
   ret void
 }
+
+; PR14371
+%opaque_type = type opaque
+%real_type = type { { i32, i32* } }
+
+@opaque_global = external constant %opaque_type, align 4
+
+define void @test7() {
+entry:
+  %0 = alloca %real_type, align 4
+  %1 = bitcast %real_type* %0 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* bitcast (%opaque_type* @opaque_global to i8*), i32 8, i32 1, i1 false)
+  ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind

Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/and-fcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/and-fcmp.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/and-fcmp.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/and-fcmp.ll Tue Jan 15 11:16:16 2013
@@ -10,7 +10,7 @@
 ; CHECK: fcmp oeq float %x, %y
 ; CHECK-NOT: fcmp ueq float %x, %y
 ; CHECK-NOT: fcmp ord float %x, %y
-; CHECK-NOW: and
+; CHECK-NOT: and
 }
 
 define zeroext i8 @t2(float %x, float %y) nounwind {

Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/cast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/cast.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/cast.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/cast.ll Tue Jan 15 11:16:16 2013
@@ -694,3 +694,209 @@
 ; CHECK: @test67
 ; CHECK: ret i1 false
 }
+
+%s = type { i32, i32, i32 }
+
+define %s @test68(%s *%p, i64 %i) {
+; CHECK: @test68
+  %o = mul i64 %i, 12
+  %q = bitcast %s* %p to i8*
+  %pp = getelementptr inbounds i8* %q, i64 %o
+; CHECK-NEXT: getelementptr %s*
+  %r = bitcast i8* %pp to %s*
+  %l = load %s* %r
+; CHECK-NEXT: load %s*
+  ret %s %l
+; CHECK-NEXT: ret %s
+}
+
+define double @test69(double *%p, i64 %i) {
+; CHECK: @test69
+  %o = shl nsw i64 %i, 3
+  %q = bitcast double* %p to i8*
+  %pp = getelementptr inbounds i8* %q, i64 %o
+; CHECK-NEXT: getelementptr inbounds double*
+  %r = bitcast i8* %pp to double*
+  %l = load double* %r
+; CHECK-NEXT: load double*
+  ret double %l
+; CHECK-NEXT: ret double
+}
+
+define %s @test70(%s *%p, i64 %i) {
+; CHECK: @test70
+  %o = mul nsw i64 %i, 36
+; CHECK-NEXT: mul nsw i64 %i, 3
+  %q = bitcast %s* %p to i8*
+  %pp = getelementptr inbounds i8* %q, i64 %o
+; CHECK-NEXT: getelementptr inbounds %s*
+  %r = bitcast i8* %pp to %s*
+  %l = load %s* %r
+; CHECK-NEXT: load %s*
+  ret %s %l
+; CHECK-NEXT: ret %s
+}
+
+define double @test71(double *%p, i64 %i) {
+; CHECK: @test71
+  %o = shl i64 %i, 5
+; CHECK-NEXT: shl i64 %i, 2
+  %q = bitcast double* %p to i8*
+  %pp = getelementptr i8* %q, i64 %o
+; CHECK-NEXT: getelementptr double*
+  %r = bitcast i8* %pp to double*
+  %l = load double* %r
+; CHECK-NEXT: load double*
+  ret double %l
+; CHECK-NEXT: ret double
+}
+
+define double @test72(double *%p, i32 %i) {
+; CHECK: @test72
+  %so = mul nsw i32 %i, 8
+  %o = sext i32 %so to i64
+; CHECK-NEXT: sext i32 %i to i64
+  %q = bitcast double* %p to i8*
+  %pp = getelementptr inbounds i8* %q, i64 %o
+; CHECK-NEXT: getelementptr inbounds double*
+  %r = bitcast i8* %pp to double*
+  %l = load double* %r
+; CHECK-NEXT: load double*
+  ret double %l
+; CHECK-NEXT: ret double
+}
+
+define double @test73(double *%p, i128 %i) {
+; CHECK: @test73
+  %lo = mul nsw i128 %i, 8
+  %o = trunc i128 %lo to i64
+; CHECK-NEXT: trunc i128 %i to i64
+  %q = bitcast double* %p to i8*
+  %pp = getelementptr inbounds i8* %q, i64 %o
+; CHECK-NEXT: getelementptr double*
+  %r = bitcast i8* %pp to double*
+  %l = load double* %r
+; CHECK-NEXT: load double*
+  ret double %l
+; CHECK-NEXT: ret double
+}
+
+define double @test74(double *%p, i64 %i) {
+; CHECK: @test74
+  %q = bitcast double* %p to i64*
+  %pp = getelementptr inbounds i64* %q, i64 %i
+; CHECK-NEXT: getelementptr inbounds double*
+  %r = bitcast i64* %pp to double*
+  %l = load double* %r
+; CHECK-NEXT: load double*
+  ret double %l
+; CHECK-NEXT: ret double
+}
+
+define i32* @test75(i32* %p, i32 %x) {
+; CHECK: @test75
+  %y = shl i32 %x, 3
+; CHECK-NEXT: shl i32 %x, 3
+  %z = sext i32 %y to i64
+; CHECK-NEXT: sext i32 %y to i64
+  %q = bitcast i32* %p to i8*
+  %r = getelementptr i8* %q, i64 %z
+  %s = bitcast i8* %r to i32*
+  ret i32* %s
+}
+
+define %s @test76(%s *%p, i64 %i, i64 %j) {
+; CHECK: @test76
+  %o = mul i64 %i, 12
+  %o2 = mul nsw i64 %o, %j
+; CHECK-NEXT: %o2 = mul i64 %i, %j
+  %q = bitcast %s* %p to i8*
+  %pp = getelementptr inbounds i8* %q, i64 %o2
+; CHECK-NEXT: getelementptr %s* %p, i64 %o2
+  %r = bitcast i8* %pp to %s*
+  %l = load %s* %r
+; CHECK-NEXT: load %s*
+  ret %s %l
+; CHECK-NEXT: ret %s
+}
+
+define %s @test77(%s *%p, i64 %i, i64 %j) {
+; CHECK: @test77
+  %o = mul nsw i64 %i, 36
+  %o2 = mul nsw i64 %o, %j
+; CHECK-NEXT: %o = mul nsw i64 %i, 3
+; CHECK-NEXT: %o2 = mul nsw i64 %o, %j
+  %q = bitcast %s* %p to i8*
+  %pp = getelementptr inbounds i8* %q, i64 %o2
+; CHECK-NEXT: getelementptr inbounds %s* %p, i64 %o2
+  %r = bitcast i8* %pp to %s*
+  %l = load %s* %r
+; CHECK-NEXT: load %s*
+  ret %s %l
+; CHECK-NEXT: ret %s
+}
+
+define %s @test78(%s *%p, i64 %i, i64 %j, i32 %k, i32 %l, i128 %m, i128 %n) {
+; CHECK: @test78
+  %a = mul nsw i32 %k, 36
+; CHECK-NEXT: mul nsw i32 %k, 3
+  %b = mul nsw i32 %a, %l
+; CHECK-NEXT: mul nsw i32 %a, %l
+  %c = sext i32 %b to i128
+; CHECK-NEXT: sext i32 %b to i128
+  %d = mul nsw i128 %c, %m
+; CHECK-NEXT: mul nsw i128 %c, %m
+  %e = mul i128 %d, %n
+; CHECK-NEXT: mul i128 %d, %n
+  %f = trunc i128 %e to i64
+; CHECK-NEXT: trunc i128 %e to i64
+  %g = mul nsw i64 %f, %i
+; CHECK-NEXT: mul i64 %f, %i
+  %h = mul nsw i64 %g, %j
+; CHECK-NEXT: mul i64 %g, %j
+  %q = bitcast %s* %p to i8*
+  %pp = getelementptr inbounds i8* %q, i64 %h
+; CHECK-NEXT: getelementptr %s* %p, i64 %h
+  %r = bitcast i8* %pp to %s*
+  %load = load %s* %r
+; CHECK-NEXT: load %s*
+  ret %s %load
+; CHECK-NEXT: ret %s
+}
+
+define %s @test79(%s *%p, i64 %i, i32 %j) {
+; CHECK: @test79
+  %a = mul nsw i64 %i, 36
+; CHECK: mul nsw i64 %i, 36
+  %b = trunc i64 %a to i32
+  %c = mul i32 %b, %j
+  %q = bitcast %s* %p to i8*
+; CHECK: bitcast
+  %pp = getelementptr inbounds i8* %q, i32 %c
+  %r = bitcast i8* %pp to %s*
+  %l = load %s* %r
+  ret %s %l
+}
+
+define double @test80([100 x double]* %p, i32 %i) {
+; CHECK: @test80
+  %tmp = mul nsw i32 %i, 8
+; CHECK-NEXT: sext i32 %i to i64
+  %q = bitcast [100 x double]* %p to i8*
+  %pp = getelementptr i8* %q, i32 %tmp
+; CHECK-NEXT: getelementptr [100 x double]*
+  %r = bitcast i8* %pp to double*
+  %l = load double* %r
+; CHECK-NEXT: load double*
+  ret double %l
+; CHECK-NEXT: ret double
+}
+
+define double @test81(double *%p, float %f) {
+  %i = fptosi float %f to i64
+  %q = bitcast double* %p to i8*
+  %pp = getelementptr i8* %q, i64 %i
+  %r = bitcast i8* %pp to double*
+  %l = load double* %r
+  ret double %l
+}

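The gep-of-bitcast tests above (test71 through test81) exercise InstCombine's folding of a byte-offset GEP through an i8* bitcast back into a typed GEP whenever the byte offset is a provable multiple of the element size. A minimal C sketch of the pointer identity those folds rely on follows; the helper names are illustrative only and are not part of the test suite.

#include <assert.h>
#include <stdio.h>

/* Source-level analogue of the gep-of-bitcast folds tested above: a byte
 * offset that is a multiple of sizeof(double) is the same as a typed
 * element index. */
static double load_via_bytes(double *p, long i) {
    char *q = (char *)p;                       /* bitcast double* -> i8*      */
    char *pp = q + i * (long)sizeof(double);   /* getelementptr i8*, i64 ...  */
    return *(double *)pp;                      /* bitcast back and load       */
}

static double load_via_index(double *p, long i) {
    return p[i];                               /* the canonical typed form    */
}

int main(void) {
    double buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    for (long i = 0; i < 8; ++i)
        assert(load_via_bytes(buf, i) == load_via_index(buf, i));
    puts("byte-offset and element-index loads agree");
    return 0;
}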
Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/crash.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/crash.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/crash.ll Tue Jan 15 11:16:16 2013
@@ -132,12 +132,14 @@
 }
 
 define void @test5() {
-       store i1 true, i1* undef
-       %1 = invoke i32 @test5a() to label %exit unwind label %exit
+  store i1 true, i1* undef
+  %r = invoke i32 @test5a() to label %exit unwind label %unwind
+unwind:
+  %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+          cleanup
+  br label %exit
 exit:
-       %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
-                cleanup
-       ret void
+  ret void
 }
 
 

Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/div-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/div-shift.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/div-shift.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/div-shift.ll Tue Jan 15 11:16:16 2013
@@ -21,3 +21,17 @@
   %3 = udiv i64 %x, %2
   ret i64 %3
 }
+
+; PR13250
+define i64 @t3(i64 %x, i32 %y) nounwind  {
+; CHECK: t3
+; CHECK-NOT: udiv
+; CHECK-NEXT: %1 = add i32 %y, 2
+; CHECK-NEXT: %2 = zext i32 %1 to i64
+; CHECK-NEXT: %3 = lshr i64 %x, %2
+; CHECK-NEXT: ret i64 %3
+  %1 = shl i32 4, %y
+  %2 = zext i32 %1 to i64
+  %3 = udiv i64 %x, %2
+  ret i64 %3
+}

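The PR13250 test above checks that a udiv whose divisor is "shl i32 4, %y" is rewritten as a right shift by %y + 2, which is sound because the divisor is the power of two 2^(y+2) whenever the shift does not overflow. A small C sketch of that identity, assuming in-range shift amounts; the values below are arbitrary samples.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* x / (4 << y) == x >> (y + 2) for unsigned x while the shifts stay
     * in range; this mirrors @t3 above. */
    uint64_t xs[] = {0, 1, 1024, 0xFFFFFFFFFFFFFFFFull};
    for (unsigned i = 0; i < 4; ++i)
        for (uint32_t y = 0; y < 30; ++y) {
            uint64_t div = xs[i] / (uint64_t)(4u << y);
            uint64_t shr = xs[i] >> (y + 2);
            assert(div == shr);
        }
    puts("udiv by (4 << y) matches lshr by (y + 2)");
    return 0;
}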
Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/fcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/fcmp.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/fcmp.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/fcmp.ll Tue Jan 15 11:16:16 2013
@@ -54,9 +54,8 @@
   %ext = fpext float %x to ppc_fp128
   %cmp = fcmp ogt ppc_fp128 %ext, 0xM00000000000000000000000000000000
   ret i1 %cmp
-; Can't convert ppc_fp128
 ; CHECK: @test7
-; CHECK-NEXT: fpext float %x to ppc_fp128
+; CHECK-NEXT: fcmp ogt float %x, 0.000000e+00
 }
 
 define float @test8(float %x) nounwind readnone optsize ssp {
@@ -69,3 +68,93 @@
 ; CHECK: @test8
 ; CHECK-NEXT: fcmp olt float %x, 0.000000e+00
 }
+
+declare double @fabs(double) nounwind readnone
+
+define i32 @test9(double %a) nounwind {
+  %call = tail call double @fabs(double %a) nounwind
+  %cmp = fcmp olt double %call, 0.000000e+00
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+; CHECK: @test9
+; CHECK-NOT: fabs
+; CHECK: ret i32 0
+}
+
+define i32 @test10(double %a) nounwind {
+  %call = tail call double @fabs(double %a) nounwind
+  %cmp = fcmp ole double %call, 0.000000e+00
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+; CHECK: @test10
+; CHECK-NOT: fabs
+; CHECK: fcmp oeq double %a, 0.000000e+00
+}
+
+define i32 @test11(double %a) nounwind {
+  %call = tail call double @fabs(double %a) nounwind
+  %cmp = fcmp ogt double %call, 0.000000e+00
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+; CHECK: @test11
+; CHECK-NOT: fabs
+; CHECK: fcmp one double %a, 0.000000e+00
+}
+
+define i32 @test12(double %a) nounwind {
+  %call = tail call double @fabs(double %a) nounwind
+  %cmp = fcmp oge double %call, 0.000000e+00
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+; CHECK: @test12
+; CHECK-NOT: fabs
+; CHECK: fcmp ord double %a, 0.000000e+00
+}
+
+define i32 @test13(double %a) nounwind {
+  %call = tail call double @fabs(double %a) nounwind
+  %cmp = fcmp une double %call, 0.000000e+00
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+; CHECK: @test13
+; CHECK-NOT: fabs
+; CHECK: fcmp une double %a, 0.000000e+00
+}
+
+define i32 @test14(double %a) nounwind {
+  %call = tail call double @fabs(double %a) nounwind
+  %cmp = fcmp oeq double %call, 0.000000e+00
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+; CHECK: @test14
+; CHECK-NOT: fabs
+; CHECK: fcmp oeq double %a, 0.000000e+00
+}
+
+define i32 @test15(double %a) nounwind {
+  %call = tail call double @fabs(double %a) nounwind
+  %cmp = fcmp one double %call, 0.000000e+00
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+; CHECK: @test15
+; CHECK-NOT: fabs
+; CHECK: fcmp one double %a, 0.000000e+00
+}
+
+define i32 @test16(double %a) nounwind {
+  %call = tail call double @fabs(double %a) nounwind
+  %cmp = fcmp ueq double %call, 0.000000e+00
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+; CHECK: @test16
+; CHECK-NOT: fabs
+; CHECK: fcmp ueq double %a, 0.000000e+00
+}
+
+; Don't crash.
+define i32 @test17(double %a, double (double)* %p) nounwind {
+  %call = tail call double %p(double %a) nounwind
+  %cmp = fcmp ueq double %call, 0.000000e+00
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}

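The new fabs tests above (test9 through test16) rely on fabs(a) never being negative, so an ordered comparison of fabs(a) against +0.0 can be restated as a comparison on a itself. A hedged C illustration of the first four identities, valid for non-NaN inputs; the sample values are arbitrary.

#include <assert.h>
#include <math.h>
#include <stdio.h>

int main(void) {
    /* Mirrors test9-test12: fabs(a) compared against +0.0 reduces to a
     * comparison on a itself whenever a is not NaN. */
    double samples[] = {-2.5, -0.0, 0.0, 3.75, 1e-300};
    for (unsigned i = 0; i < sizeof samples / sizeof *samples; ++i) {
        double a = samples[i];
        assert((fabs(a) < 0.0)  == 0);           /* test9: always false        */
        assert((fabs(a) <= 0.0) == (a == 0.0));  /* test10: oeq a, 0           */
        assert((fabs(a) > 0.0)  == (a != 0.0));  /* test11: one a, 0 (non-NaN) */
        assert((fabs(a) >= 0.0) == 1);           /* test12: ord, true here     */
    }
    puts("fabs/zero comparison identities hold for non-NaN inputs");
    return 0;
}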
Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/fold-vector-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/fold-vector-select.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/fold-vector-select.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/fold-vector-select.ll Tue Jan 15 11:16:16 2013
@@ -1,13 +1,148 @@
 ; RUN: opt < %s -instcombine -S | not grep select
 
-define void @foo(<4 x i32> *%A, <4 x i32> *%B, <4 x i32> *%C, <4 x i32> *%D) {
- %r = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> zeroinitializer
- %g = select <4 x i1> <i1 false, i1 false, i1 false, i1 false>,  <4 x i32> zeroinitializer, <4 x i32> <i32 3, i32 6, i32 9, i32 1>
- %b = select <4 x i1> <i1 false, i1 true, i1 false, i1 true>,  <4 x i32> zeroinitializer, <4 x i32> <i32 7, i32 1, i32 4, i32 9>
- %a = select <4 x i1> zeroinitializer,  <4 x i32> zeroinitializer, <4 x i32> <i32 3, i32 2, i32 8, i32 5>
- store <4 x i32> %r, <4 x i32>* %A
- store <4 x i32> %g, <4 x i32>* %B
- store <4 x i32> %b, <4 x i32>* %C
- store <4 x i32> %a, <4 x i32>* %D
+define void @foo(<4 x i32> *%A, <4 x i32> *%B, <4 x i32> *%C, <4 x i32> *%D,
+                 <4 x i32> *%E, <4 x i32> *%F, <4 x i32> *%G, <4 x i32> *%H,
+                 <4 x i32> *%I, <4 x i32> *%J, <4 x i32> *%K, <4 x i32> *%L,
+                 <4 x i32> *%M, <4 x i32> *%N, <4 x i32> *%O, <4 x i32> *%P,
+                 <4 x i32> *%Q, <4 x i32> *%R, <4 x i32> *%S, <4 x i32> *%T,
+                 <4 x i32> *%U, <4 x i32> *%V, <4 x i32> *%W, <4 x i32> *%X,
+                 <4 x i32> *%Y, <4 x i32> *%Z, <4 x i32> *%BA, <4 x i32> *%BB,
+                 <4 x i32> *%BC, <4 x i32> *%BD, <4 x i32> *%BE, <4 x i32> *%BF,
+                 <4 x i32> *%BG, <4 x i32> *%BH, <4 x i32> *%BI, <4 x i32> *%BJ,
+                 <4 x i32> *%BK, <4 x i32> *%BL, <4 x i32> *%BM, <4 x i32> *%BN,
+                 <4 x i32> *%BO, <4 x i32> *%BP, <4 x i32> *%BQ, <4 x i32> *%BR,
+                 <4 x i32> *%BS, <4 x i32> *%BT, <4 x i32> *%BU, <4 x i32> *%BV,
+                 <4 x i32> *%BW, <4 x i32> *%BX, <4 x i32> *%BY, <4 x i32> *%BZ,
+                 <4 x i32> *%CA, <4 x i32> *%CB, <4 x i32> *%CC, <4 x i32> *%CD,
+                 <4 x i32> *%CE, <4 x i32> *%CF, <4 x i32> *%CG, <4 x i32> *%CH,
+                 <4 x i32> *%CI, <4 x i32> *%CJ, <4 x i32> *%CK, <4 x i32> *%CL) {
+ %a = select <4 x i1> <i1 false, i1 false, i1 false, i1 false>, <4 x i32> zeroinitializer, <4 x i32> <i32 9, i32 87, i32 57, i32 8>
+ %b = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> zeroinitializer, <4 x i32> <i32 44, i32 99, i32 49, i32 29>
+ %c = select <4 x i1> <i1 false, i1 true, i1 false, i1 false>, <4 x i32> zeroinitializer, <4 x i32> <i32 15, i32 18, i32 53, i32 84>
+ %d = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x i32> zeroinitializer, <4 x i32> <i32 29, i32 82, i32 45, i32 16>
+ %e = select <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x i32> zeroinitializer, <4 x i32> <i32 11, i32 15, i32 32, i32 99>
+ %f = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> zeroinitializer, <4 x i32> <i32 19, i32 86, i32 29, i32 33>
+ %g = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>, <4 x i32> zeroinitializer, <4 x i32> <i32 44, i32 10, i32 26, i32 45>
+ %h = select <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x i32> zeroinitializer, <4 x i32> <i32 88, i32 70, i32 90, i32 48>
+ %i = select <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer, <4 x i32> <i32 30, i32 53, i32 42, i32 12>
+ %j = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer, <4 x i32> <i32 46, i32 24, i32 93, i32 26>
+ %k = select <4 x i1> <i1 false, i1 true, i1 false, i1 true>, <4 x i32> zeroinitializer, <4 x i32> <i32 33, i32 99, i32 15, i32 57>
+ %l = select <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> zeroinitializer, <4 x i32> <i32 51, i32 60, i32 60, i32 50>
+ %m = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x i32> zeroinitializer, <4 x i32> <i32 50, i32 12, i32 7, i32 45>
+ %n = select <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x i32> zeroinitializer, <4 x i32> <i32 15, i32 65, i32 36, i32 36>
+ %o = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> zeroinitializer, <4 x i32> <i32 54, i32 0, i32 17, i32 78>
+ %p = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> zeroinitializer, <4 x i32> <i32 56, i32 13, i32 64, i32 48>
+ %q = select <4 x i1> <i1 false, i1 false, i1 false, i1 false>, <4 x i32> <i32 52, i32 69, i32 88, i32 11>, <4 x i32> zeroinitializer
+ %r = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> <i32 5, i32 87, i32 68, i32 14>, <4 x i32> zeroinitializer
+ %s = select <4 x i1> <i1 false, i1 true, i1 false, i1 false>, <4 x i32> <i32 47, i32 17, i32 66, i32 63>, <4 x i32> zeroinitializer
+ %t = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x i32> <i32 64, i32 25, i32 73, i32 81>, <4 x i32> zeroinitializer
+ %u = select <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x i32> <i32 51, i32 41, i32 61, i32 63>, <4 x i32> zeroinitializer
+ %v = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> <i32 39, i32 59, i32 17, i32 0>, <4 x i32> zeroinitializer
+ %w = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>, <4 x i32> <i32 91, i32 99, i32 97, i32 29>, <4 x i32> zeroinitializer
+ %x = select <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x i32> <i32 89, i32 45, i32 89, i32 10>, <4 x i32> zeroinitializer
+ %y = select <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32> <i32 25, i32 70, i32 21, i32 27>, <4 x i32> zeroinitializer
+ %z = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> <i32 40, i32 12, i32 27, i32 88>, <4 x i32> zeroinitializer
+ %ba = select <4 x i1> <i1 false, i1 true, i1 false, i1 true>, <4 x i32> <i32 36, i32 35, i32 90, i32 23>, <4 x i32> zeroinitializer
+ %bb = select <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> <i32 83, i32 3, i32 64, i32 82>, <4 x i32> zeroinitializer
+ %bc = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x i32> <i32 15, i32 72, i32 2, i32 54>, <4 x i32> zeroinitializer
+ %bd = select <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x i32> <i32 32, i32 47, i32 100, i32 84>, <4 x i32> zeroinitializer
+ %be = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> <i32 92, i32 57, i32 82, i32 1>, <4 x i32> zeroinitializer
+ %bf = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> <i32 42, i32 14, i32 22, i32 89>, <4 x i32> zeroinitializer
+ %bg = select <4 x i1> <i1 false, i1 false, i1 false, i1 false>, <4 x i32> <i32 33, i32 10, i32 67, i32 66>, <4 x i32> <i32 42, i32 91, i32 47, i32 40>
+ %bh = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> <i32 8, i32 13, i32 48, i32 0>, <4 x i32> <i32 84, i32 66, i32 87, i32 84>
+ %bi = select <4 x i1> <i1 false, i1 true, i1 false, i1 false>, <4 x i32> <i32 85, i32 96, i32 1, i32 94>, <4 x i32> <i32 54, i32 57, i32 7, i32 92>
+ %bj = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x i32> <i32 55, i32 21, i32 92, i32 68>, <4 x i32> <i32 51, i32 61, i32 62, i32 39>
+ %bk = select <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x i32> <i32 42, i32 18, i32 77, i32 74>, <4 x i32> <i32 82, i32 33, i32 30, i32 7>
+ %bl = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> <i32 80, i32 92, i32 61, i32 84>, <4 x i32> <i32 43, i32 89, i32 92, i32 6>
+ %bm = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>, <4 x i32> <i32 49, i32 14, i32 62, i32 62>, <4 x i32> <i32 35, i32 33, i32 92, i32 59>
+ %bn = select <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x i32> <i32 3, i32 97, i32 49, i32 18>, <4 x i32> <i32 56, i32 64, i32 19, i32 75>
+ %bo = select <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32> <i32 91, i32 57, i32 0, i32 1>, <4 x i32> <i32 43, i32 63, i32 64, i32 11>
+ %bp = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> <i32 41, i32 65, i32 18, i32 11>, <4 x i32> <i32 86, i32 26, i32 31, i32 3>
+ %bq = select <4 x i1> <i1 false, i1 true, i1 false, i1 true>, <4 x i32> <i32 31, i32 46, i32 32, i32 68>, <4 x i32> <i32 100, i32 59, i32 62, i32 6>
+ %br = select <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> <i32 76, i32 67, i32 87, i32 7>, <4 x i32> <i32 63, i32 48, i32 97, i32 24>
+ %bs = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x i32> <i32 83, i32 89, i32 19, i32 4>, <4 x i32> <i32 21, i32 2, i32 40, i32 21>
+ %bt = select <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x i32> <i32 45, i32 76, i32 81, i32 100>, <4 x i32> <i32 65, i32 26, i32 100, i32 46>
+ %bu = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> <i32 16, i32 75, i32 31, i32 17>, <4 x i32> <i32 37, i32 66, i32 86, i32 65>
+ %bv = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> <i32 13, i32 25, i32 43, i32 59>, <4 x i32> <i32 82, i32 78, i32 60, i32 52>
+ %bw = select <4 x i1> <i1 false, i1 false, i1 false, i1 false>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %bx = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %by = select <4 x i1> <i1 false, i1 true, i1 false, i1 false>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %bz = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %ca = select <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %cb = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %cc = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %cd = select <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %ce = select <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %cf = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %cg = select <4 x i1> <i1 false, i1 true, i1 false, i1 true>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %ch = select <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %ci = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %cj = select <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %ck = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ %cl = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ store <4 x i32> %a, <4 x i32>* %A
+ store <4 x i32> %b, <4 x i32>* %B
+ store <4 x i32> %c, <4 x i32>* %C
+ store <4 x i32> %d, <4 x i32>* %D
+ store <4 x i32> %e, <4 x i32>* %E
+ store <4 x i32> %f, <4 x i32>* %F
+ store <4 x i32> %g, <4 x i32>* %G
+ store <4 x i32> %h, <4 x i32>* %H
+ store <4 x i32> %i, <4 x i32>* %I
+ store <4 x i32> %j, <4 x i32>* %J
+ store <4 x i32> %k, <4 x i32>* %K
+ store <4 x i32> %l, <4 x i32>* %L
+ store <4 x i32> %m, <4 x i32>* %M
+ store <4 x i32> %n, <4 x i32>* %N
+ store <4 x i32> %o, <4 x i32>* %O
+ store <4 x i32> %p, <4 x i32>* %P
+ store <4 x i32> %q, <4 x i32>* %Q
+ store <4 x i32> %r, <4 x i32>* %R
+ store <4 x i32> %s, <4 x i32>* %S
+ store <4 x i32> %t, <4 x i32>* %T
+ store <4 x i32> %u, <4 x i32>* %U
+ store <4 x i32> %v, <4 x i32>* %V
+ store <4 x i32> %w, <4 x i32>* %W
+ store <4 x i32> %x, <4 x i32>* %X
+ store <4 x i32> %y, <4 x i32>* %Y
+ store <4 x i32> %z, <4 x i32>* %Z
+ store <4 x i32> %ba, <4 x i32>* %BA
+ store <4 x i32> %bb, <4 x i32>* %BB
+ store <4 x i32> %bc, <4 x i32>* %BC
+ store <4 x i32> %bd, <4 x i32>* %BD
+ store <4 x i32> %be, <4 x i32>* %BE
+ store <4 x i32> %bf, <4 x i32>* %BF
+ store <4 x i32> %bg, <4 x i32>* %BG
+ store <4 x i32> %bh, <4 x i32>* %BH
+ store <4 x i32> %bi, <4 x i32>* %BI
+ store <4 x i32> %bj, <4 x i32>* %BJ
+ store <4 x i32> %bk, <4 x i32>* %BK
+ store <4 x i32> %bl, <4 x i32>* %BL
+ store <4 x i32> %bm, <4 x i32>* %BM
+ store <4 x i32> %bn, <4 x i32>* %BN
+ store <4 x i32> %bo, <4 x i32>* %BO
+ store <4 x i32> %bp, <4 x i32>* %BP
+ store <4 x i32> %bq, <4 x i32>* %BQ
+ store <4 x i32> %br, <4 x i32>* %BR
+ store <4 x i32> %bs, <4 x i32>* %BS
+ store <4 x i32> %bt, <4 x i32>* %BT
+ store <4 x i32> %bu, <4 x i32>* %BU
+ store <4 x i32> %bv, <4 x i32>* %BV
+ store <4 x i32> %bw, <4 x i32>* %BW
+ store <4 x i32> %bx, <4 x i32>* %BX
+ store <4 x i32> %by, <4 x i32>* %BY
+ store <4 x i32> %bz, <4 x i32>* %BZ
+ store <4 x i32> %ca, <4 x i32>* %CA
+ store <4 x i32> %cb, <4 x i32>* %CB
+ store <4 x i32> %cc, <4 x i32>* %CC
+ store <4 x i32> %cd, <4 x i32>* %CD
+ store <4 x i32> %ce, <4 x i32>* %CE
+ store <4 x i32> %cf, <4 x i32>* %CF
+ store <4 x i32> %cg, <4 x i32>* %CG
+ store <4 x i32> %ch, <4 x i32>* %CH
+ store <4 x i32> %ci, <4 x i32>* %CI
+ store <4 x i32> %cj, <4 x i32>* %CJ
+ store <4 x i32> %ck, <4 x i32>* %CK
+ store <4 x i32> %cl, <4 x i32>* %CL
  ret void
 }

Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/icmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/icmp.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/icmp.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/icmp.ll Tue Jan 15 11:16:16 2013
@@ -659,3 +659,21 @@
 ; CHECK-NEXT: %c = icmp eq i8 %1, %a
 ; CHECK-NEXT: ret i1 %c
 }
+
+define i1 @test65(i64 %A, i64 %B) {
+  %s1 = add i64 %A, %B
+  %s2 = add i64 %A, %B
+  %cmp = icmp eq i64 %s1, %s2
+; CHECK: @test65
+; CHECK-NEXT: ret i1 true
+  ret i1 %cmp
+}
+
+define i1 @test66(i64 %A, i64 %B) {
+  %s1 = add i64 %A, %B
+  %s2 = add i64 %B, %A
+  %cmp = icmp eq i64 %s1, %s2
+; CHECK: @test66
+; CHECK-NEXT: ret i1 true
+  ret i1 %cmp
+}

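test65 and test66 above (and @add6 added to InstSimplify/compare.ll further down) fold an icmp eq of A+B against B+A to true, which is sound because two's-complement addition commutes even when it wraps. A tiny, purely illustrative C check over a few boundary values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* Wrapping 64-bit addition is still commutative, so the compare in
     * test65/test66 is a constant. */
    uint64_t samples[] = {0, 1, 0x8000000000000000ull, 0xFFFFFFFFFFFFFFFFull};
    for (unsigned i = 0; i < 4; ++i)
        for (unsigned j = 0; j < 4; ++j)
            assert(samples[i] + samples[j] == samples[j] + samples[i]);
    puts("a + b == b + a for all sampled 64-bit values");
    return 0;
}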
Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/memcpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/memcpy.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/memcpy.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/memcpy.ll Tue Jan 15 11:16:16 2013
@@ -1,6 +1,7 @@
 ; RUN: opt < %s -instcombine -S | FileCheck %s
 
 declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
 
 define void @test1(i8* %a) {
         tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %a, i32 100, i32 1, i1 false)
@@ -17,3 +18,10 @@
 ; CHECK: define void @test2
 ; CHECK-NEXT: call void @llvm.memcpy
 }
+
+define void @test3(i8* %d, i8* %s) {
+        tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 17179869184, i32 4, i1 false)
+        ret void
+; CHECK: define void @test3
+; CHECK-NEXT: call void @llvm.memcpy
+}

Removed: llvm/branches/AMDILBackend/test/Transforms/InstCombine/memset_chk.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/memset_chk.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/memset_chk.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/memset_chk.ll (removed)
@@ -1,18 +0,0 @@
-; RUN: opt < %s -instcombine -S | FileCheck %s
-; rdar://7719085
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-
-%struct.data = type { [100 x i32], [100 x i32], [1024 x i8] }
-
-define i32 @t() nounwind ssp {
-; CHECK: @t
-; CHECK: @llvm.memset.p0i8.i64
-entry:
-  %0 = alloca %struct.data, align 8               ; <%struct.data*> [#uses=1]
-  %1 = bitcast %struct.data* %0 to i8*            ; <i8*> [#uses=1]
-  %2 = call i8* @__memset_chk(i8* %1, i32 0, i64 1824, i64 1824) nounwind ; <i8*> [#uses=0]
-  ret i32 0
-}
-
-declare i8* @__memset_chk(i8*, i32, i64, i64) nounwind

Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/objsize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/objsize.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/objsize.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/objsize.ll Tue Jan 15 11:16:16 2013
@@ -238,3 +238,21 @@
 return:
   ret i32 42
 }
+
+; CHECK: @PR13621
+define i32 @PR13621(i1 %bool) nounwind {
+entry:
+  %cond = or i1 %bool, true
+  br i1 %cond, label %return, label %xpto
+
+; technically reachable, but this malformed IR may appear as a result of constant propagation
+xpto:
+  %gep2 = getelementptr i8* %gep, i32 1
+  %gep = getelementptr i8* %gep2, i32 1
+  %o = call i32 @llvm.objectsize.i32(i8* %gep, i1 true)
+; CHECK: ret i32 undef
+  ret i32 %o
+
+return:
+  ret i32 7
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/select.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/select.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/select.ll Tue Jan 15 11:16:16 2013
@@ -829,3 +829,37 @@
 ; CHECK: %C = or i1 %B, %not
 ; CHECK: ret i1 %C
 }
+
+; PR14131
+define void @test64(i32 %p, i16 %b) noreturn nounwind {
+entry:
+  %p.addr.0.insert.mask = and i32 %p, -65536
+  %conv2 = and i32 %p, 65535
+  br i1 undef, label %lor.rhs, label %lor.end
+
+lor.rhs:
+  %p.addr.0.extract.trunc = trunc i32 %p.addr.0.insert.mask to i16
+  %phitmp = zext i16 %p.addr.0.extract.trunc to i32
+  br label %lor.end
+
+lor.end:
+  %t.1 = phi i32 [ 0, %entry ], [ %phitmp, %lor.rhs ]
+  %conv6 = zext i16 %b to i32
+  %div = udiv i32 %conv6, %t.1
+  %tobool8 = icmp eq i32 %div, 0
+  %cmp = icmp eq i32 %t.1, 0
+  %cmp12 = icmp ult i32 %conv2, 2
+  %cmp.sink = select i1 %tobool8, i1 %cmp12, i1 %cmp
+  br i1 %cmp.sink, label %cond.end17, label %cond.false16
+
+cond.false16:
+  br label %cond.end17
+
+cond.end17:
+  br label %while.body
+
+while.body:
+  br label %while.body
+; CHECK: @test64
+; CHECK-NOT: select
+}

Removed: llvm/branches/AMDILBackend/test/Transforms/InstCombine/strcpy_chk.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/strcpy_chk.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/strcpy_chk.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/strcpy_chk.ll (removed)
@@ -1,13 +0,0 @@
-; RUN: opt < %s -instcombine -S | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
- at a = common global [60 x i8] zeroinitializer, align 1 ; <[60 x i8]*> [#uses=1]
- at .str = private constant [8 x i8] c"abcdefg\00"   ; <[8 x i8]*> [#uses=1]
-
-define i8* @foo() nounwind {
-; CHECK: @foo
-; CHECK-NEXT: call i8* @strcpy
-  %call = call i8* @__strcpy_chk(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 60) ; <i8*> [#uses=1]
-  ret i8* %call
-}
-
-declare i8* @__strcpy_chk(i8*, i8*, i32) nounwind

Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/udiv-simplify-bug-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/udiv-simplify-bug-1.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/udiv-simplify-bug-1.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/udiv-simplify-bug-1.ll Tue Jan 15 11:16:16 2013
@@ -6,9 +6,9 @@
 ; The udiv instructions shouldn't be optimized away, and the
 ; sext instructions should be optimized to zext.
 
-define i64 @bar(i32 %x) nounwind {
+define i64 @bar(i32 %x, i32 %g) nounwind {
   %y = lshr i32 %x, 30
-  %r = udiv i32 %y, 3
+  %r = udiv i32 %y, %g
   %z = sext i32 %r to i64
   ret i64 %z
 }

Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/vec_demanded_elts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/vec_demanded_elts.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/vec_demanded_elts.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/vec_demanded_elts.ll Tue Jan 15 11:16:16 2013
@@ -196,7 +196,7 @@
 ; CHECK-NOT: insertelement
 ; CHECK: %a3 = insertelement <4 x float> %a0, float 3.000000e+00, i32 3
 ; CHECK-NOT: insertelement
-; CHECK: %ret = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x float> %a3, <4 x float> <float undef, float 4.000000e+00, float 5.000000e+00, float undef>
+; CHECK: shufflevector <4 x float> %a3, <4 x float> <float undef, float 4.000000e+00, float 5.000000e+00, float undef>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
   %a0 = insertelement <4 x float> undef, float %f, i32 0
   %a1 = insertelement <4 x float> %a0, float 1.000000e+00, i32 1
   %a2 = insertelement <4 x float> %a1, float 2.000000e+00, i32 2

Modified: llvm/branches/AMDILBackend/test/Transforms/InstCombine/vec_shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstCombine/vec_shuffle.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstCombine/vec_shuffle.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstCombine/vec_shuffle.ll Tue Jan 15 11:16:16 2013
@@ -153,3 +153,46 @@
   ret <8 x i8> %tmp3
 }
 
+; We should form a shuffle out of a select with constant condition.
+define <4 x i16> @test13a(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: @test13a
+; CHECK-NEXT: shufflevector <4 x i16> %lhs, <4 x i16> %rhs, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT: ret
+  %A = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>,
+           <4 x i16> %lhs, <4 x i16> %rhs
+  ret <4 x i16> %A
+}
+
+define <4 x i16> @test13b(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: @test13b
+; CHECK-NEXT: ret <4 x i16> %lhs
+  %A = select <4 x i1> <i1 true, i1 undef, i1 true, i1 true>,
+           <4 x i16> %lhs, <4 x i16> %rhs
+  ret <4 x i16> %A
+}
+
+define <4 x i16> @test13c(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: @test13c
+; CHECK-NEXT: shufflevector <4 x i16> %lhs, <4 x i16> %rhs, <4 x i32> <i32 0, i32 undef, i32 2, i32 7>
+; CHECK-NEXT: ret
+  %A = select <4 x i1> <i1 true, i1 undef, i1 true, i1 false>,
+           <4 x i16> %lhs, <4 x i16> %rhs
+  ret <4 x i16> %A
+}
+
+define <4 x i16> @test13d(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: @test13d
+; CHECK: select
+; CHECK-NEXT: ret
+  %A = select <4 x i1> <i1 true, i1 icmp ugt (<4 x i16>(<4 x i16>, <4 x i16>)* @test13a, <4 x i16>(<4 x i16>, <4 x i16>)* @test13b), i1 true, i1 false>,
+           <4 x i16> %lhs, <4 x i16> %rhs
+  ret <4 x i16> %A
+}
+
+define <4 x i16> @test13e(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: @test13e
+; CHECK-NEXT: ret <4 x i16> %rhs
+  %A = select <4 x i1> <i1 false, i1 false, i1 false, i1 false>,
+           <4 x i16> %lhs, <4 x i16> %rhs
+  ret <4 x i16> %A
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/InstSimplify/compare.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/InstSimplify/compare.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/InstSimplify/compare.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/InstSimplify/compare.ll Tue Jan 15 11:16:16 2013
@@ -266,6 +266,15 @@
 ; CHECK: ret i1 true
 }
 
+define i1 @add6(i64 %A, i64 %B) {
+; CHECK: @add6
+  %s1 = add i64 %A, %B
+  %s2 = add i64 %B, %A
+  %cmp = icmp eq i64 %s1, %s2
+  ret i1 %cmp
+; CHECK: ret i1 true
+}
+
 define i1 @addpowtwo(i32 %x, i32 %y) {
 ; CHECK: @addpowtwo
   %l = lshr i32 %x, 1

Modified: llvm/branches/AMDILBackend/test/Transforms/Internalize/2008-05-09-AllButMain.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/Internalize/2008-05-09-AllButMain.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/Internalize/2008-05-09-AllButMain.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/Internalize/2008-05-09-AllButMain.ll Tue Jan 15 11:16:16 2013
@@ -1,27 +1,55 @@
-; No arguments means internalize all but main
-; RUN: opt < %s -internalize -S | grep internal | count 4
+; No arguments means internalize everything
+; RUN: opt < %s -internalize -S | FileCheck --check-prefix=NOARGS %s
+
 ; Internalize all but foo and j
-; RUN: opt < %s -internalize -internalize-public-api-list foo -internalize-public-api-list j -S | grep internal | count 3
-; Non existent files should be treated as if they were empty (so internalize all but main)
-; RUN: opt < %s -internalize -internalize-public-api-file /nonexistent/file 2> /dev/null -S | grep internal | count 4
-; RUN: opt < %s -internalize -internalize-public-api-list bar -internalize-public-api-list foo -internalize-public-api-file /nonexistent/file 2> /dev/null -S | grep internal | count 3
+; RUN: opt < %s -internalize -internalize-public-api-list foo -internalize-public-api-list j -S | FileCheck --check-prefix=LIST %s
+
+; Non-existent files should be treated as if they were empty (so internalize
+; everything)
+; RUN: opt < %s -internalize -internalize-public-api-file /nonexistent/file 2> /dev/null -S | FileCheck --check-prefix=EMPTYFILE %s
+
+; RUN: opt < %s -S -internalize -internalize-public-api-list bar -internalize-public-api-list foo -internalize-public-api-file /nonexistent/file  2> /dev/null | FileCheck --check-prefix=LIST2 %s
+
 ; -file and -list options should be merged, the .apifile contains foo and j
-; RUN: opt < %s -internalize -internalize-public-api-list bar -internalize-public-api-file %s.apifile -S | grep internal | count 2
+; RUN: opt < %s -internalize -internalize-public-api-list bar -internalize-public-api-file %s.apifile -S | FileCheck --check-prefix=MERGE %s
+
+; NOARGS: @i = internal global
+; LIST: @i = internal global
+; EMPTYFILE: @i = internal global
+; LIST2: @i = internal global
+; MERGE: @i = internal global
+ at i = global i32 0
 
- at i = weak global i32 0          ; <i32*> [#uses=0]
- at j = weak global i32 0          ; <i32*> [#uses=0]
+; NOARGS: @j = internal global
+; LIST: @j = global
+; EMPTYFILE: @j = internal global
+; LIST2: @j = internal global
+; MERGE: @j = global
+ at j = global i32 0
 
-define void @main(...) {
-entry:  
+; NOARGS: define internal void @main
+; LIST: define internal void @main
+; EMPTYFILE: define internal void @main
+; LIST2: define internal void @main
+; MERGE: define internal void @main
+define void @main() {
         ret void
 }
 
-define void @foo(...) {
-entry:  
+; NOARGS: define internal void @foo
+; LIST: define void @foo
+; EMPTYFILE: define internal void @foo
+; LIST2: define void @foo
+; MERGE: define void @foo
+define void @foo() {
         ret void
 }
 
-define void @bar(...) {
-entry:  
+; NOARGS: define internal void @bar
+; LIST: define internal void @bar
+; EMPTYFILE: define internal void @bar
+; LIST2: define void @bar
+; MERGE: define void @bar
+define void @bar() {
         ret void
 }

Modified: llvm/branches/AMDILBackend/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: opt < %s -internalize -S | grep internal | count 3
+; RUN: opt < %s -internalize -internalize-public-api-list main -S | grep internal | count 3
 
 @A = global i32 0
 @B = alias i32* @A

Modified: llvm/branches/AMDILBackend/test/Transforms/JumpThreading/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/JumpThreading/crash.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/JumpThreading/crash.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/JumpThreading/crash.ll Tue Jan 15 11:16:16 2013
@@ -511,3 +511,56 @@
 if.end:                                           ; preds = %for.cond
   ret void
 }
+
+define void @PR14233(i1 %cmp, i1 %cmp2, i1 %cmp3, i1 %cmp4) {
+entry:
+  br i1 %cmp, label %cond.true, label %cond.false
+
+cond.true:
+  br label %if.end
+
+cond.false:
+  br label %if.end
+
+if.end:
+  %A = phi i64 [ 0, %cond.true ], [ 1, %cond.false ]
+  br i1 %cmp2, label %bb, label %if.end2
+
+bb:
+  br label %if.end2
+
+if.end2:
+  %B = phi i64 [ ptrtoint (i8* ()* @PR14233.f1 to i64), %bb ], [ %A, %if.end ]
+  %cmp.ptr = icmp eq i64 %B, ptrtoint (i8* ()* @PR14233.f2 to i64)
+  br i1 %cmp.ptr, label %cond.true2, label %if.end3
+
+cond.true2:
+  br i1 %cmp3, label %bb2, label %ur
+
+bb2:
+  br i1 %cmp4, label %if.end4, label %if.end3
+
+if.end4:
+  unreachable
+
+if.end3:
+  %cmp.ptr2 = icmp eq i64 %B, ptrtoint (i8* ()* @PR14233.f2 to i64)
+  br i1 %cmp.ptr2, label %ur, label %if.then601
+
+if.then601:
+  %C = icmp eq i64 %B, 0
+  br i1 %C, label %bb3, label %bb4
+
+bb3:
+  unreachable
+
+bb4:
+  unreachable
+
+ur:
+  unreachable
+}
+
+declare i8* @PR14233.f1()
+
+declare i8* @PR14233.f2()

Modified: llvm/branches/AMDILBackend/test/Transforms/JumpThreading/select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/JumpThreading/select.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/JumpThreading/select.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/JumpThreading/select.ll Tue Jan 15 11:16:16 2013
@@ -121,3 +121,39 @@
   call void @quux()
   br label %L0
 }
+
+; Make sure the edge value of %0 from entry to L2 includes 0 and L3 is
+; reachable.
+; CHECK: test_switch_default
+; CHECK: entry:
+; CHECK: load
+; CHECK: switch
+; CHECK: [[THREADED:[A-Za-z.0-9]+]]:
+; CHECK: store
+; CHECK: br
+; CHECK: L2:
+; CHECK: icmp
+define void @test_switch_default(i32* nocapture %status) nounwind {
+entry:
+  %0 = load i32* %status, align 4
+  switch i32 %0, label %L2 [
+    i32 5061, label %L1
+    i32 0, label %L2
+  ]
+
+L1:
+  store i32 10025, i32* %status, align 4
+  br label %L2
+
+L2:
+  %1 = load i32* %status, align 4
+  %cmp57.i = icmp eq i32 %1, 0
+  br i1 %cmp57.i, label %L3, label %L4
+
+L3:
+  store i32 10000, i32* %status, align 4
+  br label %L4
+
+L4:
+  ret void
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/LICM/2003-12-11-SinkingToPHI.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/LICM/2003-12-11-SinkingToPHI.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/LICM/2003-12-11-SinkingToPHI.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/LICM/2003-12-11-SinkingToPHI.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: opt < %s -licm | lli
+; RUN: opt < %s -licm | lli %defaultjit
 
 define i32 @main() {
 entry:

Modified: llvm/branches/AMDILBackend/test/Transforms/LICM/hoisting.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/LICM/hoisting.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/LICM/hoisting.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/LICM/hoisting.ll Tue Jan 15 11:16:16 2013
@@ -29,7 +29,7 @@
 }
 
 
-declare void @foo2(i32)
+declare void @foo2(i32) nounwind
 
 
 ;; It is ok and desirable to hoist this potentially trapping instruction.
@@ -64,3 +64,29 @@
 	%C = sub i32 %A, %B		; <i32> [#uses=1]
 	ret i32 %C
 }
+
+; CHECK: @test4
+; CHECK: call
+; CHECK: sdiv
+; CHECK: ret
+define i32 @test4(i32 %x, i32 %y) nounwind uwtable ssp {
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  %n.01 = phi i32 [ 0, %entry ], [ %add, %for.body ]
+  call void @foo_may_call_exit(i32 0)
+  %div = sdiv i32 %x, %y
+  %add = add nsw i32 %n.01, %div
+  %inc = add nsw i32 %i.02, 1
+  %cmp = icmp slt i32 %inc, 10000
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body
+  %n.0.lcssa = phi i32 [ %add, %for.body ]
+  ret i32 %n.0.lcssa
+}
+
+declare void @foo_may_call_exit(i32)
+

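The new @test4 above verifies that LICM does not hoist the sdiv above the call to @foo_may_call_exit: the callee may terminate the program, so hoisting a possibly-trapping division would introduce a fault the original code never executes. A loose C analogue of that hazard, with illustrative names (unlike the IR test, the exit here guards the divide so the point is observable when the program runs):

#include <stdio.h>
#include <stdlib.h>

static void foo_may_call_exit(int stop) {
    if (stop)
        exit(0);                    /* may never return to the caller */
}

static int test4(int x, int y) {
    int n = 0;
    for (int i = 0; i < 10000; ++i) {
        foo_may_call_exit(y == 0);  /* leaves the program before a bad divide */
        n += x / y;                 /* only reached when y != 0; must not be
                                       hoisted above the call */
    }
    return n;
}

int main(void) {
    printf("%d\n", test4(7, 0));    /* exits inside the loop, never divides */
    return 0;
}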
Modified: llvm/branches/AMDILBackend/test/Transforms/LoopIdiom/basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/LoopIdiom/basic.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/LoopIdiom/basic.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/LoopIdiom/basic.ll Tue Jan 15 11:16:16 2013
@@ -383,4 +383,37 @@
 
 }
 
+define void @PR14241(i32* %s, i64 %size) {
+; Ensure that we don't form a memcpy for strided loops. Briefly, when we taught
+; LoopIdiom about memmove and strided loops, this got miscompiled into a memcpy
+; instead of a memmove. If we get the memmove transform back, this will catch
+; regressions.
+;
+; CHECK: @PR14241
 
+entry:
+  %end.idx = add i64 %size, -1
+  %end.ptr = getelementptr inbounds i32* %s, i64 %end.idx
+  br label %while.body
+; CHECK-NOT: memcpy
+;
+; FIXME: When we regain the ability to form a memmove here, this test should be
+; reversed and turned into a positive assertion.
+; CHECK-NOT: memmove
+
+while.body:
+  %phi.ptr = phi i32* [ %s, %entry ], [ %next.ptr, %while.body ]
+  %src.ptr = getelementptr inbounds i32* %phi.ptr, i64 1
+  %val = load i32* %src.ptr, align 4
+; CHECK: load
+  %dst.ptr = getelementptr inbounds i32* %phi.ptr, i64 0
+  store i32 %val, i32* %dst.ptr, align 4
+; CHECK: store
+  %next.ptr = getelementptr inbounds i32* %phi.ptr, i64 1
+  %cmp = icmp eq i32* %next.ptr, %end.ptr
+  br i1 %cmp, label %exit, label %while.body
+
+exit:
+  ret void
+; CHECK: ret void
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/LoopStrengthReduce/2011-10-03-CritEdgeMerge.ll
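@PR14241 above guards against turning an overlapping strided copy into memcpy: each iteration stores s[i+1] into s[i], so the source and destination ranges overlap and only memmove (or the original loop) has defined behavior. A short C sketch of the same shape, with illustrative buffers:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    /* The loop shifts the array left by one, exactly the shape of the IR
     * loop in @PR14241. memcpy over these overlapping ranges would be
     * undefined; memmove matches the loop. */
    int a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    int b[8] = {0, 1, 2, 3, 4, 5, 6, 7};

    for (size_t i = 0; i + 1 < 8; ++i)   /* the original strided loop */
        a[i] = a[i + 1];

    memmove(b, b + 1, 7 * sizeof(int));  /* the legal library form    */

    assert(memcmp(a, b, sizeof a) == 0);
    puts("strided overlapping copy matches memmove, not memcpy");
    return 0;
}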
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/LoopStrengthReduce/2011-10-03-CritEdgeMerge.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/LoopStrengthReduce/2011-10-03-CritEdgeMerge.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/LoopStrengthReduce/2011-10-03-CritEdgeMerge.ll Tue Jan 15 11:16:16 2013
@@ -1,15 +1,15 @@
 ; RUN: opt -loop-reduce -S < %s | FileCheck %s
 ;
 ; Test LSR's use of SplitCriticalEdge during phi rewriting.
-; Verify that identical edges are merged. rdar://problem/6453893
 
 target triple = "x86-apple-darwin"
 
-; CHECK: @test
+; Verify that identical edges are merged. rdar://problem/6453893
+; CHECK: @test1
 ; CHECK: bb89:
 ; CHECK: phi i8* [ %lsr.iv.next1, %bbA.bb89_crit_edge ], [ %lsr.iv.next1, %bbB.bb89_crit_edge ]{{$}}
 
-define i8* @test() {
+define i8* @test1() {
 entry:
   br label %loop
 
@@ -41,3 +41,41 @@
 exit:
   ret i8* %tmp75phi
 }
+
+; Handle single-predecessor phis: PR13756
+; CHECK: @test2
+; CHECK: bb89:
+; CHECK: phi i8* [ %lsr.iv.next1, %bbA ], [ %lsr.iv.next1, %bbA ], [ %lsr.iv.next1, %bbA ]{{$}}
+define i8* @test2() {
+entry:
+  br label %loop
+
+loop:
+  %rec = phi i32 [ %next, %loop ], [ 0, %entry ]
+  %next = add i32 %rec, 1
+  %tmp75 = getelementptr i8* null, i32 %next
+  br i1 false, label %loop, label %loopexit
+
+loopexit:
+  br i1 false, label %bbA, label %bbB
+
+bbA:
+  switch i32 0, label %bb89 [
+    i32 47, label %bb89
+    i32 58, label %bb89
+  ]
+
+bbB:
+  switch i8 0, label %exit [
+    i8 47, label %exit
+    i8 58, label %exit
+  ]
+
+bb89:
+  %tmp75phi = phi i8* [ %tmp75, %bbA ], [ %tmp75, %bbA ], [ %tmp75, %bbA ]
+  br label %exit
+
+exit:
+  %result = phi i8* [ %tmp75phi, %bb89 ], [ %tmp75, %bbB ], [ %tmp75, %bbB ], [ %tmp75, %bbB ]
+  ret i8* %result
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll Tue Jan 15 11:16:16 2013
@@ -44,7 +44,7 @@
 
 ; CHECK: @main
 ; Check that the loop preheader contains no address computation.
-; CHECK: %entry
+; CHECK: %end_of_chain
 ; CHECK-NOT: add{{.*}}lsl
 ; CHECK: ldr{{.*}}lsl #2
 ; CHECK: ldr{{.*}}lsl #2
@@ -65,15 +65,15 @@
 
 while.body:
   %v3 = load i32* @ncol, align 4, !tbaa !0
-  br label %while.cond.i
+  br label %end_of_chain
 
-while.cond.i:
+end_of_chain:
   %state.i = getelementptr inbounds %s* %call18, i32 0, i32 0
   %v4 = load i32** %state.i, align 4, !tbaa !3
   br label %while.cond.i.i
 
 while.cond.i.i:
-  %counter.0.i.i = phi i32 [ %v3, %while.cond.i ], [ %dec.i.i, %land.rhs.i.i ]
+  %counter.0.i.i = phi i32 [ %v3, %end_of_chain ], [ %dec.i.i, %land.rhs.i.i ]
   %dec.i.i = add nsw i32 %counter.0.i.i, -1
   %tobool.i.i = icmp eq i32 %counter.0.i.i, 0
   br i1 %tobool.i.i, label %where.exit, label %land.rhs.i.i

Modified: llvm/branches/AMDILBackend/test/Transforms/LoopUnroll/pr11361.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/LoopUnroll/pr11361.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/LoopUnroll/pr11361.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/LoopUnroll/pr11361.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: opt -loop-unroll -disable-output
+; RUN: opt -loop-unroll -disable-output < %s
 ; PR11361
 
 ; This tests for an iterator invalidation issue.

Modified: llvm/branches/AMDILBackend/test/Transforms/LoopUnswitch/2011-06-02-CritSwitch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/LoopUnswitch/2011-06-02-CritSwitch.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/LoopUnswitch/2011-06-02-CritSwitch.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/LoopUnswitch/2011-06-02-CritSwitch.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: opt -loop-unswitch -disable-output
+; RUN: opt -loop-unswitch -disable-output < %s
 ; PR10031
 
 define i32 @test(i32 %command) {

Modified: llvm/branches/AMDILBackend/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll Tue Jan 15 11:16:16 2013
@@ -9,11 +9,11 @@
 define float @f() {
 entry:
   %a_var = alloca %a
-  %b_var = alloca %b
+  %b_var = alloca %b, align 1
   call void @g(%a* %a_var)
   %a_i8 = bitcast %a* %a_var to i8*
   %b_i8 = bitcast %b* %b_var to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b_i8, i8* %a_i8, i32 4, i32 4, i1 false)
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b_i8, i8* %a_i8, i32 4, i32 1, i1 false)
   %tmp1 = getelementptr %b* %b_var, i32 0, i32 0
   %tmp2 = load float* %tmp1
   ret float %tmp2

Modified: llvm/branches/AMDILBackend/test/Transforms/MemCpyOpt/align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/MemCpyOpt/align.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/MemCpyOpt/align.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/MemCpyOpt/align.ll Tue Jan 15 11:16:16 2013
@@ -1,12 +1,15 @@
-; RUN: opt < %s -S -memcpyopt | FileCheck %s
+; RUN: opt < %s -S -basicaa -memcpyopt | FileCheck %s
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
 
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+
 ; The resulting memset is only 4-byte aligned, despite containing
 ; a 16-byte aligned store in the middle.
 
-; CHECK: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 0, i64 16, i32 4, i1 false)
-
 define void @foo(i32* %p) {
+; CHECK: @foo
+; CHECK: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 0, i64 16, i32 4, i1 false)
   %a0 = getelementptr i32* %p, i64 0
   store i32 0, i32* %a0, align 4
   %a1 = getelementptr i32* %p, i64 1
@@ -17,3 +20,18 @@
   store i32 0, i32* %a3, align 4
   ret void
 }
+
+; Replacing %a8 with %a4 in the memset requires boosting the alignment of %a4.
+
+define void @bar() {
+; CHECK: @bar
+; CHECK: %a4 = alloca i32, align 8
+; CHECK-NOT: memcpy
+  %a4 = alloca i32, align 4
+  %a8 = alloca i32, align 8
+  %a8.cast = bitcast i32* %a8 to i8*
+  %a4.cast = bitcast i32* %a4 to i8*
+  call void @llvm.memset.p0i8.i64(i8* %a8.cast, i8 0, i64 4, i32 8, i1 false)
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a4.cast, i8* %a8.cast, i64 4, i32 4, i1 false)
+  ret void
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/MemCpyOpt/form-memset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/MemCpyOpt/form-memset.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/MemCpyOpt/form-memset.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/MemCpyOpt/form-memset.ll Tue Jan 15 11:16:16 2013
@@ -248,3 +248,27 @@
 ; CHECK: @test8
 ; CHECK: store <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32>* %0, align 16
 }
+
+ at test9buf = internal unnamed_addr global [16 x i64] zeroinitializer, align 16
+
+define void @test9() nounwind {
+  store i8 -1, i8* bitcast ([16 x i64]* @test9buf to i8*), align 16
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 1), align 1
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 2), align 2
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 3), align 1
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 4), align 4
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 5), align 1
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 6), align 2
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 7), align 1
+  store i8 -1, i8* bitcast (i64* getelementptr inbounds ([16 x i64]* @test9buf, i64 0, i64 1) to i8*), align 8
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 9), align 1
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 10), align 2
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 11), align 1
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 12), align 4
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 13), align 1
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 14), align 2
+  store i8 -1, i8* getelementptr (i8* bitcast ([16 x i64]* @test9buf to i8*), i64 15), align 1
+  ret void
+; CHECK: @test9(
+; CHECK: call void @llvm.memset.p0i8.i64(i8* bitcast ([16 x i64]* @test9buf to i8*), i8 -1, i64 16, i32 16, i1 false)
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/ObjCARC/basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/ObjCARC/basic.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/ObjCARC/basic.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/ObjCARC/basic.ll Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-; RUN: opt -objc-arc -S < %s | FileCheck %s
+; RUN: opt -basicaa -objc-arc -S < %s | FileCheck %s
 
 target datalayout = "e-p:64:64:64"
 
@@ -1272,7 +1272,7 @@
 ; Delete retain,release pairs around loops.
 
 ; CHECK: define void @test39(
-; CHECK_NOT: @objc_
+; CHECK-NOT: @objc_
 ; CHECK: }
 define void @test39(i8* %p) {
 entry:
@@ -1290,7 +1290,7 @@
 ; Delete retain,release pairs around loops containing uses.
 
 ; CHECK: define void @test39b(
-; CHECK_NOT: @objc_
+; CHECK-NOT: @objc_
 ; CHECK: }
 define void @test39b(i8* %p) {
 entry:
@@ -1309,7 +1309,7 @@
 ; Delete retain,release pairs around loops containing potential decrements.
 
 ; CHECK: define void @test39c(
-; CHECK_NOT: @objc_
+; CHECK-NOT: @objc_
 ; CHECK: }
 define void @test39c(i8* %p) {
 entry:
@@ -1329,7 +1329,7 @@
 ; the successors are in a different order.
 
 ; CHECK: define void @test40(
-; CHECK_NOT: @objc_
+; CHECK-NOT: @objc_
 ; CHECK: }
 define void @test40(i8* %p) {
 entry:
@@ -1498,7 +1498,7 @@
 }
 
 ; Do delete retain+release with intervening stores of the
-; address value;
+; address value.
 
 ; CHECK: define void @test50(
 ; CHECK-NOT: @objc_

Modified: llvm/branches/AMDILBackend/test/Transforms/ObjCARC/invoke.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/ObjCARC/invoke.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/ObjCARC/invoke.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/ObjCARC/invoke.ll Tue Jan 15 11:16:16 2013
@@ -76,12 +76,12 @@
 ; CHECK: define void @test2() {
 ; CHECK: invoke.cont:
 ; CHECK-NEXT: call i8* @objc_retain
-; CHEK-NOT: @objc
+; CHECK-NOT: @objc_r
 ; CHECK: finally.cont:
 ; CHECK-NEXT: call void @objc_release
-; CHEK-NOT: @objc
+; CHECK-NOT: @objc
 ; CHECK: finally.rethrow:
-; CHEK-NOT: @objc
+; CHECK-NOT: @objc
 ; CHECK: }
 define void @test2() {
 entry:

Modified: llvm/branches/AMDILBackend/test/Transforms/ObjCARC/nested.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/ObjCARC/nested.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/ObjCARC/nested.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/ObjCARC/nested.ll Tue Jan 15 11:16:16 2013
@@ -16,6 +16,10 @@
 declare i8* @objc_msgSend(i8*, i8*, ...) nonlazybind
 declare void @use(i8*)
 declare void @objc_release(i8*)
+declare i8* @def()
+declare void @__crasher_block_invoke(i8* nocapture)
+declare i8* @objc_retainBlock(i8*)
+declare void @__crasher_block_invoke1(i8* nocapture)
 
 !0 = metadata !{}
 
@@ -279,11 +283,13 @@
   ret void
 }
 
-; Delete a nested retain+release pair.
+; TODO: Delete a nested retain+release pair.
+; The optimizer currently can't do this, because it isn't sophisticated enough in
+; reasoning about nesting.
 
 ; CHECK: define void @test6(
 ; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: @objc_retain
 ; CHECK: }
 define void @test6() nounwind {
 entry:
@@ -345,11 +351,13 @@
   ret void
 }
 
-; Delete a nested retain+release pair.
+; TODO: Delete a nested retain+release pair.
+; The optimizer currently can't do this, because it isn't sophisticated enough in
+; reasoning about nesting.
 
 ; CHECK: define void @test7(
 ; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: @objc_retain
 ; CHECK: }
 define void @test7() nounwind {
 entry:
@@ -553,12 +561,12 @@
   ret void
 }
 
-; Like test9, but without a split backedge. This we can optimize.
+; Like test9, but without a split backedge. TODO: optimize this.
 
 ; CHECK: define void @test9b(
 ; CHECK: call i8* @objc_retain
 ; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: @objc_retain
 ; CHECK: }
 define void @test9b() nounwind {
 entry:
@@ -687,12 +695,12 @@
   ret void
 }
 
-; Like test10, but without a split backedge. This we can optimize.
+; Like test10, but without a split backedge. TODO: optimize this.
 
 ; CHECK: define void @test10b(
 ; CHECK: call i8* @objc_retain
 ; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: @objc_retain
 ; CHECK: }
 define void @test10b() nounwind {
 entry:
@@ -751,3 +759,64 @@
   call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
   ret void
 }
+
+; Pointers to strong pointers can obscure provenance relationships. Be conservative
+; in the face of escaping pointers. rdar://12150909.
+
+%struct.__block_d = type { i64, i64 }
+
+ at _NSConcreteStackBlock = external global i8*
+ at __block_d_tmp = external hidden constant { i64, i64, i8*, i8*, i8*, i8* }
+ at __block_d_tmp5 = external hidden constant { i64, i64, i8*, i8*, i8*, i8* }
+
+; CHECK: define void @test11(
+; CHECK: tail call i8* @objc_retain(i8* %call) nounwind
+; CHECK: tail call i8* @objc_retain(i8* %call) nounwind
+; CHECK: call void @objc_release(i8* %call) nounwind, !clang.imprecise_release !0
+; CHECK: }
+define void @test11() {
+entry:
+  %block = alloca <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, align 8
+  %block9 = alloca <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, align 8
+  %call = call i8* @def(), !clang.arc.no_objc_arc_exceptions !0
+  %foo = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 5
+  %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 0
+  store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa, align 8
+  %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 1
+  store i32 1107296256, i32* %block.flags, align 8
+  %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 2
+  store i32 0, i32* %block.reserved, align 4
+  %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 3
+  store i8* bitcast (void (i8*)* @__crasher_block_invoke to i8*), i8** %block.invoke, align 8
+  %block.d = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 4
+  store %struct.__block_d* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_d_tmp to %struct.__block_d*), %struct.__block_d** %block.d, align 8
+  %foo2 = tail call i8* @objc_retain(i8* %call) nounwind
+  store i8* %foo2, i8** %foo, align 8
+  %foo4 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block to i8*
+  %foo5 = call i8* @objc_retainBlock(i8* %foo4) nounwind
+  call void @use(i8* %foo5), !clang.arc.no_objc_arc_exceptions !0
+  call void @objc_release(i8* %foo5) nounwind
+  %strongdestroy = load i8** %foo, align 8
+  call void @objc_release(i8* %strongdestroy) nounwind, !clang.imprecise_release !0
+  %foo10 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 5
+  %block.isa11 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 0
+  store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa11, align 8
+  %block.flags12 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 1
+  store i32 1107296256, i32* %block.flags12, align 8
+  %block.reserved13 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 2
+  store i32 0, i32* %block.reserved13, align 4
+  %block.invoke14 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 3
+  store i8* bitcast (void (i8*)* @__crasher_block_invoke1 to i8*), i8** %block.invoke14, align 8
+  %block.d15 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 4
+  store %struct.__block_d* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_d_tmp5 to %struct.__block_d*), %struct.__block_d** %block.d15, align 8
+  %foo18 = call i8* @objc_retain(i8* %call) nounwind
+  store i8* %call, i8** %foo10, align 8
+  %foo20 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9 to i8*
+  %foo21 = call i8* @objc_retainBlock(i8* %foo20) nounwind
+  call void @use(i8* %foo21), !clang.arc.no_objc_arc_exceptions !0
+  call void @objc_release(i8* %foo21) nounwind
+  %strongdestroy25 = load i8** %foo10, align 8
+  call void @objc_release(i8* %strongdestroy25) nounwind, !clang.imprecise_release !0
+  call void @objc_release(i8* %call) nounwind, !clang.imprecise_release !0
+  ret void
+}

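For context on the TODO comments in this file: the "nested retain+release pair" they refer to is, in a deliberately simplified and hypothetical form, a pattern like

  define void @nested_sketch(i8* %x) nounwind {
  entry:
    ; The outer pair already keeps %x alive across the call to @use.
    %outer = tail call i8* @objc_retain(i8* %x) nounwind
    ; The inner pair is therefore semantically redundant.
    %inner = tail call i8* @objc_retain(i8* %x) nounwind
    call void @use(i8* %x)
    call void @objc_release(i8* %x) nounwind
    call void @objc_release(i8* %x) nounwind
    ret void
  }

The actual tests nest the pairs across loop structures, which is where the pass currently gives up, so the CHECK lines now expect the second objc_retain to remain rather than be deleted.
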
Removed: llvm/branches/AMDILBackend/test/Transforms/PruneEH/2003-09-14-ExternalCall.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/PruneEH/2003-09-14-ExternalCall.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/PruneEH/2003-09-14-ExternalCall.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/PruneEH/2003-09-14-ExternalCall.ll (removed)
@@ -1,14 +0,0 @@
-; RUN: opt < %s -prune-eh -S | grep invoke
-
-declare void @External()
-
-define void @foo() {
-	invoke void @External( )
-			to label %Cont unwind label %Cont
-Cont:		; preds = %0, %0
-        %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
-                 cleanup
-	ret void
-}
-
-declare i32 @__gxx_personality_v0(...)

Modified: llvm/branches/AMDILBackend/test/Transforms/Reassociate/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/Reassociate/crash.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/Reassociate/crash.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/Reassociate/crash.ll Tue Jan 15 11:16:16 2013
@@ -144,3 +144,31 @@
   %t6 = add i32 %t4, %t5
   ret i32 %t6
 }
+
+define i32 @bar(i32 %arg, i32 %arg1, i32 %arg2) {
+  %tmp1 = mul i32 %arg1, 2
+  %tmp2 = mul i32 %tmp1, 3
+  %tmp3 = mul i32 %arg2, 2
+  %tmp4 = add i32 %tmp1, 1 ; dead code
+  %ret = add i32 %tmp2, %tmp3
+  ret i32 %ret
+}
+
+; PR14060
+define i8 @hang(i8 %p, i8 %p0, i8 %p1, i8 %p2, i8 %p3, i8 %p4, i8 %p5, i8 %p6, i8 %p7, i8 %p8, i8 %p9) {
+  %tmp = zext i1 false to i8
+  %tmp16 = or i8 %tmp, 1
+  %tmp22 = or i8 %p7, %p0
+  %tmp23 = or i8 %tmp16, %tmp22
+  %tmp28 = or i8 %p9, %p1
+  %tmp31 = or i8 %tmp23, %p2
+  %tmp32 = or i8 %tmp31, %tmp28
+  %tmp38 = or i8 %p8, %p3
+  %tmp39 = or i8 %tmp16, %tmp38
+  %tmp43 = or i8 %tmp39, %p4
+  %tmp44 = or i8 %tmp43, 1
+  %tmp47 = or i8 %tmp32, %p5
+  %tmp50 = or i8 %tmp47, %p6
+  %tmp51 = or i8 %tmp44, %tmp50
+  ret i8 %tmp51
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/SCCP/loadtest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SCCP/loadtest.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SCCP/loadtest.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SCCP/loadtest.ll Tue Jan 15 11:16:16 2013
@@ -1,8 +1,9 @@
 ; This test makes sure that these instructions are properly constant propagated.
 
-target datalayout = "e-p:32:32"
+; RUN: opt < %s -default-data-layout="e-p:32:32" -sccp -S | FileCheck %s
+; RUN: opt < %s -default-data-layout="E-p:32:32" -sccp -S | FileCheck %s
 
-; RUN: opt < %s -sccp -S | not grep load
+; CHECK-NOT: load
 
 
 @X = constant i32 42		; <i32*> [#uses=1]

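The change above drops the in-module data layout and instead supplies it on the RUN lines, so the very same CHECK-NOT runs under both a little-endian ("e-p:32:32") and a big-endian ("E-p:32:32") layout. As a rough sketch of what is being verified, assuming a function shaped like the ones in this file:

  define i32 @read_x() {
    %v = load i32* @X        ; @X = constant i32 42
    ret i32 %v
  }

after opt -sccp -S the load is folded away and the function simply returns 42, so no "load" appears in the output under either layout.
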
Copied: llvm/branches/AMDILBackend/test/Transforms/SROA/basictest.ll (from r167701, llvm/trunk/test/Transforms/SROA/basictest.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SROA/basictest.ll?p2=llvm/branches/AMDILBackend/test/Transforms/SROA/basictest.ll&p1=llvm/trunk/test/Transforms/SROA/basictest.ll&r1=167701&r2=172541&rev=172541&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SROA/basictest.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SROA/basictest.ll Tue Jan 15 11:16:16 2013
@@ -1100,12 +1100,12 @@
   %imag = getelementptr inbounds { float, float }* %retval, i32 0, i32 1
   store float %phi.real, float* %real
   store float %phi.imag, float* %imag
+  ; CHECK-NEXT: %[[real_convert:.*]] = bitcast float %[[real]] to i32
   ; CHECK-NEXT: %[[imag_convert:.*]] = bitcast float %[[imag]] to i32
   ; CHECK-NEXT: %[[imag_ext:.*]] = zext i32 %[[imag_convert]] to i64
   ; CHECK-NEXT: %[[imag_shift:.*]] = shl i64 %[[imag_ext]], 32
   ; CHECK-NEXT: %[[imag_mask:.*]] = and i64 undef, 4294967295
   ; CHECK-NEXT: %[[imag_insert:.*]] = or i64 %[[imag_mask]], %[[imag_shift]]
-  ; CHECK-NEXT: %[[real_convert:.*]] = bitcast float %[[real]] to i32
   ; CHECK-NEXT: %[[real_ext:.*]] = zext i32 %[[real_convert]] to i64
   ; CHECK-NEXT: %[[real_mask:.*]] = and i64 %[[imag_insert]], -4294967296
   ; CHECK-NEXT: %[[real_insert:.*]] = or i64 %[[real_mask]], %[[real_ext]]
@@ -1134,3 +1134,45 @@
   ret void
 ; CHECK: ret
 }
+
+define void @PR14465() {
+; Ensure that we don't crash when analyzing an alloca larger than the maximum
+; integer type width (MAX_INT_BITS) supported by llvm (1048576*32 > (1<<23)-1).
+; CHECK: @PR14465
+
+  %stack = alloca [1048576 x i32], align 16
+; CHECK: alloca [1048576 x i32]
+  %cast = bitcast [1048576 x i32]* %stack to i8*
+  call void @llvm.memset.p0i8.i64(i8* %cast, i8 -2, i64 4194304, i32 16, i1 false)
+  ret void
+; CHECK: ret
+}
+
+define void @PR14548(i1 %x) {
+; Handle a mixture of i1 and i8 loads and stores to allocas. This particular
+; pattern caused crashes and invalid output in the PR, and its nature will
+; trigger a mixture in several permutations as we resolve each alloca
+; iteratively.
+; Note that we don't do a particularly good *job* of handling these mixtures,
+; but the hope is that this is very rare.
+; CHECK: @PR14548
+
+entry:
+  %a = alloca <{ i1 }>, align 8
+  %b = alloca <{ i1 }>, align 8
+; Nothing of interest is simplified here.
+; CHECK: alloca
+; CHECK: alloca
+
+  %b.i1 = bitcast <{ i1 }>* %b to i1*
+  store i1 %x, i1* %b.i1, align 8
+  %b.i8 = bitcast <{ i1 }>* %b to i8*
+  %foo = load i8* %b.i8, align 1
+
+  %a.i8 = bitcast <{ i1 }>* %a to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.i8, i8* %b.i8, i32 1, i32 1, i1 false) nounwind
+  %bar = load i8* %a.i8, align 1
+  %a.i1 = getelementptr inbounds <{ i1 }>* %a, i32 0, i32 0
+  %baz = load i1* %a.i1, align 1
+  ret void
+}

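For the PR14465 case just added, the arithmetic its comment alludes to works out as follows: the alloca spans 1048576 x 32 = 33,554,432 bits, while the widest integer type LLVM will form is (1 << 23) - 1 = 8,388,607 bits, so SROA cannot model the whole alloca as one big integer and must simply leave the alloca and the memset alone rather than assert.
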
Copied: llvm/branches/AMDILBackend/test/Transforms/SROA/big-endian.ll (from r167701, llvm/trunk/test/Transforms/SROA/big-endian.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SROA/big-endian.ll?p2=llvm/branches/AMDILBackend/test/Transforms/SROA/big-endian.ll&p1=llvm/trunk/test/Transforms/SROA/big-endian.ll&r1=167701&r2=172541&rev=172541&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SROA/big-endian.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SROA/big-endian.ll Tue Jan 15 11:16:16 2013
@@ -82,14 +82,9 @@
 
   %a0i16ptr = bitcast i8* %a0ptr to i16*
   store i16 1, i16* %a0i16ptr
-; CHECK:      %[[mask0:.*]] = and i16 1, -16
-
-  %a1i4ptr = bitcast i8* %a1ptr to i4*
-  store i4 1, i4* %a1i4ptr
-; CHECK-NEXT: %[[insert0:.*]] = or i16 %[[mask0]], 1
 
   store i8 1, i8* %a2ptr
-; CHECK-NEXT: %[[mask1:.*]] = and i40 undef, 4294967295
+; CHECK:      %[[mask1:.*]] = and i40 undef, 4294967295
 ; CHECK-NEXT: %[[insert1:.*]] = or i40 %[[mask1]], 4294967296
 
   %a3i24ptr = bitcast i8* %a3ptr to i24*
@@ -110,7 +105,7 @@
   %ai = load i56* %aiptr
   %ret = zext i56 %ai to i64
   ret i64 %ret
-; CHECK-NEXT: %[[ext4:.*]] = zext i16 %[[insert0]] to i56
+; CHECK-NEXT: %[[ext4:.*]] = zext i16 1 to i56
 ; CHECK-NEXT: %[[shift4:.*]] = shl i56 %[[ext4]], 40
 ; CHECK-NEXT: %[[mask4:.*]] = and i56 %[[insert3]], 1099511627775
 ; CHECK-NEXT: %[[insert4:.*]] = or i56 %[[mask4]], %[[shift4]]

Copied: llvm/branches/AMDILBackend/test/Transforms/SROA/phi-and-select.ll (from r167701, llvm/trunk/test/Transforms/SROA/phi-and-select.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SROA/phi-and-select.ll?p2=llvm/branches/AMDILBackend/test/Transforms/SROA/phi-and-select.ll&p1=llvm/trunk/test/Transforms/SROA/phi-and-select.ll&r1=167701&r2=172541&rev=172541&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SROA/phi-and-select.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SROA/phi-and-select.ll Tue Jan 15 11:16:16 2013
@@ -390,3 +390,38 @@
   %tmpcast.d.0 = select i1 undef, i32* %c, i32* %d.0
   br label %for.cond
 }
+
+define i64 @PR14132(i1 %flag) {
+; CHECK: @PR14132
+; Here we form a PHI-node by promoting the pointer alloca first, and then in
+; order to promote the other two allocas, we speculate the load of the
+; now-phi-node-pointer. In doing so we end up loading a 64-bit value from an i8
+; alloca, which is completely bogus. However, we were asserting on trying to
+; rewrite it. Now it is replaced with undef. Eventually we may replace it with
+; unreachable and even the CFG will go away here.
+entry:
+  %a = alloca i64
+  %b = alloca i8
+  %ptr = alloca i64*
+; CHECK-NOT: alloca
+
+  %ptr.cast = bitcast i64** %ptr to i8**
+  store i64 0, i64* %a
+  store i8 1, i8* %b
+  store i64* %a, i64** %ptr
+  br i1 %flag, label %if.then, label %if.end
+
+if.then:
+  store i8* %b, i8** %ptr.cast
+  br label %if.end
+
+if.end:
+  %tmp = load i64** %ptr
+  %result = load i64* %tmp
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: %[[result:.*]] = phi i64 [ undef, %if.then ], [ 0, %entry ]
+
+  ret i64 %result
+; CHECK-NEXT: ret i64 %[[result]]
+}

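The load speculation that the PR14132 comment above describes can be pictured with a small hypothetical sketch that is not part of the test. Starting from a load of a PHI of pointers,

  %p = phi i64* [ %a, %bb1 ], [ %b, %bb2 ]
  %v = load i64* %p

SROA speculates the load into each predecessor and merges the loaded values instead:

  bb1:
    %v1 = load i64* %a
    br label %merge
  bb2:
    %v2 = load i64* %b
    br label %merge
  merge:
    %v = phi i64 [ %v1, %bb1 ], [ %v2, %bb2 ]

In the test, one of the speculated loads reads 64 bits out of the one-byte %b alloca; that is the bogus load which is now rewritten to undef, matching the final CHECK lines.
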
Copied: llvm/branches/AMDILBackend/test/Transforms/SROA/vector-promotion.ll (from r167701, llvm/trunk/test/Transforms/SROA/vector-promotion.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SROA/vector-promotion.ll?p2=llvm/branches/AMDILBackend/test/Transforms/SROA/vector-promotion.ll&p1=llvm/trunk/test/Transforms/SROA/vector-promotion.ll&r1=167701&r2=172541&rev=172541&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SROA/vector-promotion.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SROA/vector-promotion.ll Tue Jan 15 11:16:16 2013
@@ -220,3 +220,48 @@
   ret i32 %load
 ; CHECK: ret i32
 }
+
+define <2 x i8> @PR14349.1(i32 %x) {
+; CHECK: @PR14349.1
+; The first testcase for broken SROA rewriting of split integer loads and
+; stores due to smaller vector loads and stores. This particular test ensures
+; that we can rewrite a split store of an integer to a store of a vector.
+entry:
+  %a = alloca i32
+; CHECK-NOT: alloca
+
+  store i32 %x, i32* %a
+; CHECK-NOT: store
+
+  %cast = bitcast i32* %a to <2 x i8>*
+  %vec = load <2 x i8>* %cast
+; CHECK-NOT: load
+
+  ret <2 x i8> %vec
+; CHECK: %[[trunc:.*]] = trunc i32 %x to i16
+; CHECK: %[[cast:.*]] = bitcast i16 %[[trunc]] to <2 x i8>
+; CHECK: ret <2 x i8> %[[cast]]
+}
+
+define i32 @PR14349.2(<2 x i8> %x) {
+; CHECK: @PR14349.2
+; The second testcase for broken SROA rewriting of split integer loads and
+; stores due to smaller vector loads and stores. This particular test ensures
+; that we can rewrite a split load of an integer to a load of a vector.
+entry:
+  %a = alloca i32
+; CHECK-NOT: alloca
+
+  %cast = bitcast i32* %a to <2 x i8>*
+  store <2 x i8> %x, <2 x i8>* %cast
+; CHECK-NOT: store
+
+  %int = load i32* %a
+; CHECK-NOT: load
+
+  ret i32 %int
+; CHECK: %[[cast:.*]] = bitcast <2 x i8> %x to i16
+; CHECK: %[[trunc:.*]] = zext i16 %[[cast]] to i32
+; CHECK: %[[insert:.*]] = or i32 %{{.*}}, %[[trunc]]
+; CHECK: ret i32 %[[insert]]
+}

Removed: llvm/branches/AMDILBackend/test/Transforms/ScalarRepl/memcpy-from-global.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/ScalarRepl/memcpy-from-global.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/ScalarRepl/memcpy-from-global.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/ScalarRepl/memcpy-from-global.ll (removed)
@@ -1,146 +0,0 @@
-; RUN: opt < %s -scalarrepl -S | FileCheck %s
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
- at C.0.1248 = internal constant [128 x float] [ float -1.000000e+00, float -1.000000e+00, float -1.000000e+00, float 0.000000e+00, float -1.000000e+00, float -1.000000e+00, float 0.000000e+00, float -1.000000e+00, float -1.000000e+00, float -1.000000e+00, float 0.000000e+00, float 1.000000e+00, float -1.000000e+00, float -1.000000e+00, float 1.000000e+00, float 0.000000e+00, float -1.000000e+00, float 0.000000e+00, float -1.000000e+00, float -1.000000e+00, float -1.000000e+00, float 0.000000e+00, float -1.000000e+00, float 1.000000e+00, float -1.000000e+00, float 0.000000e+00, float 1.000000e+00, float -1.000000e+00, float -1.000000e+00, float 0.000000e+00, float 1.000000e+00, float 1.000000e+00, float -1.000000e+00, float 1.000000e+00, float -1.000000e+00, float 0.000000e+00, float -1.000000e+00, float 1.000000e+00, float 0.000000e+00, float -1.000000e+00, float -1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00, float -1.000000e+00, float 1.000000e+00,
  float 1.000000e+00, float 0.000000e+00, float 0.000000e+00, float -1.000000e+00, float -1.000000e+00, float -1.000000e+00, float 0.000000e+00, float -1.000000e+00, float -1.000000e+00, float 1.000000e+00, float 0.000000e+00, float -1.000000e+00, float 1.000000e+00, float -1.000000e+00, float 0.000000e+00, float -1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float -1.000000e+00, float -1.000000e+00, float 0.000000e+00, float 1.000000e+00, float -1.000000e+00, float 0.000000e+00, float -1.000000e+00, float 1.000000e+00, float -1.000000e+00, float 0.000000e+00, float 1.000000e+00, float 1.000000e+00, float -1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00, float 0.000000e+00, float -1.000000e+00, float -1.000000e+00, float 1.000000e+00, float 0.000000e+00, float -1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00, float -1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.00
 0000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float -1.000000e+00, float 0.000000e+00, float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float -1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 0.000000e+00, float 1.000000e+00, float -1.000000e+00, float -1.000000e+00, float 0.000000e+00, float 1.000000e+00, float -1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00, float 1.000000e+00, float -1.000000e+00, float 0.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 ], align 32		; <[128 x float]*> [#uses=1]
-
-define float @test1(i32 %hash, float %x, float %y, float %z, float %w) {
-entry:
-	%lookupTable = alloca [128 x float], align 16		; <[128 x float]*> [#uses=5]
-	%lookupTable1 = bitcast [128 x float]* %lookupTable to i8*		; <i8*> [#uses=1]
-	call void @llvm.memcpy.i32( i8* %lookupTable1, i8* bitcast ([128 x float]* @C.0.1248 to i8*), i32 512, i32 16 )
-        
-; CHECK: @test1
-; CHECK-NOT: alloca
-; CHECK-NOT: call{{.*}}@llvm.memcpy
-; CHECK: %lookupTable1 = bitcast [128 x float]* @C.0.1248 to i8*
-; CHECK-NOT: call{{.*}}@llvm.memcpy
-        
-	%tmp3 = shl i32 %hash, 2		; <i32> [#uses=1]
-	%tmp5 = and i32 %tmp3, 124		; <i32> [#uses=4]
-	%tmp753 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp5		; <float*> [#uses=1]
-	%tmp9 = load float* %tmp753		; <float> [#uses=1]
-	%tmp11 = fmul float %tmp9, %x		; <float> [#uses=1]
-	%tmp13 = fadd float %tmp11, 0.000000e+00		; <float> [#uses=1]
-	%tmp17.sum52 = or i32 %tmp5, 1		; <i32> [#uses=1]
-	%tmp1851 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp17.sum52		; <float*> [#uses=1]
-	%tmp19 = load float* %tmp1851		; <float> [#uses=1]
-	%tmp21 = fmul float %tmp19, %y		; <float> [#uses=1]
-	%tmp23 = fadd float %tmp21, %tmp13		; <float> [#uses=1]
-	%tmp27.sum50 = or i32 %tmp5, 2		; <i32> [#uses=1]
-	%tmp2849 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp27.sum50		; <float*> [#uses=1]
-	%tmp29 = load float* %tmp2849		; <float> [#uses=1]
-	%tmp31 = fmul float %tmp29, %z		; <float> [#uses=1]
-	%tmp33 = fadd float %tmp31, %tmp23		; <float> [#uses=1]
-	%tmp37.sum48 = or i32 %tmp5, 3		; <i32> [#uses=1]
-	%tmp3847 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp37.sum48		; <float*> [#uses=1]
-	%tmp39 = load float* %tmp3847		; <float> [#uses=1]
-	%tmp41 = fmul float %tmp39, %w		; <float> [#uses=1]
-	%tmp43 = fadd float %tmp41, %tmp33		; <float> [#uses=1]
-	ret float %tmp43
-}
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
-
-
-
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
-
-%T = type { i8, [123 x i8] }
-%U = type { i32, i32, i32, i32, i32 }
-
- at G = constant %T {i8 1, [123 x i8] zeroinitializer }
- at H = constant [2 x %U] zeroinitializer, align 16
-
-define void @test2() {
-  %A = alloca %T
-  %B = alloca %T
-  %a = bitcast %T* %A to i8*
-  %b = bitcast %T* %B to i8*
-
-; CHECK: @test2
-
-; %A alloca is deleted
-; CHECK-NEXT: %B = alloca %T
-
-; use @G instead of %A
-; CHECK-NEXT: %a = bitcast %T* @G to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %b, i8* %a, i64 124, i32 4, i1 false)
-  call void @bar(i8* %b)
-  ret void
-}
-
-declare void @bar(i8*)
-
-
-;; Should be able to eliminate the alloca.
-define void @test3() {
-  %A = alloca %T
-  %a = bitcast %T* %A to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false)
-  call void @bar(i8* %a) readonly
-; CHECK: @test3
-; CHECK-NEXT: %a = bitcast %T* @G to i8*
-; CHECK-NEXT: call void @bar(i8* %a)
-  ret void
-}
-
-define void @test4() {
-  %A = alloca %T
-  %a = bitcast %T* %A to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false)
-  call void @baz(i8* byval %a) 
-; CHECK: @test4
-; CHECK-NEXT: %a = bitcast %T* @G to i8*
-; CHECK-NEXT: call void @baz(i8* byval %a)
-  ret void
-}
-
-declare void @llvm.lifetime.start(i64, i8*)
-define void @test5() {
-  %A = alloca %T
-  %a = bitcast %T* %A to i8*
-  call void @llvm.lifetime.start(i64 -1, i8* %a)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false)
-  call void @baz(i8* byval %a) 
-; CHECK: @test5
-; CHECK-NEXT: %a = bitcast %T* @G to i8*
-; CHECK-NEXT: call void @baz(i8* byval %a)
-  ret void
-}
-
-
-declare void @baz(i8* byval)
-
-
-define void @test6() {
-  %A = alloca %U, align 16
-  %a = bitcast %U* %A to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast ([2 x %U]* @H to i8*), i64 20, i32 16, i1 false)
-  call void @bar(i8* %a) readonly
-; CHECK: @test6
-; CHECK-NEXT: %a = bitcast
-; CHECK-NEXT: call void @bar(i8* %a)
-  ret void
-}
-
-define void @test7() {
-  %A = alloca %U, align 16
-  %a = bitcast %U* %A to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%U* getelementptr ([2 x %U]* @H, i64 0, i32 0) to i8*), i64 20, i32 4, i1 false)
-  call void @bar(i8* %a) readonly
-; CHECK: @test7
-; CHECK-NEXT: %a = bitcast
-; CHECK-NEXT: call void @bar(i8* %a)
-  ret void
-}
-
-define void @test8() {
-  %A = alloca %U, align 16
-  %a = bitcast %U* %A to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%U* getelementptr ([2 x %U]* @H, i64 0, i32 1) to i8*), i64 20, i32 4, i1 false)
-  call void @bar(i8* %a) readonly
-; CHECK: @test8
-; CHECK: llvm.memcpy
-; CHECK: bar
-  ret void
-}

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/2003-08-05-MishandleInvoke.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/2003-08-05-MishandleInvoke.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/2003-08-05-MishandleInvoke.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/2003-08-05-MishandleInvoke.ll (removed)
@@ -1,15 +0,0 @@
-; Do not remove the invoke!
-;
-; RUN: opt < %s -simplifycfg -S | grep invoke
-
-define i32 @test() {
-	invoke i32 @test( )
-			to label %Ret unwind label %Ret		; <i32>:1 [#uses=0]
-Ret:		; preds = %0, %0
-        %val = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
-                 catch i8* null
-	%A = add i32 0, 1		; <i32> [#uses=1]
-	ret i32 %A
-}
-
-declare i32 @__gxx_personality_v0(...)

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/2006-10-29-InvokeCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/2006-10-29-InvokeCrash.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/2006-10-29-InvokeCrash.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/2006-10-29-InvokeCrash.ll (removed)
@@ -1,567 +0,0 @@
-; RUN: opt < %s -simplifycfg -disable-output
-; END.
-	%struct..4._102 = type { %struct.QVectorData* }
-	%struct..5._125 = type { %struct.QMapData* }
-	%struct.QAbstractTextDocumentLayout = type { %struct.QObject }
-	%struct.QBasicAtomic = type { i32 }
-	%struct.QFont = type { %struct.QFontPrivate*, i32 }
-	%struct.QFontMetrics = type { %struct.QFontPrivate* }
-	%struct.QFontPrivate = type opaque
-	%"struct.QFragmentMap<QTextBlockData>" = type { %struct.QFragmentMapData }
-	%struct.QFragmentMapData = type { %"struct.QFragmentMapData::._154", i32 }
-	%"struct.QFragmentMapData::._154" = type { %"struct.QFragmentMapData::Header"* }
-	%"struct.QFragmentMapData::Header" = type { i32, i32, i32, i32, i32, i32, i32, i32 }
-	%"struct.QHash<uint,QHashDummyValue>" = type { %"struct.QHash<uint,QHashDummyValue>::._152" }
-	%"struct.QHash<uint,QHashDummyValue>::._152" = type { %struct.QHashData* }
-	%struct.QHashData = type { %"struct.QHashData::Node"*, %"struct.QHashData::Node"**, %struct.QBasicAtomic, i32, i32, i16, i16, i32, i8 }
-	%"struct.QHashData::Node" = type { %"struct.QHashData::Node"*, i32 }
-	%"struct.QList<QObject*>::._92" = type { %struct.QListData }
-	%"struct.QList<QPointer<QObject> >" = type { %"struct.QList<QObject*>::._92" }
-	%struct.QListData = type { %"struct.QListData::Data"* }
-	%"struct.QListData::Data" = type { %struct.QBasicAtomic, i32, i32, i32, i8, [1 x i8*] }
-	%"struct.QMap<QUrl,QVariant>" = type { %struct..5._125 }
-	%struct.QMapData = type { %"struct.QMapData::Node"*, [12 x %"struct.QMapData::Node"*], %struct.QBasicAtomic, i32, i32, i32, i8 }
-	%"struct.QMapData::Node" = type { %"struct.QMapData::Node"*, [1 x %"struct.QMapData::Node"*] }
-	%struct.QObject = type { i32 (...)**, %struct.QObjectData* }
-	%struct.QObjectData = type { i32 (...)**, %struct.QObject*, %struct.QObject*, %"struct.QList<QPointer<QObject> >", i8, [3 x i8], i32, i32 }
-	%struct.QObjectPrivate = type { %struct.QObjectData, i32, %struct.QObject*, %"struct.QList<QPointer<QObject> >", %"struct.QVector<QAbstractTextDocumentLayout::Selection>", %struct.QString }
-	%struct.QPaintDevice = type { i32 (...)**, i16 }
-	%struct.QPainter = type { %struct.QPainterPrivate* }
-	%struct.QPainterPrivate = type opaque
-	%struct.QPointF = type { double, double }
-	%struct.QPrinter = type { %struct.QPaintDevice, %struct.QPrinterPrivate* }
-	%struct.QPrinterPrivate = type opaque
-	%struct.QRectF = type { double, double, double, double }
-	%"struct.QSet<uint>" = type { %"struct.QHash<uint,QHashDummyValue>" }
-	%"struct.QSharedDataPointer<QTextFormatPrivate>" = type { %struct.QTextFormatPrivate* }
-	%struct.QString = type { %"struct.QString::Data"* }
-	%"struct.QString::Data" = type { %struct.QBasicAtomic, i32, i32, i16*, i8, i8, [1 x i16] }
-	%struct.QTextBlockFormat = type { %struct.QTextFormat }
-	%struct.QTextBlockGroup = type { %struct.QAbstractTextDocumentLayout }
-	%struct.QTextDocumentConfig = type { %struct.QString }
-	%struct.QTextDocumentPrivate = type { %struct.QObjectPrivate, %struct.QString, %"struct.QVector<QAbstractTextDocumentLayout::Selection>", i1, i32, i32, i1, i32, i32, i32, i32, i1, %struct.QTextFormatCollection, %struct.QTextBlockGroup*, %struct.QAbstractTextDocumentLayout*, %"struct.QFragmentMap<QTextBlockData>", %"struct.QFragmentMap<QTextBlockData>", i32, %"struct.QList<QPointer<QObject> >", %"struct.QList<QPointer<QObject> >", %"struct.QMap<QUrl,QVariant>", %"struct.QMap<QUrl,QVariant>", %"struct.QMap<QUrl,QVariant>", %struct.QTextDocumentConfig, i1, i1, %struct.QPointF }
-	%struct.QTextFormat = type { %"struct.QSharedDataPointer<QTextFormatPrivate>", i32 }
-	%struct.QTextFormatCollection = type { %"struct.QVector<QAbstractTextDocumentLayout::Selection>", %"struct.QVector<QAbstractTextDocumentLayout::Selection>", %"struct.QSet<uint>", %struct.QFont }
-	%struct.QTextFormatPrivate = type opaque
-	%"struct.QVector<QAbstractTextDocumentLayout::Selection>" = type { %struct..4._102 }
-	%struct.QVectorData = type { %struct.QBasicAtomic, i32, i32, i8 }
-
-define void @_ZNK13QTextDocument5printEP8QPrinter(%struct.QAbstractTextDocumentLayout* %this, %struct.QPrinter* %printer) {
-entry:
-	%tmp = alloca %struct.QPointF, align 16		; <%struct.QPointF*> [#uses=2]
-	%tmp.upgrd.1 = alloca %struct.QRectF, align 16		; <%struct.QRectF*> [#uses=5]
-	%tmp2 = alloca %struct.QPointF, align 16		; <%struct.QPointF*> [#uses=3]
-	%tmp.upgrd.2 = alloca %struct.QFontMetrics, align 16		; <%struct.QFontMetrics*> [#uses=4]
-	%tmp.upgrd.3 = alloca %struct.QFont, align 16		; <%struct.QFont*> [#uses=4]
-	%tmp3 = alloca %struct.QPointF, align 16		; <%struct.QPointF*> [#uses=2]
-	%p = alloca %struct.QPainter, align 16		; <%struct.QPainter*> [#uses=14]
-	%body = alloca %struct.QRectF, align 16		; <%struct.QRectF*> [#uses=9]
-	%pageNumberPos = alloca %struct.QPointF, align 16		; <%struct.QPointF*> [#uses=4]
-	%scaledPageSize = alloca %struct.QPointF, align 16		; <%struct.QPointF*> [#uses=6]
-	%printerPageSize = alloca %struct.QPointF, align 16		; <%struct.QPointF*> [#uses=3]
-	%fmt = alloca %struct.QTextBlockFormat, align 16		; <%struct.QTextBlockFormat*> [#uses=5]
-	%font = alloca %struct.QFont, align 16		; <%struct.QFont*> [#uses=5]
-	%tmp.upgrd.4 = call %struct.QTextDocumentPrivate* @_ZNK13QTextDocument6d_funcEv( %struct.QAbstractTextDocumentLayout* %this )		; <%struct.QTextDocumentPrivate*> [#uses=5]
-	%tmp.upgrd.5 = getelementptr %struct.QPrinter* %printer, i32 0, i32 0		; <%struct.QPaintDevice*> [#uses=1]
-	call void @_ZN8QPainterC1EP12QPaintDevice( %struct.QPainter* %p, %struct.QPaintDevice* %tmp.upgrd.5 )
-	%tmp.upgrd.6 = invoke i1 @_ZNK8QPainter8isActiveEv( %struct.QPainter* %p )
-			to label %invcont unwind label %cleanup329		; <i1> [#uses=1]
-invcont:		; preds = %entry
-	br i1 %tmp.upgrd.6, label %cond_next, label %cleanup328
-cond_next:		; preds = %invcont
-	%tmp8 = invoke %struct.QAbstractTextDocumentLayout* @_ZNK13QTextDocument14documentLayoutEv( %struct.QAbstractTextDocumentLayout* %this )
-			to label %invcont7 unwind label %cleanup329		; <%struct.QAbstractTextDocumentLayout*> [#uses=0]
-invcont7:		; preds = %cond_next
-	%tmp10 = getelementptr %struct.QTextDocumentPrivate* %tmp.upgrd.4, i32 0, i32 26		; <%struct.QPointF*> [#uses=1]
-	call void @_ZN7QPointFC1Edd( %struct.QPointF* %tmp, double 0.000000e+00, double 0.000000e+00 )
-	call void @_ZN6QRectFC1ERK7QPointFRK6QSizeF( %struct.QRectF* %body, %struct.QPointF* %tmp, %struct.QPointF* %tmp10 )
-	call void @_ZN7QPointFC1Ev( %struct.QPointF* %pageNumberPos )
-	%tmp12 = getelementptr %struct.QTextDocumentPrivate* %tmp.upgrd.4, i32 0, i32 26		; <%struct.QPointF*> [#uses=1]
-	%tmp13 = call i1 @_ZNK6QSizeF7isValidEv( %struct.QPointF* %tmp12 )		; <i1> [#uses=1]
-	br i1 %tmp13, label %cond_next15, label %bb
-cond_next15:		; preds = %invcont7
-	%tmp17 = getelementptr %struct.QTextDocumentPrivate* %tmp.upgrd.4, i32 0, i32 26		; <%struct.QPointF*> [#uses=1]
-	%tmp.upgrd.7 = call double @_ZNK6QSizeF6heightEv( %struct.QPointF* %tmp17 )		; <double> [#uses=1]
-	%tmp18 = fcmp oeq double %tmp.upgrd.7, 0x41DFFFFFFFC00000		; <i1> [#uses=1]
-	br i1 %tmp18, label %bb, label %cond_next20
-cond_next20:		; preds = %cond_next15
-	br label %bb21
-bb:		; preds = %cond_next15, %invcont7
-	br label %bb21
-bb21:		; preds = %bb, %cond_next20
-	%iftmp.406.0 = phi i1 [ false, %bb ], [ true, %cond_next20 ]		; <i1> [#uses=1]
-	br i1 %iftmp.406.0, label %cond_true24, label %cond_false
-cond_true24:		; preds = %bb21
-	%tmp.upgrd.8 = invoke i32 @_Z13qt_defaultDpiv( )
-			to label %invcont25 unwind label %cleanup329		; <i32> [#uses=1]
-invcont25:		; preds = %cond_true24
-	%tmp26 = sitofp i32 %tmp.upgrd.8 to double		; <double> [#uses=2]
-	%tmp30 = invoke %struct.QAbstractTextDocumentLayout* @_ZNK13QTextDocument14documentLayoutEv( %struct.QAbstractTextDocumentLayout* %this )
-			to label %invcont29 unwind label %cleanup329		; <%struct.QAbstractTextDocumentLayout*> [#uses=1]
-invcont29:		; preds = %invcont25
-	%tmp32 = invoke %struct.QPaintDevice* @_ZNK27QAbstractTextDocumentLayout11paintDeviceEv( %struct.QAbstractTextDocumentLayout* %tmp30 )
-			to label %invcont31 unwind label %cleanup329		; <%struct.QPaintDevice*> [#uses=3]
-invcont31:		; preds = %invcont29
-	%tmp34 = icmp eq %struct.QPaintDevice* %tmp32, null		; <i1> [#uses=1]
-	br i1 %tmp34, label %cond_next42, label %cond_true35
-cond_true35:		; preds = %invcont31
-	%tmp38 = invoke i32 @_ZNK12QPaintDevice11logicalDpiXEv( %struct.QPaintDevice* %tmp32 )
-			to label %invcont37 unwind label %cleanup329		; <i32> [#uses=1]
-invcont37:		; preds = %cond_true35
-	%tmp38.upgrd.9 = sitofp i32 %tmp38 to double		; <double> [#uses=1]
-	%tmp41 = invoke i32 @_ZNK12QPaintDevice11logicalDpiYEv( %struct.QPaintDevice* %tmp32 )
-			to label %invcont40 unwind label %cleanup329		; <i32> [#uses=1]
-invcont40:		; preds = %invcont37
-	%tmp41.upgrd.10 = sitofp i32 %tmp41 to double		; <double> [#uses=1]
-	br label %cond_next42
-cond_next42:		; preds = %invcont40, %invcont31
-	%sourceDpiY.2 = phi double [ %tmp41.upgrd.10, %invcont40 ], [ %tmp26, %invcont31 ]		; <double> [#uses=1]
-	%sourceDpiX.2 = phi double [ %tmp38.upgrd.9, %invcont40 ], [ %tmp26, %invcont31 ]		; <double> [#uses=1]
-	%tmp44 = getelementptr %struct.QPrinter* %printer, i32 0, i32 0		; <%struct.QPaintDevice*> [#uses=1]
-	%tmp46 = invoke i32 @_ZNK12QPaintDevice11logicalDpiXEv( %struct.QPaintDevice* %tmp44 )
-			to label %invcont45 unwind label %cleanup329		; <i32> [#uses=1]
-invcont45:		; preds = %cond_next42
-	%tmp46.upgrd.11 = sitofp i32 %tmp46 to double		; <double> [#uses=1]
-	%tmp48 = fdiv double %tmp46.upgrd.11, %sourceDpiX.2		; <double> [#uses=2]
-	%tmp50 = getelementptr %struct.QPrinter* %printer, i32 0, i32 0		; <%struct.QPaintDevice*> [#uses=1]
-	%tmp52 = invoke i32 @_ZNK12QPaintDevice11logicalDpiYEv( %struct.QPaintDevice* %tmp50 )
-			to label %invcont51 unwind label %cleanup329		; <i32> [#uses=1]
-invcont51:		; preds = %invcont45
-	%tmp52.upgrd.12 = sitofp i32 %tmp52 to double		; <double> [#uses=1]
-	%tmp54 = fdiv double %tmp52.upgrd.12, %sourceDpiY.2		; <double> [#uses=2]
-	invoke void @_ZN8QPainter5scaleEdd( %struct.QPainter* %p, double %tmp48, double %tmp54 )
-			to label %invcont57 unwind label %cleanup329
-invcont57:		; preds = %invcont51
-	%tmp.upgrd.13 = getelementptr %struct.QPointF* %scaledPageSize, i32 0, i32 0		; <double*> [#uses=1]
-	%tmp60 = getelementptr %struct.QTextDocumentPrivate* %tmp.upgrd.4, i32 0, i32 26, i32 0		; <double*> [#uses=1]
-	%tmp61 = load double* %tmp60		; <double> [#uses=1]
-	store double %tmp61, double* %tmp.upgrd.13
-	%tmp62 = getelementptr %struct.QPointF* %scaledPageSize, i32 0, i32 1		; <double*> [#uses=1]
-	%tmp63 = getelementptr %struct.QTextDocumentPrivate* %tmp.upgrd.4, i32 0, i32 26, i32 1		; <double*> [#uses=1]
-	%tmp64 = load double* %tmp63		; <double> [#uses=1]
-	store double %tmp64, double* %tmp62
-	%tmp65 = call double* @_ZN6QSizeF6rwidthEv( %struct.QPointF* %scaledPageSize )		; <double*> [#uses=2]
-	%tmp67 = load double* %tmp65		; <double> [#uses=1]
-	%tmp69 = fmul double %tmp67, %tmp48		; <double> [#uses=1]
-	store double %tmp69, double* %tmp65
-	%tmp71 = call double* @_ZN6QSizeF7rheightEv( %struct.QPointF* %scaledPageSize )		; <double*> [#uses=2]
-	%tmp73 = load double* %tmp71		; <double> [#uses=1]
-	%tmp75 = fmul double %tmp73, %tmp54		; <double> [#uses=1]
-	store double %tmp75, double* %tmp71
-	%tmp78 = getelementptr %struct.QPrinter* %printer, i32 0, i32 0		; <%struct.QPaintDevice*> [#uses=1]
-	%tmp80 = invoke i32 @_ZNK12QPaintDevice6heightEv( %struct.QPaintDevice* %tmp78 )
-			to label %invcont79 unwind label %cleanup329		; <i32> [#uses=1]
-invcont79:		; preds = %invcont57
-	%tmp82 = getelementptr %struct.QPrinter* %printer, i32 0, i32 0		; <%struct.QPaintDevice*> [#uses=1]
-	%tmp84 = invoke i32 @_ZNK12QPaintDevice5widthEv( %struct.QPaintDevice* %tmp82 )
-			to label %invcont83 unwind label %cleanup329		; <i32> [#uses=1]
-invcont83:		; preds = %invcont79
-	%tmp80.upgrd.14 = sitofp i32 %tmp80 to double		; <double> [#uses=1]
-	%tmp84.upgrd.15 = sitofp i32 %tmp84 to double		; <double> [#uses=1]
-	call void @_ZN6QSizeFC1Edd( %struct.QPointF* %printerPageSize, double %tmp84.upgrd.15, double %tmp80.upgrd.14 )
-	%tmp85 = call double @_ZNK6QSizeF6heightEv( %struct.QPointF* %printerPageSize )		; <double> [#uses=1]
-	%tmp86 = call double @_ZNK6QSizeF6heightEv( %struct.QPointF* %scaledPageSize )		; <double> [#uses=1]
-	%tmp87 = fdiv double %tmp85, %tmp86		; <double> [#uses=1]
-	%tmp88 = call double @_ZNK6QSizeF5widthEv( %struct.QPointF* %printerPageSize )		; <double> [#uses=1]
-	%tmp89 = call double @_ZNK6QSizeF5widthEv( %struct.QPointF* %scaledPageSize )		; <double> [#uses=1]
-	%tmp90 = fdiv double %tmp88, %tmp89		; <double> [#uses=1]
-	invoke void @_ZN8QPainter5scaleEdd( %struct.QPainter* %p, double %tmp90, double %tmp87 )
-			to label %cond_next194 unwind label %cleanup329
-cond_false:		; preds = %bb21
-	%tmp.upgrd.16 = getelementptr %struct.QAbstractTextDocumentLayout* %this, i32 0, i32 0		; <%struct.QObject*> [#uses=1]
-	%tmp95 = invoke %struct.QAbstractTextDocumentLayout* @_ZNK13QTextDocument5cloneEP7QObject( %struct.QAbstractTextDocumentLayout* %this, %struct.QObject* %tmp.upgrd.16 )
-			to label %invcont94 unwind label %cleanup329		; <%struct.QAbstractTextDocumentLayout*> [#uses=9]
-invcont94:		; preds = %cond_false
-	%tmp99 = invoke %struct.QAbstractTextDocumentLayout* @_ZNK13QTextDocument14documentLayoutEv( %struct.QAbstractTextDocumentLayout* %tmp95 )
-			to label %invcont98 unwind label %cleanup329		; <%struct.QAbstractTextDocumentLayout*> [#uses=1]
-invcont98:		; preds = %invcont94
-	%tmp101 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
-			to label %invcont100 unwind label %cleanup329		; <%struct.QPaintDevice*> [#uses=1]
-invcont100:		; preds = %invcont98
-	invoke void @_ZN27QAbstractTextDocumentLayout14setPaintDeviceEP12QPaintDevice( %struct.QAbstractTextDocumentLayout* %tmp99, %struct.QPaintDevice* %tmp101 )
-			to label %invcont103 unwind label %cleanup329
-invcont103:		; preds = %invcont100
-	%tmp105 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
-			to label %invcont104 unwind label %cleanup329		; <%struct.QPaintDevice*> [#uses=1]
-invcont104:		; preds = %invcont103
-	%tmp107 = invoke i32 @_ZNK12QPaintDevice11logicalDpiYEv( %struct.QPaintDevice* %tmp105 )
-			to label %invcont106 unwind label %cleanup329		; <i32> [#uses=1]
-invcont106:		; preds = %invcont104
-	%tmp108 = sitofp i32 %tmp107 to double		; <double> [#uses=1]
-	%tmp109 = fmul double %tmp108, 0x3FE93264C993264C		; <double> [#uses=1]
-	%tmp109.upgrd.17 = fptosi double %tmp109 to i32		; <i32> [#uses=3]
-	%tmp.upgrd.18 = call %struct.QTextBlockGroup* @_ZNK13QTextDocument9rootFrameEv( %struct.QAbstractTextDocumentLayout* %tmp95 )		; <%struct.QTextBlockGroup*> [#uses=1]
-	invoke void @_ZNK10QTextFrame11frameFormatEv( %struct.QTextBlockFormat* sret  %fmt, %struct.QTextBlockGroup* %tmp.upgrd.18 )
-			to label %invcont111 unwind label %cleanup329
-invcont111:		; preds = %invcont106
-	%tmp112 = sitofp i32 %tmp109.upgrd.17 to double		; <double> [#uses=1]
-	invoke void @_ZN16QTextFrameFormat9setMarginEd( %struct.QTextBlockFormat* %fmt, double %tmp112 )
-			to label %invcont114 unwind label %cleanup192
-invcont114:		; preds = %invcont111
-	%tmp116 = call %struct.QTextBlockGroup* @_ZNK13QTextDocument9rootFrameEv( %struct.QAbstractTextDocumentLayout* %tmp95 )		; <%struct.QTextBlockGroup*> [#uses=1]
-	invoke void @_ZN10QTextFrame14setFrameFormatERK16QTextFrameFormat( %struct.QTextBlockGroup* %tmp116, %struct.QTextBlockFormat* %fmt )
-			to label %invcont117 unwind label %cleanup192
-invcont117:		; preds = %invcont114
-	%tmp119 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
-			to label %invcont118 unwind label %cleanup192		; <%struct.QPaintDevice*> [#uses=1]
-invcont118:		; preds = %invcont117
-	%tmp121 = invoke i32 @_ZNK12QPaintDevice6heightEv( %struct.QPaintDevice* %tmp119 )
-			to label %invcont120 unwind label %cleanup192		; <i32> [#uses=1]
-invcont120:		; preds = %invcont118
-	%tmp121.upgrd.19 = sitofp i32 %tmp121 to double		; <double> [#uses=1]
-	%tmp123 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
-			to label %invcont122 unwind label %cleanup192		; <%struct.QPaintDevice*> [#uses=1]
-invcont122:		; preds = %invcont120
-	%tmp125 = invoke i32 @_ZNK12QPaintDevice5widthEv( %struct.QPaintDevice* %tmp123 )
-			to label %invcont124 unwind label %cleanup192		; <i32> [#uses=1]
-invcont124:		; preds = %invcont122
-	%tmp125.upgrd.20 = sitofp i32 %tmp125 to double		; <double> [#uses=1]
-	call void @_ZN6QRectFC1Edddd( %struct.QRectF* %tmp.upgrd.1, double 0.000000e+00, double 0.000000e+00, double %tmp125.upgrd.20, double %tmp121.upgrd.19 )
-	%tmp126 = getelementptr %struct.QRectF* %body, i32 0, i32 0		; <double*> [#uses=1]
-	%tmp127 = getelementptr %struct.QRectF* %tmp.upgrd.1, i32 0, i32 0		; <double*> [#uses=1]
-	%tmp128 = load double* %tmp127		; <double> [#uses=1]
-	store double %tmp128, double* %tmp126
-	%tmp129 = getelementptr %struct.QRectF* %body, i32 0, i32 1		; <double*> [#uses=1]
-	%tmp130 = getelementptr %struct.QRectF* %tmp.upgrd.1, i32 0, i32 1		; <double*> [#uses=1]
-	%tmp131 = load double* %tmp130		; <double> [#uses=1]
-	store double %tmp131, double* %tmp129
-	%tmp132 = getelementptr %struct.QRectF* %body, i32 0, i32 2		; <double*> [#uses=1]
-	%tmp133 = getelementptr %struct.QRectF* %tmp.upgrd.1, i32 0, i32 2		; <double*> [#uses=1]
-	%tmp134 = load double* %tmp133		; <double> [#uses=1]
-	store double %tmp134, double* %tmp132
-	%tmp135 = getelementptr %struct.QRectF* %body, i32 0, i32 3		; <double*> [#uses=1]
-	%tmp136 = getelementptr %struct.QRectF* %tmp.upgrd.1, i32 0, i32 3		; <double*> [#uses=1]
-	%tmp137 = load double* %tmp136		; <double> [#uses=1]
-	store double %tmp137, double* %tmp135
-	%tmp138 = call double @_ZNK6QRectF6heightEv( %struct.QRectF* %body )		; <double> [#uses=1]
-	%tmp139 = sitofp i32 %tmp109.upgrd.17 to double		; <double> [#uses=1]
-	%tmp140 = fsub double %tmp138, %tmp139		; <double> [#uses=1]
-	%tmp142 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
-			to label %invcont141 unwind label %cleanup192		; <%struct.QPaintDevice*> [#uses=1]
-invcont141:		; preds = %invcont124
-	invoke void @_ZNK13QTextDocument11defaultFontEv( %struct.QFont* sret  %tmp.upgrd.3, %struct.QAbstractTextDocumentLayout* %tmp95 )
-			to label %invcont144 unwind label %cleanup192
-invcont144:		; preds = %invcont141
-	invoke void @_ZN12QFontMetricsC1ERK5QFontP12QPaintDevice( %struct.QFontMetrics* %tmp.upgrd.2, %struct.QFont* %tmp.upgrd.3, %struct.QPaintDevice* %tmp142 )
-			to label %invcont146 unwind label %cleanup173
-invcont146:		; preds = %invcont144
-	%tmp149 = invoke i32 @_ZNK12QFontMetrics6ascentEv( %struct.QFontMetrics* %tmp.upgrd.2 )
-			to label %invcont148 unwind label %cleanup168		; <i32> [#uses=1]
-invcont148:		; preds = %invcont146
-	%tmp149.upgrd.21 = sitofp i32 %tmp149 to double		; <double> [#uses=1]
-	%tmp150 = fadd double %tmp140, %tmp149.upgrd.21		; <double> [#uses=1]
-	%tmp152 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
-			to label %invcont151 unwind label %cleanup168		; <%struct.QPaintDevice*> [#uses=1]
-invcont151:		; preds = %invcont148
-	%tmp154 = invoke i32 @_ZNK12QPaintDevice11logicalDpiYEv( %struct.QPaintDevice* %tmp152 )
-			to label %invcont153 unwind label %cleanup168		; <i32> [#uses=1]
-invcont153:		; preds = %invcont151
-	%tmp155 = mul i32 %tmp154, 5		; <i32> [#uses=1]
-	%tmp156 = sdiv i32 %tmp155, 72		; <i32> [#uses=1]
-	%tmp156.upgrd.22 = sitofp i32 %tmp156 to double		; <double> [#uses=1]
-	%tmp157 = fadd double %tmp150, %tmp156.upgrd.22		; <double> [#uses=1]
-	%tmp158 = call double @_ZNK6QRectF5widthEv( %struct.QRectF* %body )		; <double> [#uses=1]
-	%tmp159 = sitofp i32 %tmp109.upgrd.17 to double		; <double> [#uses=1]
-	%tmp160 = fsub double %tmp158, %tmp159		; <double> [#uses=1]
-	call void @_ZN7QPointFC1Edd( %struct.QPointF* %tmp2, double %tmp160, double %tmp157 )
-	%tmp161 = getelementptr %struct.QPointF* %pageNumberPos, i32 0, i32 0		; <double*> [#uses=1]
-	%tmp162 = getelementptr %struct.QPointF* %tmp2, i32 0, i32 0		; <double*> [#uses=1]
-	%tmp163 = load double* %tmp162		; <double> [#uses=1]
-	store double %tmp163, double* %tmp161
-	%tmp164 = getelementptr %struct.QPointF* %pageNumberPos, i32 0, i32 1		; <double*> [#uses=1]
-	%tmp165 = getelementptr %struct.QPointF* %tmp2, i32 0, i32 1		; <double*> [#uses=1]
-	%tmp166 = load double* %tmp165		; <double> [#uses=1]
-	store double %tmp166, double* %tmp164
-	invoke void @_ZN12QFontMetricsD1Ev( %struct.QFontMetrics* %tmp.upgrd.2 )
-			to label %cleanup171 unwind label %cleanup173
-cleanup168:		; preds = %invcont151, %invcont148, %invcont146
-        %val168 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
-                    cleanup
-	invoke void @_ZN12QFontMetricsD1Ev( %struct.QFontMetrics* %tmp.upgrd.2 )
-			to label %cleanup173 unwind label %cleanup173
-cleanup171:		; preds = %invcont153
-	invoke void @_ZN5QFontD1Ev( %struct.QFont* %tmp.upgrd.3 )
-			to label %finally170 unwind label %cleanup192
-cleanup173:		; preds = %cleanup168, %cleanup168, %invcont153, %invcont144
-        %val173 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
-                    cleanup
-	invoke void @_ZN5QFontD1Ev( %struct.QFont* %tmp.upgrd.3 )
-			to label %cleanup192 unwind label %cleanup192
-finally170:		; preds = %cleanup171
-	invoke void @_ZNK13QTextDocument11defaultFontEv( %struct.QFont* sret  %font, %struct.QAbstractTextDocumentLayout* %tmp95 )
-			to label %invcont177 unwind label %cleanup192
-invcont177:		; preds = %finally170
-	invoke void @_ZN5QFont12setPointSizeEi( %struct.QFont* %font, i32 10 )
-			to label %invcont179 unwind label %cleanup187
-invcont179:		; preds = %invcont177
-	invoke void @_ZN13QTextDocument14setDefaultFontERK5QFont( %struct.QAbstractTextDocumentLayout* %tmp95, %struct.QFont* %font )
-			to label %invcont181 unwind label %cleanup187
-invcont181:		; preds = %invcont179
-	call void @_ZNK6QRectF4sizeEv( %struct.QPointF* sret  %tmp3, %struct.QRectF* %body )
-	invoke void @_ZN13QTextDocument11setPageSizeERK6QSizeF( %struct.QAbstractTextDocumentLayout* %tmp95, %struct.QPointF* %tmp3 )
-			to label %cleanup185 unwind label %cleanup187
-cleanup185:		; preds = %invcont181
-	invoke void @_ZN5QFontD1Ev( %struct.QFont* %font )
-			to label %cleanup190 unwind label %cleanup192
-cleanup187:		; preds = %invcont181, %invcont179, %invcont177
-        %val187 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
-                    cleanup
-	invoke void @_ZN5QFontD1Ev( %struct.QFont* %font )
-			to label %cleanup192 unwind label %cleanup192
-cleanup190:		; preds = %cleanup185
-	invoke void @_ZN16QTextFrameFormatD1Ev( %struct.QTextBlockFormat* %fmt )
-			to label %cond_next194 unwind label %cleanup329
-cleanup192:		; preds = %cleanup187, %cleanup187, %cleanup185, %finally170, %cleanup173, %cleanup173, %cleanup171, %invcont141, %invcont124, %invcont122, %invcont120, %invcont118, %invcont117, %invcont114, %invcont111
-        %val192 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
-                    cleanup
-	invoke void @_ZN16QTextFrameFormatD1Ev( %struct.QTextBlockFormat* %fmt )
-			to label %cleanup329 unwind label %cleanup329
-cond_next194:		; preds = %cleanup190, %invcont83
-	%clonedDoc.1 = phi %struct.QAbstractTextDocumentLayout* [ null, %invcont83 ], [ %tmp95, %cleanup190 ]		; <%struct.QAbstractTextDocumentLayout*> [#uses=3]
-	%doc.1 = phi %struct.QAbstractTextDocumentLayout* [ %this, %invcont83 ], [ %tmp95, %cleanup190 ]		; <%struct.QAbstractTextDocumentLayout*> [#uses=2]
-	%tmp197 = invoke i1 @_ZNK8QPrinter13collateCopiesEv( %struct.QPrinter* %printer )
-			to label %invcont196 unwind label %cleanup329		; <i1> [#uses=1]
-invcont196:		; preds = %cond_next194
-	br i1 %tmp197, label %cond_true200, label %cond_false204
-cond_true200:		; preds = %invcont196
-	%tmp203 = invoke i32 @_ZNK8QPrinter9numCopiesEv( %struct.QPrinter* %printer )
-			to label %invcont202 unwind label %cleanup329		; <i32> [#uses=1]
-invcont202:		; preds = %cond_true200
-	br label %cond_next208
-cond_false204:		; preds = %invcont196
-	%tmp207 = invoke i32 @_ZNK8QPrinter9numCopiesEv( %struct.QPrinter* %printer )
-			to label %invcont206 unwind label %cleanup329		; <i32> [#uses=1]
-invcont206:		; preds = %cond_false204
-	br label %cond_next208
-cond_next208:		; preds = %invcont206, %invcont202
-	%pageCopies.0 = phi i32 [ %tmp203, %invcont202 ], [ 1, %invcont206 ]		; <i32> [#uses=2]
-	%docCopies.0 = phi i32 [ 1, %invcont202 ], [ %tmp207, %invcont206 ]		; <i32> [#uses=2]
-	%tmp211 = invoke i32 @_ZNK8QPrinter8fromPageEv( %struct.QPrinter* %printer )
-			to label %invcont210 unwind label %cleanup329		; <i32> [#uses=3]
-invcont210:		; preds = %cond_next208
-	%tmp214 = invoke i32 @_ZNK8QPrinter6toPageEv( %struct.QPrinter* %printer )
-			to label %invcont213 unwind label %cleanup329		; <i32> [#uses=3]
-invcont213:		; preds = %invcont210
-	%tmp216 = icmp eq i32 %tmp211, 0		; <i1> [#uses=1]
-	br i1 %tmp216, label %cond_true217, label %cond_next225
-cond_true217:		; preds = %invcont213
-	%tmp219 = icmp eq i32 %tmp214, 0		; <i1> [#uses=1]
-	br i1 %tmp219, label %cond_true220, label %cond_next225
-cond_true220:		; preds = %cond_true217
-	%tmp223 = invoke i32 @_ZNK13QTextDocument9pageCountEv( %struct.QAbstractTextDocumentLayout* %doc.1 )
-			to label %invcont222 unwind label %cleanup329		; <i32> [#uses=1]
-invcont222:		; preds = %cond_true220
-	br label %cond_next225
-cond_next225:		; preds = %invcont222, %cond_true217, %invcont213
-	%toPage.1 = phi i32 [ %tmp223, %invcont222 ], [ %tmp214, %cond_true217 ], [ %tmp214, %invcont213 ]		; <i32> [#uses=2]
-	%fromPage.1 = phi i32 [ 1, %invcont222 ], [ %tmp211, %cond_true217 ], [ %tmp211, %invcont213 ]		; <i32> [#uses=2]
-	%tmp.page = invoke i32 @_ZNK8QPrinter9pageOrderEv( %struct.QPrinter* %printer )
-			to label %invcont227 unwind label %cleanup329		; <i32> [#uses=1]
-invcont227:		; preds = %cond_next225
-	%tmp228 = icmp eq i32 %tmp.page, 1		; <i1> [#uses=1]
-	br i1 %tmp228, label %cond_true230, label %cond_next234
-cond_true230:		; preds = %invcont227
-	br label %cond_next234
-cond_next234:		; preds = %cond_true230, %invcont227
-	%ascending.1 = phi i1 [ false, %cond_true230 ], [ true, %invcont227 ]		; <i1> [#uses=1]
-	%toPage.2 = phi i32 [ %fromPage.1, %cond_true230 ], [ %toPage.1, %invcont227 ]		; <i32> [#uses=1]
-	%fromPage.2 = phi i32 [ %toPage.1, %cond_true230 ], [ %fromPage.1, %invcont227 ]		; <i32> [#uses=1]
-	br label %bb309
-bb237:		; preds = %cond_true313, %cond_next293
-	%iftmp.410.4 = phi i1 [ %iftmp.410.5, %cond_true313 ], [ %iftmp.410.1, %cond_next293 ]		; <i1> [#uses=1]
-	%page.4 = phi i32 [ %fromPage.2, %cond_true313 ], [ %page.3, %cond_next293 ]		; <i32> [#uses=4]
-	br label %bb273
-invcont240:		; preds = %cond_true277
-	%tmp242 = icmp eq i32 %tmp241, 2		; <i1> [#uses=1]
-	br i1 %tmp242, label %bb252, label %cond_next244
-cond_next244:		; preds = %invcont240
-	%tmp247 = invoke i32 @_ZNK8QPrinter12printerStateEv( %struct.QPrinter* %printer )
-			to label %invcont246 unwind label %cleanup329		; <i32> [#uses=1]
-invcont246:		; preds = %cond_next244
-	%tmp248 = icmp eq i32 %tmp247, 3		; <i1> [#uses=1]
-	br i1 %tmp248, label %bb252, label %bb253
-bb252:		; preds = %invcont246, %invcont240
-	br label %bb254
-bb253:		; preds = %invcont246
-	br label %bb254
-bb254:		; preds = %bb253, %bb252
-	%iftmp.410.0 = phi i1 [ true, %bb252 ], [ false, %bb253 ]		; <i1> [#uses=2]
-	br i1 %iftmp.410.0, label %UserCanceled, label %cond_next258
-cond_next258:		; preds = %bb254
-	invoke fastcc void @_Z9printPageiP8QPainterPK13QTextDocumentRK6QRectFRK7QPointF( i32 %page.4, %struct.QPainter* %p, %struct.QAbstractTextDocumentLayout* %doc.1, %struct.QRectF* %body, %struct.QPointF* %pageNumberPos )
-			to label %invcont261 unwind label %cleanup329
-invcont261:		; preds = %cond_next258
-	%tmp263 = add i32 %pageCopies.0, -1		; <i32> [#uses=1]
-	%tmp265 = icmp sgt i32 %tmp263, %j.4		; <i1> [#uses=1]
-	br i1 %tmp265, label %cond_true266, label %cond_next270
-cond_true266:		; preds = %invcont261
-	%tmp269 = invoke i1 @_ZN8QPrinter7newPageEv( %struct.QPrinter* %printer )
-			to label %cond_next270 unwind label %cleanup329		; <i1> [#uses=0]
-cond_next270:		; preds = %cond_true266, %invcont261
-	%tmp272 = add i32 %j.4, 1		; <i32> [#uses=1]
-	br label %bb273
-bb273:		; preds = %cond_next270, %bb237
-	%iftmp.410.1 = phi i1 [ %iftmp.410.4, %bb237 ], [ %iftmp.410.0, %cond_next270 ]		; <i1> [#uses=2]
-	%j.4 = phi i32 [ 0, %bb237 ], [ %tmp272, %cond_next270 ]		; <i32> [#uses=3]
-	%tmp276 = icmp slt i32 %j.4, %pageCopies.0		; <i1> [#uses=1]
-	br i1 %tmp276, label %cond_true277, label %bb280
-cond_true277:		; preds = %bb273
-	%tmp241 = invoke i32 @_ZNK8QPrinter12printerStateEv( %struct.QPrinter* %printer )
-			to label %invcont240 unwind label %cleanup329		; <i32> [#uses=1]
-bb280:		; preds = %bb273
-	%tmp283 = icmp eq i32 %page.4, %toPage.2		; <i1> [#uses=1]
-	br i1 %tmp283, label %bb297, label %cond_next285
-cond_next285:		; preds = %bb280
-	br i1 %ascending.1, label %cond_true287, label %cond_false290
-cond_true287:		; preds = %cond_next285
-	%tmp289 = add i32 %page.4, 1		; <i32> [#uses=1]
-	br label %cond_next293
-cond_false290:		; preds = %cond_next285
-	%tmp292 = add i32 %page.4, -1		; <i32> [#uses=1]
-	br label %cond_next293
-cond_next293:		; preds = %cond_false290, %cond_true287
-	%page.3 = phi i32 [ %tmp289, %cond_true287 ], [ %tmp292, %cond_false290 ]		; <i32> [#uses=1]
-	%tmp296 = invoke i1 @_ZN8QPrinter7newPageEv( %struct.QPrinter* %printer )
-			to label %bb237 unwind label %cleanup329		; <i1> [#uses=0]
-bb297:		; preds = %bb280
-	%tmp299 = add i32 %docCopies.0, -1		; <i32> [#uses=1]
-	%tmp301 = icmp sgt i32 %tmp299, %i.1		; <i1> [#uses=1]
-	br i1 %tmp301, label %cond_true302, label %cond_next306
-cond_true302:		; preds = %bb297
-	%tmp305 = invoke i1 @_ZN8QPrinter7newPageEv( %struct.QPrinter* %printer )
-			to label %cond_next306 unwind label %cleanup329		; <i1> [#uses=0]
-cond_next306:		; preds = %cond_true302, %bb297
-	%tmp308 = add i32 %i.1, 1		; <i32> [#uses=1]
-	br label %bb309
-bb309:		; preds = %cond_next306, %cond_next234
-	%iftmp.410.5 = phi i1 [ undef, %cond_next234 ], [ %iftmp.410.1, %cond_next306 ]		; <i1> [#uses=1]
-	%i.1 = phi i32 [ 0, %cond_next234 ], [ %tmp308, %cond_next306 ]		; <i32> [#uses=3]
-	%tmp312 = icmp slt i32 %i.1, %docCopies.0		; <i1> [#uses=1]
-	br i1 %tmp312, label %cond_true313, label %UserCanceled
-cond_true313:		; preds = %bb309
-	br label %bb237
-UserCanceled:		; preds = %bb309, %bb254
-	%tmp318 = icmp eq %struct.QAbstractTextDocumentLayout* %clonedDoc.1, null		; <i1> [#uses=1]
-	br i1 %tmp318, label %cleanup327, label %cond_true319
-cond_true319:		; preds = %UserCanceled
-	%tmp.upgrd.23 = getelementptr %struct.QAbstractTextDocumentLayout* %clonedDoc.1, i32 0, i32 0, i32 0		; <i32 (...)***> [#uses=1]
-	%tmp.upgrd.24 = load i32 (...)*** %tmp.upgrd.23		; <i32 (...)**> [#uses=1]
-	%tmp322 = getelementptr i32 (...)** %tmp.upgrd.24, i32 4		; <i32 (...)**> [#uses=1]
-	%tmp.upgrd.25 = load i32 (...)** %tmp322		; <i32 (...)*> [#uses=1]
-	%tmp.upgrd.26 = bitcast i32 (...)* %tmp.upgrd.25 to void (%struct.QAbstractTextDocumentLayout*)*		; <void (%struct.QAbstractTextDocumentLayout*)*> [#uses=1]
-	invoke void %tmp.upgrd.26( %struct.QAbstractTextDocumentLayout* %clonedDoc.1 )
-			to label %cleanup327 unwind label %cleanup329
-cleanup327:		; preds = %cond_true319, %UserCanceled
-	call void @_ZN8QPainterD1Ev( %struct.QPainter* %p )
-	ret void
-cleanup328:		; preds = %invcont
-	call void @_ZN8QPainterD1Ev( %struct.QPainter* %p )
-	ret void
-cleanup329:		; preds = %cond_true319, %cond_true302, %cond_next293, %cond_true277, %cond_true266, %cond_next258, %cond_next244, %cond_next225, %cond_true220, %invcont210, %cond_next208, %cond_false204, %cond_true200, %cond_next194, %cleanup192, %cleanup192, %cleanup190, %invcont106, %invcont104, %invcont103, %invcont100, %invcont98, %invcont94, %cond_false, %invcont83, %invcont79, %invcont57, %invcont51, %invcont45, %cond_next42, %invcont37, %cond_true35, %invcont29, %invcont25, %cond_true24, %cond_next, %entry
-        %val = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
-                 cleanup
-	call void @_ZN8QPainterD1Ev( %struct.QPainter* %p )
-	resume { i8*, i32 } %val
-}
-
-declare void @_ZN6QSizeFC1Edd(%struct.QPointF*, double, double)
-
-declare i1 @_ZNK6QSizeF7isValidEv(%struct.QPointF*)
-
-declare double @_ZNK6QSizeF5widthEv(%struct.QPointF*)
-
-declare double @_ZNK6QSizeF6heightEv(%struct.QPointF*)
-
-declare double* @_ZN6QSizeF6rwidthEv(%struct.QPointF*)
-
-declare double* @_ZN6QSizeF7rheightEv(%struct.QPointF*)
-
-declare %struct.QTextDocumentPrivate* @_ZNK13QTextDocument6d_funcEv(%struct.QAbstractTextDocumentLayout*)
-
-declare void @_ZN7QPointFC1Ev(%struct.QPointF*)
-
-declare void @_ZN7QPointFC1Edd(%struct.QPointF*, double, double)
-
-declare void @_ZN16QTextFrameFormat9setMarginEd(%struct.QTextBlockFormat*, double)
-
-declare void @_ZN6QRectFC1Edddd(%struct.QRectF*, double, double, double, double)
-
-declare void @_ZN6QRectFC1ERK7QPointFRK6QSizeF(%struct.QRectF*, %struct.QPointF*, %struct.QPointF*)
-
-declare double @_ZNK6QRectF5widthEv(%struct.QRectF*)
-
-declare double @_ZNK6QRectF6heightEv(%struct.QRectF*)
-
-declare void @_ZNK6QRectF4sizeEv(%struct.QPointF*, %struct.QRectF*)
-
-declare void @_ZN16QTextFrameFormatD1Ev(%struct.QTextBlockFormat*)
-
-declare void @_ZNK10QTextFrame11frameFormatEv(%struct.QTextBlockFormat*, %struct.QTextBlockGroup*)
-
-declare void @_ZN10QTextFrame14setFrameFormatERK16QTextFrameFormat(%struct.QTextBlockGroup*, %struct.QTextBlockFormat*)
-
-declare i32 @_ZNK12QPaintDevice5widthEv(%struct.QPaintDevice*)
-
-declare i32 @_ZNK12QPaintDevice6heightEv(%struct.QPaintDevice*)
-
-declare i32 @_ZNK12QPaintDevice11logicalDpiXEv(%struct.QPaintDevice*)
-
-declare i32 @_ZNK12QPaintDevice11logicalDpiYEv(%struct.QPaintDevice*)
-
-declare %struct.QAbstractTextDocumentLayout* @_ZNK13QTextDocument5cloneEP7QObject(%struct.QAbstractTextDocumentLayout*, %struct.QObject*)
-
-declare void @_ZN5QFontD1Ev(%struct.QFont*)
-
-declare %struct.QAbstractTextDocumentLayout* @_ZNK13QTextDocument14documentLayoutEv(%struct.QAbstractTextDocumentLayout*)
-
-declare %struct.QTextBlockGroup* @_ZNK13QTextDocument9rootFrameEv(%struct.QAbstractTextDocumentLayout*)
-
-declare i32 @_ZNK13QTextDocument9pageCountEv(%struct.QAbstractTextDocumentLayout*)
-
-declare void @_ZNK13QTextDocument11defaultFontEv(%struct.QFont*, %struct.QAbstractTextDocumentLayout*)
-
-declare void @_ZN13QTextDocument14setDefaultFontERK5QFont(%struct.QAbstractTextDocumentLayout*, %struct.QFont*)
-
-declare void @_ZN13QTextDocument11setPageSizeERK6QSizeF(%struct.QAbstractTextDocumentLayout*, %struct.QPointF*)
-
-declare void @_Z9printPageiP8QPainterPK13QTextDocumentRK6QRectFRK7QPointF(i32, %struct.QPainter*, %struct.QAbstractTextDocumentLayout*, %struct.QRectF*, %struct.QPointF*)
-
-declare void @_ZN12QFontMetricsD1Ev(%struct.QFontMetrics*)
-
-declare void @_ZN8QPainterC1EP12QPaintDevice(%struct.QPainter*, %struct.QPaintDevice*)
-
-declare i1 @_ZNK8QPainter8isActiveEv(%struct.QPainter*)
-
-declare i32 @_Z13qt_defaultDpiv()
-
-declare %struct.QPaintDevice* @_ZNK27QAbstractTextDocumentLayout11paintDeviceEv(%struct.QAbstractTextDocumentLayout*)
-
-declare void @_ZN8QPainter5scaleEdd(%struct.QPainter*, double, double)
-
-declare %struct.QPaintDevice* @_ZNK8QPainter6deviceEv(%struct.QPainter*)
-
-declare void @_ZN27QAbstractTextDocumentLayout14setPaintDeviceEP12QPaintDevice(%struct.QAbstractTextDocumentLayout*, %struct.QPaintDevice*)
-
-declare void @_ZN12QFontMetricsC1ERK5QFontP12QPaintDevice(%struct.QFontMetrics*, %struct.QFont*, %struct.QPaintDevice*)
-
-declare i32 @_ZNK12QFontMetrics6ascentEv(%struct.QFontMetrics*)
-
-declare void @_ZN5QFont12setPointSizeEi(%struct.QFont*, i32)
-
-declare i1 @_ZNK8QPrinter13collateCopiesEv(%struct.QPrinter*)
-
-declare i32 @_ZNK8QPrinter9numCopiesEv(%struct.QPrinter*)
-
-declare i32 @_ZNK8QPrinter8fromPageEv(%struct.QPrinter*)
-
-declare i32 @_ZNK8QPrinter6toPageEv(%struct.QPrinter*)
-
-declare i32 @_ZNK8QPrinter9pageOrderEv(%struct.QPrinter*)
-
-declare i32 @_ZNK8QPrinter12printerStateEv(%struct.QPrinter*)
-
-declare i1 @_ZN8QPrinter7newPageEv(%struct.QPrinter*)
-
-declare void @_ZN8QPainterD1Ev(%struct.QPainter*)
-
-declare i32 @__gxx_personality_v0(...)

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/2009-06-15-InvokeCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/2009-06-15-InvokeCrash.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/2009-06-15-InvokeCrash.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/2009-06-15-InvokeCrash.ll (removed)
@@ -1,569 +0,0 @@
-; RUN: opt < %s -simplifycfg -disable-output
-; END.
-	%struct..4._102 = type { %struct.QVectorData* }
-	%struct..5._125 = type { %struct.QMapData* }
-	%struct.QAbstractTextDocumentLayout = type { %struct.QObject }
-	%struct.QBasicAtomic = type { i32 }
-	%struct.QFont = type { %struct.QFontPrivate*, i32 }
-	%struct.QFontMetrics = type { %struct.QFontPrivate* }
-	%struct.QFontPrivate = type opaque
-	%"struct.QFragmentMap<QTextBlockData>" = type { %struct.QFragmentMapData }
-	%struct.QFragmentMapData = type { %"struct.QFragmentMapData::._154", i32 }
-	%"struct.QFragmentMapData::._154" = type { %"struct.QFragmentMapData::Header"* }
-	%"struct.QFragmentMapData::Header" = type { i32, i32, i32, i32, i32, i32, i32, i32 }
-	%"struct.QHash<uint,QHashDummyValue>" = type { %"struct.QHash<uint,QHashDummyValue>::._152" }
-	%"struct.QHash<uint,QHashDummyValue>::._152" = type { %struct.QHashData* }
-	%struct.QHashData = type { %"struct.QHashData::Node"*, %"struct.QHashData::Node"**, %struct.QBasicAtomic, i32, i32, i16, i16, i32, i8 }
-	%"struct.QHashData::Node" = type { %"struct.QHashData::Node"*, i32 }
-	%"struct.QList<QObject*>::._92" = type { %struct.QListData }
-	%"struct.QList<QPointer<QObject> >" = type { %"struct.QList<QObject*>::._92" }
-	%struct.QListData = type { %"struct.QListData::Data"* }
-	%"struct.QListData::Data" = type { %struct.QBasicAtomic, i32, i32, i32, i8, [1 x i8*] }
-	%"struct.QMap<QUrl,QVariant>" = type { %struct..5._125 }
-	%struct.QMapData = type { %"struct.QMapData::Node"*, [12 x %"struct.QMapData::Node"*], %struct.QBasicAtomic, i32, i32, i32, i8 }
-	%"struct.QMapData::Node" = type { %"struct.QMapData::Node"*, [1 x %"struct.QMapData::Node"*] }
-	%struct.QObject = type { i32 (...)**, %struct.QObjectData* }
-	%struct.QObjectData = type { i32 (...)**, %struct.QObject*, %struct.QObject*, %"struct.QList<QPointer<QObject> >", i8, [3 x i8], i32, i32 }
-	%struct.QObjectPrivate = type { %struct.QObjectData, i32, %struct.QObject*, %"struct.QList<QPointer<QObject> >", %"struct.QVector<QAbstractTextDocumentLayout::Selection>", %struct.QString }
-	%struct.QPaintDevice = type { i32 (...)**, i16 }
-	%struct.QPainter = type { %struct.QPainterPrivate* }
-	%struct.QPainterPrivate = type opaque
-	%struct.QPointF = type { double, double }
-	%struct.QPrinter = type { %struct.QPaintDevice, %struct.QPrinterPrivate* }
-	%struct.QPrinterPrivate = type opaque
-	%struct.QRectF = type { double, double, double, double }
-	%"struct.QSet<uint>" = type { %"struct.QHash<uint,QHashDummyValue>" }
-	%"struct.QSharedDataPointer<QTextFormatPrivate>" = type { %struct.QTextFormatPrivate* }
-	%struct.QString = type { %"struct.QString::Data"* }
-	%"struct.QString::Data" = type { %struct.QBasicAtomic, i32, i32, i16*, i8, i8, [1 x i16] }
-	%struct.QTextBlockFormat = type { %struct.QTextFormat }
-	%struct.QTextBlockGroup = type { %struct.QAbstractTextDocumentLayout }
-	%struct.QTextDocumentConfig = type { %struct.QString }
-	%struct.QTextDocumentPrivate = type { %struct.QObjectPrivate, %struct.QString, %"struct.QVector<QAbstractTextDocumentLayout::Selection>", i1, i32, i32, i1, i32, i32, i32, i32, i1, %struct.QTextFormatCollection, %struct.QTextBlockGroup*, %struct.QAbstractTextDocumentLayout*, %"struct.QFragmentMap<QTextBlockData>", %"struct.QFragmentMap<QTextBlockData>", i32, %"struct.QList<QPointer<QObject> >", %"struct.QList<QPointer<QObject> >", %"struct.QMap<QUrl,QVariant>", %"struct.QMap<QUrl,QVariant>", %"struct.QMap<QUrl,QVariant>", %struct.QTextDocumentConfig, i1, i1, %struct.QPointF }
-	%struct.QTextFormat = type { %"struct.QSharedDataPointer<QTextFormatPrivate>", i32 }
-	%struct.QTextFormatCollection = type { %"struct.QVector<QAbstractTextDocumentLayout::Selection>", %"struct.QVector<QAbstractTextDocumentLayout::Selection>", %"struct.QSet<uint>", %struct.QFont }
-	%struct.QTextFormatPrivate = type opaque
-	%"struct.QVector<QAbstractTextDocumentLayout::Selection>" = type { %struct..4._102 }
-	%struct.QVectorData = type { %struct.QBasicAtomic, i32, i32, i8 }
-
-define void @_ZNK13QTextDocument5printEP8QPrinter(%struct.QAbstractTextDocumentLayout* %this, %struct.QPrinter* %printer) {
-entry:
-	%tmp = alloca %struct.QPointF, align 16		; <%struct.QPointF*> [#uses=2]
-	%tmp.upgrd.1 = alloca %struct.QRectF, align 16		; <%struct.QRectF*> [#uses=5]
-	%tmp2 = alloca %struct.QPointF, align 16		; <%struct.QPointF*> [#uses=3]
-	%tmp.upgrd.2 = alloca %struct.QFontMetrics, align 16		; <%struct.QFontMetrics*> [#uses=4]
-	%tmp.upgrd.3 = alloca %struct.QFont, align 16		; <%struct.QFont*> [#uses=4]
-	%tmp3 = alloca %struct.QPointF, align 16		; <%struct.QPointF*> [#uses=2]
-	%p = alloca %struct.QPainter, align 16		; <%struct.QPainter*> [#uses=14]
-	%body = alloca %struct.QRectF, align 16		; <%struct.QRectF*> [#uses=9]
-        %foo = alloca double, align 8
-        %bar = alloca double, align 8
-	%pageNumberPos = alloca %struct.QPointF, align 16		; <%struct.QPointF*> [#uses=4]
-	%scaledPageSize = alloca %struct.QPointF, align 16		; <%struct.QPointF*> [#uses=6]
-	%printerPageSize = alloca %struct.QPointF, align 16		; <%struct.QPointF*> [#uses=3]
-	%fmt = alloca %struct.QTextBlockFormat, align 16		; <%struct.QTextBlockFormat*> [#uses=5]
-	%font = alloca %struct.QFont, align 16		; <%struct.QFont*> [#uses=5]
-	%tmp.upgrd.4 = call %struct.QTextDocumentPrivate* @_ZNK13QTextDocument6d_funcEv( %struct.QAbstractTextDocumentLayout* %this )		; <%struct.QTextDocumentPrivate*> [#uses=5]
-	%tmp.upgrd.5 = getelementptr %struct.QPrinter* %printer, i32 0, i32 0		; <%struct.QPaintDevice*> [#uses=1]
-	call void @_ZN8QPainterC1EP12QPaintDevice( %struct.QPainter* %p, %struct.QPaintDevice* %tmp.upgrd.5 )
-	%tmp.upgrd.6 = invoke i1 @_ZNK8QPainter8isActiveEv( %struct.QPainter* %p )
-			to label %invcont unwind label %cleanup329		; <i1> [#uses=1]
-invcont:		; preds = %entry
-	br i1 %tmp.upgrd.6, label %cond_next, label %cleanup328
-cond_next:		; preds = %invcont
-	%tmp8 = invoke %struct.QAbstractTextDocumentLayout* @_ZNK13QTextDocument14documentLayoutEv( %struct.QAbstractTextDocumentLayout* %this )
-			to label %invcont7 unwind label %cleanup329		; <%struct.QAbstractTextDocumentLayout*> [#uses=0]
-invcont7:		; preds = %cond_next
-	%tmp10 = getelementptr %struct.QTextDocumentPrivate* %tmp.upgrd.4, i32 0, i32 26		; <%struct.QPointF*> [#uses=1]
-	call void @_ZN7QPointFC1Edd( %struct.QPointF* %tmp, double 0.000000e+00, double 0.000000e+00 )
-	call void @_ZN6QRectFC1ERK7QPointFRK6QSizeF( %struct.QRectF* %body, %struct.QPointF* %tmp, %struct.QPointF* %tmp10 )
-	call void @_ZN7QPointFC1Ev( %struct.QPointF* %pageNumberPos )
-	%tmp12 = getelementptr %struct.QTextDocumentPrivate* %tmp.upgrd.4, i32 0, i32 26		; <%struct.QPointF*> [#uses=1]
-	%tmp13 = call i1 @_ZNK6QSizeF7isValidEv( %struct.QPointF* %tmp12 )		; <i1> [#uses=1]
-	br i1 %tmp13, label %cond_next15, label %bb
-cond_next15:		; preds = %invcont7
-	%tmp17 = getelementptr %struct.QTextDocumentPrivate* %tmp.upgrd.4, i32 0, i32 26		; <%struct.QPointF*> [#uses=1]
-	%tmp.upgrd.7 = call double @_ZNK6QSizeF6heightEv( %struct.QPointF* %tmp17 )		; <double> [#uses=1]
-	%tmp18 = fcmp oeq double %tmp.upgrd.7, 0x41DFFFFFFFC00000		; <i1> [#uses=1]
-	br i1 %tmp18, label %bb, label %cond_next20
-cond_next20:		; preds = %cond_next15
-	br label %bb21
-bb:		; preds = %cond_next15, %invcont7
-	br label %bb21
-bb21:		; preds = %bb, %cond_next20
-	%iftmp.406.0 = phi i1 [ false, %bb ], [ true, %cond_next20 ]		; <i1> [#uses=1]
-	br i1 %iftmp.406.0, label %cond_true24, label %cond_false
-cond_true24:		; preds = %bb21
-	%tmp.upgrd.8 = invoke i32 @_Z13qt_defaultDpiv( )
-			to label %invcont25 unwind label %cleanup329		; <i32> [#uses=1]
-invcont25:		; preds = %cond_true24
-	%tmp26 = sitofp i32 %tmp.upgrd.8 to double		; <double> [#uses=2]
-	%tmp30 = invoke %struct.QAbstractTextDocumentLayout* @_ZNK13QTextDocument14documentLayoutEv( %struct.QAbstractTextDocumentLayout* %this )
-			to label %invcont29 unwind label %cleanup329		; <%struct.QAbstractTextDocumentLayout*> [#uses=1]
-invcont29:		; preds = %invcont25
-	%tmp32 = invoke %struct.QPaintDevice* @_ZNK27QAbstractTextDocumentLayout11paintDeviceEv( %struct.QAbstractTextDocumentLayout* %tmp30 )
-			to label %invcont31 unwind label %cleanup329		; <%struct.QPaintDevice*> [#uses=3]
-invcont31:		; preds = %invcont29
-	%tmp34 = icmp eq %struct.QPaintDevice* %tmp32, null		; <i1> [#uses=1]
-	br i1 %tmp34, label %cond_next42, label %cond_true35
-cond_true35:		; preds = %invcont31
-	%tmp38 = invoke i32 @_ZNK12QPaintDevice11logicalDpiXEv( %struct.QPaintDevice* %tmp32 )
-			to label %invcont37 unwind label %cleanup329		; <i32> [#uses=1]
-invcont37:		; preds = %cond_true35
-	%tmp38.upgrd.9 = sitofp i32 %tmp38 to double		; <double> [#uses=1]
-	%tmp41 = invoke i32 @_ZNK12QPaintDevice11logicalDpiYEv( %struct.QPaintDevice* %tmp32 )
-			to label %invcont40 unwind label %cleanup329		; <i32> [#uses=1]
-invcont40:		; preds = %invcont37
-	%tmp41.upgrd.10 = sitofp i32 %tmp41 to double		; <double> [#uses=1]
-	br label %cond_next42
-cond_next42:		; preds = %invcont40, %invcont31
-	%sourceDpiY.2 = phi double [ %tmp41.upgrd.10, %invcont40 ], [ %tmp26, %invcont31 ]		; <double> [#uses=1]
-	%sourceDpiX.2 = phi double [ %tmp38.upgrd.9, %invcont40 ], [ %tmp26, %invcont31 ]		; <double> [#uses=1]
-	%tmp44 = getelementptr %struct.QPrinter* %printer, i32 0, i32 0		; <%struct.QPaintDevice*> [#uses=1]
-	%tmp46 = invoke i32 @_ZNK12QPaintDevice11logicalDpiXEv( %struct.QPaintDevice* %tmp44 )
-			to label %invcont45 unwind label %cleanup329		; <i32> [#uses=1]
-invcont45:		; preds = %cond_next42
-	%tmp46.upgrd.11 = sitofp i32 %tmp46 to double		; <double> [#uses=1]
-	%tmp48 = fdiv double %tmp46.upgrd.11, %sourceDpiX.2		; <double> [#uses=2]
-	%tmp50 = getelementptr %struct.QPrinter* %printer, i32 0, i32 0		; <%struct.QPaintDevice*> [#uses=1]
-	%tmp52 = invoke i32 @_ZNK12QPaintDevice11logicalDpiYEv( %struct.QPaintDevice* %tmp50 )
-			to label %invcont51 unwind label %cleanup329		; <i32> [#uses=1]
-invcont51:		; preds = %invcont45
-	%tmp52.upgrd.12 = sitofp i32 %tmp52 to double		; <double> [#uses=1]
-	%tmp54 = fdiv double %tmp52.upgrd.12, %sourceDpiY.2		; <double> [#uses=2]
-	invoke void @_ZN8QPainter5scaleEdd( %struct.QPainter* %p, double %tmp48, double %tmp54 )
-			to label %invcont57 unwind label %cleanup329
-invcont57:		; preds = %invcont51
-	%tmp.upgrd.13 = getelementptr %struct.QPointF* %scaledPageSize, i32 0, i32 0		; <double*> [#uses=1]
-	%tmp60 = getelementptr %struct.QTextDocumentPrivate* %tmp.upgrd.4, i32 0, i32 26, i32 0		; <double*> [#uses=1]
-	%tmp61 = load double* %tmp60		; <double> [#uses=1]
-	store double %tmp61, double* %tmp.upgrd.13
-	%tmp62 = getelementptr %struct.QPointF* %scaledPageSize, i32 0, i32 1		; <double*> [#uses=1]
-	%tmp63 = getelementptr %struct.QTextDocumentPrivate* %tmp.upgrd.4, i32 0, i32 26, i32 1		; <double*> [#uses=1]
-	%tmp64 = load double* %tmp63		; <double> [#uses=1]
-	store double %tmp64, double* %tmp62
-	%tmp65 = call double* @_ZN6QSizeF6rwidthEv( %struct.QPointF* %scaledPageSize )		; <double*> [#uses=2]
-	%tmp67 = load double* %tmp65		; <double> [#uses=1]
-	%tmp69 = fmul double %tmp67, %tmp48		; <double> [#uses=1]
-	store double %tmp69, double* %tmp65
-	%tmp71 = call double* @_ZN6QSizeF7rheightEv( %struct.QPointF* %scaledPageSize )		; <double*> [#uses=2]
-	%tmp73 = load double* %tmp71		; <double> [#uses=1]
-	%tmp75 = fmul double %tmp73, %tmp54		; <double> [#uses=1]
-	store double %tmp75, double* %tmp71
-	%tmp78 = getelementptr %struct.QPrinter* %printer, i32 0, i32 0		; <%struct.QPaintDevice*> [#uses=1]
-	%tmp80 = invoke i32 @_ZNK12QPaintDevice6heightEv( %struct.QPaintDevice* %tmp78 )
-			to label %invcont79 unwind label %cleanup329		; <i32> [#uses=1]
-invcont79:		; preds = %invcont57
-	%tmp82 = getelementptr %struct.QPrinter* %printer, i32 0, i32 0		; <%struct.QPaintDevice*> [#uses=1]
-	%tmp84 = invoke i32 @_ZNK12QPaintDevice5widthEv( %struct.QPaintDevice* %tmp82 )
-			to label %invcont83 unwind label %cleanup329		; <i32> [#uses=1]
-invcont83:		; preds = %invcont79
-	%tmp80.upgrd.14 = sitofp i32 %tmp80 to double		; <double> [#uses=1]
-	%tmp84.upgrd.15 = sitofp i32 %tmp84 to double		; <double> [#uses=1]
-	call void @_ZN6QSizeFC1Edd( %struct.QPointF* %printerPageSize, double %tmp84.upgrd.15, double %tmp80.upgrd.14 )
-	%tmp85 = call double @_ZNK6QSizeF6heightEv( %struct.QPointF* %printerPageSize )		; <double> [#uses=1]
-	%tmp86 = call double @_ZNK6QSizeF6heightEv( %struct.QPointF* %scaledPageSize )		; <double> [#uses=1]
-	%tmp87 = fdiv double %tmp85, %tmp86		; <double> [#uses=1]
-	%tmp88 = call double @_ZNK6QSizeF5widthEv( %struct.QPointF* %printerPageSize )		; <double> [#uses=1]
-	%tmp89 = call double @_ZNK6QSizeF5widthEv( %struct.QPointF* %scaledPageSize )		; <double> [#uses=1]
-	%tmp90 = fdiv double %tmp88, %tmp89		; <double> [#uses=1]
-	invoke void @_ZN8QPainter5scaleEdd( %struct.QPainter* %p, double %tmp90, double %tmp87 )
-			to label %cond_next194 unwind label %cleanup329
-cond_false:		; preds = %bb21
-	%tmp.upgrd.16 = getelementptr %struct.QAbstractTextDocumentLayout* %this, i32 0, i32 0		; <%struct.QObject*> [#uses=1]
-	%tmp95 = invoke %struct.QAbstractTextDocumentLayout* @_ZNK13QTextDocument5cloneEP7QObject( %struct.QAbstractTextDocumentLayout* %this, %struct.QObject* %tmp.upgrd.16 )
-			to label %invcont94 unwind label %cleanup329		; <%struct.QAbstractTextDocumentLayout*> [#uses=9]
-invcont94:		; preds = %cond_false
-	%tmp99 = invoke %struct.QAbstractTextDocumentLayout* @_ZNK13QTextDocument14documentLayoutEv( %struct.QAbstractTextDocumentLayout* %tmp95 )
-			to label %invcont98 unwind label %cleanup329		; <%struct.QAbstractTextDocumentLayout*> [#uses=1]
-invcont98:		; preds = %invcont94
-	%tmp101 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
-			to label %invcont100 unwind label %cleanup329		; <%struct.QPaintDevice*> [#uses=1]
-invcont100:		; preds = %invcont98
-	invoke void @_ZN27QAbstractTextDocumentLayout14setPaintDeviceEP12QPaintDevice( %struct.QAbstractTextDocumentLayout* %tmp99, %struct.QPaintDevice* %tmp101 )
-			to label %invcont103 unwind label %cleanup329
-invcont103:		; preds = %invcont100
-	%tmp105 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
-			to label %invcont104 unwind label %cleanup329		; <%struct.QPaintDevice*> [#uses=1]
-invcont104:		; preds = %invcont103
-	%tmp107 = invoke i32 @_ZNK12QPaintDevice11logicalDpiYEv( %struct.QPaintDevice* %tmp105 )
-			to label %invcont106 unwind label %cleanup329		; <i32> [#uses=1]
-invcont106:		; preds = %invcont104
-	%tmp108 = sitofp i32 %tmp107 to double		; <double> [#uses=1]
-	%tmp109 = fmul double %tmp108, 0x3FE93264C993264C		; <double> [#uses=1]
-	%tmp109.upgrd.17 = fptosi double %tmp109 to i32		; <i32> [#uses=3]
-	%tmp.upgrd.18 = call %struct.QTextBlockGroup* @_ZNK13QTextDocument9rootFrameEv( %struct.QAbstractTextDocumentLayout* %tmp95 )		; <%struct.QTextBlockGroup*> [#uses=1]
-	invoke void @_ZNK10QTextFrame11frameFormatEv( %struct.QTextBlockFormat* sret  %fmt, %struct.QTextBlockGroup* %tmp.upgrd.18 )
-			to label %invcont111 unwind label %cleanup329
-invcont111:		; preds = %invcont106
-	%tmp112 = sitofp i32 %tmp109.upgrd.17 to double		; <double> [#uses=1]
-	invoke void @_ZN16QTextFrameFormat9setMarginEd( %struct.QTextBlockFormat* %fmt, double %tmp112 )
-			to label %invcont114 unwind label %cleanup192
-invcont114:		; preds = %invcont111
-	%tmp116 = call %struct.QTextBlockGroup* @_ZNK13QTextDocument9rootFrameEv( %struct.QAbstractTextDocumentLayout* %tmp95 )		; <%struct.QTextBlockGroup*> [#uses=1]
-	invoke void @_ZN10QTextFrame14setFrameFormatERK16QTextFrameFormat( %struct.QTextBlockGroup* %tmp116, %struct.QTextBlockFormat* %fmt )
-			to label %invcont117 unwind label %cleanup192
-invcont117:		; preds = %invcont114
-	%tmp119 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
-			to label %invcont118 unwind label %cleanup192		; <%struct.QPaintDevice*> [#uses=1]
-invcont118:		; preds = %invcont117
-	%tmp121 = invoke i32 @_ZNK12QPaintDevice6heightEv( %struct.QPaintDevice* %tmp119 )
-			to label %invcont120 unwind label %cleanup192		; <i32> [#uses=1]
-invcont120:		; preds = %invcont118
-	%tmp121.upgrd.19 = sitofp i32 %tmp121 to double		; <double> [#uses=1]
-	%tmp123 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
-			to label %invcont122 unwind label %cleanup192		; <%struct.QPaintDevice*> [#uses=1]
-invcont122:		; preds = %invcont120
-	%tmp125 = invoke i32 @_ZNK12QPaintDevice5widthEv( %struct.QPaintDevice* %tmp123 )
-			to label %invcont124 unwind label %cleanup192		; <i32> [#uses=1]
-invcont124:		; preds = %invcont122
-	%tmp125.upgrd.20 = sitofp i32 %tmp125 to double		; <double> [#uses=1]
-	call void @_ZN6QRectFC1Edddd( %struct.QRectF* %tmp.upgrd.1, double 0.000000e+00, double 0.000000e+00, double %tmp125.upgrd.20, double %tmp121.upgrd.19 )
-	%tmp126 = getelementptr %struct.QRectF* %body, i32 0, i32 0		; <double*> [#uses=1]
-	%tmp127 = getelementptr %struct.QRectF* %tmp.upgrd.1, i32 0, i32 0		; <double*> [#uses=1]
-	%tmp128 = load double* %tmp127		; <double> [#uses=1]
-	store double %tmp128, double* %tmp126
-	%tmp129 = getelementptr %struct.QRectF* %body, i32 0, i32 1		; <double*> [#uses=1]
-	%tmp130 = getelementptr %struct.QRectF* %tmp.upgrd.1, i32 0, i32 1		; <double*> [#uses=1]
-	%tmp131 = load double* %tmp130		; <double> [#uses=1]
-	store double %tmp131, double* %tmp129
-	%tmp132 = getelementptr %struct.QRectF* %body, i32 0, i32 2		; <double*> [#uses=1]
-	%tmp133 = getelementptr %struct.QRectF* %tmp.upgrd.1, i32 0, i32 2		; <double*> [#uses=1]
-	%tmp134 = load double* %tmp133		; <double> [#uses=1]
-	store double %tmp134, double* %tmp132
-	%tmp135 = getelementptr %struct.QRectF* %body, i32 0, i32 3		; <double*> [#uses=1]
-	%tmp136 = getelementptr %struct.QRectF* %tmp.upgrd.1, i32 0, i32 3		; <double*> [#uses=1]
-	%tmp137 = load double* %tmp136		; <double> [#uses=1]
-	store double %tmp137, double* %tmp135
-	%tmp138 = call double @_ZNK6QRectF6heightEv( %struct.QRectF* %body )		; <double> [#uses=1]
-	%tmp139 = sitofp i32 %tmp109.upgrd.17 to double		; <double> [#uses=1]
-	%tmp140 = fsub double %tmp138, %tmp139		; <double> [#uses=1]
-	%tmp142 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
-			to label %invcont141 unwind label %cleanup192		; <%struct.QPaintDevice*> [#uses=1]
-invcont141:		; preds = %invcont124
-	invoke void @_ZNK13QTextDocument11defaultFontEv( %struct.QFont* sret  %tmp.upgrd.3, %struct.QAbstractTextDocumentLayout* %tmp95 )
-			to label %invcont144 unwind label %cleanup192
-invcont144:		; preds = %invcont141
-	invoke void @_ZN12QFontMetricsC1ERK5QFontP12QPaintDevice( %struct.QFontMetrics* %tmp.upgrd.2, %struct.QFont* %tmp.upgrd.3, %struct.QPaintDevice* %tmp142 )
-			to label %invcont146 unwind label %cleanup173
-invcont146:		; preds = %invcont144
-	%tmp149 = invoke i32 @_ZNK12QFontMetrics6ascentEv( %struct.QFontMetrics* %tmp.upgrd.2 )
-			to label %invcont148 unwind label %cleanup168		; <i32> [#uses=1]
-invcont148:		; preds = %invcont146
-	%tmp149.upgrd.21 = sitofp i32 %tmp149 to double		; <double> [#uses=1]
-	%tmp150 = fadd double %tmp140, %tmp149.upgrd.21		; <double> [#uses=1]
-	%tmp152 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
-			to label %invcont151 unwind label %cleanup168		; <%struct.QPaintDevice*> [#uses=1]
-invcont151:		; preds = %invcont148
-	%tmp154 = invoke i32 @_ZNK12QPaintDevice11logicalDpiYEv( %struct.QPaintDevice* %tmp152 )
-			to label %invcont153 unwind label %cleanup168		; <i32> [#uses=1]
-invcont153:		; preds = %invcont151
-	%tmp155 = mul i32 %tmp154, 5		; <i32> [#uses=1]
-	%tmp156 = sdiv i32 %tmp155, 72		; <i32> [#uses=1]
-	%tmp156.upgrd.22 = sitofp i32 %tmp156 to double		; <double> [#uses=1]
-	%tmp157 = fadd double %tmp150, %tmp156.upgrd.22		; <double> [#uses=1]
-	%tmp158 = call double @_ZNK6QRectF5widthEv( %struct.QRectF* %body )		; <double> [#uses=1]
-	%tmp159 = sitofp i32 %tmp109.upgrd.17 to double		; <double> [#uses=1]
-	%tmp160 = fsub double %tmp158, %tmp159		; <double> [#uses=1]
-	call void @_ZN7QPointFC1Edd( %struct.QPointF* %tmp2, double %tmp160, double %tmp157 )
-	%tmp161 = getelementptr %struct.QPointF* %pageNumberPos, i32 0, i32 0		; <double*> [#uses=1]
-	%tmp162 = getelementptr %struct.QPointF* %tmp2, i32 0, i32 0		; <double*> [#uses=1]
-	%tmp163 = load double* %tmp162		; <double> [#uses=1]
-	store double %tmp163, double* %tmp161
-	%tmp164 = getelementptr %struct.QPointF* %pageNumberPos, i32 0, i32 1		; <double*> [#uses=1]
-	%tmp165 = getelementptr %struct.QPointF* %tmp2, i32 0, i32 1		; <double*> [#uses=1]
-	%tmp166 = load double* %tmp165		; <double> [#uses=1]
-	store double %tmp166, double* %tmp164
-	invoke void @_ZN12QFontMetricsD1Ev( %struct.QFontMetrics* %tmp.upgrd.2 )
-			to label %cleanup171 unwind label %cleanup173
-cleanup168:		; preds = %invcont151, %invcont148, %invcont146
-        %val168 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
-                    cleanup
-	invoke void @_ZN12QFontMetricsD1Ev( %struct.QFontMetrics* %tmp.upgrd.2 )
-			to label %cleanup173 unwind label %cleanup173
-cleanup171:		; preds = %invcont153
-	invoke void @_ZN5QFontD1Ev( %struct.QFont* %tmp.upgrd.3 )
-			to label %finally170 unwind label %cleanup192
-cleanup173:		; preds = %cleanup168, %cleanup168, %invcont153, %invcont144
-        %val173 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
-                    cleanup
-	invoke void @_ZN5QFontD1Ev( %struct.QFont* %tmp.upgrd.3 )
-			to label %cleanup192 unwind label %cleanup192
-finally170:		; preds = %cleanup171
-	invoke void @_ZNK13QTextDocument11defaultFontEv( %struct.QFont* sret  %font, %struct.QAbstractTextDocumentLayout* %tmp95 )
-			to label %invcont177 unwind label %cleanup192
-invcont177:		; preds = %finally170
-	invoke void @_ZN5QFont12setPointSizeEi( %struct.QFont* %font, i32 10 )
-			to label %invcont179 unwind label %cleanup187
-invcont179:		; preds = %invcont177
-	invoke void @_ZN13QTextDocument14setDefaultFontERK5QFont( %struct.QAbstractTextDocumentLayout* %tmp95, %struct.QFont* %font )
-			to label %invcont181 unwind label %cleanup187
-invcont181:		; preds = %invcont179
-	call void @_ZNK6QRectF4sizeEv( %struct.QPointF* sret  %tmp3, %struct.QRectF* %body )
-	invoke void @_ZN13QTextDocument11setPageSizeERK6QSizeF( %struct.QAbstractTextDocumentLayout* %tmp95, %struct.QPointF* %tmp3 )
-			to label %cleanup185 unwind label %cleanup187
-cleanup185:		; preds = %invcont181
-	invoke void @_ZN5QFontD1Ev( %struct.QFont* %font )
-			to label %cleanup190 unwind label %cleanup192
-cleanup187:		; preds = %invcont181, %invcont179, %invcont177
-        %val187 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
-                    cleanup
-	invoke void @_ZN5QFontD1Ev( %struct.QFont* %font )
-			to label %cleanup192 unwind label %cleanup192
-cleanup190:		; preds = %cleanup185
-	invoke void @_ZN16QTextFrameFormatD1Ev( %struct.QTextBlockFormat* %fmt )
-			to label %cond_next194 unwind label %cleanup329
-cleanup192:		; preds = %cleanup187, %cleanup187, %cleanup185, %finally170, %cleanup173, %cleanup173, %cleanup171, %invcont141, %invcont124, %invcont122, %invcont120, %invcont118, %invcont117, %invcont114, %invcont111
-        %val192 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
-                    cleanup
-	invoke void @_ZN16QTextFrameFormatD1Ev( %struct.QTextBlockFormat* %fmt )
-			to label %cleanup329 unwind label %cleanup329
-cond_next194:		; preds = %cleanup190, %invcont83
-	%clonedDoc.1 = phi %struct.QAbstractTextDocumentLayout* [ null, %invcont83 ], [ %tmp95, %cleanup190 ]		; <%struct.QAbstractTextDocumentLayout*> [#uses=3]
-	%doc.1 = phi %struct.QAbstractTextDocumentLayout* [ %this, %invcont83 ], [ %tmp95, %cleanup190 ]		; <%struct.QAbstractTextDocumentLayout*> [#uses=2]
-	%tmp197 = invoke i1 @_ZNK8QPrinter13collateCopiesEv( %struct.QPrinter* %printer )
-			to label %invcont196 unwind label %cleanup329		; <i1> [#uses=1]
-invcont196:		; preds = %cond_next194
-	br i1 %tmp197, label %cond_true200, label %cond_false204
-cond_true200:		; preds = %invcont196
-	%tmp2000 = load double* %foo
-	store double %tmp2000, double* %bar
-	%tmp203 = invoke i32 @_ZNK8QPrinter9numCopiesEv( %struct.QPrinter* %printer )
-			to label %cond_next208 unwind label %cleanup329		; <i32> [#uses=1]
-cond_false204:		; preds = %invcont196
-	%tmp2001 = load double* %foo
-	store double %tmp2001, double* %bar
-	%tmp207 = invoke i32 @_ZNK8QPrinter9numCopiesEv( %struct.QPrinter* %printer )
-			to label %cond_next208 unwind label %cleanup329		; <i32> [#uses=1]
-cond_next208:		; preds = %invcont206, %invcont202
-	%pageCopies.0 = phi i32 [ %tmp203, %cond_true200 ], [ 1, %cond_false204 ]		; <i32> [#uses=2]
-	%docCopies.0 = phi i32 [ 1, %cond_true200 ], [ %tmp207, %cond_false204 ]		; <i32> [#uses=2]
-	%tmp211 = invoke i32 @_ZNK8QPrinter8fromPageEv( %struct.QPrinter* %printer )
-			to label %invcont210 unwind label %cleanup329		; <i32> [#uses=3]
-invcont210:		; preds = %cond_next208
-	%tmp214 = invoke i32 @_ZNK8QPrinter6toPageEv( %struct.QPrinter* %printer )
-			to label %invcont213 unwind label %cleanup329		; <i32> [#uses=3]
-invcont213:		; preds = %invcont210
-	%tmp216 = icmp eq i32 %tmp211, 0		; <i1> [#uses=1]
-	br i1 %tmp216, label %cond_true217, label %cond_next225
-cond_true217:		; preds = %invcont213
-	%tmp219 = icmp eq i32 %tmp214, 0		; <i1> [#uses=1]
-	br i1 %tmp219, label %cond_true220, label %cond_next225
-cond_true220:		; preds = %cond_true217
-	%tmp223 = invoke i32 @_ZNK13QTextDocument9pageCountEv( %struct.QAbstractTextDocumentLayout* %doc.1 )
-			to label %invcont222 unwind label %cleanup329		; <i32> [#uses=1]
-invcont222:		; preds = %cond_true220
-	br label %cond_next225
-cond_next225:		; preds = %invcont222, %cond_true217, %invcont213
-	%toPage.1 = phi i32 [ %tmp223, %invcont222 ], [ %tmp214, %cond_true217 ], [ %tmp214, %invcont213 ]		; <i32> [#uses=2]
-	%fromPage.1 = phi i32 [ 1, %invcont222 ], [ %tmp211, %cond_true217 ], [ %tmp211, %invcont213 ]		; <i32> [#uses=2]
-	%tmp.page = invoke i32 @_ZNK8QPrinter9pageOrderEv( %struct.QPrinter* %printer )
-			to label %invcont227 unwind label %cleanup329		; <i32> [#uses=1]
-invcont227:		; preds = %cond_next225
-	%tmp228 = icmp eq i32 %tmp.page, 1		; <i1> [#uses=1]
-	br i1 %tmp228, label %cond_true230, label %cond_next234
-cond_true230:		; preds = %invcont227
-	br label %cond_next234
-cond_next234:		; preds = %cond_true230, %invcont227
-	%ascending.1 = phi i1 [ false, %cond_true230 ], [ true, %invcont227 ]		; <i1> [#uses=1]
-	%toPage.2 = phi i32 [ %fromPage.1, %cond_true230 ], [ %toPage.1, %invcont227 ]		; <i32> [#uses=1]
-	%fromPage.2 = phi i32 [ %toPage.1, %cond_true230 ], [ %fromPage.1, %invcont227 ]		; <i32> [#uses=1]
-	br label %bb309
-bb237:		; preds = %cond_true313, %cond_next293
-	%iftmp.410.4 = phi i1 [ %iftmp.410.5, %cond_true313 ], [ %iftmp.410.1, %cond_next293 ]		; <i1> [#uses=1]
-	%page.4 = phi i32 [ %fromPage.2, %cond_true313 ], [ %page.3, %cond_next293 ]		; <i32> [#uses=4]
-	br label %bb273
-invcont240:		; preds = %cond_true277
-	%tmp242 = icmp eq i32 %tmp241, 2		; <i1> [#uses=1]
-	br i1 %tmp242, label %bb252, label %cond_next244
-cond_next244:		; preds = %invcont240
-	%tmp247 = invoke i32 @_ZNK8QPrinter12printerStateEv( %struct.QPrinter* %printer )
-			to label %invcont246 unwind label %cleanup329		; <i32> [#uses=1]
-invcont246:		; preds = %cond_next244
-	%tmp248 = icmp eq i32 %tmp247, 3		; <i1> [#uses=1]
-	br i1 %tmp248, label %bb252, label %bb253
-bb252:		; preds = %invcont246, %invcont240
-	br label %bb254
-bb253:		; preds = %invcont246
-	br label %bb254
-bb254:		; preds = %bb253, %bb252
-	%iftmp.410.0 = phi i1 [ true, %bb252 ], [ false, %bb253 ]		; <i1> [#uses=2]
-	br i1 %iftmp.410.0, label %UserCanceled, label %cond_next258
-cond_next258:		; preds = %bb254
-	invoke fastcc void @_Z9printPageiP8QPainterPK13QTextDocumentRK6QRectFRK7QPointF( i32 %page.4, %struct.QPainter* %p, %struct.QAbstractTextDocumentLayout* %doc.1, %struct.QRectF* %body, %struct.QPointF* %pageNumberPos )
-			to label %invcont261 unwind label %cleanup329
-invcont261:		; preds = %cond_next258
-	%tmp263 = add i32 %pageCopies.0, -1		; <i32> [#uses=1]
-	%tmp265 = icmp sgt i32 %tmp263, %j.4		; <i1> [#uses=1]
-	br i1 %tmp265, label %cond_true266, label %cond_next270
-cond_true266:		; preds = %invcont261
-	%tmp269 = invoke i1 @_ZN8QPrinter7newPageEv( %struct.QPrinter* %printer )
-			to label %cond_next270 unwind label %cleanup329		; <i1> [#uses=0]
-cond_next270:		; preds = %cond_true266, %invcont261
-	%tmp272 = add i32 %j.4, 1		; <i32> [#uses=1]
-	br label %bb273
-bb273:		; preds = %cond_next270, %bb237
-	%iftmp.410.1 = phi i1 [ %iftmp.410.4, %bb237 ], [ %iftmp.410.0, %cond_next270 ]		; <i1> [#uses=2]
-	%j.4 = phi i32 [ 0, %bb237 ], [ %tmp272, %cond_next270 ]		; <i32> [#uses=3]
-	%tmp276 = icmp slt i32 %j.4, %pageCopies.0		; <i1> [#uses=1]
-	br i1 %tmp276, label %cond_true277, label %bb280
-cond_true277:		; preds = %bb273
-	%tmp241 = invoke i32 @_ZNK8QPrinter12printerStateEv( %struct.QPrinter* %printer )
-			to label %invcont240 unwind label %cleanup329		; <i32> [#uses=1]
-bb280:		; preds = %bb273
-	%tmp283 = icmp eq i32 %page.4, %toPage.2		; <i1> [#uses=1]
-	br i1 %tmp283, label %bb297, label %cond_next285
-cond_next285:		; preds = %bb280
-	br i1 %ascending.1, label %cond_true287, label %cond_false290
-cond_true287:		; preds = %cond_next285
-	%tmp289 = add i32 %page.4, 1		; <i32> [#uses=1]
-	br label %cond_next293
-cond_false290:		; preds = %cond_next285
-	%tmp292 = add i32 %page.4, -1		; <i32> [#uses=1]
-	br label %cond_next293
-cond_next293:		; preds = %cond_false290, %cond_true287
-	%page.3 = phi i32 [ %tmp289, %cond_true287 ], [ %tmp292, %cond_false290 ]		; <i32> [#uses=1]
-	%tmp296 = invoke i1 @_ZN8QPrinter7newPageEv( %struct.QPrinter* %printer )
-			to label %bb237 unwind label %cleanup329		; <i1> [#uses=0]
-bb297:		; preds = %bb280
-	%tmp299 = add i32 %docCopies.0, -1		; <i32> [#uses=1]
-	%tmp301 = icmp sgt i32 %tmp299, %i.1		; <i1> [#uses=1]
-	br i1 %tmp301, label %cond_true302, label %cond_next306
-cond_true302:		; preds = %bb297
-	%tmp305 = invoke i1 @_ZN8QPrinter7newPageEv( %struct.QPrinter* %printer )
-			to label %cond_next306 unwind label %cleanup329		; <i1> [#uses=0]
-cond_next306:		; preds = %cond_true302, %bb297
-	%tmp308 = add i32 %i.1, 1		; <i32> [#uses=1]
-	br label %bb309
-bb309:		; preds = %cond_next306, %cond_next234
-	%iftmp.410.5 = phi i1 [ undef, %cond_next234 ], [ %iftmp.410.1, %cond_next306 ]		; <i1> [#uses=1]
-	%i.1 = phi i32 [ 0, %cond_next234 ], [ %tmp308, %cond_next306 ]		; <i32> [#uses=3]
-	%tmp312 = icmp slt i32 %i.1, %docCopies.0		; <i1> [#uses=1]
-	br i1 %tmp312, label %cond_true313, label %UserCanceled
-cond_true313:		; preds = %bb309
-	br label %bb237
-UserCanceled:		; preds = %bb309, %bb254
-	%tmp318 = icmp eq %struct.QAbstractTextDocumentLayout* %clonedDoc.1, null		; <i1> [#uses=1]
-	br i1 %tmp318, label %cleanup327, label %cond_true319
-cond_true319:		; preds = %UserCanceled
-	%tmp.upgrd.23 = getelementptr %struct.QAbstractTextDocumentLayout* %clonedDoc.1, i32 0, i32 0, i32 0		; <i32 (...)***> [#uses=1]
-	%tmp.upgrd.24 = load i32 (...)*** %tmp.upgrd.23		; <i32 (...)**> [#uses=1]
-	%tmp322 = getelementptr i32 (...)** %tmp.upgrd.24, i32 4		; <i32 (...)**> [#uses=1]
-	%tmp.upgrd.25 = load i32 (...)** %tmp322		; <i32 (...)*> [#uses=1]
-	%tmp.upgrd.26 = bitcast i32 (...)* %tmp.upgrd.25 to void (%struct.QAbstractTextDocumentLayout*)*		; <void (%struct.QAbstractTextDocumentLayout*)*> [#uses=1]
-	invoke void %tmp.upgrd.26( %struct.QAbstractTextDocumentLayout* %clonedDoc.1 )
-			to label %cleanup327 unwind label %cleanup329
-cleanup327:		; preds = %cond_true319, %UserCanceled
-	call void @_ZN8QPainterD1Ev( %struct.QPainter* %p )
-	ret void
-cleanup328:		; preds = %invcont
-	call void @_ZN8QPainterD1Ev( %struct.QPainter* %p )
-	ret void
-cleanup329:		; preds = %cond_true319, %cond_true302, %cond_next293, %cond_true277, %cond_true266, %cond_next258, %cond_next244, %cond_next225, %cond_true220, %invcont210, %cond_next208, %cond_false204, %cond_true200, %cond_next194, %cleanup192, %cleanup192, %cleanup190, %invcont106, %invcont104, %invcont103, %invcont100, %invcont98, %invcont94, %cond_false, %invcont83, %invcont79, %invcont57, %invcont51, %invcont45, %cond_next42, %invcont37, %cond_true35, %invcont29, %invcont25, %cond_true24, %cond_next, %entry
-        %val329 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
-                    cleanup
-	call void @_ZN8QPainterD1Ev( %struct.QPainter* %p )
-	resume { i8*, i32 } %val329
-}
-
-declare void @_ZN6QSizeFC1Edd(%struct.QPointF*, double, double)
-
-declare i1 @_ZNK6QSizeF7isValidEv(%struct.QPointF*)
-
-declare double @_ZNK6QSizeF5widthEv(%struct.QPointF*)
-
-declare double @_ZNK6QSizeF6heightEv(%struct.QPointF*)
-
-declare double* @_ZN6QSizeF6rwidthEv(%struct.QPointF*)
-
-declare double* @_ZN6QSizeF7rheightEv(%struct.QPointF*)
-
-declare %struct.QTextDocumentPrivate* @_ZNK13QTextDocument6d_funcEv(%struct.QAbstractTextDocumentLayout*)
-
-declare void @_ZN7QPointFC1Ev(%struct.QPointF*)
-
-declare void @_ZN7QPointFC1Edd(%struct.QPointF*, double, double)
-
-declare void @_ZN16QTextFrameFormat9setMarginEd(%struct.QTextBlockFormat*, double)
-
-declare void @_ZN6QRectFC1Edddd(%struct.QRectF*, double, double, double, double)
-
-declare void @_ZN6QRectFC1ERK7QPointFRK6QSizeF(%struct.QRectF*, %struct.QPointF*, %struct.QPointF*)
-
-declare double @_ZNK6QRectF5widthEv(%struct.QRectF*)
-
-declare double @_ZNK6QRectF6heightEv(%struct.QRectF*)
-
-declare void @_ZNK6QRectF4sizeEv(%struct.QPointF*, %struct.QRectF*)
-
-declare void @_ZN16QTextFrameFormatD1Ev(%struct.QTextBlockFormat*)
-
-declare void @_ZNK10QTextFrame11frameFormatEv(%struct.QTextBlockFormat*, %struct.QTextBlockGroup*)
-
-declare void @_ZN10QTextFrame14setFrameFormatERK16QTextFrameFormat(%struct.QTextBlockGroup*, %struct.QTextBlockFormat*)
-
-declare i32 @_ZNK12QPaintDevice5widthEv(%struct.QPaintDevice*)
-
-declare i32 @_ZNK12QPaintDevice6heightEv(%struct.QPaintDevice*)
-
-declare i32 @_ZNK12QPaintDevice11logicalDpiXEv(%struct.QPaintDevice*)
-
-declare i32 @_ZNK12QPaintDevice11logicalDpiYEv(%struct.QPaintDevice*)
-
-declare %struct.QAbstractTextDocumentLayout* @_ZNK13QTextDocument5cloneEP7QObject(%struct.QAbstractTextDocumentLayout*, %struct.QObject*)
-
-declare void @_ZN5QFontD1Ev(%struct.QFont*)
-
-declare %struct.QAbstractTextDocumentLayout* @_ZNK13QTextDocument14documentLayoutEv(%struct.QAbstractTextDocumentLayout*)
-
-declare %struct.QTextBlockGroup* @_ZNK13QTextDocument9rootFrameEv(%struct.QAbstractTextDocumentLayout*)
-
-declare i32 @_ZNK13QTextDocument9pageCountEv(%struct.QAbstractTextDocumentLayout*)
-
-declare void @_ZNK13QTextDocument11defaultFontEv(%struct.QFont*, %struct.QAbstractTextDocumentLayout*)
-
-declare void @_ZN13QTextDocument14setDefaultFontERK5QFont(%struct.QAbstractTextDocumentLayout*, %struct.QFont*)
-
-declare void @_ZN13QTextDocument11setPageSizeERK6QSizeF(%struct.QAbstractTextDocumentLayout*, %struct.QPointF*)
-
-declare void @_Z9printPageiP8QPainterPK13QTextDocumentRK6QRectFRK7QPointF(i32, %struct.QPainter*, %struct.QAbstractTextDocumentLayout*, %struct.QRectF*, %struct.QPointF*)
-
-declare void @_ZN12QFontMetricsD1Ev(%struct.QFontMetrics*)
-
-declare void @_ZN8QPainterC1EP12QPaintDevice(%struct.QPainter*, %struct.QPaintDevice*)
-
-declare i1 @_ZNK8QPainter8isActiveEv(%struct.QPainter*)
-
-declare i32 @_Z13qt_defaultDpiv()
-
-declare %struct.QPaintDevice* @_ZNK27QAbstractTextDocumentLayout11paintDeviceEv(%struct.QAbstractTextDocumentLayout*)
-
-declare void @_ZN8QPainter5scaleEdd(%struct.QPainter*, double, double)
-
-declare %struct.QPaintDevice* @_ZNK8QPainter6deviceEv(%struct.QPainter*)
-
-declare void @_ZN27QAbstractTextDocumentLayout14setPaintDeviceEP12QPaintDevice(%struct.QAbstractTextDocumentLayout*, %struct.QPaintDevice*)
-
-declare void @_ZN12QFontMetricsC1ERK5QFontP12QPaintDevice(%struct.QFontMetrics*, %struct.QFont*, %struct.QPaintDevice*)
-
-declare i32 @_ZNK12QFontMetrics6ascentEv(%struct.QFontMetrics*)
-
-declare void @_ZN5QFont12setPointSizeEi(%struct.QFont*, i32)
-
-declare i1 @_ZNK8QPrinter13collateCopiesEv(%struct.QPrinter*)
-
-declare i32 @_ZNK8QPrinter9numCopiesEv(%struct.QPrinter*)
-
-declare i32 @_ZNK8QPrinter8fromPageEv(%struct.QPrinter*)
-
-declare i32 @_ZNK8QPrinter6toPageEv(%struct.QPrinter*)
-
-declare i32 @_ZNK8QPrinter9pageOrderEv(%struct.QPrinter*)
-
-declare i32 @_ZNK8QPrinter12printerStateEv(%struct.QPrinter*)
-
-declare i1 @_ZN8QPrinter7newPageEv(%struct.QPrinter*)
-
-declare void @_ZN8QPainterD1Ev(%struct.QPainter*)
-
-declare i32 @__gxx_personality_v0(...)

Modified: llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll Tue Jan 15 11:16:16 2013
@@ -85,3 +85,31 @@
 ; CHECK: if.end7:
 ; CHECK: phi i32* [ %a, %if.then ], [ null, %if.then4 ], [ null, %if.else ]
 }
+
+define i32 @test4(i32* %a, i32 %b, i32* %c, i32 %d) nounwind {
+entry:
+  %tobool = icmp eq i32 %b, 0
+  br i1 %tobool, label %if.else, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @bar() nounwind
+  br label %if.end7
+
+if.else:                                          ; preds = %entry
+  %tobool3 = icmp eq i32 %d, 0
+  br i1 %tobool3, label %if.end7, label %if.then4
+
+if.then4:                                         ; preds = %if.else
+  tail call void @bar() nounwind
+  br label %if.end7
+
+if.end7:                                          ; preds = %if.else, %if.then4, %if.then
+  %x.0 = phi i32* [ %a, %if.then ], [ null, %if.then4 ], [ null, %if.else ]
+  %gep = getelementptr i32* %x.0, i32 10
+  %tmp9 = load i32* %gep
+  %tmp10 = or i32 %tmp9, 1
+  store i32 %tmp10, i32* %gep
+  ret i32 %tmp9
+; CHECK: @test4
+; CHECK-NOT: phi
+}

Modified: llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/preserve-branchweights.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/preserve-branchweights.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/preserve-branchweights.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyCFG/preserve-branchweights.ll Tue Jan 15 11:16:16 2013
@@ -79,10 +79,238 @@
   ret void
 }
 
+;; test5 - The case where it jumps to the default target will be removed.
+define void @test5(i32 %M, i32 %N) nounwind uwtable {
+entry:
+  switch i32 %N, label %sw2 [
+    i32 1, label %sw2
+    i32 2, label %sw.bb
+    i32 3, label %sw.bb1
+  ], !prof !3
+; CHECK: test5
+; CHECK: switch i32 %N, label %sw2 [
+; CHECK: i32 3, label %sw.bb1
+; CHECK: i32 2, label %sw.bb
+; CHECK: ], !prof !2
+
+sw.bb:
+  call void @helper(i32 0)
+  br label %sw.epilog
+
+sw.bb1:
+  call void @helper(i32 1)
+  br label %sw.epilog
+
+sw2:
+  call void @helper(i32 2)
+  br label %sw.epilog
+
+sw.epilog:
+  ret void
+}
+
+;; test6 - Some cases of the second switch are pruned during optimization.
+;; Then the second switch will be converted to a branch; finally, the first
+;; switch and the branch will be merged into a single switch.
+define void @test6(i32 %M, i32 %N) nounwind uwtable {
+entry:
+  switch i32 %N, label %sw2 [
+    i32 1, label %sw2
+    i32 2, label %sw.bb
+    i32 3, label %sw.bb1
+  ], !prof !4
+; CHECK: test6
+; CHECK: switch i32 %N, label %sw.epilog
+; CHECK: i32 3, label %sw.bb1
+; CHECK: i32 2, label %sw.bb
+; CHECK: i32 4, label %sw.bb5
+; CHECK: ], !prof !3
+
+sw.bb:
+  call void @helper(i32 0)
+  br label %sw.epilog
+
+sw.bb1:
+  call void @helper(i32 1)
+  br label %sw.epilog
+
+sw2:
+;; Here "case 2" is invalidated since the default case of the first switch
+;; does not include "case 2".
+  switch i32 %N, label %sw.epilog [
+    i32 2, label %sw.bb4
+    i32 4, label %sw.bb5
+  ], !prof !5
+
+sw.bb4:
+  call void @helper(i32 2)
+  br label %sw.epilog
+
+sw.bb5:
+  call void @helper(i32 3)
+  br label %sw.epilog
+
+sw.epilog:
+  ret void
+}
+
+;; This test is based on test1 but swapped the targets of the second branch.
+define void @test1_swap(i1 %a, i1 %b) {
+; CHECK: @test1_swap
+entry:
+  br i1 %a, label %Y, label %X, !prof !0
+; CHECK: br i1 %or.cond, label %Y, label %Z, !prof !4
+
+X:
+  %c = or i1 %b, false
+  br i1 %c, label %Y, label %Z, !prof !1
+
+Y:
+  call void @helper(i32 0)
+  ret void
+
+Z:
+  call void @helper(i32 1)
+  ret void
+}
+
+define void @test7(i1 %a, i1 %b) {
+; CHECK: @test7
+entry:
+  %c = or i1 %b, false
+  br i1 %a, label %Y, label %X, !prof !0
+; CHECK: br i1 %brmerge, label %Y, label %Z, !prof !5
+
+X:
+  br i1 %c, label %Y, label %Z, !prof !6
+
+Y:
+  call void @helper(i32 0)
+  ret void
+
+Z:
+  call void @helper(i32 1)
+  ret void
+}
+
+; Test basic folding to a conditional branch.
+define void @test8(i64 %x, i64 %y) nounwind {
+; CHECK: @test8
+entry:
+    %lt = icmp slt i64 %x, %y
+; CHECK: br i1 %lt, label %a, label %b, !prof !6
+    %qux = select i1 %lt, i32 0, i32 2
+    switch i32 %qux, label %bees [
+        i32 0, label %a
+        i32 1, label %b
+        i32 2, label %b
+    ], !prof !7
+a:
+    call void @helper(i32 0) nounwind
+    ret void
+b:
+    call void @helper(i32 1) nounwind
+    ret void
+bees:
+    call void @helper(i32 2) nounwind
+    ret void
+}
+
+; Test edge splitting when the default target has an icmp and an unconditional
+; branch.
+define i1 @test9(i32 %x, i32 %y) nounwind {
+; CHECK: @test9
+entry:
+    switch i32 %x, label %bees [
+        i32 0, label %a
+        i32 1, label %end
+        i32 2, label %end
+    ], !prof !7
+; CHECK: switch i32 %x, label %bees [
+; CHECK: i32 0, label %a
+; CHECK: i32 1, label %end
+; CHECK: i32 2, label %end
+; CHECK: i32 92, label %end
+; CHECK: ], !prof !7
+
+a:
+    call void @helper(i32 0) nounwind
+    %reta = icmp slt i32 %x, %y
+    ret i1 %reta
+
+bees:
+    %tmp = icmp eq i32 %x, 92
+    br label %end
+
+end:
+; CHECK: end:
+; CHECK: %ret = phi i1 [ true, %entry ], [ false, %bees ], [ true, %entry ], [ true, %entry ]
+    %ret = phi i1 [ true, %entry ], [%tmp, %bees], [true, %entry]
+    call void @helper(i32 2) nounwind
+    ret i1 %ret
+}
+
+define void @test10(i32 %x) nounwind readnone ssp noredzone {
+entry:
+ switch i32 %x, label %lor.rhs [
+   i32 2, label %lor.end
+   i32 1, label %lor.end
+   i32 3, label %lor.end
+ ], !prof !7
+
+lor.rhs:
+ call void @helper(i32 1) nounwind
+ ret void
+
+lor.end:
+ call void @helper(i32 0) nounwind
+ ret void
+
+; CHECK: test10
+; CHECK: %x.off = add i32 %x, -1
+; CHECK: %switch = icmp ult i32 %x.off, 3
+; CHECK: br i1 %switch, label %lor.end, label %lor.rhs, !prof !8
+}
+
+; Remove dead cases from the switch.
+define void @test11(i32 %x) nounwind {
+  %i = shl i32 %x, 1
+  switch i32 %i, label %a [
+    i32 21, label %b
+    i32 24, label %c
+  ], !prof !8
+; CHECK: %cond = icmp eq i32 %i, 24
+; CHECK: br i1 %cond, label %c, label %a, !prof !9
+
+a:
+ call void @helper(i32 0) nounwind
+ ret void
+b:
+ call void @helper(i32 1) nounwind
+ ret void
+c:
+ call void @helper(i32 2) nounwind
+ ret void
+}
+
 !0 = metadata !{metadata !"branch_weights", i32 3, i32 5}
 !1 = metadata !{metadata !"branch_weights", i32 1, i32 1}
 !2 = metadata !{metadata !"branch_weights", i32 1, i32 2}
+!3 = metadata !{metadata !"branch_weights", i32 4, i32 3, i32 2, i32 1}
+!4 = metadata !{metadata !"branch_weights", i32 4, i32 3, i32 2, i32 1}
+!5 = metadata !{metadata !"branch_weights", i32 7, i32 6, i32 5}
+!6 = metadata !{metadata !"branch_weights", i32 1, i32 3}
+!7 = metadata !{metadata !"branch_weights", i32 33, i32 9, i32 8, i32 7}
+!8 = metadata !{metadata !"branch_weights", i32 33, i32 9, i32 8}
 
 ; CHECK: !0 = metadata !{metadata !"branch_weights", i32 5, i32 11}
 ; CHECK: !1 = metadata !{metadata !"branch_weights", i32 1, i32 5}
-; CHECK-NOT: !2
+; CHECK: !2 = metadata !{metadata !"branch_weights", i32 7, i32 1, i32 2}
+; CHECK: !3 = metadata !{metadata !"branch_weights", i32 49, i32 12, i32 24, i32 35}
+; CHECK: !4 = metadata !{metadata !"branch_weights", i32 11, i32 5}
+; CHECK: !5 = metadata !{metadata !"branch_weights", i32 17, i32 15} 
+; CHECK: !6 = metadata !{metadata !"branch_weights", i32 9, i32 7}
+; CHECK: !7 = metadata !{metadata !"branch_weights", i32 17, i32 9, i32 8, i32 7, i32 17}
+; CHECK: !8 = metadata !{metadata !"branch_weights", i32 24, i32 33}
+; CHECK: !9 = metadata !{metadata !"branch_weights", i32 8, i32 33}
+; CHECK-NOT: !9

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/2009-02-12-StrTo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/2009-02-12-StrTo.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/2009-02-12-StrTo.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/2009-02-12-StrTo.ll (removed)
@@ -1,14 +0,0 @@
-; RUN: opt < %s -simplify-libcalls -S | FileCheck %s
-
-; Test that we add nocapture to the declaration, and to the second call only.
-
-; CHECK: declare float @strtol(i8*, i8** nocapture, i32) nounwind
-declare float @strtol(i8* %s, i8** %endptr, i32 %base)
-
-define void @foo(i8* %x, i8** %endptr) {
-; CHECK:  call float @strtol(i8* %x, i8** %endptr, i32 10)
-  call float @strtol(i8* %x, i8** %endptr, i32 10)
-; CHECK: %2 = call float @strtol(i8* nocapture %x, i8** null, i32 10)
-  call float @strtol(i8* %x, i8** null, i32 10)
-  ret void
-}

Modified: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/FFS.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/FFS.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/FFS.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/FFS.ll Tue Jan 15 11:16:16 2013
@@ -1,6 +1,7 @@
-; Test that the ToAsciiOptimizer works correctly
-; RUN: opt < %s -simplify-libcalls -S | \
-; RUN:   not grep "call.*@ffs"
+; Test that FFSOpt works correctly
+; RUN: opt < %s -simplify-libcalls -S | FileCheck %s
+
+; CHECK-NOT: call{{.*}}@ffs
 
 @non_const = external global i32		; <i32*> [#uses=1]
 
@@ -34,3 +35,11 @@
         %2 = call i32 @ffsll(i64 %0)            ; <i32> [#uses=1]
         ret i32 %2
 }
+
+; PR13028
+define i32 @b() nounwind {
+  %ffs = call i32 @ffsll(i64 0)
+  ret i32 %ffs
+; CHECK: @b
+; CHECK-NEXT: ret i32 0
+}

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StpCpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StpCpy.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StpCpy.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StpCpy.ll (removed)
@@ -1,43 +0,0 @@
-; Test that the StpCpyOptimizer works correctly
-; RUN: opt < %s -simplify-libcalls -S | FileCheck %s
-
-; This transformation requires the pointer size, as it assumes that size_t is
-; the size of a pointer.
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
-
- at hello = constant [6 x i8] c"hello\00"
-
-declare i8* @stpcpy(i8*, i8*)
-
-declare i8* @__stpcpy_chk(i8*, i8*, i32) nounwind
-
-declare i32 @llvm.objectsize.i32(i8*, i1) nounwind readonly
-
-define i32 @t1() {
-; CHECK: @t1
-  %target = alloca [1024 x i8]
-  %arg1 = getelementptr [1024 x i8]* %target, i32 0, i32 0
-  %arg2 = getelementptr [6 x i8]* @hello, i32 0, i32 0
-  %rslt1 = call i8* @stpcpy( i8* %arg1, i8* %arg2 )
-; CHECK: @llvm.memcpy.p0i8.p0i8.i32
-  ret i32 0
-}
-
-define i32 @t2() {
-; CHECK: @t2
-  %target = alloca [1024 x i8]
-  %arg1 = getelementptr [1024 x i8]* %target, i32 0, i32 0
-  %arg2 = getelementptr [6 x i8]* @hello, i32 0, i32 0
-  %tmp1 = call i32 @llvm.objectsize.i32(i8* %arg1, i1 false)
-  %rslt1 = call i8* @__stpcpy_chk(i8* %arg1, i8* %arg2, i32 %tmp1)
-; CHECK: @__memcpy_chk
-  ret i32 0
-}
-
-define i8* @t3(i8* %arg) {
-; CHECK: @t3
-  %stpcpy = tail call i8* @stpcpy(i8* %arg, i8* %arg)
-; CHECK: [[LEN:%[a-z]+]] = call i32 @strlen(i8* %arg)
-; CHECK-NEXT: getelementptr inbounds i8* %arg, i32 [[LEN]]
-  ret i8* %stpcpy
-}

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrCat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrCat.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrCat.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrCat.ll (removed)
@@ -1,33 +0,0 @@
-; Test that the StrCatOptimizer works correctly
-; PR3661
-; RUN: opt < %s -simplify-libcalls -S | \
-; RUN:   not grep "call.*strcat"
-; RUN: opt < %s -simplify-libcalls -S | \
-; RUN:   grep "puts.*%arg1"
-
-; This transformation requires the pointer size, as it assumes that size_t is
-; the size of a pointer.
-target datalayout = "-p:64:64:64"
-
- at hello = constant [6 x i8] c"hello\00"		; <[6 x i8]*> [#uses=1]
- at null = constant [1 x i8] zeroinitializer		; <[1 x i8]*> [#uses=1]
- at null_hello = constant [7 x i8] c"\00hello\00"		; <[7 x i8]*> [#uses=1]
-
-declare i8* @strcat(i8*, i8*)
-
-declare i32 @puts(i8*)
-
-define i32 @main() {
-	%target = alloca [1024 x i8]		; <[1024 x i8]*> [#uses=1]
-	%arg1 = getelementptr [1024 x i8]* %target, i32 0, i32 0		; <i8*> [#uses=2]
-	store i8 0, i8* %arg1
-	%arg2 = getelementptr [6 x i8]* @hello, i32 0, i32 0		; <i8*> [#uses=1]
-	%rslt1 = call i8* @strcat( i8* %arg1, i8* %arg2 )		; <i8*> [#uses=1]
-	%arg3 = getelementptr [1 x i8]* @null, i32 0, i32 0		; <i8*> [#uses=1]
-	%rslt2 = call i8* @strcat( i8* %rslt1, i8* %arg3 )		; <i8*> [#uses=1]
-	%arg4 = getelementptr [7 x i8]* @null_hello, i32 0, i32 0		; <i8*> [#uses=1]
-	%rslt3 = call i8* @strcat( i8* %rslt2, i8* %arg4 )		; <i8*> [#uses=1]
-	call i32 @puts( i8* %rslt3 )		; <i32>:1 [#uses=0]
-	ret i32 0
-}
-

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrChr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrChr.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrChr.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrChr.ll (removed)
@@ -1,26 +0,0 @@
-; Test that the StrChrOptimizer works correctly
-; RUN: opt < %s -simplify-libcalls -S | FileCheck %s
-
-; This transformation requires the pointer size, as it assumes that size_t is
-; the size of a pointer.
-target datalayout = "-p:64:64:64"
-
- at hello = constant [14 x i8] c"hello world\5Cn\00"
- at null = constant [1 x i8] zeroinitializer
-
-declare i8* @strchr(i8*, i32)
-
-define i32 @foo(i32 %index) {
-	%hello_p = getelementptr [14 x i8]* @hello, i32 0, i32 0
-	%null_p = getelementptr [1 x i8]* @null, i32 0, i32 0
-	%world = call i8* @strchr(i8* %hello_p, i32 119)
-; CHECK: getelementptr i8* %hello_p, i64 6
-	%ignore = call i8* @strchr(i8* %null_p, i32 119)
-; CHECK-NOT: call i8* strchr
-	%null = call i8* @strchr(i8* %hello_p, i32 0)
-; CHECK: getelementptr i8* %hello_p, i64 13
-	%result = call i8* @strchr(i8* %hello_p, i32 %index)
-; CHECK: call i8* @memchr(i8* %hello_p, i32 %index, i64 14)
-	ret i32 %index
-}
-

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrCmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrCmp.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrCmp.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrCmp.ll (removed)
@@ -1,65 +0,0 @@
-; Test that the StrCmpOptimizer works correctly
-; RUN: opt < %s -simplify-libcalls -S | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-
- at hello = constant [6 x i8] c"hello\00"		; <[6 x i8]*> [#uses=1]
- at hell = constant [5 x i8] c"hell\00"		; <[5 x i8]*> [#uses=1]
- at bell = constant [5 x i8] c"bell\00"		; <[5 x i8]*> [#uses=1]
- at null = constant [1 x i8] zeroinitializer		; <[1 x i8]*> [#uses=1]
-
-declare i32 @strcmp(i8*, i8*)
-
-; strcmp("", x) -> -*x
-define i32 @test1(i8* %str) {
-  %temp1 = call i32 @strcmp(i8* getelementptr inbounds ([1 x i8]* @null, i32 0, i32 0), i8* %str)
-  ret i32 %temp1
-  ; CHECK: @test1
-  ; CHECK: %strcmpload = load i8* %str
-  ; CHECK: %1 = zext i8 %strcmpload to i32
-  ; CHECK: %temp1 = sub i32 0, %1
-  ; CHECK: ret i32 %temp1
-}
-
-; strcmp(x, "") -> *x
-define i32 @test2(i8* %str) {
-  %temp1 = call i32 @strcmp(i8* %str, i8* getelementptr inbounds ([1 x i8]* @null, i32 0, i32 0))
-  ret i32 %temp1
-  ; CHECK: @test2
-  ; CHECK: %strcmpload = load i8* %str
-  ; CHECK: %temp1 = zext i8 %strcmpload to i32
-  ; CHECK: ret i32 %temp1
-}
-
-; strcmp(x, y)  -> cnst
-define i32 @test3() {
-  %temp1 = call i32 @strcmp(i8* getelementptr inbounds ([5 x i8]* @hell, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8]* @hello, i32 0, i32 0))
-  ret i32 %temp1
-  ; CHECK: @test3
-  ; CHECK: ret i32 -1
-}
-define i32 @test4() {
-  %temp1 = call i32 @strcmp(i8* getelementptr inbounds ([5 x i8]* @hell, i32 0, i32 0), i8* getelementptr inbounds ([1 x i8]* @null, i32 0, i32 0))
-  ret i32 %temp1
-  ; CHECK: @test4
-  ; CHECK: ret i32 1
-}
-
-; strcmp(x, y)   -> memcmp(x, y, <known length>)
-; (This transform is rather difficult to trigger in a useful manner)
-define i32 @test5(i1 %b) {
-  %sel = select i1 %b, i8* getelementptr inbounds ([5 x i8]* @hell, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8]* @bell, i32 0, i32 0)
-  %temp1 = call i32 @strcmp(i8* getelementptr inbounds ([6 x i8]* @hello, i32 0, i32 0), i8* %sel)
-  ret i32 %temp1
-  ; CHECK: @test5
-  ; CHECK: %memcmp = call i32 @memcmp(i8* getelementptr inbounds ([6 x i8]* @hello, i32 0, i32 0), i8* %sel, i32 5)
-  ; CHECK: ret i32 %memcmp
-}
-
-; strcmp(x,x)  -> 0
-define i32 @test6(i8* %str) {
-  %temp1 = call i32 @strcmp(i8* %str, i8* %str)
-  ret i32 %temp1
-  ; CHECK: @test6
-  ; CHECK: ret i32 0
-}

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrCpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrCpy.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrCpy.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrCpy.ll (removed)
@@ -1,37 +0,0 @@
-; Test that the StrCpyOptimizer works correctly
-; RUN: opt < %s -simplify-libcalls -S | FileCheck %s
-
-; This transformation requires the pointer size, as it assumes that size_t is
-; the size of a pointer.
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
-
- at hello = constant [6 x i8] c"hello\00"
-
-declare i8* @strcpy(i8*, i8*)
-
-declare i8* @__strcpy_chk(i8*, i8*, i32) nounwind
-
-declare i32 @llvm.objectsize.i32(i8*, i1) nounwind readonly
-
-; rdar://6839935
-
-define i32 @t1() {
-; CHECK: @t1
-  %target = alloca [1024 x i8]
-  %arg1 = getelementptr [1024 x i8]* %target, i32 0, i32 0
-  %arg2 = getelementptr [6 x i8]* @hello, i32 0, i32 0
-  %rslt1 = call i8* @strcpy( i8* %arg1, i8* %arg2 )
-; CHECK: @llvm.memcpy.p0i8.p0i8.i32
-  ret i32 0
-}
-
-define i32 @t2() {
-; CHECK: @t2
-  %target = alloca [1024 x i8]
-  %arg1 = getelementptr [1024 x i8]* %target, i32 0, i32 0
-  %arg2 = getelementptr [6 x i8]* @hello, i32 0, i32 0
-  %tmp1 = call i32 @llvm.objectsize.i32(i8* %arg1, i1 false)
-  %rslt1 = call i8* @__strcpy_chk(i8* %arg1, i8* %arg2, i32 %tmp1)
-; CHECK: @__memcpy_chk
-  ret i32 0
-}

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrLen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrLen.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrLen.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrLen.ll (removed)
@@ -1,62 +0,0 @@
-; Test that the StrCatOptimizer works correctly
-; RUN: opt < %s -simplify-libcalls -S | \
-; RUN:    not grep "call.*strlen"
-
-target datalayout = "e-p:32:32"
- at hello = constant [6 x i8] c"hello\00"		; <[6 x i8]*> [#uses=3]
- at null = constant [1 x i8] zeroinitializer		; <[1 x i8]*> [#uses=3]
- at null_hello = constant [7 x i8] c"\00hello\00"		; <[7 x i8]*> [#uses=1]
- at nullstring = constant i8 0
-
-declare i32 @strlen(i8*)
-
-define i32 @test1() {
-	%hello_p = getelementptr [6 x i8]* @hello, i32 0, i32 0		; <i8*> [#uses=1]
-	%hello_l = call i32 @strlen( i8* %hello_p )		; <i32> [#uses=1]
-	ret i32 %hello_l
-}
-
-define i32 @test2() {
-	%null_p = getelementptr [1 x i8]* @null, i32 0, i32 0		; <i8*> [#uses=1]
-	%null_l = call i32 @strlen( i8* %null_p )		; <i32> [#uses=1]
-	ret i32 %null_l
-}
-
-define i32 @test3() {
-	%null_hello_p = getelementptr [7 x i8]* @null_hello, i32 0, i32 0		; <i8*> [#uses=1]
-	%null_hello_l = call i32 @strlen( i8* %null_hello_p )		; <i32> [#uses=1]
-	ret i32 %null_hello_l
-}
-
-define i1 @test4() {
-	%hello_p = getelementptr [6 x i8]* @hello, i32 0, i32 0		; <i8*> [#uses=1]
-	%hello_l = call i32 @strlen( i8* %hello_p )		; <i32> [#uses=1]
-	%eq_hello = icmp eq i32 %hello_l, 0		; <i1> [#uses=1]
-	ret i1 %eq_hello
-}
-
-define i1 @test5() {
-	%null_p = getelementptr [1 x i8]* @null, i32 0, i32 0		; <i8*> [#uses=1]
-	%null_l = call i32 @strlen( i8* %null_p )		; <i32> [#uses=1]
-	%eq_null = icmp eq i32 %null_l, 0		; <i1> [#uses=1]
-	ret i1 %eq_null
-}
-
-define i1 @test6() {
-	%hello_p = getelementptr [6 x i8]* @hello, i32 0, i32 0		; <i8*> [#uses=1]
-	%hello_l = call i32 @strlen( i8* %hello_p )		; <i32> [#uses=1]
-	%ne_hello = icmp ne i32 %hello_l, 0		; <i1> [#uses=1]
-	ret i1 %ne_hello
-}
-
-define i1 @test7() {
-	%null_p = getelementptr [1 x i8]* @null, i32 0, i32 0		; <i8*> [#uses=1]
-	%null_l = call i32 @strlen( i8* %null_p )		; <i32> [#uses=1]
-	%ne_null = icmp ne i32 %null_l, 0		; <i1> [#uses=1]
-	ret i1 %ne_null
-}
-
-define i32 @test8() {
-	%len = tail call i32 @strlen(i8* @nullstring) nounwind
-	ret i32 %len
-}

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrNCat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrNCat.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrNCat.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrNCat.ll (removed)
@@ -1,31 +0,0 @@
-; Test that the StrNCatOptimizer works correctly
-; RUN: opt < %s -simplify-libcalls -S | \
-; RUN:   not grep "call.*strncat"
-; RUN: opt < %s -simplify-libcalls -S | \
-; RUN:   grep "puts.*%arg1"
-
-; This transformation requires the pointer size, as it assumes that size_t is
-; the size of a pointer.
-target datalayout = "-p:64:64:64"
-
- at hello = constant [6 x i8] c"hello\00"		; <[6 x i8]*> [#uses=1]
- at null = constant [1 x i8] zeroinitializer		; <[1 x i8]*> [#uses=1]
- at null_hello = constant [7 x i8] c"\00hello\00"		; <[7 x i8]*> [#uses=1]
-
-declare i8* @strncat(i8*, i8*, i32)
-
-declare i32 @puts(i8*)
-
-define i32 @main() {
-	%target = alloca [1024 x i8]		; <[1024 x i8]*> [#uses=1]
-	%arg1 = getelementptr [1024 x i8]* %target, i32 0, i32 0		; <i8*> [#uses=2]
-	store i8 0, i8* %arg1
-	%arg2 = getelementptr [6 x i8]* @hello, i32 0, i32 0		; <i8*> [#uses=1]
-	%rslt1 = call i8* @strncat( i8* %arg1, i8* %arg2, i32 6 )		; <i8*> [#uses=1]
-	%arg3 = getelementptr [1 x i8]* @null, i32 0, i32 0		; <i8*> [#uses=1]
-	%rslt2 = call i8* @strncat( i8* %rslt1, i8* %arg3, i32 42 )		; <i8*> [#uses=1]
-	%arg4 = getelementptr [7 x i8]* @null_hello, i32 0, i32 0		; <i8*> [#uses=1]
-	%rslt3 = call i8* @strncat( i8* %rslt2, i8* %arg4, i32 42 )		; <i8*> [#uses=1]
-	call i32 @puts( i8* %rslt3 )		; <i32>:1 [#uses=0]
-	ret i32 0
-}

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrNCmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrNCmp.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrNCmp.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrNCmp.ll (removed)
@@ -1,78 +0,0 @@
-; Test that the StrCmpOptimizer works correctly
-; RUN: opt < %s -simplify-libcalls -S | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-
- at hello = constant [6 x i8] c"hello\00"		; <[6 x i8]*> [#uses=1]
- at hell = constant [5 x i8] c"hell\00"		; <[5 x i8]*> [#uses=1]
- at bell = constant [5 x i8] c"bell\00"		; <[5 x i8]*> [#uses=1]
- at null = constant [1 x i8] zeroinitializer		; <[1 x i8]*> [#uses=1]
-
-declare i32 @strncmp(i8*, i8*, i32)
-
-; strcmp("", x) -> -*x
-define i32 @test1(i8* %str) {
-  %temp1 = call i32 @strncmp(i8* getelementptr inbounds ([1 x i8]* @null, i32 0, i32 0), i8* %str, i32 10)
-  ret i32 %temp1
-  ; CHECK: @test1
-  ; CHECK: %strcmpload = load i8* %str
-  ; CHECK: %1 = zext i8 %strcmpload to i32
-  ; CHECK: %temp1 = sub i32 0, %1
-  ; CHECK: ret i32 %temp1
-}
-
-; strcmp(x, "") -> *x
-define i32 @test2(i8* %str) {
-  %temp1 = call i32 @strncmp(i8* %str, i8* getelementptr inbounds ([1 x i8]* @null, i32 0, i32 0), i32 10)
-  ret i32 %temp1
-  ; CHECK: @test2
-  ; CHECK: %strcmpload = load i8* %str
-  ; CHECK: %temp1 = zext i8 %strcmpload to i32
-  ; CHECK: ret i32 %temp1
-}
-
-; strncmp(x, y, n)  -> cnst
-define i32 @test3() {
-  %temp1 = call i32 @strncmp(i8* getelementptr inbounds ([5 x i8]* @hell, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8]* @hello, i32 0, i32 0), i32 10)
-  ret i32 %temp1
-  ; CHECK: @test3
-  ; CHECK: ret i32 -1
-}
-define i32 @test4() {
-  %temp1 = call i32 @strncmp(i8* getelementptr inbounds ([5 x i8]* @hell, i32 0, i32 0), i8* getelementptr inbounds ([1 x i8]* @null, i32 0, i32 0), i32 10)
-  ret i32 %temp1
-  ; CHECK: @test4
-  ; CHECK: ret i32 1
-}
-define i32 @test5() {
-  %temp1 = call i32 @strncmp(i8* getelementptr inbounds ([5 x i8]* @hell, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8]* @hello, i32 0, i32 0), i32 4)
-  ret i32 %temp1
-  ; CHECK: @test5
-  ; CHECK: ret i32 0
-}
-
-; strncmp(x,y,1) -> memcmp(x,y,1)
-define i32 @test6(i8* %str1, i8* %str2) {
-  %temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 1)
-  ret i32 %temp1
-  ; CHECK: @test6
-  ; CHECK: load i8*
-  ; CHECK: load i8*
-  ; CHECK: sub i32
-}
-
-; strncmp(x,y,0)   -> 0
-define i32 @test7(i8* %str1, i8* %str2) {
-  %temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 0)
-  ret i32 %temp1
-  ; CHECK: @test7
-  ; CHECK: ret i32 0
-}
-
-; strncmp(x,x,n)  -> 0
-define i32 @test8(i8* %str, i32 %n) {
-  %temp1 = call i32 @strncmp(i8* %str, i8* %str, i32 %n)
-  ret i32 %temp1
-  ; CHECK: @test8
-  ; CHECK: ret i32 0
-}

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrNCpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrNCpy.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrNCpy.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrNCpy.ll (removed)
@@ -1,29 +0,0 @@
-; Test that the StrNCpyOptimizer works correctly
-; RUN: opt < %s -simplify-libcalls -S | \
-; RUN:   not grep "call.*strncpy"
-
-; This transformation requires the pointer size, as it assumes that size_t is
-; the size of a pointer.
-target datalayout = "-p:64:64:64"
-
- at hello = constant [6 x i8] c"hello\00"		; <[6 x i8]*> [#uses=1]
- at null = constant [1 x i8] zeroinitializer		; <[1 x i8]*> [#uses=1]
- at null_hello = constant [7 x i8] c"\00hello\00"		; <[7 x i8]*> [#uses=1]
-
-declare i8* @strncpy(i8*, i8*, i32)
-
-declare i32 @puts(i8*)
-
-define i32 @main() {
-	%target = alloca [1024 x i8]		; <[1024 x i8]*> [#uses=1]
-	%arg1 = getelementptr [1024 x i8]* %target, i32 0, i32 0		; <i8*> [#uses=2]
-	store i8 0, i8* %arg1
-	%arg2 = getelementptr [6 x i8]* @hello, i32 0, i32 0		; <i8*> [#uses=1]
-	%rslt1 = call i8* @strncpy( i8* %arg1, i8* %arg2, i32 6 )		; <i8*> [#uses=1]
-	%arg3 = getelementptr [1 x i8]* @null, i32 0, i32 0		; <i8*> [#uses=1]
-	%rslt2 = call i8* @strncpy( i8* %rslt1, i8* %arg3, i32 42 )		; <i8*> [#uses=1]
-	%arg4 = getelementptr [7 x i8]* @null_hello, i32 0, i32 0		; <i8*> [#uses=1]
-	%rslt3 = call i8* @strncpy( i8* %rslt2, i8* %arg4, i32 42 )		; <i8*> [#uses=1]
-	call i32 @puts( i8* %rslt3 )		; <i32>:1 [#uses=0]
-	ret i32 0
-}

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrPBrk.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrPBrk.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrPBrk.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrPBrk.ll (removed)
@@ -1,25 +0,0 @@
-; RUN: opt < %s -simplify-libcalls -S | FileCheck %s
-
-target datalayout = "-p:64:64:64"
-
- at hello = constant [12 x i8] c"hello world\00"
- at w = constant [2 x i8] c"w\00"
- at null = constant [1 x i8] zeroinitializer
-
-declare i8* @strpbrk(i8*, i8*)
-
-define void @test(i8* %s1, i8* %s2) {
-	%hello_p = getelementptr [12 x i8]* @hello, i32 0, i32 0
-	%w_p = getelementptr [2 x i8]* @w, i32 0, i32 0
-	%null_p = getelementptr [1 x i8]* @null, i32 0, i32 0
-	%test1 = call i8* @strpbrk(i8* %null_p, i8* %s2)
-	%test2 = call i8* @strpbrk(i8* %s1, i8* %null_p)
-; CHECK-NOT: call i8* @strpbrk
-	%test3 = call i8* @strpbrk(i8* %s1, i8* %w_p)
-; CHECK: call i8* @strchr(i8* %s1, i32 119)
-	%test4 = call i8* @strpbrk(i8* %hello_p, i8* %w_p)
-; CHECK: getelementptr i8* %hello_p, i64 6
-	%test5 = call i8* @strpbrk(i8* %s1, i8* %s2)
-; CHECK: call i8* @strpbrk(i8* %s1, i8* %s2)
-	ret void
-}

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrRChr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrRChr.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrRChr.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrRChr.ll (removed)
@@ -1,23 +0,0 @@
-; Test that the StrRChrOptimizer works correctly
-; RUN: opt < %s -simplify-libcalls -S | FileCheck %s
-
-target datalayout = "-p:64:64:64"
-
- at hello = constant [14 x i8] c"hello world\5Cn\00"
- at null = constant [1 x i8] zeroinitializer
-
-declare i8* @strrchr(i8*, i32)
-
-define void @foo(i8* %bar) {
-	%hello_p = getelementptr [14 x i8]* @hello, i32 0, i32 0
-	%null_p = getelementptr [1 x i8]* @null, i32 0, i32 0
-	%world = call i8* @strrchr(i8* %hello_p, i32 119)
-; CHECK: getelementptr i8* %hello_p, i64 6
-	%ignore = call i8* @strrchr(i8* %null_p, i32 119)
-; CHECK-NOT: call i8* strrchr
-	%null = call i8* @strrchr(i8* %hello_p, i32 0)
-; CHECK: getelementptr i8* %hello_p, i64 13
-	%strchr = call i8* @strrchr(i8* %bar, i32 0)
-; CHECK: call i8* @strchr(i8* %bar, i32 0)
-	ret void
-}

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrSpn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrSpn.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrSpn.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrSpn.ll (removed)
@@ -1,41 +0,0 @@
-; RUN: opt < %s -simplify-libcalls -S | FileCheck %s
-
-target datalayout = "-p:64:64:64"
-
- at abcba = constant [6 x i8] c"abcba\00"
- at abc = constant [4 x i8] c"abc\00"
- at null = constant [1 x i8] zeroinitializer
-
-declare i64 @strspn(i8*, i8*)
-
-define i64 @testspn(i8* %s1, i8* %s2) {
-  	%abcba_p = getelementptr [6 x i8]* @abcba, i32 0, i32 0
-	%abc_p = getelementptr [4 x i8]* @abc, i32 0, i32 0
-	%null_p = getelementptr [1 x i8]* @null, i32 0, i32 0
-	%test1 = call i64 @strspn(i8* %s1, i8* %null_p)
-	%test2 = call i64 @strspn(i8* %null_p, i8* %s2)
-	%test3 = call i64 @strspn(i8* %abcba_p, i8* %abc_p)
-; CHECK-NOT: call i64 @strspn
-	%test4 = call i64 @strspn(i8* %s1, i8* %s2)
-; CHECK: call i64 @strspn(i8* %s1, i8* %s2)
-	ret i64 %test3
-; CHECK: ret i64 5
-}
-
-declare i64 @strcspn(i8*, i8*)
-
-define i64 @testcspn(i8* %s1, i8* %s2) {
-  	%abcba_p = getelementptr [6 x i8]* @abcba, i32 0, i32 0
-	%abc_p = getelementptr [4 x i8]* @abc, i32 0, i32 0
-	%null_p = getelementptr [1 x i8]* @null, i32 0, i32 0
-	%test1 = call i64 @strcspn(i8* %s1, i8* %null_p)
-; CHECK: call i64 @strlen(i8* %s1)
-	%test2 = call i64 @strcspn(i8* %null_p, i8* %s2)
-	%test3 = call i64 @strcspn(i8* %abcba_p, i8* %abc_p)
-; CHECK-NOT: call i64 @strcspn
-	%test4 = call i64 @strcspn(i8* %s1, i8* %s2)
-; CHECK: call i64 @strcspn(i8* %s1, i8* %s2)
-        %add0 = add i64 %test1, %test3
-; CHECK: add i64 %{{.+}}, 0
-	ret i64 %add0
-}

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrStr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrStr.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrStr.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/StrStr.ll (removed)
@@ -1,60 +0,0 @@
-; RUN: opt < %s -simplify-libcalls -S | FileCheck %s
-; PR5783
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
-target triple = "i386-apple-darwin9.0"
-
- at .str = private constant [1 x i8] zeroinitializer ; <[1 x i8]*> [#uses=1]
- at .str1 = private constant [2 x i8] c"a\00"        ; <[2 x i8]*> [#uses=1]
- at .str2 = private constant [6 x i8] c"abcde\00"    ; <[6 x i8]*> [#uses=1]
- at .str3 = private constant [4 x i8] c"bcd\00"      ; <[4 x i8]*> [#uses=1]
-
-define i8* @test1(i8* %P) nounwind readonly {
-entry:
-  %call = tail call i8* @strstr(i8* %P, i8* getelementptr inbounds ([1 x i8]* @.str, i32 0, i32 0)) nounwind ; <i8*> [#uses=1]
-  ret i8* %call
-; strstr(P, "") -> P
-; CHECK: @test1
-; CHECK: ret i8* %P
-}
-
-declare i8* @strstr(i8*, i8* nocapture) nounwind readonly
-
-define i8* @test2(i8* %P) nounwind readonly {
-entry:
-  %call = tail call i8* @strstr(i8* %P, i8* getelementptr inbounds ([2 x i8]* @.str1, i32 0, i32 0)) nounwind ; <i8*> [#uses=1]
-  ret i8* %call
-; strstr(P, "a") -> strchr(P, 'a')
-; CHECK: @test2
-; CHECK: @strchr(i8* %P, i32 97)
-}
-
-define i8* @test3(i8* nocapture %P) nounwind readonly {
-entry:
-  %call = tail call i8* @strstr(i8* getelementptr inbounds ([6 x i8]* @.str2, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8]* @.str3, i32 0, i32 0)) nounwind ; <i8*> [#uses=1]
-  ret i8* %call
-; strstr("abcde", "bcd") -> "abcde"+1
-; CHECK: @test3
-; CHECK: getelementptr inbounds ([6 x i8]* @.str2, i32 0, i64 1)
-}
-
-define i8* @test4(i8* %P) nounwind readonly {
-entry:
-  %call = tail call i8* @strstr(i8* %P, i8* %P) nounwind ; <i8*> [#uses=1]
-  ret i8* %call
-; strstr(P, P) -> P
-; CHECK: @test4
-; CHECK: ret i8* %P
-}
-
-define i1 @test5(i8* %P, i8* %Q) nounwind readonly {
-entry:
-  %call = tail call i8* @strstr(i8* %P, i8* %Q) nounwind ; <i8*> [#uses=1]
-  %cmp = icmp eq i8* %call, %P
-  ret i1 %cmp
-; CHECK: @test5
-; CHECK: [[LEN:%[a-z]+]] = call {{i[0-9]+}} @strlen(i8* %Q)
-; CHECK: [[NCMP:%[a-z]+]] = call {{i[0-9]+}} @strncmp(i8* %P, i8* %Q, {{i[0-9]+}} [[LEN]])
-; CHECK: icmp eq {{i[0-9]+}} [[NCMP]], 0
-; CHECK: ret i1
-}

Modified: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/floor.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/floor.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/floor.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/floor.ll Tue Jan 15 11:16:16 2013
@@ -9,6 +9,8 @@
 ; DO-SIMPLIFY: call float @ceilf(
 ; DO-SIMPLIFY: call float @roundf(
 ; DO-SIMPLIFY: call float @nearbyintf(
+; DO-SIMPLIFY: call float @truncf(
+; DO-SIMPLIFY: call float @fabsf(
 
 ; C89-SIMPLIFY: call float @floorf(
 ; C89-SIMPLIFY: call float @ceilf(
@@ -19,6 +21,8 @@
 ; DONT-SIMPLIFY: call double @ceil(
 ; DONT-SIMPLIFY: call double @round(
 ; DONT-SIMPLIFY: call double @nearbyint(
+; DONT-SIMPLIFY: call double @trunc(
+; DONT-SIMPLIFY: call double @fabs(
 
 declare double @floor(double)
 
@@ -28,6 +32,10 @@
 
 declare double @nearbyint(double)
 
+declare double @trunc(double)
+
+declare double @fabs(double)
+
 define float @test_floor(float %C) {
 	%D = fpext float %C to double		; <double> [#uses=1]
         ; --> floorf
@@ -60,3 +68,18 @@
 	ret float %F
 }
 
+define float @test_trunc(float %C) {
+	%D = fpext float %C to double
+	; --> truncf
+        %E = call double @trunc(double %D)
+	%F = fptrunc double %E to float
+	ret float %F
+}
+
+define float @test_fabs(float %C) {
+	%D = fpext float %C to double
+	; --> fabsf
+        %E = call double @fabs(double %D)
+	%F = fptrunc double %E to float
+	ret float %F
+}

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memcmp.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memcmp.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memcmp.ll (removed)
@@ -1,35 +0,0 @@
-; Test that the memcmpOptimizer works correctly
-; RUN: opt < %s -simplify-libcalls -S | FileCheck %s
-
- at h = constant [2 x i8] c"h\00"		; <[2 x i8]*> [#uses=0]
- at hel = constant [4 x i8] c"hel\00"		; <[4 x i8]*> [#uses=0]
- at hello_u = constant [8 x i8] c"hello_u\00"		; <[8 x i8]*> [#uses=0]
-
-declare i32 @memcmp(i8*, i8*, i32)
-
-define void @test(i8* %P, i8* %Q, i32 %N, i32* %IP, i1* %BP) {
-	%A = call i32 @memcmp( i8* %P, i8* %P, i32 %N )		; <i32> [#uses=1]
-; CHECK-NOT: call {{.*}} memcmp
-; CHECK: store volatile
-	store volatile i32 %A, i32* %IP
-	%B = call i32 @memcmp( i8* %P, i8* %Q, i32 0 )		; <i32> [#uses=1]
-; CHECK-NOT: call {{.*}} memcmp
-; CHECK: store volatile
-	store volatile i32 %B, i32* %IP
-	%C = call i32 @memcmp( i8* %P, i8* %Q, i32 1 )		; <i32> [#uses=1]
-; CHECK: load
-; CHECK: zext
-; CHECK: load
-; CHECK: zext
-; CHECK: sub
-; CHECK: store volatile
-	store volatile i32 %C, i32* %IP
-  %F = call i32 @memcmp(i8* getelementptr ([4 x i8]* @hel, i32 0, i32 0),
-                        i8* getelementptr ([8 x i8]* @hello_u, i32 0, i32 0),
-                        i32 3)
-; CHECK-NOT: call {{.*}} memcmp
-; CHECK: store volatile
-  store volatile i32 %F, i32* %IP
-	ret void
-}
-

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memmove.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memmove.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memmove.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memmove.ll (removed)
@@ -1,12 +0,0 @@
-; RUN: opt < %s -simplify-libcalls -S | grep "llvm.memmove"
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i686-pc-linux-gnu"
-
-define i8* @test(i8* %a, i8* %b, i32 %x) {
-entry:
-	%call = call i8* @memmove(i8* %a, i8* %b, i32 %x )
-	ret i8* %call
-}
-
-declare i8* @memmove(i8*,i8*,i32)
-

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memset-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memset-64.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memset-64.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memset-64.ll (removed)
@@ -1,12 +0,0 @@
-; RUN: opt < %s -simplify-libcalls -S | grep "llvm.memset"
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-pc-linux-gnu"
-
-define void @a(i8* %x) nounwind {
-entry:
-	%call = call i8* @memset(i8* %x, i32 1, i64 100)		; <i8*> [#uses=0]
-	ret void
-}
-
-declare i8* @memset(i8*, i32, i64)
-

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memset.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memset.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/memset.ll (removed)
@@ -1,12 +0,0 @@
-; RUN: opt < %s -simplify-libcalls -S | grep "llvm.memset"
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i686-pc-linux-gnu"
-
-define i8* @test(i8* %a, i32 %b, i32 %x) {
-entry:
-	%call = call i8* @memset(i8* %a, i32 %b, i32 %x )
-	ret i8* %call
-}
-
-declare i8* @memset(i8*,i32,i32)
-

Removed: llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/weak-symbols.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/weak-symbols.ll?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/weak-symbols.ll (original)
+++ llvm/branches/AMDILBackend/test/Transforms/SimplifyLibCalls/weak-symbols.ll (removed)
@@ -1,26 +0,0 @@
-; RUN: opt < %s -simplify-libcalls -S | FileCheck %s
-; PR4738
-
-; SimplifyLibcalls shouldn't assume anything about weak symbols.
-
- at real_init = weak_odr constant [2 x i8] c"y\00"
- at fake_init = weak constant [2 x i8] c"y\00"
- at .str = private constant [2 x i8] c"y\00"
-
-; CHECK: define i32 @foo
-; CHECK: call i32 @strcmp
-define i32 @foo() nounwind {
-entry:
-  %t0 = call i32 @strcmp(i8* getelementptr inbounds ([2 x i8]* @fake_init, i64 0, i64 0), i8* getelementptr inbounds ([2 x i8]* @.str, i64 0, i64 0)) nounwind readonly
-  ret i32 %t0
-}
-
-; CHECK: define i32 @bar
-; CHECK: ret i32 0
-define i32 @bar() nounwind {
-entry:
-  %t0 = call i32 @strcmp(i8* getelementptr inbounds ([2 x i8]* @real_init, i64 0, i64 0), i8* getelementptr inbounds ([2 x i8]* @.str, i64 0, i64 0)) nounwind readonly
-  ret i32 %t0
-}
-
-declare i32 @strcmp(i8*, i8*) nounwind readonly

Modified: llvm/branches/AMDILBackend/test/Verifier/invoke.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/Verifier/invoke.ll?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/Verifier/invoke.ll (original)
+++ llvm/branches/AMDILBackend/test/Verifier/invoke.ll Tue Jan 15 11:16:16 2013
@@ -19,7 +19,6 @@
 	br label %L
 L:		; preds = %L2, %L1, %L1
 ; CHECK: The unwind destination does not have a landingpad instruction
-; CHECK: Instruction does not dominate all uses
 	ret i32 %A
 }
 
@@ -34,9 +33,12 @@
 entry:
 ; OK
   invoke void @llvm.donothing()
-  to label %cont unwind label %cont
+  to label %conta unwind label %contb
 
-cont:
+conta:
+  ret void
+
+contb:
   %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
           filter [0 x i8*] zeroinitializer
   ret void
@@ -63,3 +65,15 @@
   %call = call i32 @fn(i8 (i8, i8)* @llvm.expect.i8)
   ret i32 %call
 }
+
+define void @f4() {
+entry:
+  invoke void @llvm.donothing()
+  to label %cont unwind label %cont
+
+cont:
+; CHECK: Block containing LandingPadInst must be jumped to only by the unwind edge of an invoke.
+  %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+          filter [0 x i8*] zeroinitializer
+  ret void
+}

Modified: llvm/branches/AMDILBackend/test/lit.cfg
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/lit.cfg?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/lit.cfg (original)
+++ llvm/branches/AMDILBackend/test/lit.cfg Tue Jan 15 11:16:16 2013
@@ -5,6 +5,7 @@
 import os
 import sys
 import re
+import platform
 
 # name: The name of this test suite.
 config.name = 'LLVM'
@@ -139,9 +140,22 @@
 
 ###
 
-# When running under valgrind, we mangle '-vg' or '-vg_leak' onto the end of the
-# triple so we can check it with XFAIL and XTARGET.
-config.target_triple += lit.valgrindTriple
+# Provide a target triple for mcjit tests
+mcjit_triple = config.target_triple
+# Force ELF format on Windows
+if re.search(r'cygwin|mingw32|win32', mcjit_triple):
+  mcjit_triple += "-elf"
+config.substitutions.append( ('%mcjit_triple', mcjit_triple) )
+
+# Provide a substitution for those tests that need to run the jit to obtain data
+# but simply want to use the jit currently considered most reliable for the platform
+# FIXME: ppc32 is not ready for mcjit.
+if 'arm' in config.target_triple \
+   or 'powerpc64' in config.target_triple:
+    defaultIsMCJIT = 'true'
+else:
+    defaultIsMCJIT = 'false'
+config.substitutions.append( ('%defaultjit', '-use-mcjit='+defaultIsMCJIT) )
 
 # Process jit implementation option
 jit_impl_cfg = lit.params.get('jit_impl', None)
@@ -230,6 +244,10 @@
 if loadable_module:
     config.available_features.add('loadable_module')
 
+# LTO on OS X
+if config.lto_is_enabled == "1" and platform.system() == "Darwin":
+    config.available_features.add('lto_on_osx')
+
 # llc knows whether he is compiled with -DNDEBUG.
 import subprocess
 try:

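For reference, a test picks up the new %mcjit_triple and %defaultjit substitutions in its RUN lines. A minimal hypothetical example follows (not part of this commit; the exact lli flags vary by test):

; RUN: lli -mtriple=%mcjit_triple -use-mcjit %s > /dev/null
; RUN: lli %defaultjit %s > /dev/null

define i32 @main() {
entry:
  ret i32 0
}
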
Modified: llvm/branches/AMDILBackend/test/lit.site.cfg.in
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/test/lit.site.cfg.in?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/test/lit.site.cfg.in (original)
+++ llvm/branches/AMDILBackend/test/lit.site.cfg.in Tue Jan 15 11:16:16 2013
@@ -11,6 +11,7 @@
 config.ocamlopt_executable = "@OCAMLOPT@"
 config.enable_shared = @ENABLE_SHARED@
 config.enable_assertions = @ENABLE_ASSERTIONS@
+config.lto_is_enabled = "@LTO_IS_ENABLED@"
 config.targets_to_build = "@TARGETS_TO_BUILD@"
 config.llvm_bindings = "@LLVM_BINDINGS@"
 config.host_os = "@HOST_OS@"

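Together with the lto_on_osx feature added to lit.cfg above, this lets Darwin-only libLTO tests gate themselves on the build configuration. A hypothetical sketch of such a guarded test (the REQUIRES line is the relevant part; the RUN line is only a placeholder):

; REQUIRES: lto_on_osx
; RUN: llvm-as < %s > %t.bc

define void @f() {
  ret void
}
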
Propchange: llvm/branches/AMDILBackend/tools/
------------------------------------------------------------------------------
--- svn:ignore (added)
+++ svn:ignore Tue Jan 15 11:16:16 2013
@@ -0,0 +1,3 @@
+clang
+lld
+lldb

Modified: llvm/branches/AMDILBackend/tools/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/tools/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -36,6 +36,7 @@
 add_subdirectory(bugpoint-passes)
 add_subdirectory(llvm-bcanalyzer)
 add_subdirectory(llvm-stress)
+add_subdirectory(llvm-mcmarkup)
 
 if( NOT WIN32 )
   add_subdirectory(lto)

Modified: llvm/branches/AMDILBackend/tools/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/LLVMBuild.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/LLVMBuild.txt (original)
+++ llvm/branches/AMDILBackend/tools/LLVMBuild.txt Tue Jan 15 11:16:16 2013
@@ -16,7 +16,7 @@
 ;===------------------------------------------------------------------------===;
 
 [common]
-subdirectories = bugpoint llc lli llvm-ar llvm-as llvm-bcanalyzer llvm-cov llvm-diff llvm-dis llvm-dwarfdump llvm-extract llvm-link llvm-mc llvm-nm llvm-objdump llvm-prof llvm-ranlib llvm-rtdyld llvm-size macho-dump opt
+subdirectories = bugpoint llc lli llvm-ar llvm-as llvm-bcanalyzer llvm-cov llvm-diff llvm-dis llvm-dwarfdump llvm-extract llvm-link llvm-mc llvm-nm llvm-objdump llvm-prof llvm-ranlib llvm-rtdyld llvm-size macho-dump opt llvm-mcmarkup
 
 [component_0]
 type = Group

Modified: llvm/branches/AMDILBackend/tools/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/Makefile?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/Makefile (original)
+++ llvm/branches/AMDILBackend/tools/Makefile Tue Jan 15 11:16:16 2013
@@ -34,7 +34,7 @@
                  bugpoint llvm-bcanalyzer \
                  llvm-diff macho-dump llvm-objdump llvm-readobj \
 	         llvm-rtdyld llvm-dwarfdump llvm-cov \
-	         llvm-size llvm-stress
+	         llvm-size llvm-stress llvm-mcmarkup
 
 # Let users override the set of tools to build from the command line.
 ifdef ONLY_TOOLS

Modified: llvm/branches/AMDILBackend/tools/bugpoint/ExtractFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/bugpoint/ExtractFunction.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/bugpoint/ExtractFunction.cpp (original)
+++ llvm/branches/AMDILBackend/tools/bugpoint/ExtractFunction.cpp Tue Jan 15 11:16:16 2013
@@ -14,6 +14,7 @@
 
 #include "BugDriver.h"
 #include "llvm/Constants.h"
+#include "llvm/DataLayout.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/LLVMContext.h"
 #include "llvm/Module.h"
@@ -25,7 +26,6 @@
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils/Cloning.h"
 #include "llvm/Transforms/Utils/CodeExtractor.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/FileUtilities.h"

Modified: llvm/branches/AMDILBackend/tools/bugpoint/OptimizerDriver.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/bugpoint/OptimizerDriver.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/bugpoint/OptimizerDriver.cpp (original)
+++ llvm/branches/AMDILBackend/tools/bugpoint/OptimizerDriver.cpp Tue Jan 15 11:16:16 2013
@@ -16,11 +16,11 @@
 //===----------------------------------------------------------------------===//
 
 #include "BugDriver.h"
+#include "llvm/DataLayout.h"
 #include "llvm/Module.h"
 #include "llvm/PassManager.h"
 #include "llvm/Analysis/Verifier.h"
 #include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Support/FileUtilities.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/SystemUtils.h"

Modified: llvm/branches/AMDILBackend/tools/gold/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/gold/Makefile?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/gold/Makefile (original)
+++ llvm/branches/AMDILBackend/tools/gold/Makefile Tue Jan 15 11:16:16 2013
@@ -24,6 +24,8 @@
 # Because off_t is used in the public API, the largefile parts are required for
 # ABI compatibility.
 CXXFLAGS += -I$(BINUTILS_INCDIR) -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
-CXXFLAGS += -L$(SharedLibDir)/$(SharedPrefix) -lLTO
+LDFLAGS += -L$(SharedLibDir)/$(SharedPrefix)
 
 include $(LEVEL)/Makefile.common
+
+LIBS += -lLTO

Modified: llvm/branches/AMDILBackend/tools/gold/gold-plugin.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/gold/gold-plugin.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/gold/gold-plugin.cpp (original)
+++ llvm/branches/AMDILBackend/tools/gold/gold-plugin.cpp Tue Jan 15 11:16:16 2013
@@ -378,9 +378,6 @@
     }
   }
 
-  // If we don't preserve any symbols, libLTO will assume that all symbols are
-  // needed. Keep all symbols unless we're producing a final executable.
-  bool anySymbolsPreserved = false;
   for (std::list<claimed_file>::iterator I = Modules.begin(),
          E = Modules.end(); I != E; ++I) {
     if (I->syms.empty())
@@ -389,7 +386,6 @@
     for (unsigned i = 0, e = I->syms.size(); i != e; i++) {
       if (I->syms[i].resolution == LDPR_PREVAILING_DEF) {
         lto_codegen_add_must_preserve_symbol(code_gen, I->syms[i].name);
-        anySymbolsPreserved = true;
 
         if (options::generate_api_file)
           api_file << I->syms[i].name << "\n";
@@ -400,12 +396,6 @@
   if (options::generate_api_file)
     api_file.close();
 
-  if (!anySymbolsPreserved) {
-    // All of the IL is unnecessary!
-    lto_codegen_dispose(code_gen);
-    return LDPS_OK;
-  }
-
   lto_codegen_set_pic_model(code_gen, output_type);
   lto_codegen_set_debug_model(code_gen, LTO_DEBUG_MODEL_DWARF);
   if (!options::mcpu.empty())

Modified: llvm/branches/AMDILBackend/tools/llc/llc.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llc/llc.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llc/llc.cpp (original)
+++ llvm/branches/AMDILBackend/tools/llc/llc.cpp Tue Jan 15 11:16:16 2013
@@ -14,12 +14,14 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/LLVMContext.h"
+#include "llvm/DataLayout.h"
 #include "llvm/Module.h"
 #include "llvm/PassManager.h"
 #include "llvm/Pass.h"
 #include "llvm/ADT/Triple.h"
 #include "llvm/Assembly/PrintModulePass.h"
 #include "llvm/Support/IRReader.h"
+#include "llvm/CodeGen/CommandFlags.h"
 #include "llvm/CodeGen/LinkAllAsmWriterComponents.h"
 #include "llvm/CodeGen/LinkAllCodegenComponents.h"
 #include "llvm/MC/SubtargetFeature.h"
@@ -34,7 +36,6 @@
 #include "llvm/Support/Signals.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Support/TargetSelect.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include <memory>
@@ -62,211 +63,13 @@
 static cl::opt<std::string>
 TargetTriple("mtriple", cl::desc("Override target triple for module"));
 
-static cl::opt<std::string>
-MArch("march", cl::desc("Architecture to generate code for (see --version)"));
-
-static cl::opt<std::string>
-MCPU("mcpu",
-  cl::desc("Target a specific cpu type (-mcpu=help for details)"),
-  cl::value_desc("cpu-name"),
-  cl::init(""));
-
-static cl::list<std::string>
-MAttrs("mattr",
-  cl::CommaSeparated,
-  cl::desc("Target specific attributes (-mattr=help for details)"),
-  cl::value_desc("a1,+a2,-a3,..."));
-
-static cl::opt<Reloc::Model>
-RelocModel("relocation-model",
-             cl::desc("Choose relocation model"),
-             cl::init(Reloc::Default),
-             cl::values(
-            clEnumValN(Reloc::Default, "default",
-                       "Target default relocation model"),
-            clEnumValN(Reloc::Static, "static",
-                       "Non-relocatable code"),
-            clEnumValN(Reloc::PIC_, "pic",
-                       "Fully relocatable, position independent code"),
-            clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
-                       "Relocatable external references, non-relocatable code"),
-            clEnumValEnd));
-
-static cl::opt<llvm::CodeModel::Model>
-CMModel("code-model",
-        cl::desc("Choose code model"),
-        cl::init(CodeModel::Default),
-        cl::values(clEnumValN(CodeModel::Default, "default",
-                              "Target default code model"),
-                   clEnumValN(CodeModel::Small, "small",
-                              "Small code model"),
-                   clEnumValN(CodeModel::Kernel, "kernel",
-                              "Kernel code model"),
-                   clEnumValN(CodeModel::Medium, "medium",
-                              "Medium code model"),
-                   clEnumValN(CodeModel::Large, "large",
-                              "Large code model"),
-                   clEnumValEnd));
-
-static cl::opt<bool>
-RelaxAll("mc-relax-all",
-  cl::desc("When used with filetype=obj, "
-           "relax all fixups in the emitted object file"));
-
-cl::opt<TargetMachine::CodeGenFileType>
-FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
-  cl::desc("Choose a file type (not all types are supported by all targets):"),
-  cl::values(
-       clEnumValN(TargetMachine::CGFT_AssemblyFile, "asm",
-                  "Emit an assembly ('.s') file"),
-       clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
-                  "Emit a native object ('.o') file"),
-       clEnumValN(TargetMachine::CGFT_Null, "null",
-                  "Emit nothing, for performance testing"),
-       clEnumValEnd));
-
 cl::opt<bool> NoVerify("disable-verify", cl::Hidden,
                        cl::desc("Do not verify input module"));
 
-cl::opt<bool> DisableDotLoc("disable-dot-loc", cl::Hidden,
-                            cl::desc("Do not use .loc entries"));
-
-cl::opt<bool> DisableCFI("disable-cfi", cl::Hidden,
-                         cl::desc("Do not use .cfi_* directives"));
-
-cl::opt<bool> EnableDwarfDirectory("enable-dwarf-directory", cl::Hidden,
-    cl::desc("Use .file directives with an explicit directory."));
-
-static cl::opt<bool>
-DisableRedZone("disable-red-zone",
-  cl::desc("Do not emit code that uses the red zone."),
-  cl::init(false));
-
-static cl::opt<bool>
-EnableFPMAD("enable-fp-mad",
-  cl::desc("Enable less precise MAD instructions to be generated"),
-  cl::init(false));
-
-static cl::opt<bool>
-DisableFPElim("disable-fp-elim",
-  cl::desc("Disable frame pointer elimination optimization"),
-  cl::init(false));
-
-static cl::opt<bool>
-DisableFPElimNonLeaf("disable-non-leaf-fp-elim",
-  cl::desc("Disable frame pointer elimination optimization for non-leaf funcs"),
-  cl::init(false));
-
-static cl::opt<bool>
-EnableUnsafeFPMath("enable-unsafe-fp-math",
-  cl::desc("Enable optimizations that may decrease FP precision"),
-  cl::init(false));
-
-static cl::opt<bool>
-EnableNoInfsFPMath("enable-no-infs-fp-math",
-  cl::desc("Enable FP math optimizations that assume no +-Infs"),
-  cl::init(false));
-
-static cl::opt<bool>
-EnableNoNaNsFPMath("enable-no-nans-fp-math",
-  cl::desc("Enable FP math optimizations that assume no NaNs"),
-  cl::init(false));
-
-static cl::opt<bool>
-EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
-  cl::Hidden,
-  cl::desc("Force codegen to assume rounding mode can change dynamically"),
-  cl::init(false));
-
-static cl::opt<bool>
-GenerateSoftFloatCalls("soft-float",
-  cl::desc("Generate software floating point library calls"),
-  cl::init(false));
-
-static cl::opt<llvm::FloatABI::ABIType>
-FloatABIForCalls("float-abi",
-  cl::desc("Choose float ABI type"),
-  cl::init(FloatABI::Default),
-  cl::values(
-    clEnumValN(FloatABI::Default, "default",
-               "Target default float ABI type"),
-    clEnumValN(FloatABI::Soft, "soft",
-               "Soft float ABI (implied by -soft-float)"),
-    clEnumValN(FloatABI::Hard, "hard",
-               "Hard float ABI (uses FP registers)"),
-    clEnumValEnd));
-
-static cl::opt<llvm::FPOpFusion::FPOpFusionMode>
-FuseFPOps("fp-contract",
-  cl::desc("Enable aggresive formation of fused FP ops"),
-  cl::init(FPOpFusion::Standard),
-  cl::values(
-    clEnumValN(FPOpFusion::Fast, "fast",
-               "Fuse FP ops whenever profitable"),
-    clEnumValN(FPOpFusion::Standard, "on",
-               "Only fuse 'blessed' FP ops."),
-    clEnumValN(FPOpFusion::Strict, "off",
-               "Only fuse FP ops when the result won't be effected."),
-    clEnumValEnd));
-
-static cl::opt<bool>
-DontPlaceZerosInBSS("nozero-initialized-in-bss",
-  cl::desc("Don't place zero-initialized symbols into bss section"),
-  cl::init(false));
-
-static cl::opt<bool>
+cl::opt<bool>
 DisableSimplifyLibCalls("disable-simplify-libcalls",
-  cl::desc("Disable simplify-libcalls"),
-  cl::init(false));
-
-static cl::opt<bool>
-EnableGuaranteedTailCallOpt("tailcallopt",
-  cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."),
-  cl::init(false));
-
-static cl::opt<bool>
-DisableTailCalls("disable-tail-calls",
-  cl::desc("Never emit tail calls"),
-  cl::init(false));
-
-static cl::opt<unsigned>
-OverrideStackAlignment("stack-alignment",
-  cl::desc("Override default stack alignment"),
-  cl::init(0));
-
-static cl::opt<bool>
-EnableRealignStack("realign-stack",
-  cl::desc("Realign stack if needed"),
-  cl::init(true));
-
-static cl::opt<std::string>
-TrapFuncName("trap-func", cl::Hidden,
-  cl::desc("Emit a call to trap function rather than a trap instruction"),
-  cl::init(""));
-
-static cl::opt<bool>
-EnablePIE("enable-pie",
-  cl::desc("Assume the creation of a position independent executable."),
-  cl::init(false));
-
-static cl::opt<bool>
-SegmentedStacks("segmented-stacks",
-  cl::desc("Use segmented stacks if possible."),
-  cl::init(false));
-
-static cl::opt<bool>
-UseInitArray("use-init-array",
-  cl::desc("Use .init_array instead of .ctors."),
-  cl::init(false));
-
-static cl::opt<std::string> StopAfter("stop-after",
-  cl::desc("Stop compilation after a specific pass"),
-  cl::value_desc("pass-name"),
-  cl::init(""));
-static cl::opt<std::string> StartAfter("start-after",
-  cl::desc("Resume compilation after a specific pass"),
-  cl::value_desc("pass-name"),
-  cl::init(""));
+                        cl::desc("Disable simplify-libcalls"),
+                        cl::init(false));
 
 // GetFileNameRoot - Helper function to get the basename of a filename.
 static inline std::string
@@ -459,6 +262,7 @@
   Options.PositionIndependentExecutable = EnablePIE;
   Options.EnableSegmentedStacks = SegmentedStacks;
   Options.UseInitArray = UseInitArray;
+  Options.SSPBufferSize = SSPBufferSize;
 
   std::auto_ptr<TargetMachine>
     target(TheTarget->createTargetMachine(TheTriple.getTriple(),
@@ -499,11 +303,16 @@
     TLI->disableAllFunctions();
   PM.add(TLI);
 
+  if (target.get()) {
+    PM.add(new TargetTransformInfo(target->getScalarTargetTransformInfo(),
+                                   target->getVectorTargetTransformInfo()));
+  }
+
   // Add the target data from the target machine, if it exists, or the module.
-  if (const TargetData *TD = Target.getTargetData())
-    PM.add(new TargetData(*TD));
+  if (const DataLayout *TD = Target.getDataLayout())
+    PM.add(new DataLayout(*TD));
   else
-    PM.add(new TargetData(mod));
+    PM.add(new DataLayout(mod));
 
   // Override default to generate verbose assembly.
   Target.setAsmVerbosityDefault(true);

Modified: llvm/branches/AMDILBackend/tools/lli/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/lli/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/lli/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/tools/lli/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -1,7 +1,5 @@
 
-link_directories( ${LLVM_INTEL_JITEVENTS_LIBDIR} )
-
-set(LLVM_LINK_COMPONENTS mcjit jit interpreter nativecodegen bitreader asmparser selectiondag)
+set(LLVM_LINK_COMPONENTS mcjit jit interpreter nativecodegen bitreader asmparser selectiondag native)
 
 if( LLVM_USE_OPROFILE )
   set(LLVM_LINK_COMPONENTS
@@ -19,4 +17,6 @@
 
 add_llvm_tool(lli
   lli.cpp
+  RecordingMemoryManager.cpp
+  RemoteTarget.cpp
   )

Modified: llvm/branches/AMDILBackend/tools/lli/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/lli/LLVMBuild.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/lli/LLVMBuild.txt (original)
+++ llvm/branches/AMDILBackend/tools/lli/LLVMBuild.txt Tue Jan 15 11:16:16 2013
@@ -19,4 +19,4 @@
 type = Tool
 name = lli
 parent = Tools
-required_libraries = AsmParser BitReader Interpreter JIT MCJIT NativeCodeGen SelectionDAG
+required_libraries = AsmParser BitReader Interpreter JIT MCJIT NativeCodeGen SelectionDAG Native

Modified: llvm/branches/AMDILBackend/tools/lli/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/lli/Makefile?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/lli/Makefile (original)
+++ llvm/branches/AMDILBackend/tools/lli/Makefile Tue Jan 15 11:16:16 2013
@@ -12,7 +12,7 @@
 
 include $(LEVEL)/Makefile.config
 
-LINK_COMPONENTS := mcjit jit interpreter nativecodegen bitreader asmparser selectiondag
+LINK_COMPONENTS := mcjit jit interpreter nativecodegen bitreader asmparser selectiondag native
 
 # If Intel JIT Events support is confiured, link against the LLVM Intel JIT
 # Events interface library

Modified: llvm/branches/AMDILBackend/tools/lli/lli.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/lli/lli.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/lli/lli.cpp (original)
+++ llvm/branches/AMDILBackend/tools/lli/lli.cpp Tue Jan 15 11:16:16 2013
@@ -13,6 +13,9 @@
 //
 //===----------------------------------------------------------------------===//
 
+#define DEBUG_TYPE "lli"
+#include "RecordingMemoryManager.h"
+#include "RemoteTarget.h"
 #include "llvm/LLVMContext.h"
 #include "llvm/Module.h"
 #include "llvm/Type.h"
@@ -32,11 +35,14 @@
 #include "llvm/Support/PluginLoader.h"
 #include "llvm/Support/PrettyStackTrace.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Format.h"
 #include "llvm/Support/Process.h"
 #include "llvm/Support/Signals.h"
 #include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/Debug.h"
 #include "llvm/Support/DynamicLibrary.h"
 #include "llvm/Support/Memory.h"
+#include "llvm/Support/MathExtras.h"
 #include <cerrno>
 
 #ifdef __linux__
@@ -73,6 +79,13 @@
     "use-mcjit", cl::desc("Enable use of the MC-based JIT (if available)"),
     cl::init(false));
 
+  // The MCJIT supports building for a target address space separate from
+  // the JIT compilation process. Use a forked process and a copying
+  // memory manager with IPC to execute using this functionality.
+  cl::opt<bool> RemoteMCJIT("remote-mcjit",
+    cl::desc("Execute MCJIT'ed code in a separate process."),
+    cl::init(false));
+
   // Determine optimization level.
   cl::opt<char>
   OptLevel("O",
@@ -159,6 +172,23 @@
     cl::init(false));
 
   cl::opt<bool>
+  GenerateSoftFloatCalls("soft-float",
+    cl::desc("Generate software floating point library calls"),
+    cl::init(false));
+
+  cl::opt<llvm::FloatABI::ABIType>
+  FloatABIForCalls("float-abi",
+                   cl::desc("Choose float ABI type"),
+                   cl::init(FloatABI::Default),
+                   cl::values(
+                     clEnumValN(FloatABI::Default, "default",
+                                "Target default float ABI type"),
+                     clEnumValN(FloatABI::Soft, "soft",
+                                "Soft float ABI (implied by -soft-float)"),
+                     clEnumValN(FloatABI::Hard, "hard",
+                                "Hard float ABI (uses FP registers)"),
+                     clEnumValEnd));
+  cl::opt<bool>
 // In debug builds, make this default to true.
 #ifdef NDEBUG
 #define EMIT_DEBUG false
@@ -212,7 +242,7 @@
   // the data cache but not to the instruction cache.
   virtual void invalidateInstructionCache();
 
-  // The MCJITMemoryManager doesn't use the following functions, so we don't
+  // The RTDyldMemoryManager doesn't use the following functions, so we don't
   // need implement them.
   virtual void setMemoryWritable() {
     llvm_unreachable("Unexpected call!");
@@ -274,9 +304,16 @@
                                                     unsigned SectionID) {
   if (!Alignment)
     Alignment = 16;
-  uint8_t *Addr = (uint8_t*)calloc((Size + Alignment - 1)/Alignment, Alignment);
-  AllocatedDataMem.push_back(sys::MemoryBlock(Addr, Size));
-  return Addr;
+  // Ensure that enough memory is requested to allow aligning.
+  size_t NumElementsAligned = 1 + (Size + Alignment - 1)/Alignment;
+  uint8_t *Addr = (uint8_t*)calloc(NumElementsAligned, Alignment);
+
+  // Honour the alignment requirement.
+  uint8_t *AlignedAddr = (uint8_t*)RoundUpToAlignment((uint64_t)Addr, Alignment);
+
+  // Store the original address from calloc so we can free it later.
+  AllocatedDataMem.push_back(sys::MemoryBlock(Addr, NumElementsAligned*Alignment));
+  return AlignedAddr;
 }
 
 uint8_t *LLIMCJITMemoryManager::allocateCodeSection(uintptr_t Size,
@@ -326,6 +363,10 @@
                                             AllocatedCodeMem[i].size());
 }
 
+static int jit_noop() {
+  return 0;
+}
+
 void *LLIMCJITMemoryManager::getPointerToNamedFunction(const std::string &Name,
                                                        bool AbortOnFailure) {
 #if defined(__linux__)
@@ -348,6 +389,14 @@
   if (Name == "mknod") return (void*)(intptr_t)&mknod;
 #endif // __linux__
 
+  // We should not invoke parent's ctors/dtors from generated main()!
+  // On MinGW and Cygwin, the symbol __main would otherwise resolve to the
+  // callee's (e.g. tools/lli) copy, invoking the wrong duplicated ctors
+  // (and registering the wrong callee's dtors with atexit(3)).
+  // We expect ExecutionEngine::runStaticConstructorsDestructors() to be
+  // called before ExecutionEngine::runFunctionAsMain().
+  if (Name == "__main") return (void*)(intptr_t)&jit_noop;
+
   const char *NameStr = Name.c_str();
   void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
   if (Ptr) return Ptr;
@@ -372,6 +421,83 @@
     free(AllocatedDataMem[i].base());
 }
 
+
+void layoutRemoteTargetMemory(RemoteTarget *T, RecordingMemoryManager *JMM) {
+  // Lay out our sections in order, with all the code sections first, then
+  // all the data sections.
+  uint64_t CurOffset = 0;
+  unsigned MaxAlign = T->getPageAlignment();
+  SmallVector<std::pair<const void*, uint64_t>, 16> Offsets;
+  SmallVector<unsigned, 16> Sizes;
+  for (RecordingMemoryManager::const_code_iterator I = JMM->code_begin(),
+                                                   E = JMM->code_end();
+       I != E; ++I) {
+    DEBUG(dbgs() << "code region: size " << I->first.size()
+                 << ", alignment " << I->second << "\n");
+    // Align the current offset up to whatever is needed for the next
+    // section.
+    unsigned Align = I->second;
+    CurOffset = (CurOffset + Align - 1) / Align * Align;
+    // Save off the address of the new section and allocate its space.
+    Offsets.push_back(std::pair<const void*,uint64_t>(I->first.base(), CurOffset));
+    Sizes.push_back(I->first.size());
+    CurOffset += I->first.size();
+  }
+  // Adjust to keep code and data aligned on separate pages.
+  CurOffset = (CurOffset + MaxAlign - 1) / MaxAlign * MaxAlign;
+  unsigned FirstDataIndex = Offsets.size();
+  for (RecordingMemoryManager::const_data_iterator I = JMM->data_begin(),
+                                                   E = JMM->data_end();
+       I != E; ++I) {
+    DEBUG(dbgs() << "data region: size " << I->first.size()
+                 << ", alignment " << I->second << "\n");
+    // Align the current offset up to whatever is needed for the next
+    // section.
+    unsigned Align = I->second;
+    CurOffset = (CurOffset + Align - 1) / Align * Align;
+    // Save off the address of the new section and allocate its space.
+    Offsets.push_back(std::pair<const void*,uint64_t>(I->first.base(), CurOffset));
+    Sizes.push_back(I->first.size());
+    CurOffset += I->first.size();
+  }
+
+  // Allocate space in the remote target.
+  uint64_t RemoteAddr;
+  if (T->allocateSpace(CurOffset, MaxAlign, RemoteAddr))
+    report_fatal_error(T->getErrorMsg());
+  // Map the section addresses so relocations will get updated in the local
+  // copies of the sections.
+  for (unsigned i = 0, e = Offsets.size(); i != e; ++i) {
+    uint64_t Addr = RemoteAddr + Offsets[i].second;
+    EE->mapSectionAddress(const_cast<void*>(Offsets[i].first), Addr);
+
+    DEBUG(dbgs() << "  Mapping local: " << Offsets[i].first
+                 << " to remote: " << format("%p", Addr) << "\n");
+
+  }
+
+  // Trigger application of relocations
+  EE->finalizeObject();
+
+  // Now load it all to the target.
+  for (unsigned i = 0, e = Offsets.size(); i != e; ++i) {
+    uint64_t Addr = RemoteAddr + Offsets[i].second;
+
+    if (i < FirstDataIndex) {
+      T->loadCode(Addr, Offsets[i].first, Sizes[i]);
+
+      DEBUG(dbgs() << "  loading code: " << Offsets[i].first
+            << " to remote: " << format("%p", Addr) << "\n");
+    } else {
+      T->loadData(Addr, Offsets[i].first, Sizes[i]);
+
+      DEBUG(dbgs() << "  loading data: " << Offsets[i].first
+            << " to remote: " << format("%p", Addr) << "\n");
+    }
+
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // main Driver function
 //
@@ -386,6 +512,7 @@
   // usable by the JIT.
   InitializeNativeTarget();
   InitializeNativeTargetAsmPrinter();
+  InitializeNativeTargetAsmParser();
 
   cl::ParseCommandLineOptions(argc, argv,
                               "llvm interpreter & dynamic compiler\n");
@@ -428,12 +555,19 @@
     Mod->setTargetTriple(Triple::normalize(TargetTriple));
 
   // Enable MCJIT if desired.
-  LLIMCJITMemoryManager *JMM = 0;
+  JITMemoryManager *JMM = 0;
   if (UseMCJIT && !ForceInterpreter) {
     builder.setUseMCJIT(true);
-    JMM = new LLIMCJITMemoryManager();
+    if (RemoteMCJIT)
+      JMM = new RecordingMemoryManager();
+    else
+      JMM = new LLIMCJITMemoryManager();
     builder.setJITMemoryManager(JMM);
   } else {
+    if (RemoteMCJIT) {
+      errs() << "error: Remote process execution requires -use-mcjit\n";
+      exit(1);
+    }
     builder.setJITMemoryManager(ForceInterpreter ? 0 :
                                 JITMemoryManager::CreateDefaultMemManager());
   }
@@ -452,9 +586,19 @@
   builder.setOptLevel(OLvl);
 
   TargetOptions Options;
-  Options.JITExceptionHandling = EnableJITExceptionHandling;
-  Options.JITEmitDebugInfo = EmitJitDebugInfo;
-  Options.JITEmitDebugInfoToDisk = EmitJitDebugInfoToDisk;
+  Options.UseSoftFloat = GenerateSoftFloatCalls;
+  if (FloatABIForCalls != FloatABI::Default)
+    Options.FloatABIType = FloatABIForCalls;
+  if (GenerateSoftFloatCalls)
+    FloatABIForCalls = FloatABI::Soft;
+
+  // Remote target execution doesn't handle EH or debug registration.
+  if (!RemoteMCJIT) {
+    Options.JITExceptionHandling = EnableJITExceptionHandling;
+    Options.JITEmitDebugInfo = EmitJitDebugInfo;
+    Options.JITEmitDebugInfoToDisk = EmitJitDebugInfoToDisk;
+  }
+
   builder.setTargetOptions(Options);
 
   EE = builder.create();
@@ -466,10 +610,6 @@
     exit(1);
   }
 
-  // Clear instruction cache before code will be executed.
-  if (JMM)
-    JMM->invalidateInstructionCache();
-
   // The following functions have no effect if their respective profiling
   // support wasn't enabled in the build configuration.
   EE->RegisterJITEventListener(
@@ -477,6 +617,10 @@
   EE->RegisterJITEventListener(
                 JITEventListener::createIntelJITEventListener());
 
+  if (!NoLazyCompilation && RemoteMCJIT) {
+    errs() << "warning: remote mcjit does not support lazy compilation\n";
+    NoLazyCompilation = true;
+  }
   EE->DisableLazyCompilation(NoLazyCompilation);
 
   // If the user specifically requested an argv[0] to pass into the program,
@@ -513,8 +657,13 @@
   // Reset errno to zero on entry to main.
   errno = 0;
 
+  // Remote target MCJIT doesn't (yet) support static constructors. No reason
+  // it couldn't. This is a limitation of the lli implementation, not the
+  // MCJIT itself. FIXME.
+  //
   // Run static constructors.
-  EE->runStaticConstructorsDestructors(false);
+  if (!RemoteMCJIT)
+    EE->runStaticConstructorsDestructors(false);
 
   if (NoLazyCompilation) {
     for (Module::iterator I = Mod->begin(), E = Mod->end(); I != E; ++I) {
@@ -524,24 +673,69 @@
     }
   }
 
-  // Run main.
-  int Result = EE->runFunctionAsMain(EntryFn, InputArgv, envp);
+  int Result;
+  if (RemoteMCJIT) {
+    RecordingMemoryManager *MM = static_cast<RecordingMemoryManager*>(JMM);
+    // Everything is prepared now, so lay out our program for the target
+    // address space, assign the section addresses to resolve any relocations,
+    // and send it to the target.
+    RemoteTarget Target;
+    Target.create();
+
+    // Ask for a pointer to the entry function. This triggers the actual
+    // compilation.
+    (void)EE->getPointerToFunction(EntryFn);
+
+    // Enough has been compiled to execute the entry function now, so
+    // lay out the target memory.
+    layoutRemoteTargetMemory(&Target, MM);
+
+    // Since we're executing in an (at least simulated) remote address space,
+    // we can't use ExecutionEngine::runFunctionAsMain(). We have to
+    // grab the function address directly here and tell the remote target
+    // to execute the function.
+    // FIXME: argv and envp handling.
+    uint64_t Entry = (uint64_t)EE->getPointerToFunction(EntryFn);
+
+    DEBUG(dbgs() << "Executing '" << EntryFn->getName() << "' at "
+                 << format("%p", Entry) << "\n");
 
-  // Run static destructors.
-  EE->runStaticConstructorsDestructors(true);
+    if (Target.executeCode(Entry, Result))
+      errs() << "ERROR: " << Target.getErrorMsg() << "\n";
 
-  // If the program didn't call exit explicitly, we should call it now.
-  // This ensures that any atexit handlers get called correctly.
-  if (Function *ExitF = dyn_cast<Function>(Exit)) {
-    std::vector<GenericValue> Args;
-    GenericValue ResultGV;
-    ResultGV.IntVal = APInt(32, Result);
-    Args.push_back(ResultGV);
-    EE->runFunction(ExitF, Args);
-    errs() << "ERROR: exit(" << Result << ") returned!\n";
-    abort();
+    Target.stop();
   } else {
-    errs() << "ERROR: exit defined with wrong prototype!\n";
-    abort();
+    // Trigger compilation separately so code regions that need to be 
+    // invalidated will be known.
+    (void)EE->getPointerToFunction(EntryFn);
+    // Clear instruction cache before code will be executed.
+    if (JMM)
+      static_cast<LLIMCJITMemoryManager*>(JMM)->invalidateInstructionCache();
+
+    // Run main.
+    Result = EE->runFunctionAsMain(EntryFn, InputArgv, envp);
+  }
+
+  // Like static constructors, the remote target MCJIT support doesn't handle
+  // this yet. It could. FIXME.
+  if (!RemoteMCJIT) {
+    // Run static destructors.
+    EE->runStaticConstructorsDestructors(true);
+
+    // If the program didn't call exit explicitly, we should call it now.
+    // This ensures that any atexit handlers get called correctly.
+    if (Function *ExitF = dyn_cast<Function>(Exit)) {
+      std::vector<GenericValue> Args;
+      GenericValue ResultGV;
+      ResultGV.IntVal = APInt(32, Result);
+      Args.push_back(ResultGV);
+      EE->runFunction(ExitF, Args);
+      errs() << "ERROR: exit(" << Result << ") returned!\n";
+      abort();
+    } else {
+      errs() << "ERROR: exit defined with wrong prototype!\n";
+      abort();
+    }
   }
+  return Result;
 }
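
For reference, the allocateDataSection change above relies on a standard idiom: over-allocate by one extra alignment unit, then round the returned pointer up to the requested alignment, the same (X + Align - 1) / Align * Align arithmetic that layoutRemoteTargetMemory applies to section offsets. A minimal standalone sketch of that idiom, with hypothetical names and independent of this commit:

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    // Round Value up to the next multiple of Align (Align must be non-zero).
    static uint64_t roundUp(uint64_t Value, uint64_t Align) {
      return (Value + Align - 1) / Align * Align;
    }

    int main() {
      const size_t Size = 100, Alignment = 16;
      // Ask for one extra alignment unit so the aligned pointer still has
      // Size usable bytes behind it.
      size_t NumElements = 1 + (Size + Alignment - 1) / Alignment;
      uint8_t *Raw = (uint8_t *)calloc(NumElements, Alignment);
      uint8_t *Aligned = (uint8_t *)(uintptr_t)roundUp((uintptr_t)Raw, Alignment);
      assert(Aligned + Size <= Raw + NumElements * Alignment);
      // ... use Aligned ..., but free() the pointer calloc returned.
      free(Raw);
      return 0;
    }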

Modified: llvm/branches/AMDILBackend/tools/llvm-ar/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-ar/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-ar/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/tools/llvm-ar/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -1,5 +1,4 @@
 set(LLVM_LINK_COMPONENTS archive)
-set(LLVM_REQUIRES_EH 1)
 
 add_llvm_tool(llvm-ar
   llvm-ar.cpp

Modified: llvm/branches/AMDILBackend/tools/llvm-ar/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-ar/Makefile?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-ar/Makefile (original)
+++ llvm/branches/AMDILBackend/tools/llvm-ar/Makefile Tue Jan 15 11:16:16 2013
@@ -10,7 +10,6 @@
 LEVEL := ../..
 TOOLNAME := llvm-ar
 LINK_COMPONENTS := archive
-REQUIRES_EH := 1
 
 # This tool has no plugins, optimize startup time.
 TOOL_NO_EXPORTS := 1

Modified: llvm/branches/AMDILBackend/tools/llvm-ar/llvm-ar.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-ar/llvm-ar.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-ar/llvm-ar.cpp (original)
+++ llvm/branches/AMDILBackend/tools/llvm-ar/llvm-ar.cpp Tue Jan 15 11:16:16 2013
@@ -23,6 +23,7 @@
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/Signals.h"
 #include <algorithm>
+#include <cstdlib>
 #include <memory>
 #include <fstream>
 using namespace llvm;
@@ -126,40 +127,57 @@
 // The Archive object to which all the editing operations will be sent.
 Archive* TheArchive = 0;
 
+// The name this program was invoked as.
+static const char *program_name;
+
+// show_help - Show the error message, the help message and exit.
+LLVM_ATTRIBUTE_NORETURN static void
+show_help(const std::string &msg) {
+  errs() << program_name << ": " << msg << "\n\n";
+  cl::PrintHelpMessage();
+  if (TheArchive)
+    delete TheArchive;
+  std::exit(1);
+}
+
+// fail - Show the error message and exit.
+LLVM_ATTRIBUTE_NORETURN static void
+fail(const std::string &msg) {
+  errs() << program_name << ": " << msg << "\n\n";
+  if (TheArchive)
+    delete TheArchive;
+  std::exit(1);
+}
+
 // getRelPos - Extract the member filename from the command line for
 // the [relpos] argument associated with a, b, and i modifiers
 void getRelPos() {
-  if(RestOfArgs.size() > 0) {
-    RelPos = RestOfArgs[0];
-    RestOfArgs.erase(RestOfArgs.begin());
-  }
-  else
-    throw "Expected [relpos] for a, b, or i modifier";
+  if(RestOfArgs.size() == 0)
+    show_help("Expected [relpos] for a, b, or i modifier");
+  RelPos = RestOfArgs[0];
+  RestOfArgs.erase(RestOfArgs.begin());
 }
 
 // getCount - Extract the [count] argument associated with the N modifier
 // from the command line and check its value.
 void getCount() {
-  if(RestOfArgs.size() > 0) {
-    Count = atoi(RestOfArgs[0].c_str());
-    RestOfArgs.erase(RestOfArgs.begin());
-  }
-  else
-    throw "Expected [count] value with N modifier";
+  if(RestOfArgs.size() == 0)
+    show_help("Expected [count] value with N modifier");
+
+  Count = atoi(RestOfArgs[0].c_str());
+  RestOfArgs.erase(RestOfArgs.begin());
 
   // Non-positive counts are not allowed
   if (Count < 1)
-    throw "Invalid [count] value (not a positive integer)";
+    show_help("Invalid [count] value (not a positive integer)");
 }
 
 // getArchive - Get the archive file name from the command line
 void getArchive() {
-  if(RestOfArgs.size() > 0) {
-    ArchiveName = RestOfArgs[0];
-    RestOfArgs.erase(RestOfArgs.begin());
-  }
-  else
-    throw "An archive name must be specified.";
+  if(RestOfArgs.size() == 0)
+    show_help("An archive name must be specified");
+  ArchiveName = RestOfArgs[0];
+  RestOfArgs.erase(RestOfArgs.begin());
 }
 
 // getMembers - Copy over remaining items in RestOfArgs to our Members vector
@@ -240,25 +258,27 @@
   // Perform various checks on the operation/modifier specification
   // to make sure we are dealing with a legal request.
   if (NumOperations == 0)
-    throw "You must specify at least one of the operations";
+    show_help("You must specify at least one of the operations");
   if (NumOperations > 1)
-    throw "Only one operation may be specified";
+    show_help("Only one operation may be specified");
   if (NumPositional > 1)
-    throw "You may only specify one of a, b, and i modifiers";
-  if (AddAfter || AddBefore || InsertBefore)
+    show_help("You may only specify one of a, b, and i modifiers");
+  if (AddAfter || AddBefore || InsertBefore) {
     if (Operation != Move && Operation != ReplaceOrInsert)
-      throw "The 'a', 'b' and 'i' modifiers can only be specified with "
-            "the 'm' or 'r' operations";
+      show_help("The 'a', 'b' and 'i' modifiers can only be specified with "
+            "the 'm' or 'r' operations");
+  }
   if (RecurseDirectories && Operation != ReplaceOrInsert)
-    throw "The 'R' modifiers is only applicabe to the 'r' operation";
+    show_help("The 'R' modifier is only applicable to the 'r' operation");
   if (OriginalDates && Operation != Extract)
-    throw "The 'o' modifier is only applicable to the 'x' operation";
+    show_help("The 'o' modifier is only applicable to the 'x' operation");
   if (TruncateNames && Operation!=QuickAppend && Operation!=ReplaceOrInsert)
-    throw "The 'f' modifier is only applicable to the 'q' and 'r' operations";
+    show_help("The 'f' modifier is only applicable to the 'q' and 'r' "
+              "operations");
   if (OnlyUpdate && Operation != ReplaceOrInsert)
-    throw "The 'u' modifier is only applicable to the 'r' operation";
+    show_help("The 'u' modifier is only applicable to the 'r' operation");
   if (Count > 1 && Members.size() > 1)
-    throw "Only one member name may be specified with the 'N' modifier";
+    show_help("Only one member name may be specified with the 'N' modifier");
 
   // Return the parsed operation to the caller
   return Operation;
@@ -304,16 +324,16 @@
   for (unsigned i = 0; i < Members.size(); i++) {
     sys::Path aPath;
     if (!aPath.set(Members[i]))
-      throw std::string("File member name invalid: ") + Members[i];
+      fail(std::string("File member name invalid: ") + Members[i]);
     if (checkExistence) {
       bool Exists;
       if (sys::fs::exists(aPath.str(), Exists) || !Exists)
-        throw std::string("File does not exist: ") + Members[i];
+        fail(std::string("File does not exist: ") + Members[i]);
       std::string Err;
       sys::PathWithStatus PwS(aPath);
       const sys::FileStatus *si = PwS.getFileStatus(false, &Err);
       if (!si)
-        throw Err;
+        fail(Err);
       if (si->isDir) {
         std::set<sys::Path> dirpaths;
         if (recurseDirectories(aPath, dirpaths, ErrMsg))
@@ -683,6 +703,7 @@
 
 // main - main program for llvm-ar .. see comments in the code
 int main(int argc, char **argv) {
+  program_name = argv[0];
   // Print a stack trace if we signal out.
   sys::PrintStackTraceOnErrorSignal();
   PrettyStackTraceProgram X(argc, argv);
@@ -698,76 +719,60 @@
 
   int exitCode = 0;
 
-  // Make sure we don't exit with "unhandled exception".
-  try {
-    // Do our own parsing of the command line because the CommandLine utility
-    // can't handle the grouped positional parameters without a dash.
-    ArchiveOperation Operation = parseCommandLine();
-
-    // Check the path name of the archive
-    sys::Path ArchivePath;
-    if (!ArchivePath.set(ArchiveName))
-      throw std::string("Archive name invalid: ") + ArchiveName;
-
-    // Create or open the archive object.
-    bool Exists;
-    if (llvm::sys::fs::exists(ArchivePath.str(), Exists) || !Exists) {
-      // Produce a warning if we should and we're creating the archive
-      if (!Create)
-        errs() << argv[0] << ": creating " << ArchivePath.str() << "\n";
-      TheArchive = Archive::CreateEmpty(ArchivePath, Context);
-      TheArchive->writeToDisk();
-    } else {
-      std::string Error;
-      TheArchive = Archive::OpenAndLoad(ArchivePath, Context, &Error);
-      if (TheArchive == 0) {
-        errs() << argv[0] << ": error loading '" << ArchivePath.str() << "': "
-               << Error << "!\n";
-        return 1;
-      }
+  // Do our own parsing of the command line because the CommandLine utility
+  // can't handle the grouped positional parameters without a dash.
+  ArchiveOperation Operation = parseCommandLine();
+
+  // Check the path name of the archive
+  sys::Path ArchivePath;
+  if (!ArchivePath.set(ArchiveName)) {
+    errs() << argv[0] << ": Archive name invalid: " << ArchiveName << "\n";
+    return 1;
+  }
+
+  // Create or open the archive object.
+  bool Exists;
+  if (llvm::sys::fs::exists(ArchivePath.str(), Exists) || !Exists) {
+    // Produce a warning if we should and we're creating the archive
+    if (!Create)
+      errs() << argv[0] << ": creating " << ArchivePath.str() << "\n";
+    TheArchive = Archive::CreateEmpty(ArchivePath, Context);
+    TheArchive->writeToDisk();
+  } else {
+    std::string Error;
+    TheArchive = Archive::OpenAndLoad(ArchivePath, Context, &Error);
+    if (TheArchive == 0) {
+      errs() << argv[0] << ": error loading '" << ArchivePath.str() << "': "
+             << Error << "!\n";
+      return 1;
     }
+  }
 
-    // Make sure we're not fooling ourselves.
-    assert(TheArchive && "Unable to instantiate the archive");
-
-    // Make sure we clean up the archive even on failure.
-    std::auto_ptr<Archive> AutoArchive(TheArchive);
+  // Make sure we're not fooling ourselves.
+  assert(TheArchive && "Unable to instantiate the archive");
 
-    // Perform the operation
-    std::string ErrMsg;
-    bool haveError = false;
-    switch (Operation) {
-      case Print:           haveError = doPrint(&ErrMsg); break;
-      case Delete:          haveError = doDelete(&ErrMsg); break;
-      case Move:            haveError = doMove(&ErrMsg); break;
-      case QuickAppend:     haveError = doQuickAppend(&ErrMsg); break;
-      case ReplaceOrInsert: haveError = doReplaceOrInsert(&ErrMsg); break;
-      case DisplayTable:    haveError = doDisplayTable(&ErrMsg); break;
-      case Extract:         haveError = doExtract(&ErrMsg); break;
-      case NoOperation:
-        errs() << argv[0] << ": No operation was selected.\n";
-        break;
-    }
-    if (haveError) {
-      errs() << argv[0] << ": " << ErrMsg << "\n";
-      return 1;
-    }
-  } catch (const char*msg) {
-    // These errors are usage errors, thrown only by the various checks in the
-    // code above.
-    errs() << argv[0] << ": " << msg << "\n\n";
-    cl::PrintHelpMessage();
-    exitCode = 1;
-  } catch (const std::string& msg) {
-    // These errors are thrown by LLVM libraries (e.g. lib System) and represent
-    // a more serious error so we bump the exitCode and don't print the usage.
-    errs() << argv[0] << ": " << msg << "\n";
-    exitCode = 2;
-  } catch (...) {
-    // This really shouldn't happen, but just in case ....
-    errs() << argv[0] << ": An unexpected unknown exception occurred.\n";
-    exitCode = 3;
+  // Perform the operation
+  std::string ErrMsg;
+  bool haveError = false;
+  switch (Operation) {
+    case Print:           haveError = doPrint(&ErrMsg); break;
+    case Delete:          haveError = doDelete(&ErrMsg); break;
+    case Move:            haveError = doMove(&ErrMsg); break;
+    case QuickAppend:     haveError = doQuickAppend(&ErrMsg); break;
+    case ReplaceOrInsert: haveError = doReplaceOrInsert(&ErrMsg); break;
+    case DisplayTable:    haveError = doDisplayTable(&ErrMsg); break;
+    case Extract:         haveError = doExtract(&ErrMsg); break;
+    case NoOperation:
+      errs() << argv[0] << ": No operation was selected.\n";
+      break;
   }
+  if (haveError) {
+    errs() << argv[0] << ": " << ErrMsg << "\n";
+    return 1;
+  }
+
+  delete TheArchive;
+  TheArchive = 0;
 
   // Return result code back to operating system.
   return exitCode;
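
The llvm-ar rewrite above removes the tool's reliance on C++ exceptions (matching the LLVM_REQUIRES_EH/REQUIRES_EH removals in its build files) by routing usage errors through noreturn helpers that print a message and exit. A generic sketch of that pattern, with hypothetical names and not tied to the LLVM code:

    #include <cstdio>
    #include <cstdlib>
    #include <string>

    static const char *ProgramName = "tool";

    // Print a usage error and terminate; the noreturn attribute lets callers
    // omit dead code after the call (the role show_help/fail play above).
    [[noreturn]] static void usageError(const std::string &Msg) {
      std::fprintf(stderr, "%s: %s\n\n", ProgramName, Msg.c_str());
      // Release any global resources (e.g. an open archive) before exiting.
      std::exit(1);
    }

    static int parseCount(int Argc, char **Argv) {
      if (Argc < 2)
        usageError("Expected [count] value with N modifier");
      int Count = std::atoi(Argv[1]);
      if (Count < 1)
        usageError("Invalid [count] value (not a positive integer)");
      return Count;
    }

    int main(int argc, char **argv) {
      ProgramName = argv[0];
      std::printf("count = %d\n", parseCount(argc, argv));
      return 0;
    }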

Modified: llvm/branches/AMDILBackend/tools/llvm-as/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-as/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-as/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/tools/llvm-as/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -1,5 +1,4 @@
 set(LLVM_LINK_COMPONENTS asmparser bitwriter)
-set(LLVM_REQUIRES_EH 1)
 
 add_llvm_tool(llvm-as
   llvm-as.cpp

Modified: llvm/branches/AMDILBackend/tools/llvm-bcanalyzer/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-bcanalyzer/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-bcanalyzer/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/tools/llvm-bcanalyzer/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -1,5 +1,4 @@
 set(LLVM_LINK_COMPONENTS bitreader)
-set(LLVM_REQUIRES_EH 1)
 
 add_llvm_tool(llvm-bcanalyzer
   llvm-bcanalyzer.cpp

Modified: llvm/branches/AMDILBackend/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp (original)
+++ llvm/branches/AMDILBackend/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp Tue Jan 15 11:16:16 2013
@@ -40,7 +40,7 @@
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/Signals.h"
 #include "llvm/Support/system_error.h"
-#include <cstdio>
+
 #include <map>
 #include <algorithm>
 using namespace llvm;
@@ -463,11 +463,11 @@
 }
 
 static void PrintSize(double Bits) {
-  fprintf(stderr, "%.2f/%.2fB/%luW", Bits, Bits/8,(unsigned long)(Bits/32));
+  outs() << format("%.2f/%.2fB/%luW", Bits, Bits/8,(unsigned long)(Bits/32));
 }
 static void PrintSize(uint64_t Bits) {
-  fprintf(stderr, "%lub/%.2fB/%luW", (unsigned long)Bits,
-          (double)Bits/8, (unsigned long)(Bits/32));
+  outs() << format("%lub/%.2fB/%luW", (unsigned long)Bits,
+                   (double)Bits/8, (unsigned long)(Bits/32));
 }
 
 
@@ -483,7 +483,7 @@
   if (MemBuf->getBufferSize() & 3)
     return Error("Bitcode stream should be a multiple of 4 bytes in length");
 
-  const unsigned char *BufPtr = (unsigned char *)MemBuf->getBufferStart();
+  const unsigned char *BufPtr = (const unsigned char *)MemBuf->getBufferStart();
   const unsigned char *EndBufPtr = BufPtr+MemBuf->getBufferSize();
 
   // If we have a wrapper header, parse it and ignore the non-bc file contents.
@@ -556,7 +556,7 @@
     PrintSize(Stats.NumBits);
     outs() << "\n";
     double pct = (Stats.NumBits * 100.0) / BufferSizeBits;
-    errs() << "    Percent of file: " << format("%2.4f%%", pct) << "\n";
+    outs() << "    Percent of file: " << format("%2.4f%%", pct) << "\n";
     if (Stats.NumInstances > 1) {
       outs() << "       Average Size: ";
       PrintSize(Stats.NumBits/(double)Stats.NumInstances);
@@ -588,24 +588,26 @@
       std::reverse(FreqPairs.begin(), FreqPairs.end());
 
       outs() << "\tRecord Histogram:\n";
-      fprintf(stderr, "\t\t  Count    # Bits   %% Abv  Record Kind\n");
+      outs() << "\t\t  Count    # Bits   %% Abv  Record Kind\n";
       for (unsigned i = 0, e = FreqPairs.size(); i != e; ++i) {
         const PerRecordStats &RecStats = Stats.CodeFreq[FreqPairs[i].second];
 
-        fprintf(stderr, "\t\t%7d %9lu ", RecStats.NumInstances,
-                (unsigned long)RecStats.TotalBits);
+        outs() << format("\t\t%7d %9lu",
+                         RecStats.NumInstances,
+                         (unsigned long)RecStats.TotalBits);
 
         if (RecStats.NumAbbrev)
-          fprintf(stderr, "%7.2f  ",
-                  (double)RecStats.NumAbbrev/RecStats.NumInstances*100);
+          outs() <<
+              format("%7.2f  ",
+                     (double)RecStats.NumAbbrev/RecStats.NumInstances*100);
         else
-          fprintf(stderr, "         ");
+          outs() << "         ";
 
         if (const char *CodeName =
               GetCodeName(FreqPairs[i].second, I->first, StreamFile))
-          fprintf(stderr, "%s\n", CodeName);
+          outs() << CodeName << "\n";
         else
-          fprintf(stderr, "UnknownCode%d\n", FreqPairs[i].second);
+          outs() << "UnknownCode" << FreqPairs[i].second << "\n";
       }
       outs() << "\n";
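
The llvm-bcanalyzer changes above replace fprintf(stderr, ...) with llvm::outs() plus llvm::format, keeping printf-style formatting while writing through a raw_ostream (and moving the statistics from stderr to stdout). A minimal usage sketch, assuming an LLVM build environment:

    #include "llvm/Support/Format.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    int main() {
      double Bits = 1234.0;
      // format() builds a small helper object that raw_ostream renders with
      // the given printf-style format string.
      outs() << format("%.2f/%.2fB/%luW", Bits, Bits / 8,
                       (unsigned long)(Bits / 32)) << "\n";
      return 0;
    }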
 

Modified: llvm/branches/AMDILBackend/tools/llvm-config/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-config/Makefile?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-config/Makefile (original)
+++ llvm/branches/AMDILBackend/tools/llvm-config/Makefile Tue Jan 15 11:16:16 2013
@@ -63,5 +63,5 @@
 install:: $(DESTDIR)$(PROJ_bindir)
 	$(Echo) Installing llvm-config-host
 	$(Verb) $(ProgInstall) $(BuildLLVMToolDir)/llvm-config \
-	  $(DESTDIR)$(PROJ_bindir)/llvm-config-host
+	  $(DESTDIR)$(PROJ_bindir)/$(program_prefix)llvm-config-host
 endif

Modified: llvm/branches/AMDILBackend/tools/llvm-dis/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-dis/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-dis/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/tools/llvm-dis/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -1,5 +1,4 @@
 set(LLVM_LINK_COMPONENTS bitreader analysis)
-set(LLVM_REQUIRES_EH 1)
 
 add_llvm_tool(llvm-dis
   llvm-dis.cpp

Modified: llvm/branches/AMDILBackend/tools/llvm-dwarfdump/llvm-dwarfdump.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-dwarfdump/llvm-dwarfdump.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-dwarfdump/llvm-dwarfdump.cpp (original)
+++ llvm/branches/AMDILBackend/tools/llvm-dwarfdump/llvm-dwarfdump.cpp Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-//===-- llvm-dwarfdump.cpp - Debug info dumping utility for llvm -----------===//
+//===-- llvm-dwarfdump.cpp - Debug info dumping utility for llvm ----------===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -15,6 +15,7 @@
 #include "llvm/ADT/Triple.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/RelocVisitor.h"
 #include "llvm/DebugInfo/DIContext.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
@@ -28,6 +29,9 @@
 #include "llvm/Support/system_error.h"
 #include <algorithm>
 #include <cstring>
+#include <list>
+#include <string>
+
 using namespace llvm;
 using namespace object;
 
@@ -44,6 +48,18 @@
                cl::desc("Print function names as well as line information "
                         "for a given address"));
 
+static cl::opt<bool>
+PrintInlining("inlining", cl::init(false),
+              cl::desc("Print all inlined frames for a given address"));
+
+static void PrintDILineInfo(DILineInfo dli) {
+  if (PrintFunctions)
+    outs() << (dli.getFunctionName() ? dli.getFunctionName() : "<unknown>")
+           << "\n";
+  outs() << (dli.getFileName() ? dli.getFileName() : "<unknown>") << ':'
+         << dli.getLine() << ':' << dli.getColumn() << '\n';
+}
+
 static void DumpInput(const StringRef &Filename) {
   OwningPtr<MemoryBuffer> Buff;
 
@@ -55,10 +71,12 @@
   OwningPtr<ObjectFile> Obj(ObjectFile::createObjectFile(Buff.take()));
 
   StringRef DebugInfoSection;
+  RelocAddrMap RelocMap;
   StringRef DebugAbbrevSection;
   StringRef DebugLineSection;
   StringRef DebugArangesSection;
   StringRef DebugStringSection;
+  StringRef DebugRangesSection;
 
   error_code ec;
   for (section_iterator i = Obj->begin_sections(),
@@ -82,6 +100,59 @@
       DebugArangesSection = data;
     else if (name == "debug_str")
       DebugStringSection = data;
+    else if (name == "debug_ranges")
+      DebugRangesSection = data;
+    // Any more debug info sections go here.
+    else
+      continue;
+
+    // TODO: For now only handle relocations for the debug_info section.
+    if (name != "debug_info")
+      continue;
+
+    if (i->begin_relocations() != i->end_relocations()) {
+      uint64_t SectionSize;
+      i->getSize(SectionSize);
+      for (relocation_iterator reloc_i = i->begin_relocations(),
+                               reloc_e = i->end_relocations();
+                               reloc_i != reloc_e; reloc_i.increment(ec)) {
+        uint64_t Address;
+        reloc_i->getAddress(Address);
+        uint64_t Type;
+        reloc_i->getType(Type);
+
+        RelocVisitor V(Obj->getFileFormatName());
+        // The section address is always 0 for debug sections.
+        RelocToApply R(V.visit(Type, *reloc_i));
+        if (V.error()) {
+          SmallString<32> Name;
+          error_code ec(reloc_i->getTypeName(Name));
+          if (ec) {
+            errs() << "Aaaaaa! Nameless relocation! Aaaaaa!\n";
+          }
+          errs() << "error: failed to compute relocation: "
+                 << Name << "\n";
+          continue;
+        }
+
+        if (Address + R.Width > SectionSize) {
+          errs() << "error: " << R.Width << "-byte relocation starting "
+                 << Address << " bytes into section " << name << " which is "
+                 << SectionSize << " bytes long.\n";
+          continue;
+        }
+        if (R.Width > 8) {
+          errs() << "error: can't handle a relocation of more than 8 bytes at "
+                    "a time.\n";
+          continue;
+        }
+        DEBUG(dbgs() << "Writing " << format("%p", R.Value)
+                     << " at " << format("%p", Address)
+                     << " with width " << format("%d", R.Width)
+                     << "\n");
+        RelocMap[Address] = std::make_pair(R.Width, R.Value);
+      }
+    }
   }
 
   OwningPtr<DIContext> dictx(DIContext::getDWARFContext(/*FIXME*/true,
@@ -89,7 +160,9 @@
                                                         DebugAbbrevSection,
                                                         DebugArangesSection,
                                                         DebugLineSection,
-                                                        DebugStringSection));
+                                                        DebugStringSection,
+                                                        DebugRangesSection,
+                                                        RelocMap));
   if (Address == -1ULL) {
     outs() << Filename
            << ":\tfile format " << Obj->getFileFormatName() << "\n\n";
@@ -97,16 +170,27 @@
     dictx->dump(outs());
   } else {
     // Print line info for the specified address.
-    int spec_flags = DILineInfoSpecifier::FileLineInfo |
-                     DILineInfoSpecifier::AbsoluteFilePath;
-    if (PrintFunctions)
-      spec_flags |= DILineInfoSpecifier::FunctionName;
-    DILineInfo dli = dictx->getLineInfoForAddress(Address, spec_flags);
+    int SpecFlags = DILineInfoSpecifier::FileLineInfo |
+                    DILineInfoSpecifier::AbsoluteFilePath;
     if (PrintFunctions)
-      outs() << (dli.getFunctionName() ? dli.getFunctionName() : "<unknown>")
-             << "\n";
-    outs() << (dli.getFileName() ? dli.getFileName() : "<unknown>") << ':'
-           << dli.getLine() << ':' << dli.getColumn() << '\n';
+      SpecFlags |= DILineInfoSpecifier::FunctionName;
+    if (PrintInlining) {
+      DIInliningInfo InliningInfo =
+        dictx->getInliningInfoForAddress(Address, SpecFlags);
+      uint32_t n = InliningInfo.getNumberOfFrames();
+      if (n == 0) {
+        // Print one empty debug line info in any case.
+        PrintDILineInfo(DILineInfo());
+      } else {
+        for (uint32_t i = 0; i < n; i++) {
+          DILineInfo dli = InliningInfo.getFrame(i);
+          PrintDILineInfo(dli);
+        }
+      }
+    } else {
+      DILineInfo dli = dictx->getLineInfoForAddress(Address, SpecFlags);
+      PrintDILineInfo(dli);
+    }
   }
 }
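
The llvm-dwarfdump change above gathers relocations for the debug_info section into RelocMap[Address] = (Width, Value), skipping entries that run past the section or are wider than 8 bytes. Purely as an illustration (this helper is not part of the patch), a map of that shape could be applied to a local copy of the section bytes roughly as follows, assuming little-endian byte order:

    #include <cstdint>
    #include <map>
    #include <utility>
    #include <vector>

    typedef std::map<uint64_t, std::pair<uint8_t, uint64_t> > RelocMapTy;

    // Patch each relocated location in a local copy of the section.
    static void applyRelocations(std::vector<uint8_t> &Section,
                                 const RelocMapTy &RelocMap) {
      for (RelocMapTy::const_iterator I = RelocMap.begin(), E = RelocMap.end();
           I != E; ++I) {
        uint64_t Address = I->first;
        unsigned Width = I->second.first;
        uint64_t Value = I->second.second;
        // Mirror the sanity checks performed when the map was built.
        if (Width > 8 || Address + Width > Section.size())
          continue;
        // Write the low Width bytes of Value, least significant byte first.
        for (unsigned b = 0; b != Width; ++b)
          Section[Address + b] = uint8_t(Value >> (8 * b));
      }
    }

    int main() {
      std::vector<uint8_t> Section(16, 0);
      RelocMapTy RelocMap;
      RelocMap[4] = std::make_pair(uint8_t(4), uint64_t(0xdeadbeef));
      applyRelocations(Section, RelocMap);
      return 0;
    }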
 

Modified: llvm/branches/AMDILBackend/tools/llvm-extract/llvm-extract.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-extract/llvm-extract.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-extract/llvm-extract.cpp (original)
+++ llvm/branches/AMDILBackend/tools/llvm-extract/llvm-extract.cpp Tue Jan 15 11:16:16 2013
@@ -18,7 +18,7 @@
 #include "llvm/Assembly/PrintModulePass.h"
 #include "llvm/Bitcode/ReaderWriter.h"
 #include "llvm/Transforms/IPO.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/IRReader.h"
 #include "llvm/Support/ManagedStatic.h"
@@ -59,6 +59,19 @@
                                      "regular expression"),
                    cl::ZeroOrMore, cl::value_desc("rfunction"));
 
+// ExtractAliases - The aliases to extract from the module.
+static cl::list<std::string>
+ExtractAliases("alias", cl::desc("Specify alias to extract"),
+               cl::ZeroOrMore, cl::value_desc("alias"));
+
+
+// ExtractRegExpAliases - The aliases, matched via regular expression, to
+// extract from the module.
+static cl::list<std::string>
+ExtractRegExpAliases("ralias", cl::desc("Specify alias(es) to extract using a "
+                                        "regular expression"),
+                     cl::ZeroOrMore, cl::value_desc("ralias"));
+
 // ExtractGlobals - The globals to extract from the module.
 static cl::list<std::string>
 ExtractGlobals("glob", cl::desc("Specify global to extract"),
@@ -97,6 +110,40 @@
   // Use SetVector to avoid duplicates.
   SetVector<GlobalValue *> GVs;
 
+  // Figure out which aliases we should extract.
+  for (size_t i = 0, e = ExtractAliases.size(); i != e; ++i) {
+    GlobalAlias *GA = M->getNamedAlias(ExtractAliases[i]);
+    if (!GA) {
+      errs() << argv[0] << ": program doesn't contain alias named '"
+             << ExtractAliases[i] << "'!\n";
+      return 1;
+    }
+    GVs.insert(GA);
+  }
+
+  // Extract aliases via regular expression matching.
+  for (size_t i = 0, e = ExtractRegExpAliases.size(); i != e; ++i) {
+    std::string Error;
+    Regex RegEx(ExtractRegExpAliases[i]);
+    if (!RegEx.isValid(Error)) {
+      errs() << argv[0] << ": '" << ExtractRegExpAliases[i] << "' "
+        "invalid regex: " << Error;
+    }
+    bool match = false;
+    for (Module::alias_iterator GA = M->alias_begin(), E = M->alias_end();
+         GA != E; GA++) {
+      if (RegEx.match(GA->getName())) {
+        GVs.insert(&*GA);
+        match = true;
+      }
+    }
+    if (!match) {
+      errs() << argv[0] << ": program doesn't contain an alias matching '"
+             << ExtractRegExpAliases[i] << "'!\n";
+      return 1;
+    }
+  }
+
   // Figure out which globals we should extract.
   for (size_t i = 0, e = ExtractGlobals.size(); i != e; ++i) {
     GlobalValue *GV = M->getNamedGlobal(ExtractGlobals[i]);
@@ -206,7 +253,7 @@
   // In addition to deleting all other functions, we also want to spiff it
   // up a little bit.  Do this now.
   PassManager Passes;
-  Passes.add(new TargetData(M.get())); // Use correct TargetData
+  Passes.add(new DataLayout(M.get())); // Use correct DataLayout
 
   std::vector<GlobalValue*> Gvs(GVs.begin(), GVs.end());
 

Modified: llvm/branches/AMDILBackend/tools/llvm-mc/llvm-mc.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-mc/llvm-mc.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-mc/llvm-mc.cpp (original)
+++ llvm/branches/AMDILBackend/tools/llvm-mc/llvm-mc.cpp Tue Jan 15 11:16:16 2013
@@ -158,7 +158,8 @@
   AC_AsLex,
   AC_Assemble,
   AC_Disassemble,
-  AC_EDisassemble
+  AC_EDisassemble,
+  AC_MDisassemble
 };
 
 static cl::opt<ActionType>
@@ -172,6 +173,8 @@
                              "Disassemble strings of hex bytes"),
                   clEnumValN(AC_EDisassemble, "edis",
                              "Enhanced disassembly of strings of hex bytes"),
+                  clEnumValN(AC_MDisassemble, "mdis",
+                             "Marked up disassembly of strings of hex bytes"),
                   clEnumValEnd));
 
 static const Target *GetTarget(const char *ProgName) {
@@ -402,14 +405,15 @@
   OwningPtr<MCSubtargetInfo>
     STI(TheTarget->createMCSubtargetInfo(TripleName, MCPU, FeaturesStr));
 
+  MCInstPrinter *IP;
   if (FileType == OFT_AssemblyFile) {
-    MCInstPrinter *IP =
+    IP =
       TheTarget->createMCInstPrinter(OutputAsmVariant, *MAI, *MCII, *MRI, *STI);
     MCCodeEmitter *CE = 0;
     MCAsmBackend *MAB = 0;
     if (ShowEncoding) {
       CE = TheTarget->createMCCodeEmitter(*MCII, *MRI, *STI, Ctx);
-      MAB = TheTarget->createMCAsmBackend(TripleName);
+      MAB = TheTarget->createMCAsmBackend(TripleName, MCPU);
     }
     Str.reset(TheTarget->createAsmStreamer(Ctx, FOS, /*asmverbose*/true,
                                            /*useLoc*/ true,
@@ -422,7 +426,7 @@
   } else {
     assert(FileType == OFT_ObjectFile && "Invalid file type!");
     MCCodeEmitter *CE = TheTarget->createMCCodeEmitter(*MCII, *MRI, *STI, Ctx);
-    MCAsmBackend *MAB = TheTarget->createMCAsmBackend(TripleName);
+    MCAsmBackend *MAB = TheTarget->createMCAsmBackend(TripleName, MCPU);
     Str.reset(TheTarget->createMCObjectStreamer(TripleName, Ctx, *MAB,
                                                 FOS, CE, RelaxAll,
                                                 NoExecStack));
@@ -436,6 +440,9 @@
   case AC_Assemble:
     Res = AssembleInput(ProgName, TheTarget, SrcMgr, Ctx, *Str, *MAI, *STI);
     break;
+  case AC_MDisassemble:
+    IP->setUseMarkup(1);
+    // Fall through to do disassembly.
   case AC_Disassemble:
     Res = Disassembler::disassemble(*TheTarget, TripleName, *STI, *Str,
                                     *Buffer, SrcMgr, Out->os());

Modified: llvm/branches/AMDILBackend/tools/llvm-nm/llvm-nm.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-nm/llvm-nm.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-nm/llvm-nm.cpp (original)
+++ llvm/branches/AMDILBackend/tools/llvm-nm/llvm-nm.cpp Tue Jan 15 11:16:16 2013
@@ -110,6 +110,9 @@
 
   cl::opt<bool> SizeSort("size-sort", cl::desc("Sort symbols by size"));
 
+  cl::opt<bool> WithoutAliases("without-aliases", cl::Hidden,
+                               cl::desc("Exclude aliases from output"));
+
   bool PrintAddress = true;
 
   bool MultipleFiles = false;
@@ -256,7 +259,6 @@
   if (GV.hasPrivateLinkage() ||
       GV.hasLinkerPrivateLinkage() ||
       GV.hasLinkerPrivateWeakLinkage() ||
-      GV.hasLinkerPrivateWeakDefAutoLinkage() ||
       GV.hasAvailableExternallyLinkage())
     return;
   char TypeChar = TypeCharForSymbol(GV);
@@ -276,8 +278,9 @@
   std::for_each (M->begin(), M->end(), DumpSymbolNameForGlobalValue);
   std::for_each (M->global_begin(), M->global_end(),
                  DumpSymbolNameForGlobalValue);
-  std::for_each (M->alias_begin(), M->alias_end(),
-                 DumpSymbolNameForGlobalValue);
+  if (!WithoutAliases)
+    std::for_each (M->alias_begin(), M->alias_end(),
+                   DumpSymbolNameForGlobalValue);
 
   SortAndPrintSymbolList();
 }

Modified: llvm/branches/AMDILBackend/tools/llvm-objdump/llvm-objdump.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-objdump/llvm-objdump.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-objdump/llvm-objdump.cpp (original)
+++ llvm/branches/AMDILBackend/tools/llvm-objdump/llvm-objdump.cpp Tue Jan 15 11:16:16 2013
@@ -94,6 +94,12 @@
 SectionHeadersShorter("h", cl::desc("Alias for --section-headers"),
                       cl::aliasopt(SectionHeaders));
 
+static cl::list<std::string>
+MAttrs("mattr",
+  cl::CommaSeparated,
+  cl::desc("Target specific attributes"),
+  cl::value_desc("a1,+a2,-a3,..."));
+
 static StringRef ToolName;
 
 static bool error(error_code ec) {
@@ -169,6 +175,15 @@
   if (!TheTarget)
     return;
 
+  // Package up features to be passed to target/subtarget
+  std::string FeaturesStr;
+  if (MAttrs.size()) {
+    SubtargetFeatures Features;
+    for (unsigned i = 0; i != MAttrs.size(); ++i)
+      Features.AddFeature(MAttrs[i]);
+    FeaturesStr = Features.getString();
+  }
+
   error_code ec;
   for (section_iterator i = Obj->begin_sections(),
                         e = Obj->end_sections();
@@ -233,7 +248,7 @@
     }
 
     OwningPtr<const MCSubtargetInfo> STI(
-      TheTarget->createMCSubtargetInfo(TripleName, "", ""));
+      TheTarget->createMCSubtargetInfo(TripleName, "", FeaturesStr));
 
     if (!STI) {
       errs() << "error: no subtarget info for target " << TripleName << "\n";

Modified: llvm/branches/AMDILBackend/tools/llvm-ranlib/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-ranlib/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-ranlib/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/tools/llvm-ranlib/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -1,5 +1,4 @@
 set(LLVM_LINK_COMPONENTS archive)
-set(LLVM_REQUIRES_EH 1)
 
 add_llvm_tool(llvm-ranlib
   llvm-ranlib.cpp

Modified: llvm/branches/AMDILBackend/tools/llvm-ranlib/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-ranlib/Makefile?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-ranlib/Makefile (original)
+++ llvm/branches/AMDILBackend/tools/llvm-ranlib/Makefile Tue Jan 15 11:16:16 2013
@@ -10,7 +10,6 @@
 LEVEL := ../..
 TOOLNAME := llvm-ranlib
 LINK_COMPONENTS := archive
-REQUIRES_EH := 1
 
 # This tool has no plugins, optimize startup time.
 TOOL_NO_EXPORTS := 1

Modified: llvm/branches/AMDILBackend/tools/llvm-ranlib/llvm-ranlib.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-ranlib/llvm-ranlib.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-ranlib/llvm-ranlib.cpp (original)
+++ llvm/branches/AMDILBackend/tools/llvm-ranlib/llvm-ranlib.cpp Tue Jan 15 11:16:16 2013
@@ -61,41 +61,38 @@
 
   int exitCode = 0;
 
-  // Make sure we don't exit with "unhandled exception".
-  try {
+  // Check the path name of the archive
+  sys::Path ArchivePath;
+  if (!ArchivePath.set(ArchiveName)) {
+    errs() << argv[0] << ": " << "Archive name invalid: " << ArchiveName <<
+      "\n";
+    return 1;
+  }
+
+  // Make sure it exists, we don't create empty archives
+  bool Exists;
+  if (llvm::sys::fs::exists(ArchivePath.str(), Exists) || !Exists) {
+    errs() << argv[0] << ": " << "Archive file does not exist: " <<
+      ArchivePath.str() << "\n";
+    return 1;
+  }
 
-    // Check the path name of the archive
-    sys::Path ArchivePath;
-    if (!ArchivePath.set(ArchiveName))
-      throw std::string("Archive name invalid: ") + ArchiveName;
-
-    // Make sure it exists, we don't create empty archives
-    bool Exists;
-    if (llvm::sys::fs::exists(ArchivePath.str(), Exists) || !Exists)
-      throw std::string("Archive file does not exist");
-
-    std::string err_msg;
-    std::auto_ptr<Archive>
-      AutoArchive(Archive::OpenAndLoad(ArchivePath, Context, &err_msg));
-    Archive* TheArchive = AutoArchive.get();
-    if (!TheArchive)
-      throw err_msg;
-
-    if (TheArchive->writeToDisk(true, false, &err_msg ))
-      throw err_msg;
-
-    if (Verbose)
-      printSymbolTable(TheArchive);
-
-  } catch (const char* msg) {
-    errs() << argv[0] << ": " << msg << "\n\n";
-    exitCode = 1;
-  } catch (const std::string& msg) {
-    errs() << argv[0] << ": " << msg << "\n";
-    exitCode = 2;
-  } catch (...) {
-    errs() << argv[0] << ": An unexpected unknown exception occurred.\n";
-    exitCode = 3;
+  std::string err_msg;
+  std::auto_ptr<Archive>
+    AutoArchive(Archive::OpenAndLoad(ArchivePath, Context, &err_msg));
+  Archive* TheArchive = AutoArchive.get();
+  if (!TheArchive) {
+    errs() << argv[0] << ": " << err_msg << "\n";
+    return 1;
   }
+
+  if (TheArchive->writeToDisk(true, false, &err_msg )) {
+    errs() << argv[0] << ": " << err_msg << "\n";
+    return 1;
+  }
+
+  if (Verbose)
+    printSymbolTable(TheArchive);
+
   return exitCode;
 }

Modified: llvm/branches/AMDILBackend/tools/llvm-rtdyld/llvm-rtdyld.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-rtdyld/llvm-rtdyld.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-rtdyld/llvm-rtdyld.cpp (original)
+++ llvm/branches/AMDILBackend/tools/llvm-rtdyld/llvm-rtdyld.cpp Tue Jan 15 11:16:16 2013
@@ -14,6 +14,8 @@
 #include "llvm/ADT/StringMap.h"
 #include "llvm/ADT/OwningPtr.h"
 #include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/ObjectImage.h"
+#include "llvm/ExecutionEngine/ObjectBuffer.h"
 #include "llvm/Object/MachOObject.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ManagedStatic.h"
@@ -120,12 +122,14 @@
   for(unsigned i = 0, e = InputFileList.size(); i != e; ++i) {
     // Load the input memory buffer.
     OwningPtr<MemoryBuffer> InputBuffer;
+    OwningPtr<ObjectImage>  LoadedObject;
     if (error_code ec = MemoryBuffer::getFileOrSTDIN(InputFileList[i],
                                                      InputBuffer))
       return Error("unable to read input: '" + ec.message() + "'");
 
-    // Load the object file into it.
-    if (Dyld.loadObject(InputBuffer.take())) {
+    // Load the object file
+    LoadedObject.reset(Dyld.loadObject(new ObjectBuffer(InputBuffer.take())));
+    if (!LoadedObject) {
       return Error(Dyld.getErrorString());
     }
   }

Modified: llvm/branches/AMDILBackend/tools/llvm-stress/llvm-stress.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/llvm-stress/llvm-stress.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/llvm-stress/llvm-stress.cpp (original)
+++ llvm/branches/AMDILBackend/tools/llvm-stress/llvm-stress.cpp Tue Jan 15 11:16:16 2013
@@ -126,6 +126,10 @@
   /// C'tor
   Modifier(BasicBlock *Block, PieceTable *PT, Random *R):
     BB(Block),PT(PT),Ran(R),Context(BB->getContext()) {}
+
+  /// virtual D'tor to silence warnings.
+  virtual ~Modifier() {}
+
   /// Add a new instruction.
   virtual void Act() = 0;
   /// Add N new instructions,

Modified: llvm/branches/AMDILBackend/tools/lto/LTOCodeGenerator.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/lto/LTOCodeGenerator.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/lto/LTOCodeGenerator.cpp (original)
+++ llvm/branches/AMDILBackend/tools/lto/LTOCodeGenerator.cpp Tue Jan 15 11:16:16 2013
@@ -15,6 +15,7 @@
 #include "LTOCodeGenerator.h"
 #include "LTOModule.h"
 #include "llvm/Constants.h"
+#include "llvm/DataLayout.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/Linker.h"
 #include "llvm/LLVMContext.h"
@@ -29,7 +30,6 @@
 #include "llvm/MC/SubtargetFeature.h"
 #include "llvm/Target/Mangler.h"
 #include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Transforms/IPO.h"
@@ -163,13 +163,16 @@
   // generate object file
   bool genResult = false;
   tool_output_file objFile(uniqueObjPath.c_str(), errMsg);
-  if (!errMsg.empty())
+  if (!errMsg.empty()) {
+    uniqueObjPath.eraseFromDisk();
     return true;
+  }
 
   genResult = this->generateObjectFile(objFile.os(), errMsg);
   objFile.os().close();
   if (objFile.os().has_error()) {
     objFile.os().clear_error();
+    uniqueObjPath.eraseFromDisk();
     return true;
   }
 
@@ -196,6 +199,7 @@
   OwningPtr<MemoryBuffer> BuffPtr;
   if (error_code ec = MemoryBuffer::getFile(name, BuffPtr, -1, false)) {
     errMsg = ec.message();
+    sys::Path(_nativeObjectPath).eraseFromDisk();
     return NULL;
   }
   _nativeObjectFile = BuffPtr.take();
@@ -214,12 +218,13 @@
   if (_target != NULL)
     return false;
 
-  std::string Triple = _linker.getModule()->getTargetTriple();
-  if (Triple.empty())
-    Triple = sys::getDefaultTargetTriple();
+  std::string TripleStr = _linker.getModule()->getTargetTriple();
+  if (TripleStr.empty())
+    TripleStr = sys::getDefaultTargetTriple();
+  llvm::Triple Triple(TripleStr);
 
   // create target machine from info for merged modules
-  const Target *march = TargetRegistry::lookupTarget(Triple, errMsg);
+  const Target *march = TargetRegistry::lookupTarget(TripleStr, errMsg);
   if (march == NULL)
     return true;
 
@@ -240,11 +245,18 @@
 
   // construct LTOModule, hand over ownership of module and target
   SubtargetFeatures Features;
-  Features.getDefaultSubtargetFeatures(llvm::Triple(Triple));
+  Features.getDefaultSubtargetFeatures(Triple);
   std::string FeatureStr = Features.getString();
+  // Set a default CPU for Darwin triples.
+  if (_mCpu.empty() && Triple.isOSDarwin()) {
+    if (Triple.getArch() == llvm::Triple::x86_64)
+      _mCpu = "core2";
+    else if (Triple.getArch() == llvm::Triple::x86)
+      _mCpu = "yonah";
+  }
   TargetOptions Options;
   LTOModule::getTargetOptions(Options);
-  _target = march->createTargetMachine(Triple, _mCpu, FeatureStr, Options,
+  _target = march->createTargetMachine(TripleStr, _mCpu, FeatureStr, Options,
                                        RelocModel, CodeModel::Default,
                                        CodeGenOpt::Aggressive);
   return false;
@@ -289,7 +301,7 @@
 
   // mark which symbols can not be internalized
   MCContext Context(*_target->getMCAsmInfo(), *_target->getRegisterInfo(),NULL);
-  Mangler mangler(Context, *_target->getTargetData());
+  Mangler mangler(Context, *_target->getDataLayout());
   std::vector<const char*> mustPreserveList;
   SmallPtrSet<GlobalValue*, 8> asmUsed;
 
@@ -357,8 +369,10 @@
   // Start off with a verification pass.
   passes.add(createVerifierPass());
 
-  // Add an appropriate TargetData instance for this module...
-  passes.add(new TargetData(*_target->getTargetData()));
+  // Add an appropriate DataLayout instance for this module...
+  passes.add(new DataLayout(*_target->getDataLayout()));
+  passes.add(new TargetTransformInfo(_target->getScalarTargetTransformInfo(),
+                                     _target->getVectorTargetTransformInfo()));
 
   // Enabling internalize here would use its AllButMain variant. It
   // keeps only main if it exists and does nothing for libraries. Instead
@@ -372,7 +386,7 @@
 
   FunctionPassManager *codeGenPasses = new FunctionPassManager(mergedModule);
 
-  codeGenPasses->add(new TargetData(*_target->getTargetData()));
+  codeGenPasses->add(new DataLayout(*_target->getDataLayout()));
 
   formatted_raw_ostream Out(out);
 

Modified: llvm/branches/AMDILBackend/tools/lto/LTOModule.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/lto/LTOModule.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/lto/LTOModule.cpp (original)
+++ llvm/branches/AMDILBackend/tools/lto/LTOModule.cpp Tue Jan 15 11:16:16 2013
@@ -150,15 +150,20 @@
   cl::desc("Use .init_array instead of .ctors."),
   cl::init(false));
 
+static cl::opt<unsigned>
+SSPBufferSize("stack-protector-buffer-size", cl::init(8),
+              cl::desc("Lower bound for a buffer to be considered for "
+                       "stack protection"));
+
 LTOModule::LTOModule(llvm::Module *m, llvm::TargetMachine *t)
   : _module(m), _target(t),
     _context(*_target->getMCAsmInfo(), *_target->getRegisterInfo(), NULL),
-    _mangler(_context, *_target->getTargetData()) {}
+    _mangler(_context, *_target->getDataLayout()) {}
 
 /// isBitcodeFile - Returns 'true' if the file (or memory contents) is LLVM
 /// bitcode.
 bool LTOModule::isBitcodeFile(const void *mem, size_t length) {
-  return llvm::sys::IdentifyFileType((char*)mem, length)
+  return llvm::sys::IdentifyFileType((const char*)mem, length)
     == llvm::sys::Bitcode_FileType;
 }
 
@@ -252,6 +257,7 @@
   Options.PositionIndependentExecutable = EnablePIE;
   Options.EnableSegmentedStacks = SegmentedStacks;
   Options.UseInitArray = UseInitArray;
+  Options.SSPBufferSize = SSPBufferSize;
 }
 
 LTOModule *LTOModule::makeLTOModule(MemoryBuffer *buffer,
@@ -272,23 +278,31 @@
     return NULL;
   }
 
-  std::string Triple = m->getTargetTriple();
-  if (Triple.empty())
-    Triple = sys::getDefaultTargetTriple();
+  std::string TripleStr = m->getTargetTriple();
+  if (TripleStr.empty())
+    TripleStr = sys::getDefaultTargetTriple();
+  llvm::Triple Triple(TripleStr);
 
   // find machine architecture for this module
-  const Target *march = TargetRegistry::lookupTarget(Triple, errMsg);
+  const Target *march = TargetRegistry::lookupTarget(TripleStr, errMsg);
   if (!march)
     return NULL;
 
   // construct LTOModule, hand over ownership of module and target
   SubtargetFeatures Features;
-  Features.getDefaultSubtargetFeatures(llvm::Triple(Triple));
+  Features.getDefaultSubtargetFeatures(Triple);
   std::string FeatureStr = Features.getString();
+  // Set a default CPU for Darwin triples.
   std::string CPU;
+  if (Triple.isOSDarwin()) {
+    if (Triple.getArch() == llvm::Triple::x86_64)
+      CPU = "core2";
+    else if (Triple.getArch() == llvm::Triple::x86)
+      CPU = "yonah";
+  }
   TargetOptions Options;
   getTargetOptions(Options);
-  TargetMachine *target = march->createTargetMachine(Triple, CPU, FeatureStr,
+  TargetMachine *target = march->createTargetMachine(TripleStr, CPU, FeatureStr,
                                                      Options);
   LTOModule *Ret = new LTOModule(m.take(), target);
   if (Ret->parseSymbols(errMsg)) {
@@ -301,7 +315,7 @@
 
 /// makeBuffer - Create a MemoryBuffer from a memory range.
 MemoryBuffer *LTOModule::makeBuffer(const void *mem, size_t length) {
-  const char *startPtr = (char*)mem;
+  const char *startPtr = (const char*)mem;
   return MemoryBuffer::getMemBuffer(StringRef(startPtr, length), "", false);
 }
 
@@ -487,8 +501,7 @@
 
   // set definition part
   if (def->hasWeakLinkage() || def->hasLinkOnceLinkage() ||
-      def->hasLinkerPrivateWeakLinkage() ||
-      def->hasLinkerPrivateWeakDefAutoLinkage())
+      def->hasLinkerPrivateWeakLinkage())
     attr |= LTO_SYMBOL_DEFINITION_WEAK;
   else if (def->hasCommonLinkage())
     attr |= LTO_SYMBOL_DEFINITION_TENTATIVE;
@@ -504,7 +517,7 @@
            def->hasLinkOnceLinkage() || def->hasCommonLinkage() ||
            def->hasLinkerPrivateWeakLinkage())
     attr |= LTO_SYMBOL_SCOPE_DEFAULT;
-  else if (def->hasLinkerPrivateWeakDefAutoLinkage())
+  else if (def->hasLinkOnceODRAutoHideLinkage())
     attr |= LTO_SYMBOL_SCOPE_DEFAULT_CAN_BE_HIDDEN;
   else
     attr |= LTO_SYMBOL_SCOPE_INTERNAL;

Modified: llvm/branches/AMDILBackend/tools/lto/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/lto/Makefile?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/lto/Makefile (original)
+++ llvm/branches/AMDILBackend/tools/lto/Makefile Tue Jan 15 11:16:16 2013
@@ -49,4 +49,11 @@
                             -Wl,-install_name \
                             -Wl,"@executable_path/../lib/lib$(LIBRARYNAME)$(SHLIBEXT)"
     endif
+
+    # If we're doing an Apple-style build, add the LTO object path.
+    ifeq ($(RC_BUILDIT),YES)
+       TempFile        := $(shell mkdir -p ${OBJROOT}/dSYMs ; mktemp ${OBJROOT}/dSYMs/llvm-lto.XXXXXX)
+       LLVMLibsOptions := $(LLVMLibsOptions) \
+                          -Wl,-object_path_lto -Wl,$(TempFile)
+    endif
 endif

Modified: llvm/branches/AMDILBackend/tools/lto/lto.exports
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/lto/lto.exports?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/lto/lto.exports (original)
+++ llvm/branches/AMDILBackend/tools/lto/lto.exports Tue Jan 15 11:16:16 2013
@@ -30,3 +30,4 @@
 LLVMCreateDisasm
 LLVMDisasmDispose
 LLVMDisasmInstruction
+LLVMSetDisasmOptions

Modified: llvm/branches/AMDILBackend/tools/opt/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/opt/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/opt/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/tools/opt/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-set(LLVM_LINK_COMPONENTS bitreader asmparser bitwriter instrumentation scalaropts ipo vectorize)
+set(LLVM_LINK_COMPONENTS ${LLVM_TARGETS_TO_BUILD} bitreader asmparser bitwriter instrumentation scalaropts ipo vectorize)
 
 add_llvm_tool(opt
   AnalysisWrappers.cpp

Modified: llvm/branches/AMDILBackend/tools/opt/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/opt/LLVMBuild.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/opt/LLVMBuild.txt (original)
+++ llvm/branches/AMDILBackend/tools/opt/LLVMBuild.txt Tue Jan 15 11:16:16 2013
@@ -19,4 +19,4 @@
 type = Tool
 name = opt
 parent = Tools
-required_libraries = AsmParser BitReader BitWriter IPO Instrumentation Scalar
+required_libraries = AsmParser BitReader BitWriter IPO Instrumentation Scalar all-targets

Modified: llvm/branches/AMDILBackend/tools/opt/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/opt/Makefile?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/opt/Makefile (original)
+++ llvm/branches/AMDILBackend/tools/opt/Makefile Tue Jan 15 11:16:16 2013
@@ -9,6 +9,6 @@
 
 LEVEL := ../..
 TOOLNAME := opt
-LINK_COMPONENTS := bitreader bitwriter asmparser instrumentation scalaropts ipo vectorize
+LINK_COMPONENTS := bitreader bitwriter asmparser instrumentation scalaropts ipo vectorize all-targets
 
 include $(LEVEL)/Makefile.common

Modified: llvm/branches/AMDILBackend/tools/opt/opt.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/tools/opt/opt.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/tools/opt/opt.cpp (original)
+++ llvm/branches/AMDILBackend/tools/opt/opt.cpp Tue Jan 15 11:16:16 2013
@@ -13,17 +13,18 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/LLVMContext.h"
+#include "llvm/DataLayout.h"
 #include "llvm/DebugInfo.h"
 #include "llvm/Module.h"
 #include "llvm/PassManager.h"
 #include "llvm/CallGraphSCCPass.h"
+#include "llvm/CodeGen/CommandFlags.h"
 #include "llvm/Bitcode/ReaderWriter.h"
 #include "llvm/Assembly/PrintModulePass.h"
 #include "llvm/Analysis/Verifier.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/RegionPass.h"
 #include "llvm/Analysis/CallGraph.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/ADT/StringSet.h"
@@ -36,7 +37,10 @@
 #include "llvm/Support/PluginLoader.h"
 #include "llvm/Support/PrettyStackTrace.h"
 #include "llvm/Support/SystemUtils.h"
+#include "llvm/Support/TargetRegistry.h"
 #include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/MC/SubtargetFeature.h"
 #include "llvm/LinkAllPasses.h"
 #include "llvm/LinkAllVMCore.h"
 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
@@ -478,6 +482,75 @@
                                  /*RunInliner=*/ !DisableInline);
 }
 
+//===----------------------------------------------------------------------===//
+// CodeGen-related helper functions.
+//
+static TargetOptions GetTargetOptions() {
+  TargetOptions Options;
+  Options.LessPreciseFPMADOption = EnableFPMAD;
+  Options.NoFramePointerElim = DisableFPElim;
+  Options.NoFramePointerElimNonLeaf = DisableFPElimNonLeaf;
+  Options.AllowFPOpFusion = FuseFPOps;
+  Options.UnsafeFPMath = EnableUnsafeFPMath;
+  Options.NoInfsFPMath = EnableNoInfsFPMath;
+  Options.NoNaNsFPMath = EnableNoNaNsFPMath;
+  Options.HonorSignDependentRoundingFPMathOption =
+  EnableHonorSignDependentRoundingFPMath;
+  Options.UseSoftFloat = GenerateSoftFloatCalls;
+  if (FloatABIForCalls != FloatABI::Default)
+    Options.FloatABIType = FloatABIForCalls;
+  Options.NoZerosInBSS = DontPlaceZerosInBSS;
+  Options.GuaranteedTailCallOpt = EnableGuaranteedTailCallOpt;
+  Options.DisableTailCalls = DisableTailCalls;
+  Options.StackAlignmentOverride = OverrideStackAlignment;
+  Options.RealignStack = EnableRealignStack;
+  Options.TrapFuncName = TrapFuncName;
+  Options.PositionIndependentExecutable = EnablePIE;
+  Options.EnableSegmentedStacks = SegmentedStacks;
+  Options.UseInitArray = UseInitArray;
+  Options.SSPBufferSize = SSPBufferSize;
+  return Options;
+}
+
+CodeGenOpt::Level GetCodeGenOptLevel() {
+  if (OptLevelO1)
+    return CodeGenOpt::Less;
+  if (OptLevelO2)
+    return CodeGenOpt::Default;
+  if (OptLevelO3)
+    return CodeGenOpt::Aggressive;
+  return CodeGenOpt::None;
+}
+
+// Returns the TargetMachine instance or zero if no triple is provided.
+static TargetMachine* GetTargetMachine(std::string TripleStr) {
+  if (TripleStr.empty())
+    return 0;
+
+  // Get the target specific parser.
+  std::string Error;
+  Triple TheTriple(Triple::normalize(TargetTriple));
+
+  const Target *TheTarget = TargetRegistry::lookupTarget(MArch, TheTriple,
+                                                         Error);
+  if (!TheTarget) {
+    return 0;
+  }
+
+  // Package up features to be passed to target/subtarget
+  std::string FeaturesStr;
+  if (MAttrs.size()) {
+    SubtargetFeatures Features;
+    for (unsigned i = 0; i != MAttrs.size(); ++i)
+      Features.AddFeature(MAttrs[i]);
+    FeaturesStr = Features.getString();
+  }
+
+  return TheTarget->createTargetMachine(TheTriple.getTriple(),
+                                        MCPU, FeaturesStr, GetTargetOptions(),
+                                        RelocModel, CMModel,
+                                        GetCodeGenOptLevel());
+}
 
 //===----------------------------------------------------------------------===//
 // main for opt
@@ -492,6 +565,9 @@
   llvm_shutdown_obj Y;  // Call llvm_shutdown() on exit.
   LLVMContext &Context = getGlobalContext();
 
+  InitializeAllTargets();
+  InitializeAllTargetMCs();
+
   // Initialize passes
   PassRegistry &Registry = *PassRegistry::getPassRegistry();
   initializeCore(Registry);
@@ -513,10 +589,6 @@
     return 1;
   }
 
-  // Allocate a full target machine description only if necessary.
-  // FIXME: The choice of target should be controllable on the command line.
-  std::auto_ptr<TargetMachine> target;
-
   SMDiagnostic Err;
 
   // Load the input module...
@@ -572,22 +644,28 @@
     TLI->disableAllFunctions();
   Passes.add(TLI);
 
-  // Add an appropriate TargetData instance for this module.
-  TargetData *TD = 0;
+  // Add an appropriate DataLayout instance for this module.
+  DataLayout *TD = 0;
   const std::string &ModuleDataLayout = M.get()->getDataLayout();
   if (!ModuleDataLayout.empty())
-    TD = new TargetData(ModuleDataLayout);
+    TD = new DataLayout(ModuleDataLayout);
   else if (!DefaultDataLayout.empty())
-    TD = new TargetData(DefaultDataLayout);
+    TD = new DataLayout(DefaultDataLayout);
 
   if (TD)
     Passes.add(TD);
 
+  std::auto_ptr<TargetMachine> TM(GetTargetMachine(TargetTriple));
+  if (TM.get()) {
+    Passes.add(new TargetTransformInfo(TM->getScalarTargetTransformInfo(),
+                                       TM->getVectorTargetTransformInfo()));
+  }
+
   OwningPtr<FunctionPassManager> FPasses;
   if (OptLevelO1 || OptLevelO2 || OptLevelOs || OptLevelOz || OptLevelO3) {
     FPasses.reset(new FunctionPassManager(M.get()));
     if (TD)
-      FPasses->add(new TargetData(*TD));
+      FPasses->add(new DataLayout(*TD));
   }
 
   if (PrintBreakpoints) {

Modified: llvm/branches/AMDILBackend/unittests/ADT/APFloatTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ADT/APFloatTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ADT/APFloatTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/ADT/APFloatTest.cpp Tue Jan 15 11:16:16 2013
@@ -635,6 +635,12 @@
   EXPECT_TRUE(inv.bitwiseIsEqual(APFloat(0.5)));
   EXPECT_TRUE(APFloat(2.0f).getExactInverse(&inv));
   EXPECT_TRUE(inv.bitwiseIsEqual(APFloat(0.5f)));
+  EXPECT_TRUE(APFloat(APFloat::IEEEquad, "2.0").getExactInverse(&inv));
+  EXPECT_TRUE(inv.bitwiseIsEqual(APFloat(APFloat::IEEEquad, "0.5")));
+  EXPECT_TRUE(APFloat(APFloat::PPCDoubleDouble, "2.0").getExactInverse(&inv));
+  EXPECT_TRUE(inv.bitwiseIsEqual(APFloat(APFloat::PPCDoubleDouble, "0.5")));
+  EXPECT_TRUE(APFloat(APFloat::x87DoubleExtended, "2.0").getExactInverse(&inv));
+  EXPECT_TRUE(inv.bitwiseIsEqual(APFloat(APFloat::x87DoubleExtended, "0.5")));
 
   // FLT_MIN
   EXPECT_TRUE(APFloat(1.17549435e-38f).getExactInverse(&inv));
@@ -648,6 +654,66 @@
   EXPECT_FALSE(APFloat(1.40129846e-45f).getExactInverse(0));
 }
 
+TEST(APFloatTest, roundToIntegral) {
+  APFloat T(-0.5), S(3.14), R(APFloat::getLargest(APFloat::IEEEdouble)), P(0.0);
+
+  P = T;
+  P.roundToIntegral(APFloat::rmTowardZero);
+  EXPECT_EQ(-0.0, P.convertToDouble());
+  P = T;
+  P.roundToIntegral(APFloat::rmTowardNegative);
+  EXPECT_EQ(-1.0, P.convertToDouble());
+  P = T;
+  P.roundToIntegral(APFloat::rmTowardPositive);
+  EXPECT_EQ(-0.0, P.convertToDouble());
+  P = T;
+  P.roundToIntegral(APFloat::rmNearestTiesToEven);
+  EXPECT_EQ(-0.0, P.convertToDouble());
+
+  P = S;
+  P.roundToIntegral(APFloat::rmTowardZero);
+  EXPECT_EQ(3.0, P.convertToDouble());
+  P = S;
+  P.roundToIntegral(APFloat::rmTowardNegative);
+  EXPECT_EQ(3.0, P.convertToDouble());
+  P = S;
+  P.roundToIntegral(APFloat::rmTowardPositive);
+  EXPECT_EQ(4.0, P.convertToDouble());
+  P = S;
+  P.roundToIntegral(APFloat::rmNearestTiesToEven);
+  EXPECT_EQ(3.0, P.convertToDouble());
+
+  P = R;
+  P.roundToIntegral(APFloat::rmTowardZero);
+  EXPECT_EQ(R.convertToDouble(), P.convertToDouble());
+  P = R;
+  P.roundToIntegral(APFloat::rmTowardNegative);
+  EXPECT_EQ(R.convertToDouble(), P.convertToDouble());
+  P = R;
+  P.roundToIntegral(APFloat::rmTowardPositive);
+  EXPECT_EQ(R.convertToDouble(), P.convertToDouble());
+  P = R;
+  P.roundToIntegral(APFloat::rmNearestTiesToEven);
+  EXPECT_EQ(R.convertToDouble(), P.convertToDouble());
+
+  P = APFloat::getZero(APFloat::IEEEdouble);
+  P.roundToIntegral(APFloat::rmTowardZero);
+  EXPECT_EQ(0.0, P.convertToDouble());
+  P = APFloat::getZero(APFloat::IEEEdouble, true);
+  P.roundToIntegral(APFloat::rmTowardZero);
+  EXPECT_EQ(-0.0, P.convertToDouble());
+  P = APFloat::getNaN(APFloat::IEEEdouble);
+  P.roundToIntegral(APFloat::rmTowardZero);
+  EXPECT_TRUE(IsNAN(P.convertToDouble()));
+  P = APFloat::getInf(APFloat::IEEEdouble);
+  P.roundToIntegral(APFloat::rmTowardZero);
+  EXPECT_TRUE(IsInf(P.convertToDouble()) && P.convertToDouble() > 0.0);
+  P = APFloat::getInf(APFloat::IEEEdouble, true);
+  P.roundToIntegral(APFloat::rmTowardZero);
+  EXPECT_TRUE(IsInf(P.convertToDouble()) && P.convertToDouble() < 0.0);
+
+}
+
 TEST(APFloatTest, getLargest) {
   EXPECT_EQ(3.402823466e+38f, APFloat::getLargest(APFloat::IEEEsingle).convertToFloat());
   EXPECT_EQ(1.7976931348623158e+308, APFloat::getLargest(APFloat::IEEEdouble).convertToDouble());
@@ -677,4 +743,40 @@
   EXPECT_EQ(4294967295.0, test.convertToDouble());
   EXPECT_FALSE(losesInfo);
 }
+
+TEST(APFloatTest, PPCDoubleDouble) {
+  APFloat test(APFloat::PPCDoubleDouble, "1.0");
+  EXPECT_EQ(0x3ff0000000000000ull, test.bitcastToAPInt().getRawData()[0]);
+  EXPECT_EQ(0x0000000000000000ull, test.bitcastToAPInt().getRawData()[1]);
+
+  test.divide(APFloat(APFloat::PPCDoubleDouble, "3.0"), APFloat::rmNearestTiesToEven);
+  EXPECT_EQ(0x3fd5555555555555ull, test.bitcastToAPInt().getRawData()[0]);
+  EXPECT_EQ(0x3c75555555555556ull, test.bitcastToAPInt().getRawData()[1]);
+
+  // LDBL_MAX
+  test = APFloat(APFloat::PPCDoubleDouble, "1.79769313486231580793728971405301e+308");
+  EXPECT_EQ(0x7fefffffffffffffull, test.bitcastToAPInt().getRawData()[0]);
+  EXPECT_EQ(0x7c8ffffffffffffeull, test.bitcastToAPInt().getRawData()[1]);
+
+  // LDBL_MIN
+  test = APFloat(APFloat::PPCDoubleDouble, "2.00416836000897277799610805135016e-292");
+  EXPECT_EQ(0x0360000000000000ull, test.bitcastToAPInt().getRawData()[0]);
+  EXPECT_EQ(0x0000000000000000ull, test.bitcastToAPInt().getRawData()[1]);
+
+  test = APFloat(APFloat::PPCDoubleDouble, "1.0");
+  test.add(APFloat(APFloat::PPCDoubleDouble, "0x1p-105"), APFloat::rmNearestTiesToEven);
+  EXPECT_EQ(0x3ff0000000000000ull, test.bitcastToAPInt().getRawData()[0]);
+  EXPECT_EQ(0x3960000000000000ull, test.bitcastToAPInt().getRawData()[1]);
+
+  test = APFloat(APFloat::PPCDoubleDouble, "1.0");
+  test.add(APFloat(APFloat::PPCDoubleDouble, "0x1p-106"), APFloat::rmNearestTiesToEven);
+  EXPECT_EQ(0x3ff0000000000000ull, test.bitcastToAPInt().getRawData()[0]);
+#if 0 // XFAIL
+  // This is what we would expect with a true double-double implementation
+  EXPECT_EQ(0x3950000000000000ull, test.bitcastToAPInt().getRawData()[1]);
+#else
+  // This is what we get with our 106-bit mantissa approximation
+  EXPECT_EQ(0x0000000000000000ull, test.bitcastToAPInt().getRawData()[1]);
+#endif
+}
 }

Modified: llvm/branches/AMDILBackend/unittests/ADT/BitVectorTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ADT/BitVectorTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ADT/BitVectorTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/ADT/BitVectorTest.cpp Tue Jan 15 11:16:16 2013
@@ -281,5 +281,57 @@
   EXPECT_FALSE(A.anyCommon(B));
   EXPECT_FALSE(B.anyCommon(A));
 }
+
+TYPED_TEST(BitVectorTest, RangeOps) {
+  TypeParam A;
+  A.resize(256);
+  A.reset();
+  A.set(1, 255);
+
+  EXPECT_FALSE(A.test(0));
+  EXPECT_TRUE( A.test(1));
+  EXPECT_TRUE( A.test(23));
+  EXPECT_TRUE( A.test(254));
+  EXPECT_FALSE(A.test(255));
+
+  TypeParam B;
+  B.resize(256);
+  B.set();
+  B.reset(1, 255);
+
+  EXPECT_TRUE( B.test(0));
+  EXPECT_FALSE(B.test(1));
+  EXPECT_FALSE(B.test(23));
+  EXPECT_FALSE(B.test(254));
+  EXPECT_TRUE( B.test(255));
+
+  TypeParam C;
+  C.resize(3);
+  C.reset();
+  C.set(0, 1);
+
+  EXPECT_TRUE(C.test(0));
+  EXPECT_FALSE( C.test(1));
+  EXPECT_FALSE( C.test(2));
+
+  TypeParam D;
+  D.resize(3);
+  D.set();
+  D.reset(0, 1);
+
+  EXPECT_FALSE(D.test(0));
+  EXPECT_TRUE( D.test(1));
+  EXPECT_TRUE( D.test(2));
+
+  TypeParam E;
+  E.resize(128);
+  E.reset();
+  E.set(1, 33);
+
+  EXPECT_FALSE(E.test(0));
+  EXPECT_TRUE( E.test(1));
+  EXPECT_TRUE( E.test(32));
+  EXPECT_FALSE(E.test(33));
+}
 }
 #endif

Modified: llvm/branches/AMDILBackend/unittests/ADT/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ADT/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ADT/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/unittests/ADT/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -2,7 +2,7 @@
   Support
   )
 
-add_llvm_unittest(ADTTests
+set(ADTSources
   APFloatTest.cpp
   APIntTest.cpp
   BitVectorTest.cpp
@@ -13,6 +13,7 @@
   FoldingSet.cpp
   HashingTest.cpp
   ilistTest.cpp
+  ImmutableMapTest.cpp
   ImmutableSetTest.cpp
   IntEqClassesTest.cpp
   IntervalMapTest.cpp
@@ -31,3 +32,16 @@
   TwineTest.cpp
   VariadicFunctionTest.cpp
  )
+
+# These files cannot be compiled with MSVC9 due to a compiler bug.
+if(MSVC AND MSVC_VERSION LESS 1600)
+  set(LLVM_OPTIONAL_SOURCES
+    DenseMapTest.cpp
+    SmallVectorTest.cpp
+    )
+  list(REMOVE_ITEM ADTSources ${LLVM_OPTIONAL_SOURCES})
+endif()
+
+add_llvm_unittest(ADTTests
+  ${ADTSources}
+  )

Modified: llvm/branches/AMDILBackend/unittests/ADT/DenseMapTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ADT/DenseMapTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ADT/DenseMapTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/ADT/DenseMapTest.cpp Tue Jan 15 11:16:16 2013
@@ -330,4 +330,37 @@
   EXPECT_TRUE(map.find_as("d") == map.end());
 }
 
+struct ContiguousDenseMapInfo {
+  static inline unsigned getEmptyKey() { return ~0; }
+  static inline unsigned getTombstoneKey() { return ~0U - 1; }
+  static unsigned getHashValue(const unsigned& Val) { return Val; }
+  static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Test that filling a small dense map with exactly the number of elements in
+// the map grows to have enough space for an empty bucket.
+TEST(DenseMapCustomTest, SmallDenseMapGrowTest) {
+  SmallDenseMap<unsigned, unsigned, 32, ContiguousDenseMapInfo> map;
+  // Add some number of elements, then delete a few to leave us some tombstones.
+  // If we just filled the map with 32 elements we'd grow because of not enough
+  // tombstones which masks the issue here.
+  for (unsigned i = 0; i < 20; ++i)
+    map[i] = i + 1;
+  for (unsigned i = 0; i < 10; ++i)
+    map.erase(i);
+  for (unsigned i = 20; i < 32; ++i)
+    map[i] = i + 1;
+
+  // Size tests
+  EXPECT_EQ(22u, map.size());
+
+  // Try to find an element which doesn't exist.  There was a bug in
+  // SmallDenseMap which led to a map with num elements == small capacity not
+  // having an empty bucket any more.  Finding an element not in the map would
+  // therefore never terminate.
+  EXPECT_TRUE(map.find(32) == map.end());
+}
+
 }

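For context on the regression test above: DenseMap-style tables use open addressing, where a lookup probes successive buckets until it either finds the key or reaches an Empty bucket; Tombstone buckets left behind by erase() do not stop the probe. The sketch below illustrates that invariant only in spirit -- it is not DenseMap's actual probing code, and the bucket layout and probe sequence are simplified assumptions.

#include <vector>

enum BucketState { Empty, Tombstone, Full };

struct Bucket {
  BucketState State;
  unsigned Key;
  Bucket() : State(Empty), Key(0) {}
};

// Returns true if Key is present. A lookup for a missing key terminates only
// when it reaches an Empty bucket -- exactly the invariant the test above
// checks after the map is filled to its inline capacity.
static bool probeFind(const std::vector<Bucket> &Buckets, unsigned Key) {
  const size_t N = Buckets.size();
  for (size_t Step = 0; Step < N; ++Step) {
    const Bucket &B = Buckets[(Key + Step) % N];
    if (B.State == Empty)
      return false;                                   // missing key: stop here
    if (B.State == Full && B.Key == Key)
      return true;                                    // found it
    // Tombstones are skipped but never terminate the probe.
  }
  return false; // defensive cap; a correctly grown table always keeps an Empty bucket
}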
Modified: llvm/branches/AMDILBackend/unittests/ADT/DenseSetTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ADT/DenseSetTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ADT/DenseSetTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/ADT/DenseSetTest.cpp Tue Jan 15 11:16:16 2013
@@ -8,7 +8,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "gtest/gtest.h"
-#include <llvm/ADT/DenseSet.h>
+#include "llvm/ADT/DenseSet.h"
 
 using namespace llvm;
 

Modified: llvm/branches/AMDILBackend/unittests/ADT/StringRefTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ADT/StringRefTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ADT/StringRefTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/ADT/StringRefTest.cpp Tue Jan 15 11:16:16 2013
@@ -456,4 +456,27 @@
   }
 }
 
+
+static const char* BadStrings[] = {
+    "18446744073709551617"  // value just over max
+  , "123456789012345678901" // value way too large
+  , "4t23v"                 // illegal decimal characters
+  , "0x123W56"              // illegal hex characters
+  , "0b2"                   // illegal bin characters
+  , "08"                    // illegal oct characters
+  , "0o8"                   // illegal oct characters
+  , "-123"                  // negative unsigned value
+};
+
+
+TEST(StringRefTest, getAsUnsignedIntegerBadStrings) {
+  unsigned long long U64;
+  for (size_t i = 0; i < array_lengthof(BadStrings); ++i) {
+    bool IsBadNumber = StringRef(BadStrings[i]).getAsInteger(0, U64);
+    ASSERT_TRUE(IsBadNumber);
+  }
+}
+
+
+
 } // end anonymous namespace

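A brief reminder of the return-value convention the new test relies on (an illustrative sketch, not part of this commit): StringRef::getAsInteger returns true on failure and false on success, and a radix of 0 means the base is inferred from the string's prefix, which is why entries such as "0b2" and "0o8" are rejected above.

#include "llvm/ADT/StringRef.h"
#include <cassert>

static void getAsIntegerExample() {
  unsigned long long Val = 0;

  // Radix 0 auto-detects the base from the prefix; "0x1f" parses as hex.
  bool Failed = llvm::StringRef("0x1f").getAsInteger(0, Val);
  assert(!Failed && Val == 31);

  // Malformed input: getAsInteger reports failure by returning true, so the
  // test above asserts true for every entry in BadStrings.
  Failed = llvm::StringRef("0b2").getAsInteger(0, Val);
  assert(Failed);
}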
Modified: llvm/branches/AMDILBackend/unittests/ADT/TripleTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ADT/TripleTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ADT/TripleTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/ADT/TripleTest.cpp Tue Jan 15 11:16:16 2013
@@ -105,6 +105,18 @@
   EXPECT_EQ(Triple::Linux, T.getOS());
   EXPECT_EQ(Triple::UnknownEnvironment, T.getEnvironment());
 
+  T = Triple("powerpc-ibm-aix");
+  EXPECT_EQ(Triple::ppc, T.getArch());
+  EXPECT_EQ(Triple::IBM, T.getVendor());
+  EXPECT_EQ(Triple::AIX, T.getOS());
+  EXPECT_EQ(Triple::UnknownEnvironment, T.getEnvironment());
+
+  T = Triple("powerpc64-ibm-aix");
+  EXPECT_EQ(Triple::ppc64, T.getArch());
+  EXPECT_EQ(Triple::IBM, T.getVendor());
+  EXPECT_EQ(Triple::AIX, T.getOS());
+  EXPECT_EQ(Triple::UnknownEnvironment, T.getEnvironment());
+
   T = Triple("powerpc-dunno-notsure");
   EXPECT_EQ(Triple::ppc, T.getArch());
   EXPECT_EQ(Triple::UnknownVendor, T.getVendor());

Modified: llvm/branches/AMDILBackend/unittests/Analysis/ScalarEvolutionTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/Analysis/ScalarEvolutionTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/Analysis/ScalarEvolutionTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/Analysis/ScalarEvolutionTest.cpp Tue Jan 15 11:16:16 2013
@@ -7,14 +7,14 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include <llvm/Analysis/ScalarEvolutionExpressions.h>
-#include <llvm/Analysis/LoopInfo.h>
-#include <llvm/GlobalVariable.h>
-#include <llvm/Constants.h>
-#include <llvm/LLVMContext.h>
-#include <llvm/Module.h>
-#include <llvm/PassManager.h>
-#include <llvm/ADT/SmallVector.h>
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Constants.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/ADT/SmallVector.h"
 #include "gtest/gtest.h"
 
 namespace llvm {

Modified: llvm/branches/AMDILBackend/unittests/ExecutionEngine/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ExecutionEngine/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ExecutionEngine/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/unittests/ExecutionEngine/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -7,3 +7,4 @@
   )
 
 add_subdirectory(JIT)
+add_subdirectory(MCJIT)

Modified: llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -14,8 +14,6 @@
   )
 
 if( LLVM_USE_INTEL_JITEVENTS )
-  include_directories( ${LLVM_INTEL_JITEVENTS_INCDIR} )
-  link_directories( ${LLVM_INTEL_JITEVENTS_LIBDIR} )
   set(ProfileTestSources
     IntelJITEventListenerTest.cpp
     )

Modified: llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/IntelJITEventListenerTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/IntelJITEventListenerTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/IntelJITEventListenerTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/IntelJITEventListenerTest.cpp Tue Jan 15 11:16:16 2013
@@ -11,7 +11,10 @@
 
 using namespace llvm;
 
-#include "llvm/ExecutionEngine/IntelJITEventsWrapper.h"
+// Because we want to keep the implementation details of the Intel API used to
+// communicate with Amplifier out of the public header files, the header below
+// is included from the source tree instead.
+#include "../../../lib/ExecutionEngine/IntelJITEvents/IntelJITEventsWrapper.h"
 
 #include <map>
 #include <list>
@@ -80,7 +83,7 @@
     EXPECT_TRUE(0 != MockWrapper);
 
     Listener.reset(JITEventListener::createIntelJITEventListener(
-      MockWrapper.get()));
+      MockWrapper.take()));
     EXPECT_TRUE(0 != Listener);
     EE->RegisterJITEventListener(Listener.get());
   }

Modified: llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/JITTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/JITTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/JITTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/JITTest.cpp Tue Jan 15 11:16:16 2013
@@ -224,6 +224,9 @@
   OwningPtr<ExecutionEngine> TheJIT;
 };
 
+// Tests on ARM and PowerPC disabled as we're running the old jit
+#if !defined(__arm__) && !defined(__powerpc__)
+
 // Regression test for a bug.  The JIT used to allocate globals inside the same
 // memory block used for the function, and when the function code was freed,
 // the global was left in the same place.  This test allocates a function
@@ -292,12 +295,14 @@
   EXPECT_EQ(3, *GPtr);
 }
 
+#endif // !defined(__arm__) && !defined(__powerpc__)
+
 int PlusOne(int arg) {
   return arg + 1;
 }
 
-// ARM tests disabled pending fix for PR10783.
-#if !defined(__arm__)
+// ARM and PowerPC tests disabled pending fix for PR10783.
+#if !defined(__arm__) && !defined(__powerpc__)
 TEST_F(JITTest, FarCallToKnownFunction) {
   // x86-64 can only make direct calls to functions within 32 bits of
   // the current PC.  To call anything farther away, we have to load
@@ -475,7 +480,7 @@
   EXPECT_EQ(RJMM->startExceptionTableCalls.size(),
             NumTablesDeallocated);
 }
-#endif // !defined(__arm__)
+#endif // !defined(__arm__) && !defined(__powerpc__)
 
 // ARM, MIPS and PPC still emit stubs for calls since the target may be
 // too far away to call directly.  This #if can probably be removed when
@@ -521,6 +526,9 @@
 }
 #endif  // !ARM && !PPC
 
+// Tests on ARM and PowerPC disabled as we're running the old jit
+#if !defined(__arm__) && !defined(__powerpc__)
+
 TEST_F(JITTest, FunctionPointersOutliveTheirCreator) {
   TheJIT->DisableLazyCompilation(true);
   LoadAssembly("define i8()* @get_foo_addr() { "
@@ -555,10 +563,13 @@
 #endif
 }
 
-// ARM does not have an implementation
+#endif //!defined(__arm__) && !defined(__powerpc__)
+
+// Tests on ARM and PowerPC disabled as we're running the old jit
+// In addition, ARM does not have an implementation
 // of replaceMachineCodeForFunction(), so recompileAndRelinkFunction
 // doesn't work.
-#if !defined(__arm__)
+#if !defined(__arm__) && !defined(__powerpc__)
 TEST_F(JITTest, FunctionIsRecompiledAndRelinked) {
   Function *F = Function::Create(TypeBuilder<int(void), false>::get(Context),
                                  GlobalValue::ExternalLinkage, "test", M);
@@ -589,16 +600,19 @@
   EXPECT_EQ(2, OrigFPtr())
     << "The old pointer's target should now jump to the new version";
 }
-#endif  // !defined(__arm__)
+#endif  // !defined(__arm__) && !defined(__powerpc__)
 
 }  // anonymous namespace
 // This variable is intentionally defined differently in the statically-compiled
 // program from the IR input to the JIT to assert that the JIT doesn't use its
 // definition.
 extern "C" int32_t JITTest_AvailableExternallyGlobal;
-int32_t JITTest_AvailableExternallyGlobal = 42;
+int32_t JITTest_AvailableExternallyGlobal LLVM_ATTRIBUTE_USED = 42;
 namespace {
 
+// Tests on ARM and PowerPC disabled as we're running the old jit
+#if !defined(__arm__) && !defined(__powerpc__)
+
 TEST_F(JITTest, AvailableExternallyGlobalIsntEmitted) {
   TheJIT->DisableLazyCompilation(true);
   LoadAssembly("@JITTest_AvailableExternallyGlobal = "
@@ -615,18 +629,19 @@
   EXPECT_EQ(42, loader()) << "func should return 42 from the external global,"
                           << " not 7 from the IR version.";
 }
-
+#endif //!defined(__arm__) && !defined(__powerpc__)
 }  // anonymous namespace
 // This function is intentionally defined differently in the statically-compiled
 // program from the IR input to the JIT to assert that the JIT doesn't use its
 // definition.
+extern "C" int32_t JITTest_AvailableExternallyFunction() LLVM_ATTRIBUTE_USED;
 extern "C" int32_t JITTest_AvailableExternallyFunction() {
   return 42;
 }
 namespace {
 
-// ARM tests disabled pending fix for PR10783.
-#if !defined(__arm__)
+// ARM and PowerPC tests disabled pending fix for PR10783.
+#if !defined(__arm__) && !defined(__powerpc__)
 TEST_F(JITTest, AvailableExternallyFunctionIsntCompiled) {
   TheJIT->DisableLazyCompilation(true);
   LoadAssembly("define available_externally i32 "
@@ -782,7 +797,7 @@
     (intptr_t)TheJIT->getPointerToFunction(recur1IR));
   EXPECT_EQ(3, recur1(4));
 }
-#endif // !defined(__arm__)
+#endif // !defined(__arm__) && !defined(__powerpc__)
 
 // This code is copied from JITEventListenerTest, but it only runs once for all
 // the tests in this directory.  Everything seems fine, but that's strange

Modified: llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/Makefile?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/Makefile (original)
+++ llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/Makefile Tue Jan 15 11:16:16 2013
@@ -35,8 +35,15 @@
   LINK_COMPONENTS += oprofilejit
 endif
 
+EXPORTED_SYMBOL_FILE = $(PROJ_OBJ_DIR)/JITTests.exports
 
 include $(LLVM_SRC_ROOT)/unittests/Makefile.unittest
 
 # Permit these tests to use the JIT's symbolic lookup.
 LD.Flags += $(RDYNAMIC)
+
+# Symbol exports are necessary (at least for now) when building with LTO.
+$(LLVMUnitTestExe): $(NativeExportsFile)
+$(PROJ_OBJ_DIR)/JITTests.exports: $(PROJ_SRC_DIR)/JITTests.def $(PROJ_OBJ_DIR)/.dir
+	tail -n +2 $< > $@
+

Modified: llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/MultiJITTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/MultiJITTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/MultiJITTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/ExecutionEngine/JIT/MultiJITTest.cpp Tue Jan 15 11:16:16 2013
@@ -65,8 +65,8 @@
   FooF2 = M2->getFunction("foo2");
 }
 
-// ARM tests disabled pending fix for PR10783.
-#if !defined(__arm__)
+// ARM and PowerPC tests disabled pending fix for PR10783.
+#if !defined(__arm__) && !defined(__powerpc__)
 
 TEST(MultiJitTest, EagerMode) {
   LLVMContext Context1;
@@ -176,6 +176,6 @@
 #endif
   EXPECT_TRUE(sa == fa);
 }
-#endif  // !defined(__arm__)
+#endif  // !defined(__arm__) && !defined(__powerpc__)
 
 }  // anonymous namespace

Propchange: llvm/branches/AMDILBackend/unittests/ExecutionEngine/MCJIT/
------------------------------------------------------------------------------
    bugtraq:number = true

Modified: llvm/branches/AMDILBackend/unittests/ExecutionEngine/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/ExecutionEngine/Makefile?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/ExecutionEngine/Makefile (original)
+++ llvm/branches/AMDILBackend/unittests/ExecutionEngine/Makefile Tue Jan 15 11:16:16 2013
@@ -10,7 +10,7 @@
 LEVEL = ../..
 TESTNAME = ExecutionEngine
 LINK_COMPONENTS :=interpreter
-PARALLEL_DIRS = JIT
+PARALLEL_DIRS = JIT MCJIT
 
 include $(LEVEL)/Makefile.config
 include $(LLVM_SRC_ROOT)/unittests/Makefile.unittest

Modified: llvm/branches/AMDILBackend/unittests/Support/AlignOfTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/Support/AlignOfTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/Support/AlignOfTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/Support/AlignOfTest.cpp Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-//===- llvm/unittest/Support/AlignOfTest.cpp - Alignment utility tests ----===//
+//=== - llvm/unittest/Support/AlignOfTest.cpp - Alignment utility tests ----===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -23,31 +23,25 @@
 #endif
 
 // Define some fixed alignment types to use in these tests.
-#if __cplusplus == 201103L || __has_feature(cxx_alignas)
-typedef char alignas(1) A1;
-typedef char alignas(2) A2;
-typedef char alignas(4) A4;
-typedef char alignas(8) A8;
-#elif defined(__clang__) || defined(__GNUC__)
-typedef char A1 __attribute__((aligned(1)));
-typedef char A2 __attribute__((aligned(2)));
-typedef char A4 __attribute__((aligned(4)));
-typedef char A8 __attribute__((aligned(8)));
+#if __has_feature(cxx_alignas)
+struct alignas(1) A1 { };
+struct alignas(2) A2 { };
+struct alignas(4) A4 { };
+struct alignas(8) A8 { };
+#elif defined(__GNUC__)
+struct A1 { } __attribute__((aligned(1)));
+struct A2 { } __attribute__((aligned(2)));
+struct A4 { } __attribute__((aligned(4)));
+struct A8 { } __attribute__((aligned(8)));
 #elif defined(_MSC_VER)
-typedef __declspec(align(1)) char A1;
-typedef __declspec(align(2)) char A2;
-typedef __declspec(align(4)) char A4;
-typedef __declspec(align(8)) char A8;
+__declspec(align(1)) struct A1 { };
+__declspec(align(2)) struct A2 { };
+__declspec(align(4)) struct A4 { };
+__declspec(align(8)) struct A8 { };
 #else
 # error No supported align as directive.
 #endif
 
-// Wrap the forced aligned types in structs to hack around compiler bugs.
-struct SA1 { A1 a; };
-struct SA2 { A2 a; };
-struct SA4 { A4 a; };
-struct SA8 { A8 a; };
-
 struct S1 {};
 struct S2 { char a; };
 struct S3 { int x; };
@@ -72,6 +66,17 @@
 struct V7 : virtual V2, virtual V6 { virtual ~V7(); };
 struct V8 : V5, virtual V6, V7 { double zz; virtual ~V8(); };
 
+double S6::f() { return 0.0; }
+float D2::g() { return 0.0f; }
+V1::~V1() {}
+V2::~V2() {}
+V3::~V3() {}
+V4::~V4() {}
+V5::~V5() {}
+V6::~V6() {}
+V7::~V7() {}
+V8::~V8() {}
+
 // Ensure alignment is a compile-time constant.
 char LLVM_ATTRIBUTE_UNUSED test_arr1
   [AlignOf<char>::Alignment > 0]
@@ -90,11 +95,7 @@
   [AlignOf<A1>::Alignment > 0]
   [AlignOf<A2>::Alignment > 0]
   [AlignOf<A4>::Alignment > 0]
-  [AlignOf<A8>::Alignment > 0]
-  [AlignOf<SA1>::Alignment > 0]
-  [AlignOf<SA2>::Alignment > 0]
-  [AlignOf<SA4>::Alignment > 0]
-  [AlignOf<SA8>::Alignment > 0];
+  [AlignOf<A8>::Alignment > 0];
 char LLVM_ATTRIBUTE_UNUSED test_arr3
   [AlignOf<S1>::Alignment > 0]
   [AlignOf<S2>::Alignment > 0]
@@ -123,20 +124,10 @@
   [AlignOf<V8>::Alignment > 0];
 
 TEST(AlignOfTest, BasicAlignmentInvariants) {
-  // For a very strange reason, many compilers do not support this. Both Clang
-  // and GCC fail to align these properly.
-  EXPECT_EQ(1u, alignOf<A1>());
-#if 0
-  EXPECT_EQ(2u, alignOf<A2>());
-  EXPECT_EQ(4u, alignOf<A4>());
-  EXPECT_EQ(8u, alignOf<A8>());
-#endif
-
-  // But once wrapped in structs, the alignment is correctly managed.
-  EXPECT_LE(1u, alignOf<SA1>());
-  EXPECT_LE(2u, alignOf<SA2>());
-  EXPECT_LE(4u, alignOf<SA4>());
-  EXPECT_LE(8u, alignOf<SA8>());
+  EXPECT_LE(1u, alignOf<A1>());
+  EXPECT_LE(2u, alignOf<A2>());
+  EXPECT_LE(4u, alignOf<A4>());
+  EXPECT_LE(8u, alignOf<A8>());
 
   EXPECT_EQ(1u, alignOf<char>());
   EXPECT_LE(alignOf<char>(),   alignOf<short>());
@@ -174,154 +165,150 @@
 }
 
 TEST(AlignOfTest, BasicAlignedArray) {
-  // Note: this code exclusively uses the struct-wrapped arbitrarily aligned
-  // types because of the bugs mentioned above where GCC and Clang both
-  // disregard the arbitrary alignment specifier until the type is used to
-  // declare a member of a struct.
-  EXPECT_LE(1u, alignOf<AlignedCharArray<SA1>::union_type>());
-  EXPECT_LE(2u, alignOf<AlignedCharArray<SA2>::union_type>());
-  EXPECT_LE(4u, alignOf<AlignedCharArray<SA4>::union_type>());
-  EXPECT_LE(8u, alignOf<AlignedCharArray<SA8>::union_type>());
-
-  EXPECT_LE(1u, sizeof(AlignedCharArray<SA1>::union_type));
-  EXPECT_LE(2u, sizeof(AlignedCharArray<SA2>::union_type));
-  EXPECT_LE(4u, sizeof(AlignedCharArray<SA4>::union_type));
-  EXPECT_LE(8u, sizeof(AlignedCharArray<SA8>::union_type));
-
-  EXPECT_EQ(1u, (alignOf<AlignedCharArray<SA1>::union_type>()));
-  EXPECT_EQ(2u, (alignOf<AlignedCharArray<SA1, SA2>::union_type>()));
-  EXPECT_EQ(4u, (alignOf<AlignedCharArray<SA1, SA2, SA4>::union_type>()));
-  EXPECT_EQ(8u, (alignOf<AlignedCharArray<SA1, SA2, SA4, SA8>::union_type>()));
-
-  EXPECT_EQ(1u, sizeof(AlignedCharArray<SA1>::union_type));
-  EXPECT_EQ(2u, sizeof(AlignedCharArray<SA1, SA2>::union_type));
-  EXPECT_EQ(4u, sizeof(AlignedCharArray<SA1, SA2, SA4>::union_type));
-  EXPECT_EQ(8u, sizeof(AlignedCharArray<SA1, SA2, SA4, SA8>::union_type));
-
-  EXPECT_EQ(1u, (alignOf<AlignedCharArray<SA1[1]>::union_type>()));
-  EXPECT_EQ(2u, (alignOf<AlignedCharArray<SA1[2], SA2[1]>::union_type>()));
-  EXPECT_EQ(4u, (alignOf<AlignedCharArray<SA1[42], SA2[55],
-                                          SA4[13]>::union_type>()));
-  EXPECT_EQ(8u, (alignOf<AlignedCharArray<SA1[2], SA2[1],
-                                          SA4, SA8>::union_type>()));
-
-  EXPECT_EQ(1u,  sizeof(AlignedCharArray<SA1[1]>::union_type));
-  EXPECT_EQ(2u,  sizeof(AlignedCharArray<SA1[2], SA2[1]>::union_type));
-  EXPECT_EQ(4u,  sizeof(AlignedCharArray<SA1[3], SA2[2], SA4>::union_type));
-  EXPECT_EQ(16u, sizeof(AlignedCharArray<SA1, SA2[3],
-                                         SA4[3], SA8>::union_type));
+  EXPECT_LE(1u, alignOf<AlignedCharArrayUnion<A1> >());
+  EXPECT_LE(2u, alignOf<AlignedCharArrayUnion<A2> >());
+  EXPECT_LE(4u, alignOf<AlignedCharArrayUnion<A4> >());
+  EXPECT_LE(8u, alignOf<AlignedCharArrayUnion<A8> >());
+
+  EXPECT_LE(1u, sizeof(AlignedCharArrayUnion<A1>));
+  EXPECT_LE(2u, sizeof(AlignedCharArrayUnion<A2>));
+  EXPECT_LE(4u, sizeof(AlignedCharArrayUnion<A4>));
+  EXPECT_LE(8u, sizeof(AlignedCharArrayUnion<A8>));
+
+  EXPECT_EQ(1u, (alignOf<AlignedCharArrayUnion<A1> >()));
+  EXPECT_EQ(2u, (alignOf<AlignedCharArrayUnion<A1, A2> >()));
+  EXPECT_EQ(4u, (alignOf<AlignedCharArrayUnion<A1, A2, A4> >()));
+  EXPECT_EQ(8u, (alignOf<AlignedCharArrayUnion<A1, A2, A4, A8> >()));
+
+  EXPECT_EQ(1u, sizeof(AlignedCharArrayUnion<A1>));
+  EXPECT_EQ(2u, sizeof(AlignedCharArrayUnion<A1, A2>));
+  EXPECT_EQ(4u, sizeof(AlignedCharArrayUnion<A1, A2, A4>));
+  EXPECT_EQ(8u, sizeof(AlignedCharArrayUnion<A1, A2, A4, A8>));
+
+  EXPECT_EQ(1u, (alignOf<AlignedCharArrayUnion<A1[1]> >()));
+  EXPECT_EQ(2u, (alignOf<AlignedCharArrayUnion<A1[2], A2[1]> >()));
+  EXPECT_EQ(4u, (alignOf<AlignedCharArrayUnion<A1[42], A2[55],
+                                               A4[13]> >()));
+  EXPECT_EQ(8u, (alignOf<AlignedCharArrayUnion<A1[2], A2[1],
+                                               A4, A8> >()));
+
+  EXPECT_EQ(1u,  sizeof(AlignedCharArrayUnion<A1[1]>));
+  EXPECT_EQ(2u,  sizeof(AlignedCharArrayUnion<A1[2], A2[1]>));
+  EXPECT_EQ(4u,  sizeof(AlignedCharArrayUnion<A1[3], A2[2], A4>));
+  EXPECT_EQ(16u, sizeof(AlignedCharArrayUnion<A1, A2[3],
+                                              A4[3], A8>));
 
   // For other tests we simply assert that the alignment of the union matches
   // that of the fundamental type and hope that we don't have any weird type
   // productions that would trigger bugs.
-  EXPECT_EQ(alignOf<char>(), alignOf<AlignedCharArray<char>::union_type>());
-  EXPECT_EQ(alignOf<short>(), alignOf<AlignedCharArray<short>::union_type>());
-  EXPECT_EQ(alignOf<int>(), alignOf<AlignedCharArray<int>::union_type>());
-  EXPECT_EQ(alignOf<long>(), alignOf<AlignedCharArray<long>::union_type>());
+  EXPECT_EQ(alignOf<char>(), alignOf<AlignedCharArrayUnion<char> >());
+  EXPECT_EQ(alignOf<short>(), alignOf<AlignedCharArrayUnion<short> >());
+  EXPECT_EQ(alignOf<int>(), alignOf<AlignedCharArrayUnion<int> >());
+  EXPECT_EQ(alignOf<long>(), alignOf<AlignedCharArrayUnion<long> >());
   EXPECT_EQ(alignOf<long long>(),
-            alignOf<AlignedCharArray<long long>::union_type>());
-  EXPECT_EQ(alignOf<float>(), alignOf<AlignedCharArray<float>::union_type>());
-  EXPECT_EQ(alignOf<double>(), alignOf<AlignedCharArray<double>::union_type>());
+            alignOf<AlignedCharArrayUnion<long long> >());
+  EXPECT_EQ(alignOf<float>(), alignOf<AlignedCharArrayUnion<float> >());
+  EXPECT_EQ(alignOf<double>(), alignOf<AlignedCharArrayUnion<double> >());
   EXPECT_EQ(alignOf<long double>(),
-            alignOf<AlignedCharArray<long double>::union_type>());
-  EXPECT_EQ(alignOf<void *>(), alignOf<AlignedCharArray<void *>::union_type>());
-  EXPECT_EQ(alignOf<int *>(), alignOf<AlignedCharArray<int *>::union_type>());
+            alignOf<AlignedCharArrayUnion<long double> >());
+  EXPECT_EQ(alignOf<void *>(), alignOf<AlignedCharArrayUnion<void *> >());
+  EXPECT_EQ(alignOf<int *>(), alignOf<AlignedCharArrayUnion<int *> >());
   EXPECT_EQ(alignOf<double (*)(double)>(),
-            alignOf<AlignedCharArray<double (*)(double)>::union_type>());
+            alignOf<AlignedCharArrayUnion<double (*)(double)> >());
   EXPECT_EQ(alignOf<double (S6::*)()>(),
-            alignOf<AlignedCharArray<double (S6::*)()>::union_type>());
-  EXPECT_EQ(alignOf<S1>(), alignOf<AlignedCharArray<S1>::union_type>());
-  EXPECT_EQ(alignOf<S2>(), alignOf<AlignedCharArray<S2>::union_type>());
-  EXPECT_EQ(alignOf<S3>(), alignOf<AlignedCharArray<S3>::union_type>());
-  EXPECT_EQ(alignOf<S4>(), alignOf<AlignedCharArray<S4>::union_type>());
-  EXPECT_EQ(alignOf<S5>(), alignOf<AlignedCharArray<S5>::union_type>());
-  EXPECT_EQ(alignOf<S6>(), alignOf<AlignedCharArray<S6>::union_type>());
-  EXPECT_EQ(alignOf<D1>(), alignOf<AlignedCharArray<D1>::union_type>());
-  EXPECT_EQ(alignOf<D2>(), alignOf<AlignedCharArray<D2>::union_type>());
-  EXPECT_EQ(alignOf<D3>(), alignOf<AlignedCharArray<D3>::union_type>());
-  EXPECT_EQ(alignOf<D4>(), alignOf<AlignedCharArray<D4>::union_type>());
-  EXPECT_EQ(alignOf<D5>(), alignOf<AlignedCharArray<D5>::union_type>());
-  EXPECT_EQ(alignOf<D6>(), alignOf<AlignedCharArray<D6>::union_type>());
-  EXPECT_EQ(alignOf<D7>(), alignOf<AlignedCharArray<D7>::union_type>());
-  EXPECT_EQ(alignOf<D8>(), alignOf<AlignedCharArray<D8>::union_type>());
-  EXPECT_EQ(alignOf<D9>(), alignOf<AlignedCharArray<D9>::union_type>());
-  EXPECT_EQ(alignOf<V1>(), alignOf<AlignedCharArray<V1>::union_type>());
-  EXPECT_EQ(alignOf<V2>(), alignOf<AlignedCharArray<V2>::union_type>());
-  EXPECT_EQ(alignOf<V3>(), alignOf<AlignedCharArray<V3>::union_type>());
-  EXPECT_EQ(alignOf<V4>(), alignOf<AlignedCharArray<V4>::union_type>());
-  EXPECT_EQ(alignOf<V5>(), alignOf<AlignedCharArray<V5>::union_type>());
-  EXPECT_EQ(alignOf<V6>(), alignOf<AlignedCharArray<V6>::union_type>());
-  EXPECT_EQ(alignOf<V7>(), alignOf<AlignedCharArray<V7>::union_type>());
+            alignOf<AlignedCharArrayUnion<double (S6::*)()> >());
+  EXPECT_EQ(alignOf<S1>(), alignOf<AlignedCharArrayUnion<S1> >());
+  EXPECT_EQ(alignOf<S2>(), alignOf<AlignedCharArrayUnion<S2> >());
+  EXPECT_EQ(alignOf<S3>(), alignOf<AlignedCharArrayUnion<S3> >());
+  EXPECT_EQ(alignOf<S4>(), alignOf<AlignedCharArrayUnion<S4> >());
+  EXPECT_EQ(alignOf<S5>(), alignOf<AlignedCharArrayUnion<S5> >());
+  EXPECT_EQ(alignOf<S6>(), alignOf<AlignedCharArrayUnion<S6> >());
+  EXPECT_EQ(alignOf<D1>(), alignOf<AlignedCharArrayUnion<D1> >());
+  EXPECT_EQ(alignOf<D2>(), alignOf<AlignedCharArrayUnion<D2> >());
+  EXPECT_EQ(alignOf<D3>(), alignOf<AlignedCharArrayUnion<D3> >());
+  EXPECT_EQ(alignOf<D4>(), alignOf<AlignedCharArrayUnion<D4> >());
+  EXPECT_EQ(alignOf<D5>(), alignOf<AlignedCharArrayUnion<D5> >());
+  EXPECT_EQ(alignOf<D6>(), alignOf<AlignedCharArrayUnion<D6> >());
+  EXPECT_EQ(alignOf<D7>(), alignOf<AlignedCharArrayUnion<D7> >());
+  EXPECT_EQ(alignOf<D8>(), alignOf<AlignedCharArrayUnion<D8> >());
+  EXPECT_EQ(alignOf<D9>(), alignOf<AlignedCharArrayUnion<D9> >());
+  EXPECT_EQ(alignOf<V1>(), alignOf<AlignedCharArrayUnion<V1> >());
+  EXPECT_EQ(alignOf<V2>(), alignOf<AlignedCharArrayUnion<V2> >());
+  EXPECT_EQ(alignOf<V3>(), alignOf<AlignedCharArrayUnion<V3> >());
+  EXPECT_EQ(alignOf<V4>(), alignOf<AlignedCharArrayUnion<V4> >());
+  EXPECT_EQ(alignOf<V5>(), alignOf<AlignedCharArrayUnion<V5> >());
+  EXPECT_EQ(alignOf<V6>(), alignOf<AlignedCharArrayUnion<V6> >());
+  EXPECT_EQ(alignOf<V7>(), alignOf<AlignedCharArrayUnion<V7> >());
 
   // Some versions of MSVC get this wrong somewhat disturbingly. The failure
   // appears to be benign: alignOf<V8>() produces a preposterous value: 12
 #ifndef _MSC_VER
-  EXPECT_EQ(alignOf<V8>(), alignOf<AlignedCharArray<V8>::union_type>());
+  EXPECT_EQ(alignOf<V8>(), alignOf<AlignedCharArrayUnion<V8> >());
 #endif
 
-  EXPECT_EQ(sizeof(char), sizeof(AlignedCharArray<char>::union_type));
-  EXPECT_EQ(sizeof(char[1]), sizeof(AlignedCharArray<char[1]>::union_type));
-  EXPECT_EQ(sizeof(char[2]), sizeof(AlignedCharArray<char[2]>::union_type));
-  EXPECT_EQ(sizeof(char[3]), sizeof(AlignedCharArray<char[3]>::union_type));
-  EXPECT_EQ(sizeof(char[4]), sizeof(AlignedCharArray<char[4]>::union_type));
-  EXPECT_EQ(sizeof(char[5]), sizeof(AlignedCharArray<char[5]>::union_type));
-  EXPECT_EQ(sizeof(char[8]), sizeof(AlignedCharArray<char[8]>::union_type));
-  EXPECT_EQ(sizeof(char[13]), sizeof(AlignedCharArray<char[13]>::union_type));
-  EXPECT_EQ(sizeof(char[16]), sizeof(AlignedCharArray<char[16]>::union_type));
-  EXPECT_EQ(sizeof(char[21]), sizeof(AlignedCharArray<char[21]>::union_type));
-  EXPECT_EQ(sizeof(char[32]), sizeof(AlignedCharArray<char[32]>::union_type));
-  EXPECT_EQ(sizeof(short), sizeof(AlignedCharArray<short>::union_type));
-  EXPECT_EQ(sizeof(int), sizeof(AlignedCharArray<int>::union_type));
-  EXPECT_EQ(sizeof(long), sizeof(AlignedCharArray<long>::union_type));
+  EXPECT_EQ(sizeof(char), sizeof(AlignedCharArrayUnion<char>));
+  EXPECT_EQ(sizeof(char[1]), sizeof(AlignedCharArrayUnion<char[1]>));
+  EXPECT_EQ(sizeof(char[2]), sizeof(AlignedCharArrayUnion<char[2]>));
+  EXPECT_EQ(sizeof(char[3]), sizeof(AlignedCharArrayUnion<char[3]>));
+  EXPECT_EQ(sizeof(char[4]), sizeof(AlignedCharArrayUnion<char[4]>));
+  EXPECT_EQ(sizeof(char[5]), sizeof(AlignedCharArrayUnion<char[5]>));
+  EXPECT_EQ(sizeof(char[8]), sizeof(AlignedCharArrayUnion<char[8]>));
+  EXPECT_EQ(sizeof(char[13]), sizeof(AlignedCharArrayUnion<char[13]>));
+  EXPECT_EQ(sizeof(char[16]), sizeof(AlignedCharArrayUnion<char[16]>));
+  EXPECT_EQ(sizeof(char[21]), sizeof(AlignedCharArrayUnion<char[21]>));
+  EXPECT_EQ(sizeof(char[32]), sizeof(AlignedCharArrayUnion<char[32]>));
+  EXPECT_EQ(sizeof(short), sizeof(AlignedCharArrayUnion<short>));
+  EXPECT_EQ(sizeof(int), sizeof(AlignedCharArrayUnion<int>));
+  EXPECT_EQ(sizeof(long), sizeof(AlignedCharArrayUnion<long>));
   EXPECT_EQ(sizeof(long long),
-            sizeof(AlignedCharArray<long long>::union_type));
-  EXPECT_EQ(sizeof(float), sizeof(AlignedCharArray<float>::union_type));
-  EXPECT_EQ(sizeof(double), sizeof(AlignedCharArray<double>::union_type));
+            sizeof(AlignedCharArrayUnion<long long>));
+  EXPECT_EQ(sizeof(float), sizeof(AlignedCharArrayUnion<float>));
+  EXPECT_EQ(sizeof(double), sizeof(AlignedCharArrayUnion<double>));
   EXPECT_EQ(sizeof(long double),
-            sizeof(AlignedCharArray<long double>::union_type));
-  EXPECT_EQ(sizeof(void *), sizeof(AlignedCharArray<void *>::union_type));
-  EXPECT_EQ(sizeof(int *), sizeof(AlignedCharArray<int *>::union_type));
+            sizeof(AlignedCharArrayUnion<long double>));
+  EXPECT_EQ(sizeof(void *), sizeof(AlignedCharArrayUnion<void *>));
+  EXPECT_EQ(sizeof(int *), sizeof(AlignedCharArrayUnion<int *>));
   EXPECT_EQ(sizeof(double (*)(double)),
-            sizeof(AlignedCharArray<double (*)(double)>::union_type));
+            sizeof(AlignedCharArrayUnion<double (*)(double)>));
   EXPECT_EQ(sizeof(double (S6::*)()),
-            sizeof(AlignedCharArray<double (S6::*)()>::union_type));
-  EXPECT_EQ(sizeof(S1), sizeof(AlignedCharArray<S1>::union_type));
-  EXPECT_EQ(sizeof(S2), sizeof(AlignedCharArray<S2>::union_type));
-  EXPECT_EQ(sizeof(S3), sizeof(AlignedCharArray<S3>::union_type));
-  EXPECT_EQ(sizeof(S4), sizeof(AlignedCharArray<S4>::union_type));
-  EXPECT_EQ(sizeof(S5), sizeof(AlignedCharArray<S5>::union_type));
-  EXPECT_EQ(sizeof(S6), sizeof(AlignedCharArray<S6>::union_type));
-  EXPECT_EQ(sizeof(D1), sizeof(AlignedCharArray<D1>::union_type));
-  EXPECT_EQ(sizeof(D2), sizeof(AlignedCharArray<D2>::union_type));
-  EXPECT_EQ(sizeof(D3), sizeof(AlignedCharArray<D3>::union_type));
-  EXPECT_EQ(sizeof(D4), sizeof(AlignedCharArray<D4>::union_type));
-  EXPECT_EQ(sizeof(D5), sizeof(AlignedCharArray<D5>::union_type));
-  EXPECT_EQ(sizeof(D6), sizeof(AlignedCharArray<D6>::union_type));
-  EXPECT_EQ(sizeof(D7), sizeof(AlignedCharArray<D7>::union_type));
-  EXPECT_EQ(sizeof(D8), sizeof(AlignedCharArray<D8>::union_type));
-  EXPECT_EQ(sizeof(D9), sizeof(AlignedCharArray<D9>::union_type));
-  EXPECT_EQ(sizeof(D9[1]), sizeof(AlignedCharArray<D9[1]>::union_type));
-  EXPECT_EQ(sizeof(D9[2]), sizeof(AlignedCharArray<D9[2]>::union_type));
-  EXPECT_EQ(sizeof(D9[3]), sizeof(AlignedCharArray<D9[3]>::union_type));
-  EXPECT_EQ(sizeof(D9[4]), sizeof(AlignedCharArray<D9[4]>::union_type));
-  EXPECT_EQ(sizeof(D9[5]), sizeof(AlignedCharArray<D9[5]>::union_type));
-  EXPECT_EQ(sizeof(D9[8]), sizeof(AlignedCharArray<D9[8]>::union_type));
-  EXPECT_EQ(sizeof(D9[13]), sizeof(AlignedCharArray<D9[13]>::union_type));
-  EXPECT_EQ(sizeof(D9[16]), sizeof(AlignedCharArray<D9[16]>::union_type));
-  EXPECT_EQ(sizeof(D9[21]), sizeof(AlignedCharArray<D9[21]>::union_type));
-  EXPECT_EQ(sizeof(D9[32]), sizeof(AlignedCharArray<D9[32]>::union_type));
-  EXPECT_EQ(sizeof(V1), sizeof(AlignedCharArray<V1>::union_type));
-  EXPECT_EQ(sizeof(V2), sizeof(AlignedCharArray<V2>::union_type));
-  EXPECT_EQ(sizeof(V3), sizeof(AlignedCharArray<V3>::union_type));
-  EXPECT_EQ(sizeof(V4), sizeof(AlignedCharArray<V4>::union_type));
-  EXPECT_EQ(sizeof(V5), sizeof(AlignedCharArray<V5>::union_type));
-  EXPECT_EQ(sizeof(V6), sizeof(AlignedCharArray<V6>::union_type));
-  EXPECT_EQ(sizeof(V7), sizeof(AlignedCharArray<V7>::union_type));
+            sizeof(AlignedCharArrayUnion<double (S6::*)()>));
+  EXPECT_EQ(sizeof(S1), sizeof(AlignedCharArrayUnion<S1>));
+  EXPECT_EQ(sizeof(S2), sizeof(AlignedCharArrayUnion<S2>));
+  EXPECT_EQ(sizeof(S3), sizeof(AlignedCharArrayUnion<S3>));
+  EXPECT_EQ(sizeof(S4), sizeof(AlignedCharArrayUnion<S4>));
+  EXPECT_EQ(sizeof(S5), sizeof(AlignedCharArrayUnion<S5>));
+  EXPECT_EQ(sizeof(S6), sizeof(AlignedCharArrayUnion<S6>));
+  EXPECT_EQ(sizeof(D1), sizeof(AlignedCharArrayUnion<D1>));
+  EXPECT_EQ(sizeof(D2), sizeof(AlignedCharArrayUnion<D2>));
+  EXPECT_EQ(sizeof(D3), sizeof(AlignedCharArrayUnion<D3>));
+  EXPECT_EQ(sizeof(D4), sizeof(AlignedCharArrayUnion<D4>));
+  EXPECT_EQ(sizeof(D5), sizeof(AlignedCharArrayUnion<D5>));
+  EXPECT_EQ(sizeof(D6), sizeof(AlignedCharArrayUnion<D6>));
+  EXPECT_EQ(sizeof(D7), sizeof(AlignedCharArrayUnion<D7>));
+  EXPECT_EQ(sizeof(D8), sizeof(AlignedCharArrayUnion<D8>));
+  EXPECT_EQ(sizeof(D9), sizeof(AlignedCharArrayUnion<D9>));
+  EXPECT_EQ(sizeof(D9[1]), sizeof(AlignedCharArrayUnion<D9[1]>));
+  EXPECT_EQ(sizeof(D9[2]), sizeof(AlignedCharArrayUnion<D9[2]>));
+  EXPECT_EQ(sizeof(D9[3]), sizeof(AlignedCharArrayUnion<D9[3]>));
+  EXPECT_EQ(sizeof(D9[4]), sizeof(AlignedCharArrayUnion<D9[4]>));
+  EXPECT_EQ(sizeof(D9[5]), sizeof(AlignedCharArrayUnion<D9[5]>));
+  EXPECT_EQ(sizeof(D9[8]), sizeof(AlignedCharArrayUnion<D9[8]>));
+  EXPECT_EQ(sizeof(D9[13]), sizeof(AlignedCharArrayUnion<D9[13]>));
+  EXPECT_EQ(sizeof(D9[16]), sizeof(AlignedCharArrayUnion<D9[16]>));
+  EXPECT_EQ(sizeof(D9[21]), sizeof(AlignedCharArrayUnion<D9[21]>));
+  EXPECT_EQ(sizeof(D9[32]), sizeof(AlignedCharArrayUnion<D9[32]>));
+  EXPECT_EQ(sizeof(V1), sizeof(AlignedCharArrayUnion<V1>));
+  EXPECT_EQ(sizeof(V2), sizeof(AlignedCharArrayUnion<V2>));
+  EXPECT_EQ(sizeof(V3), sizeof(AlignedCharArrayUnion<V3>));
+  EXPECT_EQ(sizeof(V4), sizeof(AlignedCharArrayUnion<V4>));
+  EXPECT_EQ(sizeof(V5), sizeof(AlignedCharArrayUnion<V5>));
+  EXPECT_EQ(sizeof(V6), sizeof(AlignedCharArrayUnion<V6>));
+  EXPECT_EQ(sizeof(V7), sizeof(AlignedCharArrayUnion<V7>));
 
   // Some versions of MSVC also get this wrong. The failure again appears to be
   // benign: sizeof(V8) is only 52 bytes, but our array reserves 56.
 #ifndef _MSC_VER
-  EXPECT_EQ(sizeof(V8), sizeof(AlignedCharArray<V8>::union_type));
+  EXPECT_EQ(sizeof(V8), sizeof(AlignedCharArrayUnion<V8>));
 #endif
 }
 

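For readers following the rename: AlignedCharArrayUnion<T1, ..., Tn> replaces the old AlignedCharArray<...>::union_type spelling and provides uninitialized storage whose size and alignment are sufficient for any of the listed types. A minimal usage sketch follows; it is illustrative only and assumes the union exposes its storage through a buffer member, as in llvm/Support/AlignOf.h of this vintage.

#include "llvm/Support/AlignOf.h"
#include <new>

static void alignedStorageExample() {
  // Raw storage large enough and aligned enough for any of the listed types.
  llvm::AlignedCharArrayUnion<int, double, void *> Storage;

  // Placement-construct into the storage; double is trivially destructible,
  // so no explicit destructor call is needed before the storage is reused.
  double *D = new (Storage.buffer) double(3.14);
  *D += 1.0;
  (void)D;
}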
Modified: llvm/branches/AMDILBackend/unittests/Support/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/Support/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/Support/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/unittests/Support/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -17,11 +17,14 @@
   LeakDetectorTest.cpp
   ManagedStatic.cpp
   MathExtrasTest.cpp
+  MemoryBufferTest.cpp
+  MemoryTest.cpp
   Path.cpp
-  raw_ostream_test.cpp
   RegexTest.cpp
   SwapByteOrderTest.cpp
   TimeValue.cpp
   ValueHandleTest.cpp
   YAMLParserTest.cpp
+  formatted_raw_ostream_test.cpp
+  raw_ostream_test.cpp
   )

Modified: llvm/branches/AMDILBackend/unittests/Support/Casting.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/Support/Casting.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/Support/Casting.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/Support/Casting.cpp Tue Jan 15 11:16:16 2013
@@ -95,8 +95,9 @@
   EXPECT_NE(&F5, null_foo);
   const foo *F6 = cast<foo>(B4);
   EXPECT_NE(F6, null_foo);
-  foo *F7 = cast<foo>(fub());
-  EXPECT_EQ(F7, null_foo);
+  // Can't pass null pointer to cast<>.
+  // foo *F7 = cast<foo>(fub());
+  // EXPECT_EQ(F7, null_foo);
   foo *F8 = B1.baz();
   EXPECT_NE(F8, null_foo);
 }
@@ -121,7 +122,8 @@
   EXPECT_NE(F2, null_foo);
   const foo *F3 = dyn_cast<foo>(B4);
   EXPECT_NE(F3, null_foo);
-  // foo *F4 = dyn_cast<foo>(fub()); // not permittible
+  // Can't pass null pointer to dyn_cast<>.
+  // foo *F4 = dyn_cast<foo>(fub());
   // EXPECT_EQ(F4, null_foo);
   foo *F5 = B1.daz();
   EXPECT_NE(F5, null_foo);
@@ -151,3 +153,54 @@
 }  // anonymous namespace
 
 bar *llvm::fub() { return 0; }
+
+namespace {
+namespace inferred_upcasting {
+// This test case verifies correct behavior of inferred upcasts when the
+// types are statically known to be OK to upcast. This is the case when,
+// for example, Derived inherits from Base, and we do `isa<Base>(Derived)`.
+
+// Note: This test will actually fail to compile without inferred
+// upcasting.
+
+class Base {
+public:
+  // No classof. We are testing that the upcast is inferred.
+  Base() {}
+};
+
+class Derived : public Base {
+public:
+  Derived() {}
+};
+
+// Even with no explicit classof() in Base, we should still be able to cast
+// Derived to its base class.
+TEST(CastingTest, UpcastIsInferred) {
+  Derived D;
+  EXPECT_TRUE(isa<Base>(D));
+  Base *BP = dyn_cast<Base>(&D);
+  EXPECT_TRUE(BP != NULL);
+}
+
+
+// This test verifies that the inferred upcast takes precedence over an
+// explicitly written one. This is important because it verifies that the
+// dynamic check gets optimized away.
+class UseInferredUpcast {
+public:
+  int Dummy;
+  static bool classof(const UseInferredUpcast *) {
+    return false;
+  }
+};
+
+TEST(CastingTest, InferredUpcastTakesPrecedence) {
+  UseInferredUpcast UIU;
+  // Since the explicit classof() returns false, this will fail if the
+  // explicit one is used.
+  EXPECT_TRUE(isa<UseInferredUpcast>(&UIU));
+}
+
+} // end namespace inferred_upcasting
+} // end anonymous namespace

Modified: llvm/branches/AMDILBackend/unittests/Support/CommandLineTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/Support/CommandLineTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/Support/CommandLineTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/Support/CommandLineTest.cpp Tue Jan 15 11:16:16 2013
@@ -55,6 +55,17 @@
   EXPECT_EQ("hello", EnvironmentTestOption);
 }
 
+// This test used to make valgrind complain
+// ("Conditional jump or move depends on uninitialised value(s)")
+TEST(CommandLineTest, ParseEnvironmentToLocalVar) {
+  // Put cl::opt on stack to check for proper initialization of fields.
+  cl::opt<std::string> EnvironmentTestOptionLocal("env-test-opt-local");
+  TempEnvVar TEV(test_env_var, "-env-test-opt-local=hello-local");
+  EXPECT_EQ("", EnvironmentTestOptionLocal);
+  cl::ParseEnvironmentOptions("CommandLineTest", test_env_var);
+  EXPECT_EQ("hello-local", EnvironmentTestOptionLocal);
+}
+
 #endif  // SKIP_ENVIRONMENT_TESTS
 
 }  // anonymous namespace

Modified: llvm/branches/AMDILBackend/unittests/Support/DataExtractorTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/Support/DataExtractorTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/Support/DataExtractorTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/Support/DataExtractorTest.cpp Tue Jan 15 11:16:16 2013
@@ -16,6 +16,7 @@
 const char numberData[] = "\x80\x90\xFF\xFF\x80\x00\x00\x00";
 const char stringData[] = "hellohello\0hello";
 const char leb128data[] = "\xA6\x49";
+const char bigleb128data[] = "\xAA\xA9\xFF\xAA\xFF\xAA\xFF\x4A";
 
 TEST(DataExtractorTest, OffsetOverflow) {
   DataExtractor DE(StringRef(numberData, sizeof(numberData)-1), false, 8);
@@ -106,6 +107,14 @@
   offset = 0;
   EXPECT_EQ(-7002LL, DE.getSLEB128(&offset));
   EXPECT_EQ(2U, offset);
+
+  DataExtractor BDE(StringRef(bigleb128data, sizeof(bigleb128data)-1), false,8);
+  offset = 0;
+  EXPECT_EQ(42218325750568106ULL, BDE.getULEB128(&offset));
+  EXPECT_EQ(8U, offset);
+  offset = 0;
+  EXPECT_EQ(-29839268287359830LL, BDE.getSLEB128(&offset));
+  EXPECT_EQ(8U, offset);
 }
 
 }

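A note on the new LEB128 expectations: each LEB128 byte supplies seven payload
bits, least-significant group first, and the high bit marks continuation. For
the existing two-byte input "\xA6\x49" this gives 0x26 + (0x49 << 7) = 9382 on
an unsigned read; bit 6 of the final byte is set, so the signed read
sign-extends the 14-bit value to 9382 - 2^14 = -7002, the value checked above.
The longer bigleb128data input follows the same rule, just over eight bytes.
A minimal decoder sketch (illustrative only, not the DataExtractor code):

  #include <cstdint>
  #include <cstddef>

  static uint64_t decodeULEB128(const unsigned char *P, size_t N) {
    uint64_t Value = 0;
    unsigned Shift = 0;
    for (size_t I = 0; I != N; ++I) {
      Value |= uint64_t(P[I] & 0x7f) << Shift;
      Shift += 7;
      if ((P[I] & 0x80) == 0)   // high bit clear: last byte of the value
        break;
    }
    return Value;
  }
  // decodeULEB128((const unsigned char *)"\xA6\x49", 2) == 9382
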
Modified: llvm/branches/AMDILBackend/unittests/Support/Path.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/Support/Path.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/Support/Path.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/Support/Path.cpp Tue Jan 15 11:16:16 2013
@@ -340,44 +340,51 @@
 }
 #endif
 
-#if !defined(_WIN32) // FIXME: temporary suppressed.
 TEST_F(FileSystemTest, FileMapping) {
   // Create a temp file.
   int FileDescriptor;
   SmallString<64> TempPath;
   ASSERT_NO_ERROR(
     fs::unique_file("%%-%%-%%-%%.temp", FileDescriptor, TempPath));
-
-  // Grow temp file to be 4096 bytes 
-  ASSERT_NO_ERROR(sys::fs::resize_file(Twine(TempPath), 4096));
-  
   // Map in temp file and add some content
-  void* MappedMemory;
-  ASSERT_NO_ERROR(fs::map_file_pages(Twine(TempPath), 0, 4096, 
-                                true /*writable*/, MappedMemory));
-  char* Memory = reinterpret_cast<char*>(MappedMemory);
-  strcpy(Memory, "hello there");
-  
-  // Unmap temp file
-  ASSERT_NO_ERROR(fs::unmap_file_pages(MappedMemory, 4096));
-  MappedMemory = NULL;
-  Memory = NULL;
+  error_code EC;
+  StringRef Val("hello there");
+  {
+    fs::mapped_file_region mfr(FileDescriptor,
+                               fs::mapped_file_region::readwrite,
+                               4096,
+                               0,
+                               EC);
+    ASSERT_NO_ERROR(EC);
+    std::copy(Val.begin(), Val.end(), mfr.data());
+    // Explicitly add a 0.
+    mfr.data()[Val.size()] = 0;
+    // Unmap temp file
+  }
   
   // Map it back in read-only
-  ASSERT_NO_ERROR(fs::map_file_pages(Twine(TempPath), 0, 4096, 
-                                false /*read-only*/, MappedMemory));
+  fs::mapped_file_region mfr(Twine(TempPath),
+                             fs::mapped_file_region::readonly,
+                             0,
+                             0,
+                             EC);
+  ASSERT_NO_ERROR(EC);
   
   // Verify content
-  Memory = reinterpret_cast<char*>(MappedMemory);
-  bool SAME = (strcmp(Memory, "hello there") == 0);
-  EXPECT_TRUE(SAME);
+  EXPECT_EQ(StringRef(mfr.const_data()), Val);
   
   // Unmap temp file
-  ASSERT_NO_ERROR(fs::unmap_file_pages(MappedMemory, 4096));
-  MappedMemory = NULL;
-  Memory = NULL;
-}
-#endif
-
 
+#if LLVM_USE_RVALUE_REFERENCES
+  fs::mapped_file_region m(Twine(TempPath),
+                             fs::mapped_file_region::readonly,
+                             0,
+                             0,
+                             EC);
+  ASSERT_NO_ERROR(EC);
+  const char *Data = m.const_data();
+  fs::mapped_file_region mfrrv(llvm_move(m));
+  EXPECT_EQ(mfrrv.const_data(), Data);
+#endif
+}
 } // anonymous namespace

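The FileMapping test now exercises fs::mapped_file_region in place of the
removed map_file_pages()/unmap_file_pages() pair, and is no longer disabled on
Win32. The mapping is owned by the object, which is why the writable mapping
above sits in its own block: leaving the scope unmaps the file before it is
mapped again read-only. A condensed sketch of that pattern (illustrative only;
FD and TempPath stand for the descriptor and path created by the test):

  #include "llvm/Support/FileSystem.h"
  #include "llvm/ADT/SmallString.h"
  #include "llvm/ADT/Twine.h"
  #include <cstring>
  using namespace llvm;
  using namespace llvm::sys;

  static bool writeThenReadBack(int FD, const SmallString<64> &TempPath) {
    error_code EC;
    {
      fs::mapped_file_region RW(FD, fs::mapped_file_region::readwrite,
                                /*length=*/4096, /*offset=*/0, EC);
      if (EC) return false;
      std::memcpy(RW.data(), "hello there", 12);  // writable view of the file
    }                                             // destructor unmaps here
    fs::mapped_file_region RO(Twine(TempPath),
                              fs::mapped_file_region::readonly,
                              /*length=*/0, /*offset=*/0, EC);
    if (EC) return false;
    return StringRef(RO.const_data()) == "hello there";  // read-only view
  }
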
Modified: llvm/branches/AMDILBackend/unittests/Transforms/Utils/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/Transforms/Utils/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/Transforms/Utils/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/unittests/Transforms/Utils/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -4,5 +4,6 @@
 
 add_llvm_unittest(UtilsTests
   Cloning.cpp
+  IntegerDivision.cpp
   Local.cpp
   )

Modified: llvm/branches/AMDILBackend/unittests/VMCore/IRBuilderTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/VMCore/IRBuilderTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/VMCore/IRBuilderTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/VMCore/IRBuilderTest.cpp Tue Jan 15 11:16:16 2013
@@ -8,6 +8,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/BasicBlock.h"
+#include "llvm/DataLayout.h"
 #include "llvm/Function.h"
 #include "llvm/IRBuilder.h"
 #include "llvm/IntrinsicInst.h"
@@ -96,4 +97,15 @@
   EXPECT_EQ(Weights, TI->getMetadata(LLVMContext::MD_prof));
 }
 
+TEST_F(IRBuilderTest, GetIntTy) {
+  IRBuilder<> Builder(BB);
+  IntegerType *Ty1 = Builder.getInt1Ty();
+  EXPECT_EQ(Ty1, IntegerType::get(getGlobalContext(), 1));
+
+  DataLayout* DL = new DataLayout(M.get());
+  IntegerType *IntPtrTy = Builder.getIntPtrTy(DL);
+  unsigned IntPtrBitSize =  DL->getPointerSizeInBits(0);
+  EXPECT_EQ(IntPtrTy, IntegerType::get(getGlobalContext(), IntPtrBitSize));
+}
+
 }

Modified: llvm/branches/AMDILBackend/unittests/VMCore/InstructionsTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/VMCore/InstructionsTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/VMCore/InstructionsTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/VMCore/InstructionsTest.cpp Tue Jan 15 11:16:16 2013
@@ -9,6 +9,7 @@
 
 #include "llvm/BasicBlock.h"
 #include "llvm/Constants.h"
+#include "llvm/DataLayout.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/IRBuilder.h"
 #include "llvm/Instructions.h"
@@ -17,7 +18,6 @@
 #include "llvm/Operator.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
 #include "gtest/gtest.h"
 
 namespace llvm {
@@ -183,7 +183,7 @@
   EXPECT_NE(S3, Gep3);
 
   int64_t Offset;
-  TargetData TD("e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3"
+  DataLayout TD("e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3"
                 "2:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80"
                 ":128:128-n8:16:32:64-S128");
   // Make sure we don't crash
@@ -243,5 +243,42 @@
   delete I;
 }
 
+
+TEST(InstructionsTest, isEliminableCastPair) {
+  LLVMContext &C(getGlobalContext());
+
+  Type* Int32Ty = Type::getInt32Ty(C);
+  Type* Int64Ty = Type::getInt64Ty(C);
+  Type* Int64PtrTy = Type::getInt64PtrTy(C);
+
+  // Source and destination pointers have same size -> bitcast.
+  EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::PtrToInt,
+                                           CastInst::IntToPtr,
+                                           Int64PtrTy, Int64Ty, Int64PtrTy,
+                                           Int32Ty, 0, Int32Ty),
+            CastInst::BitCast);
+
+  // Source and destination pointers have different sizes -> fail.
+  EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::PtrToInt,
+                                           CastInst::IntToPtr,
+                                           Int64PtrTy, Int64Ty, Int64PtrTy,
+                                           Int32Ty, 0, Int64Ty),
+            0U);
+
+  // Middle pointer big enough -> bitcast.
+  EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr,
+                                           CastInst::PtrToInt,
+                                           Int64Ty, Int64PtrTy, Int64Ty,
+                                           0, Int64Ty, 0),
+            CastInst::BitCast);
+
+  // Middle pointer too small -> fail.
+  EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr,
+                                           CastInst::PtrToInt,
+                                           Int64Ty, Int64PtrTy, Int64Ty,
+                                           0, Int32Ty, 0),
+            0U);
+}
+
 }  // end anonymous namespace
 }  // end namespace llvm

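On the new isEliminableCastPair() coverage: as the expectations above suggest,
the helper returns the opcode of the single cast that can replace the pair
(CastInst::BitCast here), or 0 when the pair must be kept. A small sketch of
how a caller can use that result, mirroring the first check (illustrative
only; the wrapper function is hypothetical):

  #include "llvm/Instructions.h"
  #include "llvm/DerivedTypes.h"
  #include "llvm/LLVMContext.h"

  // True when "ptrtoint i64* -> i64; inttoptr i64 -> i64*" may be folded to a
  // single bitcast, given 32-bit pointers for both source and destination.
  static bool roundTripFoldsToBitCast(llvm::LLVMContext &C) {
    using namespace llvm;
    Type *Int32Ty = Type::getInt32Ty(C);
    Type *Int64Ty = Type::getInt64Ty(C);
    Type *Int64PtrTy = Type::getInt64PtrTy(C);
    unsigned NewOp = CastInst::isEliminableCastPair(
        CastInst::PtrToInt, CastInst::IntToPtr,
        Int64PtrTy, Int64Ty, Int64PtrTy,   // source, middle, destination types
        Int32Ty, 0, Int32Ty);              // pointer-sized integer types
    return NewOp == CastInst::BitCast;     // 0 would mean "keep both casts"
  }
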
Modified: llvm/branches/AMDILBackend/unittests/VMCore/PassManagerTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/unittests/VMCore/PassManagerTest.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/unittests/VMCore/PassManagerTest.cpp (original)
+++ llvm/branches/AMDILBackend/unittests/VMCore/PassManagerTest.cpp Tue Jan 15 11:16:16 2013
@@ -14,7 +14,7 @@
 #include "llvm/Pass.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/CallGraphSCCPass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/Constants.h"
@@ -94,7 +94,7 @@
         initializeModuleNDMPass(*PassRegistry::getPassRegistry());
       }
       virtual bool runOnModule(Module &M) {
-        EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
+        EXPECT_TRUE(getAnalysisIfAvailable<DataLayout>());
         run++;
         return false;
       }
@@ -167,7 +167,7 @@
         initializeCGPassPass(*PassRegistry::getPassRegistry());
       }
       virtual bool runOnSCC(CallGraphSCC &SCMM) {
-        EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
+        EXPECT_TRUE(getAnalysisIfAvailable<DataLayout>());
         run();
         return false;
       }
@@ -177,7 +177,7 @@
     public:
       virtual bool runOnFunction(Function &F) {
         // FIXME: PR4112
-        // EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
+        // EXPECT_TRUE(getAnalysisIfAvailable<DataLayout>());
         run();
         return false;
       }
@@ -204,7 +204,7 @@
         return false;
       }
       virtual bool runOnLoop(Loop *L, LPPassManager &LPM) {
-        EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
+        EXPECT_TRUE(getAnalysisIfAvailable<DataLayout>());
         run();
         return false;
       }
@@ -241,7 +241,7 @@
         return false;
       }
       virtual bool runOnBasicBlock(BasicBlock &BB) {
-        EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
+        EXPECT_TRUE(getAnalysisIfAvailable<DataLayout>());
         run();
         return false;
       }
@@ -266,7 +266,7 @@
         initializeFPassPass(*PassRegistry::getPassRegistry());
       }
       virtual bool runOnModule(Module &M) {
-        EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
+        EXPECT_TRUE(getAnalysisIfAvailable<DataLayout>());
         for (Module::iterator I=M.begin(),E=M.end(); I != E; ++I) {
           Function &F = *I;
           {
@@ -292,7 +292,7 @@
       mNDM->run = mNDNM->run = mDNM->run = mNDM2->run = 0;
 
       PassManager Passes;
-      Passes.add(new TargetData(&M));
+      Passes.add(new DataLayout(&M));
       Passes.add(mNDM2);
       Passes.add(mNDM);
       Passes.add(mNDNM);
@@ -316,7 +316,7 @@
       mNDM->run = mNDNM->run = mDNM->run = mNDM2->run = 0;
 
       PassManager Passes;
-      Passes.add(new TargetData(&M));
+      Passes.add(new DataLayout(&M));
       Passes.add(mNDM);
       Passes.add(mNDNM);
       Passes.add(mNDM2);// invalidates mNDM needed by mDNM
@@ -338,7 +338,7 @@
       OwningPtr<Module> M(makeLLVMModule());
       T *P = new T();
       PassManager Passes;
-      Passes.add(new TargetData(M.get()));
+      Passes.add(new DataLayout(M.get()));
       Passes.add(P);
       Passes.run(*M);
       T::finishedOK(run);
@@ -349,7 +349,7 @@
       Module *M = makeLLVMModule();
       T *P = new T();
       PassManager Passes;
-      Passes.add(new TargetData(M));
+      Passes.add(new DataLayout(M));
       Passes.add(P);
       Passes.run(*M);
       T::finishedOK(run, N);
@@ -387,7 +387,7 @@
         SCOPED_TRACE("Running OnTheFlyTest");
         struct OnTheFlyTest *O = new OnTheFlyTest();
         PassManager Passes;
-        Passes.add(new TargetData(M));
+        Passes.add(new DataLayout(M));
         Passes.add(O);
         Passes.run(*M);
 

Modified: llvm/branches/AMDILBackend/utils/FileCheck/FileCheck.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/FileCheck/FileCheck.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/FileCheck/FileCheck.cpp (original)
+++ llvm/branches/AMDILBackend/utils/FileCheck/FileCheck.cpp Tue Jan 15 11:16:16 2013
@@ -470,7 +470,7 @@
       continue;
     }
 
-    // If C is not a horizontal whitespace, skip it.
+    // If current char is not a horizontal whitespace, dump it to output as is.
     if (*Ptr != ' ' && *Ptr != '\t') {
       NewFile.push_back(*Ptr);
       continue;
@@ -537,11 +537,11 @@
       Buffer = Buffer.substr(CheckPrefix.size()+1);
     } else if (Buffer.size() > CheckPrefix.size()+6 &&
                memcmp(Buffer.data()+CheckPrefix.size(), "-NEXT:", 6) == 0) {
-      Buffer = Buffer.substr(CheckPrefix.size()+7);
+      Buffer = Buffer.substr(CheckPrefix.size()+6);
       IsCheckNext = true;
     } else if (Buffer.size() > CheckPrefix.size()+5 &&
                memcmp(Buffer.data()+CheckPrefix.size(), "-NOT:", 5) == 0) {
-      Buffer = Buffer.substr(CheckPrefix.size()+6);
+      Buffer = Buffer.substr(CheckPrefix.size()+5);
       IsCheckNot = true;
     } else {
       Buffer = Buffer.substr(1);

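The two substr() adjustments at the end line the skip counts up with the
actual directive lengths: with CheckPrefix == "CHECK", "CHECK-NEXT:" is
CheckPrefix.size() + 6 characters and "CHECK-NOT:" is CheckPrefix.size() + 5,
so the remembered buffer now starts exactly after the ':', matching the plain
"CHECK:" case (CheckPrefix.size() + 1); the old "+7"/"+6" skipped one extra
character of the pattern text. A quick check of the arithmetic (illustrative
only, not FileCheck code):

  #include "llvm/ADT/StringRef.h"

  static void directiveLengths() {
    llvm::StringRef CheckPrefix("CHECK");
    llvm::StringRef Line("CHECK-NEXT: ret void");
    // "CHECK" (5) + "-NEXT:" (6) == 11 == CheckPrefix.size() + 6.
    llvm::StringRef Pattern = Line.substr(CheckPrefix.size() + 6);
    (void)Pattern;  // " ret void" -- starts right after the ':'
    // The previous "+ 7" yielded "ret void", dropping the first character
    // after the ':'.
  }
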
Modified: llvm/branches/AMDILBackend/utils/TableGen/AsmMatcherEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/AsmMatcherEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/AsmMatcherEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/AsmMatcherEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -77,7 +77,7 @@
 //
 //  Some targets need a custom way to parse operands, some specific instructions
 //  can contain arguments that can represent processor flags and other kinds of
-//  identifiers that need to be mapped to specific valeus in the final encoded
+//  identifiers that need to be mapped to specific values in the final encoded
 //  instructions. The target specific custom operand parsing works in the
 //  following way:
 //
@@ -199,7 +199,7 @@
     return Kind >= UserClass0;
   }
 
-  /// isRelatedTo - Check whether this class is "related" to \arg RHS. Classes
+  /// isRelatedTo - Check whether this class is "related" to \p RHS. Classes
   /// are related if they are in the same class hierarchy.
   bool isRelatedTo(const ClassInfo &RHS) const {
     // Tokens are only related to tokens.
@@ -238,7 +238,7 @@
     return Root == RHSRoot;
   }
 
-  /// isSubsetOf - Test whether this class is a subset of \arg RHS;
+  /// isSubsetOf - Test whether this class is a subset of \p RHS.
   bool isSubsetOf(const ClassInfo &RHS) const {
     // This is a subset of RHS if it is the same class...
     if (this == &RHS)
@@ -279,6 +279,15 @@
   }
 };
 
+namespace {
+/// Sort ClassInfo pointers independently of pointer value.
+struct LessClassInfoPtr {
+  bool operator()(const ClassInfo *LHS, const ClassInfo *RHS) const {
+    return *LHS < *RHS;
+  }
+};
+}
+
 /// MatchableInfo - Helper class for storing the necessary information for an
 /// instruction or alias which is capable of being matched.
 struct MatchableInfo {
@@ -416,7 +425,7 @@
   SmallVector<SubtargetFeatureInfo*, 4> RequiredFeatures;
 
   /// ConversionFnKind - The enum value which is passed to the generated
-  /// ConvertToMCInst to convert parsed operands into an MCInst for this
+  /// convertToMCInst to convert parsed operands into an MCInst for this
   /// function.
   std::string ConversionFnKind;
 
@@ -488,11 +497,20 @@
         return false;
     }
 
+    // Give matches that require more features higher precedence. This is useful
+    // because we cannot define AssemblerPredicates with the negation of
+    // processor features. For example, ARM v6 "nop" may be either a HINT or
+    // MOV. With v6, we want to match HINT. The assembler has no way to
+    // predicate MOV under "NoV6", but HINT will always match first because it
+    // requires V6 while MOV does not.
+    if (RequiredFeatures.size() != RHS.RequiredFeatures.size())
+      return RequiredFeatures.size() > RHS.RequiredFeatures.size();
+
     return false;
   }
 
   /// couldMatchAmbiguouslyWith - Check whether this matchable could
-  /// ambiguously match the same set of operands as \arg RHS (without being a
+  /// ambiguously match the same set of operands as \p RHS (without being a
   /// strictly superior match).
   bool couldMatchAmbiguouslyWith(const MatchableInfo &RHS) {
     // The primary comparator is the instruction mnemonic.
@@ -590,7 +608,8 @@
   std::vector<OperandMatchEntry> OperandMatchInfo;
 
   /// Map of Register records to their class information.
-  std::map<Record*, ClassInfo*> RegisterClasses;
+  typedef std::map<Record*, ClassInfo*, LessRecordByID> RegisterClassesTy;
+  RegisterClassesTy RegisterClasses;
 
   /// Map of Predicate records to their subtarget information.
   std::map<Record*, SubtargetFeatureInfo*> SubtargetFeatures;
@@ -666,22 +685,22 @@
 }
 
 static std::pair<StringRef, StringRef>
-parseTwoOperandConstraint(StringRef S, SMLoc Loc) {
+parseTwoOperandConstraint(StringRef S, ArrayRef<SMLoc> Loc) {
   // Split via the '='.
   std::pair<StringRef, StringRef> Ops = S.split('=');
   if (Ops.second == "")
-    throw TGError(Loc, "missing '=' in two-operand alias constraint");
+    PrintFatalError(Loc, "missing '=' in two-operand alias constraint");
   // Trim whitespace and the leading '$' on the operand names.
   size_t start = Ops.first.find_first_of('$');
   if (start == std::string::npos)
-    throw TGError(Loc, "expected '$' prefix on asm operand name");
+    PrintFatalError(Loc, "expected '$' prefix on asm operand name");
   Ops.first = Ops.first.slice(start + 1, std::string::npos);
   size_t end = Ops.first.find_last_of(" \t");
   Ops.first = Ops.first.slice(0, end);
   // Now the second operand.
   start = Ops.second.find_first_of('$');
   if (start == std::string::npos)
-    throw TGError(Loc, "expected '$' prefix on asm operand name");
+    PrintFatalError(Loc, "expected '$' prefix on asm operand name");
   Ops.second = Ops.second.slice(start + 1, std::string::npos);
   end = Ops.second.find_last_of(" \t");
   Ops.first = Ops.first.slice(0, end);
@@ -697,11 +716,11 @@
   int SrcAsmOperand = findAsmOperandNamed(Ops.first);
   int DstAsmOperand = findAsmOperandNamed(Ops.second);
   if (SrcAsmOperand == -1)
-    throw TGError(TheDef->getLoc(),
+    PrintFatalError(TheDef->getLoc(),
                   "unknown source two-operand alias operand '" +
                   Ops.first.str() + "'.");
   if (DstAsmOperand == -1)
-    throw TGError(TheDef->getLoc(),
+    PrintFatalError(TheDef->getLoc(),
                   "unknown destination two-operand alias operand '" +
                   Ops.second.str() + "'.");
 
@@ -833,15 +852,15 @@
   // The first token of the instruction is the mnemonic, which must be a
   // simple string, not a $foo variable or a singleton register.
   if (AsmOperands.empty())
-    throw TGError(TheDef->getLoc(),
+    PrintFatalError(TheDef->getLoc(),
                   "Instruction '" + TheDef->getName() + "' has no tokens");
   Mnemonic = AsmOperands[0].Token;
   if (Mnemonic.empty())
-    throw TGError(TheDef->getLoc(),
+    PrintFatalError(TheDef->getLoc(),
                   "Missing instruction mnemonic");
   // FIXME : Check and raise an error if it is a register.
   if (Mnemonic[0] == '$')
-    throw TGError(TheDef->getLoc(),
+    PrintFatalError(TheDef->getLoc(),
                   "Invalid instruction mnemonic '" + Mnemonic.str() + "'!");
 
   // Remove the first operand, it is tracked in the mnemonic field.
@@ -851,12 +870,12 @@
 bool MatchableInfo::validate(StringRef CommentDelimiter, bool Hack) const {
   // Reject matchables with no .s string.
   if (AsmString.empty())
-    throw TGError(TheDef->getLoc(), "instruction with empty asm string");
+    PrintFatalError(TheDef->getLoc(), "instruction with empty asm string");
 
   // Reject any matchables with a newline in them, they should be marked
   // isCodeGenOnly if they are pseudo instructions.
   if (AsmString.find('\n') != std::string::npos)
-    throw TGError(TheDef->getLoc(),
+    PrintFatalError(TheDef->getLoc(),
                   "multiline instruction is not valid for the asmparser, "
                   "mark it isCodeGenOnly");
 
@@ -864,7 +883,7 @@
   // has one line.
   if (!CommentDelimiter.empty() &&
       StringRef(AsmString).find(CommentDelimiter) != StringRef::npos)
-    throw TGError(TheDef->getLoc(),
+    PrintFatalError(TheDef->getLoc(),
                   "asmstring for instruction has comment character in it, "
                   "mark it isCodeGenOnly");
 
@@ -878,7 +897,7 @@
   for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
     StringRef Tok = AsmOperands[i].Token;
     if (Tok[0] == '$' && Tok.find(':') != StringRef::npos)
-      throw TGError(TheDef->getLoc(),
+      PrintFatalError(TheDef->getLoc(),
                     "matchable with operand modifier '" + Tok.str() +
                     "' not supported by asm matcher.  Mark isCodeGenOnly!");
 
@@ -886,7 +905,7 @@
     // We reject aliases and ignore instructions for now.
     if (Tok[0] == '$' && !OperandNames.insert(Tok).second) {
       if (!Hack)
-        throw TGError(TheDef->getLoc(),
+        PrintFatalError(TheDef->getLoc(),
                       "ERROR: matchable with tied operand '" + Tok.str() +
                       "' can never be matched!");
       // FIXME: Should reject these.  The ARM backend hits this with $lane in a
@@ -974,7 +993,7 @@
                                 int SubOpIdx) {
   Record *Rec = OI.Rec;
   if (SubOpIdx != -1)
-    Rec = dynamic_cast<DefInit*>(OI.MIOperandInfo->getArg(SubOpIdx))->getDef();
+    Rec = cast<DefInit>(OI.MIOperandInfo->getArg(SubOpIdx))->getDef();
   return getOperandClass(Rec, SubOpIdx);
 }
 
@@ -985,10 +1004,10 @@
     // use it, else just fall back to the underlying register class.
     const RecordVal *R = Rec->getValue("ParserMatchClass");
     if (R == 0 || R->getValue() == 0)
-      throw "Record `" + Rec->getName() +
-        "' does not have a ParserMatchClass!\n";
+      PrintFatalError("Record `" + Rec->getName() +
+        "' does not have a ParserMatchClass!\n");
 
-    if (DefInit *DI= dynamic_cast<DefInit*>(R->getValue())) {
+    if (DefInit *DI= dyn_cast<DefInit>(R->getValue())) {
       Record *MatchClass = DI->getDef();
       if (ClassInfo *CI = AsmOperandClasses[MatchClass])
         return CI;
@@ -997,26 +1016,28 @@
     // No custom match class. Just use the register class.
     Record *ClassRec = Rec->getValueAsDef("RegClass");
     if (!ClassRec)
-      throw TGError(Rec->getLoc(), "RegisterOperand `" + Rec->getName() +
+      PrintFatalError(Rec->getLoc(), "RegisterOperand `" + Rec->getName() +
                     "' has no associated register class!\n");
     if (ClassInfo *CI = RegisterClassClasses[ClassRec])
       return CI;
-    throw TGError(Rec->getLoc(), "register class has no class info!");
+    PrintFatalError(Rec->getLoc(), "register class has no class info!");
   }
 
 
   if (Rec->isSubClassOf("RegisterClass")) {
     if (ClassInfo *CI = RegisterClassClasses[Rec])
       return CI;
-    throw TGError(Rec->getLoc(), "register class has no class info!");
+    PrintFatalError(Rec->getLoc(), "register class has no class info!");
   }
 
-  assert(Rec->isSubClassOf("Operand") && "Unexpected operand!");
+  if (!Rec->isSubClassOf("Operand"))
+    PrintFatalError(Rec->getLoc(), "Operand `" + Rec->getName() +
+                  "' does not derive from class Operand!\n");
   Record *MatchClass = Rec->getValueAsDef("ParserMatchClass");
   if (ClassInfo *CI = AsmOperandClasses[MatchClass])
     return CI;
 
-  throw TGError(Rec->getLoc(), "operand has no match class!");
+  PrintFatalError(Rec->getLoc(), "operand has no match class!");
 }
 
 void AsmMatcherInfo::
@@ -1164,7 +1185,7 @@
 
     ListInit *Supers = (*it)->getValueAsListInit("SuperClasses");
     for (unsigned i = 0, e = Supers->getSize(); i != e; ++i) {
-      DefInit *DI = dynamic_cast<DefInit*>(Supers->getElement(i));
+      DefInit *DI = dyn_cast<DefInit>(Supers->getElement(i));
       if (!DI) {
         PrintError((*it)->getLoc(), "Invalid super class reference!");
         continue;
@@ -1182,33 +1203,31 @@
 
     // Get or construct the predicate method name.
     Init *PMName = (*it)->getValueInit("PredicateMethod");
-    if (StringInit *SI = dynamic_cast<StringInit*>(PMName)) {
+    if (StringInit *SI = dyn_cast<StringInit>(PMName)) {
       CI->PredicateMethod = SI->getValue();
     } else {
-      assert(dynamic_cast<UnsetInit*>(PMName) &&
-             "Unexpected PredicateMethod field!");
+      assert(isa<UnsetInit>(PMName) && "Unexpected PredicateMethod field!");
       CI->PredicateMethod = "is" + CI->ClassName;
     }
 
     // Get or construct the render method name.
     Init *RMName = (*it)->getValueInit("RenderMethod");
-    if (StringInit *SI = dynamic_cast<StringInit*>(RMName)) {
+    if (StringInit *SI = dyn_cast<StringInit>(RMName)) {
       CI->RenderMethod = SI->getValue();
     } else {
-      assert(dynamic_cast<UnsetInit*>(RMName) &&
-             "Unexpected RenderMethod field!");
+      assert(isa<UnsetInit>(RMName) && "Unexpected RenderMethod field!");
       CI->RenderMethod = "add" + CI->ClassName + "Operands";
     }
 
     // Get the parse method name or leave it as empty.
     Init *PRMName = (*it)->getValueInit("ParserMethod");
-    if (StringInit *SI = dynamic_cast<StringInit*>(PRMName))
+    if (StringInit *SI = dyn_cast<StringInit>(PRMName))
       CI->ParserMethod = SI->getValue();
 
     // Get the diagnostic type or leave it as empty.
     // Get the parse method name or leave it as empty.
     Init *DiagnosticType = (*it)->getValueInit("DiagnosticType");
-    if (StringInit *SI = dynamic_cast<StringInit*>(DiagnosticType))
+    if (StringInit *SI = dyn_cast<StringInit>(DiagnosticType))
       CI->DiagnosticType = SI->getValue();
 
     AsmOperandClasses[*it] = CI;
@@ -1228,7 +1247,8 @@
 
   /// Map containing a mask with all operands indices that can be found for
   /// that class inside a instruction.
-  std::map<ClassInfo*, unsigned> OpClassMask;
+  typedef std::map<ClassInfo*, unsigned, LessClassInfoPtr> OpClassMaskTy;
+  OpClassMaskTy OpClassMask;
 
   for (std::vector<MatchableInfo*>::const_iterator it =
        Matchables.begin(), ie = Matchables.end();
@@ -1247,7 +1267,7 @@
     }
 
     // Generate operand match info for each mnemonic/operand class pair.
-    for (std::map<ClassInfo*, unsigned>::iterator iit = OpClassMask.begin(),
+    for (OpClassMaskTy::iterator iit = OpClassMask.begin(),
          iie = OpClassMask.end(); iit != iie; ++iit) {
       unsigned OpMask = iit->second;
       ClassInfo *CI = iit->first;
@@ -1267,7 +1287,7 @@
       continue;
 
     if (Pred->getName().empty())
-      throw TGError(Pred->getLoc(), "Predicate has no name!");
+      PrintFatalError(Pred->getLoc(), "Predicate has no name!");
 
     unsigned FeatureNo = SubtargetFeatures.size();
     SubtargetFeatures[Pred] = new SubtargetFeatureInfo(Pred, FeatureNo);
@@ -1448,7 +1468,7 @@
     ClassInfo *FromClass = getTokenClass(Rec->getValueAsString("FromToken"));
     ClassInfo *ToClass = getTokenClass(Rec->getValueAsString("ToToken"));
     if (FromClass == ToClass)
-      throw TGError(Rec->getLoc(),
+      PrintFatalError(Rec->getLoc(),
                     "error: Destination value identical to source value.");
     FromClass->SuperClasses.push_back(ToClass);
   }
@@ -1470,7 +1490,7 @@
   // Map this token to an operand.
   unsigned Idx;
   if (!Operands.hasOperandNamed(OperandName, Idx))
-    throw TGError(II->TheDef->getLoc(), "error: unable to find operand: '" +
+    PrintFatalError(II->TheDef->getLoc(), "error: unable to find operand: '" +
                   OperandName.str() + "'");
 
   // If the instruction operand has multiple suboperands, but the parser
@@ -1541,7 +1561,7 @@
       return;
     }
 
-  throw TGError(II->TheDef->getLoc(), "error: unable to find operand: '" +
+  PrintFatalError(II->TheDef->getLoc(), "error: unable to find operand: '" +
                 OperandName.str() + "'");
 }
 
@@ -1563,7 +1583,7 @@
     // Find out what operand from the asmparser this MCInst operand comes from.
     int SrcOperand = findAsmOperandNamed(OpInfo.Name);
     if (OpInfo.Name.empty() || SrcOperand == -1)
-      throw TGError(TheDef->getLoc(), "Instruction '" +
+      PrintFatalError(TheDef->getLoc(), "Instruction '" +
                     TheDef->getName() + "' has operand '" + OpInfo.Name +
                     "' that doesn't appear in asm string!");
 
@@ -1615,7 +1635,7 @@
         StringRef Name = CGA.ResultOperands[AliasOpNo].getName();
         int SrcOperand = findAsmOperand(Name, SubIdx);
         if (SrcOperand == -1)
-          throw TGError(TheDef->getLoc(), "Instruction '" +
+          PrintFatalError(TheDef->getLoc(), "Instruction '" +
                         TheDef->getName() + "' has operand '" + OpName +
                         "' that doesn't appear in asm string!");
         unsigned NumOperands = (SubIdx == -1 ? OpInfo->MINumOperands : 1);
@@ -1638,35 +1658,85 @@
   }
 }
 
-static void emitConvertToMCInst(CodeGenTarget &Target, StringRef ClassName,
-                                std::vector<MatchableInfo*> &Infos,
-                                raw_ostream &OS) {
-  // Write the convert function to a separate stream, so we can drop it after
-  // the enum.
-  std::string ConvertFnBody;
-  raw_string_ostream CvtOS(ConvertFnBody);
+static unsigned getConverterOperandID(const std::string &Name,
+                                      SetVector<std::string> &Table,
+                                      bool &IsNew) {
+  IsNew = Table.insert(Name);
 
-  // Function we have already generated.
-  std::set<std::string> GeneratedFns;
+  unsigned ID = IsNew ? Table.size() - 1 :
+    std::find(Table.begin(), Table.end(), Name) - Table.begin();
 
-  // Start the unified conversion function.
-  CvtOS << "bool " << Target.getName() << ClassName << "::\n";
-  CvtOS << "ConvertToMCInst(unsigned Kind, MCInst &Inst, "
-        << "unsigned Opcode,\n"
-        << "                      const SmallVectorImpl<MCParsedAsmOperand*"
-        << "> &Operands) {\n";
-  CvtOS << "  Inst.setOpcode(Opcode);\n";
-  CvtOS << "  switch (Kind) {\n";
-  CvtOS << "  default:\n";
+  assert(ID < Table.size());
+
+  return ID;
+}
 
-  // Start the enum, which we will generate inline.
 
-  OS << "// Unified function for converting operands to MCInst instances.\n\n";
-  OS << "enum ConversionKind {\n";
+static void emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
+                             std::vector<MatchableInfo*> &Infos,
+                             raw_ostream &OS) {
+  SetVector<std::string> OperandConversionKinds;
+  SetVector<std::string> InstructionConversionKinds;
+  std::vector<std::vector<uint8_t> > ConversionTable;
+  size_t MaxRowLength = 2; // minimum is custom converter plus terminator.
 
   // TargetOperandClass - This is the target's operand class, like X86Operand.
   std::string TargetOperandClass = Target.getName() + "Operand";
 
+  // Write the convert function to a separate stream, so we can drop it after
+  // the enum. We'll build up the conversion handlers for the individual
+  // operand types opportunistically as we encounter them.
+  std::string ConvertFnBody;
+  raw_string_ostream CvtOS(ConvertFnBody);
+  // Start the unified conversion function.
+  CvtOS << "void " << Target.getName() << ClassName << "::\n"
+        << "convertToMCInst(unsigned Kind, MCInst &Inst, "
+        << "unsigned Opcode,\n"
+        << "                const SmallVectorImpl<MCParsedAsmOperand*"
+        << "> &Operands) {\n"
+        << "  assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n"
+        << "  const uint8_t *Converter = ConversionTable[Kind];\n"
+        << "  Inst.setOpcode(Opcode);\n"
+        << "  for (const uint8_t *p = Converter; *p; p+= 2) {\n"
+        << "    switch (*p) {\n"
+        << "    default: llvm_unreachable(\"invalid conversion entry!\");\n"
+        << "    case CVT_Reg:\n"
+        << "      static_cast<" << TargetOperandClass
+        << "*>(Operands[*(p + 1)])->addRegOperands(Inst, 1);\n"
+        << "      break;\n"
+        << "    case CVT_Tied:\n"
+        << "      Inst.addOperand(Inst.getOperand(*(p + 1)));\n"
+        << "      break;\n";
+
+  std::string OperandFnBody;
+  raw_string_ostream OpOS(OperandFnBody);
+  // Start the operand number lookup function.
+  OpOS << "void " << Target.getName() << ClassName << "::\n"
+       << "convertToMapAndConstraints(unsigned Kind,\n";
+  OpOS.indent(27);
+  OpOS << "const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {\n"
+       << "  assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n"
+       << "  unsigned NumMCOperands = 0;\n"
+       << "  const uint8_t *Converter = ConversionTable[Kind];\n"
+       << "  for (const uint8_t *p = Converter; *p; p+= 2) {\n"
+       << "    switch (*p) {\n"
+       << "    default: llvm_unreachable(\"invalid conversion entry!\");\n"
+       << "    case CVT_Reg:\n"
+       << "      Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
+       << "      Operands[*(p + 1)]->setConstraint(\"m\");\n"
+       << "      ++NumMCOperands;\n"
+       << "      break;\n"
+       << "    case CVT_Tied:\n"
+       << "      ++NumMCOperands;\n"
+       << "      break;\n";
+
+  // Pre-populate the operand conversion kinds with the standard always
+  // available entries.
+  OperandConversionKinds.insert("CVT_Done");
+  OperandConversionKinds.insert("CVT_Reg");
+  OperandConversionKinds.insert("CVT_Tied");
+  enum { CVT_Done, CVT_Reg, CVT_Tied };
+
   for (std::vector<MatchableInfo*>::const_iterator it = Infos.begin(),
          ie = Infos.end(); it != ie; ++it) {
     MatchableInfo &II = **it;
@@ -1679,24 +1749,35 @@
       II.ConversionFnKind = Signature;
 
       // Check if we have already generated this signature.
-      if (!GeneratedFns.insert(Signature).second)
+      if (!InstructionConversionKinds.insert(Signature))
         continue;
 
-      // If not, emit it now.  Add to the enum list.
-      OS << "  " << Signature << ",\n";
+      // Remember this converter for the kind enum.
+      unsigned KindID = OperandConversionKinds.size();
+      OperandConversionKinds.insert("CVT_" + AsmMatchConverter);
+
+      // Add the converter row for this instruction.
+      ConversionTable.push_back(std::vector<uint8_t>());
+      ConversionTable.back().push_back(KindID);
+      ConversionTable.back().push_back(CVT_Done);
+
+      // Add the handler to the conversion driver function.
+      CvtOS << "    case CVT_" << AsmMatchConverter << ":\n"
+            << "      " << AsmMatchConverter << "(Inst, Operands);\n"
+            << "      break;\n";
 
-      CvtOS << "  case " << Signature << ":\n";
-      CvtOS << "    return " << AsmMatchConverter
-            << "(Inst, Opcode, Operands);\n";
+      // FIXME: Handle the operand number lookup for custom match functions.
       continue;
     }
 
     // Build the conversion function signature.
     std::string Signature = "Convert";
-    std::string CaseBody;
-    raw_string_ostream CaseOS(CaseBody);
+
+    std::vector<uint8_t> ConversionRow;
 
     // Compute the convert enum and the case body.
+    MaxRowLength = std::max(MaxRowLength, II.ResOperands.size()*2 + 1 );
+
     for (unsigned i = 0, e = II.ResOperands.size(); i != e; ++i) {
       const MatchableInfo::ResOperand &OpInfo = II.ResOperands[i];
 
@@ -1709,74 +1790,180 @@
         // Registers are always converted the same, don't duplicate the
         // conversion function based on them.
         Signature += "__";
-        if (Op.Class->isRegisterClass())
-          Signature += "Reg";
-        else
-          Signature += Op.Class->ClassName;
+        std::string Class;
+        Class = Op.Class->isRegisterClass() ? "Reg" : Op.Class->ClassName;
+        Signature += Class;
         Signature += utostr(OpInfo.MINumOperands);
         Signature += "_" + itostr(OpInfo.AsmOperandNum);
 
-        CaseOS << "    ((" << TargetOperandClass << "*)Operands["
-               << (OpInfo.AsmOperandNum+1) << "])->" << Op.Class->RenderMethod
-               << "(Inst, " << OpInfo.MINumOperands << ");\n";
+        // Add the conversion kind, if necessary, and get the associated ID
+        // (the index of its entry in the vector).
+        std::string Name = "CVT_" + (Op.Class->isRegisterClass() ? "Reg" :
+                                     Op.Class->RenderMethod);
+
+        bool IsNewConverter = false;
+        unsigned ID = getConverterOperandID(Name, OperandConversionKinds,
+                                            IsNewConverter);
+
+        // Add the operand entry to the instruction kind conversion row.
+        ConversionRow.push_back(ID);
+        ConversionRow.push_back(OpInfo.AsmOperandNum + 1);
+
+        if (!IsNewConverter)
+          break;
+
+        // This is a new operand kind. Add a handler for it to the
+        // converter driver.
+        CvtOS << "    case " << Name << ":\n"
+              << "      static_cast<" << TargetOperandClass
+              << "*>(Operands[*(p + 1)])->"
+              << Op.Class->RenderMethod << "(Inst, " << OpInfo.MINumOperands
+              << ");\n"
+              << "      break;\n";
+
+        // Add a handler for the operand number lookup.
+        OpOS << "    case " << Name << ":\n"
+             << "      Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
+             << "      Operands[*(p + 1)]->setConstraint(\"m\");\n"
+             << "      NumMCOperands += " << OpInfo.MINumOperands << ";\n"
+             << "      break;\n";
         break;
       }
-
       case MatchableInfo::ResOperand::TiedOperand: {
         // If this operand is tied to a previous one, just copy the MCInst
         // operand from the earlier one.We can only tie single MCOperand values.
         //assert(OpInfo.MINumOperands == 1 && "Not a singular MCOperand");
         unsigned TiedOp = OpInfo.TiedOperandNum;
         assert(i > TiedOp && "Tied operand precedes its target!");
-        CaseOS << "    Inst.addOperand(Inst.getOperand(" << TiedOp << "));\n";
         Signature += "__Tie" + utostr(TiedOp);
+        ConversionRow.push_back(CVT_Tied);
+        ConversionRow.push_back(TiedOp);
+        // FIXME: Handle the operand number lookup for tied operands.
         break;
       }
       case MatchableInfo::ResOperand::ImmOperand: {
         int64_t Val = OpInfo.ImmVal;
-        CaseOS << "    Inst.addOperand(MCOperand::CreateImm(" << Val << "));\n";
-        Signature += "__imm" + itostr(Val);
+        std::string Ty = "imm_" + itostr(Val);
+        Signature += "__" + Ty;
+
+        std::string Name = "CVT_" + Ty;
+        bool IsNewConverter = false;
+        unsigned ID = getConverterOperandID(Name, OperandConversionKinds,
+                                            IsNewConverter);
+        // Add the operand entry to the instruction kind conversion row.
+        ConversionRow.push_back(ID);
+        ConversionRow.push_back(0);
+
+        if (!IsNewConverter)
+          break;
+
+        CvtOS << "    case " << Name << ":\n"
+              << "      Inst.addOperand(MCOperand::CreateImm(" << Val << "));\n"
+              << "      break;\n";
+
+        OpOS << "    case " << Name << ":\n"
+             << "      Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
+             << "      Operands[*(p + 1)]->setConstraint(\"\");\n"
+             << "      ++NumMCOperands;\n"
+             << "      break;\n";
         break;
       }
       case MatchableInfo::ResOperand::RegOperand: {
+        std::string Reg, Name;
         if (OpInfo.Register == 0) {
-          CaseOS << "    Inst.addOperand(MCOperand::CreateReg(0));\n";
-          Signature += "__reg0";
+          Name = "reg0";
+          Reg = "0";
         } else {
-          std::string N = getQualifiedName(OpInfo.Register);
-          CaseOS << "    Inst.addOperand(MCOperand::CreateReg(" << N << "));\n";
-          Signature += "__reg" + OpInfo.Register->getName();
+          Reg = getQualifiedName(OpInfo.Register);
+          Name = "reg" + OpInfo.Register->getName();
         }
+        Signature += "__" + Name;
+        Name = "CVT_" + Name;
+        bool IsNewConverter = false;
+        unsigned ID = getConverterOperandID(Name, OperandConversionKinds,
+                                            IsNewConverter);
+        // Add the operand entry to the instruction kind conversion row.
+        ConversionRow.push_back(ID);
+        ConversionRow.push_back(0);
+
+        if (!IsNewConverter)
+          break;
+        CvtOS << "    case " << Name << ":\n"
+              << "      Inst.addOperand(MCOperand::CreateReg(" << Reg << "));\n"
+              << "      break;\n";
+
+        OpOS << "    case " << Name << ":\n"
+             << "      Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
+             << "      Operands[*(p + 1)]->setConstraint(\"m\");\n"
+             << "      ++NumMCOperands;\n"
+             << "      break;\n";
       }
       }
     }
 
+    // If there were no operands, add to the signature to that effect
+    if (Signature == "Convert")
+      Signature += "_NoOperands";
+
     II.ConversionFnKind = Signature;
 
-    // Check if we have already generated this signature.
-    if (!GeneratedFns.insert(Signature).second)
+    // Save the signature. If we already have it, don't add a new row
+    // to the table.
+    if (!InstructionConversionKinds.insert(Signature))
       continue;
 
-    // If not, emit it now.  Add to the enum list.
-    OS << "  " << Signature << ",\n";
-
-    CvtOS << "  case " << Signature << ":\n";
-    CvtOS << CaseOS.str();
-    CvtOS << "    return true;\n";
+    // Add the row to the table.
+    ConversionTable.push_back(ConversionRow);
   }
 
-  // Finish the convert function.
+  // Finish up the converter driver function.
+  CvtOS << "    }\n  }\n}\n\n";
 
-  CvtOS << "  }\n";
-  CvtOS << "  return false;\n";
-  CvtOS << "}\n\n";
+  // Finish up the operand number lookup function.
+  OpOS << "    }\n  }\n}\n\n";
 
-  // Finish the enum, and drop the convert function after it.
+  OS << "namespace {\n";
 
-  OS << "  NumConversionVariants\n";
+  // Output the operand conversion kind enum.
+  OS << "enum OperatorConversionKind {\n";
+  for (unsigned i = 0, e = OperandConversionKinds.size(); i != e; ++i)
+    OS << "  " << OperandConversionKinds[i] << ",\n";
+  OS << "  CVT_NUM_CONVERTERS\n";
   OS << "};\n\n";
 
+  // Output the instruction conversion kind enum.
+  OS << "enum InstructionConversionKind {\n";
+  for (SetVector<std::string>::const_iterator
+         i = InstructionConversionKinds.begin(),
+         e = InstructionConversionKinds.end(); i != e; ++i)
+    OS << "  " << *i << ",\n";
+  OS << "  CVT_NUM_SIGNATURES\n";
+  OS << "};\n\n";
+
+
+  OS << "} // end anonymous namespace\n\n";
+
+  // Output the conversion table.
+  OS << "static const uint8_t ConversionTable[CVT_NUM_SIGNATURES]["
+     << MaxRowLength << "] = {\n";
+
+  for (unsigned Row = 0, ERow = ConversionTable.size(); Row != ERow; ++Row) {
+    assert(ConversionTable[Row].size() % 2 == 0 && "bad conversion row!");
+    OS << "  // " << InstructionConversionKinds[Row] << "\n";
+    OS << "  { ";
+    for (unsigned i = 0, e = ConversionTable[Row].size(); i != e; i += 2)
+      OS << OperandConversionKinds[ConversionTable[Row][i]] << ", "
+         << (unsigned)(ConversionTable[Row][i + 1]) << ", ";
+    OS << "CVT_Done },\n";
+  }
+
+  OS << "};\n\n";
+
+  // Spit out the conversion driver function.
   OS << CvtOS.str();
+
+  // Spit out the operand number lookup function.
+  OS << OpOS.str();
 }
 
 /// emitMatchClassEnumeration - Emit the enumeration for match class kinds.
@@ -1853,7 +2040,7 @@
   OS << "    MatchClassKind OpKind;\n";
   OS << "    switch (Operand.getReg()) {\n";
   OS << "    default: OpKind = InvalidMatchClass; break;\n";
-  for (std::map<Record*, ClassInfo*>::iterator
+  for (AsmMatcherInfo::RegisterClassesTy::iterator
          it = Info.RegisterClasses.begin(), ie = Info.RegisterClasses.end();
        it != ie; ++it)
     OS << "    case " << Info.Target.getName() << "::"
@@ -1874,7 +2061,7 @@
 static void emitIsSubclass(CodeGenTarget &Target,
                            std::vector<ClassInfo*> &Infos,
                            raw_ostream &OS) {
-  OS << "/// isSubclass - Compute whether \\arg A is a subclass of \\arg B.\n";
+  OS << "/// isSubclass - Compute whether \\p A is a subclass of \\p B.\n";
   OS << "static bool isSubclass(MatchClassKind A, MatchClassKind B) {\n";
   OS << "  if (A == B)\n";
   OS << "    return true;\n\n";
@@ -2083,7 +2270,7 @@
     SubtargetFeatureInfo *F = Info.getSubtargetFeature(ReqFeatures[i]);
 
     if (F == 0)
-      throw TGError(R->getLoc(), "Predicate '" + ReqFeatures[i]->getName() +
+      PrintFatalError(R->getLoc(), "Predicate '" + ReqFeatures[i]->getName() +
                     "' is not marked as an AssemblerPredicate!");
 
     if (NumFeatures)
@@ -2146,14 +2333,14 @@
           // We can't have two aliases from the same mnemonic with no predicate.
           PrintError(ToVec[AliasWithNoPredicate]->getLoc(),
                      "two MnemonicAliases with the same 'from' mnemonic!");
-          throw TGError(R->getLoc(), "this is the other MnemonicAlias.");
+          PrintFatalError(R->getLoc(), "this is the other MnemonicAlias.");
         }
 
         AliasWithNoPredicate = i;
         continue;
       }
       if (R->getValueAsString("ToMnemonic") == I->first)
-        throw TGError(R->getLoc(), "MnemonicAlias to the same string");
+        PrintFatalError(R->getLoc(), "MnemonicAlias to the same string");
 
       if (!MatchCode.empty())
         MatchCode += "else ";
@@ -2189,17 +2376,27 @@
 }
 
 static void emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
-                              const AsmMatcherInfo &Info, StringRef ClassName) {
+                              const AsmMatcherInfo &Info, StringRef ClassName,
+                              StringToOffsetTable &StringTable,
+                              unsigned MaxMnemonicIndex) {
+  unsigned MaxMask = 0;
+  for (std::vector<OperandMatchEntry>::const_iterator it =
+       Info.OperandMatchInfo.begin(), ie = Info.OperandMatchInfo.end();
+       it != ie; ++it) {
+    MaxMask |= it->OperandMask;
+  }
+
   // Emit the static custom operand parsing table;
   OS << "namespace {\n";
   OS << "  struct OperandMatchEntry {\n";
-  OS << "    static const char *const MnemonicTable;\n";
-  OS << "    uint32_t OperandMask;\n";
-  OS << "    uint32_t Mnemonic;\n";
   OS << "    " << getMinimalTypeForRange(1ULL << Info.SubtargetFeatures.size())
                << " RequiredFeatures;\n";
+  OS << "    " << getMinimalTypeForRange(MaxMnemonicIndex)
+               << " Mnemonic;\n";
   OS << "    " << getMinimalTypeForRange(Info.Classes.size())
-               << " Class;\n\n";
+               << " Class;\n";
+  OS << "    " << getMinimalTypeForRange(MaxMask)
+               << " OperandMask;\n\n";
   OS << "    StringRef getMnemonic() const {\n";
   OS << "      return StringRef(MnemonicTable + Mnemonic + 1,\n";
   OS << "                       MnemonicTable[Mnemonic]);\n";
@@ -2222,8 +2419,6 @@
 
   OS << "} // end anonymous namespace.\n\n";
 
-  StringToOffsetTable StringTable;
-
   OS << "static const OperandMatchEntry OperandMatchTable["
      << Info.OperandMatchInfo.size() << "] = {\n";
 
@@ -2234,8 +2429,25 @@
     const OperandMatchEntry &OMI = *it;
     const MatchableInfo &II = *OMI.MI;
 
-    OS << "  { " << OMI.OperandMask;
+    OS << "  { ";
 
+    // Write the required features mask.
+    if (!II.RequiredFeatures.empty()) {
+      for (unsigned i = 0, e = II.RequiredFeatures.size(); i != e; ++i) {
+        if (i) OS << "|";
+        OS << II.RequiredFeatures[i]->getEnumName();
+      }
+    } else
+      OS << "0";
+
+    // Store a pascal-style length byte in the mnemonic.
+    std::string LenMnemonic = char(II.Mnemonic.size()) + II.Mnemonic.str();
+    OS << ", " << StringTable.GetOrAddStringOffset(LenMnemonic, false)
+       << " /* " << II.Mnemonic << " */, ";
+
+    OS << OMI.CI->Name;
+
+    OS << ", " << OMI.OperandMask;
     OS << " /* ";
     bool printComma = false;
     for (int i = 0, e = 31; i !=e; ++i)
@@ -2247,30 +2459,10 @@
       }
     OS << " */";
 
-    // Store a pascal-style length byte in the mnemonic.
-    std::string LenMnemonic = char(II.Mnemonic.size()) + II.Mnemonic.str();
-    OS << ", " << StringTable.GetOrAddStringOffset(LenMnemonic, false)
-       << " /* " << II.Mnemonic << " */, ";
-
-    // Write the required features mask.
-    if (!II.RequiredFeatures.empty()) {
-      for (unsigned i = 0, e = II.RequiredFeatures.size(); i != e; ++i) {
-        if (i) OS << "|";
-        OS << II.RequiredFeatures[i]->getEnumName();
-      }
-    } else
-      OS << "0";
-
-    OS << ", " << OMI.CI->Name;
-
     OS << " },\n";
   }
   OS << "};\n\n";
 
-  OS << "const char *const OperandMatchEntry::MnemonicTable =\n";
-  StringTable.EmitString(OS);
-  OS << ";\n\n";
-
   // Emit the operand class switch to call the correct custom parser for
   // the found operand class.
   OS << Target.getName() << ClassName << "::OperandMatchResultTy "
@@ -2407,14 +2599,20 @@
   OS << "  // This should be included into the middle of the declaration of\n";
   OS << "  // your subclasses implementation of MCTargetAsmParser.\n";
   OS << "  unsigned ComputeAvailableFeatures(uint64_t FeatureBits) const;\n";
-  OS << "  bool ConvertToMCInst(unsigned Kind, MCInst &Inst, "
+  OS << "  void convertToMCInst(unsigned Kind, MCInst &Inst, "
      << "unsigned Opcode,\n"
      << "                       const SmallVectorImpl<MCParsedAsmOperand*> "
      << "&Operands);\n";
-  OS << "  bool MnemonicIsValid(StringRef Mnemonic);\n";
+  OS << "  void convertToMapAndConstraints(unsigned Kind,\n                ";
+  OS << "           const SmallVectorImpl<MCParsedAsmOperand*> &Operands);\n";
+  OS << "  bool mnemonicIsValid(StringRef Mnemonic);\n";
   OS << "  unsigned MatchInstructionImpl(\n";
-  OS << "    const SmallVectorImpl<MCParsedAsmOperand*> &Operands,\n";
-  OS << "    MCInst &Inst, unsigned &ErrorInfo, unsigned VariantID = 0);\n";
+  OS.indent(27);
+  OS << "const SmallVectorImpl<MCParsedAsmOperand*> &Operands,\n"
+     << "                                MCInst &Inst,\n"
+     << "                                unsigned &ErrorInfo,"
+     << " bool matchingInlineAsm,\n"
+     << "                                unsigned VariantID = 0);\n";
 
   if (Info.OperandMatchInfo.size()) {
     OS << "\n  enum OperandMatchResultTy {\n";
@@ -2447,7 +2645,9 @@
   emitSubtargetFeatureFlagEnumeration(Info, OS);
 
   // Emit the function to match a register name to number.
-  emitMatchRegisterName(Target, AsmParser, OS);
+  // This should be omitted for the Mips target
+  if (AsmParser->getValueAsBit("ShouldEmitMatchRegisterName"))
+    emitMatchRegisterName(Target, AsmParser, OS);
 
   OS << "#endif // GET_REGISTER_MATCHER\n\n";
 
@@ -2465,8 +2665,10 @@
   // Generate the function that remaps for mnemonic aliases.
   bool HasMnemonicAliases = emitMnemonicAliases(OS, Info);
 
-  // Generate the unified function to convert operands into an MCInst.
-  emitConvertToMCInst(Target, ClassName, Info.Matchables, OS);
+  // Generate the convertToMCInst function to convert operands into an MCInst.
+  // Also, generate the convertToMapAndConstraints function for MS-style inline
+  // assembly.  The latter doesn't actually generate an MCInst.
+  emitConvertFuncs(Target, ClassName, Info.Matchables, OS);
 
   // Emit the enumeration for classes which participate in matching.
   emitMatchClassEnumeration(Target, Info.Classes, OS);
@@ -2484,11 +2686,25 @@
   emitComputeAvailableFeatures(Info, OS);
 
 
+  StringToOffsetTable StringTable;
+
   size_t MaxNumOperands = 0;
+  unsigned MaxMnemonicIndex = 0;
   for (std::vector<MatchableInfo*>::const_iterator it =
          Info.Matchables.begin(), ie = Info.Matchables.end();
-       it != ie; ++it)
-    MaxNumOperands = std::max(MaxNumOperands, (*it)->AsmOperands.size());
+       it != ie; ++it) {
+    MatchableInfo &II = **it;
+    MaxNumOperands = std::max(MaxNumOperands, II.AsmOperands.size());
+
+    // Store a pascal-style length byte in the mnemonic.
+    std::string LenMnemonic = char(II.Mnemonic.size()) + II.Mnemonic.str();
+    MaxMnemonicIndex = std::max(MaxMnemonicIndex,
+                        StringTable.GetOrAddStringOffset(LenMnemonic, false));
+  }
+
+  OS << "static const char *const MnemonicTable =\n";
+  StringTable.EmitString(OS);
+  OS << ";\n\n";
 
   // Emit the static match table; unused classes get initialized to 0 which is
   // guaranteed to be InvalidMatchClass.
@@ -2502,8 +2718,8 @@
   // following the mnemonic.
   OS << "namespace {\n";
   OS << "  struct MatchEntry {\n";
-  OS << "    static const char *const MnemonicTable;\n";
-  OS << "    uint32_t Mnemonic;\n";
+  OS << "    " << getMinimalTypeForRange(MaxMnemonicIndex)
+               << " Mnemonic;\n";
   OS << "    uint16_t Opcode;\n";
   OS << "    " << getMinimalTypeForRange(Info.Matchables.size())
                << " ConvertFn;\n";
@@ -2533,8 +2749,6 @@
 
   OS << "} // end anonymous namespace.\n\n";
 
-  StringToOffsetTable StringTable;
-
   OS << "static const MatchEntry MatchTable["
      << Info.Matchables.size() << "] = {\n";
 
@@ -2573,13 +2787,9 @@
 
   OS << "};\n\n";
 
-  OS << "const char *const MatchEntry::MnemonicTable =\n";
-  StringTable.EmitString(OS);
-  OS << ";\n\n";
-
   // A method to determine if a mnemonic is in the list.
   OS << "bool " << Target.getName() << ClassName << "::\n"
-     << "MnemonicIsValid(StringRef Mnemonic) {\n";
+     << "mnemonicIsValid(StringRef Mnemonic) {\n";
   OS << "  // Search the table.\n";
   OS << "  std::pair<const MatchEntry*, const MatchEntry*> MnemonicRange =\n";
   OS << "    std::equal_range(MatchTable, MatchTable+"
@@ -2592,8 +2802,14 @@
      << Target.getName() << ClassName << "::\n"
      << "MatchInstructionImpl(const SmallVectorImpl<MCParsedAsmOperand*>"
      << " &Operands,\n";
-  OS << "                     MCInst &Inst, unsigned &ErrorInfo, ";
-  OS << "unsigned VariantID) {\n";
+  OS << "                     MCInst &Inst,\n"
+     << "unsigned &ErrorInfo, bool matchingInlineAsm, unsigned VariantID) {\n";
+
+  OS << "  // Eliminate obvious mismatches.\n";
+  OS << "  if (Operands.size() > " << (MaxNumOperands+1) << ") {\n";
+  OS << "    ErrorInfo = " << (MaxNumOperands+1) << ";\n";
+  OS << "    return Match_InvalidOperand;\n";
+  OS << "  }\n\n";
 
   // Emit code to get the available features.
   OS << "  // Get the current feature set.\n";
@@ -2611,12 +2827,6 @@
   }
 
   // Emit code to compute the class list for this operand vector.
-  OS << "  // Eliminate obvious mismatches.\n";
-  OS << "  if (Operands.size() > " << (MaxNumOperands+1) << ") {\n";
-  OS << "    ErrorInfo = " << (MaxNumOperands+1) << ";\n";
-  OS << "    return Match_InvalidOperand;\n";
-  OS << "  }\n\n";
-
   OS << "  // Some state to try to produce better error messages.\n";
   OS << "  bool HadMatchOtherThanFeatures = false;\n";
   OS << "  bool HadMatchOtherThanPredicate = false;\n";
@@ -2681,17 +2891,20 @@
   OS << "      HadMatchOtherThanFeatures = true;\n";
   OS << "      unsigned NewMissingFeatures = it->RequiredFeatures & "
         "~AvailableFeatures;\n";
-  OS << "      if (CountPopulation_32(NewMissingFeatures) <= "
-        "CountPopulation_32(MissingFeatures))\n";
+  OS << "      if (CountPopulation_32(NewMissingFeatures) <=\n"
+        "          CountPopulation_32(MissingFeatures))\n";
   OS << "        MissingFeatures = NewMissingFeatures;\n";
   OS << "      continue;\n";
   OS << "    }\n";
   OS << "\n";
+  OS << "    if (matchingInlineAsm) {\n";
+  OS << "      Inst.setOpcode(it->Opcode);\n";
+  OS << "      convertToMapAndConstraints(it->ConvertFn, Operands);\n";
+  OS << "      return Match_Success;\n";
+  OS << "    }\n\n";
   OS << "    // We have selected a definite instruction, convert the parsed\n"
      << "    // operands into the appropriate MCInst.\n";
-  OS << "    if (!ConvertToMCInst(it->ConvertFn, Inst,\n"
-     << "                         it->Opcode, Operands))\n";
-  OS << "      return Match_ConversionFail;\n";
+  OS << "    convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n";
   OS << "\n";
 
   // Verify the instruction with the target-specific match predicate function.
@@ -2716,15 +2929,16 @@
   OS << "  }\n\n";
 
   OS << "  // Okay, we had no match.  Try to return a useful error code.\n";
-  OS << "  if (HadMatchOtherThanPredicate || !HadMatchOtherThanFeatures)";
-  OS << "  return RetCode;\n";
+  OS << "  if (HadMatchOtherThanPredicate || !HadMatchOtherThanFeatures)\n";
+  OS << "    return RetCode;\n\n";
   OS << "  // Missing feature matches return which features were missing\n";
   OS << "  ErrorInfo = MissingFeatures;\n";
   OS << "  return Match_MissingFeature;\n";
   OS << "}\n\n";
 
   if (Info.OperandMatchInfo.size())
-    emitCustomOperandParsing(OS, Target, Info, ClassName);
+    emitCustomOperandParsing(OS, Target, Info, ClassName, StringTable,
+                             MaxMnemonicIndex);
 
   OS << "#endif // GET_MATCHER_IMPLEMENTATION\n\n";
 }

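For context on the table layout above: every mnemonic is stored once in the shared MnemonicTable as a length byte followed by the text, and each MatchEntry / OperandMatchEntry keeps only an offset whose width is chosen by getMinimalTypeForRange(MaxMnemonicIndex). A minimal sketch of how such an entry could be decoded; the struct and accessor below are illustrative assumptions, not a quote of the generated output:

  #include "llvm/ADT/StringRef.h"
  #include <cstdint>

  // Sketch only: field widths mirror the emitted layout, names are assumed.
  struct MatchEntrySketch {
    uint16_t Mnemonic;   // offset into MnemonicTable
    uint16_t Opcode;

    llvm::StringRef getMnemonic(const char *MnemonicTable) const {
      // The byte at the offset is the pascal-style length; the text follows.
      return llvm::StringRef(MnemonicTable + Mnemonic + 1,
                             MnemonicTable[Mnemonic]);
    }
  };
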
Modified: llvm/branches/AMDILBackend/utils/TableGen/AsmWriterEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/AsmWriterEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/AsmWriterEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/AsmWriterEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -313,7 +313,9 @@
 
   /// OpcodeInfo - This encodes the index of the string to use for the first
   /// chunk of the output as well as indices used for operand printing.
-  std::vector<unsigned> OpcodeInfo;
+  /// To reduce the number of unhandled cases, we expand the size from 32-bit
+  /// to 32+16 = 48-bit.
+  std::vector<uint64_t> OpcodeInfo;
 
   // Add all strings to the string table upfront so it can generate an optimized
   // representation.
@@ -362,7 +364,7 @@
 
   // To reduce code size, we compactify common instructions into a few bits
   // in the opcode-indexed table.
-  unsigned BitsLeft = 32-AsmStrBits;
+  unsigned BitsLeft = 64-AsmStrBits;
 
   std::vector<std::vector<std::string> > TableDrivenOperandPrinters;
 
@@ -388,10 +390,11 @@
     }
 
     // Otherwise, we can include this in the initial lookup table.  Add it in.
-    BitsLeft -= NumBits;
     for (unsigned i = 0, e = InstIdxs.size(); i != e; ++i)
-      if (InstIdxs[i] != ~0U)
-        OpcodeInfo[i] |= InstIdxs[i] << (BitsLeft+AsmStrBits);
+      if (InstIdxs[i] != ~0U) {
+        OpcodeInfo[i] |= (uint64_t)InstIdxs[i] << (64-BitsLeft);
+      }
+    BitsLeft -= NumBits;
 
     // Remove the info about this operand.
     for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
@@ -410,16 +413,32 @@
   }
 
 
-
-  O<<"  static const unsigned OpInfo[] = {\n";
+  // We always emit at least one 32-bit table. A second table is emitted if
+  // more bits are needed.
+  O<<"  static const uint32_t OpInfo[] = {\n";
   for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
-    O << "    " << OpcodeInfo[i] << "U,\t// "
+    O << "    " << (OpcodeInfo[i] & 0xffffffff) << "U,\t// "
       << NumberedInstructions[i]->TheDef->getName() << "\n";
   }
   // Add a dummy entry so the array init doesn't end with a comma.
   O << "    0U\n";
   O << "  };\n\n";
 
+  if (BitsLeft < 32) {
+    // Add a second OpInfo table only when it is necessary.
+    // Adjust the type of the second table based on the number of bits needed.
+    O << "  static const uint"
+      << ((BitsLeft < 16) ? "32" : (BitsLeft < 24) ? "16" : "8")
+      << "_t OpInfo2[] = {\n";
+    for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
+      O << "    " << (OpcodeInfo[i] >> 32) << "U,\t// "
+        << NumberedInstructions[i]->TheDef->getName() << "\n";
+    }
+    // Add a dummy entry so the array init doesn't end with a comma.
+    O << "    0U\n";
+    O << "  };\n\n";
+  }
+
   // Emit the string itself.
   O << "  const char AsmStrs[] = {\n";
   StringTable.emit(O, printChar);
@@ -427,13 +446,22 @@
 
   O << "  O << \"\\t\";\n\n";
 
-  O << "  // Emit the opcode for the instruction.\n"
-    << "  unsigned Bits = OpInfo[MI->getOpcode()];\n"
-    << "  assert(Bits != 0 && \"Cannot print this instruction.\");\n"
+  O << "  // Emit the opcode for the instruction.\n";
+  if (BitsLeft < 32) {
+    // If we have two tables then we need to perform two lookups and combine
+    // the results into a single 64-bit value.
+    O << "  uint64_t Bits1 = OpInfo[MI->getOpcode()];\n"
+      << "  uint64_t Bits2 = OpInfo2[MI->getOpcode()];\n"
+      << "  uint64_t Bits = (Bits2 << 32) | Bits1;\n";
+  } else {
+    // If only one table is used we just need to perform a single lookup.
+    O << "  uint32_t Bits = OpInfo[MI->getOpcode()];\n";
+  }
+  O << "  assert(Bits != 0 && \"Cannot print this instruction.\");\n"
     << "  O << AsmStrs+(Bits & " << (1 << AsmStrBits)-1 << ")-1;\n\n";
 
   // Output the table driven operand information.
-  BitsLeft = 32-AsmStrBits;
+  BitsLeft = 64-AsmStrBits;
   for (unsigned i = 0, e = TableDrivenOperandPrinters.size(); i != e; ++i) {
     std::vector<std::string> &Commands = TableDrivenOperandPrinters[i];
 
@@ -443,14 +471,13 @@
     assert(NumBits <= BitsLeft && "consistency error");
 
     // Emit code to extract this field from Bits.
-    BitsLeft -= NumBits;
-
     O << "\n  // Fragment " << i << " encoded into " << NumBits
       << " bits for " << Commands.size() << " unique commands.\n";
 
     if (Commands.size() == 2) {
       // Emit two possibilities with if/else.
-      O << "  if ((Bits >> " << (BitsLeft+AsmStrBits) << ") & "
+      O << "  if ((Bits >> "
+        << (64-BitsLeft) << ") & "
         << ((1 << NumBits)-1) << ") {\n"
         << Commands[1]
         << "  } else {\n"
@@ -460,7 +487,8 @@
       // Emit a single possibility.
       O << Commands[0] << "\n\n";
     } else {
-      O << "  switch ((Bits >> " << (BitsLeft+AsmStrBits) << ") & "
+      O << "  switch ((Bits >> "
+        << (64-BitsLeft) << ") & "
         << ((1 << NumBits)-1) << ") {\n"
         << "  default:   // unreachable.\n";
 
@@ -472,6 +500,7 @@
       }
       O << "  }\n\n";
     }
+    BitsLeft -= NumBits;
   }
 
   // Okay, delete instructions with no operand info left.
@@ -537,9 +566,9 @@
         std::vector<std::string> AltNames =
           Reg.TheDef->getValueAsListOfStrings("AltNames");
         if (AltNames.size() <= Idx)
-          throw TGError(Reg.TheDef->getLoc(),
-                        (Twine("Register definition missing alt name for '") +
-                        AltName + "'.").str());
+          PrintFatalError(Reg.TheDef->getLoc(),
+            (Twine("Register definition missing alt name for '") +
+             AltName + "'.").str());
         AsmName = AltNames[Idx];
       }
     }
@@ -551,7 +580,7 @@
   StringTable.emit(O, printChar);
   O << "  };\n\n";
 
-  O << "  static const unsigned RegAsmOffset" << AltName << "[] = {";
+  O << "  static const uint32_t RegAsmOffset" << AltName << "[] = {";
   for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
     if ((i % 14) == 0)
       O << "\n    ";
@@ -590,7 +619,7 @@
     emitRegisterNameString(O, "", Registers);
 
   if (hasAltNames) {
-    O << "  const unsigned *RegAsmOffset;\n"
+    O << "  const uint32_t *RegAsmOffset;\n"
       << "  const char *AsmStrs;\n"
       << "  switch(AltIdx) {\n"
       << "  default: llvm_unreachable(\"Invalid register alt name index!\");\n";
@@ -763,7 +792,7 @@
     if (!R->getValueAsBit("EmitAlias"))
       continue; // We were told not to emit the alias, but to emit the aliasee.
     const DagInit *DI = R->getValueAsDag("ResultInst");
-    const DefInit *Op = dynamic_cast<const DefInit*>(DI->getOperator());
+    const DefInit *Op = cast<DefInit>(DI->getOperator());
     AliasMap[getQualifiedName(Op->getDef())].push_back(Alias);
   }
 

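The printer side of the change above: OpcodeInfo now carries up to 64 bits per opcode, emitted as a mandatory 32-bit OpInfo table plus an optional narrower OpInfo2 table, and the generated code recombines the two lookups before extracting each operand-printer fragment. A standalone sketch; the table contents, shift, and width are made-up example values (the emitter computes the real shift as 64-BitsLeft and the mask as (1 << NumBits)-1):

  #include <cstdint>

  static const uint32_t OpInfo[]  = { 0x0000002A, 0 };  // low 32 bits per opcode
  static const uint8_t  OpInfo2[] = { 0x01, 0 };        // high bits per opcode

  uint64_t lookupBits(unsigned Opcode) {
    uint64_t Bits1 = OpInfo[Opcode];
    uint64_t Bits2 = OpInfo2[Opcode];
    return (Bits2 << 32) | Bits1;       // same recombination the printer emits
  }

  unsigned extractFragment(uint64_t Bits, unsigned Shift, unsigned NumBits) {
    return (Bits >> Shift) & ((1u << NumBits) - 1);  // one fragment's command index
  }
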
Modified: llvm/branches/AMDILBackend/utils/TableGen/AsmWriterInst.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/AsmWriterInst.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/AsmWriterInst.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/AsmWriterInst.cpp Tue Jan 15 11:16:16 2013
@@ -14,6 +14,7 @@
 #include "AsmWriterInst.h"
 #include "CodeGenTarget.h"
 #include "llvm/ADT/StringExtras.h"
+#include "llvm/TableGen/Error.h"
 #include "llvm/TableGen/Record.h"
 
 using namespace llvm;
@@ -123,8 +124,8 @@
                    != std::string::npos) {
           AddLiteralString(std::string(1, AsmString[DollarPos+1]));
         } else {
-          throw "Non-supported escaped character found in instruction '" +
-          CGI.TheDef->getName() + "'!";
+          PrintFatalError("Non-supported escaped character found in instruction '" +
+            CGI.TheDef->getName() + "'!");
         }
         LastEmitted = DollarPos+2;
         continue;
@@ -162,15 +163,15 @@
       // brace.
       if (hasCurlyBraces) {
         if (VarEnd >= AsmString.size())
-          throw "Reached end of string before terminating curly brace in '"
-          + CGI.TheDef->getName() + "'";
+          PrintFatalError("Reached end of string before terminating curly brace in '"
+            + CGI.TheDef->getName() + "'");
         
         // Look for a modifier string.
         if (AsmString[VarEnd] == ':') {
           ++VarEnd;
           if (VarEnd >= AsmString.size())
-            throw "Reached end of string before terminating curly brace in '"
-            + CGI.TheDef->getName() + "'";
+            PrintFatalError("Reached end of string before terminating curly brace in '"
+              + CGI.TheDef->getName() + "'");
           
           unsigned ModifierStart = VarEnd;
           while (VarEnd < AsmString.size() && isIdentChar(AsmString[VarEnd]))
@@ -178,17 +179,17 @@
           Modifier = std::string(AsmString.begin()+ModifierStart,
                                  AsmString.begin()+VarEnd);
           if (Modifier.empty())
-            throw "Bad operand modifier name in '"+ CGI.TheDef->getName() + "'";
+            PrintFatalError("Bad operand modifier name in '"+ CGI.TheDef->getName() + "'");
         }
         
         if (AsmString[VarEnd] != '}')
-          throw "Variable name beginning with '{' did not end with '}' in '"
-          + CGI.TheDef->getName() + "'";
+          PrintFatalError("Variable name beginning with '{' did not end with '}' in '"
+            + CGI.TheDef->getName() + "'");
         ++VarEnd;
       }
       if (VarName.empty() && Modifier.empty())
-        throw "Stray '$' in '" + CGI.TheDef->getName() +
-        "' asm string, maybe you want $$?";
+        PrintFatalError("Stray '$' in '" + CGI.TheDef->getName() +
+          "' asm string, maybe you want $$?");
       
       if (VarName.empty()) {
         // Just a modifier, pass this into PrintSpecial.

Modified: llvm/branches/AMDILBackend/utils/TableGen/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -1,5 +1,3 @@
-set(LLVM_REQUIRES_EH 1)
-set(LLVM_REQUIRES_RTTI 1)
 set(LLVM_LINK_COMPONENTS Support)
 
 add_tablegen(llvm-tblgen LLVM
@@ -10,6 +8,7 @@
   CodeEmitterGen.cpp
   CodeGenDAGPatterns.cpp
   CodeGenInstruction.cpp
+  CodeGenMapTable.cpp
   CodeGenRegisters.cpp
   CodeGenSchedule.cpp
   CodeGenTarget.cpp

Modified: llvm/branches/AMDILBackend/utils/TableGen/CallingConvEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/CallingConvEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/CallingConvEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/CallingConvEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "CodeGenTarget.h"
+#include "llvm/TableGen/Error.h"
 #include "llvm/TableGen/Record.h"
 #include "llvm/TableGen/TableGenBackend.h"
 #include <cassert>
@@ -93,7 +94,7 @@
       O << Action->getValueAsString("Predicate");
     } else {
       Action->dump();
-      throw "Unknown CCPredicateAction!";
+      PrintFatalError("Unknown CCPredicateAction!");
     }
     
     O << ") {\n";
@@ -131,7 +132,7 @@
       ListInit *ShadowRegList = Action->getValueAsListInit("ShadowRegList");
       if (ShadowRegList->getSize() >0 &&
           ShadowRegList->getSize() != RegList->getSize())
-        throw "Invalid length of list of shadowed registers";
+        PrintFatalError("Invalid length of list of shadowed registers");
 
       if (RegList->getSize() == 1) {
         O << IndentStr << "if (unsigned Reg = State.AllocateReg(";
@@ -177,12 +178,12 @@
       if (Size)
         O << Size << ", ";
       else
-        O << "\n" << IndentStr << "  State.getTarget().getTargetData()"
+        O << "\n" << IndentStr << "  State.getTarget().getDataLayout()"
           "->getTypeAllocSize(EVT(LocVT).getTypeForEVT(State.getContext())), ";
       if (Align)
         O << Align;
       else
-        O << "\n" << IndentStr << "  State.getTarget().getTargetData()"
+        O << "\n" << IndentStr << "  State.getTarget().getDataLayout()"
           "->getABITypeAlignment(EVT(LocVT).getTypeForEVT(State.getContext()))";
       if (Action->isSubClassOf("CCAssignToStackWithShadow"))
         O << ", " << getQualifiedName(Action->getValueAsDef("ShadowReg"));
@@ -221,7 +222,7 @@
       O << IndentStr << IndentStr << "return false;\n";
     } else {
       Action->dump();
-      throw "Unknown CCAction!";
+      PrintFatalError("Unknown CCAction!");
     }
   }
 }

Modified: llvm/branches/AMDILBackend/utils/TableGen/CodeEmitterGen.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/CodeEmitterGen.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/CodeEmitterGen.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/CodeEmitterGen.cpp Tue Jan 15 11:16:16 2013
@@ -91,11 +91,11 @@
 // return the variable bit position.  Otherwise return -1.
 int CodeEmitterGen::getVariableBit(const std::string &VarName,
                                    BitsInit *BI, int bit) {
-  if (VarBitInit *VBI = dynamic_cast<VarBitInit*>(BI->getBit(bit))) {
-    if (VarInit *VI = dynamic_cast<VarInit*>(VBI->getVariable()))
+  if (VarBitInit *VBI = dyn_cast<VarBitInit>(BI->getBit(bit))) {
+    if (VarInit *VI = dyn_cast<VarInit>(VBI->getBitVar()))
       if (VI->getName() == VarName)
         return VBI->getBitNum();
-  } else if (VarInit *VI = dynamic_cast<VarInit*>(BI->getBit(bit))) {
+  } else if (VarInit *VI = dyn_cast<VarInit>(BI->getBit(bit))) {
     if (VI->getName() == VarName)
       return 0;
   }
@@ -134,10 +134,13 @@
     assert(!CGI.Operands.isFlatOperandNotEmitted(OpIdx) &&
            "Explicitly used operand also marked as not emitted!");
   } else {
+    unsigned NumberOps = CGI.Operands.size();
     /// If this operand is not supposed to be emitted by the
     /// generated emitter, skip it.
-    while (CGI.Operands.isFlatOperandNotEmitted(NumberedOp))
+    while (NumberedOp < NumberOps &&
+           CGI.Operands.isFlatOperandNotEmitted(NumberedOp))
       ++NumberedOp;
+
     OpIdx = NumberedOp++;
   }
   
@@ -269,7 +272,7 @@
     // Start by filling in fixed values.
     uint64_t Value = 0;
     for (unsigned i = 0, e = BI->getNumBits(); i != e; ++i) {
-      if (BitInit *B = dynamic_cast<BitInit*>(BI->getBit(e-i-1)))
+      if (BitInit *B = dyn_cast<BitInit>(BI->getBit(e-i-1)))
         Value |= (uint64_t)B->getValue() << (e-i-1);
     }
     o << "    UINT64_C(" << Value << ")," << '\t' << "// " << R->getName() << "\n";

Modified: llvm/branches/AMDILBackend/utils/TableGen/CodeGenDAGPatterns.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/CodeGenDAGPatterns.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/CodeGenDAGPatterns.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/CodeGenDAGPatterns.cpp Tue Jan 15 11:16:16 2013
@@ -79,14 +79,19 @@
   const std::vector<MVT::SimpleValueType> &LegalTypes =
     TP.getDAGPatterns().getTargetInfo().getLegalValueTypes();
 
+  if (TP.hasError())
+    return false;
+
   for (unsigned i = 0, e = LegalTypes.size(); i != e; ++i)
     if (Pred == 0 || Pred(LegalTypes[i]))
       TypeVec.push_back(LegalTypes[i]);
 
   // If we have nothing that matches the predicate, bail out.
-  if (TypeVec.empty())
+  if (TypeVec.empty()) {
     TP.error("Type inference contradiction found, no " +
              std::string(PredicateName) + " types found");
+    return false;
+  }
   // No need to sort with one element.
   if (TypeVec.size() == 1) return true;
 
@@ -146,9 +151,9 @@
 
 /// MergeInTypeInfo - This merges in type information from the specified
 /// argument.  If 'this' changes, it returns true.  If the two types are
-/// contradictory (e.g. merge f32 into i32) then this throws an exception.
+/// contradictory (e.g. merge f32 into i32) then this flags an error.
 bool EEVT::TypeSet::MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP){
-  if (InVT.isCompletelyUnknown() || *this == InVT)
+  if (InVT.isCompletelyUnknown() || *this == InVT || TP.hasError())
     return false;
 
   if (isCompletelyUnknown()) {
@@ -224,11 +229,13 @@
   // FIXME: Really want an SMLoc here!
   TP.error("Type inference contradiction found, merging '" +
            InVT.getName() + "' into '" + InputSet.getName() + "'");
-  return true; // unreachable
+  return false;
 }
 
 /// EnforceInteger - Remove all non-integer types from this set.
 bool EEVT::TypeSet::EnforceInteger(TreePattern &TP) {
+  if (TP.hasError())
+    return false;
   // If we know nothing, then get the full set.
   if (TypeVec.empty())
     return FillWithPossibleTypes(TP, isInteger, "integer");
@@ -242,14 +249,18 @@
     if (!isInteger(TypeVec[i]))
       TypeVec.erase(TypeVec.begin()+i--);
 
-  if (TypeVec.empty())
+  if (TypeVec.empty()) {
     TP.error("Type inference contradiction found, '" +
              InputSet.getName() + "' needs to be integer");
+    return false;
+  }
   return true;
 }
 
 /// EnforceFloatingPoint - Remove all integer types from this set.
 bool EEVT::TypeSet::EnforceFloatingPoint(TreePattern &TP) {
+  if (TP.hasError())
+    return false;
   // If we know nothing, then get the full set.
   if (TypeVec.empty())
     return FillWithPossibleTypes(TP, isFloatingPoint, "floating point");
@@ -264,14 +275,19 @@
     if (!isFloatingPoint(TypeVec[i]))
       TypeVec.erase(TypeVec.begin()+i--);
 
-  if (TypeVec.empty())
+  if (TypeVec.empty()) {
     TP.error("Type inference contradiction found, '" +
              InputSet.getName() + "' needs to be floating point");
+    return false;
+  }
   return true;
 }
 
 /// EnforceScalar - Remove all vector types from this.
 bool EEVT::TypeSet::EnforceScalar(TreePattern &TP) {
+  if (TP.hasError())
+    return false;
+
   // If we know nothing, then get the full set.
   if (TypeVec.empty())
     return FillWithPossibleTypes(TP, isScalar, "scalar");
@@ -286,14 +302,19 @@
     if (!isScalar(TypeVec[i]))
       TypeVec.erase(TypeVec.begin()+i--);
 
-  if (TypeVec.empty())
+  if (TypeVec.empty()) {
     TP.error("Type inference contradiction found, '" +
              InputSet.getName() + "' needs to be scalar");
+    return false;
+  }
   return true;
 }
 
 /// EnforceVector - Remove all vector types from this.
 bool EEVT::TypeSet::EnforceVector(TreePattern &TP) {
+  if (TP.hasError())
+    return false;
+
   // If we know nothing, then get the full set.
   if (TypeVec.empty())
     return FillWithPossibleTypes(TP, isVector, "vector");
@@ -308,9 +329,11 @@
       MadeChange = true;
     }
 
-  if (TypeVec.empty())
+  if (TypeVec.empty()) {
     TP.error("Type inference contradiction found, '" +
              InputSet.getName() + "' needs to be a vector");
+    return false;
+  }
   return MadeChange;
 }
 
@@ -319,6 +342,9 @@
 /// EnforceSmallerThan - 'this' must be a smaller VT than Other.  Update
 /// this an other based on this information.
 bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) {
+  if (TP.hasError())
+    return false;
+
   // Both operands must be integer or FP, but we don't care which.
   bool MadeChange = false;
 
@@ -365,19 +391,22 @@
     if (hasVectorTypes() && Other.hasVectorTypes()) {
       if (Type.getSizeInBits() >= OtherType.getSizeInBits())
         if (Type.getVectorElementType().getSizeInBits()
-            >= OtherType.getVectorElementType().getSizeInBits())
+            >= OtherType.getVectorElementType().getSizeInBits()) {
           TP.error("Type inference contradiction found, '" +
                    getName() + "' element type not smaller than '" +
                    Other.getName() +"'!");
+          return false;
+        }
     }
     else
       // For scalar types, the bitsize of this type must be larger
       // than that of the other.
-      if (Type.getSizeInBits() >= OtherType.getSizeInBits())
+      if (Type.getSizeInBits() >= OtherType.getSizeInBits()) {
         TP.error("Type inference contradiction found, '" +
                  getName() + "' is not smaller than '" +
                  Other.getName() +"'!");
-
+        return false;
+      }
   }
   
 
@@ -437,9 +466,11 @@
   // If this is the only type in the large set, the constraint can never be
   // satisfied.
   if ((Other.hasIntegerTypes() && OtherIntSize == 0)
-      || (Other.hasFloatingPointTypes() && OtherFPSize == 0))
+      || (Other.hasFloatingPointTypes() && OtherFPSize == 0)) {
     TP.error("Type inference contradiction found, '" +
              Other.getName() + "' has nothing larger than '" + getName() +"'!");
+    return false;
+  }
 
   // Okay, find the largest type in the Other set and remove it from the
   // current set.
@@ -493,9 +524,11 @@
   // If this is the only type in the small set, the constraint can never be
   // satisfied.
   if ((hasIntegerTypes() && IntSize == 0)
-      || (hasFloatingPointTypes() && FPSize == 0))
+      || (hasFloatingPointTypes() && FPSize == 0)) {
     TP.error("Type inference contradiction found, '" +
              getName() + "' has nothing smaller than '" + Other.getName()+"'!");
+    return false;
+  }
 
   return MadeChange;
 }
@@ -504,6 +537,9 @@
 /// whose element is specified by VTOperand.
 bool EEVT::TypeSet::EnforceVectorEltTypeIs(EEVT::TypeSet &VTOperand,
                                            TreePattern &TP) {
+  if (TP.hasError())
+    return false;
+
   // "This" must be a vector and "VTOperand" must be a scalar.
   bool MadeChange = false;
   MadeChange |= EnforceVector(TP);
@@ -535,9 +571,11 @@
     }
   }
 
-  if (TypeVec.empty())  // FIXME: Really want an SMLoc here!
+  if (TypeVec.empty()) {  // FIXME: Really want an SMLoc here!
     TP.error("Type inference contradiction found, forcing '" +
              InputSet.getName() + "' to have a vector element");
+    return false;
+  }
   return MadeChange;
 }
 
@@ -574,10 +612,6 @@
 //===----------------------------------------------------------------------===//
 // Helpers for working with extended types.
 
-bool RecordPtrCmp::operator()(const Record *LHS, const Record *RHS) const {
-  return LHS->getID() < RHS->getID();
-}
-
 /// Dependent variable map for CodeGenDAGPattern variant generation
 typedef std::map<std::string, int> DepVarMap;
 
@@ -586,7 +620,7 @@
 
 static void FindDepVarsOf(TreePatternNode *N, DepVarMap &DepMap) {
   if (N->isLeaf()) {
-    if (dynamic_cast<DefInit*>(N->getLeafValue()) != NULL)
+    if (isa<DefInit>(N->getLeafValue()))
       DepMap[N->getName()]++;
   } else {
     for (size_t i = 0, e = N->getNumChildren(); i != e; ++i)
@@ -695,7 +729,7 @@
   unsigned Size = 3;  // The node itself.
   // If the root node is a ConstantSDNode, increases its size.
   // e.g. (set R32:$dst, 0).
-  if (P->isLeaf() && dynamic_cast<IntInit*>(P->getLeafValue()))
+  if (P->isLeaf() && isa<IntInit>(P->getLeafValue()))
     Size += 2;
 
   // FIXME: This is a hack to statically increase the priority of patterns
@@ -719,7 +753,7 @@
         Child->getType(0) != MVT::Other)
       Size += getPatternSize(Child, CGP);
     else if (Child->isLeaf()) {
-      if (dynamic_cast<IntInit*>(Child->getLeafValue()))
+      if (isa<IntInit>(Child->getLeafValue()))
         Size += 5;  // Matches a ConstantSDNode (+3) and a specific value (+2).
       else if (Child->getComplexPatternInfo(CGP))
         Size += getPatternSize(Child, CGP);
@@ -745,7 +779,7 @@
 std::string PatternToMatch::getPredicateCheck() const {
   std::string PredicateCheck;
   for (unsigned i = 0, e = Predicates->getSize(); i != e; ++i) {
-    if (DefInit *Pred = dynamic_cast<DefInit*>(Predicates->getElement(i))) {
+    if (DefInit *Pred = dyn_cast<DefInit>(Predicates->getElement(i))) {
       Record *Def = Pred->getDef();
       if (!Def->isSubClassOf("Predicate")) {
 #ifndef NDEBUG
@@ -773,7 +807,7 @@
     ConstraintType = SDTCisVT;
     x.SDTCisVT_Info.VT = getValueType(R->getValueAsDef("VT"));
     if (x.SDTCisVT_Info.VT == MVT::isVoid)
-      throw TGError(R->getLoc(), "Cannot use 'Void' as type to SDTCisVT");
+      PrintFatalError(R->getLoc(), "Cannot use 'Void' as type to SDTCisVT");
 
   } else if (R->isSubClassOf("SDTCisPtrTy")) {
     ConstraintType = SDTCisPtrTy;
@@ -833,11 +867,13 @@
 
 /// ApplyTypeConstraint - Given a node in a pattern, apply this type
 /// constraint to the nodes operands.  This returns true if it makes a
-/// change, false otherwise.  If a type contradiction is found, throw an
-/// exception.
+/// change, false otherwise.  If a type contradiction is found, flag an error.
 bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
                                            const SDNodeInfo &NodeInfo,
                                            TreePattern &TP) const {
+  if (TP.hasError())
+    return false;
+
   unsigned ResNo = 0; // The result number being referenced.
   TreePatternNode *NodeToApply = getOperandNum(OperandNo, N, NodeInfo, ResNo);
 
@@ -868,10 +904,12 @@
     // The NodeToApply must be a leaf node that is a VT.  OtherOperandNum must
     // have an integer type that is smaller than the VT.
     if (!NodeToApply->isLeaf() ||
-        !dynamic_cast<DefInit*>(NodeToApply->getLeafValue()) ||
+        !isa<DefInit>(NodeToApply->getLeafValue()) ||
         !static_cast<DefInit*>(NodeToApply->getLeafValue())->getDef()
-               ->isSubClassOf("ValueType"))
+               ->isSubClassOf("ValueType")) {
       TP.error(N->getOperator()->getName() + " expects a VT operand!");
+      return false;
+    }
     MVT::SimpleValueType VT =
      getValueType(static_cast<DefInit*>(NodeToApply->getLeafValue())->getDef());
 
@@ -1025,8 +1063,9 @@
     // Get the result tree.
     DagInit *Tree = Operator->getValueAsDag("Fragment");
     Record *Op = 0;
-    if (Tree && dynamic_cast<DefInit*>(Tree->getOperator()))
-      Op = dynamic_cast<DefInit*>(Tree->getOperator())->getDef();
+    if (Tree)
+      if (DefInit *DI = dyn_cast<DefInit>(Tree->getOperator()))
+        Op = DI->getDef();
     assert(Op && "Invalid Fragment");
     return GetNumNodeResults(Op, CDP);
   }
@@ -1100,8 +1139,8 @@
     return false;
 
   if (isLeaf()) {
-    if (DefInit *DI = dynamic_cast<DefInit*>(getLeafValue())) {
-      if (DefInit *NDI = dynamic_cast<DefInit*>(N->getLeafValue())) {
+    if (DefInit *DI = dyn_cast<DefInit>(getLeafValue())) {
+      if (DefInit *NDI = dyn_cast<DefInit>(N->getLeafValue())) {
         return ((DI->getDef() == NDI->getDef())
                 && (DepVars.find(getName()) == DepVars.end()
                     || getName() == N->getName()));
@@ -1158,8 +1197,8 @@
     TreePatternNode *Child = getChild(i);
     if (Child->isLeaf()) {
       Init *Val = Child->getLeafValue();
-      if (dynamic_cast<DefInit*>(Val) &&
-          static_cast<DefInit*>(Val)->getDef()->getName() == "node") {
+      if (isa<DefInit>(Val) &&
+          cast<DefInit>(Val)->getDef()->getName() == "node") {
         // We found a use of a formal argument, replace it with its value.
         TreePatternNode *NewChild = ArgMap[Child->getName()];
         assert(NewChild && "Couldn't find formal argument!");
@@ -1179,7 +1218,11 @@
 /// fragments, inline them into place, giving us a pattern without any
 /// PatFrag references.
 TreePatternNode *TreePatternNode::InlinePatternFragments(TreePattern &TP) {
-  if (isLeaf()) return this;  // nothing to do.
+  if (TP.hasError())
+    return 0;
+
+  if (isLeaf())
+     return this;  // nothing to do.
   Record *Op = getOperator();
 
   if (!Op->isSubClassOf("PatFrag")) {
@@ -1202,9 +1245,11 @@
   TreePattern *Frag = TP.getDAGPatterns().getPatternFragment(Op);
 
   // Verify that we are passing the right number of operands.
-  if (Frag->getNumArgs() != Children.size())
+  if (Frag->getNumArgs() != Children.size()) {
     TP.error("'" + Op->getName() + "' fragment requires " +
              utostr(Frag->getNumArgs()) + " operands!");
+    return 0;
+  }
 
   TreePatternNode *FragTree = Frag->getOnlyTree()->clone();
 
@@ -1320,8 +1365,7 @@
       getOperator() != CDP.get_intrinsic_wo_chain_sdnode())
     return 0;
 
-  unsigned IID =
-    dynamic_cast<IntInit*>(getChild(0)->getLeafValue())->getValue();
+  unsigned IID = cast<IntInit>(getChild(0)->getLeafValue())->getValue();
   return &CDP.getIntrinsicInfo(IID);
 }
 
@@ -1331,7 +1375,7 @@
 TreePatternNode::getComplexPatternInfo(const CodeGenDAGPatterns &CGP) const {
   if (!isLeaf()) return 0;
 
-  DefInit *DI = dynamic_cast<DefInit*>(getLeafValue());
+  DefInit *DI = dyn_cast<DefInit>(getLeafValue());
   if (DI && DI->getDef()->isSubClassOf("ComplexPattern"))
     return &CGP.getComplexPattern(DI->getDef());
   return 0;
@@ -1379,12 +1423,14 @@
 
 /// ApplyTypeConstraints - Apply all of the type constraints relevant to
 /// this node and its children in the tree.  This returns true if it makes a
-/// change, false otherwise.  If a type contradiction is found, throw an
-/// exception.
+/// change, false otherwise.  If a type contradiction is found, flag an error.
 bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
+  if (TP.hasError())
+    return false;
+
   CodeGenDAGPatterns &CDP = TP.getDAGPatterns();
   if (isLeaf()) {
-    if (DefInit *DI = dynamic_cast<DefInit*>(getLeafValue())) {
+    if (DefInit *DI = dyn_cast<DefInit>(getLeafValue())) {
       // If it's a regclass or something else known, include the type.
       bool MadeChange = false;
       for (unsigned i = 0, e = Types.size(); i != e; ++i)
@@ -1393,7 +1439,7 @@
       return MadeChange;
     }
 
-    if (IntInit *II = dynamic_cast<IntInit*>(getLeafValue())) {
+    if (IntInit *II = dyn_cast<IntInit>(getLeafValue())) {
       assert(Types.size() == 1 && "Invalid IntInit");
 
       // Int inits are always integers. :)
@@ -1410,21 +1456,15 @@
       // Make sure that the value is representable for this type.
       if (Size >= 32) return MadeChange;
 
-      int Val = (II->getValue() << (32-Size)) >> (32-Size);
-      if (Val == II->getValue()) return MadeChange;
-
-      // If sign-extended doesn't fit, does it fit as unsigned?
-      unsigned ValueMask;
-      unsigned UnsignedVal;
-      ValueMask = unsigned(~uint32_t(0UL) >> (32-Size));
-      UnsignedVal = unsigned(II->getValue());
-
-      if ((ValueMask & UnsignedVal) == UnsignedVal)
+      // Check that the value doesn't use more bits than we have. It must either
+      // be a sign- or zero-extended equivalent of the original.
+      int64_t SignBitAndAbove = II->getValue() >> (Size - 1);
+      if (SignBitAndAbove == -1 || SignBitAndAbove == 0 || SignBitAndAbove == 1)
         return MadeChange;
 
-      TP.error("Integer value '" + itostr(II->getValue())+
+      TP.error("Integer value '" + itostr(II->getValue()) +
                "' is out of range for type '" + getEnumName(getType(0)) + "'!");
-      return MadeChange;
+      return false;
     }
     return false;
   }
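The range check introduced in the hunk above reduces to: shift away everything below the target type's sign bit and accept the constant only if what remains is all zeros (a zero-extended value), all ones (a sign-extended negative), or exactly 1 (an unsigned value whose top bit is set). A standalone sketch using an assumed 8-bit width:

  #include <cassert>
  #include <cstdint>

  // Mirrors the check added above; 8 bits is just an example width.
  bool fitsInNBits(int64_t Value, unsigned Size) {
    int64_t SignBitAndAbove = Value >> (Size - 1);
    return SignBitAndAbove == -1 || SignBitAndAbove == 0 || SignBitAndAbove == 1;
  }

  int main() {
    assert(fitsInNBits(127, 8));   // ordinary in-range value
    assert(fitsInNBits(-128, 8));  // sign-extended equivalent
    assert(fitsInNBits(255, 8));   // zero-extended equivalent (0xFF)
    assert(!fitsInNBits(256, 8));  // needs 9 bits, rejected
    return 0;
  }
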
@@ -1487,10 +1527,12 @@
     for (unsigned i = 0, e = NumRetVTs; i != e; ++i)
       MadeChange |= UpdateNodeType(i, Int->IS.RetVTs[i], TP);
 
-    if (getNumChildren() != NumParamVTs + 1)
+    if (getNumChildren() != NumParamVTs + 1) {
       TP.error("Intrinsic '" + Int->Name + "' expects " +
                utostr(NumParamVTs) + " operands, not " +
                utostr(getNumChildren() - 1) + " operands!");
+      return false;
+    }
 
     // Apply type info to the intrinsic ID.
     MadeChange |= getChild(0)->UpdateNodeType(0, MVT::iPTR, TP);
@@ -1510,9 +1552,11 @@
 
     // Check that the number of operands is sane.  Negative operands -> varargs.
     if (NI.getNumOperands() >= 0 &&
-        getNumChildren() != (unsigned)NI.getNumOperands())
+        getNumChildren() != (unsigned)NI.getNumOperands()) {
       TP.error(getOperator()->getName() + " node requires exactly " +
                itostr(NI.getNumOperands()) + " operands!");
+      return false;
+    }
 
     bool MadeChange = NI.ApplyTypeConstraints(this, TP);
     for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
@@ -1541,7 +1585,7 @@
         const CodeGenRegisterClass &RC =
           CDP.getTargetInfo().getRegisterClass(RegClass);
         MadeChange |= UpdateNodeType(ResNo, RC.getValueTypes(), TP);
-      } else if (ResultNode->getName() == "unknown") {
+      } else if (ResultNode->isSubClassOf("unknown_class")) {
         // Nothing to do.
       } else {
         assert(ResultNode->isSubClassOf("RegisterClass") &&
@@ -1581,15 +1625,16 @@
       // If the instruction expects a predicate or optional def operand, we
       // codegen this by setting the operand to it's default value if it has a
       // non-empty DefaultOps field.
-      if ((OperandNode->isSubClassOf("PredicateOperand") ||
-           OperandNode->isSubClassOf("OptionalDefOperand")) &&
+      if (OperandNode->isSubClassOf("OperandWithDefaultOps") &&
           !CDP.getDefaultOperand(OperandNode).DefaultOps.empty())
         continue;
 
       // Verify that we didn't run out of provided operands.
-      if (ChildNo >= getNumChildren())
+      if (ChildNo >= getNumChildren()) {
         TP.error("Instruction '" + getOperator()->getName() +
                  "' expects more operands than were provided.");
+        return false;
+      }
 
       MVT::SimpleValueType VT;
       TreePatternNode *Child = getChild(ChildNo++);
@@ -1609,7 +1654,7 @@
         MadeChange |= Child->UpdateNodeType(ChildResNo, VT, TP);
       } else if (OperandNode->isSubClassOf("PointerLikeRegClass")) {
         MadeChange |= Child->UpdateNodeType(ChildResNo, MVT::iPTR, TP);
-      } else if (OperandNode->getName() == "unknown") {
+      } else if (OperandNode->isSubClassOf("unknown_class")) {
         // Nothing to do.
       } else
         llvm_unreachable("Unknown operand type!");
@@ -1617,9 +1662,11 @@
       MadeChange |= Child->ApplyTypeConstraints(TP, NotRegisters);
     }
 
-    if (ChildNo != getNumChildren())
+    if (ChildNo != getNumChildren()) {
       TP.error("Instruction '" + getOperator()->getName() +
                "' was provided too many operands!");
+      return false;
+    }
 
     return MadeChange;
   }
@@ -1627,9 +1674,11 @@
   assert(getOperator()->isSubClassOf("SDNodeXForm") && "Unknown node type!");
 
   // Node transforms always take one operand.
-  if (getNumChildren() != 1)
+  if (getNumChildren() != 1) {
     TP.error("Node transform '" + getOperator()->getName() +
              "' requires one operand!");
+    return false;
+  }
 
   bool MadeChange = getChild(0)->ApplyTypeConstraints(TP, NotRegisters);
 
@@ -1652,7 +1701,7 @@
 static bool OnlyOnRHSOfCommutative(TreePatternNode *N) {
   if (!N->isLeaf() && N->getOperator()->getName() == "imm")
     return true;
-  if (N->isLeaf() && dynamic_cast<IntInit*>(N->getLeafValue()))
+  if (N->isLeaf() && isa<IntInit>(N->getLeafValue()))
     return true;
   return false;
 }
@@ -1703,27 +1752,30 @@
 //
 
 TreePattern::TreePattern(Record *TheRec, ListInit *RawPat, bool isInput,
-                         CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp){
-  isInputPattern = isInput;
+                         CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp),
+                         isInputPattern(isInput), HasError(false) {
   for (unsigned i = 0, e = RawPat->getSize(); i != e; ++i)
     Trees.push_back(ParseTreePattern(RawPat->getElement(i), ""));
 }
 
 TreePattern::TreePattern(Record *TheRec, DagInit *Pat, bool isInput,
-                         CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp){
-  isInputPattern = isInput;
+                         CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp),
+                         isInputPattern(isInput), HasError(false) {
   Trees.push_back(ParseTreePattern(Pat, ""));
 }
 
 TreePattern::TreePattern(Record *TheRec, TreePatternNode *Pat, bool isInput,
-                         CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp){
-  isInputPattern = isInput;
+                         CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp),
+                         isInputPattern(isInput), HasError(false) {
   Trees.push_back(Pat);
 }
 
-void TreePattern::error(const std::string &Msg) const {
+void TreePattern::error(const std::string &Msg) {
+  if (HasError)
+    return;
   dump();
-  throw TGError(TheRecord->getLoc(), "In " + TheRecord->getName() + ": " + Msg);
+  PrintError(TheRecord->getLoc(), "In " + TheRecord->getName() + ": " + Msg);
+  HasError = true;
 }
 
 void TreePattern::ComputeNamedNodes() {
@@ -1741,7 +1793,7 @@
 
 
 TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){
-  if (DefInit *DI = dynamic_cast<DefInit*>(TheInit)) {
+  if (DefInit *DI = dyn_cast<DefInit>(TheInit)) {
     Record *R = DI->getDef();
 
     // Direct reference to a leaf DagNode or PatFrag?  Turn it into a
@@ -1765,26 +1817,26 @@
     return Res;
   }
 
-  if (IntInit *II = dynamic_cast<IntInit*>(TheInit)) {
+  if (IntInit *II = dyn_cast<IntInit>(TheInit)) {
     if (!OpName.empty())
       error("Constant int argument should not have a name!");
     return new TreePatternNode(II, 1);
   }
 
-  if (BitsInit *BI = dynamic_cast<BitsInit*>(TheInit)) {
+  if (BitsInit *BI = dyn_cast<BitsInit>(TheInit)) {
     // Turn this into an IntInit.
     Init *II = BI->convertInitializerTo(IntRecTy::get());
-    if (II == 0 || !dynamic_cast<IntInit*>(II))
+    if (II == 0 || !isa<IntInit>(II))
       error("Bits value must be constants!");
     return ParseTreePattern(II, OpName);
   }
 
-  DagInit *Dag = dynamic_cast<DagInit*>(TheInit);
+  DagInit *Dag = dyn_cast<DagInit>(TheInit);
   if (!Dag) {
     TheInit->dump();
     error("Pattern has unexpected init kind!");
   }
-  DefInit *OpDef = dynamic_cast<DefInit*>(Dag->getOperator());
+  DefInit *OpDef = dyn_cast<DefInit>(Dag->getOperator());
   if (!OpDef) error("Pattern has unexpected operator type!");
   Record *Operator = OpDef->getDef();
 
@@ -1912,7 +1964,7 @@
 
 /// InferAllTypes - Infer/propagate as many types throughout the expression
 /// patterns as possible.  Return true if all types are inferred, false
-/// otherwise.  Throw an exception if a type contradiction is found.
+/// otherwise.  Flags an error if a type contradiction is found.
 bool TreePattern::
 InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> > *InNamedTypes) {
   if (NamedNodes.empty())
@@ -1949,7 +2001,7 @@
           // us to match things like:
           //  def : Pat<(v1i64 (bitconvert(v2i32 DPR:$src))), (v1i64 DPR:$src)>;
           if (Nodes[i] == Trees[0] && Nodes[i]->isLeaf()) {
-            DefInit *DI = dynamic_cast<DefInit*>(Nodes[i]->getLeafValue());
+            DefInit *DI = dyn_cast<DefInit>(Nodes[i]->getLeafValue());
             if (DI && (DI->getDef()->isSubClassOf("RegisterClass") ||
                        DI->getDef()->isSubClassOf("RegisterOperand")))
               continue;
@@ -2033,6 +2085,9 @@
   // stores, and side effects in many cases by examining an
   // instruction's pattern.
   InferInstructionFlags();
+
+  // Verify that instruction flags match the patterns.
+  VerifyInstructionFlags();
 }
 
 CodeGenDAGPatterns::~CodeGenDAGPatterns() {
@@ -2111,7 +2166,7 @@
 
     // Parse the operands list.
     DagInit *OpsList = Fragments[i]->getValueAsDag("Operands");
-    DefInit *OpsOp = dynamic_cast<DefInit*>(OpsList->getOperator());
+    DefInit *OpsOp = dyn_cast<DefInit>(OpsList->getOperator());
     // Special cases: ops == outs == ins. Different names are used to
     // improve readability.
     if (!OpsOp ||
@@ -2123,9 +2178,8 @@
     // Copy over the arguments.
     Args.clear();
     for (unsigned j = 0, e = OpsList->getNumArgs(); j != e; ++j) {
-      if (!dynamic_cast<DefInit*>(OpsList->getArg(j)) ||
-          static_cast<DefInit*>(OpsList->getArg(j))->
-          getDef()->getName() != "node")
+      if (!isa<DefInit>(OpsList->getArg(j)) ||
+          cast<DefInit>(OpsList->getArg(j))->getDef()->getName() != "node")
         P->error("Operands list should all be 'node' values.");
       if (OpsList->getArgName(j).empty())
         P->error("Operands list should have names for each operand!");
@@ -2161,14 +2215,8 @@
 
     // Infer as many types as possible.  Don't worry about it if we don't infer
     // all of them, some may depend on the inputs of the pattern.
-    try {
-      ThePat->InferAllTypes();
-    } catch (...) {
-      // If this pattern fragment is not supported by this target (no types can
-      // satisfy its constraints), just ignore it.  If the bogus pattern is
-      // actually used by instructions, the type consistency error will be
-      // reported there.
-    }
+    ThePat->InferAllTypes();
+    ThePat->resetError();
 
     // If debugging, print out the pattern fragment result.
     DEBUG(ThePat->dump());
@@ -2176,53 +2224,46 @@
 }
 
 void CodeGenDAGPatterns::ParseDefaultOperands() {
-  std::vector<Record*> DefaultOps[2];
-  DefaultOps[0] = Records.getAllDerivedDefinitions("PredicateOperand");
-  DefaultOps[1] = Records.getAllDerivedDefinitions("OptionalDefOperand");
+  std::vector<Record*> DefaultOps;
+  DefaultOps = Records.getAllDerivedDefinitions("OperandWithDefaultOps");
 
   // Find some SDNode.
   assert(!SDNodes.empty() && "No SDNodes parsed?");
   Init *SomeSDNode = DefInit::get(SDNodes.begin()->first);
 
-  for (unsigned iter = 0; iter != 2; ++iter) {
-    for (unsigned i = 0, e = DefaultOps[iter].size(); i != e; ++i) {
-      DagInit *DefaultInfo = DefaultOps[iter][i]->getValueAsDag("DefaultOps");
-
-      // Clone the DefaultInfo dag node, changing the operator from 'ops' to
-      // SomeSDnode so that we can parse this.
-      std::vector<std::pair<Init*, std::string> > Ops;
-      for (unsigned op = 0, e = DefaultInfo->getNumArgs(); op != e; ++op)
-        Ops.push_back(std::make_pair(DefaultInfo->getArg(op),
-                                     DefaultInfo->getArgName(op)));
-      DagInit *DI = DagInit::get(SomeSDNode, "", Ops);
-
-      // Create a TreePattern to parse this.
-      TreePattern P(DefaultOps[iter][i], DI, false, *this);
-      assert(P.getNumTrees() == 1 && "This ctor can only produce one tree!");
-
-      // Copy the operands over into a DAGDefaultOperand.
-      DAGDefaultOperand DefaultOpInfo;
-
-      TreePatternNode *T = P.getTree(0);
-      for (unsigned op = 0, e = T->getNumChildren(); op != e; ++op) {
-        TreePatternNode *TPN = T->getChild(op);
-        while (TPN->ApplyTypeConstraints(P, false))
-          /* Resolve all types */;
-
-        if (TPN->ContainsUnresolvedType()) {
-          if (iter == 0)
-            throw "Value #" + utostr(i) + " of PredicateOperand '" +
-              DefaultOps[iter][i]->getName() +"' doesn't have a concrete type!";
-          else
-            throw "Value #" + utostr(i) + " of OptionalDefOperand '" +
-              DefaultOps[iter][i]->getName() +"' doesn't have a concrete type!";
-        }
-        DefaultOpInfo.DefaultOps.push_back(TPN);
-      }
+  for (unsigned i = 0, e = DefaultOps.size(); i != e; ++i) {
+    DagInit *DefaultInfo = DefaultOps[i]->getValueAsDag("DefaultOps");
 
-      // Insert it into the DefaultOperands map so we can find it later.
-      DefaultOperands[DefaultOps[iter][i]] = DefaultOpInfo;
+    // Clone the DefaultInfo dag node, changing the operator from 'ops' to
+    // SomeSDnode so that we can parse this.
+    std::vector<std::pair<Init*, std::string> > Ops;
+    for (unsigned op = 0, e = DefaultInfo->getNumArgs(); op != e; ++op)
+      Ops.push_back(std::make_pair(DefaultInfo->getArg(op),
+                                   DefaultInfo->getArgName(op)));
+    DagInit *DI = DagInit::get(SomeSDNode, "", Ops);
+
+    // Create a TreePattern to parse this.
+    TreePattern P(DefaultOps[i], DI, false, *this);
+    assert(P.getNumTrees() == 1 && "This ctor can only produce one tree!");
+
+    // Copy the operands over into a DAGDefaultOperand.
+    DAGDefaultOperand DefaultOpInfo;
+
+    TreePatternNode *T = P.getTree(0);
+    for (unsigned op = 0, e = T->getNumChildren(); op != e; ++op) {
+      TreePatternNode *TPN = T->getChild(op);
+      while (TPN->ApplyTypeConstraints(P, false))
+        /* Resolve all types */;
+
+      if (TPN->ContainsUnresolvedType()) {
+        PrintFatalError("Value #" + utostr(i) + " of OperandWithDefaultOps '" +
+          DefaultOps[i]->getName() +"' doesn't have a concrete type!");
+      }
+      DefaultOpInfo.DefaultOps.push_back(TPN);
     }
+
+    // Insert it into the DefaultOperands map so we can find it later.
+    DefaultOperands[DefaultOps[i]] = DefaultOpInfo;
   }
 }
 
@@ -2233,7 +2274,7 @@
   // No name -> not interesting.
   if (Pat->getName().empty()) {
     if (Pat->isLeaf()) {
-      DefInit *DI = dynamic_cast<DefInit*>(Pat->getLeafValue());
+      DefInit *DI = dyn_cast<DefInit>(Pat->getLeafValue());
       if (DI && (DI->getDef()->isSubClassOf("RegisterClass") ||
                  DI->getDef()->isSubClassOf("RegisterOperand")))
         I->error("Input " + DI->getDef()->getName() + " must be named!");
@@ -2243,7 +2284,7 @@
 
   Record *Rec;
   if (Pat->isLeaf()) {
-    DefInit *DI = dynamic_cast<DefInit*>(Pat->getLeafValue());
+    DefInit *DI = dyn_cast<DefInit>(Pat->getLeafValue());
     if (!DI) I->error("Input $" + Pat->getName() + " must be an identifier!");
     Rec = DI->getDef();
   } else {
@@ -2261,7 +2302,7 @@
   }
   Record *SlotRec;
   if (Slot->isLeaf()) {
-    SlotRec = dynamic_cast<DefInit*>(Slot->getLeafValue())->getDef();
+    SlotRec = cast<DefInit>(Slot->getLeafValue())->getDef();
   } else {
     assert(Slot->getNumChildren() == 0 && "can't be a use with children!");
     SlotRec = Slot->getOperator();
@@ -2296,7 +2337,7 @@
       if (!Dest->isLeaf())
         I->error("implicitly defined value should be a register!");
 
-      DefInit *Val = dynamic_cast<DefInit*>(Dest->getLeafValue());
+      DefInit *Val = dyn_cast<DefInit>(Dest->getLeafValue());
       if (!Val || !Val->getDef()->isSubClassOf("Register"))
         I->error("implicitly defined value should be a register!");
       InstImpResults.push_back(Val->getDef());
@@ -2337,7 +2378,7 @@
     if (!Dest->isLeaf())
       I->error("set destination should be a register!");
 
-    DefInit *Val = dynamic_cast<DefInit*>(Dest->getLeafValue());
+    DefInit *Val = dyn_cast<DefInit>(Dest->getLeafValue());
     if (!Val)
       I->error("set destination should be a register!");
 
@@ -2367,43 +2408,36 @@
 
 class InstAnalyzer {
   const CodeGenDAGPatterns &CDP;
-  bool &mayStore;
-  bool &mayLoad;
-  bool &IsBitcast;
-  bool &HasSideEffects;
-  bool &IsVariadic;
 public:
-  InstAnalyzer(const CodeGenDAGPatterns &cdp,
-               bool &maystore, bool &mayload, bool &isbc, bool &hse, bool &isv)
-    : CDP(cdp), mayStore(maystore), mayLoad(mayload), IsBitcast(isbc),
-      HasSideEffects(hse), IsVariadic(isv) {
+  bool hasSideEffects;
+  bool mayStore;
+  bool mayLoad;
+  bool isBitcast;
+  bool isVariadic;
+
+  InstAnalyzer(const CodeGenDAGPatterns &cdp)
+    : CDP(cdp), hasSideEffects(false), mayStore(false), mayLoad(false),
+      isBitcast(false), isVariadic(false) {}
+
+  void Analyze(const TreePattern *Pat) {
+    // Assume only the first tree is the pattern. The others are clobber nodes.
+    AnalyzeNode(Pat->getTree(0));
   }
 
-  /// Analyze - Analyze the specified instruction, returning true if the
-  /// instruction had a pattern.
-  bool Analyze(Record *InstRecord) {
-    const TreePattern *Pattern = CDP.getInstruction(InstRecord).getPattern();
-    if (Pattern == 0) {
-      HasSideEffects = 1;
-      return false;  // No pattern.
-    }
-
-    // FIXME: Assume only the first tree is the pattern. The others are clobber
-    // nodes.
-    AnalyzeNode(Pattern->getTree(0));
-    return true;
+  void Analyze(const PatternToMatch *Pat) {
+    AnalyzeNode(Pat->getSrcPattern());
   }
 
 private:
   bool IsNodeBitcast(const TreePatternNode *N) const {
-    if (HasSideEffects || mayLoad || mayStore || IsVariadic)
+    if (hasSideEffects || mayLoad || mayStore || isVariadic)
       return false;
 
     if (N->getNumChildren() != 2)
       return false;
 
     const TreePatternNode *N0 = N->getChild(0);
-    if (!N0->isLeaf() || !dynamic_cast<DefInit*>(N0->getLeafValue()))
+    if (!N0->isLeaf() || !isa<DefInit>(N0->getLeafValue()))
       return false;
 
     const TreePatternNode *N1 = N->getChild(1);
@@ -2418,16 +2452,17 @@
     return OpInfo.getEnumName() == "ISD::BITCAST";
   }
 
+public:
   void AnalyzeNode(const TreePatternNode *N) {
     if (N->isLeaf()) {
-      if (DefInit *DI = dynamic_cast<DefInit*>(N->getLeafValue())) {
+      if (DefInit *DI = dyn_cast<DefInit>(N->getLeafValue())) {
         Record *LeafRec = DI->getDef();
         // Handle ComplexPattern leaves.
         if (LeafRec->isSubClassOf("ComplexPattern")) {
           const ComplexPattern &CP = CDP.getComplexPattern(LeafRec);
           if (CP.hasProperty(SDNPMayStore)) mayStore = true;
           if (CP.hasProperty(SDNPMayLoad)) mayLoad = true;
-          if (CP.hasProperty(SDNPSideEffect)) HasSideEffects = true;
+          if (CP.hasProperty(SDNPSideEffect)) hasSideEffects = true;
         }
       }
       return;
@@ -2439,7 +2474,7 @@
 
     // Ignore set nodes, which are not SDNodes.
     if (N->getOperator()->getName() == "set") {
-      IsBitcast = IsNodeBitcast(N);
+      isBitcast = IsNodeBitcast(N);
       return;
     }
 
@@ -2449,8 +2484,8 @@
     // Notice properties of the node.
     if (OpInfo.hasProperty(SDNPMayStore)) mayStore = true;
     if (OpInfo.hasProperty(SDNPMayLoad)) mayLoad = true;
-    if (OpInfo.hasProperty(SDNPSideEffect)) HasSideEffects = true;
-    if (OpInfo.hasProperty(SDNPVariadic)) IsVariadic = true;
+    if (OpInfo.hasProperty(SDNPSideEffect)) hasSideEffects = true;
+    if (OpInfo.hasProperty(SDNPVariadic)) isVariadic = true;
 
     if (const CodeGenIntrinsic *IntInfo = N->getIntrinsicInfo(CDP)) {
       // If this is an intrinsic, analyze it.
@@ -2462,68 +2497,70 @@
 
       if (IntInfo->ModRef >= CodeGenIntrinsic::ReadWriteMem)
         // WriteMem intrinsics can have other strange effects.
-        HasSideEffects = true;
+        hasSideEffects = true;
     }
   }
 
 };
 
-static void InferFromPattern(const CodeGenInstruction &Inst,
-                             bool &MayStore, bool &MayLoad,
-                             bool &IsBitcast,
-                             bool &HasSideEffects, bool &IsVariadic,
-                             const CodeGenDAGPatterns &CDP) {
-  MayStore = MayLoad = IsBitcast = HasSideEffects = IsVariadic = false;
-
-  bool HadPattern =
-    InstAnalyzer(CDP, MayStore, MayLoad, IsBitcast, HasSideEffects, IsVariadic)
-    .Analyze(Inst.TheDef);
-
-  // InstAnalyzer only correctly analyzes mayStore/mayLoad so far.
-  if (Inst.mayStore) {  // If the .td file explicitly sets mayStore, use it.
-    // If we decided that this is a store from the pattern, then the .td file
-    // entry is redundant.
-    if (MayStore)
-      PrintWarning(Inst.TheDef->getLoc(),
-                   "mayStore flag explicitly set on "
-                   "instruction, but flag already inferred from pattern.");
-    MayStore = true;
-  }
-
-  if (Inst.mayLoad) {  // If the .td file explicitly sets mayLoad, use it.
-    // If we decided that this is a load from the pattern, then the .td file
-    // entry is redundant.
-    if (MayLoad)
-      PrintWarning(Inst.TheDef->getLoc(),
-                   "mayLoad flag explicitly set on "
-                   "instruction, but flag already inferred from pattern.");
-    MayLoad = true;
-  }
-
-  if (Inst.neverHasSideEffects) {
-    if (HadPattern)
-      PrintWarning(Inst.TheDef->getLoc(),
-                   "neverHasSideEffects flag explicitly set on "
-                   "instruction, but flag already inferred from pattern.");
-    HasSideEffects = false;
-  }
-
-  if (Inst.hasSideEffects) {
-    if (HasSideEffects)
-      PrintWarning(Inst.TheDef->getLoc(),
-                   "hasSideEffects flag explicitly set on "
-                   "instruction, but flag already inferred from pattern.");
-    HasSideEffects = true;
+static bool InferFromPattern(CodeGenInstruction &InstInfo,
+                             const InstAnalyzer &PatInfo,
+                             Record *PatDef) {
+  bool Error = false;
+
+  // Remember where InstInfo got its flags.
+  if (InstInfo.hasUndefFlags())
+    InstInfo.InferredFrom = PatDef;
+
+  // Check explicitly set flags for consistency.
+  if (InstInfo.hasSideEffects != PatInfo.hasSideEffects &&
+      !InstInfo.hasSideEffects_Unset) {
+    // Allow explicitly setting hasSideEffects = 1 on instructions, even when
+    // the pattern has no side effects. That could be useful for div/rem
+    // instructions that may trap.
+    if (!InstInfo.hasSideEffects) {
+      Error = true;
+      PrintError(PatDef->getLoc(), "Pattern doesn't match hasSideEffects = " +
+                 Twine(InstInfo.hasSideEffects));
+    }
+  }
+
+  if (InstInfo.mayStore != PatInfo.mayStore && !InstInfo.mayStore_Unset) {
+    Error = true;
+    PrintError(PatDef->getLoc(), "Pattern doesn't match mayStore = " +
+               Twine(InstInfo.mayStore));
+  }
+
+  if (InstInfo.mayLoad != PatInfo.mayLoad && !InstInfo.mayLoad_Unset) {
+    // Allow explicitly setting mayLoad = 1, even when the pattern has no loads.
+    // Some targets translate immediates to loads.
+    if (!InstInfo.mayLoad) {
+      Error = true;
+      PrintError(PatDef->getLoc(), "Pattern doesn't match mayLoad = " +
+                 Twine(InstInfo.mayLoad));
+    }
   }
 
-  if (Inst.Operands.isVariadic)
-    IsVariadic = true;  // Can warn if we want.
+  // Transfer inferred flags.
+  InstInfo.hasSideEffects |= PatInfo.hasSideEffects;
+  InstInfo.mayStore |= PatInfo.mayStore;
+  InstInfo.mayLoad |= PatInfo.mayLoad;
+
+  // These flags are silently added without any verification.
+  InstInfo.isBitcast |= PatInfo.isBitcast;
+
+  // Don't infer isVariadic. This flag means something different on SDNodes and
+  // instructions. For example, a CALL SDNode is variadic because it has the
+  // call arguments as operands, but a CALL instruction is not variadic - it
+  // has argument registers as implicit, not explicit uses.
+
+  return Error;
 }
 
 /// hasNullFragReference - Return true if the DAG has any reference to the
 /// null_frag operator.
 static bool hasNullFragReference(DagInit *DI) {
-  DefInit *OpDef = dynamic_cast<DefInit*>(DI->getOperator());
+  DefInit *OpDef = dyn_cast<DefInit>(DI->getOperator());
   if (!OpDef) return false;
   Record *Operator = OpDef->getDef();
 
@@ -2531,7 +2568,7 @@
   if (Operator->getName() == "null_frag") return true;
   // If any of the arguments reference the null fragment, return true.
   for (unsigned i = 0, e = DI->getNumArgs(); i != e; ++i) {
-    DagInit *Arg = dynamic_cast<DagInit*>(DI->getArg(i));
+    DagInit *Arg = dyn_cast<DagInit>(DI->getArg(i));
     if (Arg && hasNullFragReference(Arg))
       return true;
   }
@@ -2543,7 +2580,7 @@
 /// the null_frag operator.
 static bool hasNullFragReference(ListInit *LI) {
   for (unsigned i = 0, e = LI->getSize(); i != e; ++i) {
-    DagInit *DI = dynamic_cast<DagInit*>(LI->getElement(i));
+    DagInit *DI = dyn_cast<DagInit>(LI->getElement(i));
     assert(DI && "non-dag in an instruction Pattern list?!");
     if (hasNullFragReference(DI))
       return true;
@@ -2551,6 +2588,17 @@
   return false;
 }
 
+/// Get all the instructions in a tree.
+static void
+getInstructionsInTree(TreePatternNode *Tree, SmallVectorImpl<Record*> &Instrs) {
+  if (Tree->isLeaf())
+    return;
+  if (Tree->getOperator()->isSubClassOf("Instruction"))
+    Instrs.push_back(Tree->getOperator());
+  for (unsigned i = 0, e = Tree->getNumChildren(); i != e; ++i)
+    getInstructionsInTree(Tree->getChild(i), Instrs);
+}
+
 /// ParseInstructions - Parse all of the instructions, inlining and resolving
 /// any fragments involved.  This populates the Instructions list with fully
 /// resolved instructions.
@@ -2560,7 +2608,7 @@
   for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
     ListInit *LI = 0;
 
-    if (dynamic_cast<ListInit*>(Instrs[i]->getValueInit("Pattern")))
+    if (isa<ListInit>(Instrs[i]->getValueInit("Pattern")))
       LI = Instrs[i]->getValueAsListInit("Pattern");
 
     // If there is no pattern, only collect minimal information about the
@@ -2655,7 +2703,7 @@
 
       if (i == 0)
         Res0Node = RNode;
-      Record *R = dynamic_cast<DefInit*>(RNode->getLeafValue())->getDef();
+      Record *R = cast<DefInit>(RNode->getLeafValue())->getDef();
       if (R == 0)
         I->error("Operand $" + OpName + " should be a set destination: all "
                  "outputs must occur before inputs in operand list!");
@@ -2683,11 +2731,9 @@
         I->error("Operand #" + utostr(i) + " in operands list has no name!");
 
       if (!InstInputsCheck.count(OpName)) {
-        // If this is an predicate operand or optional def operand with an
-        // DefaultOps set filled in, we can ignore this.  When we codegen it,
-        // we will do so as always executed.
-        if (Op.Rec->isSubClassOf("PredicateOperand") ||
-            Op.Rec->isSubClassOf("OptionalDefOperand")) {
+        // If this is an operand with a DefaultOps set filled in, we can ignore
+        // this.  When we codegen it, we will do so as always executed.
+        if (Op.Rec->isSubClassOf("OperandWithDefaultOps")) {
           // Does it have a non-empty DefaultOps field?  If so, ignore this
           // operand.
           if (!getDefaultOperand(Op.Rec).DefaultOps.empty())
@@ -2699,8 +2745,7 @@
       TreePatternNode *InVal = InstInputsCheck[OpName];
       InstInputsCheck.erase(OpName);   // It occurred, remove from map.
 
-      if (InVal->isLeaf() &&
-          dynamic_cast<DefInit*>(InVal->getLeafValue())) {
+      if (InVal->isLeaf() && isa<DefInit>(InVal->getLeafValue())) {
         Record *InRec = static_cast<DefInit*>(InVal->getLeafValue())->getDef();
         if (Op.Rec != InRec && !InRec->isSubClassOf("ComplexPattern"))
           I->error("Operand $" + OpName + "'s register class disagrees"
@@ -2754,11 +2799,11 @@
   }
 
   // If we can, convert the instructions to be patterns that are matched!
-  for (std::map<Record*, DAGInstruction, RecordPtrCmp>::iterator II =
+  for (std::map<Record*, DAGInstruction, LessRecordByID>::iterator II =
         Instructions.begin(),
        E = Instructions.end(); II != E; ++II) {
     DAGInstruction &TheInst = II->second;
-    const TreePattern *I = TheInst.getPattern();
+    TreePattern *I = TheInst.getPattern();
     if (I == 0) continue;  // No pattern.
 
     // FIXME: Assume only the first tree is the pattern. The others are clobber
@@ -2789,7 +2834,7 @@
 
 static void FindNames(const TreePatternNode *P,
                       std::map<std::string, NameRecord> &Names,
-                      const TreePattern *PatternTop) {
+                      TreePattern *PatternTop) {
   if (!P->getName().empty()) {
     NameRecord &Rec = Names[P->getName()];
     // If this is the first instance of the name, remember the node.
@@ -2806,12 +2851,15 @@
   }
 }
 
-void CodeGenDAGPatterns::AddPatternToMatch(const TreePattern *Pattern,
+void CodeGenDAGPatterns::AddPatternToMatch(TreePattern *Pattern,
                                            const PatternToMatch &PTM) {
   // Do some sanity checking on the pattern we're about to match.
   std::string Reason;
-  if (!PTM.getSrcPattern()->canPatternMatch(Reason, *this))
-    Pattern->error("Pattern can never match: " + Reason);
+  if (!PTM.getSrcPattern()->canPatternMatch(Reason, *this)) {
+    PrintWarning(Pattern->getRecord()->getLoc(),
+      Twine("Pattern can never match: ") + Reason);
+    return;
+  }
 
   // If the source pattern's root is a complex pattern, that complex pattern
   // must specify the nodes it can potentially match.
@@ -2852,27 +2900,158 @@
 void CodeGenDAGPatterns::InferInstructionFlags() {
   const std::vector<const CodeGenInstruction*> &Instructions =
     Target.getInstructionsByEnumValue();
+
+  // First try to infer flags from the primary instruction pattern, if any.
+  SmallVector<CodeGenInstruction*, 8> Revisit;
+  unsigned Errors = 0;
   for (unsigned i = 0, e = Instructions.size(); i != e; ++i) {
     CodeGenInstruction &InstInfo =
       const_cast<CodeGenInstruction &>(*Instructions[i]);
-    // Determine properties of the instruction from its pattern.
-    bool MayStore, MayLoad, IsBitcast, HasSideEffects, IsVariadic;
-    InferFromPattern(InstInfo, MayStore, MayLoad, IsBitcast,
-                     HasSideEffects, IsVariadic, *this);
-    InstInfo.mayStore = MayStore;
-    InstInfo.mayLoad = MayLoad;
-    InstInfo.isBitcast = IsBitcast;
-    InstInfo.hasSideEffects = HasSideEffects;
-    InstInfo.Operands.isVariadic = IsVariadic;
-
-    // Sanity checks.
-    if (InstInfo.isReMaterializable && InstInfo.hasSideEffects)
-      throw TGError(InstInfo.TheDef->getLoc(), "The instruction " +
-                    InstInfo.TheDef->getName() +
-                    " is rematerializable AND has unmodeled side effects?");
+
+    // Treat neverHasSideEffects = 1 as the equivalent of hasSideEffects = 0.
+    // This flag is obsolete and will be removed.
+    if (InstInfo.neverHasSideEffects) {
+      assert(!InstInfo.hasSideEffects);
+      InstInfo.hasSideEffects_Unset = false;
+    }
+
+    // Get the primary instruction pattern.
+    const TreePattern *Pattern = getInstruction(InstInfo.TheDef).getPattern();
+    if (!Pattern) {
+      if (InstInfo.hasUndefFlags())
+        Revisit.push_back(&InstInfo);
+      continue;
+    }
+    InstAnalyzer PatInfo(*this);
+    PatInfo.Analyze(Pattern);
+    Errors += InferFromPattern(InstInfo, PatInfo, InstInfo.TheDef);
+  }
+
+  // Second, look for single-instruction patterns defined outside the
+  // instruction.
+  for (ptm_iterator I = ptm_begin(), E = ptm_end(); I != E; ++I) {
+    const PatternToMatch &PTM = *I;
+
+    // We can only infer from single-instruction patterns, otherwise we won't
+    // know which instruction should get the flags.
+    SmallVector<Record*, 8> PatInstrs;
+    getInstructionsInTree(PTM.getDstPattern(), PatInstrs);
+    if (PatInstrs.size() != 1)
+      continue;
+
+    // Get the single instruction.
+    CodeGenInstruction &InstInfo = Target.getInstruction(PatInstrs.front());
+
+    // Only infer properties from the first pattern. We'll verify the others.
+    if (InstInfo.InferredFrom)
+      continue;
+
+    InstAnalyzer PatInfo(*this);
+    PatInfo.Analyze(&PTM);
+    Errors += InferFromPattern(InstInfo, PatInfo, PTM.getSrcRecord());
+  }
+
+  if (Errors)
+    PrintFatalError("pattern conflicts");
+
+  // Revisit instructions with undefined flags and no pattern.
+  if (Target.guessInstructionProperties()) {
+    for (unsigned i = 0, e = Revisit.size(); i != e; ++i) {
+      CodeGenInstruction &InstInfo = *Revisit[i];
+      if (InstInfo.InferredFrom)
+        continue;
+      // The mayLoad and mayStore flags default to false.
+      // Conservatively assume hasSideEffects if it wasn't explicit.
+      if (InstInfo.hasSideEffects_Unset)
+        InstInfo.hasSideEffects = true;
+    }
+    return;
+  }
+
+  // Complain about any flags that are still undefined.
+  for (unsigned i = 0, e = Revisit.size(); i != e; ++i) {
+    CodeGenInstruction &InstInfo = *Revisit[i];
+    if (InstInfo.InferredFrom)
+      continue;
+    if (InstInfo.hasSideEffects_Unset)
+      PrintError(InstInfo.TheDef->getLoc(),
+                 "Can't infer hasSideEffects from patterns");
+    if (InstInfo.mayStore_Unset)
+      PrintError(InstInfo.TheDef->getLoc(),
+                 "Can't infer mayStore from patterns");
+    if (InstInfo.mayLoad_Unset)
+      PrintError(InstInfo.TheDef->getLoc(),
+                 "Can't infer mayLoad from patterns");
   }
 }
 
+
+/// Verify instruction flags against pattern node properties.
+void CodeGenDAGPatterns::VerifyInstructionFlags() {
+  unsigned Errors = 0;
+  for (ptm_iterator I = ptm_begin(), E = ptm_end(); I != E; ++I) {
+    const PatternToMatch &PTM = *I;
+    SmallVector<Record*, 8> Instrs;
+    getInstructionsInTree(PTM.getDstPattern(), Instrs);
+    if (Instrs.empty())
+      continue;
+
+    // Count the number of instructions with each flag set.
+    unsigned NumSideEffects = 0;
+    unsigned NumStores = 0;
+    unsigned NumLoads = 0;
+    for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
+      const CodeGenInstruction &InstInfo = Target.getInstruction(Instrs[i]);
+      NumSideEffects += InstInfo.hasSideEffects;
+      NumStores += InstInfo.mayStore;
+      NumLoads += InstInfo.mayLoad;
+    }
+
+    // Analyze the source pattern.
+    InstAnalyzer PatInfo(*this);
+    PatInfo.Analyze(&PTM);
+
+    // Collect error messages.
+    SmallVector<std::string, 4> Msgs;
+
+    // Check for missing flags in the output.
+    // Permit extra flags for now at least.
+    if (PatInfo.hasSideEffects && !NumSideEffects)
+      Msgs.push_back("pattern has side effects, but hasSideEffects isn't set");
+
+    // Don't verify store flags on instructions with side effects. At least for
+    // intrinsics, side effects implies mayStore.
+    if (!PatInfo.hasSideEffects && PatInfo.mayStore && !NumStores)
+      Msgs.push_back("pattern may store, but mayStore isn't set");
+
+    // Similarly, mayStore implies mayLoad on intrinsics.
+    if (!PatInfo.mayStore && PatInfo.mayLoad && !NumLoads)
+      Msgs.push_back("pattern may load, but mayLoad isn't set");
+
+    // Print error messages.
+    if (Msgs.empty())
+      continue;
+    ++Errors;
+
+    for (unsigned i = 0, e = Msgs.size(); i != e; ++i)
+      PrintError(PTM.getSrcRecord()->getLoc(), Twine(Msgs[i]) + " on the " +
+                 (Instrs.size() == 1 ?
+                  "instruction" : "output instructions"));
+    // Provide the location of the relevant instruction definitions.
+    for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
+      if (Instrs[i] != PTM.getSrcRecord())
+        PrintError(Instrs[i]->getLoc(), "defined here");
+      const CodeGenInstruction &InstInfo = Target.getInstruction(Instrs[i]);
+      if (InstInfo.InferredFrom &&
+          InstInfo.InferredFrom != InstInfo.TheDef &&
+          InstInfo.InferredFrom != PTM.getSrcRecord())
+        PrintError(InstInfo.InferredFrom->getLoc(), "inferred from patttern");
+    }
+  }
+  if (Errors)
+    PrintFatalError("Errors in DAG patterns");
+}
+
 /// Given a pattern result with an unresolved type, see if we can find one
 /// instruction with an unresolved result type.  Force this result type to an
 /// arbitrary element if it's possible types to converge results.
@@ -3230,7 +3409,7 @@
     for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i) {
       TreePatternNode *Child = N->getChild(i);
       if (Child->isLeaf())
-        if (DefInit *DI = dynamic_cast<DefInit*>(Child->getLeafValue())) {
+        if (DefInit *DI = dyn_cast<DefInit>(Child->getLeafValue())) {
           Record *RR = DI->getDef();
           if (RR->isSubClassOf("Register"))
             continue;
@@ -3330,4 +3509,3 @@
     DEBUG(errs() << "\n");
   }
 }
-

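For readers following the flag-inference rework above, here is a minimal, self-contained sketch (not part of the patch; names are simplified stand-ins) of the merging policy the new InferFromPattern applies: an explicit mayStore must agree with the pattern, an explicit mayLoad = 1 or hasSideEffects = 1 is tolerated even without pattern evidence, and inferred flags are only ever added, never cleared.

  #include <cstdio>

  struct Flags {
    bool mayLoad = false, mayLoad_Unset = true;
    bool mayStore = false, mayStore_Unset = true;
    bool hasSideEffects = false, hasSideEffects_Unset = true;
  };

  // Returns true on a conflict between explicit .td flags and the pattern.
  static bool mergeFromPattern(Flags &Inst, bool PatLoads, bool PatStores,
                               bool PatSideEffects) {
    bool Error = false;
    // mayStore must match the pattern whenever it was set explicitly.
    if (!Inst.mayStore_Unset && Inst.mayStore != PatStores)
      Error = true;
    // Only an explicit 0 can conflict for mayLoad and hasSideEffects.
    if (!Inst.mayLoad_Unset && PatLoads && !Inst.mayLoad)
      Error = true;
    if (!Inst.hasSideEffects_Unset && PatSideEffects && !Inst.hasSideEffects)
      Error = true;
    // Inferred flags are OR'ed in, never cleared.
    Inst.mayLoad |= PatLoads;
    Inst.mayStore |= PatStores;
    Inst.hasSideEffects |= PatSideEffects;
    return Error;
  }

  int main() {
    Flags F;                              // all three flags left unset in the ".td"
    bool Conflict = mergeFromPattern(F, /*loads*/true, /*stores*/false,
                                     /*side effects*/false);
    std::printf("mayLoad=%d conflict=%d\n", F.mayLoad, Conflict);
    return 0;
  }
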
Modified: llvm/branches/AMDILBackend/utils/TableGen/CodeGenDAGPatterns.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/CodeGenDAGPatterns.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/CodeGenDAGPatterns.h (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/CodeGenDAGPatterns.h Tue Jan 15 11:16:16 2013
@@ -105,7 +105,7 @@
 
     /// MergeInTypeInfo - This merges in type information from the specified
     /// argument.  If 'this' changes, it returns true.  If the two types are
-    /// contradictory (e.g. merge f32 into i32) then this throws an exception.
+    /// contradictory (e.g. merge f32 into i32) then this flags an error.
     bool MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP);
 
     bool MergeInTypeInfo(MVT::SimpleValueType InVT, TreePattern &TP) {
@@ -187,8 +187,8 @@
 
   /// ApplyTypeConstraint - Given a node in a pattern, apply this type
   /// constraint to the nodes operands.  This returns true if it makes a
-  /// change, false otherwise.  If a type contradiction is found, throw an
-  /// exception.
+  /// change, false otherwise.  If a type contradiction is found, an error
+  /// is flagged.
   bool ApplyTypeConstraint(TreePatternNode *N, const SDNodeInfo &NodeInfo,
                            TreePattern &TP) const;
 };
@@ -232,7 +232,7 @@
   /// ApplyTypeConstraints - Given a node in a pattern, apply the type
   /// constraints for this node to the operands of the node.  This returns
   /// true if it makes a change, false otherwise.  If a type contradiction is
-  /// found, throw an exception.
+  /// found, an error is flagged.
   bool ApplyTypeConstraints(TreePatternNode *N, TreePattern &TP) const {
     bool MadeChange = false;
     for (unsigned i = 0, e = TypeConstraints.size(); i != e; ++i)
@@ -446,13 +446,12 @@
 
   /// ApplyTypeConstraints - Apply all of the type constraints relevant to
   /// this node and its children in the tree.  This returns true if it makes a
-  /// change, false otherwise.  If a type contradiction is found, throw an
-  /// exception.
+  /// change, false otherwise.  If a type contradiction is found, flag an error.
   bool ApplyTypeConstraints(TreePattern &TP, bool NotRegisters);
 
   /// UpdateNodeType - Set the node type of N to VT if VT contains
-  /// information.  If N already contains a conflicting type, then throw an
-  /// exception.  This returns true if any information was updated.
+  /// information.  If N already contains a conflicting type, then flag an
+  /// error.  This returns true if any information was updated.
   ///
   bool UpdateNodeType(unsigned ResNo, const EEVT::TypeSet &InTy,
                       TreePattern &TP) {
@@ -514,6 +513,10 @@
   /// isInputPattern - True if this is an input pattern, something to match.
   /// False if this is an output pattern, something to emit.
   bool isInputPattern;
+
+  /// hasError - True if the currently processed nodes have unresolvable types
+  /// or other non-fatal errors.
+  bool HasError;
 public:
 
   /// TreePattern constructor - Parse the specified DagInits into the
@@ -565,13 +568,19 @@
 
   /// InferAllTypes - Infer/propagate as many types throughout the expression
   /// patterns as possible.  Return true if all types are inferred, false
-  /// otherwise.  Throw an exception if a type contradiction is found.
+  /// otherwise.  Bail out if a type contradiction is found.
   bool InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> >
                           *NamedTypes=0);
 
-  /// error - Throw an exception, prefixing it with information about this
-  /// pattern.
-  void error(const std::string &Msg) const;
+  /// error - If this is the first error in the current resolution step,
+  /// print it and set the error flag.  Otherwise, continue silently.
+  void error(const std::string &Msg);
+  bool hasError() const {
+    return HasError;
+  }
+  void resetError() {
+    HasError = false;
+  }
 
   void print(raw_ostream &OS) const;
   void dump() const;
@@ -582,8 +591,8 @@
   void ComputeNamedNodes(TreePatternNode *N);
 };
 
-/// DAGDefaultOperand - One of these is created for each PredicateOperand
-/// or OptionalDefOperand that has a set ExecuteAlways / DefaultOps field.
+/// DAGDefaultOperand - One of these is created for each OperandWithDefaultOps
+/// that has a set ExecuteAlways / DefaultOps field.
 struct DAGDefaultOperand {
   std::vector<TreePatternNode*> DefaultOps;
 };
@@ -602,7 +611,7 @@
     : Pattern(TP), Results(results), Operands(operands),
       ImpResults(impresults), ResultPattern(0) {}
 
-  const TreePattern *getPattern() const { return Pattern; }
+  TreePattern *getPattern() const { return Pattern; }
   unsigned getNumResults() const { return Results.size(); }
   unsigned getNumOperands() const { return Operands.size(); }
   unsigned getNumImpResults() const { return ImpResults.size(); }
@@ -661,23 +670,18 @@
   unsigned getPatternComplexity(const CodeGenDAGPatterns &CGP) const;
 };
 
-// Deterministic comparison of Record*.
-struct RecordPtrCmp {
-  bool operator()(const Record *LHS, const Record *RHS) const;
-};
-
 class CodeGenDAGPatterns {
   RecordKeeper &Records;
   CodeGenTarget Target;
   std::vector<CodeGenIntrinsic> Intrinsics;
   std::vector<CodeGenIntrinsic> TgtIntrinsics;
 
-  std::map<Record*, SDNodeInfo, RecordPtrCmp> SDNodes;
-  std::map<Record*, std::pair<Record*, std::string>, RecordPtrCmp> SDNodeXForms;
-  std::map<Record*, ComplexPattern, RecordPtrCmp> ComplexPatterns;
-  std::map<Record*, TreePattern*, RecordPtrCmp> PatternFragments;
-  std::map<Record*, DAGDefaultOperand, RecordPtrCmp> DefaultOperands;
-  std::map<Record*, DAGInstruction, RecordPtrCmp> Instructions;
+  std::map<Record*, SDNodeInfo, LessRecordByID> SDNodes;
+  std::map<Record*, std::pair<Record*, std::string>, LessRecordByID> SDNodeXForms;
+  std::map<Record*, ComplexPattern, LessRecordByID> ComplexPatterns;
+  std::map<Record*, TreePattern*, LessRecordByID> PatternFragments;
+  std::map<Record*, DAGDefaultOperand, LessRecordByID> DefaultOperands;
+  std::map<Record*, DAGInstruction, LessRecordByID> Instructions;
 
   // Specific SDNode definitions:
   Record *intrinsic_void_sdnode;
@@ -708,7 +712,7 @@
     return SDNodeXForms.find(R)->second;
   }
 
-  typedef std::map<Record*, NodeXForm, RecordPtrCmp>::const_iterator
+  typedef std::map<Record*, NodeXForm, LessRecordByID>::const_iterator
           nx_iterator;
   nx_iterator nx_begin() const { return SDNodeXForms.begin(); }
   nx_iterator nx_end() const { return SDNodeXForms.end(); }
@@ -758,7 +762,7 @@
     return PatternFragments.find(R)->second;
   }
 
-  typedef std::map<Record*, TreePattern*, RecordPtrCmp>::const_iterator
+  typedef std::map<Record*, TreePattern*, LessRecordByID>::const_iterator
           pf_iterator;
   pf_iterator pf_begin() const { return PatternFragments.begin(); }
   pf_iterator pf_end() const { return PatternFragments.end(); }
@@ -797,8 +801,9 @@
   void ParsePatterns();
   void InferInstructionFlags();
   void GenerateVariants();
+  void VerifyInstructionFlags();
 
-  void AddPatternToMatch(const TreePattern *Pattern, const PatternToMatch &PTM);
+  void AddPatternToMatch(TreePattern *Pattern, const PatternToMatch &PTM);
   void FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
                                    std::map<std::string,
                                    TreePatternNode*> &InstInputs,

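The header change above replaces the exception-based error() with a sticky error flag. As a quick illustration of the intended calling convention (hasError()/resetError()/error()), here is a self-contained sketch; the Pattern class below is a stand-in for TreePattern, not the real one:

  #include <cstdio>
  #include <string>

  // Stand-in for TreePattern's new non-throwing error protocol.
  class Pattern {
    bool HasError = false;
  public:
    void error(const std::string &Msg) {
      if (HasError)
        return;                          // later errors in the same step are silent
      std::fprintf(stderr, "error: %s\n", Msg.c_str());
      HasError = true;
    }
    bool hasError() const { return HasError; }
    void resetError() { HasError = false; }
  };

  int main() {
    Pattern P;
    P.resetError();
    P.error("cannot infer a type");      // printed
    P.error("follow-on error");          // suppressed: flag already set
    return P.hasError() ? 1 : 0;         // callers test the flag instead of catching
  }
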
Modified: llvm/branches/AMDILBackend/utils/TableGen/CodeGenInstruction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/CodeGenInstruction.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/CodeGenInstruction.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/CodeGenInstruction.cpp Tue Jan 15 11:16:16 2013
@@ -32,20 +32,20 @@
 
   DagInit *OutDI = R->getValueAsDag("OutOperandList");
 
-  if (DefInit *Init = dynamic_cast<DefInit*>(OutDI->getOperator())) {
+  if (DefInit *Init = dyn_cast<DefInit>(OutDI->getOperator())) {
     if (Init->getDef()->getName() != "outs")
-      throw R->getName() + ": invalid def name for output list: use 'outs'";
+      PrintFatalError(R->getName() + ": invalid def name for output list: use 'outs'");
   } else
-    throw R->getName() + ": invalid output list: use 'outs'";
+    PrintFatalError(R->getName() + ": invalid output list: use 'outs'");
 
   NumDefs = OutDI->getNumArgs();
 
   DagInit *InDI = R->getValueAsDag("InOperandList");
-  if (DefInit *Init = dynamic_cast<DefInit*>(InDI->getOperator())) {
+  if (DefInit *Init = dyn_cast<DefInit>(InDI->getOperator())) {
     if (Init->getDef()->getName() != "ins")
-      throw R->getName() + ": invalid def name for input list: use 'ins'";
+      PrintFatalError(R->getName() + ": invalid def name for input list: use 'ins'");
   } else
-    throw R->getName() + ": invalid input list: use 'ins'";
+    PrintFatalError(R->getName() + ": invalid input list: use 'ins'");
 
   unsigned MIOperandNo = 0;
   std::set<std::string> OperandNames;
@@ -60,9 +60,9 @@
       ArgName = InDI->getArgName(i-NumDefs);
     }
 
-    DefInit *Arg = dynamic_cast<DefInit*>(ArgInit);
+    DefInit *Arg = dyn_cast<DefInit>(ArgInit);
     if (!Arg)
-      throw "Illegal operand for the '" + R->getName() + "' instruction!";
+      PrintFatalError("Illegal operand for the '" + R->getName() + "' instruction!");
 
     Record *Rec = Arg->getDef();
     std::string PrintMethod = "printOperand";
@@ -80,11 +80,10 @@
       MIOpInfo = Rec->getValueAsDag("MIOperandInfo");
 
       // Verify that MIOpInfo has an 'ops' root value.
-      if (!dynamic_cast<DefInit*>(MIOpInfo->getOperator()) ||
-          dynamic_cast<DefInit*>(MIOpInfo->getOperator())
-          ->getDef()->getName() != "ops")
-        throw "Bad value for MIOperandInfo in operand '" + Rec->getName() +
-        "'\n";
+      if (!isa<DefInit>(MIOpInfo->getOperator()) ||
+          cast<DefInit>(MIOpInfo->getOperator())->getDef()->getName() != "ops")
+        PrintFatalError("Bad value for MIOperandInfo in operand '" + Rec->getName() +
+          "'\n");
 
       // If we have MIOpInfo, then we have #operands equal to number of entries
       // in MIOperandInfo.
@@ -101,17 +100,17 @@
     } else if (Rec->isSubClassOf("RegisterClass")) {
       OperandType = "OPERAND_REGISTER";
     } else if (!Rec->isSubClassOf("PointerLikeRegClass") &&
-               Rec->getName() != "unknown")
-      throw "Unknown operand class '" + Rec->getName() +
-      "' in '" + R->getName() + "' instruction!";
+               !Rec->isSubClassOf("unknown_class"))
+      PrintFatalError("Unknown operand class '" + Rec->getName() +
+        "' in '" + R->getName() + "' instruction!");
 
     // Check that the operand has a name and that it's unique.
     if (ArgName.empty())
-      throw "In instruction '" + R->getName() + "', operand #" + utostr(i) +
-      " has no name!";
+      PrintFatalError("In instruction '" + R->getName() + "', operand #" + utostr(i) +
+        " has no name!");
     if (!OperandNames.insert(ArgName).second)
-      throw "In instruction '" + R->getName() + "', operand #" + utostr(i) +
-      " has the same name as a previous operand!";
+      PrintFatalError("In instruction '" + R->getName() + "', operand #" + utostr(i) +
+        " has the same name as a previous operand!");
 
     OperandList.push_back(OperandInfo(Rec, ArgName, PrintMethod, EncoderMethod,
                                       OperandType, MIOperandNo, NumOps,
@@ -129,13 +128,13 @@
 
 /// getOperandNamed - Return the index of the operand with the specified
 /// non-empty name.  If the instruction does not have an operand with the
-/// specified name, throw an exception.
+/// specified name, abort.
 ///
 unsigned CGIOperandList::getOperandNamed(StringRef Name) const {
   unsigned OpIdx;
   if (hasOperandNamed(Name, OpIdx)) return OpIdx;
-  throw "'" + TheDef->getName() + "' does not have an operand named '$" +
-    Name.str() + "'!";
+  PrintFatalError("'" + TheDef->getName() + "' does not have an operand named '$" +
+    Name.str() + "'!");
 }
 
 /// hasOperandNamed - Query whether the instruction has an operand of the
@@ -154,7 +153,7 @@
 std::pair<unsigned,unsigned>
 CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) {
   if (Op.empty() || Op[0] != '$')
-    throw TheDef->getName() + ": Illegal operand name: '" + Op + "'";
+    PrintFatalError(TheDef->getName() + ": Illegal operand name: '" + Op + "'");
 
   std::string OpName = Op.substr(1);
   std::string SubOpName;
@@ -164,7 +163,7 @@
   if (DotIdx != std::string::npos) {
     SubOpName = OpName.substr(DotIdx+1);
     if (SubOpName.empty())
-      throw TheDef->getName() + ": illegal empty suboperand name in '" +Op +"'";
+      PrintFatalError(TheDef->getName() + ": illegal empty suboperand name in '" +Op +"'");
     OpName = OpName.substr(0, DotIdx);
   }
 
@@ -174,8 +173,8 @@
     // If one was needed, throw.
     if (OperandList[OpIdx].MINumOperands > 1 && !AllowWholeOp &&
         SubOpName.empty())
-      throw TheDef->getName() + ": Illegal to refer to"
-      " whole operand part of complex operand '" + Op + "'";
+      PrintFatalError(TheDef->getName() + ": Illegal to refer to"
+        " whole operand part of complex operand '" + Op + "'");
 
     // Otherwise, return the operand.
     return std::make_pair(OpIdx, 0U);
@@ -184,7 +183,7 @@
   // Find the suboperand number involved.
   DagInit *MIOpInfo = OperandList[OpIdx].MIOperandInfo;
   if (MIOpInfo == 0)
-    throw TheDef->getName() + ": unknown suboperand name in '" + Op + "'";
+    PrintFatalError(TheDef->getName() + ": unknown suboperand name in '" + Op + "'");
 
   // Find the operand with the right name.
   for (unsigned i = 0, e = MIOpInfo->getNumArgs(); i != e; ++i)
@@ -192,7 +191,7 @@
       return std::make_pair(OpIdx, i);
 
   // Otherwise, didn't find it!
-  throw TheDef->getName() + ": unknown suboperand name in '" + Op + "'";
+  PrintFatalError(TheDef->getName() + ": unknown suboperand name in '" + Op + "'");
 }
 
 static void ParseConstraint(const std::string &CStr, CGIOperandList &Ops) {
@@ -204,13 +203,13 @@
     std::string Name = CStr.substr(wpos+1);
     wpos = Name.find_first_not_of(" \t");
     if (wpos == std::string::npos)
-      throw "Illegal format for @earlyclobber constraint: '" + CStr + "'";
+      PrintFatalError("Illegal format for @earlyclobber constraint: '" + CStr + "'");
     Name = Name.substr(wpos);
     std::pair<unsigned,unsigned> Op = Ops.ParseOperandName(Name, false);
 
     // Build the string for the operand
     if (!Ops[Op.first].Constraints[Op.second].isNone())
-      throw "Operand '" + Name + "' cannot have multiple constraints!";
+      PrintFatalError("Operand '" + Name + "' cannot have multiple constraints!");
     Ops[Op.first].Constraints[Op.second] =
     CGIOperandList::ConstraintInfo::getEarlyClobber();
     return;
@@ -225,25 +224,27 @@
   // TIED_TO: $src1 = $dst
   wpos = Name.find_first_of(" \t");
   if (wpos == std::string::npos)
-    throw "Illegal format for tied-to constraint: '" + CStr + "'";
+    PrintFatalError("Illegal format for tied-to constraint: '" + CStr + "'");
   std::string DestOpName = Name.substr(0, wpos);
   std::pair<unsigned,unsigned> DestOp = Ops.ParseOperandName(DestOpName, false);
 
   Name = CStr.substr(pos+1);
   wpos = Name.find_first_not_of(" \t");
   if (wpos == std::string::npos)
-    throw "Illegal format for tied-to constraint: '" + CStr + "'";
-
-  std::pair<unsigned,unsigned> SrcOp =
-  Ops.ParseOperandName(Name.substr(wpos), false);
-  if (SrcOp > DestOp)
-    throw "Illegal tied-to operand constraint '" + CStr + "'";
+    PrintFatalError("Illegal format for tied-to constraint: '" + CStr + "'");
 
+  std::string SrcOpName = Name.substr(wpos);
+  std::pair<unsigned,unsigned> SrcOp = Ops.ParseOperandName(SrcOpName, false);
+  if (SrcOp > DestOp) {
+    std::swap(SrcOp, DestOp);
+    std::swap(SrcOpName, DestOpName);
+  }
 
   unsigned FlatOpNo = Ops.getFlattenedOperandNumber(SrcOp);
 
   if (!Ops[DestOp.first].Constraints[DestOp.second].isNone())
-    throw "Operand '" + DestOpName + "' cannot have multiple constraints!";
+    PrintFatalError("Operand '" + DestOpName +
+      "' cannot have multiple constraints!");
   Ops[DestOp.first].Constraints[DestOp.second] =
     CGIOperandList::ConstraintInfo::getTied(FlatOpNo);
 }
@@ -287,7 +288,8 @@
 // CodeGenInstruction Implementation
 //===----------------------------------------------------------------------===//
 
-CodeGenInstruction::CodeGenInstruction(Record *R) : TheDef(R), Operands(R) {
+CodeGenInstruction::CodeGenInstruction(Record *R)
+  : TheDef(R), Operands(R), InferredFrom(0) {
   Namespace = R->getValueAsString("Namespace");
   AsmString = R->getValueAsString("AsmString");
 
@@ -297,11 +299,10 @@
   isCompare    = R->getValueAsBit("isCompare");
   isMoveImm    = R->getValueAsBit("isMoveImm");
   isBitcast    = R->getValueAsBit("isBitcast");
+  isSelect     = R->getValueAsBit("isSelect");
   isBarrier    = R->getValueAsBit("isBarrier");
   isCall       = R->getValueAsBit("isCall");
   canFoldAsLoad = R->getValueAsBit("canFoldAsLoad");
-  mayLoad      = R->getValueAsBit("mayLoad");
-  mayStore     = R->getValueAsBit("mayStore");
   isPredicable = Operands.isPredicable || R->getValueAsBit("isPredicable");
   isConvertibleToThreeAddress = R->getValueAsBit("isConvertibleToThreeAddress");
   isCommutable = R->getValueAsBit("isCommutable");
@@ -312,8 +313,13 @@
   hasPostISelHook = R->getValueAsBit("hasPostISelHook");
   hasCtrlDep   = R->getValueAsBit("hasCtrlDep");
   isNotDuplicable = R->getValueAsBit("isNotDuplicable");
-  hasSideEffects = R->getValueAsBit("hasSideEffects");
+
+  mayLoad      = R->getValueAsBitOrUnset("mayLoad", mayLoad_Unset);
+  mayStore     = R->getValueAsBitOrUnset("mayStore", mayStore_Unset);
+  hasSideEffects = R->getValueAsBitOrUnset("hasSideEffects",
+                                           hasSideEffects_Unset);
   neverHasSideEffects = R->getValueAsBit("neverHasSideEffects");
+
   isAsCheapAsAMove = R->getValueAsBit("isAsCheapAsAMove");
   hasExtraSrcRegAllocReq = R->getValueAsBit("hasExtraSrcRegAllocReq");
   hasExtraDefRegAllocReq = R->getValueAsBit("hasExtraDefRegAllocReq");
@@ -323,7 +329,7 @@
   ImplicitUses = R->getValueAsListOfDefs("Uses");
 
   if (neverHasSideEffects + hasSideEffects > 1)
-    throw R->getName() + ": multiple conflicting side-effect flags set!";
+    PrintFatalError(R->getName() + ": multiple conflicting side-effect flags set!");
 
   // Parse Constraints.
   ParseConstraints(R->getValueAsString("Constraints"), Operands);
@@ -408,16 +414,16 @@
 /// successful match, with ResOp set to the result operand to be used.
 bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
                                        Record *InstOpRec, bool hasSubOps,
-                                       SMLoc Loc, CodeGenTarget &T,
+                                       ArrayRef<SMLoc> Loc, CodeGenTarget &T,
                                        ResultOperand &ResOp) {
   Init *Arg = Result->getArg(AliasOpNo);
-  DefInit *ADI = dynamic_cast<DefInit*>(Arg);
+  DefInit *ADI = dyn_cast<DefInit>(Arg);
 
   if (ADI && ADI->getDef() == InstOpRec) {
     // If the operand is a record, it must have a name, and the record type
     // must match up with the instruction's argument type.
     if (Result->getArgName(AliasOpNo).empty())
-      throw TGError(Loc, "result argument #" + utostr(AliasOpNo) +
+      PrintFatalError(Loc, "result argument #" + utostr(AliasOpNo) +
                     " must have a name!");
     ResOp = ResultOperand(Result->getArgName(AliasOpNo), ADI->getDef());
     return true;
@@ -441,7 +447,7 @@
       DagInit *DI = InstOpRec->getValueAsDag("MIOperandInfo");
       // The operand info should only have a single (register) entry. We
       // want the register class of it.
-      InstOpRec = dynamic_cast<DefInit*>(DI->getArg(0))->getDef();
+      InstOpRec = cast<DefInit>(DI->getArg(0))->getDef();
     }
 
     if (InstOpRec->isSubClassOf("RegisterOperand"))
@@ -452,13 +458,13 @@
 
     if (!T.getRegisterClass(InstOpRec)
         .contains(T.getRegBank().getReg(ADI->getDef())))
-      throw TGError(Loc, "fixed register " + ADI->getDef()->getName() +
-                    " is not a member of the " + InstOpRec->getName() +
-                    " register class!");
+      PrintFatalError(Loc, "fixed register " + ADI->getDef()->getName() +
+                      " is not a member of the " + InstOpRec->getName() +
+                      " register class!");
 
     if (!Result->getArgName(AliasOpNo).empty())
-      throw TGError(Loc, "result fixed register argument must "
-                    "not have a name!");
+      PrintFatalError(Loc, "result fixed register argument must "
+                      "not have a name!");
 
     ResOp = ResultOperand(ADI->getDef());
     return true;
@@ -481,13 +487,13 @@
   }
 
   // Literal integers.
-  if (IntInit *II = dynamic_cast<IntInit*>(Arg)) {
+  if (IntInit *II = dyn_cast<IntInit>(Arg)) {
     if (hasSubOps || !InstOpRec->isSubClassOf("Operand"))
       return false;
     // Integer arguments can't have names.
     if (!Result->getArgName(AliasOpNo).empty())
-      throw TGError(Loc, "result argument #" + utostr(AliasOpNo) +
-                    " must not have a name!");
+      PrintFatalError(Loc, "result argument #" + utostr(AliasOpNo) +
+                      " must not have a name!");
     ResOp = ResultOperand(II->getValue());
     return true;
   }
@@ -513,9 +519,10 @@
   Result = R->getValueAsDag("ResultInst");
 
   // Verify that the root of the result is an instruction.
-  DefInit *DI = dynamic_cast<DefInit*>(Result->getOperator());
+  DefInit *DI = dyn_cast<DefInit>(Result->getOperator());
   if (DI == 0 || !DI->getDef()->isSubClassOf("Instruction"))
-    throw TGError(R->getLoc(), "result of inst alias should be an instruction");
+    PrintFatalError(R->getLoc(),
+                    "result of inst alias should be an instruction");
 
   ResultInst = &T.getInstruction(DI->getDef());
 
@@ -523,7 +530,7 @@
   // the same class.
   StringMap<Record*> NameClass;
   for (unsigned i = 0, e = Result->getNumArgs(); i != e; ++i) {
-    DefInit *ADI = dynamic_cast<DefInit*>(Result->getArg(i));
+    DefInit *ADI = dyn_cast<DefInit>(Result->getArg(i));
     if (!ADI || Result->getArgName(i).empty())
       continue;
     // Verify we don't have something like: (someinst GR16:$foo, GR32:$foo)
@@ -531,9 +538,9 @@
     // same type.
     Record *&Entry = NameClass[Result->getArgName(i)];
     if (Entry && Entry != ADI->getDef())
-      throw TGError(R->getLoc(), "result value $" + Result->getArgName(i) +
-                    " is both " + Entry->getName() + " and " +
-                    ADI->getDef()->getName() + "!");
+      PrintFatalError(R->getLoc(), "result value $" + Result->getArgName(i) +
+                      " is both " + Entry->getName() + " and " +
+                      ADI->getDef()->getName() + "!");
     Entry = ADI->getDef();
   }
 
@@ -549,7 +556,7 @@
       continue;
 
     if (AliasOpNo >= Result->getNumArgs())
-      throw TGError(R->getLoc(), "not enough arguments for instruction!");
+      PrintFatalError(R->getLoc(), "not enough arguments for instruction!");
 
     Record *InstOpRec = ResultInst->Operands[i].Rec;
     unsigned NumSubOps = ResultInst->Operands[i].MINumOperands;
@@ -570,7 +577,7 @@
       } else {
          DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
          for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) {
-          Record *SubRec = dynamic_cast<DefInit*>(MIOI->getArg(SubOp))->getDef();
+          Record *SubRec = cast<DefInit>(MIOI->getArg(SubOp))->getDef();
 
           // Take care to instantiate each of the suboperands with the correct
           // nomenclature: $foo.bar
@@ -590,26 +597,26 @@
       DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
       for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) {
         if (AliasOpNo >= Result->getNumArgs())
-          throw TGError(R->getLoc(), "not enough arguments for instruction!");
-        Record *SubRec = dynamic_cast<DefInit*>(MIOI->getArg(SubOp))->getDef();
+          PrintFatalError(R->getLoc(), "not enough arguments for instruction!");
+        Record *SubRec = cast<DefInit>(MIOI->getArg(SubOp))->getDef();
         if (tryAliasOpMatch(Result, AliasOpNo, SubRec, false,
                             R->getLoc(), T, ResOp)) {
           ResultOperands.push_back(ResOp);
           ResultInstOperandIndex.push_back(std::make_pair(i, SubOp));
           ++AliasOpNo;
         } else {
-          throw TGError(R->getLoc(), "result argument #" + utostr(AliasOpNo) +
+          PrintFatalError(R->getLoc(), "result argument #" + utostr(AliasOpNo) +
                         " does not match instruction operand class " +
                         (SubOp == 0 ? InstOpRec->getName() :SubRec->getName()));
         }
       }
       continue;
     }
-    throw TGError(R->getLoc(), "result argument #" + utostr(AliasOpNo) +
-                  " does not match instruction operand class " +
-                  InstOpRec->getName());
+    PrintFatalError(R->getLoc(), "result argument #" + utostr(AliasOpNo) +
+                    " does not match instruction operand class " +
+                    InstOpRec->getName());
   }
 
   if (AliasOpNo != Result->getNumArgs())
-    throw TGError(R->getLoc(), "too many operands for instruction!");
+    PrintFatalError(R->getLoc(), "too many operands for instruction!");
 }

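Beyond the mostly mechanical throw-to-PrintFatalError conversion in this file, one behavioral change is easy to miss: a tied-to constraint written "backwards" is no longer rejected, it is normalized by swapping the operand pair. A tiny self-contained sketch of that normalization (the operand numbers are made up for illustration):

  #include <cstdio>
  #include <utility>

  int main() {
    // (operand index, suboperand index) pairs as ParseOperandName returns them.
    std::pair<unsigned, unsigned> DestOp(0, 0);  // "$dst"  - output, operand #0
    std::pair<unsigned, unsigned> SrcOp(2, 0);   // "$src1" - input,  operand #2

    // Previously "$dst = $src1" was an error; now the pair is swapped so the
    // constraint always ends up recorded on the higher-numbered operand,
    // tied to the lower-numbered one.
    if (SrcOp > DestOp)
      std::swap(SrcOp, DestOp);

    std::printf("tie operand #%u to operand #%u\n", DestOp.first, SrcOp.first);
    return 0;
  }
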
Modified: llvm/branches/AMDILBackend/utils/TableGen/CodeGenInstruction.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/CodeGenInstruction.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/CodeGenInstruction.h (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/CodeGenInstruction.h Tue Jan 15 11:16:16 2013
@@ -152,7 +152,7 @@
 
     /// getOperandNamed - Return the index of the operand with the specified
     /// non-empty name.  If the instruction does not have an operand with the
-    /// specified name, throw an exception.
+    /// specified name, abort.
     unsigned getOperandNamed(StringRef Name) const;
 
     /// hasOperandNamed - Query whether the instruction has an operand of the
@@ -162,9 +162,8 @@
 
     /// ParseOperandName - Parse an operand name like "$foo" or "$foo.bar",
     /// where $foo is a whole operand and $foo.bar refers to a suboperand.
-    /// This throws an exception if the name is invalid.  If AllowWholeOp is
-    /// true, references to operands with suboperands are allowed, otherwise
-    /// not.
+    /// This aborts if the name is invalid.  If AllowWholeOp is true, references
+    /// to operands with suboperands are allowed, otherwise not.
     std::pair<unsigned,unsigned> ParseOperandName(const std::string &Op,
                                                   bool AllowWholeOp = true);
 
@@ -222,10 +221,14 @@
     bool isCompare;
     bool isMoveImm;
     bool isBitcast;
+    bool isSelect;
     bool isBarrier;
     bool isCall;
     bool canFoldAsLoad;
-    bool mayLoad, mayStore;
+    bool mayLoad;
+    bool mayLoad_Unset;
+    bool mayStore;
+    bool mayStore_Unset;
     bool isPredicable;
     bool isConvertibleToThreeAddress;
     bool isCommutable;
@@ -237,6 +240,7 @@
     bool hasCtrlDep;
     bool isNotDuplicable;
     bool hasSideEffects;
+    bool hasSideEffects_Unset;
     bool neverHasSideEffects;
     bool isAsCheapAsAMove;
     bool hasExtraSrcRegAllocReq;
@@ -244,6 +248,14 @@
     bool isCodeGenOnly;
     bool isPseudo;
 
+    /// Are there any undefined flags?
+    bool hasUndefFlags() const {
+      return mayLoad_Unset || mayStore_Unset || hasSideEffects_Unset;
+    }
+
+    // The record used to infer instruction flags, or NULL if no flag values
+    // have been inferred.
+    Record *InferredFrom;
 
     CodeGenInstruction(Record *R);
 
@@ -318,7 +330,7 @@
     CodeGenInstAlias(Record *R, CodeGenTarget &T);
 
     bool tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
-                         Record *InstOpRec, bool hasSubOps, SMLoc Loc,
+                         Record *InstOpRec, bool hasSubOps, ArrayRef<SMLoc> Loc,
                          CodeGenTarget &T, ResultOperand &ResOp);
   };
 }

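The new *_Unset companions above turn each of mayLoad / mayStore / hasSideEffects into a tri-state: explicitly 0, explicitly 1, or left unset and therefore open to inference. A self-contained sketch of that idea follows; BitOrUnset and readBit are illustrative stand-ins for Record::getValueAsBitOrUnset, which reports the unset case through its bool& out-parameter:

  #include <cstdio>

  struct BitOrUnset {
    bool Value;
    bool Unset;   // true when the .td file did not assign the field
  };

  // Stand-in for Record::getValueAsBitOrUnset (illustrative only).
  static bool readBit(const BitOrUnset &Field, bool &UnsetOut) {
    UnsetOut = Field.Unset;
    return Field.Value;
  }

  int main() {
    BitOrUnset MayLoadField = { false, true };   // "mayLoad" never mentioned
    bool mayLoad_Unset;
    bool mayLoad = readBit(MayLoadField, mayLoad_Unset);

    // hasUndefFlags() is simply "any of the three still unset".
    bool hasUndefFlags = mayLoad_Unset /* || mayStore_Unset || ... */;
    std::printf("mayLoad=%d unset=%d undef=%d\n",
                mayLoad, mayLoad_Unset, hasUndefFlags);
    return 0;
  }
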
Modified: llvm/branches/AMDILBackend/utils/TableGen/CodeGenRegisters.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/CodeGenRegisters.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/CodeGenRegisters.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/CodeGenRegisters.cpp Tue Jan 15 11:16:16 2013
@@ -28,19 +28,15 @@
 //===----------------------------------------------------------------------===//
 
 CodeGenSubRegIndex::CodeGenSubRegIndex(Record *R, unsigned Enum)
-  : TheDef(R),
-    EnumValue(Enum)
-{}
-
-std::string CodeGenSubRegIndex::getNamespace() const {
-  if (TheDef->getValue("Namespace"))
-    return TheDef->getValueAsString("Namespace");
-  else
-    return "";
+  : TheDef(R), EnumValue(Enum), LaneMask(0) {
+  Name = R->getName();
+  if (R->getValue("Namespace"))
+    Namespace = R->getValueAsString("Namespace");
 }
 
-const std::string &CodeGenSubRegIndex::getName() const {
-  return TheDef->getName();
+CodeGenSubRegIndex::CodeGenSubRegIndex(StringRef N, StringRef Nspace,
+                                       unsigned Enum)
+  : TheDef(0), Name(N), Namespace(Nspace), EnumValue(Enum), LaneMask(0) {
 }
 
 std::string CodeGenSubRegIndex::getQualifiedName() const {
@@ -52,28 +48,51 @@
 }
 
 void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) {
-  std::vector<Record*> Comps = TheDef->getValueAsListOfDefs("ComposedOf");
-  if (Comps.empty())
+  if (!TheDef)
     return;
-  if (Comps.size() != 2)
-    throw TGError(TheDef->getLoc(), "ComposedOf must have exactly two entries");
-  CodeGenSubRegIndex *A = RegBank.getSubRegIdx(Comps[0]);
-  CodeGenSubRegIndex *B = RegBank.getSubRegIdx(Comps[1]);
-  CodeGenSubRegIndex *X = A->addComposite(B, this);
-  if (X)
-    throw TGError(TheDef->getLoc(), "Ambiguous ComposedOf entries");
-}
-
-void CodeGenSubRegIndex::cleanComposites() {
-  // Clean out redundant mappings of the form this+X -> X.
-  for (CompMap::iterator i = Composed.begin(), e = Composed.end(); i != e;) {
-    CompMap::iterator j = i;
-    ++i;
-    if (j->first == j->second)
-      Composed.erase(j);
+
+  std::vector<Record*> Comps = TheDef->getValueAsListOfDefs("ComposedOf");
+  if (!Comps.empty()) {
+    if (Comps.size() != 2)
+      PrintFatalError(TheDef->getLoc(),
+                      "ComposedOf must have exactly two entries");
+    CodeGenSubRegIndex *A = RegBank.getSubRegIdx(Comps[0]);
+    CodeGenSubRegIndex *B = RegBank.getSubRegIdx(Comps[1]);
+    CodeGenSubRegIndex *X = A->addComposite(B, this);
+    if (X)
+      PrintFatalError(TheDef->getLoc(), "Ambiguous ComposedOf entries");
+  }
+
+  std::vector<Record*> Parts =
+    TheDef->getValueAsListOfDefs("CoveringSubRegIndices");
+  if (!Parts.empty()) {
+    if (Parts.size() < 2)
+      PrintFatalError(TheDef->getLoc(),
+                    "CoveredBySubRegs must have two or more entries");
+    SmallVector<CodeGenSubRegIndex*, 8> IdxParts;
+    for (unsigned i = 0, e = Parts.size(); i != e; ++i)
+      IdxParts.push_back(RegBank.getSubRegIdx(Parts[i]));
+    RegBank.addConcatSubRegIndex(IdxParts, this);
   }
 }
 
+unsigned CodeGenSubRegIndex::computeLaneMask() {
+  // Already computed?
+  if (LaneMask)
+    return LaneMask;
+
+  // Recursion guard, shouldn't be required.
+  LaneMask = ~0u;
+
+  // The lane mask is simply the union of all sub-indices.
+  unsigned M = 0;
+  for (CompMap::iterator I = Composed.begin(), E = Composed.end(); I != E; ++I)
+    M |= I->second->computeLaneMask();
+  assert(M && "Missing lane mask, sub-register cycle?");
+  LaneMask = M;
+  return LaneMask;
+}
+
 //===----------------------------------------------------------------------===//
 //                              CodeGenRegister
 //===----------------------------------------------------------------------===//
@@ -94,8 +113,8 @@
   std::vector<Record*> SRs = TheDef->getValueAsListOfDefs("SubRegs");
 
   if (SRIs.size() != SRs.size())
-    throw TGError(TheDef->getLoc(),
-                  "SubRegs and SubRegIndices must have the same size");
+    PrintFatalError(TheDef->getLoc(),
+                    "SubRegs and SubRegIndices must have the same size");
 
   for (unsigned i = 0, e = SRIs.size(); i != e; ++i) {
     ExplicitSubRegIndices.push_back(RegBank.getSubRegIdx(SRIs[i]));
@@ -206,8 +225,8 @@
     CodeGenRegister *SR = ExplicitSubRegs[i];
     CodeGenSubRegIndex *Idx = ExplicitSubRegIndices[i];
     if (!SubRegs.insert(std::make_pair(Idx, SR)).second)
-      throw TGError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() +
-                    " appears twice in Register " + getName());
+      PrintFatalError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() +
+                      " appears twice in Register " + getName());
     // Map explicit sub-registers first, so the names take precedence.
     // The inherited sub-registers are mapped below.
     SubReg2Idx.insert(std::make_pair(SR, Idx));
@@ -287,11 +306,11 @@
   for (SubRegMap::const_iterator SI = SubRegs.begin(), SE = SubRegs.end();
        SI != SE; ++SI) {
     if (SI->second == this) {
-      SMLoc Loc;
+      ArrayRef<SMLoc> Loc;
       if (TheDef)
         Loc = TheDef->getLoc();
-      throw TGError(Loc, "Register " + getName() +
-                    " has itself as a sub-register");
+      PrintFatalError(Loc, "Register " + getName() +
+                      " has itself as a sub-register");
     }
     // Ensure that every sub-register has a unique name.
     DenseMap<const CodeGenRegister*, CodeGenSubRegIndex*>::iterator Ins =
@@ -299,10 +318,10 @@
     if (Ins->second == SI->first)
       continue;
     // Trouble: Two different names for SI->second.
-    SMLoc Loc;
+    ArrayRef<SMLoc> Loc;
     if (TheDef)
       Loc = TheDef->getLoc();
-    throw TGError(Loc, "Sub-register can't have two names: " +
+    PrintFatalError(Loc, "Sub-register can't have two names: " +
                   SI->second->getName() + " available as " +
                   SI->first->getName() + " and " + Ins->second->getName());
   }
@@ -449,8 +468,8 @@
            SE = NewSubReg->SubRegs.end(); SI != SE; ++SI) {
       CodeGenSubRegIndex *SubIdx = getSubRegIndex(SI->second);
       if (!SubIdx)
-        throw TGError(TheDef->getLoc(), "No SubRegIndex for " +
-                      SI->second->getName() + " in " + getName());
+        PrintFatalError(TheDef->getLoc(), "No SubRegIndex for " +
+                        SI->second->getName() + " in " + getName());
       NewIdx->addComposite(SI->first, SubIdx);
     }
   }
@@ -574,15 +593,16 @@
     unsigned Dim = Indices.size();
     ListInit *SubRegs = Def->getValueAsListInit("SubRegs");
     if (Dim != SubRegs->getSize())
-      throw TGError(Def->getLoc(), "SubRegIndices and SubRegs size mismatch");
+      PrintFatalError(Def->getLoc(), "SubRegIndices and SubRegs size mismatch");
     if (Dim < 2)
-      throw TGError(Def->getLoc(), "Tuples must have at least 2 sub-registers");
+      PrintFatalError(Def->getLoc(),
+                      "Tuples must have at least 2 sub-registers");
 
     // Evaluate the sub-register lists to be zipped.
     unsigned Length = ~0u;
     SmallVector<SetTheory::RecSet, 4> Lists(Dim);
     for (unsigned i = 0; i != Dim; ++i) {
-      ST.evaluate(SubRegs->getElement(i), Lists[i]);
+      ST.evaluate(SubRegs->getElement(i), Lists[i], Def->getLoc());
       Length = std::min(Length, unsigned(Lists[i].size()));
     }
 
@@ -688,8 +708,8 @@
   for (unsigned i = 0, e = TypeList.size(); i != e; ++i) {
     Record *Type = TypeList[i];
     if (!Type->isSubClassOf("ValueType"))
-      throw "RegTypes list member '" + Type->getName() +
-        "' does not derive from the ValueType class!";
+      PrintFatalError("RegTypes list member '" + Type->getName() +
+        "' does not derive from the ValueType class!");
     VTs.push_back(getValueType(Type));
   }
   assert(!VTs.empty() && "RegisterClass must contain at least one ValueType!");
@@ -710,14 +730,14 @@
   // Alternative allocation orders may be subsets.
   SetTheory::RecSet Order;
   for (unsigned i = 0, e = AltOrders->size(); i != e; ++i) {
-    RegBank.getSets().evaluate(AltOrders->getElement(i), Order);
+    RegBank.getSets().evaluate(AltOrders->getElement(i), Order, R->getLoc());
     Orders[1 + i].append(Order.begin(), Order.end());
     // Verify that all altorder members are regclass members.
     while (!Order.empty()) {
       CodeGenRegister *Reg = RegBank.getReg(Order.back());
       Order.pop_back();
       if (!contains(Reg))
-        throw TGError(R->getLoc(), " AltOrder register " + Reg->getName() +
+        PrintFatalError(R->getLoc(), " AltOrder register " + Reg->getName() +
                       " is not a class member");
     }
   }
@@ -937,7 +957,7 @@
 //                               CodeGenRegBank
 //===----------------------------------------------------------------------===//
 
-CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) : Records(Records) {
+CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) {
   // Configure register Sets to understand register classes and tuples.
   Sets.addFieldExpander("RegisterClass", "MemberList");
   Sets.addFieldExpander("CalleeSavedRegs", "SaveList");
@@ -947,7 +967,6 @@
   // More indices will be synthesized later.
   std::vector<Record*> SRIs = Records.getAllDerivedDefinitions("SubRegIndex");
   std::sort(SRIs.begin(), SRIs.end(), LessRecord());
-  NumNamedIndices = SRIs.size();
   for (unsigned i = 0, e = SRIs.size(); i != e; ++i)
     getSubRegIdx(SRIs[i]);
   // Build composite maps from ComposedOf fields.
@@ -976,6 +995,12 @@
   for (unsigned i = 0, e = Registers.size(); i != e; ++i)
     Registers[i]->buildObjectGraph(*this);
 
+  // Compute register name map.
+  for (unsigned i = 0, e = Registers.size(); i != e; ++i)
+    RegistersByName.GetOrCreateValue(
+                       Registers[i]->TheDef->getValueAsString("AsmName"),
+                       Registers[i]);
+
   // Precompute all sub-register maps.
   // This will create Composite entries for all inferred sub-register indices.
   for (unsigned i = 0, e = Registers.size(); i != e; ++i)
@@ -998,7 +1023,7 @@
   // Read in register class definitions.
   std::vector<Record*> RCs = Records.getAllDerivedDefinitions("RegisterClass");
   if (RCs.empty())
-    throw std::string("No 'RegisterClass' subclasses defined!");
+    PrintFatalError(std::string("No 'RegisterClass' subclasses defined!"));
 
   // Allocate user-defined register classes.
   RegClasses.reserve(RCs.size());
@@ -1015,6 +1040,15 @@
   CodeGenRegisterClass::computeSubClasses(*this);
 }
 
+// Create a synthetic CodeGenSubRegIndex without a corresponding Record.
+CodeGenSubRegIndex*
+CodeGenRegBank::createSubRegIndex(StringRef Name, StringRef Namespace) {
+  CodeGenSubRegIndex *Idx = new CodeGenSubRegIndex(Name, Namespace,
+                                                   SubRegIndices.size() + 1);
+  SubRegIndices.push_back(Idx);
+  return Idx;
+}
+
 CodeGenSubRegIndex *CodeGenRegBank::getSubRegIdx(Record *Def) {
   CodeGenSubRegIndex *&Idx = Def2SubRegIdx[Def];
   if (Idx)
@@ -1066,7 +1100,7 @@
   if (CodeGenRegisterClass *RC = Def2RC[Def])
     return RC;
 
-  throw TGError(Def->getLoc(), "Not a known RegisterClass!");
+  PrintFatalError(Def->getLoc(), "Not a known RegisterClass!");
 }
 
 CodeGenSubRegIndex*
@@ -1079,7 +1113,7 @@
 
   // None exists, synthesize one.
   std::string Name = A->getName() + "_then_" + B->getName();
-  Comp = getSubRegIdx(new Record(Name, SMLoc(), Records));
+  Comp = createSubRegIndex(Name, A->getNamespace());
   A->addComposite(B, Comp);
   return Comp;
 }
@@ -1099,7 +1133,7 @@
     Name += '_';
     Name += Parts[i]->getName();
   }
-  return Idx = getSubRegIdx(new Record(Name, SMLoc(), Records));
+  return Idx = createSubRegIndex(Name, Parts.front()->getNamespace());
 }
 
 void CodeGenRegBank::computeComposites() {
@@ -1145,11 +1179,35 @@
       }
     }
   }
+}
+
+// Compute lane masks. This is similar to register units, but at the
+// sub-register index level. Each bit in the lane mask is like a register unit
+// class, and two lane masks will have a bit in common if two sub-register
+// indices overlap in some register.
+//
+// Conservatively share a lane mask bit if two sub-register indices overlap in
+// some registers, but not in others. That shouldn't happen a lot.
+void CodeGenRegBank::computeSubRegIndexLaneMasks() {
+  // First assign individual bits to all the leaf indices.
+  unsigned Bit = 0;
+  for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
+    CodeGenSubRegIndex *Idx = SubRegIndices[i];
+    if (Idx->getComposites().empty()) {
+      Idx->LaneMask = 1u << Bit;
+      // Share bit 31 in the unlikely case there are more than 32 leaves.
+      if (Bit < 31) ++Bit;
+    } else {
+      Idx->LaneMask = 0;
+    }
+  }
+
+  // FIXME: What if ad-hoc aliasing introduces overlaps that aren't represented
+  // by the sub-register graph? This doesn't occur in any known targets.
 
-  // We don't care about the difference between (Idx1, Idx2) -> Idx2 and invalid
-  // compositions, so remove any mappings of that form.
+  // Inherit lanes from composites.
   for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
-    SubRegIndices[i]->cleanComposites();
+    SubRegIndices[i]->computeLaneMask();
 }
 
 namespace {
@@ -1535,6 +1593,7 @@
 
 void CodeGenRegBank::computeDerivedInfo() {
   computeComposites();
+  computeSubRegIndexLaneMasks();
 
   // Compute a weight for each register unit created during getSubRegs.
   // This may create adopted register units (with unit # >= NumNativeRegUnits).

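The lane-mask scheme introduced above boils down to two steps: every leaf
sub-register index (one with no composites) receives its own bit, and every
composite index inherits the union of the bits of the indices it composes to,
so two indices overlap in some register exactly when their masks share a bit.
A minimal standalone sketch of that idea follows; it is plain C++ independent
of CodeGenSubRegIndex, the type and function names are hypothetical, and it
assumes an acyclic composition graph:

#include <string>
#include <vector>

// Hypothetical stand-in for a sub-register index node.
struct SubRegIdx {
  std::string Name;
  std::vector<SubRegIdx*> Composites; // results of composing with other indices
  unsigned LaneMask;
  SubRegIdx() : LaneMask(0) {}
};

// Give each leaf index its own bit; share bit 31 if there are more than 32
// leaves, mirroring the conservative sharing in the patch.
static void assignLeafBits(const std::vector<SubRegIdx*> &Indices) {
  unsigned Bit = 0;
  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    if (Indices[i]->Composites.empty()) {
      Indices[i]->LaneMask = 1u << Bit;
      if (Bit < 31) ++Bit;
    }
  }
}

// A non-leaf index covers the union of the lanes of its composites.
static unsigned computeLaneMask(SubRegIdx *Idx) {
  if (Idx->Composites.empty())
    return Idx->LaneMask;
  unsigned Mask = 0;
  for (unsigned i = 0, e = Idx->Composites.size(); i != e; ++i)
    Mask |= computeLaneMask(Idx->Composites[i]);
  Idx->LaneMask = Mask;
  return Mask;
}
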
Modified: llvm/branches/AMDILBackend/utils/TableGen/CodeGenRegisters.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/CodeGenRegisters.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/CodeGenRegisters.h (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/CodeGenRegisters.h Tue Jan 15 11:16:16 2013
@@ -35,14 +35,18 @@
   /// CodeGenSubRegIndex - Represents a sub-register index.
   class CodeGenSubRegIndex {
     Record *const TheDef;
+    std::string Name;
+    std::string Namespace;
 
   public:
     const unsigned EnumValue;
+    unsigned LaneMask;
 
     CodeGenSubRegIndex(Record *R, unsigned Enum);
+    CodeGenSubRegIndex(StringRef N, StringRef Nspace, unsigned Enum);
 
-    const std::string &getName() const;
-    std::string getNamespace() const;
+    const std::string &getName() const { return Name; }
+    const std::string &getNamespace() const { return Namespace; }
     std::string getQualifiedName() const;
 
     // Order CodeGenSubRegIndex pointers by EnumValue.
@@ -77,12 +81,12 @@
     // Update the composite maps of components specified in 'ComposedOf'.
     void updateComponents(CodeGenRegBank&);
 
-    // Clean out redundant composite mappings.
-    void cleanComposites();
-
     // Return the map of composites.
     const CompMap &getComposites() const { return Composed; }
 
+    // Compute LaneMask from Composed. Return LaneMask.
+    unsigned computeLaneMask();
+
   private:
     CompMap Composed;
   };
@@ -422,13 +426,13 @@
   // CodeGenRegBank - Represent a target's registers and the relations between
   // them.
   class CodeGenRegBank {
-    RecordKeeper &Records;
     SetTheory Sets;
 
     // SubRegIndices.
     std::vector<CodeGenSubRegIndex*> SubRegIndices;
     DenseMap<Record*, CodeGenSubRegIndex*> Def2SubRegIdx;
-    unsigned NumNamedIndices;
+
+    CodeGenSubRegIndex *createSubRegIndex(StringRef Name, StringRef NameSpace);
 
     typedef std::map<SmallVector<CodeGenSubRegIndex*, 8>,
                      CodeGenSubRegIndex*> ConcatIdxMap;
@@ -436,6 +440,7 @@
 
     // Registers.
     std::vector<CodeGenRegister*> Registers;
+    StringMap<CodeGenRegister*> RegistersByName;
     DenseMap<Record*, CodeGenRegister*> Def2Reg;
     unsigned NumNativeRegUnits;
 
@@ -486,6 +491,9 @@
     // Populate the Composite map from sub-register relationships.
     void computeComposites();
 
+    // Compute a lane mask for each sub-register index.
+    void computeSubRegIndexLaneMasks();
+
   public:
     CodeGenRegBank(RecordKeeper&);
 
@@ -495,7 +503,6 @@
     // in the .td files. The rest are synthesized such that all sub-registers
     // have a unique name.
     ArrayRef<CodeGenSubRegIndex*> getSubRegIndices() { return SubRegIndices; }
-    unsigned getNumNamedIndices() { return NumNamedIndices; }
 
     // Find a SubRegIndex form its Record def.
     CodeGenSubRegIndex *getSubRegIdx(Record*);
@@ -516,6 +523,9 @@
     }
 
     const std::vector<CodeGenRegister*> &getRegisters() { return Registers; }
+    const StringMap<CodeGenRegister*> &getRegistersByName() {
+      return RegistersByName;
+    }
 
     // Find a register from its Record def.
     CodeGenRegister *getReg(Record*);

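The header change above also exposes the new RegistersByName table, a
StringMap keyed by each register's AsmName string. A hypothetical consumer
inside a TableGen emitter could resolve an assembly-level register name to its
CodeGenRegister as sketched below; the helper function and its null-return
convention are illustrative assumptions, not part of the patch:

#include "CodeGenRegisters.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"

using namespace llvm;

// Illustrative lookup by assembly name over a populated CodeGenRegBank.
static const CodeGenRegister *lookupRegByAsmName(CodeGenRegBank &Bank,
                                                 StringRef AsmName) {
  const StringMap<CodeGenRegister*> &Map = Bank.getRegistersByName();
  StringMap<CodeGenRegister*>::const_iterator I = Map.find(AsmName);
  return I == Map.end() ? 0 : I->getValue();
}
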
Modified: llvm/branches/AMDILBackend/utils/TableGen/CodeGenSchedule.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/CodeGenSchedule.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/CodeGenSchedule.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/CodeGenSchedule.cpp Tue Jan 15 11:16:16 2013
@@ -16,41 +16,505 @@
 
 #include "CodeGenSchedule.h"
 #include "CodeGenTarget.h"
+#include "llvm/TableGen/Error.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/ADT/STLExtras.h"
 
 using namespace llvm;
 
-// CodeGenModels ctor interprets machine model records and populates maps.
+#ifndef NDEBUG
+static void dumpIdxVec(const IdxVec &V) {
+  for (unsigned i = 0, e = V.size(); i < e; ++i) {
+    dbgs() << V[i] << ", ";
+  }
+}
+static void dumpIdxVec(const SmallVectorImpl<unsigned> &V) {
+  for (unsigned i = 0, e = V.size(); i < e; ++i) {
+    dbgs() << V[i] << ", ";
+  }
+}
+#endif
+
+// (instrs a, b, ...) Evaluate and union all arguments. Identical to AddOp.
+struct InstrsOp : public SetTheory::Operator {
+  void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
+             ArrayRef<SMLoc> Loc) {
+    ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc);
+  }
+};
+
+// (instregex "OpcPat",...) Find all instructions matching an opcode pattern.
+//
+// TODO: Since this is a prefix match, perform a binary search over the
+// instruction names using lower_bound. Note that the predefined instrs must be
+// scanned linearly first. However, this is only safe if the regex pattern has
+// no top-level bars. The DAG already has a list of patterns, so there's no
+// reason to use top-level bars, but we need a way to verify they don't exist
+// before implementing the optimization.
+struct InstRegexOp : public SetTheory::Operator {
+  const CodeGenTarget &Target;
+  InstRegexOp(const CodeGenTarget &t): Target(t) {}
+
+  void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
+             ArrayRef<SMLoc> Loc) {
+    SmallVector<Regex*, 4> RegexList;
+    for (DagInit::const_arg_iterator
+           AI = Expr->arg_begin(), AE = Expr->arg_end(); AI != AE; ++AI) {
+      StringInit *SI = dyn_cast<StringInit>(*AI);
+      if (!SI)
+        PrintFatalError(Loc, "instregex requires pattern string: "
+          + Expr->getAsString());
+      std::string pat = SI->getValue();
+      // Implement a python-style prefix match.
+      if (pat[0] != '^') {
+        pat.insert(0, "^(");
+        pat.insert(pat.end(), ')');
+      }
+      RegexList.push_back(new Regex(pat));
+    }
+    for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
+           E = Target.inst_end(); I != E; ++I) {
+      for (SmallVectorImpl<Regex*>::iterator
+             RI = RegexList.begin(), RE = RegexList.end(); RI != RE; ++RI) {
+        if ((*RI)->match((*I)->TheDef->getName()))
+          Elts.insert((*I)->TheDef);
+      }
+    }
+    DeleteContainerPointers(RegexList);
+  }
+};
+
+/// CodeGenModels ctor interprets machine model records and populates maps.
 CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
                                        const CodeGenTarget &TGT):
-  Records(RK), Target(TGT), NumItineraryClasses(0), HasProcItineraries(false) {
+  Records(RK), Target(TGT), NumItineraryClasses(0) {
+
+  Sets.addFieldExpander("InstRW", "Instrs");
+
+  // Allow Set evaluation to recognize the dags used in InstRW records:
+  // (instrs Op1, Op1...)
+  Sets.addOperator("instrs", new InstrsOp);
+  Sets.addOperator("instregex", new InstRegexOp(Target));
+
+  // Instantiate a CodeGenProcModel for each SchedMachineModel with the values
+  // that are explicitly referenced in tablegen records. Resources associated
+  // with each processor will be derived later. Populate ProcModelMap with the
+  // CodeGenProcModel instances.
+  collectProcModels();
+
+  // Instantiate a CodeGenSchedRW for each SchedReadWrite record explicitly
+  // defined, and populate SchedReads and SchedWrites vectors. Implicit
+  // SchedReadWrites that represent sequences derived from expanded variant will
+  // be inferred later.
+  collectSchedRW();
+
+  // Instantiate a CodeGenSchedClass for each unique SchedRW signature directly
+  // required by an instruction definition, and populate SchedClassIdxMap. Set
+  // NumItineraryClasses to the number of explicit itinerary classes referenced
+  // by instructions. Set NumInstrSchedClasses to the number of itinerary
+  // classes plus any classes implied by instructions that derive from class
+  // Sched and provide SchedRW list. This does not infer any new classes from
+  // SchedVariant.
+  collectSchedClasses();
+
+  // Find instruction itineraries for each processor. Sort and populate
+  // CodeGenProcModel::ItinDefList. (Cycle-to-cycle itineraries). This requires
+  // all itinerary classes to be discovered.
+  collectProcItins();
+
+  // Find ItinRW records for each processor and itinerary class.
+  // (For per-operand resources mapped to itinerary classes).
+  collectProcItinRW();
+
+  // Infer new SchedClasses from SchedVariant.
+  inferSchedClasses();
+
+  // Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and
+  // ProcResourceDefs.
+  collectProcResources();
+}
+
+/// Gather all processor models.
+void CodeGenSchedModels::collectProcModels() {
+  RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor");
+  std::sort(ProcRecords.begin(), ProcRecords.end(), LessRecordFieldName());
+
+  // Reserve space because we can. Reallocation would be ok.
+  ProcModels.reserve(ProcRecords.size()+1);
+
+  // Use idx=0 for NoModel/NoItineraries.
+  Record *NoModelDef = Records.getDef("NoSchedModel");
+  Record *NoItinsDef = Records.getDef("NoItineraries");
+  ProcModels.push_back(CodeGenProcModel(0, "NoSchedModel",
+                                        NoModelDef, NoItinsDef));
+  ProcModelMap[NoModelDef] = 0;
+
+  // For each processor, find a unique machine model.
+  for (unsigned i = 0, N = ProcRecords.size(); i < N; ++i)
+    addProcModel(ProcRecords[i]);
+}
+
+/// Get a unique processor model based on the defined MachineModel and
+/// ProcessorItineraries.
+void CodeGenSchedModels::addProcModel(Record *ProcDef) {
+  Record *ModelKey = getModelOrItinDef(ProcDef);
+  if (!ProcModelMap.insert(std::make_pair(ModelKey, ProcModels.size())).second)
+    return;
+
+  std::string Name = ModelKey->getName();
+  if (ModelKey->isSubClassOf("SchedMachineModel")) {
+    Record *ItinsDef = ModelKey->getValueAsDef("Itineraries");
+    ProcModels.push_back(
+      CodeGenProcModel(ProcModels.size(), Name, ModelKey, ItinsDef));
+  }
+  else {
+    // An itinerary is defined without a machine model. Infer a new model.
+    if (!ModelKey->getValueAsListOfDefs("IID").empty())
+      Name = Name + "Model";
+    ProcModels.push_back(
+      CodeGenProcModel(ProcModels.size(), Name,
+                       ProcDef->getValueAsDef("SchedModel"), ModelKey));
+  }
+  DEBUG(ProcModels.back().dump());
+}
+
+// Recursively find all reachable SchedReadWrite records.
+static void scanSchedRW(Record *RWDef, RecVec &RWDefs,
+                        SmallPtrSet<Record*, 16> &RWSet) {
+  if (!RWSet.insert(RWDef))
+    return;
+  RWDefs.push_back(RWDef);
+  // Reads don't currently have sequence records, but they can be added later.
+  if (RWDef->isSubClassOf("WriteSequence")) {
+    RecVec Seq = RWDef->getValueAsListOfDefs("Writes");
+    for (RecIter I = Seq.begin(), E = Seq.end(); I != E; ++I)
+      scanSchedRW(*I, RWDefs, RWSet);
+  }
+  else if (RWDef->isSubClassOf("SchedVariant")) {
+    // Visit each variant (guarded by a different predicate).
+    RecVec Vars = RWDef->getValueAsListOfDefs("Variants");
+    for (RecIter VI = Vars.begin(), VE = Vars.end(); VI != VE; ++VI) {
+      // Visit each RW in the sequence selected by the current variant.
+      RecVec Selected = (*VI)->getValueAsListOfDefs("Selected");
+      for (RecIter I = Selected.begin(), E = Selected.end(); I != E; ++I)
+        scanSchedRW(*I, RWDefs, RWSet);
+    }
+  }
+}
+
+// Collect and sort all SchedReadWrites reachable via tablegen records.
+// More may be inferred later when inferring new SchedClasses from variants.
+void CodeGenSchedModels::collectSchedRW() {
+  // Reserve idx=0 for invalid writes/reads.
+  SchedWrites.resize(1);
+  SchedReads.resize(1);
+
+  SmallPtrSet<Record*, 16> RWSet;
+
+  // Find all SchedReadWrites referenced by instruction defs.
+  RecVec SWDefs, SRDefs;
+  for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
+         E = Target.inst_end(); I != E; ++I) {
+    Record *SchedDef = (*I)->TheDef;
+    if (!SchedDef->isSubClassOf("Sched"))
+      continue;
+    RecVec RWs = SchedDef->getValueAsListOfDefs("SchedRW");
+    for (RecIter RWI = RWs.begin(), RWE = RWs.end(); RWI != RWE; ++RWI) {
+      if ((*RWI)->isSubClassOf("SchedWrite"))
+        scanSchedRW(*RWI, SWDefs, RWSet);
+      else {
+        assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
+        scanSchedRW(*RWI, SRDefs, RWSet);
+      }
+    }
+  }
+  // Find all ReadWrites referenced by InstRW.
+  RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
+  for (RecIter OI = InstRWDefs.begin(), OE = InstRWDefs.end(); OI != OE; ++OI) {
+    // For all OperandReadWrites.
+    RecVec RWDefs = (*OI)->getValueAsListOfDefs("OperandReadWrites");
+    for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
+         RWI != RWE; ++RWI) {
+      if ((*RWI)->isSubClassOf("SchedWrite"))
+        scanSchedRW(*RWI, SWDefs, RWSet);
+      else {
+        assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
+        scanSchedRW(*RWI, SRDefs, RWSet);
+      }
+    }
+  }
+  // Find all ReadWrites referenced by ItinRW.
+  RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
+  for (RecIter II = ItinRWDefs.begin(), IE = ItinRWDefs.end(); II != IE; ++II) {
+    // For all OperandReadWrites.
+    RecVec RWDefs = (*II)->getValueAsListOfDefs("OperandReadWrites");
+    for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
+         RWI != RWE; ++RWI) {
+      if ((*RWI)->isSubClassOf("SchedWrite"))
+        scanSchedRW(*RWI, SWDefs, RWSet);
+      else {
+        assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
+        scanSchedRW(*RWI, SRDefs, RWSet);
+      }
+    }
+  }
+  // Find all ReadWrites referenced by SchedAlias. AliasDefs needs to be sorted
+  // for the loop below that initializes Alias vectors.
+  RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias");
+  std::sort(AliasDefs.begin(), AliasDefs.end(), LessRecord());
+  for (RecIter AI = AliasDefs.begin(), AE = AliasDefs.end(); AI != AE; ++AI) {
+    Record *MatchDef = (*AI)->getValueAsDef("MatchRW");
+    Record *AliasDef = (*AI)->getValueAsDef("AliasRW");
+    if (MatchDef->isSubClassOf("SchedWrite")) {
+      if (!AliasDef->isSubClassOf("SchedWrite"))
+        PrintFatalError((*AI)->getLoc(), "SchedWrite Alias must be SchedWrite");
+      scanSchedRW(AliasDef, SWDefs, RWSet);
+    }
+    else {
+      assert(MatchDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
+      if (!AliasDef->isSubClassOf("SchedRead"))
+        PrintFatalError((*AI)->getLoc(), "SchedRead Alias must be SchedRead");
+      scanSchedRW(AliasDef, SRDefs, RWSet);
+    }
+  }
+  // Sort and add the SchedReadWrites directly referenced by instructions or
+  // itinerary resources. Index reads and writes in separate domains.
+  std::sort(SWDefs.begin(), SWDefs.end(), LessRecord());
+  for (RecIter SWI = SWDefs.begin(), SWE = SWDefs.end(); SWI != SWE; ++SWI) {
+    assert(!getSchedRWIdx(*SWI, /*IsRead=*/false) && "duplicate SchedWrite");
+    SchedWrites.push_back(CodeGenSchedRW(SchedWrites.size(), *SWI));
+  }
+  std::sort(SRDefs.begin(), SRDefs.end(), LessRecord());
+  for (RecIter SRI = SRDefs.begin(), SRE = SRDefs.end(); SRI != SRE; ++SRI) {
+    assert(!getSchedRWIdx(*SRI, /*IsRead=*/true) && "duplicate SchedRead");
+    SchedReads.push_back(CodeGenSchedRW(SchedReads.size(), *SRI));
+  }
+  // Initialize WriteSequence vectors.
+  for (std::vector<CodeGenSchedRW>::iterator WI = SchedWrites.begin(),
+         WE = SchedWrites.end(); WI != WE; ++WI) {
+    if (!WI->IsSequence)
+      continue;
+    findRWs(WI->TheDef->getValueAsListOfDefs("Writes"), WI->Sequence,
+            /*IsRead=*/false);
+  }
+  // Initialize Aliases vectors.
+  for (RecIter AI = AliasDefs.begin(), AE = AliasDefs.end(); AI != AE; ++AI) {
+    Record *AliasDef = (*AI)->getValueAsDef("AliasRW");
+    getSchedRW(AliasDef).IsAlias = true;
+    Record *MatchDef = (*AI)->getValueAsDef("MatchRW");
+    CodeGenSchedRW &RW = getSchedRW(MatchDef);
+    if (RW.IsAlias)
+      PrintFatalError((*AI)->getLoc(), "Cannot Alias an Alias");
+    RW.Aliases.push_back(*AI);
+  }
+  DEBUG(
+    for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) {
+      dbgs() << WIdx << ": ";
+      SchedWrites[WIdx].dump();
+      dbgs() << '\n';
+    }
+    for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd; ++RIdx) {
+      dbgs() << RIdx << ": ";
+      SchedReads[RIdx].dump();
+      dbgs() << '\n';
+    }
+    RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite");
+    for (RecIter RI = RWDefs.begin(), RE = RWDefs.end();
+         RI != RE; ++RI) {
+      if (!getSchedRWIdx(*RI, (*RI)->isSubClassOf("SchedRead"))) {
+        const std::string &Name = (*RI)->getName();
+        if (Name != "NoWrite" && Name != "ReadDefault")
+          dbgs() << "Unused SchedReadWrite " << (*RI)->getName() << '\n';
+      }
+    });
+}
+
+/// Compute a SchedWrite name from a sequence of writes.
+std::string CodeGenSchedModels::genRWName(const IdxVec& Seq, bool IsRead) {
+  std::string Name("(");
+  for (IdxIter I = Seq.begin(), E = Seq.end(); I != E; ++I) {
+    if (I != Seq.begin())
+      Name += '_';
+    Name += getSchedRW(*I, IsRead).Name;
+  }
+  Name += ')';
+  return Name;
+}
+
+unsigned CodeGenSchedModels::getSchedRWIdx(Record *Def, bool IsRead,
+                                           unsigned After) const {
+  const std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
+  assert(After < RWVec.size() && "start position out of bounds");
+  for (std::vector<CodeGenSchedRW>::const_iterator I = RWVec.begin() + After,
+         E = RWVec.end(); I != E; ++I) {
+    if (I->TheDef == Def)
+      return I - RWVec.begin();
+  }
+  return 0;
+}
+
+bool CodeGenSchedModels::hasReadOfWrite(Record *WriteDef) const {
+  for (unsigned i = 0, e = SchedReads.size(); i < e; ++i) {
+    Record *ReadDef = SchedReads[i].TheDef;
+    if (!ReadDef || !ReadDef->isSubClassOf("ProcReadAdvance"))
+      continue;
+
+    RecVec ValidWrites = ReadDef->getValueAsListOfDefs("ValidWrites");
+    if (std::find(ValidWrites.begin(), ValidWrites.end(), WriteDef)
+        != ValidWrites.end()) {
+      return true;
+    }
+  }
+  return false;
+}
 
-  // Populate SchedClassIdxMap and set NumItineraryClasses.
-  CollectSchedClasses();
+namespace llvm {
+void splitSchedReadWrites(const RecVec &RWDefs,
+                          RecVec &WriteDefs, RecVec &ReadDefs) {
+  for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end(); RWI != RWE; ++RWI) {
+    if ((*RWI)->isSubClassOf("SchedWrite"))
+      WriteDefs.push_back(*RWI);
+    else {
+      assert((*RWI)->isSubClassOf("SchedRead") && "unknown SchedReadWrite");
+      ReadDefs.push_back(*RWI);
+    }
+  }
+}
+} // namespace llvm
 
-  // Populate ProcModelMap.
-  CollectProcModels();
+// Split the SchedReadWrites defs and call findRWs for each list.
+void CodeGenSchedModels::findRWs(const RecVec &RWDefs,
+                                 IdxVec &Writes, IdxVec &Reads) const {
+    RecVec WriteDefs;
+    RecVec ReadDefs;
+    splitSchedReadWrites(RWDefs, WriteDefs, ReadDefs);
+    findRWs(WriteDefs, Writes, false);
+    findRWs(ReadDefs, Reads, true);
 }
 
-// Visit all the instruction definitions for this target to gather and enumerate
-// the itinerary classes. These are the explicitly specified SchedClasses. More
-// SchedClasses may be inferred.
-void CodeGenSchedModels::CollectSchedClasses() {
+// Call getSchedRWIdx for all elements in a sequence of SchedRW defs.
+void CodeGenSchedModels::findRWs(const RecVec &RWDefs, IdxVec &RWs,
+                                 bool IsRead) const {
+  for (RecIter RI = RWDefs.begin(), RE = RWDefs.end(); RI != RE; ++RI) {
+    unsigned Idx = getSchedRWIdx(*RI, IsRead);
+    assert(Idx && "failed to collect SchedReadWrite");
+    RWs.push_back(Idx);
+  }
+}
 
-  // NoItinerary is always the first class at Index=0
+void CodeGenSchedModels::expandRWSequence(unsigned RWIdx, IdxVec &RWSeq,
+                                          bool IsRead) const {
+  const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
+  if (!SchedRW.IsSequence) {
+    RWSeq.push_back(RWIdx);
+    return;
+  }
+  int Repeat =
+    SchedRW.TheDef ? SchedRW.TheDef->getValueAsInt("Repeat") : 1;
+  for (int i = 0; i < Repeat; ++i) {
+    for (IdxIter I = SchedRW.Sequence.begin(), E = SchedRW.Sequence.end();
+         I != E; ++I) {
+      expandRWSequence(*I, RWSeq, IsRead);
+    }
+  }
+}
+
+// Expand a SchedWrite as a sequence following any aliases that coincide with
+// the given processor model.
+void CodeGenSchedModels::expandRWSeqForProc(
+  unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
+  const CodeGenProcModel &ProcModel) const {
+
+  const CodeGenSchedRW &SchedWrite = getSchedRW(RWIdx, IsRead);
+  Record *AliasDef = 0;
+  for (RecIter AI = SchedWrite.Aliases.begin(), AE = SchedWrite.Aliases.end();
+       AI != AE; ++AI) {
+    const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW"));
+    if ((*AI)->getValueInit("SchedModel")->isComplete()) {
+      Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
+      if (&getProcModel(ModelDef) != &ProcModel)
+        continue;
+    }
+    if (AliasDef)
+      PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
+                      "defined for processor " + ProcModel.ModelName +
+                      " Ensure only one SchedAlias exists per RW.");
+    AliasDef = AliasRW.TheDef;
+  }
+  if (AliasDef) {
+    expandRWSeqForProc(getSchedRWIdx(AliasDef, IsRead),
+                       RWSeq, IsRead, ProcModel);
+    return;
+  }
+  if (!SchedWrite.IsSequence) {
+    RWSeq.push_back(RWIdx);
+    return;
+  }
+  int Repeat =
+    SchedWrite.TheDef ? SchedWrite.TheDef->getValueAsInt("Repeat") : 1;
+  for (int i = 0; i < Repeat; ++i) {
+    for (IdxIter I = SchedWrite.Sequence.begin(), E = SchedWrite.Sequence.end();
+         I != E; ++I) {
+      expandRWSeqForProc(*I, RWSeq, IsRead, ProcModel);
+    }
+  }
+}
+
+// Find the existing SchedWrite that models this sequence of writes.
+unsigned CodeGenSchedModels::findRWForSequence(const IdxVec &Seq,
+                                               bool IsRead) {
+  std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
+
+  for (std::vector<CodeGenSchedRW>::iterator I = RWVec.begin(), E = RWVec.end();
+       I != E; ++I) {
+    if (I->Sequence == Seq)
+      return I - RWVec.begin();
+  }
+  // Index zero reserved for invalid RW.
+  return 0;
+}
+
+/// Add this ReadWrite if it doesn't already exist.
+unsigned CodeGenSchedModels::findOrInsertRW(ArrayRef<unsigned> Seq,
+                                            bool IsRead) {
+  assert(!Seq.empty() && "cannot insert empty sequence");
+  if (Seq.size() == 1)
+    return Seq.back();
+
+  unsigned Idx = findRWForSequence(Seq, IsRead);
+  if (Idx)
+    return Idx;
+
+  unsigned RWIdx = IsRead ? SchedReads.size() : SchedWrites.size();
+  CodeGenSchedRW SchedRW(RWIdx, IsRead, Seq, genRWName(Seq, IsRead));
+  if (IsRead)
+    SchedReads.push_back(SchedRW);
+  else
+    SchedWrites.push_back(SchedRW);
+  return RWIdx;
+}
+
+/// Visit all the instruction definitions for this target to gather and
+/// enumerate the itinerary classes. These are the explicitly specified
+/// SchedClasses. More SchedClasses may be inferred.
+void CodeGenSchedModels::collectSchedClasses() {
+
+  // NoItinerary is always the first class at Idx=0
   SchedClasses.resize(1);
   SchedClasses.back().Name = "NoItinerary";
+  SchedClasses.back().ProcIndices.push_back(0);
   SchedClassIdxMap[SchedClasses.back().Name] = 0;
 
   // Gather and sort all itinerary classes used by instruction descriptions.
-  std::vector<Record*> ItinClassList;
+  RecVec ItinClassList;
   for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
          E = Target.inst_end(); I != E; ++I) {
-    Record *SchedDef = (*I)->TheDef->getValueAsDef("Itinerary");
+    Record *ItinDef = (*I)->TheDef->getValueAsDef("Itinerary");
     // Map a new SchedClass with no index.
-    if (!SchedClassIdxMap.count(SchedDef->getName())) {
-      SchedClassIdxMap[SchedDef->getName()] = 0;
-      ItinClassList.push_back(SchedDef);
+    if (!SchedClassIdxMap.count(ItinDef->getName())) {
+      SchedClassIdxMap[ItinDef->getName()] = 0;
+      ItinClassList.push_back(ItinDef);
     }
   }
   // Assign each itinerary class unique number, skipping NoItinerary==0
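
The instregex operator defined earlier in this file turns each pattern into an
anchored prefix match by wrapping it in "^(" ... ")" before handing it to
llvm::Regex. A small standalone sketch of that transformation follows; the
driver function and the example opcode names are illustrative assumptions, not
part of the patch:

#include "llvm/Support/Regex.h"
#include <string>

// Mirror the "python-style prefix match" used by InstRegexOp::apply: unless
// the user already anchored the pattern with '^', wrap it so it must match
// from the start of the opcode name.
static bool matchesOpcodePrefix(const std::string &Pattern,
                                const std::string &InstName) {
  std::string Pat = Pattern;
  if (Pat.empty() || Pat[0] != '^') {
    Pat.insert(0, "^(");
    Pat.insert(Pat.end(), ')');
  }
  llvm::Regex RE(Pat);
  return RE.match(InstName);
}

// With this wrapping, a pattern like "ADD" is expected to match "ADD32rr"
// (a prefix match) but not "FADD32" (no match in the middle of the name).
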
@@ -61,91 +525,1139 @@
     SchedClassIdxMap[ItinDef->getName()] = SchedClasses.size();
     SchedClasses.push_back(CodeGenSchedClass(ItinDef));
   }
+  // Infer classes from SchedReadWrite resources listed for each
+  // instruction definition that inherits from class Sched.
+  for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
+         E = Target.inst_end(); I != E; ++I) {
+    if (!(*I)->TheDef->isSubClassOf("Sched"))
+      continue;
+    IdxVec Writes, Reads;
+    findRWs((*I)->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads);
+    // ProcIdx == 0 indicates the class applies to all processors.
+    IdxVec ProcIndices(1, 0);
+    addSchedClass(Writes, Reads, ProcIndices);
+  }
+  // Create classes for InstRW defs.
+  RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
+  std::sort(InstRWDefs.begin(), InstRWDefs.end(), LessRecord());
+  for (RecIter OI = InstRWDefs.begin(), OE = InstRWDefs.end(); OI != OE; ++OI)
+    createInstRWClass(*OI);
+
+  NumInstrSchedClasses = SchedClasses.size();
+
+  bool EnableDump = false;
+  DEBUG(EnableDump = true);
+  if (!EnableDump)
+    return;
+  for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
+         E = Target.inst_end(); I != E; ++I) {
+    Record *SchedDef = (*I)->TheDef;
+    std::string InstName = (*I)->TheDef->getName();
+    if (SchedDef->isSubClassOf("Sched")) {
+      IdxVec Writes;
+      IdxVec Reads;
+      findRWs((*I)->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads);
+      dbgs() << "SchedRW machine model for " << InstName;
+      for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI)
+        dbgs() << " " << SchedWrites[*WI].Name;
+      for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI)
+        dbgs() << " " << SchedReads[*RI].Name;
+      dbgs() << '\n';
+    }
+    unsigned SCIdx = InstrClassMap.lookup((*I)->TheDef);
+    if (SCIdx) {
+      const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs;
+      for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
+           RWI != RWE; ++RWI) {
+        const CodeGenProcModel &ProcModel =
+          getProcModel((*RWI)->getValueAsDef("SchedModel"));
+        dbgs() << "InstRW on " << ProcModel.ModelName << " for " << InstName;
+        IdxVec Writes;
+        IdxVec Reads;
+        findRWs((*RWI)->getValueAsListOfDefs("OperandReadWrites"),
+                Writes, Reads);
+        for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI)
+          dbgs() << " " << SchedWrites[*WI].Name;
+        for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI)
+          dbgs() << " " << SchedReads[*RI].Name;
+        dbgs() << '\n';
+      }
+      continue;
+    }
+    if (!SchedDef->isSubClassOf("Sched")
+        && (SchedDef->getValueAsDef("Itinerary")->getName() == "NoItinerary")) {
+      dbgs() << "No machine model for " << (*I)->TheDef->getName() << '\n';
+    }
+  }
+}
+
+unsigned CodeGenSchedModels::getSchedClassIdx(
+  const RecVec &RWDefs) const {
 
-  // TODO: Infer classes from non-itinerary scheduler resources.
+  IdxVec Writes, Reads;
+  findRWs(RWDefs, Writes, Reads);
+  return findSchedClassIdx(Writes, Reads);
 }
 
-// Gather all processor models.
-void CodeGenSchedModels::CollectProcModels() {
-  std::vector<Record*> ProcRecords =
-    Records.getAllDerivedDefinitions("Processor");
-  std::sort(ProcRecords.begin(), ProcRecords.end(), LessRecordFieldName());
+/// Find a SchedClass that has been inferred from a per-operand list of
+/// SchedWrites and SchedReads.
+unsigned CodeGenSchedModels::findSchedClassIdx(const IdxVec &Writes,
+                                               const IdxVec &Reads) const {
+  for (SchedClassIter I = schedClassBegin(), E = schedClassEnd(); I != E; ++I) {
+    // Classes with InstRWs may have the same Writes/Reads as a class originally
+    // produced by a SchedRW definition. We need to be able to recover the
+    // original class index for processors that don't match any InstRWs.
+    if (I->ItinClassDef || !I->InstRWs.empty())
+      continue;
 
-  // Reserve space because we can. Reallocation would be ok.
-  ProcModels.reserve(ProcRecords.size());
+    if (I->Writes == Writes && I->Reads == Reads) {
+      return I - schedClassBegin();
+    }
+  }
+  return 0;
+}
 
-  // For each processor, find a unique machine model.
-  for (unsigned i = 0, N = ProcRecords.size(); i < N; ++i)
-    addProcModel(ProcRecords[i]);
+// Get the SchedClass index for an instruction.
+unsigned CodeGenSchedModels::getSchedClassIdx(
+  const CodeGenInstruction &Inst) const {
+
+  unsigned SCIdx = InstrClassMap.lookup(Inst.TheDef);
+  if (SCIdx)
+    return SCIdx;
+
+  // If this opcode isn't mapped by the subtarget, fall back to the instruction
+  // definition's SchedRW or ItinDef values.
+  if (Inst.TheDef->isSubClassOf("Sched")) {
+    RecVec RWs = Inst.TheDef->getValueAsListOfDefs("SchedRW");
+    return getSchedClassIdx(RWs);
+  }
+  Record *ItinDef = Inst.TheDef->getValueAsDef("Itinerary");
+  assert(SchedClassIdxMap.count(ItinDef->getName()) && "missing ItinClass");
+  unsigned Idx = SchedClassIdxMap.lookup(ItinDef->getName());
+  assert(Idx <= NumItineraryClasses && "bad ItinClass index");
+  return Idx;
 }
 
-// Get a unique processor model based on the defined MachineModel and
-// ProcessorItineraries.
-void CodeGenSchedModels::addProcModel(Record *ProcDef) {
-  unsigned Idx = getProcModelIdx(ProcDef);
-  if (Idx < ProcModels.size())
-    return;
+std::string CodeGenSchedModels::createSchedClassName(
+  const IdxVec &OperWrites, const IdxVec &OperReads) {
+
+  std::string Name;
+  for (IdxIter WI = OperWrites.begin(), WE = OperWrites.end(); WI != WE; ++WI) {
+    if (WI != OperWrites.begin())
+      Name += '_';
+    Name += SchedWrites[*WI].Name;
+  }
+  for (IdxIter RI = OperReads.begin(), RE = OperReads.end(); RI != RE; ++RI) {
+    Name += '_';
+    Name += SchedReads[*RI].Name;
+  }
+  return Name;
+}
+
+std::string CodeGenSchedModels::createSchedClassName(const RecVec &InstDefs) {
+
+  std::string Name;
+  for (RecIter I = InstDefs.begin(), E = InstDefs.end(); I != E; ++I) {
+    if (I != InstDefs.begin())
+      Name += '_';
+    Name += (*I)->getName();
+  }
+  return Name;
+}
 
-  Record *ModelDef = ProcDef->getValueAsDef("SchedModel");
-  Record *ItinsDef = ProcDef->getValueAsDef("ProcItin");
+/// Add an inferred sched class from a per-operand list of SchedWrites and
+/// SchedReads. ProcIndices contains the set of IDs of processors that may
+/// utilize this class.
+unsigned CodeGenSchedModels::addSchedClass(const IdxVec &OperWrites,
+                                           const IdxVec &OperReads,
+                                           const IdxVec &ProcIndices)
+{
+  assert(!ProcIndices.empty() && "expect at least one ProcIdx");
+
+  unsigned Idx = findSchedClassIdx(OperWrites, OperReads);
+  if (Idx) {
+    IdxVec PI;
+    std::set_union(SchedClasses[Idx].ProcIndices.begin(),
+                   SchedClasses[Idx].ProcIndices.end(),
+                   ProcIndices.begin(), ProcIndices.end(),
+                   std::back_inserter(PI));
+    SchedClasses[Idx].ProcIndices.swap(PI);
+    return Idx;
+  }
+  Idx = SchedClasses.size();
+  SchedClasses.resize(Idx+1);
+  CodeGenSchedClass &SC = SchedClasses.back();
+  SC.Name = createSchedClassName(OperWrites, OperReads);
+  SC.Writes = OperWrites;
+  SC.Reads = OperReads;
+  SC.ProcIndices = ProcIndices;
+
+  return Idx;
+}
 
-  std::string ModelName = ModelDef->getName();
-  const std::string &ItinName = ItinsDef->getName();
-
-  bool NoModel = ModelDef->getValueAsBit("NoModel");
-  bool hasTopLevelItin = !ItinsDef->getValueAsListOfDefs("IID").empty();
-  if (NoModel) {
-    // If an itinerary is defined without a machine model, infer a new model.
-    if (NoModel && hasTopLevelItin) {
-      ModelName = ItinName + "Model";
-      ModelDef = NULL;
+// Create classes for each set of opcodes that are in the same InstReadWrite
+// definition across all processors.
+void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) {
+  // ClassInstrs will hold an entry for each subset of Instrs in InstRWDef that
+  // intersects with an existing class via a previous InstRWDef. Instrs that do
+  // not intersect with an existing class refer back to their former class as
+  // determined from ItinDef or SchedRW.
+  SmallVector<std::pair<unsigned, SmallVector<Record *, 8> >, 4> ClassInstrs;
+  // Sort Instrs into sets.
+  const RecVec *InstDefs = Sets.expand(InstRWDef);
+  if (InstDefs->empty())
+    PrintFatalError(InstRWDef->getLoc(), "No matching instruction opcodes");
+
+  for (RecIter I = InstDefs->begin(), E = InstDefs->end(); I != E; ++I) {
+    unsigned SCIdx = 0;
+    InstClassMapTy::const_iterator Pos = InstrClassMap.find(*I);
+    if (Pos != InstrClassMap.end())
+      SCIdx = Pos->second;
+    else {
+      // This instruction has not been mapped yet. Get the original class. All
+      // instructions in the same InstrRW class must be from the same original
+      // class because that is the fall-back class for other processors.
+      Record *ItinDef = (*I)->getValueAsDef("Itinerary");
+      SCIdx = SchedClassIdxMap.lookup(ItinDef->getName());
+      if (!SCIdx && (*I)->isSubClassOf("Sched"))
+        SCIdx = getSchedClassIdx((*I)->getValueAsListOfDefs("SchedRW"));
+    }
+    unsigned CIdx = 0, CEnd = ClassInstrs.size();
+    for (; CIdx != CEnd; ++CIdx) {
+      if (ClassInstrs[CIdx].first == SCIdx)
+        break;
+    }
+    if (CIdx == CEnd) {
+      ClassInstrs.resize(CEnd + 1);
+      ClassInstrs[CIdx].first = SCIdx;
+    }
+    ClassInstrs[CIdx].second.push_back(*I);
+  }
+  // For each set of Instrs, create a new class if necessary, and map or remap
+  // the Instrs to it.
+  unsigned CIdx = 0, CEnd = ClassInstrs.size();
+  for (; CIdx != CEnd; ++CIdx) {
+    unsigned OldSCIdx = ClassInstrs[CIdx].first;
+    ArrayRef<Record*> InstDefs = ClassInstrs[CIdx].second;
+    // If all the instrs in the current class are accounted for, then leave
+    // them mapped to their old class.
+    if (SchedClasses[OldSCIdx].InstRWs.size() == InstDefs.size()) {
+      assert(SchedClasses[OldSCIdx].ProcIndices[0] == 0 &&
+             "expected a generic SchedClass");
+      continue;
+    }
+    unsigned SCIdx = SchedClasses.size();
+    SchedClasses.resize(SCIdx+1);
+    CodeGenSchedClass &SC = SchedClasses.back();
+    SC.Name = createSchedClassName(InstDefs);
+    // Preserve ItinDef and Writes/Reads for processors without an InstRW entry.
+    SC.ItinClassDef = SchedClasses[OldSCIdx].ItinClassDef;
+    SC.Writes = SchedClasses[OldSCIdx].Writes;
+    SC.Reads = SchedClasses[OldSCIdx].Reads;
+    SC.ProcIndices.push_back(0);
+    // Map each Instr to this new class.
+    // Note that InstDefs may be a smaller list than InstRWDef's "Instrs".
+    Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
+    SmallSet<unsigned, 4> RemappedClassIDs;
+    for (ArrayRef<Record*>::const_iterator
+           II = InstDefs.begin(), IE = InstDefs.end(); II != IE; ++II) {
+      unsigned OldSCIdx = InstrClassMap[*II];
+      if (OldSCIdx && RemappedClassIDs.insert(OldSCIdx)) {
+        for (RecIter RI = SchedClasses[OldSCIdx].InstRWs.begin(),
+               RE = SchedClasses[OldSCIdx].InstRWs.end(); RI != RE; ++RI) {
+          if ((*RI)->getValueAsDef("SchedModel") == RWModelDef) {
+            PrintFatalError(InstRWDef->getLoc(), "Overlapping InstRW def " +
+                          (*II)->getName() + " also matches " +
+                          (*RI)->getValue("Instrs")->getValue()->getAsString());
+          }
+          assert(*RI != InstRWDef && "SchedClass has duplicate InstRW def");
+          SC.InstRWs.push_back(*RI);
+        }
+      }
+      InstrClassMap[*II] = SCIdx;
     }
+    SC.InstRWs.push_back(InstRWDef);
+  }
+}
+
+// Gather the processor itineraries.
+void CodeGenSchedModels::collectProcItins() {
+  for (std::vector<CodeGenProcModel>::iterator PI = ProcModels.begin(),
+         PE = ProcModels.end(); PI != PE; ++PI) {
+    CodeGenProcModel &ProcModel = *PI;
+    RecVec ItinRecords = ProcModel.ItinsDef->getValueAsListOfDefs("IID");
+    // Skip empty itinerary.
+    if (ItinRecords.empty())
+      continue;
+
+    ProcModel.ItinDefList.resize(NumItineraryClasses+1);
+
+    // Insert each itinerary data record in the correct position within
+    // the processor model's ItinDefList.
+    for (unsigned i = 0, N = ItinRecords.size(); i < N; i++) {
+      Record *ItinData = ItinRecords[i];
+      Record *ItinDef = ItinData->getValueAsDef("TheClass");
+      if (!SchedClassIdxMap.count(ItinDef->getName())) {
+        DEBUG(dbgs() << ProcModel.ItinsDef->getName()
+              << " has unused itinerary class " << ItinDef->getName() << '\n');
+        continue;
+      }
+      assert(SchedClassIdxMap.count(ItinDef->getName()) && "missing ItinClass");
+      unsigned Idx = SchedClassIdxMap.lookup(ItinDef->getName());
+      assert(Idx <= NumItineraryClasses && "bad ItinClass index");
+      ProcModel.ItinDefList[Idx] = ItinData;
+    }
+    // Check for missing itinerary entries.
+    assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
+    DEBUG(
+      for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
+        if (!ProcModel.ItinDefList[i])
+          dbgs() << ProcModel.ItinsDef->getName()
+                 << " missing itinerary for class "
+                 << SchedClasses[i].Name << '\n';
+      });
+  }
+}
+
+// Gather the read/write types for each itinerary class.
+void CodeGenSchedModels::collectProcItinRW() {
+  RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
+  std::sort(ItinRWDefs.begin(), ItinRWDefs.end(), LessRecord());
+  for (RecIter II = ItinRWDefs.begin(), IE = ItinRWDefs.end(); II != IE; ++II) {
+    if (!(*II)->getValueInit("SchedModel")->isComplete())
+      PrintFatalError((*II)->getLoc(), "SchedModel is undefined");
+    Record *ModelDef = (*II)->getValueAsDef("SchedModel");
+    ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
+    if (I == ProcModelMap.end()) {
+      PrintFatalError((*II)->getLoc(), "Undefined SchedMachineModel "
+                    + ModelDef->getName());
+    }
+    ProcModels[I->second].ItinRWDefs.push_back(*II);
+  }
+}
+
+/// Infer new classes from existing classes. In the process, this may create new
+/// SchedWrites from sequences of existing SchedWrites.
+void CodeGenSchedModels::inferSchedClasses() {
+  // Visit all existing classes and newly created classes.
+  for (unsigned Idx = 0; Idx != SchedClasses.size(); ++Idx) {
+    if (SchedClasses[Idx].ItinClassDef)
+      inferFromItinClass(SchedClasses[Idx].ItinClassDef, Idx);
+    else if (!SchedClasses[Idx].InstRWs.empty())
+      inferFromInstRWs(Idx);
+    else {
+      inferFromRW(SchedClasses[Idx].Writes, SchedClasses[Idx].Reads,
+                  Idx, SchedClasses[Idx].ProcIndices);
+    }
+    assert(SchedClasses.size() < (NumInstrSchedClasses*6) &&
+           "too many SchedVariants");
+  }
+}
+
+/// Infer classes from per-processor itinerary resources.
+void CodeGenSchedModels::inferFromItinClass(Record *ItinClassDef,
+                                            unsigned FromClassIdx) {
+  for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
+    const CodeGenProcModel &PM = ProcModels[PIdx];
+    // For all ItinRW entries.
+    bool HasMatch = false;
+    for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end();
+         II != IE; ++II) {
+      RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
+      if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
+        continue;
+      if (HasMatch)
+        PrintFatalError((*II)->getLoc(), "Duplicate itinerary class "
+                      + ItinClassDef->getName()
+                      + " in ItinResources for " + PM.ModelName);
+      HasMatch = true;
+      IdxVec Writes, Reads;
+      findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
+      IdxVec ProcIndices(1, PIdx);
+      inferFromRW(Writes, Reads, FromClassIdx, ProcIndices);
+    }
+  }
+}
+
+/// Infer classes from per-processor InstReadWrite definitions.
+void CodeGenSchedModels::inferFromInstRWs(unsigned SCIdx) {
+  const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs;
+  for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end(); RWI != RWE; ++RWI) {
+    const RecVec *InstDefs = Sets.expand(*RWI);
+    RecIter II = InstDefs->begin(), IE = InstDefs->end();
+    for (; II != IE; ++II) {
+      if (InstrClassMap[*II] == SCIdx)
+        break;
+    }
+    // If this class no longer has any instructions mapped to it, it has become
+    // irrelevant.
+    if (II == IE)
+      continue;
+    IdxVec Writes, Reads;
+    findRWs((*RWI)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
+    unsigned PIdx = getProcModel((*RWI)->getValueAsDef("SchedModel")).Index;
+    IdxVec ProcIndices(1, PIdx);
+    inferFromRW(Writes, Reads, SCIdx, ProcIndices);
+  }
+}
+
+namespace {
+// Helper for substituteVariantOperand.
+struct TransVariant {
+  Record *VarOrSeqDef;  // Variant or sequence.
+  unsigned RWIdx;       // Index of this variant or sequence's matched type.
+  unsigned ProcIdx;     // Processor model index or zero for any.
+  unsigned TransVecIdx; // Index into PredTransitions::TransVec.
+
+  TransVariant(Record *def, unsigned rwi, unsigned pi, unsigned ti):
+    VarOrSeqDef(def), RWIdx(rwi), ProcIdx(pi), TransVecIdx(ti) {}
+};
+
+// Associate a predicate with the SchedReadWrite that it guards.
+// RWIdx is the index of the read/write variant.
+struct PredCheck {
+  bool IsRead;
+  unsigned RWIdx;
+  Record *Predicate;
+
+  PredCheck(bool r, unsigned w, Record *p): IsRead(r), RWIdx(w), Predicate(p) {}
+};
+
+// A Predicate transition is a list of RW sequences guarded by a PredTerm.
+struct PredTransition {
+  // A predicate term is a conjunction of PredChecks.
+  SmallVector<PredCheck, 4> PredTerm;
+  SmallVector<SmallVector<unsigned,4>, 16> WriteSequences;
+  SmallVector<SmallVector<unsigned,4>, 16> ReadSequences;
+  SmallVector<unsigned, 4> ProcIndices;
+};
+
+// Encapsulate a set of partially constructed transitions.
+// The results are built by repeated calls to substituteVariants.
+class PredTransitions {
+  CodeGenSchedModels &SchedModels;
+
+public:
+  std::vector<PredTransition> TransVec;
+
+  PredTransitions(CodeGenSchedModels &sm): SchedModels(sm) {}
+
+  void substituteVariantOperand(const SmallVectorImpl<unsigned> &RWSeq,
+                                bool IsRead, unsigned StartIdx);
+
+  void substituteVariants(const PredTransition &Trans);
+
+#ifndef NDEBUG
+  void dump() const;
+#endif
+
+private:
+  bool mutuallyExclusive(Record *PredDef, ArrayRef<PredCheck> Term);
+  void getIntersectingVariants(
+    const CodeGenSchedRW &SchedRW, unsigned TransIdx,
+    std::vector<TransVariant> &IntersectingVariants);
+  void pushVariant(const TransVariant &VInfo, bool IsRead);
+};
+} // anonymous
+
+// Return true if this predicate is mutually exclusive with a PredTerm. This
+// degenerates into checking if the predicate is mutually exclusive with any
+// predicate in the Term's conjunction.
+//
+// All predicates associated with a given SchedRW are considered mutually
+// exclusive. This should work even if the conditions expressed by the
+// predicates are not exclusive because the predicates for a given SchedWrite
+// are always checked in the order they are defined in the .td file. Later
+// conditions implicitly negate any prior condition.
+bool PredTransitions::mutuallyExclusive(Record *PredDef,
+                                        ArrayRef<PredCheck> Term) {
+
+  for (ArrayRef<PredCheck>::iterator I = Term.begin(), E = Term.end();
+       I != E; ++I) {
+    if (I->Predicate == PredDef)
+      return false;
+
+    const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(I->RWIdx, I->IsRead);
+    assert(SchedRW.HasVariants && "PredCheck must refer to a SchedVariant");
+    RecVec Variants = SchedRW.TheDef->getValueAsListOfDefs("Variants");
+    for (RecIter VI = Variants.begin(), VE = Variants.end(); VI != VE; ++VI) {
+      if ((*VI)->getValueAsDef("Predicate") == PredDef)
+        return true;
+    }
+  }
+  return false;
+}
+
+static bool hasAliasedVariants(const CodeGenSchedRW &RW,
+                               CodeGenSchedModels &SchedModels) {
+  if (RW.HasVariants)
+    return true;
+
+  for (RecIter I = RW.Aliases.begin(), E = RW.Aliases.end(); I != E; ++I) {
+    const CodeGenSchedRW &AliasRW =
+      SchedModels.getSchedRW((*I)->getValueAsDef("AliasRW"));
+    if (AliasRW.HasVariants)
+      return true;
+    if (AliasRW.IsSequence) {
+      IdxVec ExpandedRWs;
+      SchedModels.expandRWSequence(AliasRW.Index, ExpandedRWs, AliasRW.IsRead);
+      for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end();
+           SI != SE; ++SI) {
+        if (hasAliasedVariants(SchedModels.getSchedRW(*SI, AliasRW.IsRead),
+                               SchedModels)) {
+          return true;
+        }
+      }
+    }
+  }
+  return false;
+}
+
+static bool hasVariant(ArrayRef<PredTransition> Transitions,
+                       CodeGenSchedModels &SchedModels) {
+  for (ArrayRef<PredTransition>::iterator
+         PTI = Transitions.begin(), PTE = Transitions.end();
+       PTI != PTE; ++PTI) {
+    for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+           WSI = PTI->WriteSequences.begin(), WSE = PTI->WriteSequences.end();
+         WSI != WSE; ++WSI) {
+      for (SmallVectorImpl<unsigned>::const_iterator
+             WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) {
+        if (hasAliasedVariants(SchedModels.getSchedWrite(*WI), SchedModels))
+          return true;
+      }
+    }
+    for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+           RSI = PTI->ReadSequences.begin(), RSE = PTI->ReadSequences.end();
+         RSI != RSE; ++RSI) {
+      for (SmallVectorImpl<unsigned>::const_iterator
+             RI = RSI->begin(), RE = RSI->end(); RI != RE; ++RI) {
+        if (hasAliasedVariants(SchedModels.getSchedRead(*RI), SchedModels))
+          return true;
+      }
+    }
+  }
+  return false;
+}
+
+// Populate IntersectingVariants with any variants or aliased sequences of the
+// given SchedRW whose processor indices and predicates are not mutually
+// exclusive with the given transition.
+void PredTransitions::getIntersectingVariants(
+  const CodeGenSchedRW &SchedRW, unsigned TransIdx,
+  std::vector<TransVariant> &IntersectingVariants) {
+
+  std::vector<TransVariant> Variants;
+  if (SchedRW.HasVariants) {
+    unsigned VarProcIdx = 0;
+    if (SchedRW.TheDef->getValueInit("SchedModel")->isComplete()) {
+      Record *ModelDef = SchedRW.TheDef->getValueAsDef("SchedModel");
+      VarProcIdx = SchedModels.getProcModel(ModelDef).Index;
+    }
+    // Push each variant. Assign TransVecIdx later.
+    const RecVec VarDefs = SchedRW.TheDef->getValueAsListOfDefs("Variants");
+    for (RecIter RI = VarDefs.begin(), RE = VarDefs.end(); RI != RE; ++RI)
+      Variants.push_back(TransVariant(*RI, SchedRW.Index, VarProcIdx, 0));
+  }
+  for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
+       AI != AE; ++AI) {
+    // If either the SchedAlias itself or the SchedReadWrite that it aliases
+    // to is defined within a processor model, constrain all variants to
+    // that processor.
+    unsigned AliasProcIdx = 0;
+    if ((*AI)->getValueInit("SchedModel")->isComplete()) {
+      Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
+      AliasProcIdx = SchedModels.getProcModel(ModelDef).Index;
+    }
+    const CodeGenSchedRW &AliasRW =
+      SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));
+
+    if (AliasRW.HasVariants) {
+      const RecVec VarDefs = AliasRW.TheDef->getValueAsListOfDefs("Variants");
+      for (RecIter RI = VarDefs.begin(), RE = VarDefs.end(); RI != RE; ++RI)
+        Variants.push_back(TransVariant(*RI, AliasRW.Index, AliasProcIdx, 0));
+    }
+    if (AliasRW.IsSequence) {
+      Variants.push_back(
+        TransVariant(AliasRW.TheDef, SchedRW.Index, AliasProcIdx, 0));
+    }
+  }
+  for (unsigned VIdx = 0, VEnd = Variants.size(); VIdx != VEnd; ++VIdx) {
+    TransVariant &Variant = Variants[VIdx];
+    // Don't expand variants if the processor models don't intersect.
+    // A zero processor index means any processor.
+    SmallVector<unsigned, 4> &ProcIndices = TransVec[TransIdx].ProcIndices;
+    if (ProcIndices[0] && Variants[VIdx].ProcIdx) {
+      unsigned Cnt = std::count(ProcIndices.begin(), ProcIndices.end(),
+                                Variant.ProcIdx);
+      if (!Cnt)
+        continue;
+      if (Cnt > 1) {
+        const CodeGenProcModel &PM =
+          *(SchedModels.procModelBegin() + Variant.ProcIdx);
+        PrintFatalError(Variant.VarOrSeqDef->getLoc(),
+                        "Multiple variants defined for processor " +
+                        PM.ModelName +
+                        " Ensure only one SchedAlias exists per RW.");
+      }
+    }
+    if (Variant.VarOrSeqDef->isSubClassOf("SchedVar")) {
+      Record *PredDef = Variant.VarOrSeqDef->getValueAsDef("Predicate");
+      if (mutuallyExclusive(PredDef, TransVec[TransIdx].PredTerm))
+        continue;
+    }
+    if (IntersectingVariants.empty()) {
+      // The first variant builds on the existing transition.
+      Variant.TransVecIdx = TransIdx;
+      IntersectingVariants.push_back(Variant);
+    }
+    else {
+      // Push another copy of the current transition for more variants.
+      Variant.TransVecIdx = TransVec.size();
+      IntersectingVariants.push_back(Variant);
+      TransVec.push_back(TransVec[TransIdx]);
+    }
+  }
+}
+
+// Push the Reads/Writes selected by this variant onto the PredTransition
+// specified by VInfo.
+void PredTransitions::
+pushVariant(const TransVariant &VInfo, bool IsRead) {
+
+  PredTransition &Trans = TransVec[VInfo.TransVecIdx];
+
+  // If this operand transition is reached through a processor-specific alias,
+  // then the whole transition is specific to this processor.
+  if (VInfo.ProcIdx != 0)
+    Trans.ProcIndices.assign(1, VInfo.ProcIdx);
+
+  IdxVec SelectedRWs;
+  if (VInfo.VarOrSeqDef->isSubClassOf("SchedVar")) {
+    Record *PredDef = VInfo.VarOrSeqDef->getValueAsDef("Predicate");
+    Trans.PredTerm.push_back(PredCheck(IsRead, VInfo.RWIdx, PredDef));
+    RecVec SelectedDefs = VInfo.VarOrSeqDef->getValueAsListOfDefs("Selected");
+    SchedModels.findRWs(SelectedDefs, SelectedRWs, IsRead);
   }
   else {
-    // If a machine model is defined, the itinerary must be defined within it
-    // rather than in the Processor definition itself.
-    assert(!hasTopLevelItin && "Itinerary must be defined in SchedModel");
-    ItinsDef = ModelDef->getValueAsDef("Itineraries");
+    assert(VInfo.VarOrSeqDef->isSubClassOf("WriteSequence") &&
+           "variant must be a SchedVariant or aliased WriteSequence");
+    SelectedRWs.push_back(SchedModels.getSchedRWIdx(VInfo.VarOrSeqDef, IsRead));
   }
 
-  ProcModelMap[getProcModelKey(ProcDef)]= ProcModels.size();
+  const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(VInfo.RWIdx, IsRead);
 
-  ProcModels.push_back(CodeGenProcModel(ModelName, ModelDef, ItinsDef));
+  SmallVectorImpl<SmallVector<unsigned,4> > &RWSequences = IsRead
+    ? Trans.ReadSequences : Trans.WriteSequences;
+  if (SchedRW.IsVariadic) {
+    unsigned OperIdx = RWSequences.size()-1;
+    // Make N-1 copies of this transition's last sequence.
+    for (unsigned i = 1, e = SelectedRWs.size(); i != e; ++i) {
+      RWSequences.push_back(RWSequences[OperIdx]);
+    }
+    // Push each of the N elements of the SelectedRWs onto a copy of the last
+    // sequence (split the current operand into N operands).
+    // Note that write sequences should be expanded within this loop--the entire
+    // sequence belongs to a single operand.
+    for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
+         RWI != RWE; ++RWI, ++OperIdx) {
+      IdxVec ExpandedRWs;
+      if (IsRead)
+        ExpandedRWs.push_back(*RWI);
+      else
+        SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
+      RWSequences[OperIdx].insert(RWSequences[OperIdx].end(),
+                                  ExpandedRWs.begin(), ExpandedRWs.end());
+    }
+    assert(OperIdx == RWSequences.size() && "missed a sequence");
+  }
+  else {
+    // Push this transition's expanded sequence onto this transition's last
+    // sequence (add to the current operand's sequence).
+    SmallVectorImpl<unsigned> &Seq = RWSequences.back();
+    IdxVec ExpandedRWs;
+    for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
+         RWI != RWE; ++RWI) {
+      if (IsRead)
+        ExpandedRWs.push_back(*RWI);
+      else
+        SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
+    }
+    Seq.insert(Seq.end(), ExpandedRWs.begin(), ExpandedRWs.end());
+  }
+}
 
-  std::vector<Record*> ItinRecords = ItinsDef->getValueAsListOfDefs("IID");
-  CollectProcItin(ProcModels.back(), ItinRecords);
+// RWSeq is a sequence of all Reads or all Writes for the next read or write
+// operand. StartIdx is an index into TransVec where partial results
+// start. RWSeq must be applied to all transitions between StartIdx and the end
+// of TransVec.
+void PredTransitions::substituteVariantOperand(
+  const SmallVectorImpl<unsigned> &RWSeq, bool IsRead, unsigned StartIdx) {
+
+  // Visit each original RW within the current sequence.
+  for (SmallVectorImpl<unsigned>::const_iterator
+         RWI = RWSeq.begin(), RWE = RWSeq.end(); RWI != RWE; ++RWI) {
+    const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(*RWI, IsRead);
+    // Push this RW on all partial PredTransitions or distribute variants.
+    // New PredTransitions may be pushed within this loop which should not be
+    // revisited (TransEnd must be loop invariant).
+    for (unsigned TransIdx = StartIdx, TransEnd = TransVec.size();
+         TransIdx != TransEnd; ++TransIdx) {
+      // In the common case, push RW onto the current operand's sequence.
+      if (!hasAliasedVariants(SchedRW, SchedModels)) {
+        if (IsRead)
+          TransVec[TransIdx].ReadSequences.back().push_back(*RWI);
+        else
+          TransVec[TransIdx].WriteSequences.back().push_back(*RWI);
+        continue;
+      }
+      // Distribute this partial PredTransition across intersecting variants.
+      // This will push copies of TransVec[TransIdx] onto the back of TransVec.
+      std::vector<TransVariant> IntersectingVariants;
+      getIntersectingVariants(SchedRW, TransIdx, IntersectingVariants);
+      if (IntersectingVariants.empty())
+        PrintFatalError(SchedRW.TheDef->getLoc(),
+                      "No variant of this type has "
+                      "a matching predicate on any processor");
+      // Now expand each variant on top of its copy of the transition.
+      for (std::vector<TransVariant>::const_iterator
+             IVI = IntersectingVariants.begin(),
+             IVE = IntersectingVariants.end();
+           IVI != IVE; ++IVI) {
+        pushVariant(*IVI, IsRead);
+      }
+    }
+  }
 }
 
-// Gather the processor itineraries.
-void CodeGenSchedModels::CollectProcItin(CodeGenProcModel &ProcModel,
-                                         std::vector<Record*> ItinRecords) {
-  // Skip empty itinerary.
-  if (ItinRecords.empty())
+// For each variant of a Read/Write in Trans, substitute the sequence of
+// Read/Writes guarded by the variant. This is exponential in the number of
+// variant Read/Writes, but in practice detection of mutually exclusive
+// predicates should result in linear growth in the total number of variants.
+//
+// This is one step in a breadth-first search of nested variants.
+void PredTransitions::substituteVariants(const PredTransition &Trans) {
+  // Build up a set of partial results starting at the back of
+  // PredTransitions. Remember the first new transition.
+  unsigned StartIdx = TransVec.size();
+  TransVec.resize(TransVec.size() + 1);
+  TransVec.back().PredTerm = Trans.PredTerm;
+  TransVec.back().ProcIndices = Trans.ProcIndices;
+
+  // Visit each original write sequence.
+  for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+         WSI = Trans.WriteSequences.begin(), WSE = Trans.WriteSequences.end();
+       WSI != WSE; ++WSI) {
+    // Push a new (empty) write sequence onto all partial Transitions.
+    for (std::vector<PredTransition>::iterator I =
+           TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
+      I->WriteSequences.resize(I->WriteSequences.size() + 1);
+    }
+    substituteVariantOperand(*WSI, /*IsRead=*/false, StartIdx);
+  }
+  // Visit each original read sequence.
+  for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+         RSI = Trans.ReadSequences.begin(), RSE = Trans.ReadSequences.end();
+       RSI != RSE; ++RSI) {
+    // Push a new (empty) read sequence onto all partial Transitions.
+    for (std::vector<PredTransition>::iterator I =
+           TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
+      I->ReadSequences.resize(I->ReadSequences.size() + 1);
+    }
+    substituteVariantOperand(*RSI, /*IsRead=*/true, StartIdx);
+  }
+}
+
+// Create a new SchedClass for each variant found by inferFromRW.
+static void inferFromTransitions(ArrayRef<PredTransition> LastTransitions,
+                                 unsigned FromClassIdx,
+                                 CodeGenSchedModels &SchedModels) {
+  // For each PredTransition, create a new CodeGenSchedTransition, which usually
+  // requires creating a new SchedClass.
+  for (ArrayRef<PredTransition>::iterator
+         I = LastTransitions.begin(), E = LastTransitions.end(); I != E; ++I) {
+    IdxVec OperWritesVariant;
+    for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+           WSI = I->WriteSequences.begin(), WSE = I->WriteSequences.end();
+         WSI != WSE; ++WSI) {
+      // Create a new write representing the expanded sequence.
+      OperWritesVariant.push_back(
+        SchedModels.findOrInsertRW(*WSI, /*IsRead=*/false));
+    }
+    IdxVec OperReadsVariant;
+    for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+           RSI = I->ReadSequences.begin(), RSE = I->ReadSequences.end();
+         RSI != RSE; ++RSI) {
+      // Create a new read representing the expanded sequence.
+      OperReadsVariant.push_back(
+        SchedModels.findOrInsertRW(*RSI, /*IsRead=*/true));
+    }
+    IdxVec ProcIndices(I->ProcIndices.begin(), I->ProcIndices.end());
+    CodeGenSchedTransition SCTrans;
+    SCTrans.ToClassIdx =
+      SchedModels.addSchedClass(OperWritesVariant, OperReadsVariant,
+                                ProcIndices);
+    SCTrans.ProcIndices = ProcIndices;
+    // The final PredTerm is the unique set of predicates guarding the transition.
+    RecVec Preds;
+    for (SmallVectorImpl<PredCheck>::const_iterator
+           PI = I->PredTerm.begin(), PE = I->PredTerm.end(); PI != PE; ++PI) {
+      Preds.push_back(PI->Predicate);
+    }
+    RecIter PredsEnd = std::unique(Preds.begin(), Preds.end());
+    Preds.resize(PredsEnd - Preds.begin());
+    SCTrans.PredTerm = Preds;
+    SchedModels.getSchedClass(FromClassIdx).Transitions.push_back(SCTrans);
+  }
+}
+
+// Create new SchedClasses for the given ReadWrite list. If any of the
+// ReadWrites refers to a SchedVariant, create a new SchedClass for each variant
+// of the ReadWrite list, following Aliases if necessary.
+void CodeGenSchedModels::inferFromRW(const IdxVec &OperWrites,
+                                     const IdxVec &OperReads,
+                                     unsigned FromClassIdx,
+                                     const IdxVec &ProcIndices) {
+  DEBUG(dbgs() << "INFER RW: ");
+
+  // Create a seed transition with an empty PredTerm and the expanded sequences
+  // of SchedWrites for the current SchedClass.
+  std::vector<PredTransition> LastTransitions;
+  LastTransitions.resize(1);
+  LastTransitions.back().ProcIndices.append(ProcIndices.begin(),
+                                            ProcIndices.end());
+
+  for (IdxIter I = OperWrites.begin(), E = OperWrites.end(); I != E; ++I) {
+    IdxVec WriteSeq;
+    expandRWSequence(*I, WriteSeq, /*IsRead=*/false);
+    unsigned Idx = LastTransitions[0].WriteSequences.size();
+    LastTransitions[0].WriteSequences.resize(Idx + 1);
+    SmallVectorImpl<unsigned> &Seq = LastTransitions[0].WriteSequences[Idx];
+    for (IdxIter WI = WriteSeq.begin(), WE = WriteSeq.end(); WI != WE; ++WI)
+      Seq.push_back(*WI);
+    DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
+  }
+  DEBUG(dbgs() << " Reads: ");
+  for (IdxIter I = OperReads.begin(), E = OperReads.end(); I != E; ++I) {
+    IdxVec ReadSeq;
+    expandRWSequence(*I, ReadSeq, /*IsRead=*/true);
+    unsigned Idx = LastTransitions[0].ReadSequences.size();
+    LastTransitions[0].ReadSequences.resize(Idx + 1);
+    SmallVectorImpl<unsigned> &Seq = LastTransitions[0].ReadSequences[Idx];
+    for (IdxIter RI = ReadSeq.begin(), RE = ReadSeq.end(); RI != RE; ++RI)
+      Seq.push_back(*RI);
+    DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
+  }
+  DEBUG(dbgs() << '\n');
+
+  // Collect all PredTransitions for individual operands.
+  // Iterate until no variant writes remain.
+  while (hasVariant(LastTransitions, *this)) {
+    PredTransitions Transitions(*this);
+    for (std::vector<PredTransition>::const_iterator
+           I = LastTransitions.begin(), E = LastTransitions.end();
+         I != E; ++I) {
+      Transitions.substituteVariants(*I);
+    }
+    DEBUG(Transitions.dump());
+    LastTransitions.swap(Transitions.TransVec);
+  }
+  // If the first transition has no variants, nothing to do.
+  if (LastTransitions[0].PredTerm.empty())
     return;
 
-  HasProcItineraries = true;
+  // WARNING: We are about to mutate the SchedClasses vector. Do not refer to
+  // OperWrites, OperReads, or ProcIndices after calling inferFromTransitions.
+  inferFromTransitions(LastTransitions, FromClassIdx, *this);
+}
+
+// Collect and sort WriteRes, ReadAdvance, and ProcResources.
+void CodeGenSchedModels::collectProcResources() {
+  // Add any subtarget-specific SchedReadWrites that are directly associated
+  // with processor resources. Refer to the parent SchedClass's ProcIndices to
+  // determine which processors they apply to.
+  for (SchedClassIter SCI = schedClassBegin(), SCE = schedClassEnd();
+       SCI != SCE; ++SCI) {
+    if (SCI->ItinClassDef)
+      collectItinProcResources(SCI->ItinClassDef);
+    else
+      collectRWResources(SCI->Writes, SCI->Reads, SCI->ProcIndices);
+  }
+  // Add resources separately defined by each subtarget.
+  RecVec WRDefs = Records.getAllDerivedDefinitions("WriteRes");
+  for (RecIter WRI = WRDefs.begin(), WRE = WRDefs.end(); WRI != WRE; ++WRI) {
+    Record *ModelDef = (*WRI)->getValueAsDef("SchedModel");
+    addWriteRes(*WRI, getProcModel(ModelDef).Index);
+  }
+  RecVec RADefs = Records.getAllDerivedDefinitions("ReadAdvance");
+  for (RecIter RAI = RADefs.begin(), RAE = RADefs.end(); RAI != RAE; ++RAI) {
+    Record *ModelDef = (*RAI)->getValueAsDef("SchedModel");
+    addReadAdvance(*RAI, getProcModel(ModelDef).Index);
+  }
+  // Finalize each ProcModel by sorting the record arrays.
+  for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
+    CodeGenProcModel &PM = ProcModels[PIdx];
+    std::sort(PM.WriteResDefs.begin(), PM.WriteResDefs.end(),
+              LessRecord());
+    std::sort(PM.ReadAdvanceDefs.begin(), PM.ReadAdvanceDefs.end(),
+              LessRecord());
+    std::sort(PM.ProcResourceDefs.begin(), PM.ProcResourceDefs.end(),
+              LessRecord());
+    DEBUG(
+      PM.dump();
+      dbgs() << "WriteResDefs: ";
+      for (RecIter RI = PM.WriteResDefs.begin(),
+             RE = PM.WriteResDefs.end(); RI != RE; ++RI) {
+        if ((*RI)->isSubClassOf("WriteRes"))
+          dbgs() << (*RI)->getValueAsDef("WriteType")->getName() << " ";
+        else
+          dbgs() << (*RI)->getName() << " ";
+      }
+      dbgs() << "\nReadAdvanceDefs: ";
+      for (RecIter RI = PM.ReadAdvanceDefs.begin(),
+             RE = PM.ReadAdvanceDefs.end(); RI != RE; ++RI) {
+        if ((*RI)->isSubClassOf("ReadAdvance"))
+          dbgs() << (*RI)->getValueAsDef("ReadType")->getName() << " ";
+        else
+          dbgs() << (*RI)->getName() << " ";
+      }
+      dbgs() << "\nProcResourceDefs: ";
+      for (RecIter RI = PM.ProcResourceDefs.begin(),
+             RE = PM.ProcResourceDefs.end(); RI != RE; ++RI) {
+        dbgs() << (*RI)->getName() << " ";
+      }
+      dbgs() << '\n');
+  }
+}
 
-  ProcModel.ItinDefList.resize(NumItineraryClasses+1);
+// Collect itinerary class resources for each processor.
+void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) {
+  for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
+    const CodeGenProcModel &PM = ProcModels[PIdx];
+    // For all ItinRW entries.
+    bool HasMatch = false;
+    for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end();
+         II != IE; ++II) {
+      RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
+      if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
+        continue;
+      if (HasMatch)
+        PrintFatalError((*II)->getLoc(), "Duplicate itinerary class "
+                        + ItinClassDef->getName()
+                        + " in ItinResources for " + PM.ModelName);
+      HasMatch = true;
+      IdxVec Writes, Reads;
+      findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
+      IdxVec ProcIndices(1, PIdx);
+      collectRWResources(Writes, Reads, ProcIndices);
+    }
+  }
+}
 
-  // Insert each itinerary data record in the correct position within
-  // the processor model's ItinDefList.
-  for (unsigned i = 0, N = ItinRecords.size(); i < N; i++) {
-    Record *ItinData = ItinRecords[i];
-    Record *ItinDef = ItinData->getValueAsDef("TheClass");
-    if (!SchedClassIdxMap.count(ItinDef->getName())) {
-      DEBUG(dbgs() << ProcModel.ItinsDef->getName()
-            << " has unused itinerary class " << ItinDef->getName() << '\n');
-      continue;
+void CodeGenSchedModels::collectRWResources(unsigned RWIdx, bool IsRead,
+                                            const IdxVec &ProcIndices) {
+  const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
+  if (SchedRW.TheDef) {
+    if (!IsRead && SchedRW.TheDef->isSubClassOf("SchedWriteRes")) {
+      for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
+           PI != PE; ++PI) {
+        addWriteRes(SchedRW.TheDef, *PI);
+      }
+    }
+    else if (IsRead && SchedRW.TheDef->isSubClassOf("SchedReadAdvance")) {
+      for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
+           PI != PE; ++PI) {
+        addReadAdvance(SchedRW.TheDef, *PI);
+      }
+    }
+  }
+  for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
+       AI != AE; ++AI) {
+    IdxVec AliasProcIndices;
+    if ((*AI)->getValueInit("SchedModel")->isComplete()) {
+      AliasProcIndices.push_back(
+        getProcModel((*AI)->getValueAsDef("SchedModel")).Index);
+    }
+    else
+      AliasProcIndices = ProcIndices;
+    const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW"));
+    assert(AliasRW.IsRead == IsRead && "cannot alias reads to writes");
+
+    IdxVec ExpandedRWs;
+    expandRWSequence(AliasRW.Index, ExpandedRWs, IsRead);
+    for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end();
+         SI != SE; ++SI) {
+      collectRWResources(*SI, IsRead, AliasProcIndices);
+    }
+  }
+}
+
+// Collect resources for a set of read/write types and processor indices.
+void CodeGenSchedModels::collectRWResources(const IdxVec &Writes,
+                                            const IdxVec &Reads,
+                                            const IdxVec &ProcIndices) {
+
+  for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI)
+    collectRWResources(*WI, /*IsRead=*/false, ProcIndices);
+
+  for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI)
+    collectRWResources(*RI, /*IsRead=*/true, ProcIndices);
+}
+
+
+// Find the processor's resource units for this kind of resource.
+Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind,
+                                             const CodeGenProcModel &PM) const {
+  if (ProcResKind->isSubClassOf("ProcResourceUnits"))
+    return ProcResKind;
+
+  Record *ProcUnitDef = 0;
+  RecVec ProcResourceDefs =
+    Records.getAllDerivedDefinitions("ProcResourceUnits");
+
+  for (RecIter RI = ProcResourceDefs.begin(), RE = ProcResourceDefs.end();
+       RI != RE; ++RI) {
+
+    if ((*RI)->getValueAsDef("Kind") == ProcResKind
+        && (*RI)->getValueAsDef("SchedModel") == PM.ModelDef) {
+      if (ProcUnitDef) {
+        PrintFatalError((*RI)->getLoc(),
+                        "Multiple ProcessorResourceUnits associated with "
+                        + ProcResKind->getName());
+      }
+      ProcUnitDef = *RI;
     }
-    ProcModel.ItinDefList[getItinClassIdx(ItinDef)] = ItinData;
   }
+  if (!ProcUnitDef) {
+    PrintFatalError(ProcResKind->getLoc(),
+                    "No ProcessorResources associated with "
+                    + ProcResKind->getName());
+  }
+  return ProcUnitDef;
+}
+
+// Iteratively add a resource and its super resources.
+void CodeGenSchedModels::addProcResource(Record *ProcResKind,
+                                         CodeGenProcModel &PM) {
+  for (;;) {
+    Record *ProcResUnits = findProcResUnits(ProcResKind, PM);
+
+    // See if this ProcResource is already associated with this processor.
+    RecIter I = std::find(PM.ProcResourceDefs.begin(),
+                          PM.ProcResourceDefs.end(), ProcResUnits);
+    if (I != PM.ProcResourceDefs.end())
+      return;
+
+    PM.ProcResourceDefs.push_back(ProcResUnits);
+    if (!ProcResUnits->getValueInit("Super")->isComplete())
+      return;
+
+    ProcResKind = ProcResUnits->getValueAsDef("Super");
+  }
+}
+
+// Add resources for a SchedWrite to this processor if they don't exist.
+void CodeGenSchedModels::addWriteRes(Record *ProcWriteResDef, unsigned PIdx) {
+  assert(PIdx && "don't add resources to an invalid Processor model");
+
+  RecVec &WRDefs = ProcModels[PIdx].WriteResDefs;
+  RecIter WRI = std::find(WRDefs.begin(), WRDefs.end(), ProcWriteResDef);
+  if (WRI != WRDefs.end())
+    return;
+  WRDefs.push_back(ProcWriteResDef);
+
+  // Visit ProcResourceKinds referenced by the newly discovered WriteRes.
+  RecVec ProcResDefs = ProcWriteResDef->getValueAsListOfDefs("ProcResources");
+  for (RecIter WritePRI = ProcResDefs.begin(), WritePRE = ProcResDefs.end();
+       WritePRI != WritePRE; ++WritePRI) {
+    addProcResource(*WritePRI, ProcModels[PIdx]);
+  }
+}
+
+// Add resources for a ReadAdvance to this processor if they don't exist.
+void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef,
+                                        unsigned PIdx) {
+  RecVec &RADefs = ProcModels[PIdx].ReadAdvanceDefs;
+  RecIter I = std::find(RADefs.begin(), RADefs.end(), ProcReadAdvanceDef);
+  if (I != RADefs.end())
+    return;
+  RADefs.push_back(ProcReadAdvanceDef);
+}
+
+unsigned CodeGenProcModel::getProcResourceIdx(Record *PRDef) const {
+  RecIter PRPos = std::find(ProcResourceDefs.begin(), ProcResourceDefs.end(),
+                            PRDef);
+  if (PRPos == ProcResourceDefs.end())
+    PrintFatalError(PRDef->getLoc(), "ProcResource def is not included in "
+                    "the ProcResources list for " + ModelName);
+  // Idx=0 is reserved for invalid.
+  return 1 + (PRPos - ProcResourceDefs.begin());
+}
+
 #ifndef NDEBUG
-  // Check for missing itinerary entries.
-  assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
-  for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
-    if (!ProcModel.ItinDefList[i])
-      DEBUG(dbgs() << ProcModel.ItinsDef->getName()
-            << " missing itinerary for class " << SchedClasses[i].Name << '\n');
+void CodeGenProcModel::dump() const {
+  dbgs() << Index << ": " << ModelName << " "
+         << (ModelDef ? ModelDef->getName() : "inferred") << " "
+         << (ItinsDef ? ItinsDef->getName() : "no itinerary") << '\n';
+}
+
+void CodeGenSchedRW::dump() const {
+  dbgs() << Name << (IsVariadic ? " (V) " : " ");
+  if (IsSequence) {
+    dbgs() << "(";
+    dumpIdxVec(Sequence);
+    dbgs() << ")";
+  }
+}
+
+void CodeGenSchedClass::dump(const CodeGenSchedModels* SchedModels) const {
+  dbgs() << "SCHEDCLASS " << Name << '\n'
+         << "  Writes: ";
+  for (unsigned i = 0, N = Writes.size(); i < N; ++i) {
+    SchedModels->getSchedWrite(Writes[i]).dump();
+    if (i < N-1) {
+      dbgs() << '\n';
+      dbgs().indent(10);
+    }
+  }
+  dbgs() << "\n  Reads: ";
+  for (unsigned i = 0, N = Reads.size(); i < N; ++i) {
+    SchedModels->getSchedRead(Reads[i]).dump();
+    if (i < N-1) {
+      dbgs() << '\n';
+      dbgs().indent(10);
+    }
+  }
+  dbgs() << "\n  ProcIdx: "; dumpIdxVec(ProcIndices); dbgs() << '\n';
+}
+
+void PredTransitions::dump() const {
+  dbgs() << "Expanded Variants:\n";
+  for (std::vector<PredTransition>::const_iterator
+         TI = TransVec.begin(), TE = TransVec.end(); TI != TE; ++TI) {
+    dbgs() << "{";
+    for (SmallVectorImpl<PredCheck>::const_iterator
+           PCI = TI->PredTerm.begin(), PCE = TI->PredTerm.end();
+         PCI != PCE; ++PCI) {
+      if (PCI != TI->PredTerm.begin())
+        dbgs() << ", ";
+      dbgs() << SchedModels.getSchedRW(PCI->RWIdx, PCI->IsRead).Name
+             << ":" << PCI->Predicate->getName();
+    }
+    dbgs() << "},\n  => {";
+    for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+           WSI = TI->WriteSequences.begin(), WSE = TI->WriteSequences.end();
+         WSI != WSE; ++WSI) {
+      dbgs() << "(";
+      for (SmallVectorImpl<unsigned>::const_iterator
+             WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) {
+        if (WI != WSI->begin())
+          dbgs() << ", ";
+        dbgs() << SchedModels.getSchedWrite(*WI).Name;
+      }
+      dbgs() << "),";
+    }
+    dbgs() << "}\n";
   }
-#endif
 }
+#endif // NDEBUG
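
As a side note on the CodeGenSchedule.cpp changes above: the PredTransitions
machinery is easier to follow with a toy model in hand. The following
standalone sketch (all types and names are invented for illustration; it is
not code from this commit) mimics how substituteVariantOperand splits each
partial transition once per matching variant, growing the predicate term and
the expanded write sequence together:

// predtransitions_sketch.cpp -- illustrative only, not code from this tree.
#include <iostream>
#include <string>
#include <vector>

struct Variant { std::string Pred; std::vector<int> Seq; }; // guarded write seq
struct Write { bool IsVariant; int Id; std::vector<Variant> Variants; };

struct Transition {
  std::vector<std::string> Preds; // conjunction of predicates taken so far
  std::vector<int> Writes;        // expanded, concrete write sequence
};

// Substitute one operand's write across all partial transitions, splitting
// each transition once per variant (the role of substituteVariantOperand).
static std::vector<Transition> substitute(const std::vector<Transition> &In,
                                          const Write &W) {
  std::vector<Transition> Out;
  for (std::vector<Transition>::const_iterator I = In.begin(), E = In.end();
       I != E; ++I) {
    if (!W.IsVariant) {
      Transition T = *I;
      T.Writes.push_back(W.Id);
      Out.push_back(T);
      continue;
    }
    for (std::vector<Variant>::const_iterator VI = W.Variants.begin(),
           VE = W.Variants.end(); VI != VE; ++VI) {
      Transition T = *I;
      T.Preds.push_back(VI->Pred);
      T.Writes.insert(T.Writes.end(), VI->Seq.begin(), VI->Seq.end());
      Out.push_back(T);
    }
  }
  return Out;
}

int main() {
  // One concrete write (id 1) followed by a variant write with two guarded
  // alternatives, roughly what a SchedVariant contributes to an operand.
  std::vector<Write> Operands;
  Write W1 = { false, 1, std::vector<Variant>() };
  Write W2 = { true, 0, std::vector<Variant>() };
  Variant V1 = { "IsPredicated", std::vector<int>(2, 2) }; // writes {2, 2}
  Variant V2 = { "Default", std::vector<int>(1, 4) };      // write  {4}
  W2.Variants.push_back(V1);
  W2.Variants.push_back(V2);
  Operands.push_back(W1);
  Operands.push_back(W2);

  std::vector<Transition> Trans(1); // seed transition: empty predicate term
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    Trans = substitute(Trans, Operands[i]);

  for (unsigned i = 0, e = Trans.size(); i != e; ++i) {
    std::cout << "{ ";
    for (unsigned p = 0, pe = Trans[i].Preds.size(); p != pe; ++p)
      std::cout << Trans[i].Preds[p] << ' ';
    std::cout << "} => ";
    for (unsigned w = 0, we = Trans[i].Writes.size(); w != we; ++w)
      std::cout << Trans[i].Writes[w] << ' ';
    std::cout << '\n';
  }
  return 0;
}

Compiled and run, this prints one expanded transition per line
("{ IsPredicated } => 1 2 2" and "{ Default } => 1 4"); inferFromTransitions
then turns each such predicate/sequence pair into a CodeGenSchedTransition to
a newly created SchedClass.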

Modified: llvm/branches/AMDILBackend/utils/TableGen/CodeGenSchedule.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/CodeGenSchedule.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/CodeGenSchedule.h (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/CodeGenSchedule.h Tue Jan 15 11:16:16 2013
@@ -15,6 +15,7 @@
 #ifndef CODEGEN_SCHEDULE_H
 #define CODEGEN_SCHEDULE_H
 
+#include "SetTheory.h"
 #include "llvm/TableGen/Record.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/ADT/DenseMap.h"
@@ -23,21 +24,131 @@
 namespace llvm {
 
 class CodeGenTarget;
+class CodeGenSchedModels;
+class CodeGenInstruction;
 
-// Scheduling class.
-//
-// Each instruction description will be mapped to a scheduling class. It may be
-// an explicitly defined itinerary class, or an inferred class in which case
-// ItinClassDef == NULL.
+typedef std::vector<Record*> RecVec;
+typedef std::vector<Record*>::const_iterator RecIter;
+
+typedef std::vector<unsigned> IdxVec;
+typedef std::vector<unsigned>::const_iterator IdxIter;
+
+void splitSchedReadWrites(const RecVec &RWDefs,
+                          RecVec &WriteDefs, RecVec &ReadDefs);
+
+/// We have two kinds of SchedReadWrites: explicitly defined and inferred
+/// sequences.  TheDef is nonnull for explicit SchedWrites, but Sequence may or
+/// may not be empty. TheDef is null for inferred sequences, and Sequence must
+/// be nonempty.
+///
+/// IsVariadic controls whether the variants are expanded into multiple operands
+/// or a sequence of writes on one operand.
+struct CodeGenSchedRW {
+  unsigned Index;
+  std::string Name;
+  Record *TheDef;
+  bool IsRead;
+  bool IsAlias;
+  bool HasVariants;
+  bool IsVariadic;
+  bool IsSequence;
+  IdxVec Sequence;
+  RecVec Aliases;
+
+  CodeGenSchedRW(): Index(0), TheDef(0), IsAlias(false), HasVariants(false),
+                    IsVariadic(false), IsSequence(false) {}
+  CodeGenSchedRW(unsigned Idx, Record *Def): Index(Idx), TheDef(Def),
+                                             IsAlias(false), IsVariadic(false) {
+    Name = Def->getName();
+    IsRead = Def->isSubClassOf("SchedRead");
+    HasVariants = Def->isSubClassOf("SchedVariant");
+    if (HasVariants)
+      IsVariadic = Def->getValueAsBit("Variadic");
+
+    // Read records don't currently have sequences, but support could easily
+    // be added. Note that implicit Reads (from ReadVariant) may have a Sequence
+    // (but no record).
+    IsSequence = Def->isSubClassOf("WriteSequence");
+  }
+
+  CodeGenSchedRW(unsigned Idx, bool Read, const IdxVec &Seq,
+                 const std::string &Name):
+    Index(Idx), Name(Name), TheDef(0), IsRead(Read), IsAlias(false),
+    HasVariants(false), IsVariadic(false), IsSequence(true), Sequence(Seq) {
+    assert(Sequence.size() > 1 && "implied sequence needs >1 RWs");
+  }
+
+  bool isValid() const {
+    assert((!HasVariants || TheDef) && "Variant write needs record def");
+    assert((!IsVariadic || HasVariants) && "Variadic write needs variants");
+    assert((!IsSequence || !HasVariants) && "Sequence can't have variant");
+    assert((!IsSequence || !Sequence.empty()) && "Sequence should be nonempty");
+    assert((!IsAlias || Aliases.empty()) && "Alias cannot have aliases");
+    return TheDef || !Sequence.empty();
+  }
+
+#ifndef NDEBUG
+  void dump() const;
+#endif
+};
+
+/// Represent a transition between SchedClasses induced by SchedVariant.
+struct CodeGenSchedTransition {
+  unsigned ToClassIdx;
+  IdxVec ProcIndices;
+  RecVec PredTerm;
+};
+
+/// Scheduling class.
+///
+/// Each instruction description will be mapped to a scheduling class. There are
+/// four types of classes:
+///
+/// 1) An explicitly defined itinerary class with ItinClassDef set.
+/// Writes and Reads are empty. ProcIndices contains 0 for any processor.
+///
+/// 2) An implied class with a list of SchedWrites and SchedReads that are
+/// defined in an instruction definition and which are common across all
+/// subtargets. ProcIndices contains 0 for any processor.
+///
+/// 3) An implied class with a list of InstRW records that map instructions to
+/// SchedWrites and SchedReads per-processor. InstrClassMap should map the same
+/// instructions to this class. ProcIndices contains all the processors that
+/// provided InstrRW records for this class. ItinClassDef or Writes/Reads may
+/// still be defined for processors with no InstRW entry.
+///
+/// 4) An inferred class represents a variant of another class that may be
+/// resolved at runtime. ProcIndices contains the set of processors that may
+/// require the class. ProcIndices are propagated through SchedClasses as
+/// variants are expanded. Multiple SchedClasses may be inferred from an
+/// itinerary class. Each inherits the processor index from the ItinRW record
+/// that mapped the itinerary class to the variant Writes or Reads.
 struct CodeGenSchedClass {
   std::string Name;
-  unsigned Index;
   Record *ItinClassDef;
 
-  CodeGenSchedClass(): Index(0), ItinClassDef(0) {}
-  CodeGenSchedClass(Record *rec): Index(0), ItinClassDef(rec) {
+  IdxVec Writes;
+  IdxVec Reads;
+  // Sorted list of ProcIdx, where ProcIdx==0 implies any processor.
+  IdxVec ProcIndices;
+
+  std::vector<CodeGenSchedTransition> Transitions;
+
+  // InstRW records associated with this class. These records may refer to an
+  // Instruction no longer mapped to this class by InstrClassMap. These
+  // Instructions should be ignored by this class because they have been split
+  // off to join another inferred class.
+  RecVec InstRWs;
+
+  CodeGenSchedClass(): ItinClassDef(0) {}
+  CodeGenSchedClass(Record *rec): ItinClassDef(rec) {
     Name = rec->getName();
+    ProcIndices.push_back(0);
   }
+
+#ifndef NDEBUG
+  void dump(const CodeGenSchedModels *SchedModels) const;
+#endif
 };
 
 // Processor model.
@@ -55,28 +166,69 @@
 //
 // ItinDefList orders this processor's InstrItinData records by SchedClass idx.
 struct CodeGenProcModel {
+  unsigned Index;
   std::string ModelName;
   Record *ModelDef;
   Record *ItinsDef;
 
-  // Array of InstrItinData records indexed by CodeGenSchedClass::Index.
-  // The list is empty if the subtarget has no itineraries.
-  std::vector<Record *> ItinDefList;
+  // Derived members...
+
+  // Array of InstrItinData records indexed by a CodeGenSchedClass index.
+  // This list is empty if the Processor has no value for Itineraries.
+  // Initialized by collectProcItins().
+  RecVec ItinDefList;
+
+  // Map itinerary classes to per-operand resources.
+  // This list is empty if no ItinRW refers to this Processor.
+  RecVec ItinRWDefs;
+
+  // All read/write resources associated with this processor.
+  RecVec WriteResDefs;
+  RecVec ReadAdvanceDefs;
+
+  // Per-operand machine model resources associated with this processor.
+  RecVec ProcResourceDefs;
+
+  CodeGenProcModel(unsigned Idx, const std::string &Name, Record *MDef,
+                   Record *IDef) :
+    Index(Idx), ModelName(Name), ModelDef(MDef), ItinsDef(IDef) {}
+
+  bool hasInstrSchedModel() const {
+    return !WriteResDefs.empty() || !ItinRWDefs.empty();
+  }
+
+  unsigned getProcResourceIdx(Record *PRDef) const;
 
-  CodeGenProcModel(const std::string &Name, Record *MDef, Record *IDef):
-    ModelName(Name), ModelDef(MDef), ItinsDef(IDef) {}
+#ifndef NDEBUG
+  void dump() const;
+#endif
 };
 
-// Top level container for machine model data.
+/// Top level container for machine model data.
 class CodeGenSchedModels {
   RecordKeeper &Records;
   const CodeGenTarget &Target;
 
+  // Map dag expressions to Instruction lists.
+  SetTheory Sets;
+
+  // List of unique processor models.
+  std::vector<CodeGenProcModel> ProcModels;
+
+  // Map Processor's MachineModel or ProcItin to a CodeGenProcModel index.
+  typedef DenseMap<Record*, unsigned> ProcModelMapTy;
+  ProcModelMapTy ProcModelMap;
+
+  // Per-operand SchedReadWrite types.
+  std::vector<CodeGenSchedRW> SchedWrites;
+  std::vector<CodeGenSchedRW> SchedReads;
+
   // List of unique SchedClasses.
   std::vector<CodeGenSchedClass> SchedClasses;
 
   // Map SchedClass name to itinerary index.
-  // These are either explicit itinerary classes or inferred classes.
+  // These are either explicit itinerary classes or classes implied by
+  // instruction definitions with SchedReadWrite lists.
   StringMap<unsigned> SchedClassIdxMap;
 
   // SchedClass indices 1 up to and including NumItineraryClasses identify
@@ -84,22 +236,80 @@
   // definitions. NoItinerary always has index 0 regardless of whether it is
   // explicitly referenced.
   //
-  // Any inferred SchedClass have a index greater than NumItineraryClasses.
+  // Any implied SchedClass has an index greater than NumItineraryClasses.
   unsigned NumItineraryClasses;
 
-  // List of unique processor models.
-  std::vector<CodeGenProcModel> ProcModels;
+  // Any inferred SchedClass has an index greater than NumInstrSchedClasses.
+  unsigned NumInstrSchedClasses;
 
-  // Map Processor's MachineModel + ProcItin fields to a CodeGenProcModel index.
-  typedef DenseMap<std::pair<Record*, Record*>, unsigned> ProcModelMapTy;
-  ProcModelMapTy ProcModelMap;
-
-  // True if any processors have nonempty itineraries.
-  bool HasProcItineraries;
+  // Map Instruction to SchedClass index. Only for Instructions mentioned in
+  // InstRW records.
+  typedef DenseMap<Record*, unsigned> InstClassMapTy;
+  InstClassMapTy InstrClassMap;
 
 public:
   CodeGenSchedModels(RecordKeeper& RK, const CodeGenTarget &TGT);
 
+  Record *getModelOrItinDef(Record *ProcDef) const {
+    Record *ModelDef = ProcDef->getValueAsDef("SchedModel");
+    Record *ItinsDef = ProcDef->getValueAsDef("ProcItin");
+    if (!ItinsDef->getValueAsListOfDefs("IID").empty()) {
+      assert(ModelDef->getValueAsBit("NoModel")
+             && "Itineraries must be defined within SchedMachineModel");
+      return ItinsDef;
+    }
+    return ModelDef;
+  }
+
+  const CodeGenProcModel &getModelForProc(Record *ProcDef) const {
+    Record *ModelDef = getModelOrItinDef(ProcDef);
+    ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
+    assert(I != ProcModelMap.end() && "missing machine model");
+    return ProcModels[I->second];
+  }
+
+  const CodeGenProcModel &getProcModel(Record *ModelDef) const {
+    ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
+    assert(I != ProcModelMap.end() && "missing machine model");
+    return ProcModels[I->second];
+  }
+
+  // Iterate over the unique processor models.
+  typedef std::vector<CodeGenProcModel>::const_iterator ProcIter;
+  ProcIter procModelBegin() const { return ProcModels.begin(); }
+  ProcIter procModelEnd() const { return ProcModels.end(); }
+
+  // Get a SchedWrite from its index.
+  const CodeGenSchedRW &getSchedWrite(unsigned Idx) const {
+    assert(Idx < SchedWrites.size() && "bad SchedWrite index");
+    assert(SchedWrites[Idx].isValid() && "invalid SchedWrite");
+    return SchedWrites[Idx];
+  }
+  // Get a SchedRead from its index.
+  const CodeGenSchedRW &getSchedRead(unsigned Idx) const {
+    assert(Idx < SchedReads.size() && "bad SchedRead index");
+    assert(SchedReads[Idx].isValid() && "invalid SchedRead");
+    return SchedReads[Idx];
+  }
+
+  const CodeGenSchedRW &getSchedRW(unsigned Idx, bool IsRead) const {
+    return IsRead ? getSchedRead(Idx) : getSchedWrite(Idx);
+  }
+  CodeGenSchedRW &getSchedRW(Record *Def) {
+    bool IsRead = Def->isSubClassOf("SchedRead");
+    unsigned Idx = getSchedRWIdx(Def, IsRead);
+    return const_cast<CodeGenSchedRW&>(
+      IsRead ? getSchedRead(Idx) : getSchedWrite(Idx));
+  }
+  const CodeGenSchedRW &getSchedRW(Record *Def) const {
+    return const_cast<CodeGenSchedModels&>(*this).getSchedRW(Def);
+  }
+
+  unsigned getSchedRWIdx(Record *Def, bool IsRead, unsigned After = 0) const;
+
+  // Return true if the given write record is referenced by a ReadAdvance.
+  bool hasReadOfWrite(Record *WriteDef) const;
+
   // Check if any instructions are assigned to an explicit itinerary class other
   // than NoItinerary.
   bool hasItineraryClasses() const { return NumItineraryClasses > 0; }
@@ -111,60 +321,90 @@
   }
 
   // Get a SchedClass from its index.
-  const CodeGenSchedClass &getSchedClass(unsigned Idx) {
+  CodeGenSchedClass &getSchedClass(unsigned Idx) {
     assert(Idx < SchedClasses.size() && "bad SchedClass index");
     return SchedClasses[Idx];
   }
-
-  // Get an itinerary class's index. Value indices are '0' for NoItinerary up to
-  // and including numItineraryClasses().
-  unsigned getItinClassIdx(Record *ItinDef) const {
-    assert(SchedClassIdxMap.count(ItinDef->getName()) && "missing ItinClass");
-    unsigned Idx = SchedClassIdxMap.lookup(ItinDef->getName());
-    assert(Idx <= NumItineraryClasses && "bad ItinClass index");
-    return Idx;
-  }
-
-  bool hasProcessorItineraries() const {
-    return HasProcItineraries;
-  }
-
-  // Get an existing machine model for a processor definition.
-  const CodeGenProcModel &getProcModel(Record *ProcDef) const {
-    unsigned idx = getProcModelIdx(ProcDef);
-    assert(idx < ProcModels.size() && "missing machine model");
-    return ProcModels[idx];
+  const CodeGenSchedClass &getSchedClass(unsigned Idx) const {
+    assert(Idx < SchedClasses.size() && "bad SchedClass index");
+    return SchedClasses[Idx];
   }
 
-  // Iterate over the unique processor models.
-  typedef std::vector<CodeGenProcModel>::const_iterator ProcIter;
-  ProcIter procModelBegin() const { return ProcModels.begin(); }
-  ProcIter procModelEnd() const { return ProcModels.end(); }
+  // Get the SchedClass index for an instruction. Instructions with no
+  // itinerary, no SchedReadWrites, and no InstRW references return 0
+  // for NoItinerary.
+  unsigned getSchedClassIdx(const CodeGenInstruction &Inst) const;
 
-private:
-  // Get a key that can uniquely identify a machine model.
-  ProcModelMapTy::key_type getProcModelKey(Record *ProcDef) const {
-    Record *ModelDef = ProcDef->getValueAsDef("SchedModel");
-    Record *ItinsDef = ProcDef->getValueAsDef("ProcItin");
-    return std::make_pair(ModelDef, ItinsDef);
-  }
+  unsigned getSchedClassIdx(const RecVec &RWDefs) const;
 
-  // Get the unique index of a machine model.
-  unsigned getProcModelIdx(Record *ProcDef) const {
-    ProcModelMapTy::const_iterator I =
-      ProcModelMap.find(getProcModelKey(ProcDef));
-    if (I == ProcModelMap.end())
-      return ProcModels.size();
-    return I->second;
+  unsigned getSchedClassIdxForItin(const Record *ItinDef) {
+    return SchedClassIdxMap[ItinDef->getName()];
   }
 
+  typedef std::vector<CodeGenSchedClass>::const_iterator SchedClassIter;
+  SchedClassIter schedClassBegin() const { return SchedClasses.begin(); }
+  SchedClassIter schedClassEnd() const { return SchedClasses.end(); }
+
+  void findRWs(const RecVec &RWDefs, IdxVec &Writes, IdxVec &Reads) const;
+  void findRWs(const RecVec &RWDefs, IdxVec &RWs, bool IsRead) const;
+  void expandRWSequence(unsigned RWIdx, IdxVec &RWSeq, bool IsRead) const;
+  void expandRWSeqForProc(unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
+                          const CodeGenProcModel &ProcModel) const;
+
+  unsigned addSchedClass(const IdxVec &OperWrites, const IdxVec &OperReads,
+                         const IdxVec &ProcIndices);
+
+  unsigned findOrInsertRW(ArrayRef<unsigned> Seq, bool IsRead);
+
+  unsigned findSchedClassIdx(const IdxVec &Writes, const IdxVec &Reads) const;
+
+  Record *findProcResUnits(Record *ProcResKind,
+                           const CodeGenProcModel &PM) const;
+
+private:
+  void collectProcModels();
+
   // Initialize a new processor model if it is unique.
   void addProcModel(Record *ProcDef);
 
-  void CollectSchedClasses();
-  void CollectProcModels();
-  void CollectProcItin(CodeGenProcModel &ProcModel,
-                       std::vector<Record*> ItinRecords);
+  void collectSchedRW();
+
+  std::string genRWName(const IdxVec& Seq, bool IsRead);
+  unsigned findRWForSequence(const IdxVec &Seq, bool IsRead);
+
+  void collectSchedClasses();
+
+  std::string createSchedClassName(const IdxVec &OperWrites,
+                                   const IdxVec &OperReads);
+  std::string createSchedClassName(const RecVec &InstDefs);
+  void createInstRWClass(Record *InstRWDef);
+
+  void collectProcItins();
+
+  void collectProcItinRW();
+
+  void inferSchedClasses();
+
+  void inferFromRW(const IdxVec &OperWrites, const IdxVec &OperReads,
+                   unsigned FromClassIdx, const IdxVec &ProcIndices);
+  void inferFromItinClass(Record *ItinClassDef, unsigned FromClassIdx);
+  void inferFromInstRWs(unsigned SCIdx);
+
+  void collectProcResources();
+
+  void collectItinProcResources(Record *ItinClassDef);
+
+  void collectRWResources(unsigned RWIdx, bool IsRead,
+                          const IdxVec &ProcIndices);
+
+  void collectRWResources(const IdxVec &Writes, const IdxVec &Reads,
+                          const IdxVec &ProcIndices);
+
+  void addProcResource(Record *ProcResourceKind, CodeGenProcModel &PM);
+
+  void addWriteRes(Record *ProcWriteResDef, unsigned PIdx);
+
+  void addReadAdvance(Record *ProcReadAdvanceDef, unsigned PIdx);
 };
 
 } // namespace llvm
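
For orientation, here is a rough sketch of how a TableGen backend might walk
the containers declared in the header above. Only the iterator and accessor
names come from the header as modified by this patch; the Emit* helpers are
hypothetical placeholders:

// Sketch only; assumes it is built inside utils/TableGen alongside this header.
#include "CodeGenSchedule.h"
using namespace llvm;

static void emitPerProcessorTables(const CodeGenSchedModels &SchedModels) {
  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
         PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
    if (!PI->hasInstrSchedModel())
      continue; // itinerary-only or default model: nothing new to emit
    // WriteResDefs, ReadAdvanceDefs and ProcResourceDefs were sorted by
    // collectProcResources(), so they can be emitted as stable tables.
    // EmitWriteResTable(*PI);      // hypothetical
    // EmitReadAdvanceTable(*PI);   // hypothetical
  }
  for (CodeGenSchedModels::SchedClassIter
         SCI = SchedModels.schedClassBegin(),
         SCE = SchedModels.schedClassEnd(); SCI != SCE; ++SCI) {
    // Each class carries the Writes/Reads it was built from plus any
    // predicate-guarded Transitions added by inferFromRW().
    // EmitSchedClassRow(*SCI);     // hypothetical
  }
}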

Modified: llvm/branches/AMDILBackend/utils/TableGen/CodeGenTarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/CodeGenTarget.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/CodeGenTarget.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/CodeGenTarget.cpp Tue Jan 15 11:16:16 2013
@@ -10,13 +10,14 @@
 // This class wraps target description classes used by the various code
 // generation TableGen backends.  This makes it easier to access the data and
 // provides a single place that needs to check it for validity.  All of these
-// classes throw exceptions on error conditions.
+// classes abort on error conditions.
 //
 //===----------------------------------------------------------------------===//
 
 #include "CodeGenTarget.h"
 #include "CodeGenIntrinsics.h"
 #include "CodeGenSchedule.h"
+#include "llvm/TableGen/Error.h"
 #include "llvm/TableGen/Record.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/STLExtras.h"
@@ -68,22 +69,30 @@
   case MVT::x86mmx:   return "MVT::x86mmx";
   case MVT::Glue:     return "MVT::Glue";
   case MVT::isVoid:   return "MVT::isVoid";
+  case MVT::v2i1:     return "MVT::v2i1";
+  case MVT::v4i1:     return "MVT::v4i1";
+  case MVT::v8i1:     return "MVT::v8i1";
+  case MVT::v16i1:    return "MVT::v16i1";
   case MVT::v2i8:     return "MVT::v2i8";
   case MVT::v4i8:     return "MVT::v4i8";
   case MVT::v8i8:     return "MVT::v8i8";
   case MVT::v16i8:    return "MVT::v16i8";
   case MVT::v32i8:    return "MVT::v32i8";
+  case MVT::v1i16:    return "MVT::v1i16";
   case MVT::v2i16:    return "MVT::v2i16";
   case MVT::v4i16:    return "MVT::v4i16";
   case MVT::v8i16:    return "MVT::v8i16";
   case MVT::v16i16:   return "MVT::v16i16";
+  case MVT::v1i32:    return "MVT::v1i32";
   case MVT::v2i32:    return "MVT::v2i32";
   case MVT::v4i32:    return "MVT::v4i32";
   case MVT::v8i32:    return "MVT::v8i32";
+  case MVT::v16i32:   return "MVT::v16i32";
   case MVT::v1i64:    return "MVT::v1i64";
   case MVT::v2i64:    return "MVT::v2i64";
   case MVT::v4i64:    return "MVT::v4i64";
   case MVT::v8i64:    return "MVT::v8i64";
+  case MVT::v16i64:   return "MVT::v16i64";
   case MVT::v2f16:    return "MVT::v2f16";
   case MVT::v2f32:    return "MVT::v2f32";
   case MVT::v4f32:    return "MVT::v4f32";
@@ -116,9 +125,9 @@
   : Records(records), RegBank(0), SchedModels(0) {
   std::vector<Record*> Targets = Records.getAllDerivedDefinitions("Target");
   if (Targets.size() == 0)
-    throw std::string("ERROR: No 'Target' subclasses defined!");
+    PrintFatalError("ERROR: No 'Target' subclasses defined!");
   if (Targets.size() != 1)
-    throw std::string("ERROR: Multiple subclasses of Target defined!");
+    PrintFatalError("ERROR: Multiple subclasses of Target defined!");
   TargetRec = Targets[0];
 }
 
@@ -152,7 +161,7 @@
 Record *CodeGenTarget::getAsmParser() const {
   std::vector<Record*> LI = TargetRec->getValueAsListOfDefs("AssemblyParsers");
   if (AsmParserNum >= LI.size())
-    throw "Target does not have an AsmParser #" + utostr(AsmParserNum) + "!";
+    PrintFatalError("Target does not have an AsmParser #" + utostr(AsmParserNum) + "!");
   return LI[AsmParserNum];
 }
 
@@ -163,7 +172,7 @@
   std::vector<Record*> LI =
     TargetRec->getValueAsListOfDefs("AssemblyParserVariants");
   if (i >= LI.size())
-    throw "Target does not have an AsmParserVariant #" + utostr(i) + "!";
+    PrintFatalError("Target does not have an AsmParserVariant #" + utostr(i) + "!");
   return LI[i];
 }
 
@@ -181,7 +190,7 @@
 Record *CodeGenTarget::getAsmWriter() const {
   std::vector<Record*> LI = TargetRec->getValueAsListOfDefs("AssemblyWriters");
   if (AsmWriterNum >= LI.size())
-    throw "Target does not have an AsmWriter #" + utostr(AsmWriterNum) + "!";
+    PrintFatalError("Target does not have an AsmWriter #" + utostr(AsmWriterNum) + "!");
   return LI[AsmWriterNum];
 }
 
@@ -199,12 +208,11 @@
 /// getRegisterByName - If there is a register with the specific AsmName,
 /// return it.
 const CodeGenRegister *CodeGenTarget::getRegisterByName(StringRef Name) const {
-  const std::vector<CodeGenRegister*> &Regs = getRegBank().getRegisters();
-  for (unsigned i = 0, e = Regs.size(); i != e; ++i)
-    if (Regs[i]->TheDef->getValueAsString("AsmName") == Name)
-      return Regs[i];
-
-  return 0;
+  const StringMap<CodeGenRegister*> &Regs = getRegBank().getRegistersByName();
+  StringMap<CodeGenRegister*>::const_iterator I = Regs.find(Name);
+  if (I == Regs.end())
+    return 0;
+  return I->second;
 }
 
 std::vector<MVT::SimpleValueType> CodeGenTarget::
@@ -249,7 +257,7 @@
 void CodeGenTarget::ReadInstructions() const {
   std::vector<Record*> Insts = Records.getAllDerivedDefinitions("Instruction");
   if (Insts.size() <= 2)
-    throw std::string("No 'Instruction' subclasses defined!");
+    PrintFatalError("No 'Instruction' subclasses defined!");
 
   // Parse the instructions defined in the .td file.
   for (unsigned i = 0, e = Insts.size(); i != e; ++i)
@@ -265,7 +273,7 @@
   DenseMap<const Record*, CodeGenInstruction*>::const_iterator
     I = Insts.find(Rec);
   if (Rec == 0 || I == Insts.end())
-    throw std::string("Could not find '") + Name + "' instruction!";
+    PrintFatalError(std::string("Could not find '") + Name + "' instruction!");
   return I->second;
 }
 
@@ -300,6 +308,8 @@
     "REG_SEQUENCE",
     "COPY",
     "BUNDLE",
+    "LIFETIME_START",
+    "LIFETIME_END",
     0
   };
   const DenseMap<const Record*, CodeGenInstruction*> &Insts = getInstructions();
@@ -334,6 +344,15 @@
   return getInstructionSet()->getValueAsBit("isLittleEndianEncoding");
 }
 
+/// guessInstructionProperties - Return true if it's OK to guess instruction
+/// properties instead of raising an error.
+///
+/// This is configurable as a temporary migration aid. It will eventually be
+/// permanently false.
+bool CodeGenTarget::guessInstructionProperties() const {
+  return getInstructionSet()->getValueAsBit("guessInstructionProperties");
+}
+
 //===----------------------------------------------------------------------===//
 // ComplexPattern implementation
 //
@@ -401,7 +420,7 @@
 
   if (DefName.size() <= 4 ||
       std::string(DefName.begin(), DefName.begin() + 4) != "int_")
-    throw "Intrinsic '" + DefName + "' does not start with 'int_'!";
+    PrintFatalError("Intrinsic '" + DefName + "' does not start with 'int_'!");
 
   EnumName = std::string(DefName.begin()+4, DefName.end());
 
@@ -421,7 +440,7 @@
     // Verify it starts with "llvm.".
     if (Name.size() <= 5 ||
         std::string(Name.begin(), Name.begin() + 5) != "llvm.")
-      throw "Intrinsic '" + DefName + "'s name does not start with 'llvm.'!";
+      PrintFatalError("Intrinsic '" + DefName + "'s name does not start with 'llvm.'!");
   }
 
   // If TargetPrefix is specified, make sure that Name starts with
@@ -430,8 +449,8 @@
     if (Name.size() < 6+TargetPrefix.size() ||
         std::string(Name.begin() + 5, Name.begin() + 6 + TargetPrefix.size())
         != (TargetPrefix + "."))
-      throw "Intrinsic '" + DefName + "' does not start with 'llvm." +
-        TargetPrefix + ".'!";
+      PrintFatalError("Intrinsic '" + DefName + "' does not start with 'llvm." +
+        TargetPrefix + ".'!");
   }
 
   // Parse the list of return types.
@@ -463,7 +482,7 @@
 
     // Reject invalid types.
     if (VT == MVT::isVoid)
-      throw "Intrinsic '" + DefName + " has void in result type list!";
+      PrintFatalError("Intrinsic '" + DefName + " has void in result type list!");
 
     IS.RetVTs.push_back(VT);
     IS.RetTypeDefs.push_back(TyEl);
@@ -497,7 +516,7 @@
 
     // Reject invalid types.
     if (VT == MVT::isVoid && i != e-1 /*void at end means varargs*/)
-      throw "Intrinsic '" + DefName + " has void in result type list!";
+      PrintFatalError("Intrinsic '" + DefName + " has void in result type list!");
 
     IS.ParamVTs.push_back(VT);
     IS.ParamTypeDefs.push_back(TyEl);
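
Besides switching the error paths from C++ exceptions to PrintFatalError, this
file changes getRegisterByName() to use the register bank's by-name StringMap.
A minimal illustration of that lookup pattern, with an invented Reg type
standing in for CodeGenRegister:

// Illustration only; Reg and lookupByAsmName are invented for this example.
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"

struct Reg { unsigned EnumValue; };

static const Reg *lookupByAsmName(const llvm::StringMap<Reg*> &RegsByName,
                                  llvm::StringRef Name) {
  llvm::StringMap<Reg*>::const_iterator I = RegsByName.find(Name);
  return I == RegsByName.end() ? 0 : I->second; // hash lookup, no linear scan
}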

Modified: llvm/branches/AMDILBackend/utils/TableGen/CodeGenTarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/CodeGenTarget.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/CodeGenTarget.h (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/CodeGenTarget.h Tue Jan 15 11:16:16 2013
@@ -9,8 +9,8 @@
 //
 // This file defines wrappers for the Target class and related global
 // functionality.  This makes it easier to access the data and provides a single
-// place that needs to check it for validity.  All of these classes throw
-// exceptions on error conditions.
+// place that needs to check it for validity.  All of these classes abort
+// on error conditions.
 //
 //===----------------------------------------------------------------------===//
 
@@ -177,6 +177,10 @@
   ///
   bool isLittleEndianEncoding() const;
 
+  /// guessInstructionProperties - should we just guess unset instruction
+  /// properties?
+  bool guessInstructionProperties() const;
+
 private:
   void ComputeInstrsByEnum() const;
 };

Modified: llvm/branches/AMDILBackend/utils/TableGen/DAGISelMatcher.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/DAGISelMatcher.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/DAGISelMatcher.h (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/DAGISelMatcher.h Tue Jan 15 11:16:16 2013
@@ -99,8 +99,6 @@
 
   OwningPtr<Matcher> &getNextPtr() { return Next; }
 
-  static inline bool classof(const Matcher *) { return true; }
-
   bool isEqual(const Matcher *M) const {
     if (getKind() != M->getKind()) return false;
     return isEqualImpl(M);
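
The deletion above drops the always-true classof(const Matcher *) overload,
which the cast infrastructure no longer requires. A small sketch (invented
kind names, not the real Matcher hierarchy) of the pattern that remains
sufficient for isa<> and dyn_cast<>:

// Sketch only; the real Matcher kinds and classes differ.
#include "llvm/Support/Casting.h"

struct MatcherSketch {
  enum KindTy { CheckSame, CheckInteger };
  explicit MatcherSketch(KindTy K) : Kind(K) {}
  KindTy getKind() const { return Kind; }
private:
  KindTy Kind;
};

struct CheckIntegerSketch : MatcherSketch {
  CheckIntegerSketch() : MatcherSketch(CheckInteger) {}
  // Only the per-subclass check is needed by isa<>/dyn_cast<>.
  static bool classof(const MatcherSketch *M) {
    return M->getKind() == CheckInteger;
  }
};

static bool isIntegerCheck(const MatcherSketch *M) {
  return llvm::isa<CheckIntegerSketch>(M);
}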

Modified: llvm/branches/AMDILBackend/utils/TableGen/DAGISelMatcherEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/DAGISelMatcherEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/DAGISelMatcherEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/DAGISelMatcherEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -598,7 +598,7 @@
 void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
   // Emit pattern predicates.
   if (!PatternPredicates.empty()) {
-    OS << "bool CheckPatternPredicate(unsigned PredNo) const {\n";
+    OS << "virtual bool CheckPatternPredicate(unsigned PredNo) const {\n";
     OS << "  switch (PredNo) {\n";
     OS << "  default: llvm_unreachable(\"Invalid predicate in table?\");\n";
     for (unsigned i = 0, e = PatternPredicates.size(); i != e; ++i)
@@ -616,7 +616,8 @@
     PFsByName[I->first->getName()] = I->second;
 
   if (!NodePredicates.empty()) {
-    OS << "bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {\n";
+    OS << "virtual bool CheckNodePredicate(SDNode *Node,\n";
+    OS << "                                unsigned PredNo) const {\n";
     OS << "  switch (PredNo) {\n";
     OS << "  default: llvm_unreachable(\"Invalid predicate in table?\");\n";
     for (unsigned i = 0, e = NodePredicates.size(); i != e; ++i) {
@@ -635,8 +636,8 @@
   // Emit CompletePattern matchers.
   // FIXME: This should be const.
   if (!ComplexPatterns.empty()) {
-    OS << "bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N,\n";
-    OS << "                         unsigned PatternNo,\n";
+    OS << "virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent,\n";
+    OS << "                                 SDValue N, unsigned PatternNo,\n";
     OS << "         SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {\n";
     OS << "  unsigned NextRes = Result.size();\n";
     OS << "  switch (PatternNo) {\n";
@@ -676,7 +677,7 @@
   // Emit SDNodeXForm handlers.
   // FIXME: This should be const.
   if (!NodeXForms.empty()) {
-    OS << "SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {\n";
+    OS << "virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {\n";
     OS << "  switch (XFormNo) {\n";
     OS << "  default: llvm_unreachable(\"Invalid xform # in table?\");\n";
 

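The emitter now marks the generated predicate hooks as virtual, so they read
as explicit overrides of the virtual defaults that SelectionDAGISel provides.
A simplified sketch of the generated shape (invented class names, not the
real output):

// Illustration only.
struct ISelBaseSketch {                           // stand-in for SelectionDAGISel
  virtual ~ISelBaseSketch() {}
  virtual bool CheckPatternPredicate(unsigned PredNo) const { return false; }
};

struct GeneratedISelSketch : ISelBaseSketch {
  virtual bool CheckPatternPredicate(unsigned PredNo) const {
    switch (PredNo) {
    default: return false;                        // real output calls llvm_unreachable
    case 0: return true;                          // e.g. Subtarget->hasSSE2()
    }
  }
};
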
Modified: llvm/branches/AMDILBackend/utils/TableGen/DAGISelMatcherGen.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/DAGISelMatcherGen.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/DAGISelMatcherGen.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/DAGISelMatcherGen.cpp Tue Jan 15 11:16:16 2013
@@ -10,6 +10,7 @@
 #include "DAGISelMatcher.h"
 #include "CodeGenDAGPatterns.h"
 #include "CodeGenRegisters.h"
+#include "llvm/TableGen/Error.h"
 #include "llvm/TableGen/Record.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
@@ -172,15 +173,10 @@
   // diagnostics, which we know are impossible at this point.
   TreePattern &TP = *CGP.pf_begin()->second;
 
-  try {
-    bool MadeChange = true;
-    while (MadeChange)
-      MadeChange = PatWithNoTypes->ApplyTypeConstraints(TP,
-                                                true/*Ignore reg constraints*/);
-  } catch (...) {
-    errs() << "Type constraint application shouldn't fail!";
-    abort();
-  }
+  bool MadeChange = true;
+  while (MadeChange)
+    MadeChange = PatWithNoTypes->ApplyTypeConstraints(TP,
+                                              true/*Ignore reg constraints*/);
 }
 
 
@@ -203,7 +199,7 @@
   assert(N->isLeaf() && "Not a leaf?");
 
   // Direct match against an integer constant.
-  if (IntInit *II = dynamic_cast<IntInit*>(N->getLeafValue())) {
+  if (IntInit *II = dyn_cast<IntInit>(N->getLeafValue())) {
     // If this is the root of the dag we're matching, we emit a redundant opcode
     // check to ensure that this gets folded into the normal top-level
     // OpcodeSwitch.
@@ -215,7 +211,7 @@
     return AddMatcher(new CheckIntegerMatcher(II->getValue()));
   }
 
-  DefInit *DI = dynamic_cast<DefInit*>(N->getLeafValue());
+  DefInit *DI = dyn_cast<DefInit>(N->getLeafValue());
   if (DI == 0) {
     errs() << "Unknown leaf kind: " << *N << "\n";
     abort();
@@ -283,7 +279,7 @@
        N->getOperator()->getName() == "or") &&
       N->getChild(1)->isLeaf() && N->getChild(1)->getPredicateFns().empty() &&
       N->getPredicateFns().empty()) {
-    if (IntInit *II = dynamic_cast<IntInit*>(N->getChild(1)->getLeafValue())) {
+    if (IntInit *II = dyn_cast<IntInit>(N->getChild(1)->getLeafValue())) {
       if (!isPowerOf2_32(II->getValue())) {  // Don't bother with single bits.
         // If this is at the root of the pattern, we emit a redundant
         // CheckOpcode so that the following checks get factored properly under
@@ -572,14 +568,14 @@
                                          SmallVectorImpl<unsigned> &ResultOps) {
   assert(N->isLeaf() && "Must be a leaf");
 
-  if (IntInit *II = dynamic_cast<IntInit*>(N->getLeafValue())) {
+  if (IntInit *II = dyn_cast<IntInit>(N->getLeafValue())) {
     AddMatcher(new EmitIntegerMatcher(II->getValue(), N->getType(0)));
     ResultOps.push_back(NextRecordedOperandNo++);
     return;
   }
 
   // If this is an explicit register reference, handle it.
-  if (DefInit *DI = dynamic_cast<DefInit*>(N->getLeafValue())) {
+  if (DefInit *DI = dyn_cast<DefInit>(N->getLeafValue())) {
     Record *Def = DI->getDef();
     if (Def->isSubClassOf("Register")) {
       const CodeGenRegister *Reg =
@@ -727,8 +723,7 @@
 
     // Determine what to emit for this operand.
     Record *OperandNode = II.Operands[InstOpNo].Rec;
-    if ((OperandNode->isSubClassOf("PredicateOperand") ||
-         OperandNode->isSubClassOf("OptionalDefOperand")) &&
+    if (OperandNode->isSubClassOf("OperandWithDefaultOps") &&
         !CGP.getDefaultOperand(OperandNode).DefaultOps.empty()) {
       // This is a predicate or optional def operand; emit the
       // 'default ops' operands.
@@ -877,7 +872,7 @@
   if (OpRec->isSubClassOf("SDNodeXForm"))
     return EmitResultSDNodeXFormAsOperand(N, ResultOps);
   errs() << "Unknown result node to emit code for: " << *N << '\n';
-  throw std::string("Unknown node in result pattern!");
+  PrintFatalError("Unknown node in result pattern!");
 }
 
 void MatcherGen::EmitResultCode() {
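
This file follows the tree-wide move from dynamic_cast to the TableGen Init
hierarchy's own dyn_cast support (which also permits building without RTTI),
and from throw to PrintFatalError. A short sketch of the adopted pattern,
with illustrative values only:

// Illustration only; classifyLeaf is invented for this example.
#include "llvm/Support/Casting.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
using namespace llvm;

static void classifyLeaf(Init *Leaf) {
  if (IntInit *II = dyn_cast<IntInit>(Leaf)) {
    (void)II->getValue();              // integer constant leaf
    return;
  }
  if (DefInit *DI = dyn_cast<DefInit>(Leaf)) {
    (void)DI->getDef();                // record reference, e.g. a Register
    return;
  }
  PrintFatalError("unhandled leaf kind");
}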

Modified: llvm/branches/AMDILBackend/utils/TableGen/DFAPacketizerEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/DFAPacketizerEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/DFAPacketizerEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/DFAPacketizerEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -17,6 +17,7 @@
 
 #include "CodeGenTarget.h"
 #include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/TableGen/Record.h"
 #include "llvm/TableGen/TableGenBackend.h"
 #include <list>
@@ -74,6 +75,8 @@
 // Another way of thinking about this transition is we are mapping a NDFA with
 // two states [0x01] and [0x10] into a DFA with a single state [0x01, 0x10].
 //
+// A State instance also contains a collection of transitions from that state:
+// a map from inputs to new states.
 //
 namespace {
 class State {
@@ -82,10 +85,16 @@
   int stateNum;
   bool isInitial;
   std::set<unsigned> stateInfo;
+  typedef std::map<unsigned, State *> TransitionMap;
+  TransitionMap Transitions;
 
   State();
   State(const State &S);
 
+  bool operator<(const State &s) const {
+    return stateNum < s.stateNum;
+  }
+
   //
   // canAddInsnClass - Returns true if an instruction of type InsnClass is a
   // valid transition from this state, i.e., can an instruction of type InsnClass
@@ -100,38 +109,18 @@
   // which are possible from this state (PossibleStates).
   //
   void AddInsnClass(unsigned InsnClass, std::set<unsigned> &PossibleStates);
+  // 
+  // addTransition - Add a transition from this state given the input InsnClass
+  //
+  void addTransition(unsigned InsnClass, State *To);
+  //
+  // hasTransition - Returns true if there is a transition from this state
+  // given the input InsnClass
+  //
+  bool hasTransition(unsigned InsnClass);
 };
 } // End anonymous namespace.
 
-
-namespace {
-struct Transition {
- public:
-  static int currentTransitionNum;
-  int transitionNum;
-  State *from;
-  unsigned input;
-  State *to;
-
-  Transition(State *from_, unsigned input_, State *to_);
-};
-} // End anonymous namespace.
-
-
-//
-// Comparators to keep set of states sorted.
-//
-namespace {
-struct ltState {
-  bool operator()(const State *s1, const State *s2) const;
-};
-
-struct ltTransition {
-  bool operator()(const Transition *s1, const Transition *s2) const;
-};
-} // End anonymous namespace.
-
-
 //
 // class DFA: deterministic finite automaton for processor resource tracking.
 //
@@ -139,36 +128,19 @@
 class DFA {
 public:
   DFA();
+  ~DFA();
 
   // Set of states. Need to keep this sorted to emit the transition table.
-  std::set<State*, ltState> states;
+  typedef std::set<State *, less_ptr<State> > StateSet;
+  StateSet states;
 
-  // Map from a state to the list of transitions with that state as source.
-  std::map<State*, std::set<Transition*, ltTransition>, ltState>
-    stateTransitions;
   State *currentState;
 
-  // Highest valued Input seen.
-  unsigned LargestInput;
-
   //
   // Modify the DFA.
   //
   void initialize();
   void addState(State *);
-  void addTransition(Transition *);
-
-  //
-  // getTransition -  Return the state when a transition is made from
-  // State From with Input I. If a transition is not found, return NULL.
-  //
-  State *getTransition(State *, unsigned);
-
-  //
-  // isValidTransition: Predicate that checks if there is a valid transition
-  // from state From on input InsnClass.
-  //
-  bool isValidTransition(State *From, unsigned InsnClass);
 
   //
   // writeTable: Print out a table representing the DFA.
@@ -179,7 +151,7 @@
 
 
 //
-// Constructors for State, Transition, and DFA
+// Constructors and destructors for State and DFA
 //
 State::State() :
   stateNum(currentStateNum++), isInitial(false) {}
@@ -189,22 +161,27 @@
   stateNum(currentStateNum++), isInitial(S.isInitial),
   stateInfo(S.stateInfo) {}
 
+DFA::DFA(): currentState(NULL) {}
 
-Transition::Transition(State *from_, unsigned input_, State *to_) :
-  transitionNum(currentTransitionNum++), from(from_), input(input_),
-  to(to_) {}
-
-
-DFA::DFA() :
-  LargestInput(0) {}
-
+DFA::~DFA() {
+  DeleteContainerPointers(states);
+}
 
-bool ltState::operator()(const State *s1, const State *s2) const {
-    return (s1->stateNum < s2->stateNum);
+// 
+// addTransition - Add a transition from this state given the input InsnClass
+//
+void State::addTransition(unsigned InsnClass, State *To) {
+  assert(!Transitions.count(InsnClass) &&
+      "Cannot have multiple transitions for the same input");
+  Transitions[InsnClass] = To;
 }
 
-bool ltTransition::operator()(const Transition *s1, const Transition *s2) const {
-    return (s1->input < s2->input);
+//
+// hasTransition - Returns true if there is a transition from this state
+// given the input InsnClass
+//
+bool State::hasTransition(unsigned InsnClass) {
+  return Transitions.count(InsnClass) > 0;
 }
 
 //
@@ -272,6 +249,7 @@
 
 
 void DFA::initialize() {
+  assert(currentState && "Missing current state");
   currentState->isInitial = true;
 }
 
@@ -282,47 +260,7 @@
 }
 
 
-void DFA::addTransition(Transition *T) {
-  // Update LargestInput.
-  if (T->input > LargestInput)
-    LargestInput = T->input;
-
-  // Add the new transition.
-  bool Added = stateTransitions[T->from].insert(T).second;
-  assert(Added && "Cannot have multiple states for the same input");
-  (void)Added;
-}
-
-
-//
-// getTransition - Return the state when a transition is made from
-// State From with Input I. If a transition is not found, return NULL.
-//
-State *DFA::getTransition(State *From, unsigned I) {
-  // Do we have a transition from state From?
-  if (!stateTransitions.count(From))
-    return NULL;
-
-  // Do we have a transition from state From with Input I?
-  Transition TVal(NULL, I, NULL);
-  // Do not count this temporal instance
-  Transition::currentTransitionNum--;
-  std::set<Transition*, ltTransition>::iterator T =
-    stateTransitions[From].find(&TVal);
-  if (T != stateTransitions[From].end())
-    return (*T)->to;
-
-  return NULL;
-}
-
-
-bool DFA::isValidTransition(State *From, unsigned InsnClass) {
-  return (getTransition(From, InsnClass) != NULL);
-}
-
-
 int State::currentStateNum = 0;
-int Transition::currentTransitionNum = 0;
 
 DFAPacketizerEmitter::DFAPacketizerEmitter(RecordKeeper &R):
   TargetName(CodeGenTarget(R).getName()),
@@ -341,7 +279,7 @@
 //
 //
 void DFA::writeTableAndAPI(raw_ostream &OS, const std::string &TargetName) {
-  std::set<State*, ltState>::iterator SI = states.begin();
+  DFA::StateSet::iterator SI = states.begin();
   // This table provides a map to the beginning of the transitions for State s
   // in DFAStateInputTable.
   std::vector<int> StateEntry(states.size());
@@ -353,18 +291,16 @@
   // to construct the StateEntry table.
   int ValidTransitions = 0;
   for (unsigned i = 0; i < states.size(); ++i, ++SI) {
+    assert (((*SI)->stateNum == (int) i) && "Mismatch in state numbers");
     StateEntry[i] = ValidTransitions;
-    for (unsigned j = 0; j <= LargestInput; ++j) {
-      assert (((*SI)->stateNum == (int) i) && "Mismatch in state numbers");
-      State *To = getTransition(*SI, j);
-      if (To == NULL)
-        continue;
-
-      OS << "{" << j << ", "
-         << To->stateNum
+    for (State::TransitionMap::iterator
+        II = (*SI)->Transitions.begin(), IE = (*SI)->Transitions.end();
+        II != IE; ++II) {
+      OS << "{" << II->first << ", "
+         << II->second->stateNum
          << "},    ";
-      ++ValidTransitions;
     }
+    ValidTransitions += (*SI)->Transitions.size();
 
     // If there are no valid transitions from this state, we need a sentinel
     // transition.
@@ -539,7 +475,7 @@
       // If we haven't already created a transition for this input
       // and the state can accommodate this InsnClass, create a transition.
       //
-      if (!D.getTransition(current, InsnClass) &&
+      if (!current->hasTransition(InsnClass) &&
           current->canAddInsnClass(InsnClass)) {
         State *NewState = NULL;
         current->AddInsnClass(InsnClass, NewStateResources);
@@ -559,10 +495,8 @@
           Visited[NewStateResources] = NewState;
           WorkList.push_back(NewState);
         }
-
-        Transition *NewTransition = new Transition(current, InsnClass,
-                                                   NewState);
-        D.addTransition(NewTransition);
+        
+        current->addTransition(InsnClass, NewState);
       }
     }
   }

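The DFAPacketizerEmitter change above removes the separate Transition objects, the ltState/ltTransition comparators, and the DFA-wide stateTransitions map; each State now owns a map from input instruction class to successor state, and writeTableAndAPI() just walks that per-state map, so the LargestInput bookkeeping is no longer needed. A toy sketch of the new ownership model (a standalone stand-in, not the TableGen State class):

    #include <cassert>
    #include <map>

    struct ToyState {
      int Num;
      std::map<unsigned, ToyState *> Transitions; // input class -> successor

      explicit ToyState(int N) : Num(N) {}

      void addTransition(unsigned InsnClass, ToyState *To) {
        // One successor per input, mirroring the assert in State::addTransition.
        assert(!Transitions.count(InsnClass) && "duplicate transition");
        Transitions[InsnClass] = To;
      }

      bool hasTransition(unsigned InsnClass) const {
        return Transitions.count(InsnClass) > 0;
      }
    };

    int main() {
      ToyState S0(0), S1(1);
      S0.addTransition(/*InsnClass=*/0x1, &S1);
      assert(S0.hasTransition(0x1) && !S0.hasTransition(0x2));
      return 0;
    }
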
Modified: llvm/branches/AMDILBackend/utils/TableGen/DisassemblerEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/DisassemblerEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/DisassemblerEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/DisassemblerEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -117,11 +117,9 @@
     for (unsigned i = 0, e = numberedInstructions.size(); i != e; ++i)
       RecognizableInstr::processInstr(Tables, *numberedInstructions[i], i);
 
-    // FIXME: As long as we are using exceptions, might as well drop this to the
-    // actual conflict site.
     if (Tables.hasConflicts())
-      throw TGError(Target.getTargetRecord()->getLoc(),
-                    "Primary decode conflict");
+      PrintFatalError(Target.getTargetRecord()->getLoc(),
+                      "Primary decode conflict");
 
     Tables.emit(OS);
     return;

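As elsewhere in this commit, the decode-conflict diagnostic above now goes through PrintFatalError (declared in llvm/TableGen/Error.h) instead of a thrown TGError, so the backend no longer depends on exception unwinding. A rough stand-in for that report-and-exit contract; printFatalErrorSketch and the file name below are hypothetical, not the real helper:

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-in: print a fatal diagnostic and terminate, so callers
    // need no try/catch path. The real helper lives in llvm/TableGen/Error.h.
    [[noreturn]] static void printFatalErrorSketch(const char *Loc,
                                                   const char *Msg) {
      std::fprintf(stderr, "%s: error: %s\n", Loc, Msg);
      std::exit(1);
    }

    int main() {
      bool HasConflicts = true; // stand-in for Tables.hasConflicts()
      if (HasConflicts)
        printFatalErrorSketch("Target.td", "Primary decode conflict");
      return 0;
    }
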
Modified: llvm/branches/AMDILBackend/utils/TableGen/EDEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/EDEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/EDEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/EDEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -19,6 +19,7 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/Format.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
 #include "llvm/TableGen/Record.h"
 #include "llvm/TableGen/TableGenBackend.h"
 #include <string>
@@ -358,8 +359,8 @@
 /// X86PopulateOperands - Handles all the operands in an X86 instruction, adding
 ///   the appropriate flags to their descriptors
 ///
-/// @operandFlags - A reference the array of operand flag objects
-/// @inst         - The instruction to use as a source of information
+/// \param operandTypes A reference to the array of operand type objects
+/// \param inst         The instruction to use as a source of information
 static void X86PopulateOperands(
   LiteralConstantEmitter *(&operandTypes)[EDIS_MAX_OPERANDS],
   const CodeGenInstruction &inst) {
@@ -385,11 +386,12 @@
 
 /// decorate1 - Decorates a named operand with a new flag
 ///
-/// @operandFlags - The array of operand flag objects, which don't have names
-/// @inst         - The CodeGenInstruction, which provides a way to translate
-///                 between names and operand indices
-/// @opName       - The name of the operand
-/// @flag         - The name of the flag to add
+/// \param operandFlags The array of operand flag objects, which don't have
+///                     names
+/// \param inst         The CodeGenInstruction, which provides a way to
+///                     translate between names and operand indices
+/// \param opName       The name of the operand
+/// \param opFlag       The name of the flag to add
 static inline void decorate1(
   FlagsConstantEmitter *(&operandFlags)[EDIS_MAX_OPERANDS],
   const CodeGenInstruction &inst,
@@ -438,9 +440,9 @@
 ///   instruction to determine what sort of an instruction it is and then adds
 ///   the appropriate flags to the instruction and its operands
 ///
-/// @arg instType     - A reference to the type for the instruction as a whole
-/// @arg operandFlags - A reference to the array of operand flag object pointers
-/// @arg inst         - A reference to the original instruction
+/// \param instType     A reference to the type for the instruction as a whole
+/// \param operandFlags A reference to the array of operand flag object pointers
+/// \param inst         A reference to the original instruction
 static void X86ExtractSemantics(
   LiteralConstantEmitter &instType,
   FlagsConstantEmitter *(&operandFlags)[EDIS_MAX_OPERANDS],
@@ -567,8 +569,8 @@
 /// ARMFlagFromOpName - Processes the name of a single ARM operand (which is
 ///   actually its type) and translates it into an operand type
 ///
-/// @arg type     - The type object to set
-/// @arg name     - The name of the operand
+/// \param type The type object to set
+/// \param name The name of the operand
 static int ARMFlagFromOpName(LiteralConstantEmitter *type,
                              const std::string &name) {
   REG("GPR");
@@ -750,8 +752,8 @@
 /// ARMPopulateOperands - Handles all the operands in an ARM instruction, adding
 ///   the appropriate flags to their descriptors
 ///
-/// @operandFlags - A reference the array of operand flag objects
-/// @inst         - The instruction to use as a source of information
+/// \param operandTypes A reference to the array of operand type objects
+/// \param inst         The instruction to use as a source of information
 static void ARMPopulateOperands(
   LiteralConstantEmitter *(&operandTypes)[EDIS_MAX_OPERANDS],
   const CodeGenInstruction &inst) {
@@ -776,7 +778,7 @@
       errs() << "Operand type: " << rec.getName() << '\n';
       errs() << "Operand name: " << operandInfo.Name << '\n';
       errs() << "Instruction name: " << inst.TheDef->getName() << '\n';
-      throw("Unhandled type in EDEmitter");
+      PrintFatalError("Unhandled type in EDEmitter");
     }
   }
 }
@@ -790,10 +792,10 @@
 ///   instruction to determine what sort of an instruction it is and then adds
 ///   the appropriate flags to the instruction and its operands
 ///
-/// @arg instType     - A reference to the type for the instruction as a whole
-/// @arg operandTypes - A reference to the array of operand type object pointers
-/// @arg operandFlags - A reference to the array of operand flag object pointers
-/// @arg inst         - A reference to the original instruction
+/// \param instType     A reference to the type for the instruction as a whole
+/// \param operandTypes A reference to the array of operand type object pointers
+/// \param operandFlags A reference to the array of operand flag object pointers
+/// \param inst         A reference to the original instruction
 static void ARMExtractSemantics(
   LiteralConstantEmitter &instType,
   LiteralConstantEmitter *(&operandTypes)[EDIS_MAX_OPERANDS],
@@ -831,8 +833,8 @@
 /// populateInstInfo - Fills an array of InstInfos with information about each
 ///   instruction in a target
 ///
-/// @arg infoArray  - The array of InstInfo objects to populate
-/// @arg target     - The CodeGenTarget to use as a source of instructions
+/// \param infoArray The array of InstInfo objects to populate
+/// \param target    The CodeGenTarget to use as a source of instructions
 static void populateInstInfo(CompoundConstantEmitter &infoArray,
                              CodeGenTarget &target) {
   const std::vector<const CodeGenInstruction*> &numberedInstructions =

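The EDEmitter hunks above are documentation-only: the older @arg/@operandFlags tags become Doxygen \param tags whose names match the actual parameters, and the stale "operand flag" wording is corrected to "operand type" where the parameter is the type array. The target comment style looks roughly like this (a hypothetical function, not one from EDEmitter):

    /// scaleOperand - Scales an operand value by a constant factor.
    ///
    /// \param value  The operand value to scale
    /// \param factor The multiplier to apply
    static int scaleOperand(int value, int factor) {
      return value * factor;
    }
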
Modified: llvm/branches/AMDILBackend/utils/TableGen/FastISelEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/FastISelEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/FastISelEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/FastISelEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -245,7 +245,7 @@
       if (Op->getType(0) != VT)
         return false;
 
-      DefInit *OpDI = dynamic_cast<DefInit*>(Op->getLeafValue());
+      DefInit *OpDI = dyn_cast<DefInit>(Op->getLeafValue());
       if (!OpDI)
         return false;
       Record *OpLeafRec = OpDI->getDef();
@@ -406,13 +406,12 @@
   if (!Op->isLeaf())
     return PhysReg;
 
-  DefInit *OpDI = dynamic_cast<DefInit*>(Op->getLeafValue());
-  Record *OpLeafRec = OpDI->getDef();
+  Record *OpLeafRec = cast<DefInit>(Op->getLeafValue())->getDef();
   if (!OpLeafRec->isSubClassOf("Register"))
     return PhysReg;
 
-  PhysReg += static_cast<StringInit*>(OpLeafRec->getValue( \
-             "Namespace")->getValue())->getValue();
+  PhysReg += cast<StringInit>(OpLeafRec->getValue("Namespace")->getValue())
+               ->getValue();
   PhysReg += "::";
   PhysReg += Target.getRegBank().getReg(OpLeafRec)->getName();
   return PhysReg;
@@ -473,7 +472,7 @@
       // a bit too complicated for now.
       if (!Dst->getChild(1)->isLeaf()) continue;
 
-      DefInit *SR = dynamic_cast<DefInit*>(Dst->getChild(1)->getLeafValue());
+      DefInit *SR = dyn_cast<DefInit>(Dst->getChild(1)->getLeafValue());
       if (SR)
         SubRegNo = getQualifiedName(SR->getDef());
       else
@@ -550,7 +549,7 @@
     };
     
     if (SimplePatterns[Operands][OpcodeName][VT][RetVT].count(PredicateCheck))
-      throw TGError(Pattern.getSrcRecord()->getLoc(),
+      PrintFatalError(Pattern.getSrcRecord()->getLoc(),
                     "Duplicate record in FastISel table!");
 
     SimplePatterns[Operands][OpcodeName][VT][RetVT][PredicateCheck] = Memo;

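Besides the dyn_cast/cast conversions, the FastISelEmitter hunk above turns the duplicate-pattern diagnostic into a PrintFatalError. The table being guarded is a nested map keyed by operand signature, opcode, value types, and predicate string; inserting the same key twice means two .td patterns would collide. A flattened toy version of that duplicate check (two map levels instead of five; names are illustrative only):

    #include <cassert>
    #include <map>
    #include <string>

    int main() {
      // opcode name -> predicate string -> pattern id (toy payload).
      std::map<std::string, std::map<std::string, int>> SimplePatterns;

      auto addPattern = [&](const std::string &Opcode, const std::string &Pred,
                            int Memo) {
        assert(!SimplePatterns[Opcode].count(Pred) &&
               "Duplicate record in FastISel table!");
        SimplePatterns[Opcode][Pred] = Memo;
      };

      addPattern("ISD::ADD", "Subtarget->hasSSE2()", 1);
      addPattern("ISD::ADD", "Subtarget->hasAVX()", 2); // distinct predicate: OK
      return 0;
    }
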
Modified: llvm/branches/AMDILBackend/utils/TableGen/FixedLenDecoderEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/FixedLenDecoderEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/FixedLenDecoderEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/FixedLenDecoderEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -15,11 +15,18 @@
 #define DEBUG_TYPE "decoder-emitter"
 
 #include "CodeGenTarget.h"
+#include "llvm/TableGen/Error.h"
 #include "llvm/TableGen/Record.h"
 #include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCFixedLenDisassembler.h"
 #include "llvm/Support/DataTypes.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/LEB128.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/TableGen/TableGenBackend.h"
 
@@ -35,9 +42,7 @@
   EncodingField(unsigned B, unsigned W, unsigned O)
     : Base(B), Width(W), Offset(O) { }
 };
-} // End anonymous namespace
 
-namespace {
 struct OperandInfo {
   std::vector<EncodingField> Fields;
   std::string Decoder;
@@ -56,10 +61,25 @@
   const_iterator begin() const { return Fields.begin(); }
   const_iterator end() const   { return Fields.end();   }
 };
+
+typedef std::vector<uint8_t> DecoderTable;
+typedef uint32_t DecoderFixup;
+typedef std::vector<DecoderFixup> FixupList;
+typedef std::vector<FixupList> FixupScopeList;
+typedef SetVector<std::string> PredicateSet;
+typedef SetVector<std::string> DecoderSet;
+struct DecoderTableInfo {
+  DecoderTable Table;
+  FixupScopeList FixupStack;
+  PredicateSet Predicates;
+  DecoderSet Decoders;
+};
+
 } // End anonymous namespace
 
 namespace {
 class FixedLenDecoderEmitter {
+  const std::vector<const CodeGenInstruction*> *NumberedInstructions;
 public:
 
   // Defaults preserved here for documentation, even though they aren't
@@ -77,6 +97,17 @@
     GuardPrefix(GPrefix), GuardPostfix(GPostfix),
     ReturnOK(ROK), ReturnFail(RFail), Locals(L) {}
 
+  // Emit the decoder state machine table.
+  void emitTable(formatted_raw_ostream &o, DecoderTable &Table,
+                 unsigned Indentation, unsigned BitWidth,
+                 StringRef Namespace) const;
+  void emitPredicateFunction(formatted_raw_ostream &OS,
+                             PredicateSet &Predicates,
+                             unsigned Indentation) const;
+  void emitDecoderFunction(formatted_raw_ostream &OS,
+                           DecoderSet &Decoders,
+                           unsigned Indentation) const;
+
   // run - Output the code emitter
   void run(raw_ostream &o);
 
@@ -112,7 +143,7 @@
   return ValueNotSet(V) ? -1 : (V == BIT_FALSE ? 0 : 1);
 }
 static bit_value_t bitFromBits(const BitsInit &bits, unsigned index) {
-  if (BitInit *bit = dynamic_cast<BitInit*>(bits.getBit(index)))
+  if (BitInit *bit = dyn_cast<BitInit>(bits.getBit(index)))
     return bit->getValue() ? BIT_TRUE : BIT_FALSE;
 
   // The bit is uninitialized.
@@ -120,9 +151,7 @@
 }
 // Prints the bit value for each position.
 static void dumpBits(raw_ostream &o, const BitsInit &bits) {
-  unsigned index;
-
-  for (index = bits.getNumBits(); index > 0; index--) {
+  for (unsigned index = bits.getNumBits(); index > 0; --index) {
     switch (bitFromBits(bits, index - 1)) {
     case BIT_TRUE:
       o << "1";
@@ -238,8 +267,9 @@
   // match the remaining undecoded encoding bits against the singleton.
   void recurse();
 
-  // Emit code to decode instructions given a segment or segments of bits.
-  void emit(raw_ostream &o, unsigned &Indentation) const;
+  // Emit table entries to decode instructions given a segment or segments of
+  // bits.
+  void emitTableEntry(DecoderTableInfo &TableInfo) const;
 
   // Returns the number of fanout produced by the filter.  More fanout implies
   // the filter distinguishes more categories of instructions.
@@ -338,12 +368,7 @@
     doFilter();
   }
 
-  // The top level filter chooser has NULL as its parent.
-  bool isTopLevel() const { return Parent == NULL; }
-
-  // Emit the top level typedef and decodeInstruction() function.
-  void emitTop(raw_ostream &o, unsigned Indentation,
-               const std::string &Namespace) const;
+  unsigned getBitWidth() const { return BitWidth; }
 
 protected:
   // Populates the insn given the uid.
@@ -414,21 +439,28 @@
   bool emitPredicateMatch(raw_ostream &o, unsigned &Indentation,
                           unsigned Opc) const;
 
-  void emitSoftFailCheck(raw_ostream &o, unsigned Indentation,
-                         unsigned Opc) const;
-
-  // Emits code to decode the singleton.  Return true if we have matched all the
-  // well-known bits.
-  bool emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
-                            unsigned Opc) const;
+  bool doesOpcodeNeedPredicate(unsigned Opc) const;
+  unsigned getPredicateIndex(DecoderTableInfo &TableInfo, StringRef P) const;
+  void emitPredicateTableEntry(DecoderTableInfo &TableInfo,
+                               unsigned Opc) const;
+
+  void emitSoftFailTableEntry(DecoderTableInfo &TableInfo,
+                              unsigned Opc) const;
+
+  // Emits table entries to decode the singleton.
+  void emitSingletonTableEntry(DecoderTableInfo &TableInfo,
+                               unsigned Opc) const;
 
   // Emits code to decode the singleton, and then to decode the rest.
-  void emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
-                            const Filter &Best) const;
+  void emitSingletonTableEntry(DecoderTableInfo &TableInfo,
+                               const Filter &Best) const;
 
-  void emitBinaryParser(raw_ostream &o , unsigned &Indentation,
+  void emitBinaryParser(raw_ostream &o, unsigned &Indentation,
                         const OperandInfo &OpInfo) const;
 
+  void emitDecoder(raw_ostream &OS, unsigned Indentation, unsigned Opc) const;
+  unsigned getDecoderIndex(DecoderSet &Decoders, unsigned Opc) const;
+
   // Assign a single filter and run with it.
   void runSingleFilter(unsigned startBit, unsigned numBit, bool mixed);
 
@@ -447,10 +479,10 @@
   // dump the conflict set to the standard error.
   void doFilter();
 
-  // Emits code to decode our share of instructions.  Returns true if the
-  // emitted code causes a return, which occurs if we know how to decode
-  // the instruction at this level or the instruction is not decodeable.
-  bool emit(raw_ostream &o, unsigned &Indentation) const;
+public:
+  // emitTableEntries - Emit state machine entries to decode our share of
+  // instructions.
+  void emitTableEntries(DecoderTableInfo &TableInfo) const;
 };
 } // End anonymous namespace
 
@@ -524,11 +556,9 @@
   // Starts by inheriting our parent filter chooser's filter bit values.
   std::vector<bit_value_t> BitValueArray(Owner->FilterBitValues);
 
-  unsigned bitIndex;
-
   if (VariableInstructions.size()) {
     // Conservatively marks each segment position as BIT_UNSET.
-    for (bitIndex = 0; bitIndex < NumBits; bitIndex++)
+    for (unsigned bitIndex = 0; bitIndex < NumBits; ++bitIndex)
       BitValueArray[StartBit + bitIndex] = BIT_UNSET;
 
     // Delegates to an inferior filter chooser for further processing on this
@@ -544,7 +574,7 @@
   }
 
   // No need to recurse for a singleton filtered instruction.
-  // See also Filter::emit().
+  // See also Filter::emit*().
   if (getNumFiltered() == 1) {
     //Owner->SingletonExists(LastOpcFiltered);
     assert(FilterChooserMap.size() == 1);
@@ -557,7 +587,7 @@
        mapIterator++) {
 
     // Marks all the segment positions with either BIT_TRUE or BIT_FALSE.
-    for (bitIndex = 0; bitIndex < NumBits; bitIndex++) {
+    for (unsigned bitIndex = 0; bitIndex < NumBits; ++bitIndex) {
       if (mapIterator->first & (1ULL << bitIndex))
         BitValueArray[StartBit + bitIndex] = BIT_TRUE;
       else
@@ -577,64 +607,100 @@
   }
 }
 
-// Emit code to decode instructions given a segment or segments of bits.
-void Filter::emit(raw_ostream &o, unsigned &Indentation) const {
-  o.indent(Indentation) << "// Check Inst{";
-
-  if (NumBits > 1)
-    o << (StartBit + NumBits - 1) << '-';
-
-  o << StartBit << "} ...\n";
-
-  o.indent(Indentation) << "switch (fieldFromInstruction" << Owner->BitWidth
-                        << "(insn, " << StartBit << ", "
-                        << NumBits << ")) {\n";
+static void resolveTableFixups(DecoderTable &Table, const FixupList &Fixups,
+                               uint32_t DestIdx) {
+  // Any NumToSkip fixups in the current scope can resolve to the
+  // current location.
+  for (FixupList::const_reverse_iterator I = Fixups.rbegin(),
+                                         E = Fixups.rend();
+       I != E; ++I) {
+    // Calculate the distance from the byte following the fixup entry byte
+    // to the destination. The Target is calculated from after the 16-bit
+    // NumToSkip entry itself, so subtract two  from the displacement here
+    // to account for that.
+    uint32_t FixupIdx = *I;
+    uint32_t Delta = DestIdx - FixupIdx - 2;
+    // Our NumToSkip entries are 16-bits. Make sure our table isn't too
+    // big.
+    assert(Delta < 65536U && "disassembler decoding table too large!");
+    Table[FixupIdx] = (uint8_t)Delta;
+    Table[FixupIdx + 1] = (uint8_t)(Delta >> 8);
+  }
+}
+
+// Emit table entries to decode instructions given a segment or segments
+// of bits.
+void Filter::emitTableEntry(DecoderTableInfo &TableInfo) const {
+  TableInfo.Table.push_back(MCD::OPC_ExtractField);
+  TableInfo.Table.push_back(StartBit);
+  TableInfo.Table.push_back(NumBits);
+
+  // A new filter entry begins a new scope for fixup resolution.
+  TableInfo.FixupStack.push_back(FixupList());
 
   std::map<unsigned, const FilterChooser*>::const_iterator filterIterator;
 
-  bool DefaultCase = false;
+  DecoderTable &Table = TableInfo.Table;
+
+  size_t PrevFilter = 0;
+  bool HasFallthrough = false;
   for (filterIterator = FilterChooserMap.begin();
        filterIterator != FilterChooserMap.end();
        filterIterator++) {
-
     // Field value -1 implies a non-empty set of variable instructions.
     // See also recurse().
     if (filterIterator->first == (unsigned)-1) {
-      DefaultCase = true;
-
-      o.indent(Indentation) << "default:\n";
-      o.indent(Indentation) << "  break; // fallthrough\n";
-
-      // Closing curly brace for the switch statement.
-      // This is unconventional because we want the default processing to be
-      // performed for the fallthrough cases as well, i.e., when the "cases"
-      // did not prove a decoded instruction.
-      o.indent(Indentation) << "}\n";
+      HasFallthrough = true;
 
-    } else
-      o.indent(Indentation) << "case " << filterIterator->first << ":\n";
+      // Each scope should always have at least one filter value to check
+      // for.
+      assert(PrevFilter != 0 && "empty filter set!");
+      FixupList &CurScope = TableInfo.FixupStack.back();
+      // Resolve any NumToSkip fixups in the current scope.
+      resolveTableFixups(Table, CurScope, Table.size());
+      CurScope.clear();
+      PrevFilter = 0;  // Don't re-process the filter's fallthrough.
+    } else {
+      Table.push_back(MCD::OPC_FilterValue);
+      // Encode and emit the value to filter against.
+      uint8_t Buffer[8];
+      unsigned Len = encodeULEB128(filterIterator->first, Buffer);
+      Table.insert(Table.end(), Buffer, Buffer + Len);
+      // Reserve space for the NumToSkip entry. We'll backpatch the value
+      // later.
+      PrevFilter = Table.size();
+      Table.push_back(0);
+      Table.push_back(0);
+    }
 
     // We arrive at a category of instructions with the same segment value.
     // Now delegate to the sub filter chooser for further decodings.
     // The case may fallthrough, which happens if the remaining well-known
     // encoding bits do not match exactly.
-    if (!DefaultCase) { ++Indentation; ++Indentation; }
+    filterIterator->second->emitTableEntries(TableInfo);
 
-    filterIterator->second->emit(o, Indentation);
-    // For top level default case, there's no need for a break statement.
-    if (Owner->isTopLevel() && DefaultCase)
-      break;
-    
-    o.indent(Indentation) << "break;\n";
-
-    if (!DefaultCase) { --Indentation; --Indentation; }
+    // Now that we've emitted the body of the handler, update the NumToSkip
+    // of the filter itself to be able to skip forward when false. Subtract
+    // two as to account for the width of the NumToSkip field itself.
+    if (PrevFilter) {
+      uint32_t NumToSkip = Table.size() - PrevFilter - 2;
+      assert(NumToSkip < 65536U && "disassembler decoding table too large!");
+      Table[PrevFilter] = (uint8_t)NumToSkip;
+      Table[PrevFilter + 1] = (uint8_t)(NumToSkip >> 8);
+    }
   }
 
-  // If there is no default case, we still need to supply a closing brace.
-  if (!DefaultCase) {
-    // Closing curly brace for the switch statement.
-    o.indent(Indentation) << "}\n";
-  }
+  // Any remaining unresolved fixups bubble up to the parent fixup scope.
+  assert(TableInfo.FixupStack.size() > 1 && "fixup stack underflow!");
+  FixupScopeList::iterator Source = TableInfo.FixupStack.end() - 1;
+  FixupScopeList::iterator Dest = Source - 1;
+  Dest->insert(Dest->end(), Source->begin(), Source->end());
+  TableInfo.FixupStack.pop_back();
+
+  // If there is no fallthrough, then the final filter should get fixed
+  // up according to the enclosing scope rather than the current position.
+  if (!HasFallthrough)
+    TableInfo.FixupStack.back().push_back(PrevFilter);
 }
 
 // Returns the number of fanout produced by the filter.  More fanout implies
@@ -652,31 +718,205 @@
 //                              //
 //////////////////////////////////
 
-// Emit the top level typedef and decodeInstruction() function.
-void FilterChooser::emitTop(raw_ostream &o, unsigned Indentation,
-                            const std::string &Namespace) const {
-  o.indent(Indentation) <<
-    "static MCDisassembler::DecodeStatus decode" << Namespace << "Instruction"
-    << BitWidth << "(MCInst &MI, uint" << BitWidth
-    << "_t insn, uint64_t Address, "
-    << "const void *Decoder, const MCSubtargetInfo &STI) {\n";
-  o.indent(Indentation) << "  unsigned tmp = 0;\n";
-  o.indent(Indentation) << "  (void)tmp;\n";
-  o.indent(Indentation) << Emitter->Locals << "\n";
-  o.indent(Indentation) << "  uint64_t Bits = STI.getFeatureBits();\n";
-  o.indent(Indentation) << "  (void)Bits;\n";
-
-  ++Indentation; ++Indentation;
-  // Emits code to decode the instructions.
-  emit(o, Indentation);
-
-  o << '\n';
-  o.indent(Indentation) << "return " << Emitter->ReturnFail << ";\n";
-  --Indentation; --Indentation;
+// Emit the decoder state machine table.
+void FixedLenDecoderEmitter::emitTable(formatted_raw_ostream &OS,
+                                       DecoderTable &Table,
+                                       unsigned Indentation,
+                                       unsigned BitWidth,
+                                       StringRef Namespace) const {
+  OS.indent(Indentation) << "static const uint8_t DecoderTable" << Namespace
+    << BitWidth << "[] = {\n";
+
+  Indentation += 2;
+
+  // FIXME: We may be able to use the NumToSkip values to recover
+  // appropriate indentation levels.
+  DecoderTable::const_iterator I = Table.begin();
+  DecoderTable::const_iterator E = Table.end();
+  while (I != E) {
+    assert (I < E && "incomplete decode table entry!");
+
+    uint64_t Pos = I - Table.begin();
+    OS << "/* " << Pos << " */";
+    OS.PadToColumn(12);
+
+    switch (*I) {
+    default:
+      PrintFatalError("invalid decode table opcode");
+    case MCD::OPC_ExtractField: {
+      ++I;
+      unsigned Start = *I++;
+      unsigned Len = *I++;
+      OS.indent(Indentation) << "MCD::OPC_ExtractField, " << Start << ", "
+        << Len << ",  // Inst{";
+      if (Len > 1)
+        OS << (Start + Len - 1) << "-";
+      OS << Start << "} ...\n";
+      break;
+    }
+    case MCD::OPC_FilterValue: {
+      ++I;
+      OS.indent(Indentation) << "MCD::OPC_FilterValue, ";
+      // The filter value is ULEB128 encoded.
+      while (*I >= 128)
+        OS << utostr(*I++) << ", ";
+      OS << utostr(*I++) << ", ";
+
+      // 16-bit numtoskip value.
+      uint8_t Byte = *I++;
+      uint32_t NumToSkip = Byte;
+      OS << utostr(Byte) << ", ";
+      Byte = *I++;
+      OS << utostr(Byte) << ", ";
+      NumToSkip |= Byte << 8;
+      OS << "// Skip to: " << ((I - Table.begin()) + NumToSkip) << "\n";
+      break;
+    }
+    case MCD::OPC_CheckField: {
+      ++I;
+      unsigned Start = *I++;
+      unsigned Len = *I++;
+      OS.indent(Indentation) << "MCD::OPC_CheckField, " << Start << ", "
+        << Len << ", ";// << Val << ", " << NumToSkip << ",\n";
+      // ULEB128 encoded field value.
+      for (; *I >= 128; ++I)
+        OS << utostr(*I) << ", ";
+      OS << utostr(*I++) << ", ";
+      // 16-bit numtoskip value.
+      uint8_t Byte = *I++;
+      uint32_t NumToSkip = Byte;
+      OS << utostr(Byte) << ", ";
+      Byte = *I++;
+      OS << utostr(Byte) << ", ";
+      NumToSkip |= Byte << 8;
+      OS << "// Skip to: " << ((I - Table.begin()) + NumToSkip) << "\n";
+      break;
+    }
+    case MCD::OPC_CheckPredicate: {
+      ++I;
+      OS.indent(Indentation) << "MCD::OPC_CheckPredicate, ";
+      for (; *I >= 128; ++I)
+        OS << utostr(*I) << ", ";
+      OS << utostr(*I++) << ", ";
+
+      // 16-bit numtoskip value.
+      uint8_t Byte = *I++;
+      uint32_t NumToSkip = Byte;
+      OS << utostr(Byte) << ", ";
+      Byte = *I++;
+      OS << utostr(Byte) << ", ";
+      NumToSkip |= Byte << 8;
+      OS << "// Skip to: " << ((I - Table.begin()) + NumToSkip) << "\n";
+      break;
+    }
+    case MCD::OPC_Decode: {
+      ++I;
+      // Extract the ULEB128 encoded Opcode to a buffer.
+      uint8_t Buffer[8], *p = Buffer;
+      while ((*p++ = *I++) >= 128)
+        assert((p - Buffer) <= (ptrdiff_t)sizeof(Buffer)
+               && "ULEB128 value too large!");
+      // Decode the Opcode value.
+      unsigned Opc = decodeULEB128(Buffer);
+      OS.indent(Indentation) << "MCD::OPC_Decode, ";
+      for (p = Buffer; *p >= 128; ++p)
+        OS << utostr(*p) << ", ";
+      OS << utostr(*p) << ", ";
+
+      // Decoder index.
+      for (; *I >= 128; ++I)
+        OS << utostr(*I) << ", ";
+      OS << utostr(*I++) << ", ";
+
+      OS << "// Opcode: "
+         << NumberedInstructions->at(Opc)->TheDef->getName() << "\n";
+      break;
+    }
+    case MCD::OPC_SoftFail: {
+      ++I;
+      OS.indent(Indentation) << "MCD::OPC_SoftFail";
+      // Positive mask
+      uint64_t Value = 0;
+      unsigned Shift = 0;
+      do {
+        OS << ", " << utostr(*I);
+        Value += (*I & 0x7f) << Shift;
+        Shift += 7;
+      } while (*I++ >= 128);
+      if (Value > 127)
+        OS << " /* 0x" << utohexstr(Value) << " */";
+      // Negative mask
+      Value = 0;
+      Shift = 0;
+      do {
+        OS << ", " << utostr(*I);
+        Value += (*I & 0x7f) << Shift;
+        Shift += 7;
+      } while (*I++ >= 128);
+      if (Value > 127)
+        OS << " /* 0x" << utohexstr(Value) << " */";
+      OS << ",\n";
+      break;
+    }
+    case MCD::OPC_Fail: {
+      ++I;
+      OS.indent(Indentation) << "MCD::OPC_Fail,\n";
+      break;
+    }
+    }
+  }
+  OS.indent(Indentation) << "0\n";
+
+  Indentation -= 2;
 
-  o.indent(Indentation) << "}\n";
+  OS.indent(Indentation) << "};\n\n";
+}
+
+void FixedLenDecoderEmitter::
+emitPredicateFunction(formatted_raw_ostream &OS, PredicateSet &Predicates,
+                      unsigned Indentation) const {
+  // The predicate function is just a big switch statement based on the
+  // input predicate index.
+  OS.indent(Indentation) << "static bool checkDecoderPredicate(unsigned Idx, "
+    << "uint64_t Bits) {\n";
+  Indentation += 2;
+  OS.indent(Indentation) << "switch (Idx) {\n";
+  OS.indent(Indentation) << "default: llvm_unreachable(\"Invalid index!\");\n";
+  unsigned Index = 0;
+  for (PredicateSet::const_iterator I = Predicates.begin(), E = Predicates.end();
+       I != E; ++I, ++Index) {
+    OS.indent(Indentation) << "case " << Index << ":\n";
+    OS.indent(Indentation+2) << "return (" << *I << ");\n";
+  }
+  OS.indent(Indentation) << "}\n";
+  Indentation -= 2;
+  OS.indent(Indentation) << "}\n\n";
+}
 
-  o << '\n';
+void FixedLenDecoderEmitter::
+emitDecoderFunction(formatted_raw_ostream &OS, DecoderSet &Decoders,
+                    unsigned Indentation) const {
+  // The decoder function is just a big switch statement based on the
+  // input decoder index.
+  OS.indent(Indentation) << "template<typename InsnType>\n";
+  OS.indent(Indentation) << "static DecodeStatus decodeToMCInst(DecodeStatus S,"
+    << " unsigned Idx, InsnType insn, MCInst &MI,\n";
+  OS.indent(Indentation) << "                                   uint64_t "
+    << "Address, const void *Decoder) {\n";
+  Indentation += 2;
+  OS.indent(Indentation) << "InsnType tmp;\n";
+  OS.indent(Indentation) << "switch (Idx) {\n";
+  OS.indent(Indentation) << "default: llvm_unreachable(\"Invalid index!\");\n";
+  unsigned Index = 0;
+  for (DecoderSet::const_iterator I = Decoders.begin(), E = Decoders.end();
+       I != E; ++I, ++Index) {
+    OS.indent(Indentation) << "case " << Index << ":\n";
+    OS << *I;
+    OS.indent(Indentation+2) << "return S;\n";
+  }
+  OS.indent(Indentation) << "}\n";
+  Indentation -= 2;
+  OS.indent(Indentation) << "}\n\n";
 }
 
 // Populates the field of the insn given the start position and the number of
@@ -703,9 +943,7 @@
 /// filter array as a series of chars.
 void FilterChooser::dumpFilterArray(raw_ostream &o,
                                  const std::vector<bit_value_t> &filter) const {
-  unsigned bitIndex;
-
-  for (bitIndex = BitWidth; bitIndex > 0; bitIndex--) {
+  for (unsigned bitIndex = BitWidth; bitIndex > 0; bitIndex--) {
     switch (filter[bitIndex - 1]) {
     case BIT_UNFILTERED:
       o << ".";
@@ -827,26 +1065,71 @@
 
   if (OpInfo.numFields() == 1) {
     OperandInfo::const_iterator OI = OpInfo.begin();
-    o.indent(Indentation) << "  tmp = fieldFromInstruction" << BitWidth
-                            << "(insn, " << OI->Base << ", " << OI->Width
-                            << ");\n";
+    o.indent(Indentation) << "tmp = fieldFromInstruction"
+                          << "(insn, " << OI->Base << ", " << OI->Width
+                          << ");\n";
   } else {
-    o.indent(Indentation) << "  tmp = 0;\n";
+    o.indent(Indentation) << "tmp = 0;\n";
     for (OperandInfo::const_iterator OI = OpInfo.begin(), OE = OpInfo.end();
          OI != OE; ++OI) {
-      o.indent(Indentation) << "  tmp |= (fieldFromInstruction" << BitWidth
+      o.indent(Indentation) << "tmp |= (fieldFromInstruction"
                             << "(insn, " << OI->Base << ", " << OI->Width
                             << ") << " << OI->Offset << ");\n";
     }
   }
 
   if (Decoder != "")
-    o.indent(Indentation) << "  " << Emitter->GuardPrefix << Decoder
+    o.indent(Indentation) << Emitter->GuardPrefix << Decoder
                           << "(MI, tmp, Address, Decoder)"
                           << Emitter->GuardPostfix << "\n";
   else
-    o.indent(Indentation) << "  MI.addOperand(MCOperand::CreateImm(tmp));\n";
+    o.indent(Indentation) << "MI.addOperand(MCOperand::CreateImm(tmp));\n";
+
+}
+
+void FilterChooser::emitDecoder(raw_ostream &OS, unsigned Indentation,
+                                unsigned Opc) const {
+  std::map<unsigned, std::vector<OperandInfo> >::const_iterator OpIter =
+    Operands.find(Opc);
+  const std::vector<OperandInfo>& InsnOperands = OpIter->second;
+  for (std::vector<OperandInfo>::const_iterator
+       I = InsnOperands.begin(), E = InsnOperands.end(); I != E; ++I) {
+    // If a custom instruction decoder was specified, use that.
+    if (I->numFields() == 0 && I->Decoder.size()) {
+      OS.indent(Indentation) << Emitter->GuardPrefix << I->Decoder
+        << "(MI, insn, Address, Decoder)"
+        << Emitter->GuardPostfix << "\n";
+      break;
+    }
+
+    emitBinaryParser(OS, Indentation, *I);
+  }
+}
 
+unsigned FilterChooser::getDecoderIndex(DecoderSet &Decoders,
+                                        unsigned Opc) const {
+  // Build up the predicate string.
+  SmallString<256> Decoder;
+  // FIXME: emitDecoder() function can take a buffer directly rather than
+  // a stream.
+  raw_svector_ostream S(Decoder);
+  unsigned I = 4;
+  emitDecoder(S, I, Opc);
+  S.flush();
+
+  // Using the full decoder string as the key value here is a bit
+  // heavyweight, but is effective. If the string comparisons become a
+  // performance concern, we can implement a mangling of the predicate
+  // data easily enough with a map back to the actual string. That's
+  // overkill for now, though.
+
+  // Make sure the predicate is in the table.
+  Decoders.insert(Decoder.str());
+  // Now figure out the index for when we write out the table.
+  DecoderSet::const_iterator P = std::find(Decoders.begin(),
+                                           Decoders.end(),
+                                           Decoder.str());
+  return (unsigned)(P - Decoders.begin());
 }
 
 static void emitSinglePredicateMatch(raw_ostream &o, StringRef str,
@@ -887,8 +1170,74 @@
   return Predicates->getSize() > 0;
 }
 
-void FilterChooser::emitSoftFailCheck(raw_ostream &o, unsigned Indentation,
-                                      unsigned Opc) const {
+bool FilterChooser::doesOpcodeNeedPredicate(unsigned Opc) const {
+  ListInit *Predicates =
+    AllInstructions[Opc]->TheDef->getValueAsListInit("Predicates");
+  for (unsigned i = 0; i < Predicates->getSize(); ++i) {
+    Record *Pred = Predicates->getElementAsRecord(i);
+    if (!Pred->getValue("AssemblerMatcherPredicate"))
+      continue;
+
+    std::string P = Pred->getValueAsString("AssemblerCondString");
+
+    if (!P.length())
+      continue;
+
+    return true;
+  }
+  return false;
+}
+
+unsigned FilterChooser::getPredicateIndex(DecoderTableInfo &TableInfo,
+                                          StringRef Predicate) const {
+  // Using the full predicate string as the key value here is a bit
+  // heavyweight, but is effective. If the string comparisons become a
+  // performance concern, we can implement a mangling of the predicate
+  // data easily enough with a map back to the actual string. That's
+  // overkill for now, though.
+
+  // Make sure the predicate is in the table.
+  TableInfo.Predicates.insert(Predicate.str());
+  // Now figure out the index for when we write out the table.
+  PredicateSet::const_iterator P = std::find(TableInfo.Predicates.begin(),
+                                             TableInfo.Predicates.end(),
+                                             Predicate.str());
+  return (unsigned)(P - TableInfo.Predicates.begin());
+}
+
+void FilterChooser::emitPredicateTableEntry(DecoderTableInfo &TableInfo,
+                                            unsigned Opc) const {
+  if (!doesOpcodeNeedPredicate(Opc))
+    return;
+
+  // Build up the predicate string.
+  SmallString<256> Predicate;
+  // FIXME: emitPredicateMatch() functions can take a buffer directly rather
+  // than a stream.
+  raw_svector_ostream PS(Predicate);
+  unsigned I = 0;
+  emitPredicateMatch(PS, I, Opc);
+
+  // Figure out the index into the predicate table for the predicate just
+  // computed.
+  unsigned PIdx = getPredicateIndex(TableInfo, PS.str());
+  SmallString<16> PBytes;
+  raw_svector_ostream S(PBytes);
+  encodeULEB128(PIdx, S);
+  S.flush();
+
+  TableInfo.Table.push_back(MCD::OPC_CheckPredicate);
+  // Predicate index
+  for (unsigned i = 0, e = PBytes.size(); i != e; ++i)
+    TableInfo.Table.push_back(PBytes[i]);
+  // Push location for NumToSkip backpatching.
+  TableInfo.FixupStack.back().push_back(TableInfo.Table.size());
+  TableInfo.Table.push_back(0);
+  TableInfo.Table.push_back(0);
+}
+
+void FilterChooser::emitSoftFailTableEntry(DecoderTableInfo &TableInfo,
+                                           unsigned Opc) const {
   BitsInit *SFBits =
     AllInstructions[Opc]->TheDef->getValueAsBitsInit("SoftFail");
   if (!SFBits) return;
@@ -914,13 +1263,11 @@
     default:
       // The bit is not set; this must be an error!
       StringRef Name = AllInstructions[Opc]->TheDef->getName();
-      errs() << "SoftFail Conflict: bit SoftFail{" << i << "} in "
-             << Name
-             << " is set but Inst{" << i <<"} is unset!\n"
+      errs() << "SoftFail Conflict: bit SoftFail{" << i << "} in " << Name
+             << " is set but Inst{" << i << "} is unset!\n"
              << "  - You can only mark a bit as SoftFail if it is fully defined"
              << " (1/0 - not '?') in Inst\n";
-      o << "#error SoftFail Conflict, " << Name << "::SoftFail{" << i 
-        << "} set but Inst{" << i << "} undefined!\n";
+      return;
     }
   }
 
@@ -930,27 +1277,31 @@
   if (!NeedPositiveMask && !NeedNegativeMask)
     return;
 
-  std::string PositiveMaskStr = PositiveMask.toString(16, /*signed=*/false);
-  std::string NegativeMaskStr = NegativeMask.toString(16, /*signed=*/false);
-  StringRef BitExt = "";
-  if (BitWidth > 32)
-    BitExt = "ULL";
-
-  o.indent(Indentation) << "if (";
-  if (NeedPositiveMask)
-    o << "insn & 0x" << PositiveMaskStr << BitExt;
-  if (NeedPositiveMask && NeedNegativeMask)
-    o << " || ";
-  if (NeedNegativeMask)
-    o << "~insn & 0x" << NegativeMaskStr << BitExt;
-  o << ")\n";
-  o.indent(Indentation+2) << "S = MCDisassembler::SoftFail;\n";
-}
-
-// Emits code to decode the singleton.  Return true if we have matched all the
-// well-known bits.
-bool FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
-                                         unsigned Opc) const {
+  TableInfo.Table.push_back(MCD::OPC_SoftFail);
+
+  SmallString<16> MaskBytes;
+  raw_svector_ostream S(MaskBytes);
+  if (NeedPositiveMask) {
+    encodeULEB128(PositiveMask.getZExtValue(), S);
+    S.flush();
+    for (unsigned i = 0, e = MaskBytes.size(); i != e; ++i)
+      TableInfo.Table.push_back(MaskBytes[i]);
+  } else
+    TableInfo.Table.push_back(0);
+  if (NeedNegativeMask) {
+    MaskBytes.clear();
+    S.resync();
+    encodeULEB128(NegativeMask.getZExtValue(), S);
+    S.flush();
+    for (unsigned i = 0, e = MaskBytes.size(); i != e; ++i)
+      TableInfo.Table.push_back(MaskBytes[i]);
+  } else
+    TableInfo.Table.push_back(0);
+}
+
+// Emits table entries to decode the singleton.
+void FilterChooser::emitSingletonTableEntry(DecoderTableInfo &TableInfo,
+                                            unsigned Opc) const {
   std::vector<unsigned> StartBits;
   std::vector<unsigned> EndBits;
   std::vector<uint64_t> FieldVals;
@@ -961,106 +1312,69 @@
   getIslands(StartBits, EndBits, FieldVals, Insn);
 
   unsigned Size = StartBits.size();
-  unsigned I, NumBits;
-
-  // If we have matched all the well-known bits, just issue a return.
-  if (Size == 0) {
-    o.indent(Indentation) << "if (";
-    if (!emitPredicateMatch(o, Indentation, Opc))
-      o << "1";
-    o << ") {\n";
-    emitSoftFailCheck(o, Indentation+2, Opc);
-    o.indent(Indentation) << "  MI.setOpcode(" << Opc << ");\n";
-    std::map<unsigned, std::vector<OperandInfo> >::const_iterator OpIter =
-      Operands.find(Opc);
-    const std::vector<OperandInfo>& InsnOperands = OpIter->second;
-    for (std::vector<OperandInfo>::const_iterator
-         I = InsnOperands.begin(), E = InsnOperands.end(); I != E; ++I) {
-      // If a custom instruction decoder was specified, use that.
-      if (I->numFields() == 0 && I->Decoder.size()) {
-        o.indent(Indentation) << "  " << Emitter->GuardPrefix << I->Decoder
-                              << "(MI, insn, Address, Decoder)"
-                              << Emitter->GuardPostfix << "\n";
-        break;
-      }
-
-      emitBinaryParser(o, Indentation, *I);
-    }
-
-    o.indent(Indentation) << "  return " << Emitter->ReturnOK << "; // "
-                          << nameWithID(Opc) << '\n';
-    o.indent(Indentation) << "}\n"; // Closing predicate block.
-    return true;
-  }
-
-  // Otherwise, there are more decodings to be done!
 
-  // Emit code to match the island(s) for the singleton.
-  o.indent(Indentation) << "// Check ";
+  // Emit the predicate table entry if one is needed.
+  emitPredicateTableEntry(TableInfo, Opc);
 
-  for (I = Size; I != 0; --I) {
-    o << "Inst{" << EndBits[I-1] << '-' << StartBits[I-1] << "} ";
-    if (I > 1)
-      o << " && ";
-    else
-      o << "for singleton decoding...\n";
-  }
+  // Check any additional encoding fields needed.
+  for (unsigned I = Size; I != 0; --I) {
+    unsigned NumBits = EndBits[I-1] - StartBits[I-1] + 1;
+    TableInfo.Table.push_back(MCD::OPC_CheckField);
+    TableInfo.Table.push_back(StartBits[I-1]);
+    TableInfo.Table.push_back(NumBits);
+    uint8_t Buffer[8], *p;
+    encodeULEB128(FieldVals[I-1], Buffer);
+    for (p = Buffer; *p >= 128 ; ++p)
+      TableInfo.Table.push_back(*p);
+    TableInfo.Table.push_back(*p);
+    // Push location for NumToSkip backpatching.
+    TableInfo.FixupStack.back().push_back(TableInfo.Table.size());
+    // The fixup is always 16-bits, so go ahead and allocate the space
+    // in the table so all our relative position calculations work OK even
+    // before we fully resolve the real value here.
+    TableInfo.Table.push_back(0);
+    TableInfo.Table.push_back(0);
+  }
+
+  // Check for soft failure of the match.
+  emitSoftFailTableEntry(TableInfo, Opc);
+
+  TableInfo.Table.push_back(MCD::OPC_Decode);
+  uint8_t Buffer[8], *p;
+  encodeULEB128(Opc, Buffer);
+  for (p = Buffer; *p >= 128 ; ++p)
+    TableInfo.Table.push_back(*p);
+  TableInfo.Table.push_back(*p);
+
+  unsigned DIdx = getDecoderIndex(TableInfo.Decoders, Opc);
+  SmallString<16> Bytes;
+  raw_svector_ostream S(Bytes);
+  encodeULEB128(DIdx, S);
+  S.flush();
+
+  // Decoder index
+  for (unsigned i = 0, e = Bytes.size(); i != e; ++i)
+    TableInfo.Table.push_back(Bytes[i]);
+}
+
+// Emits table entries to decode the singleton, and then to decode the rest.
+void FilterChooser::emitSingletonTableEntry(DecoderTableInfo &TableInfo,
+                                            const Filter &Best) const {
+  unsigned Opc = Best.getSingletonOpc();
 
-  o.indent(Indentation) << "if (";
-  if (emitPredicateMatch(o, Indentation, Opc)) {
-    o << " &&\n";
-    o.indent(Indentation+4);
-  }
+  // Complex singletons need predicate checks from the first singleton
+  // to refer forward to the variable FilterChooser that follows.
+  TableInfo.FixupStack.push_back(FixupList());
 
-  for (I = Size; I != 0; --I) {
-    NumBits = EndBits[I-1] - StartBits[I-1] + 1;
-    o << "fieldFromInstruction" << BitWidth << "(insn, "
-      << StartBits[I-1] << ", " << NumBits
-      << ") == " << FieldVals[I-1];
-    if (I > 1)
-      o << " && ";
-    else
-      o << ") {\n";
-  }
-  emitSoftFailCheck(o, Indentation+2, Opc);
-  o.indent(Indentation) << "  MI.setOpcode(" << Opc << ");\n";
-  std::map<unsigned, std::vector<OperandInfo> >::const_iterator OpIter =
-    Operands.find(Opc);
-  const std::vector<OperandInfo>& InsnOperands = OpIter->second;
-  for (std::vector<OperandInfo>::const_iterator
-       I = InsnOperands.begin(), E = InsnOperands.end(); I != E; ++I) {
-    // If a custom instruction decoder was specified, use that.
-    if (I->numFields() == 0 && I->Decoder.size()) {
-      o.indent(Indentation) << "  " << Emitter->GuardPrefix << I->Decoder
-                            << "(MI, insn, Address, Decoder)"
-                            << Emitter->GuardPostfix << "\n";
-      break;
-    }
+  emitSingletonTableEntry(TableInfo, Opc);
 
-    emitBinaryParser(o, Indentation, *I);
-  }
-  o.indent(Indentation) << "  return " << Emitter->ReturnOK << "; // "
-                        << nameWithID(Opc) << '\n';
-  o.indent(Indentation) << "}\n";
+  resolveTableFixups(TableInfo.Table, TableInfo.FixupStack.back(),
+                     TableInfo.Table.size());
+  TableInfo.FixupStack.pop_back();
 
-  return false;
+  Best.getVariableFC().emitTableEntries(TableInfo);
 }
 
-// Emits code to decode the singleton, and then to decode the rest.
-void FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
-                                         const Filter &Best) const {
-
-  unsigned Opc = Best.getSingletonOpc();
-
-  emitSingletonDecoder(o, Indentation, Opc);
-
-  // Emit code for the rest.
-  o.indent(Indentation) << "else\n";
-
-  Indentation += 2;
-  Best.getVariableFC().emit(o, Indentation);
-  Indentation -= 2;
-}
 
 // Assign a single filter and run with it.  Top level API client can initialize
 // with a single filter to start the filtering process.
@@ -1119,7 +1433,7 @@
     }
   }
 
-  unsigned BitIndex, InsnIndex;
+  unsigned BitIndex;
 
   // We maintain BIT_WIDTH copies of the bitAttrs automaton.
   // The automaton consumes the corresponding bit from each
@@ -1149,7 +1463,7 @@
     else
       bitAttrs.push_back(ATTR_NONE);
 
-  for (InsnIndex = 0; InsnIndex < numInstructions; ++InsnIndex) {
+  for (unsigned InsnIndex = 0; InsnIndex < numInstructions; ++InsnIndex) {
     insn_t insn;
 
     insnWithID(insn, Opcodes[InsnIndex]);
@@ -1200,7 +1514,7 @@
   bitAttr_t RA = ATTR_NONE;
   unsigned StartBit = 0;
 
-  for (BitIndex = 0; BitIndex < BitWidth; BitIndex++) {
+  for (BitIndex = 0; BitIndex < BitWidth; ++BitIndex) {
     bitAttr_t bitAttr = bitAttrs[BitIndex];
 
     assert(bitAttr != ATTR_NONE && "Bit without attributes");
@@ -1341,36 +1655,29 @@
   BestIndex = -1;
 }
 
-// Emits code to decode our share of instructions.  Returns true if the
-// emitted code causes a return, which occurs if we know how to decode
-// the instruction at this level or the instruction is not decodeable.
-bool FilterChooser::emit(raw_ostream &o, unsigned &Indentation) const {
-  if (Opcodes.size() == 1)
+// emitTableEntries - Emit state machine entries to decode our share of
+// instructions.
+void FilterChooser::emitTableEntries(DecoderTableInfo &TableInfo) const {
+  if (Opcodes.size() == 1) {
     // There is only one instruction in the set, which is great!
     // Call emitSingletonDecoder() to see whether there are any remaining
     // encodings bits.
-    return emitSingletonDecoder(o, Indentation, Opcodes[0]);
+    emitSingletonTableEntry(TableInfo, Opcodes[0]);
+    return;
+  }
 
   // Choose the best filter to do the decodings!
   if (BestIndex != -1) {
     const Filter &Best = Filters[BestIndex];
     if (Best.getNumFiltered() == 1)
-      emitSingletonDecoder(o, Indentation, Best);
+      emitSingletonTableEntry(TableInfo, Best);
     else
-      Best.emit(o, Indentation);
-    return false;
+      Best.emitTableEntry(TableInfo);
+    return;
   }
 
-  // We don't know how to decode these instructions!  Return 0 and dump the
-  // conflict set!
-  o.indent(Indentation) << "return 0;" << " // Conflict set: ";
-  for (int i = 0, N = Opcodes.size(); i < N; ++i) {
-    o << nameWithID(Opcodes[i]);
-    if (i < (N - 1))
-      o << ", ";
-    else
-      o << '\n';
-  }
+  // We don't know how to decode these instructions!  Dump the
+  // conflict set and bail.
 
   // Print out useful conflict information for postmortem analysis.
   errs() << "Decoding Conflict:\n";
@@ -1385,8 +1692,6 @@
              getBitsField(*AllInstructions[Opcodes[i]]->TheDef, "Inst"));
     errs() << '\n';
   }
-
-  return true;
 }
 
 static bool populateInstruction(const CodeGenInstruction &CGI, unsigned Opc,
@@ -1453,8 +1758,8 @@
     // for decoding register classes.
     // FIXME: This need to be extended to handle instructions with custom
     // decoder methods, and operands with (simple) MIOperandInfo's.
-    TypedInit *TI = dynamic_cast<TypedInit*>(NI->first);
-    RecordRecTy *Type = dynamic_cast<RecordRecTy*>(TI->getType());
+    TypedInit *TI = cast<TypedInit>(NI->first);
+    RecordRecTy *Type = cast<RecordRecTy>(TI->getType());
     Record *TypeRecord = Type->getRecord();
     bool isReg = false;
     if (TypeRecord->isSubClassOf("RegisterOperand"))
@@ -1466,7 +1771,7 @@
 
     RecordVal *DecoderString = TypeRecord->getValue("DecoderMethod");
     StringInit *String = DecoderString ?
-      dynamic_cast<StringInit*>(DecoderString->getValue()) : 0;
+      dyn_cast<StringInit>(DecoderString->getValue()) : 0;
     if (!isReg && String && String->getValue() != "")
       Decoder = String->getValue();
 
@@ -1477,11 +1782,11 @@
 
     for (unsigned bi = 0; bi < Bits.getNumBits(); ++bi) {
       VarInit *Var = 0;
-      VarBitInit *BI = dynamic_cast<VarBitInit*>(Bits.getBit(bi));
+      VarBitInit *BI = dyn_cast<VarBitInit>(Bits.getBit(bi));
       if (BI)
-        Var = dynamic_cast<VarInit*>(BI->getVariable());
+        Var = dyn_cast<VarInit>(BI->getBitVar());
       else
-        Var = dynamic_cast<VarInit*>(Bits.getBit(bi));
+        Var = dyn_cast<VarInit>(Bits.getBit(bi));
 
       if (!Var) {
         if (Base != ~0U) {
@@ -1549,62 +1854,168 @@
   return true;
 }
 
-static void emitHelper(llvm::raw_ostream &o, unsigned BitWidth) {
-  unsigned Indentation = 0;
-  std::string WidthStr = "uint" + utostr(BitWidth) + "_t";
-
-  o << '\n';
-
-  o.indent(Indentation) << "static " << WidthStr <<
-    " fieldFromInstruction" << BitWidth <<
-    "(" << WidthStr <<" insn, unsigned startBit, unsigned numBits)\n";
-
-  o.indent(Indentation) << "{\n";
-
-  ++Indentation; ++Indentation;
-  o.indent(Indentation) << "assert(startBit + numBits <= " << BitWidth
-                        << " && \"Instruction field out of bounds!\");\n";
-  o << '\n';
-  o.indent(Indentation) << WidthStr << " fieldMask;\n";
-  o << '\n';
-  o.indent(Indentation) << "if (numBits == " << BitWidth << ")\n";
-
-  ++Indentation; ++Indentation;
-  o.indent(Indentation) << "fieldMask = (" << WidthStr << ")-1;\n";
-  --Indentation; --Indentation;
-
-  o.indent(Indentation) << "else\n";
-
-  ++Indentation; ++Indentation;
-  o.indent(Indentation) << "fieldMask = ((1 << numBits) - 1) << startBit;\n";
-  --Indentation; --Indentation;
-
-  o << '\n';
-  o.indent(Indentation) << "return (insn & fieldMask) >> startBit;\n";
-  --Indentation; --Indentation;
-
-  o.indent(Indentation) << "}\n";
-
-  o << '\n';
+// emitFieldFromInstruction - Emit the templated helper function
+// fieldFromInstruction().
+static void emitFieldFromInstruction(formatted_raw_ostream &OS) {
+  OS << "// Helper function for extracting fields from encoded instructions.\n"
+     << "template<typename InsnType>\n"
+   << "static InsnType fieldFromInstruction(InsnType insn, unsigned startBit,\n"
+     << "                                     unsigned numBits) {\n"
+     << "    assert(startBit + numBits <= (sizeof(InsnType)*8) &&\n"
+     << "           \"Instruction field out of bounds!\");\n"
+     << "    InsnType fieldMask;\n"
+     << "    if (numBits == sizeof(InsnType)*8)\n"
+     << "      fieldMask = (InsnType)(-1LL);\n"
+     << "    else\n"
+     << "      fieldMask = ((1 << numBits) - 1) << startBit;\n"
+     << "    return (insn & fieldMask) >> startBit;\n"
+     << "}\n\n";
+}
+
+// emitDecodeInstruction - Emit the templated helper function
+// decodeInstruction().
+static void emitDecodeInstruction(formatted_raw_ostream &OS) {
+  OS << "template<typename InsnType>\n"
+     << "static DecodeStatus decodeInstruction(const uint8_t DecodeTable[], MCInst &MI,\n"
+     << "                                      InsnType insn, uint64_t Address,\n"
+     << "                                      const void *DisAsm,\n"
+     << "                                      const MCSubtargetInfo &STI) {\n"
+     << "  uint64_t Bits = STI.getFeatureBits();\n"
+     << "\n"
+     << "  const uint8_t *Ptr = DecodeTable;\n"
+     << "  uint32_t CurFieldValue = 0;\n"
+     << "  DecodeStatus S = MCDisassembler::Success;\n"
+     << "  for (;;) {\n"
+     << "    ptrdiff_t Loc = Ptr - DecodeTable;\n"
+     << "    switch (*Ptr) {\n"
+     << "    default:\n"
+     << "      errs() << Loc << \": Unexpected decode table opcode!\\n\";\n"
+     << "      return MCDisassembler::Fail;\n"
+     << "    case MCD::OPC_ExtractField: {\n"
+     << "      unsigned Start = *++Ptr;\n"
+     << "      unsigned Len = *++Ptr;\n"
+     << "      ++Ptr;\n"
+     << "      CurFieldValue = fieldFromInstruction(insn, Start, Len);\n"
+     << "      DEBUG(dbgs() << Loc << \": OPC_ExtractField(\" << Start << \", \"\n"
+     << "                   << Len << \"): \" << CurFieldValue << \"\\n\");\n"
+     << "      break;\n"
+     << "    }\n"
+     << "    case MCD::OPC_FilterValue: {\n"
+     << "      // Decode the field value.\n"
+     << "      unsigned Len;\n"
+     << "      InsnType Val = decodeULEB128(++Ptr, &Len);\n"
+     << "      Ptr += Len;\n"
+     << "      // NumToSkip is a plain 16-bit integer.\n"
+     << "      unsigned NumToSkip = *Ptr++;\n"
+     << "      NumToSkip |= (*Ptr++) << 8;\n"
+     << "\n"
+     << "      // Perform the filter operation.\n"
+     << "      if (Val != CurFieldValue)\n"
+     << "        Ptr += NumToSkip;\n"
+     << "      DEBUG(dbgs() << Loc << \": OPC_FilterValue(\" << Val << \", \" << NumToSkip\n"
+     << "                   << \"): \" << ((Val != CurFieldValue) ? \"FAIL:\" : \"PASS:\")\n"
+     << "                   << \" continuing at \" << (Ptr - DecodeTable) << \"\\n\");\n"
+     << "\n"
+     << "      break;\n"
+     << "    }\n"
+     << "    case MCD::OPC_CheckField: {\n"
+     << "      unsigned Start = *++Ptr;\n"
+     << "      unsigned Len = *++Ptr;\n"
+     << "      InsnType FieldValue = fieldFromInstruction(insn, Start, Len);\n"
+     << "      // Decode the field value.\n"
+     << "      uint32_t ExpectedValue = decodeULEB128(++Ptr, &Len);\n"
+     << "      Ptr += Len;\n"
+     << "      // NumToSkip is a plain 16-bit integer.\n"
+     << "      unsigned NumToSkip = *Ptr++;\n"
+     << "      NumToSkip |= (*Ptr++) << 8;\n"
+     << "\n"
+     << "      // If the actual and expected values don't match, skip.\n"
+     << "      if (ExpectedValue != FieldValue)\n"
+     << "        Ptr += NumToSkip;\n"
+     << "      DEBUG(dbgs() << Loc << \": OPC_CheckField(\" << Start << \", \"\n"
+     << "                   << Len << \", \" << ExpectedValue << \", \" << NumToSkip\n"
+     << "                   << \"): FieldValue = \" << FieldValue << \", ExpectedValue = \"\n"
+     << "                   << ExpectedValue << \": \"\n"
+     << "                   << ((ExpectedValue == FieldValue) ? \"PASS\\n\" : \"FAIL\\n\"));\n"
+     << "      break;\n"
+     << "    }\n"
+     << "    case MCD::OPC_CheckPredicate: {\n"
+     << "      unsigned Len;\n"
+     << "      // Decode the Predicate Index value.\n"
+     << "      unsigned PIdx = decodeULEB128(++Ptr, &Len);\n"
+     << "      Ptr += Len;\n"
+     << "      // NumToSkip is a plain 16-bit integer.\n"
+     << "      unsigned NumToSkip = *Ptr++;\n"
+     << "      NumToSkip |= (*Ptr++) << 8;\n"
+     << "      // Check the predicate.\n"
+     << "      bool Pred;\n"
+     << "      if (!(Pred = checkDecoderPredicate(PIdx, Bits)))\n"
+     << "        Ptr += NumToSkip;\n"
+     << "      (void)Pred;\n"
+     << "      DEBUG(dbgs() << Loc << \": OPC_CheckPredicate(\" << PIdx << \"): \"\n"
+     << "            << (Pred ? \"PASS\\n\" : \"FAIL\\n\"));\n"
+     << "\n"
+     << "      break;\n"
+     << "    }\n"
+     << "    case MCD::OPC_Decode: {\n"
+     << "      unsigned Len;\n"
+     << "      // Decode the Opcode value.\n"
+     << "      unsigned Opc = decodeULEB128(++Ptr, &Len);\n"
+     << "      Ptr += Len;\n"
+     << "      unsigned DecodeIdx = decodeULEB128(Ptr, &Len);\n"
+     << "      Ptr += Len;\n"
+     << "      DEBUG(dbgs() << Loc << \": OPC_Decode: opcode \" << Opc\n"
+     << "                   << \", using decoder \" << DecodeIdx << \"\\n\" );\n"
+     << "      DEBUG(dbgs() << \"----- DECODE SUCCESSFUL -----\\n\");\n"
+     << "\n"
+     << "      MI.setOpcode(Opc);\n"
+     << "      return decodeToMCInst(S, DecodeIdx, insn, MI, Address, DisAsm);\n"
+     << "    }\n"
+     << "    case MCD::OPC_SoftFail: {\n"
+     << "      // Decode the mask values.\n"
+     << "      unsigned Len;\n"
+     << "      InsnType PositiveMask = decodeULEB128(++Ptr, &Len);\n"
+     << "      Ptr += Len;\n"
+     << "      InsnType NegativeMask = decodeULEB128(Ptr, &Len);\n"
+     << "      Ptr += Len;\n"
+     << "      bool Fail = (insn & PositiveMask) || (~insn & NegativeMask);\n"
+     << "      if (Fail)\n"
+     << "        S = MCDisassembler::SoftFail;\n"
+     << "      DEBUG(dbgs() << Loc << \": OPC_SoftFail: \" << (Fail ? \"FAIL\\n\":\"PASS\\n\"));\n"
+     << "      break;\n"
+     << "    }\n"
+     << "    case MCD::OPC_Fail: {\n"
+     << "      DEBUG(dbgs() << Loc << \": OPC_Fail\\n\");\n"
+     << "      return MCDisassembler::Fail;\n"
+     << "    }\n"
+     << "    }\n"
+     << "  }\n"
+     << "  llvm_unreachable(\"bogosity detected in disassembler state machine!\");\n"
+     << "}\n\n";
 }
 
 // Emits disassembler code for instruction decoding.
 void FixedLenDecoderEmitter::run(raw_ostream &o) {
-  o << "#include \"llvm/MC/MCInst.h\"\n";
-  o << "#include \"llvm/Support/DataTypes.h\"\n";
-  o << "#include <assert.h>\n";
-  o << '\n';
-  o << "namespace llvm {\n\n";
+  formatted_raw_ostream OS(o);
+  OS << "#include \"llvm/MC/MCInst.h\"\n";
+  OS << "#include \"llvm/Support/Debug.h\"\n";
+  OS << "#include \"llvm/Support/DataTypes.h\"\n";
+  OS << "#include \"llvm/Support/LEB128.h\"\n";
+  OS << "#include \"llvm/Support/raw_ostream.h\"\n";
+  OS << "#include <assert.h>\n";
+  OS << '\n';
+  OS << "namespace llvm {\n\n";
+
+  emitFieldFromInstruction(OS);
 
   // Parameterize the decoders based on namespace and instruction width.
-  const std::vector<const CodeGenInstruction*> &NumberedInstructions =
-    Target.getInstructionsByEnumValue();
+  NumberedInstructions = &Target.getInstructionsByEnumValue();
   std::map<std::pair<std::string, unsigned>,
            std::vector<unsigned> > OpcMap;
   std::map<unsigned, std::vector<OperandInfo> > Operands;
 
-  for (unsigned i = 0; i < NumberedInstructions.size(); ++i) {
-    const CodeGenInstruction *Inst = NumberedInstructions[i];
+  for (unsigned i = 0; i < NumberedInstructions->size(); ++i) {
+    const CodeGenInstruction *Inst = NumberedInstructions->at(i);
     const Record *Def = Inst->TheDef;
     unsigned Size = Def->getValueAsInt("Size");
     if (Def->getValueAsString("Namespace") == "TargetOpcode" ||
@@ -1622,24 +2033,48 @@
     }
   }
 
+  DecoderTableInfo TableInfo;
   std::set<unsigned> Sizes;
   for (std::map<std::pair<std::string, unsigned>,
                 std::vector<unsigned> >::const_iterator
        I = OpcMap.begin(), E = OpcMap.end(); I != E; ++I) {
-    // If we haven't visited this instruction width before, emit the
-    // helper method to extract fields.
-    if (!Sizes.count(I->first.second)) {
-      emitHelper(o, 8*I->first.second);
-      Sizes.insert(I->first.second);
-    }
-
     // Emit the decoder for this namespace+width combination.
-    FilterChooser FC(NumberedInstructions, I->second, Operands,
+    FilterChooser FC(*NumberedInstructions, I->second, Operands,
                      8*I->first.second, this);
-    FC.emitTop(o, 0, I->first.first);
-  }
 
-  o << "\n} // End llvm namespace \n";
+    // The decode table is cleared for each top level decoder function. The
+    // predicates and decoders themselves, however, are shared across all
+    // decoders to give more opportunities for uniquing.
+    TableInfo.Table.clear();
+    TableInfo.FixupStack.clear();
+    TableInfo.Table.reserve(16384);
+    TableInfo.FixupStack.push_back(FixupList());
+    FC.emitTableEntries(TableInfo);
+    // Any NumToSkip fixups in the top level scope can resolve to the
+    // OPC_Fail at the end of the table.
+    assert(TableInfo.FixupStack.size() == 1 && "fixup stack phasing error!");
+    // Resolve any NumToSkip fixups in the current scope.
+    resolveTableFixups(TableInfo.Table, TableInfo.FixupStack.back(),
+                       TableInfo.Table.size());
+    TableInfo.FixupStack.clear();
+
+    TableInfo.Table.push_back(MCD::OPC_Fail);
+
+    // Print the table to the output stream.
+    emitTable(OS, TableInfo.Table, 0, FC.getBitWidth(), I->first.first);
+    OS.flush();
+  }
+
+  // Emit the predicate function.
+  emitPredicateFunction(OS, TableInfo.Predicates, 0);
+
+  // Emit the decoder function.
+  emitDecoderFunction(OS, TableInfo.Decoders, 0);
+
+  // Emit the main entry point for the decoder, decodeInstruction().
+  emitDecodeInstruction(OS);
+
+  OS << "\n} // End llvm namespace\n";
 }
 
 namespace llvm {

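The decode tables emitted above are flat byte arrays walked by decodeInstruction(): filter, check-field, and check-predicate entries carry a ULEB128-encoded operand followed by a plain 16-bit little-endian NumToSkip, and emitTableEntries() pushes fixups (presumably the byte offsets of those NumToSkip fields) onto the fixup stack so resolveTableFixups() can patch them once the end of a scope is known. Below is a minimal sketch of one such patch step, assuming the skip is counted from the byte just past the 16-bit field, which matches the Ptr arithmetic in the emitted reader; the helper name is invented and the real resolveTableFixups() may use a different convention.

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative only: patch the 16-bit little-endian NumToSkip field that
// starts at byte FixupIdx so the reader jumps to byte DestIdx.  The emitted
// reader does "NumToSkip = *Ptr++; NumToSkip |= (*Ptr++) << 8;
// Ptr += NumToSkip;", so the skip is relative to FixupIdx + 2.
static void patchNumToSkip(std::vector<uint8_t> &Table, size_t FixupIdx,
                           size_t DestIdx) {
  size_t Delta = DestIdx - (FixupIdx + 2);
  assert(Delta < 0x10000 && "NumToSkip overflows 16 bits");
  Table[FixupIdx] = uint8_t(Delta);
  Table[FixupIdx + 1] = uint8_t(Delta >> 8);
}
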
Modified: llvm/branches/AMDILBackend/utils/TableGen/InstrInfoEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/InstrInfoEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/InstrInfoEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/InstrInfoEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -16,8 +16,10 @@
 #include "CodeGenDAGPatterns.h"
 #include "CodeGenSchedule.h"
 #include "CodeGenTarget.h"
+#include "TableGenBackends.h"
 #include "SequenceToOffsetTable.h"
 #include "llvm/ADT/StringExtras.h"
+#include "llvm/TableGen/Error.h"
 #include "llvm/TableGen/Record.h"
 #include "llvm/TableGen/TableGenBackend.h"
 #include <algorithm>
@@ -89,7 +91,7 @@
       for (unsigned j = 0, e = Inst.Operands[i].MINumOperands; j != e; ++j) {
         OperandList.push_back(Inst.Operands[i]);
 
-        Record *OpR = dynamic_cast<DefInit*>(MIOI->getArg(j))->getDef();
+        Record *OpR = cast<DefInit>(MIOI->getArg(j))->getDef();
         OperandList.back().Rec = OpR;
       }
     }
@@ -299,16 +301,15 @@
                                   const OperandInfoMapTy &OpInfo,
                                   raw_ostream &OS) {
   int MinOperands = 0;
-  if (!Inst.Operands.size() == 0)
+  if (!Inst.Operands.empty())
     // Each logical operand can be multiple MI operands.
     MinOperands = Inst.Operands.back().MIOperandNo +
                   Inst.Operands.back().MINumOperands;
 
-  Record *ItinDef = Inst.TheDef->getValueAsDef("Itinerary");
   OS << "  { ";
   OS << Num << ",\t" << MinOperands << ",\t"
      << Inst.Operands.NumDefs << ",\t"
-     << SchedModels.getItinClassIdx(ItinDef) << ",\t"
+     << SchedModels.getSchedClassIdx(Inst) << ",\t"
      << Inst.TheDef->getValueAsInt("Size") << ",\t0";
 
   // Emit all of the target independent flags...
@@ -319,6 +320,7 @@
   if (Inst.isCompare)          OS << "|(1<<MCID::Compare)";
   if (Inst.isMoveImm)          OS << "|(1<<MCID::MoveImm)";
   if (Inst.isBitcast)          OS << "|(1<<MCID::Bitcast)";
+  if (Inst.isSelect)           OS << "|(1<<MCID::Select)";
   if (Inst.isBarrier)          OS << "|(1<<MCID::Barrier)";
   if (Inst.hasDelaySlot)       OS << "|(1<<MCID::DelaySlot)";
   if (Inst.isCall)             OS << "|(1<<MCID::Call)";
@@ -342,13 +344,14 @@
 
   // Emit all of the target-specific flags...
   BitsInit *TSF = Inst.TheDef->getValueAsBitsInit("TSFlags");
-  if (!TSF) throw "no TSFlags?";
+  if (!TSF)
+    PrintFatalError("no TSFlags?");
   uint64_t Value = 0;
   for (unsigned i = 0, e = TSF->getNumBits(); i != e; ++i) {
-    if (BitInit *Bit = dynamic_cast<BitInit*>(TSF->getBit(i)))
+    if (BitInit *Bit = dyn_cast<BitInit>(TSF->getBit(i)))
       Value |= uint64_t(Bit->getValue()) << i;
     else
-      throw "Invalid TSFlags bit in " + Inst.TheDef->getName();
+      PrintFatalError("Invalid TSFlags bit in " + Inst.TheDef->getName());
   }
   OS << ", 0x";
   OS.write_hex(Value);
@@ -415,6 +418,7 @@
 
 void EmitInstrInfo(RecordKeeper &RK, raw_ostream &OS) {
   InstrInfoEmitter(RK).run(OS);
+  EmitMapTable(RK, OS);
 }
 
 } // End llvm namespace

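One detail of the InstrInfoEmitter hunk above worth spelling out: the TSFlags value is produced by walking the record's BitsInit and OR-ing each concrete bit into a uint64_t, with PrintFatalError now replacing the old throw when a bit is unset. The packing itself reduces to the following standalone sketch; std::vector<bool> stands in for BitsInit purely so the snippet compiles on its own.

#include <cstdint>
#include <vector>

// Sketch of the TSFlags packing: bit i of the record becomes bit i of the
// emitted 64-bit value.
static uint64_t packTSFlags(const std::vector<bool> &Bits) {
  uint64_t Value = 0;
  for (unsigned i = 0, e = Bits.size(); i != e; ++i)
    Value |= uint64_t(Bits[i]) << i;
  return Value;
}
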
Modified: llvm/branches/AMDILBackend/utils/TableGen/IntrinsicEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/IntrinsicEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/IntrinsicEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/IntrinsicEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -15,6 +15,7 @@
 #include "CodeGenTarget.h"
 #include "SequenceToOffsetTable.h"
 #include "llvm/ADT/StringExtras.h"
+#include "llvm/TableGen/Error.h"
 #include "llvm/TableGen/Record.h"
 #include "llvm/TableGen/StringMatcher.h"
 #include "llvm/TableGen/TableGenBackend.h"
@@ -249,7 +250,7 @@
   if (EVT(VT).isInteger()) {
     unsigned BitWidth = EVT(VT).getSizeInBits();
     switch (BitWidth) {
-    default: throw "unhandled integer type width in intrinsic!";
+    default: PrintFatalError("unhandled integer type width in intrinsic!");
     case 1: return Sig.push_back(IIT_I1);
     case 8: return Sig.push_back(IIT_I8);
     case 16: return Sig.push_back(IIT_I16);
@@ -259,7 +260,7 @@
   }
   
   switch (VT) {
-  default: throw "unhandled MVT in intrinsic!";
+  default: PrintFatalError("unhandled MVT in intrinsic!");
   case MVT::f32: return Sig.push_back(IIT_F32);
   case MVT::f64: return Sig.push_back(IIT_F64);
   case MVT::Metadata: return Sig.push_back(IIT_METADATA);
@@ -328,7 +329,7 @@
   if (EVT(VT).isVector()) {
     EVT VVT = VT;
     switch (VVT.getVectorNumElements()) {
-    default: throw "unhandled vector type width in intrinsic!";
+    default: PrintFatalError("unhandled vector type width in intrinsic!");
     case 2: Sig.push_back(IIT_V2); break;
     case 4: Sig.push_back(IIT_V4); break;
     case 8: Sig.push_back(IIT_V8); break;
@@ -510,10 +511,10 @@
   OS << "// Add parameter attributes that are not common to all intrinsics.\n";
   OS << "#ifdef GET_INTRINSIC_ATTRIBUTES\n";
   if (TargetOnly)
-    OS << "static AttrListPtr getAttributes(" << TargetPrefix 
+    OS << "static AttrListPtr getAttributes(LLVMContext &C, " << TargetPrefix
        << "Intrinsic::ID id) {\n";
   else
-    OS << "AttrListPtr Intrinsic::getAttributes(ID id) {\n";
+    OS << "AttrListPtr Intrinsic::getAttributes(LLVMContext &C, ID id) {\n";
 
   // Compute the maximum number of attribute arguments and the map
   typedef std::map<const CodeGenIntrinsic*, unsigned,
@@ -547,6 +548,7 @@
   OS << "  AttributeWithIndex AWI[" << maxArgAttrs+1 << "];\n";
   OS << "  unsigned NumAttrs = 0;\n";
   OS << "  if (id != 0) {\n";
+  OS << "    SmallVector<Attributes::AttrVal, 8> AttrVec;\n";
   OS << "    switch(IntrinsicsToAttributesMap[id - ";
   if (TargetOnly)
     OS << "Intrinsic::num_intrinsics";
@@ -564,58 +566,49 @@
     unsigned numAttrs = 0;
 
     // The argument attributes are already sorted by argument index.
-    for (unsigned ai = 0, ae = intrinsic.ArgumentAttributes.size(); ai != ae;) {
-      unsigned argNo = intrinsic.ArgumentAttributes[ai].first;
+    unsigned ai = 0, ae = intrinsic.ArgumentAttributes.size();
+    if (ae) {
+      while (ai != ae) {
+        unsigned argNo = intrinsic.ArgumentAttributes[ai].first;
+
+        OS << "      AttrVec.clear();\n";
+
+        do {
+          switch (intrinsic.ArgumentAttributes[ai].second) {
+          case CodeGenIntrinsic::NoCapture:
+            OS << "      AttrVec.push_back(Attributes::NoCapture);\n";
+            break;
+          }
 
-      OS << "      AWI[" << numAttrs++ << "] = AttributeWithIndex::get("
-         << argNo+1 << ", ";
+          ++ai;
+        } while (ai != ae && intrinsic.ArgumentAttributes[ai].first == argNo);
 
-      bool moreThanOne = false;
-
-      do {
-        if (moreThanOne) OS << '|';
-
-        switch (intrinsic.ArgumentAttributes[ai].second) {
-        case CodeGenIntrinsic::NoCapture:
-          OS << "Attribute::NoCapture";
-          break;
-        }
-
-        ++ai;
-        moreThanOne = true;
-      } while (ai != ae && intrinsic.ArgumentAttributes[ai].first == argNo);
-
-      OS << ");\n";
+        OS << "      AWI[" << numAttrs++ << "] = AttributeWithIndex::get(C, "
+           << argNo+1 << ", AttrVec);\n";
+      }
     }
 
     ModRefKind modRef = getModRefKind(intrinsic);
 
     if (!intrinsic.canThrow || modRef || intrinsic.isNoReturn) {
-      OS << "      AWI[" << numAttrs++ << "] = AttributeWithIndex::get(~0, ";
-      bool Emitted = false;
-      if (!intrinsic.canThrow) {
-        OS << "Attribute::NoUnwind";
-        Emitted = true;
-      }
-      
-      if (intrinsic.isNoReturn) {
-        if (Emitted) OS << '|';
-        OS << "Attribute::NoReturn";
-        Emitted = true;
-      }
+      OS << "      AttrVec.clear();\n";
+
+      if (!intrinsic.canThrow)
+        OS << "      AttrVec.push_back(Attributes::NoUnwind);\n";
+      if (intrinsic.isNoReturn)
+        OS << "      AttrVec.push_back(Attributes::NoReturn);\n";
 
       switch (modRef) {
       case MRK_none: break;
       case MRK_readonly:
-        if (Emitted) OS << '|';
-        OS << "Attribute::ReadOnly";
+        OS << "      AttrVec.push_back(Attributes::ReadOnly);\n";
         break;
       case MRK_readnone:
-        if (Emitted) OS << '|';
-        OS << "Attribute::ReadNone"; 
+        OS << "      AttrVec.push_back(Attributes::ReadNone);\n"; 
         break;
       }
-      OS << ");\n";
+      OS << "      AWI[" << numAttrs++ << "] = AttributeWithIndex::get(C, "
+         << "AttrListPtr::FunctionIndex, AttrVec);\n";
     }
 
     if (numAttrs) {
@@ -628,7 +621,7 @@
   
   OS << "    }\n";
   OS << "  }\n";
-  OS << "  return AttrListPtr::get(ArrayRef<AttributeWithIndex>(AWI, "
+  OS << "  return AttrListPtr::get(C, ArrayRef<AttributeWithIndex>(AWI, "
              "NumAttrs));\n";
   OS << "}\n";
   OS << "#endif // GET_INTRINSIC_ATTRIBUTES\n\n";
@@ -700,8 +693,8 @@
       
       if (!BIM.insert(std::make_pair(Ints[i].GCCBuiltinName,
                                      Ints[i].EnumName)).second)
-        throw "Intrinsic '" + Ints[i].TheDef->getName() +
-              "': duplicate GCC builtin name!";
+        PrintFatalError("Intrinsic '" + Ints[i].TheDef->getName() +
+              "': duplicate GCC builtin name!");
     }
   }
   

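For the IntrinsicEmitter change above, the generated getAttributes() body moves from OR-ing Attribute flags into one call to building a per-index vector of Attributes::AttrVal values. Pieced together from the OS << strings in the hunk, one case of the generated switch now looks roughly like the excerpt below; this is a fragment of generated output, not standalone code, and the particular attributes and argument index are invented for illustration.

// Fragment of the generated getAttributes() for a hypothetical intrinsic
// whose first argument is nocapture and which cannot unwind:
AttrVec.clear();
AttrVec.push_back(Attributes::NoCapture);
AWI[0] = AttributeWithIndex::get(C, 1, AttrVec);

AttrVec.clear();
AttrVec.push_back(Attributes::NoUnwind);
AWI[1] = AttributeWithIndex::get(C, AttrListPtr::FunctionIndex, AttrVec);
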
Modified: llvm/branches/AMDILBackend/utils/TableGen/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/Makefile?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/Makefile (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/Makefile Tue Jan 15 11:16:16 2013
@@ -10,8 +10,6 @@
 LEVEL = ../..
 TOOLNAME = llvm-tblgen
 USEDLIBS = LLVMTableGen.a LLVMSupport.a
-REQUIRES_EH := 1
-REQUIRES_RTTI := 1
 
 # This tool has no plugins, optimize startup time.
 TOOL_NO_EXPORTS = 1

Modified: llvm/branches/AMDILBackend/utils/TableGen/PseudoLoweringEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/PseudoLoweringEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/PseudoLoweringEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/PseudoLoweringEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -74,7 +74,7 @@
                      IndexedMap<OpData> &OperandMap, unsigned BaseIdx) {
   unsigned OpsAdded = 0;
   for (unsigned i = 0, e = Dag->getNumArgs(); i != e; ++i) {
-    if (DefInit *DI = dynamic_cast<DefInit*>(Dag->getArg(i))) {
+    if (DefInit *DI = dyn_cast<DefInit>(Dag->getArg(i))) {
       // Physical register reference. Explicit check for the special case
       // "zero_reg" definition.
       if (DI->getDef()->isSubClassOf("Register") ||
@@ -90,7 +90,7 @@
       // FIXME: We probably shouldn't ever get a non-zero BaseIdx here.
       assert(BaseIdx == 0 && "Named subargument in pseudo expansion?!");
       if (DI->getDef() != Insn.Operands[BaseIdx + i].Rec)
-        throw TGError(Rec->getLoc(),
+        PrintFatalError(Rec->getLoc(),
                       "Pseudo operand type '" + DI->getDef()->getName() +
                       "' does not match expansion operand type '" +
                       Insn.Operands[BaseIdx + i].Rec->getName() + "'");
@@ -100,11 +100,11 @@
       for (unsigned I = 0, E = Insn.Operands[i].MINumOperands; I != E; ++I)
         OperandMap[BaseIdx + i + I].Kind = OpData::Operand;
       OpsAdded += Insn.Operands[i].MINumOperands;
-    } else if (IntInit *II = dynamic_cast<IntInit*>(Dag->getArg(i))) {
+    } else if (IntInit *II = dyn_cast<IntInit>(Dag->getArg(i))) {
       OperandMap[BaseIdx + i].Kind = OpData::Imm;
       OperandMap[BaseIdx + i].Data.Imm = II->getValue();
       ++OpsAdded;
-    } else if (DagInit *SubDag = dynamic_cast<DagInit*>(Dag->getArg(i))) {
+    } else if (DagInit *SubDag = dyn_cast<DagInit>(Dag->getArg(i))) {
       // Just add the operands recursively. This is almost certainly
       // a constant value for a complex operand (> 1 MI operand).
       unsigned NewOps =
@@ -127,24 +127,24 @@
   assert(Dag && "Missing result instruction in pseudo expansion!");
   DEBUG(dbgs() << "  Result: " << *Dag << "\n");
 
-  DefInit *OpDef = dynamic_cast<DefInit*>(Dag->getOperator());
+  DefInit *OpDef = dyn_cast<DefInit>(Dag->getOperator());
   if (!OpDef)
-    throw TGError(Rec->getLoc(), Rec->getName() +
+    PrintFatalError(Rec->getLoc(), Rec->getName() +
                   " has unexpected operator type!");
   Record *Operator = OpDef->getDef();
   if (!Operator->isSubClassOf("Instruction"))
-    throw TGError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
-                                 "' is not an instruction!");
+    PrintFatalError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
+                    "' is not an instruction!");
 
   CodeGenInstruction Insn(Operator);
 
   if (Insn.isCodeGenOnly || Insn.isPseudo)
-    throw TGError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
-                                 "' cannot be another pseudo instruction!");
+    PrintFatalError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
+                    "' cannot be another pseudo instruction!");
 
   if (Insn.Operands.size() != Dag->getNumArgs())
-    throw TGError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
-                                 "' operand count mismatch");
+    PrintFatalError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
+                    "' operand count mismatch");
 
   unsigned NumMIOperands = 0;
   for (unsigned i = 0, e = Insn.Operands.size(); i != e; ++i)
@@ -156,7 +156,7 @@
 
   // If there are more operands that weren't in the DAG, they have to
   // be operands that have default values, or we have an error. Currently,
-  // PredicateOperand and OptionalDefOperand both have default values.
+  // operands that are a subclass of OperandWithDefaultOp have default values.
 
 
   // Validate that each result pattern argument has a matching (by name)
@@ -179,9 +179,9 @@
     StringMap<unsigned>::iterator SourceOp =
       SourceOperands.find(Dag->getArgName(i));
     if (SourceOp == SourceOperands.end())
-      throw TGError(Rec->getLoc(),
-                    "Pseudo output operand '" + Dag->getArgName(i) +
-                    "' has no matching source operand.");
+      PrintFatalError(Rec->getLoc(),
+                      "Pseudo output operand '" + Dag->getArgName(i) +
+                      "' has no matching source operand.");
     // Map the source operand to the destination operand index for each
     // MachineInstr operand.
     for (unsigned I = 0, E = Insn.Operands[i].MINumOperands; I != E; ++I)
@@ -267,7 +267,7 @@
 
 void PseudoLoweringEmitter::run(raw_ostream &o) {
   Record *ExpansionClass = Records.getClass("PseudoInstExpansion");
-  Record *InstructionClass = Records.getClass("PseudoInstExpansion");
+  Record *InstructionClass = Records.getClass("Instruction");
   assert(ExpansionClass && "PseudoInstExpansion class definition missing!");
   assert(InstructionClass && "Instruction class definition missing!");
 

Modified: llvm/branches/AMDILBackend/utils/TableGen/RegisterInfoEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/RegisterInfoEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/RegisterInfoEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/RegisterInfoEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -62,6 +62,8 @@
 
   void EmitRegUnitPressure(raw_ostream &OS, const CodeGenRegBank &RegBank,
                            const std::string &ClassName);
+  void emitComposeSubRegIndices(raw_ostream &OS, CodeGenRegBank &RegBank,
+                                const std::string &ClassName);
 };
 } // End anonymous namespace
 
@@ -145,9 +147,9 @@
     if (!Namespace.empty())
       OS << "namespace " << Namespace << " {\n";
     OS << "enum {\n  NoSubRegister,\n";
-    for (unsigned i = 0, e = Bank.getNumNamedIndices(); i != e; ++i)
+    for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
       OS << "  " << SubRegIndices[i]->getName() << ",\t// " << i+1 << "\n";
-    OS << "  NUM_TARGET_NAMED_SUBREGS\n};\n";
+    OS << "  NUM_TARGET_SUBREGS\n};\n";
     if (!Namespace.empty())
       OS << "}\n";
   }
@@ -325,7 +327,7 @@
     if (!V || !V->getValue())
       continue;
 
-    DefInit *DI = dynamic_cast<DefInit*>(V->getValue());
+    DefInit *DI = cast<DefInit>(V->getValue());
     Record *Alias = DI->getDef();
     DwarfRegNums[Reg] = DwarfRegNums[Alias];
   }
@@ -530,6 +532,102 @@
   OS << Val;
 }
 
+// Try to combine Idx's compose map into Vec if it is compatible.
+// Return false if it's not possible.
+static bool combine(const CodeGenSubRegIndex *Idx,
+                    SmallVectorImpl<CodeGenSubRegIndex*> &Vec) {
+  const CodeGenSubRegIndex::CompMap &Map = Idx->getComposites();
+  for (CodeGenSubRegIndex::CompMap::const_iterator
+       I = Map.begin(), E = Map.end(); I != E; ++I) {
+    CodeGenSubRegIndex *&Entry = Vec[I->first->EnumValue - 1];
+    if (Entry && Entry != I->second)
+      return false;
+  }
+
+  // All entries are compatible. Make it so.
+  for (CodeGenSubRegIndex::CompMap::const_iterator
+       I = Map.begin(), E = Map.end(); I != E; ++I)
+    Vec[I->first->EnumValue - 1] = I->second;
+  return true;
+}
+
+static const char *getMinimalTypeForRange(uint64_t Range) {
+  assert(Range < 0xFFFFFFFFULL && "Enum too large");
+  if (Range > 0xFFFF)
+    return "uint32_t";
+  if (Range > 0xFF)
+    return "uint16_t";
+  return "uint8_t";
+}
+
+void
+RegisterInfoEmitter::emitComposeSubRegIndices(raw_ostream &OS,
+                                              CodeGenRegBank &RegBank,
+                                              const std::string &ClName) {
+  ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();
+  OS << "unsigned " << ClName
+     << "::composeSubRegIndicesImpl(unsigned IdxA, unsigned IdxB) const {\n";
+
+  // Many sub-register indexes are composition-compatible, meaning that
+  //
+  //   compose(IdxA, IdxB) == compose(IdxA', IdxB)
+  //
+  // for many IdxA, IdxA' pairs. Not all sub-register indexes can be composed.
+  // The illegal entries can be used as wildcards to compress the table further.
+
+  // Map each Sub-register index to a compatible table row.
+  SmallVector<unsigned, 4> RowMap;
+  SmallVector<SmallVector<CodeGenSubRegIndex*, 4>, 4> Rows;
+
+  for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
+    unsigned Found = ~0u;
+    for (unsigned r = 0, re = Rows.size(); r != re; ++r) {
+      if (combine(SubRegIndices[i], Rows[r])) {
+        Found = r;
+        break;
+      }
+    }
+    if (Found == ~0u) {
+      Found = Rows.size();
+      Rows.resize(Found + 1);
+      Rows.back().resize(SubRegIndices.size());
+      combine(SubRegIndices[i], Rows.back());
+    }
+    RowMap.push_back(Found);
+  }
+
+  // Output the row map if there are multiple rows.
+  if (Rows.size() > 1) {
+    OS << "  static const " << getMinimalTypeForRange(Rows.size())
+       << " RowMap[" << SubRegIndices.size() << "] = {\n    ";
+    for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
+      OS << RowMap[i] << ", ";
+    OS << "\n  };\n";
+  }
+
+  // Output the rows.
+  OS << "  static const " << getMinimalTypeForRange(SubRegIndices.size()+1)
+     << " Rows[" << Rows.size() << "][" << SubRegIndices.size() << "] = {\n";
+  for (unsigned r = 0, re = Rows.size(); r != re; ++r) {
+    OS << "    { ";
+    for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
+      if (Rows[r][i])
+        OS << Rows[r][i]->EnumValue << ", ";
+      else
+        OS << "0, ";
+    OS << "},\n";
+  }
+  OS << "  };\n\n";
+
+  OS << "  --IdxA; assert(IdxA < " << SubRegIndices.size() << ");\n"
+     << "  --IdxB; assert(IdxB < " << SubRegIndices.size() << ");\n";
+  if (Rows.size() > 1)
+    OS << "  return Rows[RowMap[IdxA]][IdxB];\n";
+  else
+    OS << "  return Rows[0][IdxB];\n";
+  OS << "}\n\n";
+}
+
 //
 // runMCDesc - Print out MC register descriptions.
 //
@@ -751,7 +849,7 @@
     BitsInit *BI = Reg->getValueAsBitsInit("HWEncoding");
     uint64_t Value = 0;
     for (unsigned b = 0, be = BI->getNumBits(); b != be; ++b) {
-      if (BitInit *B = dynamic_cast<BitInit*>(BI->getBit(b)))
+      if (BitInit *B = dyn_cast<BitInit>(BI->getBit(b)))
       Value |= (uint64_t)B->getValue() << b;
     }
     OS << "  " << Value << ",\n";
@@ -770,7 +868,7 @@
      << TargetName << "RegDiffLists, "
      << TargetName << "RegStrings, "
      << TargetName << "SubRegIdxLists, "
-     << SubRegIndices.size() << ",\n"
+     << (SubRegIndices.size() + 1) << ",\n"
      << "  " << TargetName << "RegEncodingTable);\n\n";
 
   EmitRegMapping(OS, Regs, false);
@@ -802,16 +900,17 @@
      << "  virtual bool needsStackRealignment(const MachineFunction &) const\n"
      << "     { return false; }\n";
   if (!RegBank.getSubRegIndices().empty()) {
-    OS << "  unsigned composeSubRegIndices(unsigned, unsigned) const;\n"
-      << "  const TargetRegisterClass *"
+    OS << "  virtual unsigned composeSubRegIndicesImpl"
+       << "(unsigned, unsigned) const;\n"
+      << "  virtual const TargetRegisterClass *"
       "getSubClassWithSubReg(const TargetRegisterClass*, unsigned) const;\n";
   }
-  OS << "  const RegClassWeight &getRegClassWeight("
+  OS << "  virtual const RegClassWeight &getRegClassWeight("
      << "const TargetRegisterClass *RC) const;\n"
-     << "  unsigned getNumRegPressureSets() const;\n"
-     << "  const char *getRegPressureSetName(unsigned Idx) const;\n"
-     << "  unsigned getRegPressureSetLimit(unsigned Idx) const;\n"
-     << "  const int *getRegClassPressureSets("
+     << "  virtual unsigned getNumRegPressureSets() const;\n"
+     << "  virtual const char *getRegPressureSetName(unsigned Idx) const;\n"
+     << "  virtual unsigned getRegPressureSetLimit(unsigned Idx) const;\n"
+     << "  virtual const int *getRegClassPressureSets("
      << "const TargetRegisterClass *RC) const;\n"
      << "};\n\n";
 
@@ -876,26 +975,23 @@
   VTSeqs.emit(OS, printSimpleValueType, "MVT::Other");
   OS << "};\n";
 
-  // Emit SubRegIndex names, skipping 0
-  OS << "\nstatic const char *const SubRegIndexTable[] = { \"";
+  // Emit SubRegIndex names, skipping 0.
+  OS << "\nstatic const char *const SubRegIndexNameTable[] = { \"";
   for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
     OS << SubRegIndices[i]->getName();
-    if (i+1 != e)
+    if (i + 1 != e)
       OS << "\", \"";
   }
   OS << "\" };\n\n";
 
-  // Emit names of the anonymous subreg indices.
-  unsigned NamedIndices = RegBank.getNumNamedIndices();
-  if (SubRegIndices.size() > NamedIndices) {
-    OS << "  enum {";
-    for (unsigned i = NamedIndices, e = SubRegIndices.size(); i != e; ++i) {
-      OS << "\n    " << SubRegIndices[i]->getName() << " = " << i+1;
-      if (i+1 != e)
-        OS << ',';
-    }
-    OS << "\n  };\n\n";
+  // Emit SubRegIndex lane masks, including 0.
+  OS << "\nstatic const unsigned SubRegIndexLaneMaskTable[] = {\n  ~0u,\n";
+  for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
+    OS << format("  0x%08x, // ", SubRegIndices[i]->LaneMask)
+       << SubRegIndices[i]->getName() << '\n';
   }
+  OS << " };\n\n";
+
   OS << "\n";
 
   // Now that all of the structs have been emitted, emit the instances.
@@ -1057,31 +1153,8 @@
 
   std::string ClassName = Target.getName() + "GenRegisterInfo";
 
-  // Emit composeSubRegIndices
-  if (!SubRegIndices.empty()) {
-    OS << "unsigned " << ClassName
-      << "::composeSubRegIndices(unsigned IdxA, unsigned IdxB) const {\n"
-      << "  switch (IdxA) {\n"
-      << "  default:\n    return IdxB;\n";
-    for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
-      bool Open = false;
-      for (unsigned j = 0; j != e; ++j) {
-        if (CodeGenSubRegIndex *Comp =
-            SubRegIndices[i]->compose(SubRegIndices[j])) {
-          if (!Open) {
-            OS << "  case " << SubRegIndices[i]->getQualifiedName()
-              << ": switch(IdxB) {\n    default: return IdxB;\n";
-            Open = true;
-          }
-          OS << "    case " << SubRegIndices[j]->getQualifiedName()
-            << ": return " << Comp->getQualifiedName() << ";\n";
-        }
-      }
-      if (Open)
-        OS << "    }\n";
-    }
-    OS << "  }\n}\n\n";
-  }
+  if (!SubRegIndices.empty())
+    emitComposeSubRegIndices(OS, RegBank, ClassName);
 
   // Emit getSubClassWithSubReg.
   if (!SubRegIndices.empty()) {
@@ -1095,7 +1168,7 @@
     else if (RegisterClasses.size() < UINT16_MAX)
       OS << "  static const uint16_t Table[";
     else
-      throw "Too many register classes.";
+      PrintFatalError("Too many register classes.");
     OS << RegisterClasses.size() << "][" << SubRegIndices.size() << "] = {\n";
     for (unsigned rci = 0, rce = RegisterClasses.size(); rci != rce; ++rci) {
       const CodeGenRegisterClass &RC = *RegisterClasses[rci];
@@ -1133,7 +1206,7 @@
      << "(unsigned RA, unsigned DwarfFlavour, unsigned EHFlavour)\n"
      << "  : TargetRegisterInfo(" << TargetName << "RegInfoDesc"
      << ", RegisterClasses, RegisterClasses+" << RegisterClasses.size() <<",\n"
-     << "             SubRegIndexTable) {\n"
+     << "             SubRegIndexNameTable, SubRegIndexLaneMaskTable) {\n"
      << "  InitMCRegisterInfo(" << TargetName << "RegDesc, "
      << Regs.size()+1 << ", RA,\n                     " << TargetName
      << "MCRegisterClasses, " << RegisterClasses.size() << ",\n"
@@ -1142,7 +1215,7 @@
      << "                     " << TargetName << "RegDiffLists,\n"
      << "                     " << TargetName << "RegStrings,\n"
      << "                     " << TargetName << "SubRegIdxLists,\n"
-     << "                     " << SubRegIndices.size() << ",\n"
+     << "                     " << SubRegIndices.size() + 1 << ",\n"
      << "                     " << TargetName << "RegEncodingTable);\n\n";
 
   EmitRegMapping(OS, Regs, true);

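The composeSubRegIndices rewrite above compresses the old nested switch into two static arrays: composition-compatible sub-register indexes share a row, RowMap selects the row for IdxA, and the row is indexed by IdxB, with 0 acting as the wildcard/illegal entry. With invented data for three indexes, the generated function has the following shape; only the structure mirrors emitComposeSubRegIndices(), the values are made up.

#include <cassert>

// Toy version of the emitted lookup: indexes 1 and 2 share row 0 because
// their compose maps were combinable; index 3 gets its own row.
static unsigned composeSubRegIndicesImpl(unsigned IdxA, unsigned IdxB) {
  static const unsigned char RowMap[3] = {0, 0, 1};
  static const unsigned char Rows[2][3] = {
    {0, 3, 0},   // row 0: composing with index 2 yields index 3
    {2, 0, 0},   // row 1
  };
  --IdxA; assert(IdxA < 3);
  --IdxB; assert(IdxB < 3);
  return Rows[RowMap[IdxA]][IdxB];
}
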
Modified: llvm/branches/AMDILBackend/utils/TableGen/SequenceToOffsetTable.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/SequenceToOffsetTable.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/SequenceToOffsetTable.h (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/SequenceToOffsetTable.h Tue Jan 15 11:16:16 2013
@@ -29,8 +29,8 @@
 /// Compute the layout of a table that contains all the sequences, possibly by
 /// reusing entries.
 ///
-/// @param SeqT The sequence container. (vector or string).
-/// @param Less A stable comparator for SeqT elements.
+/// @tparam SeqT The sequence container. (vector or string).
+/// @tparam Less A stable comparator for SeqT elements.
 template<typename SeqT, typename Less = std::less<typename SeqT::value_type> >
 class SequenceToOffsetTable {
   typedef typename SeqT::value_type ElemT;
@@ -82,7 +82,7 @@
   }
 
   bool empty() const { return Seqs.empty(); }
-  
+
   /// layout - Computes the final table layout.
   void layout() {
     assert(Entries == 0 && "Can only call layout() once");

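For intuition on the entry reuse described in the SequenceToOffsetTable comment above: storage is shared when one sequence is a suffix of a longer one, so a single flat array can serve both and each sequence is identified only by its offset. A made-up two-sequence example of that layout:

// Invented data: {2, 3} is a suffix of {1, 2, 3}, so the elements are
// emitted once and each sequence is addressed by an offset into the table.
static const int FlatTable[] = {1, 2, 3};
static const unsigned OffsetOfSeqA = 0; // {1, 2, 3}
static const unsigned OffsetOfSeqB = 1; // {2, 3}
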
Modified: llvm/branches/AMDILBackend/utils/TableGen/SetTheory.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/SetTheory.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/SetTheory.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/SetTheory.cpp Tue Jan 15 11:16:16 2013
@@ -27,20 +27,20 @@
 
 // (add a, b, ...) Evaluate and union all arguments.
 struct AddOp : public SetTheory::Operator {
-  void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
-    ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts);
+  void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
+    ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc);
   }
 };
 
 // (sub Add, Sub, ...) Set difference.
 struct SubOp : public SetTheory::Operator {
-  void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
+  void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
     if (Expr->arg_size() < 2)
-      throw "Set difference needs at least two arguments: " +
-        Expr->getAsString();
+      PrintFatalError(Loc, "Set difference needs at least two arguments: " +
+        Expr->getAsString());
     RecSet Add, Sub;
-    ST.evaluate(*Expr->arg_begin(), Add);
-    ST.evaluate(Expr->arg_begin() + 1, Expr->arg_end(), Sub);
+    ST.evaluate(*Expr->arg_begin(), Add, Loc);
+    ST.evaluate(Expr->arg_begin() + 1, Expr->arg_end(), Sub, Loc);
     for (RecSet::iterator I = Add.begin(), E = Add.end(); I != E; ++I)
       if (!Sub.count(*I))
         Elts.insert(*I);
@@ -49,12 +49,13 @@
 
 // (and S1, S2) Set intersection.
 struct AndOp : public SetTheory::Operator {
-  void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
+  void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
     if (Expr->arg_size() != 2)
-      throw "Set intersection requires two arguments: " + Expr->getAsString();
+      PrintFatalError(Loc, "Set intersection requires two arguments: " +
+        Expr->getAsString());
     RecSet S1, S2;
-    ST.evaluate(Expr->arg_begin()[0], S1);
-    ST.evaluate(Expr->arg_begin()[1], S2);
+    ST.evaluate(Expr->arg_begin()[0], S1, Loc);
+    ST.evaluate(Expr->arg_begin()[1], S2, Loc);
     for (RecSet::iterator I = S1.begin(), E = S1.end(); I != E; ++I)
       if (S2.count(*I))
         Elts.insert(*I);
@@ -65,17 +66,19 @@
 struct SetIntBinOp : public SetTheory::Operator {
   virtual void apply2(SetTheory &ST, DagInit *Expr,
                      RecSet &Set, int64_t N,
-                     RecSet &Elts) =0;
+                     RecSet &Elts, ArrayRef<SMLoc> Loc) =0;
 
-  void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
+  void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
     if (Expr->arg_size() != 2)
-      throw "Operator requires (Op Set, Int) arguments: " + Expr->getAsString();
+      PrintFatalError(Loc, "Operator requires (Op Set, Int) arguments: " +
+        Expr->getAsString());
     RecSet Set;
-    ST.evaluate(Expr->arg_begin()[0], Set);
-    IntInit *II = dynamic_cast<IntInit*>(Expr->arg_begin()[1]);
+    ST.evaluate(Expr->arg_begin()[0], Set, Loc);
+    IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[1]);
     if (!II)
-      throw "Second argument must be an integer: " + Expr->getAsString();
-    apply2(ST, Expr, Set, II->getValue(), Elts);
+      PrintFatalError(Loc, "Second argument must be an integer: " +
+        Expr->getAsString());
+    apply2(ST, Expr, Set, II->getValue(), Elts, Loc);
   }
 };
 
@@ -83,9 +86,10 @@
 struct ShlOp : public SetIntBinOp {
   void apply2(SetTheory &ST, DagInit *Expr,
              RecSet &Set, int64_t N,
-             RecSet &Elts) {
+             RecSet &Elts, ArrayRef<SMLoc> Loc) {
     if (N < 0)
-      throw "Positive shift required: " + Expr->getAsString();
+      PrintFatalError(Loc, "Positive shift required: " +
+        Expr->getAsString());
     if (unsigned(N) < Set.size())
       Elts.insert(Set.begin() + N, Set.end());
   }
@@ -95,9 +99,10 @@
 struct TruncOp : public SetIntBinOp {
   void apply2(SetTheory &ST, DagInit *Expr,
              RecSet &Set, int64_t N,
-             RecSet &Elts) {
+             RecSet &Elts, ArrayRef<SMLoc> Loc) {
     if (N < 0)
-      throw "Positive length required: " + Expr->getAsString();
+      PrintFatalError(Loc, "Positive length required: " +
+        Expr->getAsString());
     if (unsigned(N) > Set.size())
       N = Set.size();
     Elts.insert(Set.begin(), Set.begin() + N);
@@ -112,7 +117,7 @@
 
   void apply2(SetTheory &ST, DagInit *Expr,
              RecSet &Set, int64_t N,
-             RecSet &Elts) {
+             RecSet &Elts, ArrayRef<SMLoc> Loc) {
     if (Reverse)
       N = -N;
     // N > 0 -> rotate left, N < 0 -> rotate right.
@@ -131,9 +136,10 @@
 struct DecimateOp : public SetIntBinOp {
   void apply2(SetTheory &ST, DagInit *Expr,
              RecSet &Set, int64_t N,
-             RecSet &Elts) {
+             RecSet &Elts, ArrayRef<SMLoc> Loc) {
     if (N <= 0)
-      throw "Positive stride required: " + Expr->getAsString();
+      PrintFatalError(Loc, "Positive stride required: " +
+        Expr->getAsString());
     for (unsigned I = 0; I < Set.size(); I += N)
       Elts.insert(Set[I]);
   }
@@ -141,12 +147,12 @@
 
 // (interleave S1, S2, ...) Interleave elements of the arguments.
 struct InterleaveOp : public SetTheory::Operator {
-  void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
+  void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
     // Evaluate the arguments individually.
     SmallVector<RecSet, 4> Args(Expr->getNumArgs());
     unsigned MaxSize = 0;
     for (unsigned i = 0, e = Expr->getNumArgs(); i != e; ++i) {
-      ST.evaluate(Expr->getArg(i), Args[i]);
+      ST.evaluate(Expr->getArg(i), Args[i], Loc);
       MaxSize = std::max(MaxSize, unsigned(Args[i].size()));
     }
     // Interleave arguments into Elts.
@@ -159,41 +165,42 @@
 
 // (sequence "Format", From, To) Generate a sequence of records by name.
 struct SequenceOp : public SetTheory::Operator {
-  void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
+  void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
     int Step = 1;
     if (Expr->arg_size() > 4)
-      throw "Bad args to (sequence \"Format\", From, To): " +
-        Expr->getAsString();
+      PrintFatalError(Loc, "Bad args to (sequence \"Format\", From, To): " +
+        Expr->getAsString());
     else if (Expr->arg_size() == 4) {
-      if (IntInit *II = dynamic_cast<IntInit*>(Expr->arg_begin()[3])) {
+      if (IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[3])) {
         Step = II->getValue();
       } else
-        throw "Stride must be an integer: " + Expr->getAsString();
+        PrintFatalError(Loc, "Stride must be an integer: " +
+          Expr->getAsString());
     }
 
     std::string Format;
-    if (StringInit *SI = dynamic_cast<StringInit*>(Expr->arg_begin()[0]))
+    if (StringInit *SI = dyn_cast<StringInit>(Expr->arg_begin()[0]))
       Format = SI->getValue();
     else
-      throw "Format must be a string: " + Expr->getAsString();
+      PrintFatalError(Loc,  "Format must be a string: " + Expr->getAsString());
 
     int64_t From, To;
-    if (IntInit *II = dynamic_cast<IntInit*>(Expr->arg_begin()[1]))
+    if (IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[1]))
       From = II->getValue();
     else
-      throw "From must be an integer: " + Expr->getAsString();
+      PrintFatalError(Loc, "From must be an integer: " + Expr->getAsString());
     if (From < 0 || From >= (1 << 30))
-      throw "From out of range";
+      PrintFatalError(Loc, "From out of range");
 
-    if (IntInit *II = dynamic_cast<IntInit*>(Expr->arg_begin()[2]))
+    if (IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[2]))
       To = II->getValue();
     else
-      throw "From must be an integer: " + Expr->getAsString();
+      PrintFatalError(Loc, "From must be an integer: " + Expr->getAsString());
     if (To < 0 || To >= (1 << 30))
-      throw "To out of range";
+      PrintFatalError(Loc, "To out of range");
 
     RecordKeeper &Records =
-      dynamic_cast<DefInit&>(*Expr->getOperator()).getDef()->getRecords();
+      cast<DefInit>(Expr->getOperator())->getDef()->getRecords();
 
     Step *= From <= To ? 1 : -1;
     while (true) {
@@ -206,7 +213,8 @@
       OS << format(Format.c_str(), unsigned(From));
       Record *Rec = Records.getDef(OS.str());
       if (!Rec)
-        throw "No def named '" + Name + "': " + Expr->getAsString();
+        PrintFatalError(Loc, "No def named '" + Name + "': " +
+          Expr->getAsString());
       // Try to reevaluate Rec in case it is a set.
       if (const RecVec *Result = ST.expand(Rec))
         Elts.insert(Result->begin(), Result->end());
@@ -225,7 +233,7 @@
   FieldExpander(StringRef fn) : FieldName(fn) {}
 
   void expand(SetTheory &ST, Record *Def, RecSet &Elts) {
-    ST.evaluate(Def->getValueInit(FieldName), Elts);
+    ST.evaluate(Def->getValueInit(FieldName), Elts, Def->getLoc());
   }
 };
 } // end anonymous namespace
@@ -259,9 +267,9 @@
   addExpander(ClassName, new FieldExpander(FieldName));
 }
 
-void SetTheory::evaluate(Init *Expr, RecSet &Elts) {
+void SetTheory::evaluate(Init *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
   // A def in a list can be a just an element, or it may expand.
-  if (DefInit *Def = dynamic_cast<DefInit*>(Expr)) {
+  if (DefInit *Def = dyn_cast<DefInit>(Expr)) {
     if (const RecVec *Result = expand(Def->getDef()))
       return Elts.insert(Result->begin(), Result->end());
     Elts.insert(Def->getDef());
@@ -269,20 +277,20 @@
   }
 
   // Lists simply expand.
-  if (ListInit *LI = dynamic_cast<ListInit*>(Expr))
-    return evaluate(LI->begin(), LI->end(), Elts);
+  if (ListInit *LI = dyn_cast<ListInit>(Expr))
+    return evaluate(LI->begin(), LI->end(), Elts, Loc);
 
   // Anything else must be a DAG.
-  DagInit *DagExpr = dynamic_cast<DagInit*>(Expr);
+  DagInit *DagExpr = dyn_cast<DagInit>(Expr);
   if (!DagExpr)
-    throw "Invalid set element: " + Expr->getAsString();
-  DefInit *OpInit = dynamic_cast<DefInit*>(DagExpr->getOperator());
+    PrintFatalError(Loc, "Invalid set element: " + Expr->getAsString());
+  DefInit *OpInit = dyn_cast<DefInit>(DagExpr->getOperator());
   if (!OpInit)
-    throw "Bad set expression: " + Expr->getAsString();
+    PrintFatalError(Loc, "Bad set expression: " + Expr->getAsString());
   Operator *Op = Operators.lookup(OpInit->getDef()->getName());
   if (!Op)
-    throw "Unknown set operator: " + Expr->getAsString();
-  Op->apply(*this, DagExpr, Elts);
+    PrintFatalError(Loc, "Unknown set operator: " + Expr->getAsString());
+  Op->apply(*this, DagExpr, Elts, Loc);
 }
 
 const RecVec *SetTheory::expand(Record *Set) {
@@ -292,19 +300,19 @@
     return &I->second;
 
   // This is the first time we see Set. Find a suitable expander.
-  try {
-    const std::vector<Record*> &SC = Set->getSuperClasses();
-    for (unsigned i = 0, e = SC.size(); i != e; ++i)
-      if (Expander *Exp = Expanders.lookup(SC[i]->getName())) {
-        // This breaks recursive definitions.
-        RecVec &EltVec = Expansions[Set];
-        RecSet Elts;
-        Exp->expand(*this, Set, Elts);
-        EltVec.assign(Elts.begin(), Elts.end());
-        return &EltVec;
-      }
-  } catch (const std::string &Error) {
-    throw TGError(Set->getLoc(), Error);
+  const std::vector<Record*> &SC = Set->getSuperClasses();
+  for (unsigned i = 0, e = SC.size(); i != e; ++i) {
+    // Skip unnamed superclasses.
+    if (!dyn_cast<StringInit>(SC[i]->getNameInit()))
+      continue;
+    if (Expander *Exp = Expanders.lookup(SC[i]->getName())) {
+      // This breaks recursive definitions.
+      RecVec &EltVec = Expansions[Set];
+      RecSet Elts;
+      Exp->expand(*this, Set, Elts);
+      EltVec.assign(Elts.begin(), Elts.end());
+      return &EltVec;
+    }
   }
 
   // Set is not expandable.

Modified: llvm/branches/AMDILBackend/utils/TableGen/SetTheory.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/SetTheory.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/SetTheory.h (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/SetTheory.h Tue Jan 15 11:16:16 2013
@@ -49,6 +49,7 @@
 
 #include "llvm/ADT/StringMap.h"
 #include "llvm/ADT/SetVector.h"
+#include "llvm/Support/SourceMgr.h"
 #include <map>
 #include <vector>
 
@@ -72,7 +73,8 @@
 
     /// apply - Apply this operator to Expr's arguments and insert the result
     /// in Elts.
-    virtual void apply(SetTheory&, DagInit *Expr, RecSet &Elts) =0;
+    virtual void apply(SetTheory&, DagInit *Expr, RecSet &Elts,
+                       ArrayRef<SMLoc> Loc) =0;
   };
 
   /// Expander - A callback function that can transform a Record representing a
@@ -119,13 +121,13 @@
   void addOperator(StringRef Name, Operator*);
 
   /// evaluate - Evaluate Expr and append the resulting set to Elts.
-  void evaluate(Init *Expr, RecSet &Elts);
+  void evaluate(Init *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc);
 
   /// evaluate - Evaluate a sequence of Inits and append to Elts.
   template<typename Iter>
-  void evaluate(Iter begin, Iter end, RecSet &Elts) {
+  void evaluate(Iter begin, Iter end, RecSet &Elts, ArrayRef<SMLoc> Loc) {
     while (begin != end)
-      evaluate(*begin++, Elts);
+      evaluate(*begin++, Elts, Loc);
   }
 
   /// expand - Expand a record into a set of elements if possible.  Return a

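With the SetTheory interface change above, every Operator::apply() and SetTheory::evaluate() call now threads an ArrayRef<SMLoc> through, so failures surface as PrintFatalError diagnostics carrying source locations instead of thrown strings. A hypothetical operator written against the new signature could look like the sketch below; "(first S)" is an invented example, not an operator defined in SetTheory.cpp.

#include "SetTheory.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
using namespace llvm;

// (first S) - invented example: keep only the first element of S.
struct FirstOp : public SetTheory::Operator {
  void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
             ArrayRef<SMLoc> Loc) {
    if (Expr->arg_size() != 1)
      PrintFatalError(Loc, "Expected exactly one argument: " +
                      Expr->getAsString());
    SetTheory::RecSet Set;
    ST.evaluate(Expr->arg_begin()[0], Set, Loc);
    if (!Set.empty())
      Elts.insert(Set[0]);
  }
};

// Registered the same way as the built-in operators, e.g.:
//   ST.addOperator("first", new FirstOp());
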
Modified: llvm/branches/AMDILBackend/utils/TableGen/SubtargetEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/utils/TableGen/SubtargetEmitter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/utils/TableGen/SubtargetEmitter.cpp (original)
+++ llvm/branches/AMDILBackend/utils/TableGen/SubtargetEmitter.cpp Tue Jan 15 11:16:16 2013
@@ -11,13 +11,18 @@
 //
 //===----------------------------------------------------------------------===//
 
+#define DEBUG_TYPE "subtarget-emitter"
+
 #include "CodeGenTarget.h"
 #include "CodeGenSchedule.h"
 #include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/MC/MCInstrItineraries.h"
-#include "llvm/Support/Debug.h"
+#include "llvm/TableGen/Error.h"
 #include "llvm/TableGen/Record.h"
 #include "llvm/TableGen/TableGenBackend.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
 #include <algorithm>
 #include <map>
 #include <string>
@@ -26,6 +31,32 @@
 
 namespace {
 class SubtargetEmitter {
+  // Each processor has a SchedClassDesc table with an entry for each SchedClass.
+  // The SchedClassDesc table indexes into a global write resource table, write
+  // latency table, and read advance table.
+  struct SchedClassTables {
+    std::vector<std::vector<MCSchedClassDesc> > ProcSchedClasses;
+    std::vector<MCWriteProcResEntry> WriteProcResources;
+    std::vector<MCWriteLatencyEntry> WriteLatencies;
+    std::vector<std::string> WriterNames;
+    std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;
+
+    // Reserve an invalid entry at index 0
+    SchedClassTables() {
+      ProcSchedClasses.resize(1);
+      WriteProcResources.resize(1);
+      WriteLatencies.resize(1);
+      WriterNames.push_back("InvalidWrite");
+      ReadAdvanceEntries.resize(1);
+    }
+  };
+
+  struct LessWriteProcResources {
+    bool operator()(const MCWriteProcResEntry &LHS,
+                    const MCWriteProcResEntry &RHS) {
+      return LHS.ProcResourceIdx < RHS.ProcResourceIdx;
+    }
+  };
 
   RecordKeeper &Records;
   CodeGenSchedModels &SchedModels;
@@ -50,8 +81,18 @@
                          &ProcItinLists);
   void EmitProcessorProp(raw_ostream &OS, const Record *R, const char *Name,
                          char Separator);
+  void EmitProcessorResources(const CodeGenProcModel &ProcModel,
+                              raw_ostream &OS);
+  Record *FindWriteResources(const CodeGenSchedRW &SchedWrite,
+                             const CodeGenProcModel &ProcModel);
+  Record *FindReadAdvance(const CodeGenSchedRW &SchedRead,
+                          const CodeGenProcModel &ProcModel);
+  void GenSchedClassTables(const CodeGenProcModel &ProcModel,
+                           SchedClassTables &SchedTables);
+  void EmitSchedClassTables(SchedClassTables &SchedTables, raw_ostream &OS);
   void EmitProcessorModels(raw_ostream &OS);
   void EmitProcessorLookup(raw_ostream &OS);
+  void EmitSchedModelHelpers(std::string ClassName, raw_ostream &OS);
   void EmitSchedModel(raw_ostream &OS);
   void ParseFeaturesFunction(raw_ostream &OS, unsigned NumFeatures,
                              unsigned NumProcs);
@@ -521,7 +562,7 @@
   std::vector<std::vector<InstrItinerary> >::iterator
       ProcItinListsIter = ProcItinLists.begin();
   for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
-         PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
+         PE = SchedModels.procModelEnd(); PI != PE; ++PI, ++ProcItinListsIter) {
 
     Record *ItinsDef = PI->ItinsDef;
     if (!ItinsDefSet.insert(ItinsDef))
@@ -532,7 +573,7 @@
 
     // Get the itinerary list for the processor.
     assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator");
-    std::vector<InstrItinerary> &ItinList = *ProcItinListsIter++;
+    std::vector<InstrItinerary> &ItinList = *ProcItinListsIter;
 
     OS << "\n";
     OS << "static const llvm::InstrItinerary ";
@@ -578,11 +619,488 @@
   OS << '\n';
 }
 
+void SubtargetEmitter::EmitProcessorResources(const CodeGenProcModel &ProcModel,
+                                              raw_ostream &OS) {
+  char Sep = ProcModel.ProcResourceDefs.empty() ? ' ' : ',';
+
+  OS << "\n// {Name, NumUnits, SuperIdx, IsBuffered}\n";
+  OS << "static const llvm::MCProcResourceDesc "
+     << ProcModel.ModelName << "ProcResources" << "[] = {\n"
+     << "  {DBGFIELD(\"InvalidUnit\")     0, 0, 0}" << Sep << "\n";
+
+  for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) {
+    Record *PRDef = ProcModel.ProcResourceDefs[i];
+
+    // Find the SuperIdx
+    unsigned SuperIdx = 0;
+    Record *SuperDef = 0;
+    if (PRDef->getValueInit("Super")->isComplete()) {
+      SuperDef =
+        SchedModels.findProcResUnits(PRDef->getValueAsDef("Super"), ProcModel);
+      SuperIdx = ProcModel.getProcResourceIdx(SuperDef);
+    }
+    // Emit the ProcResourceDesc
+    if (i+1 == e)
+      Sep = ' ';
+    OS << "  {DBGFIELD(\"" << PRDef->getName() << "\") ";
+    if (PRDef->getName().size() < 15)
+      OS.indent(15 - PRDef->getName().size());
+    OS << PRDef->getValueAsInt("NumUnits") << ", " << SuperIdx << ", "
+       << PRDef->getValueAsBit("Buffered") << "}" << Sep << " // #" << i+1;
+    if (SuperDef)
+      OS << ", Super=" << SuperDef->getName();
+    OS << "\n";
+  }
+  OS << "};\n";
+}
+
+// Find the WriteRes Record that defines processor resources for this
+// SchedWrite.
+Record *SubtargetEmitter::FindWriteResources(
+  const CodeGenSchedRW &SchedWrite, const CodeGenProcModel &ProcModel) {
+
+  // Check if the SchedWrite is already subtarget-specific and directly
+  // specifies a set of processor resources.
+  if (SchedWrite.TheDef->isSubClassOf("SchedWriteRes"))
+    return SchedWrite.TheDef;
+
+  Record *AliasDef = 0;
+  for (RecIter AI = SchedWrite.Aliases.begin(), AE = SchedWrite.Aliases.end();
+       AI != AE; ++AI) {
+    const CodeGenSchedRW &AliasRW =
+      SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));
+    if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
+      Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
+      if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
+        continue;
+    }
+    if (AliasDef)
+      PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
+                    "defined for processor " + ProcModel.ModelName +
+                    ". Ensure only one SchedAlias exists per RW.");
+    AliasDef = AliasRW.TheDef;
+  }
+  if (AliasDef && AliasDef->isSubClassOf("SchedWriteRes"))
+    return AliasDef;
+
+  // Check this processor's list of write resources.
+  Record *ResDef = 0;
+  for (RecIter WRI = ProcModel.WriteResDefs.begin(),
+         WRE = ProcModel.WriteResDefs.end(); WRI != WRE; ++WRI) {
+    if (!(*WRI)->isSubClassOf("WriteRes"))
+      continue;
+    if (AliasDef == (*WRI)->getValueAsDef("WriteType")
+        || SchedWrite.TheDef == (*WRI)->getValueAsDef("WriteType")) {
+      if (ResDef) {
+        PrintFatalError((*WRI)->getLoc(), "Resources are defined for both "
+                      "SchedWrite and its alias on processor " +
+                      ProcModel.ModelName);
+      }
+      ResDef = *WRI;
+    }
+  }
+  // TODO: If ProcModel has a base model (previous generation processor),
+  // then call FindWriteResources recursively with that model here.
+  if (!ResDef) {
+    PrintFatalError(ProcModel.ModelDef->getLoc(),
+                  std::string("Processor does not define resources for ")
+                  + SchedWrite.TheDef->getName());
+  }
+  return ResDef;
+}
+
+/// Find the ReadAdvance record for the given SchedRead on this processor or
+/// return NULL.
+Record *SubtargetEmitter::FindReadAdvance(const CodeGenSchedRW &SchedRead,
+                                          const CodeGenProcModel &ProcModel) {
+  // Check for SchedReads that directly specify a ReadAdvance.
+  if (SchedRead.TheDef->isSubClassOf("SchedReadAdvance"))
+    return SchedRead.TheDef;
+
+  // Check this processor's list of aliases for SchedRead.
+  Record *AliasDef = 0;
+  for (RecIter AI = SchedRead.Aliases.begin(), AE = SchedRead.Aliases.end();
+       AI != AE; ++AI) {
+    const CodeGenSchedRW &AliasRW =
+      SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));
+    if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
+      Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
+      if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
+        continue;
+    }
+    if (AliasDef)
+      PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
+                    "defined for processor " + ProcModel.ModelName +
+                    ". Ensure only one SchedAlias exists per RW.");
+    AliasDef = AliasRW.TheDef;
+  }
+  if (AliasDef && AliasDef->isSubClassOf("SchedReadAdvance"))
+    return AliasDef;
+
+  // Check this processor's ReadAdvanceList.
+  Record *ResDef = 0;
+  for (RecIter RAI = ProcModel.ReadAdvanceDefs.begin(),
+         RAE = ProcModel.ReadAdvanceDefs.end(); RAI != RAE; ++RAI) {
+    if (!(*RAI)->isSubClassOf("ReadAdvance"))
+      continue;
+    if (AliasDef == (*RAI)->getValueAsDef("ReadType")
+        || SchedRead.TheDef == (*RAI)->getValueAsDef("ReadType")) {
+      if (ResDef) {
+        PrintFatalError((*RAI)->getLoc(), "Resources are defined for both "
+                      "SchedRead and its alias on processor " +
+                      ProcModel.ModelName);
+      }
+      ResDef = *RAI;
+    }
+  }
+  // TODO: If ProcModel has a base model (previous generation processor),
+  // then call FindReadAdvance recursively with that model here.
+  if (!ResDef && SchedRead.TheDef->getName() != "ReadDefault") {
+    PrintFatalError(ProcModel.ModelDef->getLoc(),
+                  std::string("Processor does not define resources for ")
+                  + SchedRead.TheDef->getName());
+  }
+  return ResDef;
+}
+
+// Generate the SchedClass table for this processor and update global
+// tables. Must be called for each processor in order.
+void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
+                                           SchedClassTables &SchedTables) {
+  SchedTables.ProcSchedClasses.resize(SchedTables.ProcSchedClasses.size() + 1);
+  if (!ProcModel.hasInstrSchedModel())
+    return;
+
+  std::vector<MCSchedClassDesc> &SCTab = SchedTables.ProcSchedClasses.back();
+  for (CodeGenSchedModels::SchedClassIter SCI = SchedModels.schedClassBegin(),
+         SCE = SchedModels.schedClassEnd(); SCI != SCE; ++SCI) {
+    DEBUG(SCI->dump(&SchedModels));
+
+    SCTab.resize(SCTab.size() + 1);
+    MCSchedClassDesc &SCDesc = SCTab.back();
+    // SCDesc.Name is guarded by NDEBUG
+    SCDesc.NumMicroOps = 0;
+    SCDesc.BeginGroup = false;
+    SCDesc.EndGroup = false;
+    SCDesc.WriteProcResIdx = 0;
+    SCDesc.WriteLatencyIdx = 0;
+    SCDesc.ReadAdvanceIdx = 0;
+
+    // A Variant SchedClass has no resources of its own.
+    if (!SCI->Transitions.empty()) {
+      SCDesc.NumMicroOps = MCSchedClassDesc::VariantNumMicroOps;
+      continue;
+    }
+
+    // Determine if the SchedClass is actually reachable on this processor. If
+    // not, don't try to locate the processor resources; it will fail.
+    // If ProcIndices contains 0, this class applies to all processors.
+    assert(!SCI->ProcIndices.empty() && "expect at least one procidx");
+    if (SCI->ProcIndices[0] != 0) {
+      IdxIter PIPos = std::find(SCI->ProcIndices.begin(),
+                                SCI->ProcIndices.end(), ProcModel.Index);
+      if (PIPos == SCI->ProcIndices.end())
+        continue;
+    }
+    IdxVec Writes = SCI->Writes;
+    IdxVec Reads = SCI->Reads;
+    if (SCI->ItinClassDef) {
+      assert(SCI->InstRWs.empty() && "ItinClass should not have InstRWs");
+      // Check this processor's itinerary class resources.
+      for (RecIter II = ProcModel.ItinRWDefs.begin(),
+             IE = ProcModel.ItinRWDefs.end(); II != IE; ++II) {
+        RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
+        if (std::find(Matched.begin(), Matched.end(), SCI->ItinClassDef)
+            != Matched.end()) {
+          SchedModels.findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"),
+                              Writes, Reads);
+          break;
+        }
+      }
+      if (Writes.empty()) {
+        DEBUG(dbgs() << ProcModel.ItinsDef->getName()
+              << " does not have resources for itinerary class "
+              << SCI->ItinClassDef->getName() << '\n');
+      }
+    }
+    else if (!SCI->InstRWs.empty()) {
+      // This class may have a default ReadWrite list which can be overridden
+      // by InstRW definitions.
+      Record *RWDef = 0;
+      for (RecIter RWI = SCI->InstRWs.begin(), RWE = SCI->InstRWs.end();
+           RWI != RWE; ++RWI) {
+        Record *RWModelDef = (*RWI)->getValueAsDef("SchedModel");
+        if (&ProcModel == &SchedModels.getProcModel(RWModelDef)) {
+          RWDef = *RWI;
+          break;
+        }
+      }
+      if (RWDef) {
+        Writes.clear();
+        Reads.clear();
+        SchedModels.findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
+                            Writes, Reads);
+      }
+    }
+    // Sum resources across all operand writes.
+    std::vector<MCWriteProcResEntry> WriteProcResources;
+    std::vector<MCWriteLatencyEntry> WriteLatencies;
+    std::vector<std::string> WriterNames;
+    std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;
+    for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI) {
+      IdxVec WriteSeq;
+      SchedModels.expandRWSeqForProc(*WI, WriteSeq, /*IsRead=*/false,
+                                     ProcModel);
+
+      // For each operand, create a latency entry.
+      MCWriteLatencyEntry WLEntry;
+      WLEntry.Cycles = 0;
+      unsigned WriteID = WriteSeq.back();
+      WriterNames.push_back(SchedModels.getSchedWrite(WriteID).Name);
+      // If this Write is not referenced by a ReadAdvance, don't distinguish it
+      // from other WriteLatency entries.
+      if (!SchedModels.hasReadOfWrite(SchedModels.getSchedWrite(WriteID).TheDef)) {
+        WriteID = 0;
+      }
+      WLEntry.WriteResourceID = WriteID;
+
+      for (IdxIter WSI = WriteSeq.begin(), WSE = WriteSeq.end();
+           WSI != WSE; ++WSI) {
+
+        Record *WriteRes =
+          FindWriteResources(SchedModels.getSchedWrite(*WSI), ProcModel);
+
+        // Mark the parent class as invalid for unsupported write types.
+        if (WriteRes->getValueAsBit("Unsupported")) {
+          SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
+          break;
+        }
+        WLEntry.Cycles += WriteRes->getValueAsInt("Latency");
+        SCDesc.NumMicroOps += WriteRes->getValueAsInt("NumMicroOps");
+        SCDesc.BeginGroup |= WriteRes->getValueAsBit("BeginGroup");
+        SCDesc.EndGroup |= WriteRes->getValueAsBit("EndGroup");
+
+        // Create an entry for each ProcResource listed in WriteRes.
+        RecVec PRVec = WriteRes->getValueAsListOfDefs("ProcResources");
+        std::vector<int64_t> Cycles =
+          WriteRes->getValueAsListOfInts("ResourceCycles");
+        for (unsigned PRIdx = 0, PREnd = PRVec.size();
+             PRIdx != PREnd; ++PRIdx) {
+          MCWriteProcResEntry WPREntry;
+          WPREntry.ProcResourceIdx = ProcModel.getProcResourceIdx(PRVec[PRIdx]);
+          assert(WPREntry.ProcResourceIdx && "Bad ProcResourceIdx");
+          if (Cycles.size() > PRIdx)
+            WPREntry.Cycles = Cycles[PRIdx];
+          else
+            WPREntry.Cycles = 1;
+          WriteProcResources.push_back(WPREntry);
+        }
+      }
+      WriteLatencies.push_back(WLEntry);
+    }
+    // Create an entry for each operand Read in this SchedClass.
+    // Entries must be sorted first by UseIdx then by WriteResourceID.
+    for (unsigned UseIdx = 0, EndIdx = Reads.size();
+         UseIdx != EndIdx; ++UseIdx) {
+      Record *ReadAdvance =
+        FindReadAdvance(SchedModels.getSchedRead(Reads[UseIdx]), ProcModel);
+      if (!ReadAdvance)
+        continue;
+
+      // Mark the parent class as invalid for unsupported read types.
+      if (ReadAdvance->getValueAsBit("Unsupported")) {
+        SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
+        break;
+      }
+      RecVec ValidWrites = ReadAdvance->getValueAsListOfDefs("ValidWrites");
+      IdxVec WriteIDs;
+      if (ValidWrites.empty())
+        WriteIDs.push_back(0);
+      else {
+        for (RecIter VWI = ValidWrites.begin(), VWE = ValidWrites.end();
+             VWI != VWE; ++VWI) {
+          WriteIDs.push_back(SchedModels.getSchedRWIdx(*VWI, /*IsRead=*/false));
+        }
+      }
+      std::sort(WriteIDs.begin(), WriteIDs.end());
+      for(IdxIter WI = WriteIDs.begin(), WE = WriteIDs.end(); WI != WE; ++WI) {
+        MCReadAdvanceEntry RAEntry;
+        RAEntry.UseIdx = UseIdx;
+        RAEntry.WriteResourceID = *WI;
+        RAEntry.Cycles = ReadAdvance->getValueAsInt("Cycles");
+        ReadAdvanceEntries.push_back(RAEntry);
+      }
+    }
+    if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
+      WriteProcResources.clear();
+      WriteLatencies.clear();
+      ReadAdvanceEntries.clear();
+    }
+    // Add the information for this SchedClass to the global tables using basic
+    // compression.
+    //
+    // WriteProcRes entries are sorted by ProcResIdx.
+    std::sort(WriteProcResources.begin(), WriteProcResources.end(),
+              LessWriteProcResources());
+
+    SCDesc.NumWriteProcResEntries = WriteProcResources.size();
+    std::vector<MCWriteProcResEntry>::iterator WPRPos =
+      std::search(SchedTables.WriteProcResources.begin(),
+                  SchedTables.WriteProcResources.end(),
+                  WriteProcResources.begin(), WriteProcResources.end());
+    if (WPRPos != SchedTables.WriteProcResources.end())
+      SCDesc.WriteProcResIdx = WPRPos - SchedTables.WriteProcResources.begin();
+    else {
+      SCDesc.WriteProcResIdx = SchedTables.WriteProcResources.size();
+      SchedTables.WriteProcResources.insert(WPRPos, WriteProcResources.begin(),
+                                            WriteProcResources.end());
+    }
+    // Latency entries must remain in operand order.
+    SCDesc.NumWriteLatencyEntries = WriteLatencies.size();
+    std::vector<MCWriteLatencyEntry>::iterator WLPos =
+      std::search(SchedTables.WriteLatencies.begin(),
+                  SchedTables.WriteLatencies.end(),
+                  WriteLatencies.begin(), WriteLatencies.end());
+    if (WLPos != SchedTables.WriteLatencies.end()) {
+      unsigned idx = WLPos - SchedTables.WriteLatencies.begin();
+      SCDesc.WriteLatencyIdx = idx;
+      for (unsigned i = 0, e = WriteLatencies.size(); i < e; ++i)
+        if (SchedTables.WriterNames[idx + i].find(WriterNames[i]) ==
+            std::string::npos) {
+          SchedTables.WriterNames[idx + i] += std::string("_") + WriterNames[i];
+        }
+    }
+    else {
+      SCDesc.WriteLatencyIdx = SchedTables.WriteLatencies.size();
+      SchedTables.WriteLatencies.insert(SchedTables.WriteLatencies.end(),
+                                        WriteLatencies.begin(),
+                                        WriteLatencies.end());
+      SchedTables.WriterNames.insert(SchedTables.WriterNames.end(),
+                                     WriterNames.begin(), WriterNames.end());
+    }
+    // ReadAdvanceEntries must remain in operand order.
+    SCDesc.NumReadAdvanceEntries = ReadAdvanceEntries.size();
+    std::vector<MCReadAdvanceEntry>::iterator RAPos =
+      std::search(SchedTables.ReadAdvanceEntries.begin(),
+                  SchedTables.ReadAdvanceEntries.end(),
+                  ReadAdvanceEntries.begin(), ReadAdvanceEntries.end());
+    if (RAPos != SchedTables.ReadAdvanceEntries.end())
+      SCDesc.ReadAdvanceIdx = RAPos - SchedTables.ReadAdvanceEntries.begin();
+    else {
+      SCDesc.ReadAdvanceIdx = SchedTables.ReadAdvanceEntries.size();
+      SchedTables.ReadAdvanceEntries.insert(RAPos, ReadAdvanceEntries.begin(),
+                                            ReadAdvanceEntries.end());
+    }
+  }
+}
+
+// Emit SchedClass tables for all processors and associated global tables.
+void SubtargetEmitter::EmitSchedClassTables(SchedClassTables &SchedTables,
+                                            raw_ostream &OS) {
+  // Emit global WriteProcResTable.
+  OS << "\n// {ProcResourceIdx, Cycles}\n"
+     << "extern const llvm::MCWriteProcResEntry "
+     << Target << "WriteProcResTable[] = {\n"
+     << "  { 0,  0}, // Invalid\n";
+  for (unsigned WPRIdx = 1, WPREnd = SchedTables.WriteProcResources.size();
+       WPRIdx != WPREnd; ++WPRIdx) {
+    MCWriteProcResEntry &WPREntry = SchedTables.WriteProcResources[WPRIdx];
+    OS << "  {" << format("%2d", WPREntry.ProcResourceIdx) << ", "
+       << format("%2d", WPREntry.Cycles) << "}";
+    if (WPRIdx + 1 < WPREnd)
+      OS << ',';
+    OS << " // #" << WPRIdx << '\n';
+  }
+  OS << "}; // " << Target << "WriteProcResTable\n";
+
+  // Emit global WriteLatencyTable.
+  OS << "\n// {Cycles, WriteResourceID}\n"
+     << "extern const llvm::MCWriteLatencyEntry "
+     << Target << "WriteLatencyTable[] = {\n"
+     << "  { 0,  0}, // Invalid\n";
+  for (unsigned WLIdx = 1, WLEnd = SchedTables.WriteLatencies.size();
+       WLIdx != WLEnd; ++WLIdx) {
+    MCWriteLatencyEntry &WLEntry = SchedTables.WriteLatencies[WLIdx];
+    OS << "  {" << format("%2d", WLEntry.Cycles) << ", "
+       << format("%2d", WLEntry.WriteResourceID) << "}";
+    if (WLIdx + 1 < WLEnd)
+      OS << ',';
+    OS << " // #" << WLIdx << " " << SchedTables.WriterNames[WLIdx] << '\n';
+  }
+  OS << "}; // " << Target << "WriteLatencyTable\n";
+
+  // Emit global ReadAdvanceTable.
+  OS << "\n// {UseIdx, WriteResourceID, Cycles}\n"
+     << "extern const llvm::MCReadAdvanceEntry "
+     << Target << "ReadAdvanceTable[] = {\n"
+     << "  {0,  0,  0}, // Invalid\n";
+  for (unsigned RAIdx = 1, RAEnd = SchedTables.ReadAdvanceEntries.size();
+       RAIdx != RAEnd; ++RAIdx) {
+    MCReadAdvanceEntry &RAEntry = SchedTables.ReadAdvanceEntries[RAIdx];
+    OS << "  {" << RAEntry.UseIdx << ", "
+       << format("%2d", RAEntry.WriteResourceID) << ", "
+       << format("%2d", RAEntry.Cycles) << "}";
+    if (RAIdx + 1 < RAEnd)
+      OS << ',';
+    OS << " // #" << RAIdx << '\n';
+  }
+  OS << "}; // " << Target << "ReadAdvanceTable\n";
+
+  // Emit a SchedClass table for each processor.
+  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
+         PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
+    if (!PI->hasInstrSchedModel())
+      continue;
+
+    std::vector<MCSchedClassDesc> &SCTab =
+      SchedTables.ProcSchedClasses[1 + (PI - SchedModels.procModelBegin())];
+
+    OS << "\n// {Name, NumMicroOps, BeginGroup, EndGroup,"
+       << " WriteProcResIdx,#, WriteLatencyIdx,#, ReadAdvanceIdx,#}\n";
+    OS << "static const llvm::MCSchedClassDesc "
+       << PI->ModelName << "SchedClasses[] = {\n";
+
+    // The first class is always invalid. We have no way to distinguish it
+    // except by name and position.
+    assert(SchedModels.getSchedClass(0).Name == "NoItinerary"
+           && "invalid class not first");
+    OS << "  {DBGFIELD(\"InvalidSchedClass\")  "
+       << MCSchedClassDesc::InvalidNumMicroOps
+       << ", 0, 0,  0, 0,  0, 0,  0, 0},\n";
+
+    for (unsigned SCIdx = 1, SCEnd = SCTab.size(); SCIdx != SCEnd; ++SCIdx) {
+      MCSchedClassDesc &MCDesc = SCTab[SCIdx];
+      const CodeGenSchedClass &SchedClass = SchedModels.getSchedClass(SCIdx);
+      OS << "  {DBGFIELD(\"" << SchedClass.Name << "\") ";
+      if (SchedClass.Name.size() < 18)
+        OS.indent(18 - SchedClass.Name.size());
+      OS << MCDesc.NumMicroOps
+         << ", " << MCDesc.BeginGroup << ", " << MCDesc.EndGroup
+         << ", " << format("%2d", MCDesc.WriteProcResIdx)
+         << ", " << MCDesc.NumWriteProcResEntries
+         << ", " << format("%2d", MCDesc.WriteLatencyIdx)
+         << ", " << MCDesc.NumWriteLatencyEntries
+         << ", " << format("%2d", MCDesc.ReadAdvanceIdx)
+         << ", " << MCDesc.NumReadAdvanceEntries << "}";
+      if (SCIdx + 1 < SCEnd)
+        OS << ',';
+      OS << " // #" << SCIdx << '\n';
+    }
+    OS << "}; // " << PI->ModelName << "SchedClasses\n";
+  }
+}
+
 void SubtargetEmitter::EmitProcessorModels(raw_ostream &OS) {
   // For each processor model.
   for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
          PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
-    // Skip default
+    // Emit processor resource table.
+    if (PI->hasInstrSchedModel())
+      EmitProcessorResources(*PI, OS);
+    else if(!PI->ProcResourceDefs.empty())
+      PrintFatalError(PI->ModelDef->getLoc(), "SchedMachineModel defines "
+                    "ProcResources without defining WriteRes SchedWriteRes");
+
     // Begin processor itinerary properties
     OS << "\n";
     OS << "static const llvm::MCSchedModel " << PI->ModelName << "(\n";
@@ -591,11 +1109,19 @@
     EmitProcessorProp(OS, PI->ModelDef, "LoadLatency", ',');
     EmitProcessorProp(OS, PI->ModelDef, "HighLatency", ',');
     EmitProcessorProp(OS, PI->ModelDef, "MispredictPenalty", ',');
+    OS << "  " << PI->Index << ", // Processor ID\n";
+    if (PI->hasInstrSchedModel())
+      OS << "  " << PI->ModelName << "ProcResources" << ",\n"
+         << "  " << PI->ModelName << "SchedClasses" << ",\n"
+         << "  " << PI->ProcResourceDefs.size()+1 << ",\n"
+         << "  " << (SchedModels.schedClassEnd()
+                     - SchedModels.schedClassBegin()) << ",\n";
+    else
+      OS << "  0, 0, 0, 0, // No instruction-level machine model.\n";
     if (SchedModels.hasItineraryClasses())
-      OS << "  " << PI->ItinsDef->getName();
+      OS << "  " << PI->ItinsDef->getName() << ");\n";
     else
-      OS << "  0";
-    OS << ");\n";
+      OS << "  0); // No Itinerary\n";
   }
 }
 
@@ -621,14 +1147,10 @@
 
     const std::string &Name = Processor->getValueAsString("Name");
     const std::string &ProcModelName =
-      SchedModels.getProcModel(Processor).ModelName;
+      SchedModels.getModelForProc(Processor).ModelName;
 
     // Emit as { "cpu", procinit },
-    OS << "  { "
-       << "\"" << Name << "\", "
-       << "(void *)&" << ProcModelName;
-
-    OS << " }";
+    OS << "  { \"" << Name << "\", (const void *)&" << ProcModelName << " }";
 
     // Depending on ''if more in the list'' emit comma
     if (++i < N) OS << ",";
@@ -644,16 +1166,116 @@
 // EmitSchedModel - Emits all scheduling model tables, folding common patterns.
 //
 void SubtargetEmitter::EmitSchedModel(raw_ostream &OS) {
+  OS << "#ifdef DBGFIELD\n"
+     << "#error \"<target>GenSubtargetInfo.inc requires a DBGFIELD macro\"\n"
+     << "#endif\n"
+     << "#ifndef NDEBUG\n"
+     << "#define DBGFIELD(x) x,\n"
+     << "#else\n"
+     << "#define DBGFIELD(x)\n"
+     << "#endif\n";
+
   if (SchedModels.hasItineraryClasses()) {
     std::vector<std::vector<InstrItinerary> > ProcItinLists;
     // Emit the stage data
     EmitStageAndOperandCycleData(OS, ProcItinLists);
     EmitItineraries(OS, ProcItinLists);
   }
+  OS << "\n// ===============================================================\n"
+     << "// Data tables for the new per-operand machine model.\n";
+
+  SchedClassTables SchedTables;
+  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
+         PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
+    GenSchedClassTables(*PI, SchedTables);
+  }
+  EmitSchedClassTables(SchedTables, OS);
+
   // Emit the processor machine model
   EmitProcessorModels(OS);
   // Emit the processor lookup data
   EmitProcessorLookup(OS);
+
+  OS << "#undef DBGFIELD";
+}
+
+void SubtargetEmitter::EmitSchedModelHelpers(std::string ClassName,
+                                             raw_ostream &OS) {
+  OS << "unsigned " << ClassName
+     << "\n::resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,"
+     << " const TargetSchedModel *SchedModel) const {\n";
+
+  std::vector<Record*> Prologs = Records.getAllDerivedDefinitions("PredicateProlog");
+  std::sort(Prologs.begin(), Prologs.end(), LessRecord());
+  for (std::vector<Record*>::const_iterator
+         PI = Prologs.begin(), PE = Prologs.end(); PI != PE; ++PI) {
+    OS << (*PI)->getValueAsString("Code") << '\n';
+  }
+  IdxVec VariantClasses;
+  for (CodeGenSchedModels::SchedClassIter SCI = SchedModels.schedClassBegin(),
+         SCE = SchedModels.schedClassEnd(); SCI != SCE; ++SCI) {
+    if (SCI->Transitions.empty())
+      continue;
+    VariantClasses.push_back(SCI - SchedModels.schedClassBegin());
+  }
+  if (!VariantClasses.empty()) {
+    OS << "  switch (SchedClass) {\n";
+    for (IdxIter VCI = VariantClasses.begin(), VCE = VariantClasses.end();
+         VCI != VCE; ++VCI) {
+      const CodeGenSchedClass &SC = SchedModels.getSchedClass(*VCI);
+      OS << "  case " << *VCI << ": // " << SC.Name << '\n';
+      IdxVec ProcIndices;
+      for (std::vector<CodeGenSchedTransition>::const_iterator
+             TI = SC.Transitions.begin(), TE = SC.Transitions.end();
+           TI != TE; ++TI) {
+        IdxVec PI;
+        std::set_union(TI->ProcIndices.begin(), TI->ProcIndices.end(),
+                       ProcIndices.begin(), ProcIndices.end(),
+                       std::back_inserter(PI));
+        ProcIndices.swap(PI);
+      }
+      for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
+           PI != PE; ++PI) {
+        OS << "    ";
+        if (*PI != 0)
+          OS << "if (SchedModel->getProcessorID() == " << *PI << ") ";
+        OS << "{ // " << (SchedModels.procModelBegin() + *PI)->ModelName
+           << '\n';
+        for (std::vector<CodeGenSchedTransition>::const_iterator
+               TI = SC.Transitions.begin(), TE = SC.Transitions.end();
+             TI != TE; ++TI) {
+          OS << "      if (";
+          if (*PI != 0 && !std::count(TI->ProcIndices.begin(),
+                                      TI->ProcIndices.end(), *PI)) {
+              continue;
+          }
+          for (RecIter RI = TI->PredTerm.begin(), RE = TI->PredTerm.end();
+               RI != RE; ++RI) {
+            if (RI != TI->PredTerm.begin())
+              OS << "\n          && ";
+            OS << "(" << (*RI)->getValueAsString("Predicate") << ")";
+          }
+          OS << ")\n"
+             << "        return " << TI->ToClassIdx << "; // "
+             << SchedModels.getSchedClass(TI->ToClassIdx).Name << '\n';
+        }
+        OS << "    }\n";
+        if (*PI == 0)
+          break;
+      }
+      unsigned SCIdx = 0;
+      if (SC.ItinClassDef)
+        SCIdx = SchedModels.getSchedClassIdxForItin(SC.ItinClassDef);
+      else
+        SCIdx = SchedModels.findSchedClassIdx(SC.Writes, SC.Reads);
+      if (SCIdx != *VCI)
+        OS << "    return " << SCIdx << ";\n";
+      OS << "    break;\n";
+    }
+    OS << "  };\n";
+  }
+  OS << "  report_fatal_error(\"Expected a variant SchedClass\");\n"
+     << "} // " << ClassName << "::resolveSchedClass\n";
 }
 
 //
@@ -680,7 +1302,8 @@
     return;
   }
 
-  OS << "  uint64_t Bits = ReInitMCSubtargetInfo(CPU, FS);\n";
+  OS << "  InitMCProcessorInfo(CPU, FS);\n"
+     << "  uint64_t Bits = getFeatureBits();\n";
 
   for (unsigned i = 0; i < Features.size(); i++) {
     // Next record
@@ -747,13 +1370,18 @@
     OS << Target << "SubTypeKV, ";
   else
     OS << "0, ";
+  OS << '\n'; OS.indent(22);
+  OS << Target << "ProcSchedKV, "
+     << Target << "WriteProcResTable, "
+     << Target << "WriteLatencyTable, "
+     << Target << "ReadAdvanceTable, ";
   if (SchedModels.hasItineraryClasses()) {
-    OS << Target << "ProcSchedKV, "
-       << Target << "Stages, "
+    OS << '\n'; OS.indent(22);
+    OS << Target << "Stages, "
        << Target << "OperandCycles, "
        << Target << "ForwardingPaths, ";
   } else
-    OS << "0, 0, 0, 0, ";
+    OS << "0, 0, 0, ";
   OS << NumFeatures << ", " << NumProcs << ");\n}\n\n";
 
   OS << "} // End llvm namespace \n";
@@ -780,6 +1408,8 @@
      << "  explicit " << ClassName << "(StringRef TT, StringRef CPU, "
      << "StringRef FS);\n"
      << "public:\n"
+     << "  unsigned resolveSchedClass(unsigned SchedClass, const MachineInstr *DefMI,"
+     << " const TargetSchedModel *SchedModel) const;\n"
      << "  DFAPacketizer *createDFAPacketizer(const InstrItineraryData *IID)"
      << " const;\n"
      << "};\n";
@@ -790,11 +1420,19 @@
   OS << "\n#ifdef GET_SUBTARGETINFO_CTOR\n";
   OS << "#undef GET_SUBTARGETINFO_CTOR\n";
 
+  OS << "#include \"llvm/CodeGen/TargetSchedule.h\"\n";
   OS << "namespace llvm {\n";
   OS << "extern const llvm::SubtargetFeatureKV " << Target << "FeatureKV[];\n";
   OS << "extern const llvm::SubtargetFeatureKV " << Target << "SubTypeKV[];\n";
+  OS << "extern const llvm::SubtargetInfoKV " << Target << "ProcSchedKV[];\n";
+  OS << "extern const llvm::MCWriteProcResEntry "
+     << Target << "WriteProcResTable[];\n";
+  OS << "extern const llvm::MCWriteLatencyEntry "
+     << Target << "WriteLatencyTable[];\n";
+  OS << "extern const llvm::MCReadAdvanceEntry "
+     << Target << "ReadAdvanceTable[];\n";
+
   if (SchedModels.hasItineraryClasses()) {
-    OS << "extern const llvm::SubtargetInfoKV " << Target << "ProcSchedKV[];\n";
     OS << "extern const llvm::InstrStage " << Target << "Stages[];\n";
     OS << "extern const unsigned " << Target << "OperandCycles[];\n";
     OS << "extern const unsigned " << Target << "ForwardingPaths[];\n";
@@ -812,14 +1450,22 @@
     OS << Target << "SubTypeKV, ";
   else
     OS << "0, ";
+  OS << '\n'; OS.indent(22);
+  OS << Target << "ProcSchedKV, "
+     << Target << "WriteProcResTable, "
+     << Target << "WriteLatencyTable, "
+     << Target << "ReadAdvanceTable, ";
+  OS << '\n'; OS.indent(22);
   if (SchedModels.hasItineraryClasses()) {
-    OS << Target << "ProcSchedKV, "
-       << Target << "Stages, "
+    OS << Target << "Stages, "
        << Target << "OperandCycles, "
        << Target << "ForwardingPaths, ";
   } else
-    OS << "0, 0, 0, 0, ";
+    OS << "0, 0, 0, ";
   OS << NumFeatures << ", " << NumProcs << ");\n}\n\n";
+
+  EmitSchedModelHelpers(ClassName, OS);
+
   OS << "} // End llvm namespace \n";
 
   OS << "#endif // GET_SUBTARGETINFO_CTOR\n\n";
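For readers following the generated output rather than the emitter itself: each MCSchedClassDesc printed by EmitSchedClassTables stores an index/count pair into the shared WriteProcResTable, WriteLatencyTable, and ReadAdvanceTable, so a consumer resolves a scheduling class by slicing those global arrays. The sketch below illustrates that layout; the struct fields are a reduced stand-in for the real MC definitions and the table contents are made up purely for illustration.

  #include <cstdio>

  // Simplified stand-ins for the MC structs the generated tables use.
  struct WriteProcResEntry { unsigned ProcResourceIdx; unsigned Cycles; };
  struct WriteLatencyEntry { unsigned Cycles; unsigned WriteResourceID; };
  struct SchedClassDesc {
    unsigned NumMicroOps;
    unsigned WriteProcResIdx, NumWriteProcResEntries; // range in WriteProcResTable
    unsigned WriteLatencyIdx, NumWriteLatencyEntries; // range in WriteLatencyTable
  };

  // Hypothetical tables in the shape the emitter prints: index 0 is the
  // reserved invalid entry, and per-class descriptors reference sub-ranges.
  static const WriteProcResEntry WriteProcResTable[] = {
    { 0, 0 },            // Invalid
    { 2, 1 }, { 3, 4 },  // entries shared by whichever classes match them
  };
  static const WriteLatencyEntry WriteLatencyTable[] = {
    { 0, 0 },            // Invalid
    { 3, 0 },
  };
  static const SchedClassDesc SchedClasses[] = {
    { ~0u, 0, 0, 0, 0 }, // InvalidSchedClass placeholder
    { 2,   1, 2, 1, 1 }, // a class using resource entries #1-#2 and latency #1
  };

  // Walk one scheduling class the way a consumer of the tables would.
  static void dumpClass(unsigned SCIdx) {
    const SchedClassDesc &SC = SchedClasses[SCIdx];
    std::printf("class %u: %u uops\n", SCIdx, SC.NumMicroOps);
    for (unsigned i = 0; i != SC.NumWriteProcResEntries; ++i) {
      const WriteProcResEntry &E = WriteProcResTable[SC.WriteProcResIdx + i];
      std::printf("  resource %u for %u cycles\n", E.ProcResourceIdx, E.Cycles);
    }
    for (unsigned i = 0; i != SC.NumWriteLatencyEntries; ++i)
      std::printf("  def latency %u\n",
                  WriteLatencyTable[SC.WriteLatencyIdx + i].Cycles);
  }

  int main() { dumpClass(1); }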





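The global tables stay small because GenSchedClassTables only appends a class's entry list when it does not already appear as a contiguous run in the global table (the std::search calls above); otherwise it records the index of the existing run. Below is a standalone sketch of that reuse scheme, with plain ints standing in for MCWriteProcResEntry and friends, for illustration only.

  #include <algorithm>
  #include <cstdio>
  #include <vector>

  // Return the starting index of Entries inside Table, appending a new run
  // only if no existing contiguous run matches (mirrors the std::search /
  // insert pattern in GenSchedClassTables).
  static unsigned addOrReuse(std::vector<int> &Table,
                             const std::vector<int> &Entries) {
    std::vector<int>::iterator Pos =
      std::search(Table.begin(), Table.end(), Entries.begin(), Entries.end());
    if (Pos != Table.end())
      return Pos - Table.begin();    // reuse the existing run
    unsigned Idx = Table.size();
    Table.insert(Table.end(), Entries.begin(), Entries.end());
    return Idx;                      // appended a new run
  }

  int main() {
    std::vector<int> Table(1, 0);    // index 0 reserved for the invalid entry
    std::vector<int> A, B;
    A.push_back(3); A.push_back(5);
    B.push_back(5);
    std::printf("A at index %u\n", addOrReuse(Table, A)); // appends -> 1
    std::printf("B at index %u\n", addOrReuse(Table, B)); // found inside A -> 2
    return 0;
  }

In the example, the single-entry list {5} is found inside the previously appended {3, 5}, so it is assigned index 2 without growing the table; the same effect lets many scheduling classes share one stretch of the emitted WriteProcResTable.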