[llvm-commits] [llvm] r54195 - in /llvm/trunk: docs/LangRef.html include/llvm/CodeGen/ValueTypes.h include/llvm/CodeGen/ValueTypes.td include/llvm/Intrinsics.td lib/VMCore/AutoUpgrade.cpp lib/VMCore/Function.cpp lib/VMCore/Verifier.cpp test/CodeGen/X86/atomic_op.ll utils/TableGen/CodeGenDAGPatterns.cpp utils/TableGen/CodeGenDAGPatterns.h utils/TableGen/CodeGenTarget.cpp utils/TableGen/DAGISelEmitter.cpp utils/TableGen/IntrinsicEmitter.cpp
Mon P Wang
wangmp at apple.com
Tue Jul 29 21:36:53 PDT 2008
Author: wangmp
Date: Tue Jul 29 23:36:53 2008
New Revision: 54195
URL: http://llvm.org/viewvc/llvm-project?rev=54195&view=rev
Log:
Added support for overloading intrinsics (atomics) based on pointers
to different address spaces. This alters the naming scheme for those
intrinsics, e.g., atomic.load.add.i32 => atomic.load.add.i32.p0i32
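For example, the 32-bit atomic add in the default address space now carries a
.p0i32 suffix, and a pointer in another address space selects a differently
suffixed overload. A minimal IR sketch (address space 1 here is illustrative;
which numbered address spaces exist is target-dependent):

    declare i32 @llvm.atomic.load.add.i32.p0i32( i32* <ptr>, i32 <delta> )
    declare i32 @llvm.atomic.load.add.i32.p1i32( i32 addrspace(1)* <ptr>, i32 <delta> )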
Modified:
llvm/trunk/docs/LangRef.html
llvm/trunk/include/llvm/CodeGen/ValueTypes.h
llvm/trunk/include/llvm/CodeGen/ValueTypes.td
llvm/trunk/include/llvm/Intrinsics.td
llvm/trunk/lib/VMCore/AutoUpgrade.cpp
llvm/trunk/lib/VMCore/Function.cpp
llvm/trunk/lib/VMCore/Verifier.cpp
llvm/trunk/test/CodeGen/X86/atomic_op.ll
llvm/trunk/utils/TableGen/CodeGenDAGPatterns.cpp
llvm/trunk/utils/TableGen/CodeGenDAGPatterns.h
llvm/trunk/utils/TableGen/CodeGenTarget.cpp
llvm/trunk/utils/TableGen/DAGISelEmitter.cpp
llvm/trunk/utils/TableGen/IntrinsicEmitter.cpp
Modified: llvm/trunk/docs/LangRef.html
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/docs/LangRef.html?rev=54195&r1=54194&r2=54195&view=diff
==============================================================================
--- llvm/trunk/docs/LangRef.html (original)
+++ llvm/trunk/docs/LangRef.html Tue Jul 29 23:36:53 2008
@@ -5788,14 +5788,15 @@
<div class="doc_text">
<h5>Syntax:</h5>
<p>
- This is an overloaded intrinsic. You can use <tt>llvm.atomic.cmp.swap</tt> on any
- integer bit width. Not all targets support all bit widths however.</p>
+ This is an overloaded intrinsic. You can use <tt>llvm.atomic.cmp.swap</tt> on
+ any integer bit width and for different address spaces. Not all targets
+ support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.cmp.swap.i8( i8* <ptr>, i8 <cmp>, i8 <val> )
-declare i16 @llvm.atomic.cmp.swap.i16( i16* <ptr>, i16 <cmp>, i16 <val> )
-declare i32 @llvm.atomic.cmp.swap.i32( i32* <ptr>, i32 <cmp>, i32 <val> )
-declare i64 @llvm.atomic.cmp.swap.i64( i64* <ptr>, i64 <cmp>, i64 <val> )
+declare i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* <ptr>, i8 <cmp>, i8 <val> )
+declare i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* <ptr>, i16 <cmp>, i16 <val> )
+declare i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* <ptr>, i32 <cmp>, i32 <val> )
+declare i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* <ptr>, i64 <cmp>, i64 <val> )
</pre>
<h5>Overview:</h5>
@@ -5827,13 +5828,13 @@
store i32 4, %ptr
%val1 = add i32 4, 4
-%result1 = call i32 @llvm.atomic.cmp.swap.i32( i32* %ptr, i32 4, %val1 )
+%result1 = call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %ptr, i32 4, i32 %val1 )
<i>; yields {i32}:result1 = 4</i>
%stored1 = icmp eq i32 %result1, 4 <i>; yields {i1}:stored1 = true</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 8</i>
%val2 = add i32 1, 1
-%result2 = call i32 @llvm.atomic.cmp.swap.i32( i32* %ptr, i32 5, %val2 )
+%result2 = call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %ptr, i32 5, i32 %val2 )
<i>; yields {i32}:result2 = 8</i>
%stored2 = icmp eq i32 %result2, 5 <i>; yields {i1}:stored2 = false</i>
@@ -5852,10 +5853,10 @@
This is an overloaded intrinsic. You can use <tt>llvm.atomic.swap</tt> on any
integer bit width. Not all targets support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.swap.i8( i8* <ptr>, i8 <val> )
-declare i16 @llvm.atomic.swap.i16( i16* <ptr>, i16 <val> )
-declare i32 @llvm.atomic.swap.i32( i32* <ptr>, i32 <val> )
-declare i64 @llvm.atomic.swap.i64( i64* <ptr>, i64 <val> )
+declare i8 @llvm.atomic.swap.i8.p0i8( i8* <ptr>, i8 <val> )
+declare i16 @llvm.atomic.swap.i16.p0i16( i16* <ptr>, i16 <val> )
+declare i32 @llvm.atomic.swap.i32.p0i32( i32* <ptr>, i32 <val> )
+declare i64 @llvm.atomic.swap.i64.p0i64( i64* <ptr>, i64 <val> )
</pre>
<h5>Overview:</h5>
@@ -5886,13 +5887,13 @@
store i32 4, %ptr
%val1 = add i32 4, 4
-%result1 = call i32 @llvm.atomic.swap.i32( i32* %ptr, i32 %val1 )
+%result1 = call i32 @llvm.atomic.swap.i32.p0i32( i32* %ptr, i32 %val1 )
<i>; yields {i32}:result1 = 4</i>
%stored1 = icmp eq i32 %result1, 4 <i>; yields {i1}:stored1 = true</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 8</i>
%val2 = add i32 1, 1
-%result2 = call i32 @llvm.atomic.swap.i32( i32* %ptr, i32 %val2 )
+%result2 = call i32 @llvm.atomic.swap.i32.p0i32( i32* %ptr, i32 %val2 )
<i>; yields {i32}:result2 = 8</i>
%stored2 = icmp eq i32 %result2, 8 <i>; yields {i1}:stored2 = true</i>
@@ -5911,10 +5912,10 @@
This is an overloaded intrinsic. You can use <tt>llvm.atomic.load.add</tt> on any
integer bit width. Not all targets support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.load.add.i8.( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.add.i16.( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.add.i32.( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.add.i64.( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.add.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.add.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.add.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.add.i64.p0i64( i64* <ptr>, i64 <delta> )
</pre>
<h5>Overview:</h5>
@@ -5941,11 +5942,11 @@
<pre>
%ptr = malloc i32
store i32 4, %ptr
-%result1 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 4 )
+%result1 = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %ptr, i32 4 )
<i>; yields {i32}:result1 = 4</i>
-%result2 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 2 )
+%result2 = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %ptr, i32 2 )
<i>; yields {i32}:result2 = 8</i>
-%result3 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 5 )
+%result3 = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %ptr, i32 5 )
<i>; yields {i32}:result3 = 10</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 15</i>
</pre>
@@ -5960,12 +5961,13 @@
<h5>Syntax:</h5>
<p>
This is an overloaded intrinsic. You can use <tt>llvm.atomic.load.sub</tt> on
- any integer bit width. Not all targets support all bit widths however.</p>
+ any integer bit width and for different address spaces. Not all targets
+ support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.load.sub.i8.( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.sub.i16.( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.sub.i32.( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.sub.i64.( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.sub.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.sub.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.sub.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.sub.i64.p0i64( i64* <ptr>, i64 <delta> )
</pre>
<h5>Overview:</h5>
@@ -5992,11 +5994,11 @@
<pre>
%ptr = malloc i32
store i32 8, %ptr
-%result1 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 4 )
+%result1 = call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %ptr, i32 4 )
<i>; yields {i32}:result1 = 8</i>
-%result2 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 2 )
+%result2 = call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %ptr, i32 2 )
<i>; yields {i32}:result2 = 4</i>
-%result3 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 5 )
+%result3 = call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %ptr, i32 5 )
<i>; yields {i32}:result3 = 2</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = -3</i>
</pre>
@@ -6015,37 +6017,37 @@
<p>
These are overloaded intrinsics. You can use <tt>llvm.atomic.load_and</tt>,
<tt>llvm.atomic.load_nand</tt>, <tt>llvm.atomic.load_or</tt>, and
- <tt>llvm.atomic.load_xor</tt> on any integer bit width. Not all targets
- support all bit widths however.</p>
+ <tt>llvm.atomic.load_xor</tt> on any integer bit width and for different
+ address spaces. Not all targets support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.load.and.i8.( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.and.i16.( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.and.i32.( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.and.i64.( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.and.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.and.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.and.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.and.i64.p0i64( i64* <ptr>, i64 <delta> )
</pre>
<pre>
-declare i8 @llvm.atomic.load.or.i8.( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.or.i16.( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.or.i32.( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.or.i64.( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.or.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.or.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.or.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.or.i64.p0i64( i64* <ptr>, i64 <delta> )
</pre>
<pre>
-declare i8 @llvm.atomic.load.nand.i8.( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.nand.i16.( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.nand.i32.( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.nand.i64.( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.nand.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.nand.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.nand.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.nand.i64.p0i64( i64* <ptr>, i64 <delta> )
</pre>
<pre>
-declare i8 @llvm.atomic.load.xor.i8.( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.xor.i16.( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.xor.i32.( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.xor.i64.( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.xor.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.xor.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.xor.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.xor.i64.p0i64( i64* <ptr>, i64 <delta> )
</pre>
<h5>Overview:</h5>
@@ -6074,13 +6076,13 @@
<pre>
%ptr = malloc i32
store i32 0x0F0F, %ptr
-%result0 = call i32 @llvm.atomic.load.nand.i32( i32* %ptr, i32 0xFF )
+%result0 = call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %ptr, i32 0xFF )
<i>; yields {i32}:result0 = 0x0F0F</i>
-%result1 = call i32 @llvm.atomic.load.and.i32( i32* %ptr, i32 0xFF )
+%result1 = call i32 @llvm.atomic.load.and.i32.p0i32( i32* %ptr, i32 0xFF )
<i>; yields {i32}:result1 = 0xFFFFFFF0</i>
-%result2 = call i32 @llvm.atomic.load.or.i32( i32* %ptr, i32 0F )
+%result2 = call i32 @llvm.atomic.load.or.i32.p0i32( i32* %ptr, i32 0xF )
<i>; yields {i32}:result2 = 0xF0</i>
-%result3 = call i32 @llvm.atomic.load.xor.i32( i32* %ptr, i32 0F )
+%result3 = call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %ptr, i32 0xF )
<i>; yields {i32}:result3 = FF</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = F0</i>
</pre>
@@ -6100,37 +6102,38 @@
<p>
These are overloaded intrinsics. You can use <tt>llvm.atomic.load_max</tt>,
<tt>llvm.atomic.load_min</tt>, <tt>llvm.atomic.load_umax</tt>, and
- <tt>llvm.atomic.load_umin</tt> on any integer bit width. Not all targets
+ <tt>llvm.atomic.load_umin</tt> on any integer bit width and for different
+ address spaces. Not all targets
support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.load.max.i8.( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.max.i16.( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.max.i32.( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.max.i64.( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.max.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.max.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.max.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.max.i64.p0i64( i64* <ptr>, i64 <delta> )
</pre>
<pre>
-declare i8 @llvm.atomic.load.min.i8.( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.min.i16.( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.min.i32.( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.min.i64.( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.min.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.min.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.min.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.min.i64.p0i64( i64* <ptr>, i64 <delta> )
</pre>
<pre>
-declare i8 @llvm.atomic.load.umax.i8.( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.umax.i16.( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.umax.i32.( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.umax.i64.( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.umax.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.umax.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.umax.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.umax.i64.p0i64( i64* <ptr>, i64 <delta> )
</pre>
<pre>
-declare i8 @llvm.atomic.load.umin.i8.( i8* <ptr>, i8 <delta> )
-declare i16 @llvm.atomic.load.umin.i16.( i16* <ptr>, i16 <delta> )
-declare i32 @llvm.atomic.load.umin.i32.( i32* <ptr>, i32 <delta> )
-declare i64 @llvm.atomic.load.umin.i64.( i64* <ptr>, i64 <delta> )
+declare i8 @llvm.atomic.load.umin.i8.p0i8( i8* <ptr>, i8 <delta> )
+declare i16 @llvm.atomic.load.umin.i16.p0i16( i16* <ptr>, i16 <delta> )
+declare i32 @llvm.atomic.load.umin.i32.p0i32( i32* <ptr>, i32 <delta> )
+declare i64 @llvm.atomic.load.umin.i64.p0i64( i64* <ptr>, i64 <delta> )
</pre>
<h5>Overview:</h5>
@@ -6159,13 +6162,13 @@
<pre>
%ptr = malloc i32
store i32 7, %ptr
-%result0 = call i32 @llvm.atomic.load.min.i32( i32* %ptr, i32 -2 )
+%result0 = call i32 @llvm.atomic.load.min.i32.p0i32( i32* %ptr, i32 -2 )
<i>; yields {i32}:result0 = 7</i>
-%result1 = call i32 @llvm.atomic.load.max.i32( i32* %ptr, i32 8 )
+%result1 = call i32 @llvm.atomic.load.max.i32.p0i32( i32* %ptr, i32 8 )
<i>; yields {i32}:result1 = -2</i>
-%result2 = call i32 @llvm.atomic.load.umin.i32( i32* %ptr, i32 10 )
+%result2 = call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %ptr, i32 10 )
<i>; yields {i32}:result2 = 8</i>
-%result3 = call i32 @llvm.atomic.load.umax.i32( i32* %ptr, i32 30 )
+%result3 = call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %ptr, i32 30 )
<i>; yields {i32}:result3 = 8</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 30</i>
</pre>
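The declarations documented above all use the default address space (p0), but
the same intrinsics can now be taken over pointers in other address spaces. A
sketch, again assuming a target where address space 1 exists (%gptr is a
hypothetical addrspace(1) pointer):

    declare i32 @llvm.atomic.cmp.swap.i32.p1i32( i32 addrspace(1)* <ptr>, i32 <cmp>, i32 <val> )
    %old = call i32 @llvm.atomic.cmp.swap.i32.p1i32( i32 addrspace(1)* %gptr, i32 4, i32 8 )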
Modified: llvm/trunk/include/llvm/CodeGen/ValueTypes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/ValueTypes.h?rev=54195&r1=54194&r2=54195&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/ValueTypes.h (original)
+++ llvm/trunk/include/llvm/CodeGen/ValueTypes.h Tue Jul 29 23:36:53 2008
@@ -70,6 +70,11 @@
LAST_VALUETYPE = 27, // This always remains at the end of the list.
+ // iPTRAny - An integer value the size of a pointer into any address
+ // space on the current target. This must only be used internally in
+ // tblgen. Other than for overloading, we treat iPTRAny the same as iPTR.
+ iPTRAny = 252,
+
// fAny - Any floating-point or vector floating-point value. This is used
// for intrinsics that have overloadings based on floating-point types.
// This is only for tblgen's consumption!
Modified: llvm/trunk/include/llvm/CodeGen/ValueTypes.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/ValueTypes.td?rev=54195&r1=54194&r2=54195&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/ValueTypes.td (original)
+++ llvm/trunk/include/llvm/CodeGen/ValueTypes.td Tue Jul 29 23:36:53 2008
@@ -49,6 +49,10 @@
def v4f32 : ValueType<128, 25>; // 4 x f32 vector value
def v2f64 : ValueType<128, 26>; // 2 x f64 vector value
+// Pseudo value type mapped to the current pointer size, for a pointer into
+// any address space. Should only be used in TableGen.
+def iPTRAny : ValueType<0, 252>;
+
// Pseudo valuetype to represent "float of any format"
def fAny : ValueType<0 , 253>;
Modified: llvm/trunk/include/llvm/Intrinsics.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Intrinsics.td?rev=54195&r1=54194&r2=54195&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Intrinsics.td (original)
+++ llvm/trunk/include/llvm/Intrinsics.td Tue Jul 29 23:36:53 2008
@@ -64,6 +64,11 @@
LLVMType ElTy = elty;
}
+class LLVMAnyPointerType<LLVMType elty>
+ : LLVMType<iPTRAny>{
+ LLVMType ElTy = elty;
+}
+
class LLVMMatchType<int num>
: LLVMType<OtherVT>{
int Number = num;
@@ -84,6 +89,7 @@
def llvm_ppcf128_ty : LLVMType<ppcf128>;
def llvm_ptr_ty : LLVMPointerType<llvm_i8_ty>; // i8*
def llvm_ptrptr_ty : LLVMPointerType<llvm_ptr_ty>; // i8**
+def llvm_anyptr_ty : LLVMAnyPointerType<llvm_i8_ty>; // (space)i8*
def llvm_empty_ty : LLVMType<OtherVT>; // { }
def llvm_descriptor_ty : LLVMPointerType<llvm_empty_ty>; // { }*
@@ -271,62 +277,62 @@
llvm_i1_ty, llvm_i1_ty, llvm_i1_ty], []>;
def int_atomic_cmp_swap : Intrinsic<[llvm_anyint_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>, LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_val_compare_and_swap">;
def int_atomic_load_add : Intrinsic<[llvm_anyint_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_add">;
def int_atomic_swap : Intrinsic<[llvm_anyint_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_lock_test_and_set">;
def int_atomic_load_sub : Intrinsic<[llvm_anyint_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_sub">;
def int_atomic_load_and : Intrinsic<[llvm_anyint_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_and">;
def int_atomic_load_or : Intrinsic<[llvm_anyint_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_or">;
def int_atomic_load_xor : Intrinsic<[llvm_anyint_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_xor">;
def int_atomic_load_nand : Intrinsic<[llvm_anyint_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_nand">;
def int_atomic_load_min : Intrinsic<[llvm_anyint_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_min">;
def int_atomic_load_max : Intrinsic<[llvm_anyint_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_max">;
def int_atomic_load_umin : Intrinsic<[llvm_anyint_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_umin">;
def int_atomic_load_umax : Intrinsic<[llvm_anyint_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_umax">;
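With LLVMAnyPointerType, each definition above now covers every combination of
integer bit width and address space, so the single int_atomic_load_add entry
matches, for instance, all of the following declarations (the addrspace(1)
variant assumes a target that defines that address space):

    declare i8 @llvm.atomic.load.add.i8.p0i8( i8* <ptr>, i8 <delta> )
    declare i64 @llvm.atomic.load.add.i64.p0i64( i64* <ptr>, i64 <delta> )
    declare i32 @llvm.atomic.load.add.i32.p1i32( i32 addrspace(1)* <ptr>, i32 <delta> )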
Modified: llvm/trunk/lib/VMCore/AutoUpgrade.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/VMCore/AutoUpgrade.cpp?rev=54195&r1=54194&r2=54195&view=diff
==============================================================================
--- llvm/trunk/lib/VMCore/AutoUpgrade.cpp (original)
+++ llvm/trunk/lib/VMCore/AutoUpgrade.cpp Tue Jul 29 23:36:53 2008
@@ -40,24 +40,38 @@
switch (Name[5]) {
default: break;
case 'a':
- // This upgrades the llvm.atomic.lcs, llvm.atomic.las, and llvm.atomic.lss
- // to their new function name
- if (Name.compare(5,8,"atomic.l",8) == 0) {
+ // This upgrades the llvm.atomic.lcs, llvm.atomic.las, and llvm.atomic.lss
+ // intrinsics, as well as atomics lacking an address space qualifier, to
+ // their new names (e.g. llvm.atomic.load.add.i32 => llvm.atomic.load.add.i32.p0i32)
+ if (Name.compare(5,7,"atomic.",7) == 0) {
if (Name.compare(12,3,"lcs",3) == 0) {
std::string::size_type delim = Name.find('.',12);
- F->setName("llvm.atomic.cmp.swap"+Name.substr(delim));
+ F->setName("llvm.atomic.cmp.swap" + Name.substr(delim) +
+ ".p0" + Name.substr(delim+1));
NewFn = F;
return true;
}
else if (Name.compare(12,3,"las",3) == 0) {
std::string::size_type delim = Name.find('.',12);
- F->setName("llvm.atomic.load.add"+Name.substr(delim));
+ F->setName("llvm.atomic.load.add"+Name.substr(delim)
+ + ".p0" + Name.substr(delim+1));
NewFn = F;
return true;
}
else if (Name.compare(12,3,"lss",3) == 0) {
std::string::size_type delim = Name.find('.',12);
- F->setName("llvm.atomic.load.sub"+Name.substr(delim));
+ F->setName("llvm.atomic.load.sub"+Name.substr(delim)
+ + ".p0" + Name.substr(delim+1));
+ NewFn = F;
+ return true;
+ }
+ else if (Name.rfind(".p") == std::string::npos) {
+ // There is no address space qualifier, so this has to be upgraded to
+ // the new name. Copy the type name from the end of the intrinsic name
+ // and append the default address space qualifier to it.
+ std::string::size_type delim = Name.find_last_of('.');
+ assert(delim != std::string::npos && "cannot find type");
+ F->setName(Name + ".p0" + Name.substr(delim+1));
NewFn = F;
return true;
}
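In IR terms, both of the following legacy declarations (a hypothetical old
module) are renamed to the same qualified intrinsic by the code above:

    declare i32 @llvm.atomic.las.i32( i32*, i32 )       ; old short name
    declare i32 @llvm.atomic.load.add.i32( i32*, i32 )  ; missing address space qualifier
    ; both become:
    declare i32 @llvm.atomic.load.add.i32.p0i32( i32*, i32 )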
Modified: llvm/trunk/lib/VMCore/Function.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/VMCore/Function.cpp?rev=54195&r1=54194&r2=54195&view=diff
==============================================================================
--- llvm/trunk/lib/VMCore/Function.cpp (original)
+++ llvm/trunk/lib/VMCore/Function.cpp Tue Jul 29 23:36:53 2008
@@ -328,9 +328,14 @@
if (numTys == 0)
return Table[id];
std::string Result(Table[id]);
- for (unsigned i = 0; i < numTys; ++i)
- if (Tys[i])
+ for (unsigned i = 0; i < numTys; ++i) {
+ if (const PointerType* PTyp = dyn_cast<PointerType>(Tys[i])) {
+ Result += ".p" + llvm::utostr(PTyp->getAddressSpace()) +
+ MVT::getMVT(PTyp->getElementType()).getMVTString();
+ }
+ else if (Tys[i])
Result += "." + MVT::getMVT(Tys[i]).getMVTString();
+ }
return Result;
}
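The suffix built here is ".p" followed by the address space number and the MVT
name of the pointee, so an overload taken over an i64 pointer in address space
2 (an arbitrary illustration) would be named:

    declare i64 @llvm.atomic.load.add.i64.p2i64( i64 addrspace(2)* <ptr>, i64 <delta> )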
Modified: llvm/trunk/lib/VMCore/Verifier.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/VMCore/Verifier.cpp?rev=54195&r1=54194&r2=54195&view=diff
==============================================================================
--- llvm/trunk/lib/VMCore/Verifier.cpp (original)
+++ llvm/trunk/lib/VMCore/Verifier.cpp Tue Jul 29 23:36:53 2008
@@ -1327,7 +1327,6 @@
unsigned Count, ...) {
va_list VA;
va_start(VA, Count);
-
const FunctionType *FTy = F->getFunctionType();
// For overloaded intrinsics, the Suffix of the function name must match the
@@ -1423,6 +1422,21 @@
else
CheckFailed("Intrinsic parameter #" + utostr(ArgNo-1) + " is not a "
"pointer and a pointer is required.", F);
+ }
+ } else if (VT == MVT::iPTRAny) {
+ // Outside of TableGen, we don't distinguish iPTRAny (a pointer to any
+ // address space) from iPTR. In the verifier, we cannot tell which case
+ // we have, so we allow either to be legal.
+ if (const PointerType* PTyp = dyn_cast<PointerType>(Ty)) {
+ Suffix += ".p" + utostr(PTyp->getAddressSpace()) +
+ MVT::getMVT(PTyp->getElementType()).getMVTString();
+ } else {
+ if (ArgNo == 0)
+ CheckFailed("Intrinsic result type is not a "
+ "pointer and a pointer is required.", F);
+ else
+ CheckFailed("Intrinsic parameter #" + utostr(ArgNo-1) + " is not a "
+ "pointer and a pointer is required.", F);
break;
}
} else if (MVT((MVT::SimpleValueType)VT).isVector()) {
@@ -1456,17 +1470,21 @@
va_end(VA);
- // If we computed a Suffix then the intrinsic is overloaded and we need to
- // make sure that the name of the function is correct. We add the suffix to
- // the name of the intrinsic and compare against the given function name. If
- // they are not the same, the function name is invalid. This ensures that
- // overloading of intrinsics uses a sane and consistent naming convention.
+ // For intrinsics without pointer arguments, if we computed a Suffix then the
+ // intrinsic is overloaded and we need to make sure that the name of the
+ // function is correct. We add the suffix to the name of the intrinsic and
+ // compare against the given function name. If they are not the same, the
+ // function name is invalid. This ensures that overloading of intrinsics
+ // uses a sane and consistent naming convention. Note that intrinsics with
+ // a pointer argument may or may not be overloaded, so we check both with
+ // and without the suffix.
if (!Suffix.empty()) {
std::string Name(Intrinsic::getName(ID));
- if (Name + Suffix != F->getName())
+ if (Name + Suffix != F->getName()) {
CheckFailed("Overloaded intrinsic has incorrect suffix: '" +
F->getName().substr(Name.length()) + "'. It should be '" +
Suffix + "'", F);
+ }
}
// Check parameter attributes.
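Concretely, for the declaration below the loop above derives the suffix
".i16.p0i16" from the result type and the pointer operand, and the declared
name must be Intrinsic::getName(ID) plus that suffix (auto-upgrade normally
rewrites older, unsuffixed names before verification):

    declare i16 @llvm.atomic.swap.i16.p0i16( i16* <ptr>, i16 <val> )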
Modified: llvm/trunk/test/CodeGen/X86/atomic_op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic_op.ll?rev=54195&r1=54194&r2=54195&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic_op.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic_op.ll Tue Jul 29 23:36:53 2008
@@ -29,65 +29,65 @@
store i32 3855, i32* %xort
store i32 4, i32* %temp
%tmp = load i32* %temp ; <i32> [#uses=1]
- call i32 @llvm.atomic.load.add.i32( i32* %val1, i32 %tmp ) ; <i32>:0 [#uses=1]
+ call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val1, i32 %tmp ) ; <i32>:0 [#uses=1]
store i32 %0, i32* %old
- call i32 @llvm.atomic.load.sub.i32( i32* %val2, i32 30 ) ; <i32>:1 [#uses=1]
+ call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 30 ) ; <i32>:1 [#uses=1]
store i32 %1, i32* %old
- call i32 @llvm.atomic.load.add.i32( i32* %val2, i32 1 ) ; <i32>:2 [#uses=1]
+ call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:2 [#uses=1]
store i32 %2, i32* %old
- call i32 @llvm.atomic.load.sub.i32( i32* %val2, i32 1 ) ; <i32>:3 [#uses=1]
+ call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:3 [#uses=1]
store i32 %3, i32* %old
- call i32 @llvm.atomic.load.and.i32( i32* %andt, i32 4080 ) ; <i32>:4 [#uses=1]
+ call i32 @llvm.atomic.load.and.i32.p0i32( i32* %andt, i32 4080 ) ; <i32>:4 [#uses=1]
store i32 %4, i32* %old
- call i32 @llvm.atomic.load.or.i32( i32* %ort, i32 4080 ) ; <i32>:5 [#uses=1]
+ call i32 @llvm.atomic.load.or.i32.p0i32( i32* %ort, i32 4080 ) ; <i32>:5 [#uses=1]
store i32 %5, i32* %old
- call i32 @llvm.atomic.load.xor.i32( i32* %xort, i32 4080 ) ; <i32>:6 [#uses=1]
+ call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %xort, i32 4080 ) ; <i32>:6 [#uses=1]
store i32 %6, i32* %old
- call i32 @llvm.atomic.load.min.i32( i32* %val2, i32 16 ) ; <i32>:7 [#uses=1]
+ call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 16 ) ; <i32>:7 [#uses=1]
store i32 %7, i32* %old
%neg = sub i32 0, 1 ; <i32> [#uses=1]
- call i32 @llvm.atomic.load.min.i32( i32* %val2, i32 %neg ) ; <i32>:8 [#uses=1]
+ call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 %neg ) ; <i32>:8 [#uses=1]
store i32 %8, i32* %old
- call i32 @llvm.atomic.load.max.i32( i32* %val2, i32 1 ) ; <i32>:9 [#uses=1]
+ call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:9 [#uses=1]
store i32 %9, i32* %old
- call i32 @llvm.atomic.load.max.i32( i32* %val2, i32 0 ) ; <i32>:10 [#uses=1]
+ call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 0 ) ; <i32>:10 [#uses=1]
store i32 %10, i32* %old
- call i32 @llvm.atomic.load.umax.i32( i32* %val2, i32 65535 ) ; <i32>:11 [#uses=1]
+ call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %val2, i32 65535 ) ; <i32>:11 [#uses=1]
store i32 %11, i32* %old
- call i32 @llvm.atomic.load.umax.i32( i32* %val2, i32 10 ) ; <i32>:12 [#uses=1]
+ call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %val2, i32 10 ) ; <i32>:12 [#uses=1]
store i32 %12, i32* %old
- call i32 @llvm.atomic.load.umin.i32( i32* %val2, i32 1 ) ; <i32>:13 [#uses=1]
+ call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:13 [#uses=1]
store i32 %13, i32* %old
- call i32 @llvm.atomic.load.umin.i32( i32* %val2, i32 10 ) ; <i32>:14 [#uses=1]
+ call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %val2, i32 10 ) ; <i32>:14 [#uses=1]
store i32 %14, i32* %old
- call i32 @llvm.atomic.swap.i32( i32* %val2, i32 1976 ) ; <i32>:15 [#uses=1]
+ call i32 @llvm.atomic.swap.i32.p0i32( i32* %val2, i32 1976 ) ; <i32>:15 [#uses=1]
store i32 %15, i32* %old
%neg1 = sub i32 0, 10 ; <i32> [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32( i32* %val2, i32 %neg1, i32 1 ) ; <i32>:16 [#uses=1]
+ call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %val2, i32 %neg1, i32 1 ) ; <i32>:16 [#uses=1]
store i32 %16, i32* %old
- call i32 @llvm.atomic.cmp.swap.i32( i32* %val2, i32 1976, i32 1 ) ; <i32>:17 [#uses=1]
+ call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %val2, i32 1976, i32 1 ) ; <i32>:17 [#uses=1]
store i32 %17, i32* %old
ret void
}
-declare i32 @llvm.atomic.load.add.i32(i32*, i32) nounwind
+declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-declare i32 @llvm.atomic.load.sub.i32(i32*, i32) nounwind
+declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-declare i32 @llvm.atomic.load.and.i32(i32*, i32) nounwind
+declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-declare i32 @llvm.atomic.load.or.i32(i32*, i32) nounwind
+declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-declare i32 @llvm.atomic.load.xor.i32(i32*, i32) nounwind
+declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-declare i32 @llvm.atomic.load.min.i32(i32*, i32) nounwind
+declare i32 @llvm.atomic.load.min.i32.p0i32(i32*, i32) nounwind
-declare i32 @llvm.atomic.load.max.i32(i32*, i32) nounwind
+declare i32 @llvm.atomic.load.max.i32.p0i32(i32*, i32) nounwind
-declare i32 @llvm.atomic.load.umax.i32(i32*, i32) nounwind
+declare i32 @llvm.atomic.load.umax.i32.p0i32(i32*, i32) nounwind
-declare i32 @llvm.atomic.load.umin.i32(i32*, i32) nounwind
+declare i32 @llvm.atomic.load.umin.i32.p0i32(i32*, i32) nounwind
-declare i32 @llvm.atomic.swap.i32(i32*, i32) nounwind
+declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-declare i32 @llvm.atomic.cmp.swap.i32(i32*, i32, i32) nounwind
+declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
Modified: llvm/trunk/utils/TableGen/CodeGenDAGPatterns.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/TableGen/CodeGenDAGPatterns.cpp?rev=54195&r1=54194&r2=54195&view=diff
==============================================================================
--- llvm/trunk/utils/TableGen/CodeGenDAGPatterns.cpp (original)
+++ llvm/trunk/utils/TableGen/CodeGenDAGPatterns.cpp Tue Jul 29 23:36:53 2008
@@ -443,8 +443,8 @@
return true;
}
- if (getExtTypeNum(0) == MVT::iPTR) {
- if (ExtVTs[0] == MVT::iPTR || ExtVTs[0] == EMVT::isInt)
+ if (getExtTypeNum(0) == MVT::iPTR || getExtTypeNum(0) == MVT::iPTRAny) {
+ if (ExtVTs[0] == MVT::iPTR || ExtVTs[0] == MVT::iPTRAny || ExtVTs[0] == EMVT::isInt)
return false;
if (EMVT::isExtIntegerInVTs(ExtVTs)) {
std::vector<unsigned char> FVTs = FilterEVTs(ExtVTs, isInteger);
@@ -463,7 +463,8 @@
setTypes(FVTs);
return true;
}
- if (ExtVTs[0] == MVT::iPTR && EMVT::isExtIntegerInVTs(getExtTypes())) {
+ if ((ExtVTs[0] == MVT::iPTR || ExtVTs[0] == MVT::iPTRAny) &&
+ EMVT::isExtIntegerInVTs(getExtTypes())) {
//assert(hasTypeSet() && "should be handled above!");
std::vector<unsigned char> FVTs = FilterEVTs(getExtTypes(), isInteger);
if (getExtTypes() == FVTs)
@@ -495,7 +496,8 @@
setTypes(ExtVTs);
return true;
}
- if (getExtTypeNum(0) == EMVT::isInt && ExtVTs[0] == MVT::iPTR) {
+ if (getExtTypeNum(0) == EMVT::isInt &&
+ (ExtVTs[0] == MVT::iPTR || ExtVTs[0] == MVT::iPTRAny)) {
setTypes(ExtVTs);
return true;
}
@@ -527,6 +529,7 @@
case EMVT::isFP : OS << ":isFP"; break;
case EMVT::isUnknown: ; /*OS << ":?";*/ break;
case MVT::iPTR: OS << ":iPTR"; break;
+ case MVT::iPTRAny: OS << ":iPTRAny"; break;
default: {
std::string VTName = llvm::getName(getTypeNum(0));
// Strip off MVT:: prefix if present.
@@ -781,7 +784,7 @@
assert(getTypeNum(i) == VT && "TreePattern has too many types!");
VT = getTypeNum(0);
- if (VT != MVT::iPTR) {
+ if (VT != MVT::iPTR && VT != MVT::iPTRAny) {
unsigned Size = MVT(VT).getSizeInBits();
// Make sure that the value is representable for this type.
if (Size < 32) {
Modified: llvm/trunk/utils/TableGen/CodeGenDAGPatterns.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/TableGen/CodeGenDAGPatterns.h?rev=54195&r1=54194&r2=54195&view=diff
==============================================================================
--- llvm/trunk/utils/TableGen/CodeGenDAGPatterns.h (original)
+++ llvm/trunk/utils/TableGen/CodeGenDAGPatterns.h Tue Jul 29 23:36:53 2008
@@ -182,13 +182,14 @@
bool isLeaf() const { return Val != 0; }
bool hasTypeSet() const {
- return (Types[0] < MVT::LAST_VALUETYPE) || (Types[0] == MVT::iPTR);
+ return (Types[0] < MVT::LAST_VALUETYPE) || (Types[0] == MVT::iPTR) ||
+ (Types[0] == MVT::iPTRAny);
}
bool isTypeCompletelyUnknown() const {
return Types[0] == EMVT::isUnknown;
}
bool isTypeDynamicallyResolved() const {
- return Types[0] == MVT::iPTR;
+ return (Types[0] == MVT::iPTR) || (Types[0] == MVT::iPTRAny);
}
MVT::SimpleValueType getTypeNum(unsigned Num) const {
assert(hasTypeSet() && "Doesn't have a type yet!");
Modified: llvm/trunk/utils/TableGen/CodeGenTarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/TableGen/CodeGenTarget.cpp?rev=54195&r1=54194&r2=54195&view=diff
==============================================================================
--- llvm/trunk/utils/TableGen/CodeGenTarget.cpp (original)
+++ llvm/trunk/utils/TableGen/CodeGenTarget.cpp Tue Jul 29 23:36:53 2008
@@ -65,6 +65,7 @@
case MVT::v3i32: return "MVT::v3i32";
case MVT::v3f32: return "MVT::v3f32";
case MVT::iPTR: return "TLI.getPointerTy()";
+ case MVT::iPTRAny: return "TLI.getPointerTy()";
default: assert(0 && "ILLEGAL VALUE TYPE!"); return "";
}
}
@@ -101,6 +102,7 @@
case MVT::v3i32: return "MVT::v3i32";
case MVT::v3f32: return "MVT::v3f32";
case MVT::iPTR: return "MVT::iPTR";
+ case MVT::iPTRAny: return "MVT::iPTRAny";
default: assert(0 && "ILLEGAL VALUE TYPE!"); return "";
}
}
@@ -459,7 +461,7 @@
Record *TyEl = TypeList->getElementAsRecord(i);
assert(TyEl->isSubClassOf("LLVMType") && "Expected a type!");
MVT::SimpleValueType VT = getValueType(TyEl->getValueAsDef("VT"));
- isOverloaded |= VT == MVT::iAny || VT == MVT::fAny;
+ isOverloaded |= VT == MVT::iAny || VT == MVT::fAny || VT == MVT::iPTRAny;
ArgVTs.push_back(VT);
ArgTypeDefs.push_back(TyEl);
}
Modified: llvm/trunk/utils/TableGen/DAGISelEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/TableGen/DAGISelEmitter.cpp?rev=54195&r1=54194&r2=54195&view=diff
==============================================================================
--- llvm/trunk/utils/TableGen/DAGISelEmitter.cpp (original)
+++ llvm/trunk/utils/TableGen/DAGISelEmitter.cpp Tue Jul 29 23:36:53 2008
@@ -56,7 +56,8 @@
EMVT::isExtFloatingPointInVTs(P->getExtTypes()) ||
P->getExtTypeNum(0) == MVT::isVoid ||
P->getExtTypeNum(0) == MVT::Flag ||
- P->getExtTypeNum(0) == MVT::iPTR) &&
+ P->getExtTypeNum(0) == MVT::iPTR ||
+ P->getExtTypeNum(0) == MVT::iPTRAny) &&
"Not a valid pattern node to size!");
unsigned Size = 3; // The node itself.
// If the root node is a ConstantSDNode, increases its size.
@@ -1828,6 +1829,8 @@
std::string OpVTStr;
if (OpVT == MVT::iPTR) {
OpVTStr = "_iPTR";
+ } else if (OpVT == MVT::iPTRAny) {
+ OpVTStr = "_iPTRAny";
} else if (OpVT == MVT::isVoid) {
// Nodes with a void result actually have a first result type of either
// Other (a chain) or Flag. Since there is no one-to-one mapping from
Modified: llvm/trunk/utils/TableGen/IntrinsicEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/TableGen/IntrinsicEmitter.cpp?rev=54195&r1=54194&r2=54195&view=diff
==============================================================================
--- llvm/trunk/utils/TableGen/IntrinsicEmitter.cpp (original)
+++ llvm/trunk/utils/TableGen/IntrinsicEmitter.cpp Tue Jul 29 23:36:53 2008
@@ -162,6 +162,14 @@
OS << "PointerType::getUnqual(";
EmitTypeGenerate(OS, ArgType->getValueAsDef("ElTy"), ArgNo);
OS << ")";
+ } else if (VT == MVT::iPTRAny) {
+ // Make sure the user has passed us an argument type to overload. If not,
+ // treat it as an ordinary (not overloaded) intrinsic.
+ OS << "(" << ArgNo << " < numTys) ? Tys[" << ArgNo
+ << "] : PointerType::getUnqual(";
+ EmitTypeGenerate(OS, ArgType->getValueAsDef("ElTy"), ArgNo);
+ OS << ")";
+ ++ArgNo;
} else if (VT == MVT::isVoid) {
if (ArgNo == 0)
OS << "Type::VoidTy";