[llvm-commits] [llvm] r157544 - in /llvm/trunk/test: CodeGen/ARM/vlddup.ll CodeGen/MSP430/2009-12-21-FrameAddr.ll CodeGen/X86/sse4a.ll Verifier/2008-08-22-MemCpyAlignment.ll

Chris Lattner sabre at nondot.org
Sun May 27 12:35:41 PDT 2012


Author: lattner
Date: Sun May 27 14:35:41 2012
New Revision: 157544

URL: http://llvm.org/viewvc/llvm-project?rev=157544&view=rev
Log:
These tests used intrinsics with the wrong prototype.  The mismatches weren't caught
because the old verifier only checked that an argument "was a pointer", not that the
pointee type was correct.
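
For reference, the fix pattern used in the ARM diff below is to keep the caller's typed
pointer and bitcast it to i8* so the call site matches the intrinsic's declared prototype.
A minimal sketch in the textual IR of this era (the function name @example is illustrative,
not from the commit):

    %struct.__neon_int4x16x2_t = type { <4 x i16>, <4 x i16> }

    define <4 x i16> @example(i16* %p) nounwind {
      ; Bitcast the typed pointer to i8* so the operand matches the
      ; intrinsic's declared i8* parameter.
      %p8 = bitcast i16* %p to i8*
      %vld = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %p8, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
      %lane = extractvalue %struct.__neon_int4x16x2_t %vld, 0
      ret <4 x i16> %lane
    }

    declare %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly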

Modified:
    llvm/trunk/test/CodeGen/ARM/vlddup.ll
    llvm/trunk/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll
    llvm/trunk/test/CodeGen/X86/sse4a.ll
    llvm/trunk/test/Verifier/2008-08-22-MemCpyAlignment.ll

Modified: llvm/trunk/test/CodeGen/ARM/vlddup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vlddup.ll?rev=157544&r1=157543&r2=157544&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vlddup.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vlddup.ll Sun May 27 14:35:41 2012
@@ -75,12 +75,12 @@
         ret <8 x i8> %tmp5
 }
 
-define <4 x i16> @vld2dupi16(i16* %A) nounwind {
+define <4 x i16> @vld2dupi16(i8* %A) nounwind {
 ;CHECK: vld2dupi16:
 ;Check that a power-of-two alignment smaller than the total size of the memory
 ;being loaded is ignored.
 ;CHECK: vld2.16 {d16[], d17[]}, [r0]
-	%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
+	%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
 	%tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
 	%tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1
@@ -94,7 +94,8 @@
 ;CHECK: vld2dupi16_update:
 ;CHECK: vld2.16 {d16[], d17[]}, [r1]!
 	%A = load i16** %ptr
-	%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
+        %A2 = bitcast i16* %A to i8*
+	%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %A2, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
 	%tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
 	%tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1
@@ -105,11 +106,11 @@
 	ret <4 x i16> %tmp5
 }
 
-define <2 x i32> @vld2dupi32(i32* %A) nounwind {
+define <2 x i32> @vld2dupi32(i8* %A) nounwind {
 ;CHECK: vld2dupi32:
 ;Check the alignment value.  Max for this instruction is 64 bits:
 ;CHECK: vld2.32 {d16[], d17[]}, [r0, :64]
-	%tmp0 = tail call %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i32* %A, <2 x i32> undef, <2 x i32> undef, i32 0, i32 16)
+	%tmp0 = tail call %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i8* %A, <2 x i32> undef, <2 x i32> undef, i32 0, i32 16)
 	%tmp1 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 0
 	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
 	%tmp3 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 1
@@ -119,8 +120,8 @@
 }
 
 declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
-declare %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16*, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
-declare %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i32*, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
+declare %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
 
 %struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
 %struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> }
@@ -144,11 +145,11 @@
 	ret <8 x i8> %tmp8
 }
 
-define <4 x i16> @vld3dupi16(i16* %A) nounwind {
+define <4 x i16> @vld3dupi16(i8* %A) nounwind {
 ;CHECK: vld3dupi16:
 ;Check the (default) alignment value. VLD3 does not support alignment.
 ;CHECK: vld3.16 {d16[], d17[], d18[]}, [r0]
-	%tmp0 = tail call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 8)
+	%tmp0 = tail call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 8)
 	%tmp1 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 0
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
 	%tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 1
@@ -161,7 +162,7 @@
 }
 
 declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
-declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i16*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
 
 %struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
 %struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
@@ -171,7 +172,8 @@
 ;CHECK: vld4dupi16_update:
 ;CHECK: vld4.16 {d16[], d17[], d18[], d19[]}, [r1]!
 	%A = load i16** %ptr
-	%tmp0 = tail call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 1)
+        %A2 = bitcast i16* %A to i8*
+	%tmp0 = tail call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8* %A2, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 1)
 	%tmp1 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 0
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
 	%tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 1
@@ -188,12 +190,12 @@
 	ret <4 x i16> %tmp11
 }
 
-define <2 x i32> @vld4dupi32(i32* %A) nounwind {
+define <2 x i32> @vld4dupi32(i8* %A) nounwind {
 ;CHECK: vld4dupi32:
 ;Check the alignment value.  An 8-byte alignment is allowed here even though
 ;it is smaller than the total size of the memory being loaded.
 ;CHECK: vld4.32 {d16[], d17[], d18[], d19[]}, [r0, :64]
-	%tmp0 = tail call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i32* %A, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 8)
+	%tmp0 = tail call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8* %A, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 8)
 	%tmp1 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 0
 	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
 	%tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 1
@@ -208,5 +210,5 @@
         ret <2 x i32> %tmp11
 }
 
-declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i16*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
-declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i32*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
+declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly

Modified: llvm/trunk/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll?rev=157544&r1=157543&r2=157544&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll Sun May 27 14:35:41 2012
@@ -5,9 +5,9 @@
 
 define msp430_intrcc void @foo() nounwind {
 entry:
-	%fa = call i16* @llvm.frameaddress(i32 0)
-	store i16 0, i16* %fa
+	%fa = call i8* @llvm.frameaddress(i32 0)
+	store i8 0, i8* %fa
 	ret void
 }
 
-declare i16* @llvm.frameaddress(i32)
+declare i8* @llvm.frameaddress(i32)

Modified: llvm/trunk/test/CodeGen/X86/sse4a.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse4a.ll?rev=157544&r1=157543&r2=157544&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse4a.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse4a.ll Sun May 27 14:35:41 2012
@@ -1,19 +1,19 @@
 ; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse4a | FileCheck %s
 
-define void @test1(float* %p, <4 x float> %a) nounwind optsize ssp {
+define void @test1(i8* %p, <4 x float> %a) nounwind optsize ssp {
 ; CHECK: movntss
 entry:
-  tail call void @llvm.x86.sse4a.movnt.ss(float* %p, <4 x float> %a) nounwind
+  tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a) nounwind
   ret void
 }
 
-declare void @llvm.x86.sse4a.movnt.ss(float*, <4 x float>)
+declare void @llvm.x86.sse4a.movnt.ss(i8*, <4 x float>)
 
-define void @test2(double* %p, <2 x double> %a) nounwind optsize ssp {
+define void @test2(i8* %p, <2 x double> %a) nounwind optsize ssp {
 ; CHECK: movntsd
 entry:
-  tail call void @llvm.x86.sse4a.movnt.sd(double* %p, <2 x double> %a) nounwind
+  tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a) nounwind
   ret void
 }
 
-declare void @llvm.x86.sse4a.movnt.sd(double*, <2 x double>)
+declare void @llvm.x86.sse4a.movnt.sd(i8*, <2 x double>)

Modified: llvm/trunk/test/Verifier/2008-08-22-MemCpyAlignment.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Verifier/2008-08-22-MemCpyAlignment.ll?rev=157544&r1=157543&r2=157544&view=diff
==============================================================================
--- llvm/trunk/test/Verifier/2008-08-22-MemCpyAlignment.ll (original)
+++ llvm/trunk/test/Verifier/2008-08-22-MemCpyAlignment.ll Sun May 27 14:35:41 2012
@@ -3,9 +3,9 @@
 
 define void @x(i8* %a, i8* %src, i64 %len, i32 %align) nounwind  {
 entry:
-        tail call void @llvm.memcpy.i64( i8* %a, i8* %src, i64 %len, i32 %align) nounwind 
+        tail call void @llvm.memcpy.p0i8.p0i8.i64( i8* %a, i8* %src, i64 %len, i32 %align, i1 false) nounwind 
         ret void
 }
 
-declare void @llvm.memcpy.i64( i8* %a, i8* %src, i64 %len, i32)
+declare void @llvm.memcpy.p0i8.p0i8.i64( i8* %a, i8* %src, i64 %len, i32, i1)
 

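For reference, the overloaded memcpy intrinsic of this era takes the destination and
source as i8*, an i64 length, an explicit i32 alignment operand, and a trailing i1
is-volatile flag, as declared in the diff above. A minimal usage sketch (the function
name @copy is illustrative, not from the commit):

    define void @copy(i8* %dst, i8* %src, i64 %n) nounwind {
    entry:
      ; The final i1 operand marks the copy as non-volatile.
      tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 false)
      ret void
    }

    declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)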



