[llvm-commits] [llvm] r117937 - in /llvm/trunk/test/MC/ARM: neon-shuffle-encoding.ll neon-shuffle-encoding.s

Owen Anderson resistor at mac.com
Mon Nov 1 11:13:11 PDT 2010


Author: resistor
Date: Mon Nov  1 13:13:11 2010
New Revision: 117937

URL: http://llvm.org/viewvc/llvm-project?rev=117937&view=rev
Log:
Convert this test to .s form.

Added:
    llvm/trunk/test/MC/ARM/neon-shuffle-encoding.s
Removed:
    llvm/trunk/test/MC/ARM/neon-shuffle-encoding.ll
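
The practical effect of the conversion is that the encoding checks no longer
go through the code generator: the old .ll test drove llc (and carried an
XFAIL: * marker), while the new .s test hands the same instructions straight
to the assembler. Compare the two RUN lines, quoted from the diffs below:

    ; RUN: llc -show-mc-encoding -march=arm -mcpu=cortex-a8 -mattr=+neon < %s | FileCheck %s
    @ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unknown -show-encoding < %s | FileCheck %s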

Removed: llvm/trunk/test/MC/ARM/neon-shuffle-encoding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/ARM/neon-shuffle-encoding.ll?rev=117936&view=auto
==============================================================================
--- llvm/trunk/test/MC/ARM/neon-shuffle-encoding.ll (original)
+++ llvm/trunk/test/MC/ARM/neon-shuffle-encoding.ll (removed)
@@ -1,215 +0,0 @@
-; RUN: llc -show-mc-encoding -march=arm -mcpu=cortex-a8 -mattr=+neon < %s | FileCheck %s
-
-; XFAIL: *
-
-define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
-; CHECK: vext.8	d16, d17, d16, #3       @ encoding: [0xa0,0x03,0xf1,0xf2]
-	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
-	ret <8 x i8> %tmp3
-}
-
-define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
-; CHECK: vext.8	d16, d17, d16, #5       @ encoding: [0xa0,0x05,0xf1,0xf2]
-	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
-	ret <8 x i8> %tmp3
-}
-
-define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
-; CHECK: vext.8	q8, q8, q9, #3          @ encoding: [0xe2,0x03,0xf0,0xf2]
-	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
-	ret <16 x i8> %tmp3
-}
-
-define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
-; CHECK: vext.8	q8, q8, q9, #7          @ encoding: [0xe2,0x07,0xf0,0xf2]
-	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
-	ret <16 x i8> %tmp3
-}
-
-define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
-; CHECK: vext.16	d16, d17, d16, #3       @ encoding: [0xa0,0x03,0xf1,0xf2]
-	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-	ret <4 x i16> %tmp3
-}
-
-define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
-; CHECK: vext.32	q8, q8, q9, #3          @ encoding: [0xe2,0x03,0xf0,0xf2]
-	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-	ret <4 x i32> %tmp3
-}
-
-define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
-; CHECK: vtrn.8	d17, d16                @ encoding: [0xa0,0x10,0xf2,0xf3]
-	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
-	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
-        %tmp5 = add <8 x i8> %tmp3, %tmp4
-	ret <8 x i8> %tmp5
-}
-
-define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
-; CHECK: vtrn.16	d17, d16                @ encoding: [0xa0,0x10,0xf6,0xf3]
-	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
-	%tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
-        %tmp5 = add <4 x i16> %tmp3, %tmp4
-	ret <4 x i16> %tmp5
-}
-
-define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-	%tmp1 = load <2 x i32>* %A
-	%tmp2 = load <2 x i32>* %B
-; CHECK: vtrn.32	d17, d16                @ encoding: [0xa0,0x10,0xfa,0xf3]
-	%tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
-	%tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
-        %tmp5 = add <2 x i32> %tmp3, %tmp4
-	ret <2 x i32> %tmp5
-}
-
-define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
-; CHECK: vadd.i8	q8, q9, q8              @ encoding: [0xe0,0x08,0x42,0xf2]
-	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
-	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
-        %tmp5 = add <16 x i8> %tmp3, %tmp4
-	ret <16 x i8> %tmp5
-}
-
-define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
-; CHECK: vadd.i16	q8, q9, q8      @ encoding: [0xe0,0x08,0x52,0xf2]
-	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
-	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
-        %tmp5 = add <8 x i16> %tmp3, %tmp4
-	ret <8 x i16> %tmp5
-}
-
-define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
-; CHECK: vtrn.32	q9, q8                  @ encoding: [0xe0,0x20,0xfa,0xf3]
-	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
-	%tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
-        %tmp5 = add <4 x i32> %tmp3, %tmp4
-	ret <4 x i32> %tmp5
-}
-
-define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
-; CHECK: vuzp.8	d17, d16                @ encoding: [0x20,0x11,0xf2,0xf3]
-	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-        %tmp5 = add <8 x i8> %tmp3, %tmp4
-	ret <8 x i8> %tmp5
-}
-
-define <4 x i16> @vuzpi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
-; CHECK: vuzp.16	d17, d16                @ encoding: [0x20,0x11,0xf6,0xf3]
-	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-	%tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-        %tmp5 = add <4 x i16> %tmp3, %tmp4
-	ret <4 x i16> %tmp5
-}
-
-; VUZP.32 is equivalent to VTRN.32 for 64-bit vectors.
-
-define <16 x i8> @vuzpQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
-; CHECK: vuzp.8	q9, q8                  @ encoding: [0x60,0x21,0xf2,0xf3]
-	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
-	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
-        %tmp5 = add <16 x i8> %tmp3, %tmp4
-	ret <16 x i8> %tmp5
-}
-
-define <8 x i16> @vuzpQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
-; CHECK: vuzp.16	q9, q8                  @ encoding: [0x60,0x21,0xf6,0xf3]
-	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-        %tmp5 = add <8 x i16> %tmp3, %tmp4
-	ret <8 x i16> %tmp5
-}
-
-define <4 x i32> @vuzpQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
-; CHECK: vuzp.32	q9, q8                  @ encoding: [0x60,0x21,0xfa,0xf3]
-	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-	%tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-        %tmp5 = add <4 x i32> %tmp3, %tmp4
-	ret <4 x i32> %tmp5
-}
-
-define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-	%tmp1 = load <8 x i8>* %A
-	%tmp2 = load <8 x i8>* %B
-; CHECK: vzip.8	d17, d16                @ encoding: [0xa0,0x11,0xf2,0xf3]
-	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
-	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-        %tmp5 = add <8 x i8> %tmp3, %tmp4
-	ret <8 x i8> %tmp5
-}
-
-define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-	%tmp1 = load <4 x i16>* %A
-	%tmp2 = load <4 x i16>* %B
-; CHECK: vzip.16	d17, d16                @ encoding: [0xa0,0x11,0xf6,0xf3]
-	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
-	%tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
-        %tmp5 = add <4 x i16> %tmp3, %tmp4
-	ret <4 x i16> %tmp5
-}
-
-; VZIP.32 is equivalent to VTRN.32 for 64-bit vectors.
-
-define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-	%tmp1 = load <16 x i8>* %A
-	%tmp2 = load <16 x i8>* %B
-; CHECK: vzip.8	q9, q8                  @ encoding: [0xe0,0x21,0xf2,0xf3]
-	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
-	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
-        %tmp5 = add <16 x i8> %tmp3, %tmp4
-	ret <16 x i8> %tmp5
-}
-
-define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-	%tmp1 = load <8 x i16>* %A
-	%tmp2 = load <8 x i16>* %B
-; CHECK: vzip.16	q9, q8                  @ encoding: [0xe0,0x21,0xf6,0xf3]
-	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
-	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-        %tmp5 = add <8 x i16> %tmp3, %tmp4
-	ret <8 x i16> %tmp5
-}
-
-define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-	%tmp1 = load <4 x i32>* %A
-	%tmp2 = load <4 x i32>* %B
-; CHECK: vzip.32	q9, q8                  @ encoding: [0xe0,0x21,0xfa,0xf3]
-	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
-	%tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
-        %tmp5 = add <4 x i32> %tmp3, %tmp4
-	ret <4 x i32> %tmp5
-}

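Each removed shufflevector test encodes the semantics of the instruction it
checked. VEXT, for instance, extracts a contiguous run of elements from the
concatenation of its two source registers, which is exactly what masks like
<3, 4, 5, 6, 7, 8, 9, 10> express. A minimal Python sketch of that semantics
(the list-based "registers" and the helper name are illustrative, not LLVM
code):

    def vext(a, b, imm):
        # VEXT Dd, Dn, Dm, #imm: keep len(a) elements of a ++ b,
        # starting at element index imm.
        return (a + b)[imm:imm + len(a)]

    # Mirrors test_vextd above: mask <3,4,5,6,7,8,9,10> over 8 x i8 inputs.
    d17 = list(range(0, 8))     # %tmp1: elements 0..7 of the concatenation
    d16 = list(range(8, 16))    # %tmp2: elements 8..15
    assert vext(d17, d16, 3) == [3, 4, 5, 6, 7, 8, 9, 10]
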
Added: llvm/trunk/test/MC/ARM/neon-shuffle-encoding.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/ARM/neon-shuffle-encoding.s?rev=117937&view=auto
==============================================================================
--- llvm/trunk/test/MC/ARM/neon-shuffle-encoding.s (added)
+++ llvm/trunk/test/MC/ARM/neon-shuffle-encoding.s Mon Nov  1 13:13:11 2010
@@ -0,0 +1,46 @@
+@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unknown -show-encoding < %s | FileCheck %s
+
+@ CHECK: vext.8	d16, d17, d16, #3       @ encoding: [0xa0,0x03,0xf1,0xf2]
+	vext.8	d16, d17, d16, #3
+@ CHECK: vext.8	d16, d17, d16, #5       @ encoding: [0xa0,0x05,0xf1,0xf2]
+	vext.8	d16, d17, d16, #5
+@ CHECK: vext.8	q8, q9, q8, #3          @ encoding: [0xe0,0x03,0xf2,0xf2]
+	vext.8	q8, q9, q8, #3
+@ CHECK: vext.8	q8, q9, q8, #7          @ encoding: [0xe0,0x07,0xf2,0xf2]
+	vext.8	q8, q9, q8, #7
+@ CHECK: vext.16	d16, d17, d16, #3       @ encoding: [0xa0,0x03,0xf1,0xf2]
+	vext.16	d16, d17, d16, #3
+@ CHECK: vext.32	q8, q9, q8, #3          @ encoding: [0xe0,0x03,0xf2,0xf2]
+	vext.32	q8, q9, q8, #3
+@ CHECK: vtrn.8	d17, d16                @ encoding: [0xa0,0x10,0xf2,0xf3]
+	vtrn.8	d17, d16
+@ CHECK: vtrn.16	d17, d16                @ encoding: [0xa0,0x10,0xf6,0xf3]
+	vtrn.16	d17, d16
+@ CHECK: vtrn.32	d17, d16                @ encoding: [0xa0,0x10,0xfa,0xf3]
+	vtrn.32	d17, d16
+@ CHECK: vtrn.8	q9, q8                  @ encoding: [0xe0,0x20,0xf2,0xf3]
+	vtrn.8	q9, q8
+@ CHECK: vtrn.16	q9, q8                  @ encoding: [0xe0,0x20,0xf6,0xf3]
+	vtrn.16	q9, q8
+@ CHECK: vtrn.32	q9, q8                  @ encoding: [0xe0,0x20,0xfa,0xf3]
+	vtrn.32	q9, q8
+@ CHECK: vuzp.8	d17, d16                @ encoding: [0x20,0x11,0xf2,0xf3]
+	vuzp.8	d17, d16
+@ CHECK: vuzp.16	d17, d16                @ encoding: [0x20,0x11,0xf6,0xf3]
+	vuzp.16	d17, d16
+@ CHECK: vuzp.8	q9, q8                  @ encoding: [0x60,0x21,0xf2,0xf3]
+	vuzp.8	q9, q8
+@ CHECK: vuzp.16	q9, q8                  @ encoding: [0x60,0x21,0xf6,0xf3]
+	vuzp.16	q9, q8
+@ CHECK: vuzp.32	q9, q8                  @ encoding: [0x60,0x21,0xfa,0xf3]
+	vuzp.32	q9, q8
+@ CHECK: vzip.8	d17, d16                @ encoding: [0xa0,0x11,0xf2,0xf3]
+	vzip.8	d17, d16
+@ CHECK: vzip.16	d17, d16                @ encoding: [0xa0,0x11,0xf6,0xf3]
+	vzip.16	d17, d16
+@ CHECK: vzip.8	q9, q8                  @ encoding: [0xe0,0x21,0xf2,0xf3]
+	vzip.8	q9, q8
+@ CHECK: vzip.16	q9, q8                  @ encoding: [0xe0,0x21,0xf6,0xf3]
+	vzip.16	q9, q8
+@ CHECK: vzip.32	q9, q8                  @ encoding: [0xe0,0x21,0xfa,0xf3]
+	vzip.32	q9, q8

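The permute instructions in the new file follow the same correspondence. A
Python sketch of VTRN, VUZP, and VZIP (again illustrative only), including
the degenerate case the removed test's comments noted ("VUZP.32 is
equivalent to VTRN.32 for 64-bit vectors", likewise VZIP.32, which is why
the .s file has no d-register .32 forms of vuzp or vzip):

    def vtrn(a, b):
        # VTRN: transpose element pairs; swaps the odd elements of a
        # with the even elements of b.
        a2, b2 = a[:], b[:]
        for i in range(0, len(a), 2):
            a2[i + 1], b2[i] = b[i], a[i + 1]
        return a2, b2

    def vuzp(a, b):
        # VUZP: unzip a ++ b into even-indexed and odd-indexed elements.
        cat = a + b
        return cat[0::2], cat[1::2]

    def vzip(a, b):
        # VZIP: interleave; low halves first, then high halves.
        zipped = [x for pair in zip(a, b) for x in pair]
        return zipped[:len(a)], zipped[len(a):]

    # Matches the vtrni8 masks <0,8,2,10,...> and <1,9,3,11,...> above.
    a, b = list(range(8)), list(range(8, 16))
    assert vtrn(a, b) == ([0, 8, 2, 10, 4, 12, 6, 14],
                          [1, 9, 3, 11, 5, 13, 7, 15])

    # With two elements per register, all three collapse to the same swap.
    assert vtrn([0, 1], [2, 3]) == vuzp([0, 1], [2, 3]) == vzip([0, 1], [2, 3])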

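One more reading aid: -show-encoding prints the four instruction bytes in
little-endian memory order, so [0xa0,0x03,0xf1,0xf2] is the 32-bit word
0xf2f103a0. Here is a decode of that first VEXT encoding, with the field
layout taken from the ARM architecture manual's A1 VEXT encoding; treat it
as a sanity-check sketch, not a reference:

    # Decode the bytes from the first CHECK line of the new test.
    word = int.from_bytes(bytes([0xa0, 0x03, 0xf1, 0xf2]), "little")
    imm4 = (word >> 8) & 0xF                              # immediate: 3
    vd = ((word >> 22) & 1) << 4 | (word >> 12) & 0xF     # D:Vd -> d16
    vn = ((word >> 7) & 1) << 4 | (word >> 16) & 0xF      # N:Vn -> d17
    vm = ((word >> 5) & 1) << 4 | (word & 0xF)            # M:Vm -> d16
    print(f"vext d{vd}, d{vn}, d{vm}, #{imm4}")           # vext d16, d17, d16, #3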