[llvm] 824d073 - [X86] fold-vector-sext - replace X32 check prefixes with X86

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 31 04:09:10 PST 2024


Author: Simon Pilgrim
Date: 2024-01-31T12:01:02Z
New Revision: 824d073fb654891dc31523f6b68f49818cfaf40a

URL: https://github.com/llvm/llvm-project/commit/824d073fb654891dc31523f6b68f49818cfaf40a
DIFF: https://github.com/llvm/llvm-project/commit/824d073fb654891dc31523f6b68f49818cfaf40a.diff

LOG: [X86] fold-vector-sext - replace X32 check prefixes with X86

We try to only use X32 for gnux32 triple tests.

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/fold-vector-sext-crash2.ll
    llvm/test/CodeGen/X86/fold-vector-sext-zext.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/fold-vector-sext-crash2.ll b/llvm/test/CodeGen/X86/fold-vector-sext-crash2.ll
index d210f173238a7..9a955ce5a24cb 100644
--- a/llvm/test/CodeGen/X86/fold-vector-sext-crash2.ll
+++ b/llvm/test/CodeGen/X86/fold-vector-sext-crash2.ll
@@ -1,30 +1,30 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown    | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-unknown    | FileCheck %s -check-prefix=X86
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown  | FileCheck %s -check-prefix=X64
 
 ; DAGCombiner crashes during sext folding
 
 define <2 x i256> @test_sext1() {
-; X32-LABEL: test_sext1:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl $-1, 60(%eax)
-; X32-NEXT:    movl $-1, 56(%eax)
-; X32-NEXT:    movl $-1, 52(%eax)
-; X32-NEXT:    movl $-1, 48(%eax)
-; X32-NEXT:    movl $-1, 44(%eax)
-; X32-NEXT:    movl $-1, 40(%eax)
-; X32-NEXT:    movl $-1, 36(%eax)
-; X32-NEXT:    movl $-99, 32(%eax)
-; X32-NEXT:    movl $0, 28(%eax)
-; X32-NEXT:    movl $0, 24(%eax)
-; X32-NEXT:    movl $0, 20(%eax)
-; X32-NEXT:    movl $0, 16(%eax)
-; X32-NEXT:    movl $0, 12(%eax)
-; X32-NEXT:    movl $0, 8(%eax)
-; X32-NEXT:    movl $0, 4(%eax)
-; X32-NEXT:    movl $0, (%eax)
-; X32-NEXT:    retl $4
+; X86-LABEL: test_sext1:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $-1, 60(%eax)
+; X86-NEXT:    movl $-1, 56(%eax)
+; X86-NEXT:    movl $-1, 52(%eax)
+; X86-NEXT:    movl $-1, 48(%eax)
+; X86-NEXT:    movl $-1, 44(%eax)
+; X86-NEXT:    movl $-1, 40(%eax)
+; X86-NEXT:    movl $-1, 36(%eax)
+; X86-NEXT:    movl $-99, 32(%eax)
+; X86-NEXT:    movl $0, 28(%eax)
+; X86-NEXT:    movl $0, 24(%eax)
+; X86-NEXT:    movl $0, 20(%eax)
+; X86-NEXT:    movl $0, 16(%eax)
+; X86-NEXT:    movl $0, 12(%eax)
+; X86-NEXT:    movl $0, 8(%eax)
+; X86-NEXT:    movl $0, 4(%eax)
+; X86-NEXT:    movl $0, (%eax)
+; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: test_sext1:
 ; X64:       # %bb.0:
@@ -43,26 +43,26 @@ define <2 x i256> @test_sext1() {
 }
 
 define <2 x i256> @test_sext2() {
-; X32-LABEL: test_sext2:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl $-1, 60(%eax)
-; X32-NEXT:    movl $-1, 56(%eax)
-; X32-NEXT:    movl $-1, 52(%eax)
-; X32-NEXT:    movl $-1, 48(%eax)
-; X32-NEXT:    movl $-1, 44(%eax)
-; X32-NEXT:    movl $-1, 40(%eax)
-; X32-NEXT:    movl $-1, 36(%eax)
-; X32-NEXT:    movl $-1999, 32(%eax) # imm = 0xF831
-; X32-NEXT:    movl $0, 28(%eax)
-; X32-NEXT:    movl $0, 24(%eax)
-; X32-NEXT:    movl $0, 20(%eax)
-; X32-NEXT:    movl $0, 16(%eax)
-; X32-NEXT:    movl $0, 12(%eax)
-; X32-NEXT:    movl $0, 8(%eax)
-; X32-NEXT:    movl $0, 4(%eax)
-; X32-NEXT:    movl $0, (%eax)
-; X32-NEXT:    retl $4
+; X86-LABEL: test_sext2:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $-1, 60(%eax)
+; X86-NEXT:    movl $-1, 56(%eax)
+; X86-NEXT:    movl $-1, 52(%eax)
+; X86-NEXT:    movl $-1, 48(%eax)
+; X86-NEXT:    movl $-1, 44(%eax)
+; X86-NEXT:    movl $-1, 40(%eax)
+; X86-NEXT:    movl $-1, 36(%eax)
+; X86-NEXT:    movl $-1999, 32(%eax) # imm = 0xF831
+; X86-NEXT:    movl $0, 28(%eax)
+; X86-NEXT:    movl $0, 24(%eax)
+; X86-NEXT:    movl $0, 20(%eax)
+; X86-NEXT:    movl $0, 16(%eax)
+; X86-NEXT:    movl $0, 12(%eax)
+; X86-NEXT:    movl $0, 8(%eax)
+; X86-NEXT:    movl $0, 4(%eax)
+; X86-NEXT:    movl $0, (%eax)
+; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: test_sext2:
 ; X64:       # %bb.0:
@@ -81,26 +81,26 @@ define <2 x i256> @test_sext2() {
 }
 
 define <2 x i256> @test_zext1() {
-; X32-LABEL: test_zext1:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl $0, 60(%eax)
-; X32-NEXT:    movl $0, 56(%eax)
-; X32-NEXT:    movl $0, 52(%eax)
-; X32-NEXT:    movl $0, 48(%eax)
-; X32-NEXT:    movl $0, 44(%eax)
-; X32-NEXT:    movl $0, 40(%eax)
-; X32-NEXT:    movl $0, 36(%eax)
-; X32-NEXT:    movl $254, 32(%eax)
-; X32-NEXT:    movl $0, 28(%eax)
-; X32-NEXT:    movl $0, 24(%eax)
-; X32-NEXT:    movl $0, 20(%eax)
-; X32-NEXT:    movl $0, 16(%eax)
-; X32-NEXT:    movl $0, 12(%eax)
-; X32-NEXT:    movl $0, 8(%eax)
-; X32-NEXT:    movl $0, 4(%eax)
-; X32-NEXT:    movl $0, (%eax)
-; X32-NEXT:    retl $4
+; X86-LABEL: test_zext1:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $0, 60(%eax)
+; X86-NEXT:    movl $0, 56(%eax)
+; X86-NEXT:    movl $0, 52(%eax)
+; X86-NEXT:    movl $0, 48(%eax)
+; X86-NEXT:    movl $0, 44(%eax)
+; X86-NEXT:    movl $0, 40(%eax)
+; X86-NEXT:    movl $0, 36(%eax)
+; X86-NEXT:    movl $254, 32(%eax)
+; X86-NEXT:    movl $0, 28(%eax)
+; X86-NEXT:    movl $0, 24(%eax)
+; X86-NEXT:    movl $0, 20(%eax)
+; X86-NEXT:    movl $0, 16(%eax)
+; X86-NEXT:    movl $0, 12(%eax)
+; X86-NEXT:    movl $0, 8(%eax)
+; X86-NEXT:    movl $0, 4(%eax)
+; X86-NEXT:    movl $0, (%eax)
+; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: test_zext1:
 ; X64:       # %bb.0:
@@ -118,26 +118,26 @@ define <2 x i256> @test_zext1() {
 }
 
 define <2 x i256> @test_zext2() {
-; X32-LABEL: test_zext2:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl $0, 60(%eax)
-; X32-NEXT:    movl $0, 56(%eax)
-; X32-NEXT:    movl $0, 52(%eax)
-; X32-NEXT:    movl $0, 48(%eax)
-; X32-NEXT:    movl $-1, 44(%eax)
-; X32-NEXT:    movl $-1, 40(%eax)
-; X32-NEXT:    movl $-1, 36(%eax)
-; X32-NEXT:    movl $-2, 32(%eax)
-; X32-NEXT:    movl $0, 28(%eax)
-; X32-NEXT:    movl $0, 24(%eax)
-; X32-NEXT:    movl $0, 20(%eax)
-; X32-NEXT:    movl $0, 16(%eax)
-; X32-NEXT:    movl $0, 12(%eax)
-; X32-NEXT:    movl $0, 8(%eax)
-; X32-NEXT:    movl $0, 4(%eax)
-; X32-NEXT:    movl $0, (%eax)
-; X32-NEXT:    retl $4
+; X86-LABEL: test_zext2:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $0, 60(%eax)
+; X86-NEXT:    movl $0, 56(%eax)
+; X86-NEXT:    movl $0, 52(%eax)
+; X86-NEXT:    movl $0, 48(%eax)
+; X86-NEXT:    movl $-1, 44(%eax)
+; X86-NEXT:    movl $-1, 40(%eax)
+; X86-NEXT:    movl $-1, 36(%eax)
+; X86-NEXT:    movl $-2, 32(%eax)
+; X86-NEXT:    movl $0, 28(%eax)
+; X86-NEXT:    movl $0, 24(%eax)
+; X86-NEXT:    movl $0, 20(%eax)
+; X86-NEXT:    movl $0, 16(%eax)
+; X86-NEXT:    movl $0, 12(%eax)
+; X86-NEXT:    movl $0, 8(%eax)
+; X86-NEXT:    movl $0, 4(%eax)
+; X86-NEXT:    movl $0, (%eax)
+; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: test_zext2:
 ; X64:       # %bb.0:

diff --git a/llvm/test/CodeGen/X86/fold-vector-sext-zext.ll b/llvm/test/CodeGen/X86/fold-vector-sext-zext.ll
index d31168f407890..b8746626d6072 100644
--- a/llvm/test/CodeGen/X86/fold-vector-sext-zext.ll
+++ b/llvm/test/CodeGen/X86/fold-vector-sext-zext.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+avx | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+avx | FileCheck %s --check-prefix=X86
 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s --check-prefix=X64
 
 ; Verify that the backend correctly folds a sign/zero extend of a vector where
@@ -9,10 +9,10 @@
 ; vector should be known at static time.
 
 define <4 x i16> @test_sext_4i8_4i16() {
-; X32-LABEL: test_sext_4i8_4i16:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovsd {{.*#+}} xmm0 = [0,65535,2,65533,0,0,0,0]
-; X32-NEXT:    retl
+; X86-LABEL: test_sext_4i8_4i16:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovsd {{.*#+}} xmm0 = [0,65535,2,65533,0,0,0,0]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_4i8_4i16:
 ; X64:       # %bb.0:
@@ -27,10 +27,10 @@ define <4 x i16> @test_sext_4i8_4i16() {
 }
 
 define <4 x i16> @test_sext_4i8_4i16_undef() {
-; X32-LABEL: test_sext_4i8_4i16_undef:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovsd {{.*#+}} xmm0 = [0,65535,0,65533,0,0,0,0]
-; X32-NEXT:    retl
+; X86-LABEL: test_sext_4i8_4i16_undef:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovsd {{.*#+}} xmm0 = [0,65535,0,65533,0,0,0,0]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_4i8_4i16_undef:
 ; X64:       # %bb.0:
@@ -45,10 +45,10 @@ define <4 x i16> @test_sext_4i8_4i16_undef() {
 }
 
 define <4 x i32> @test_sext_4i8_4i32() {
-; X32-LABEL: test_sext_4i8_4i32:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,4294967295,2,4294967293]
-; X32-NEXT:    retl
+; X86-LABEL: test_sext_4i8_4i32:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} xmm0 = [0,4294967295,2,4294967293]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_4i8_4i32:
 ; X64:       # %bb.0:
@@ -63,10 +63,10 @@ define <4 x i32> @test_sext_4i8_4i32() {
 }
 
 define <4 x i32> @test_sext_4i8_4i32_undef() {
-; X32-LABEL: test_sext_4i8_4i32_undef:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,4294967295,0,4294967293]
-; X32-NEXT:    retl
+; X86-LABEL: test_sext_4i8_4i32_undef:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} xmm0 = [0,4294967295,0,4294967293]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_4i8_4i32_undef:
 ; X64:       # %bb.0:
@@ -81,10 +81,10 @@ define <4 x i32> @test_sext_4i8_4i32_undef() {
 }
 
 define <4 x i64> @test_sext_4i8_4i64() {
-; X32-LABEL: test_sext_4i8_4i64:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,4294967295,4294967295,2,0,4294967293,4294967295]
-; X32-NEXT:    retl
+; X86-LABEL: test_sext_4i8_4i64:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,4294967295,4294967295,2,0,4294967293,4294967295]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_4i8_4i64:
 ; X64:       # %bb.0:
@@ -99,10 +99,10 @@ define <4 x i64> @test_sext_4i8_4i64() {
 }
 
 define <4 x i64> @test_sext_4i8_4i64_undef() {
-; X32-LABEL: test_sext_4i8_4i64_undef:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,4294967295,4294967295,0,0,4294967293,4294967295]
-; X32-NEXT:    retl
+; X86-LABEL: test_sext_4i8_4i64_undef:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,4294967295,4294967295,0,0,4294967293,4294967295]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_4i8_4i64_undef:
 ; X64:       # %bb.0:
@@ -117,10 +117,10 @@ define <4 x i64> @test_sext_4i8_4i64_undef() {
 }
 
 define <8 x i16> @test_sext_8i8_8i16() {
-; X32-LABEL: test_sext_8i8_8i16:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,65535,2,65533,4,65531,6,65529]
-; X32-NEXT:    retl
+; X86-LABEL: test_sext_8i8_8i16:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} xmm0 = [0,65535,2,65533,4,65531,6,65529]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_8i8_8i16:
 ; X64:       # %bb.0:
@@ -139,10 +139,10 @@ define <8 x i16> @test_sext_8i8_8i16() {
 }
 
 define <8 x i32> @test_sext_8i8_8i32() {
-; X32-LABEL: test_sext_8i8_8i32:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,4294967295,2,4294967293,4,4294967291,6,4294967289]
-; X32-NEXT:    retl
+; X86-LABEL: test_sext_8i8_8i32:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} ymm0 = [0,4294967295,2,4294967293,4,4294967291,6,4294967289]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_8i8_8i32:
 ; X64:       # %bb.0:
@@ -161,10 +161,10 @@ define <8 x i32> @test_sext_8i8_8i32() {
 }
 
 define <8 x i16> @test_sext_8i8_8i16_undef() {
-; X32-LABEL: test_sext_8i8_8i16_undef:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,65535,0,65533,0,65531,0,65529]
-; X32-NEXT:    retl
+; X86-LABEL: test_sext_8i8_8i16_undef:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} xmm0 = [0,65535,0,65533,0,65531,0,65529]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_8i8_8i16_undef:
 ; X64:       # %bb.0:
@@ -183,10 +183,10 @@ define <8 x i16> @test_sext_8i8_8i16_undef() {
 }
 
 define <8 x i32> @test_sext_8i8_8i32_undef() {
-; X32-LABEL: test_sext_8i8_8i32_undef:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,2,0,4,0,6,0]
-; X32-NEXT:    retl
+; X86-LABEL: test_sext_8i8_8i32_undef:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,2,0,4,0,6,0]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_8i8_8i32_undef:
 ; X64:       # %bb.0:
@@ -205,10 +205,10 @@ define <8 x i32> @test_sext_8i8_8i32_undef() {
 }
 
 define <4 x i16> @test_zext_4i8_4i16() {
-; X32-LABEL: test_zext_4i8_4i16:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovsd {{.*#+}} xmm0 = [0,255,2,253,0,0,0,0]
-; X32-NEXT:    retl
+; X86-LABEL: test_zext_4i8_4i16:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovsd {{.*#+}} xmm0 = [0,255,2,253,0,0,0,0]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i16:
 ; X64:       # %bb.0:
@@ -223,10 +223,10 @@ define <4 x i16> @test_zext_4i8_4i16() {
 }
 
 define <4 x i32> @test_zext_4i8_4i32() {
-; X32-LABEL: test_zext_4i8_4i32:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,2,253]
-; X32-NEXT:    retl
+; X86-LABEL: test_zext_4i8_4i32:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,2,253]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i32:
 ; X64:       # %bb.0:
@@ -241,10 +241,10 @@ define <4 x i32> @test_zext_4i8_4i32() {
 }
 
 define <4 x i64> @test_zext_4i8_4i64() {
-; X32-LABEL: test_zext_4i8_4i64:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,255,0,2,0,253,0]
-; X32-NEXT:    retl
+; X86-LABEL: test_zext_4i8_4i64:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,255,0,2,0,253,0]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i64:
 ; X64:       # %bb.0:
@@ -259,10 +259,10 @@ define <4 x i64> @test_zext_4i8_4i64() {
 }
 
 define <4 x i16> @test_zext_4i8_4i16_undef() {
-; X32-LABEL: test_zext_4i8_4i16_undef:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovsd {{.*#+}} xmm0 = [0,255,0,253,0,0,0,0]
-; X32-NEXT:    retl
+; X86-LABEL: test_zext_4i8_4i16_undef:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovsd {{.*#+}} xmm0 = [0,255,0,253,0,0,0,0]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i16_undef:
 ; X64:       # %bb.0:
@@ -277,10 +277,10 @@ define <4 x i16> @test_zext_4i8_4i16_undef() {
 }
 
 define <4 x i32> @test_zext_4i8_4i32_undef() {
-; X32-LABEL: test_zext_4i8_4i32_undef:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,2,0]
-; X32-NEXT:    retl
+; X86-LABEL: test_zext_4i8_4i32_undef:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,2,0]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i32_undef:
 ; X64:       # %bb.0:
@@ -295,10 +295,10 @@ define <4 x i32> @test_zext_4i8_4i32_undef() {
 }
 
 define <4 x i64> @test_zext_4i8_4i64_undef() {
-; X32-LABEL: test_zext_4i8_4i64_undef:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,255,0,2,0,0,0]
-; X32-NEXT:    retl
+; X86-LABEL: test_zext_4i8_4i64_undef:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,255,0,2,0,0,0]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i64_undef:
 ; X64:       # %bb.0:
@@ -313,10 +313,10 @@ define <4 x i64> @test_zext_4i8_4i64_undef() {
 }
 
 define <8 x i16> @test_zext_8i8_8i16() {
-; X32-LABEL: test_zext_8i8_8i16:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,2,253,4,251,6,249]
-; X32-NEXT:    retl
+; X86-LABEL: test_zext_8i8_8i16:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,2,253,4,251,6,249]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_8i8_8i16:
 ; X64:       # %bb.0:
@@ -335,10 +335,10 @@ define <8 x i16> @test_zext_8i8_8i16() {
 }
 
 define <8 x i32> @test_zext_8i8_8i32() {
-; X32-LABEL: test_zext_8i8_8i32:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,255,2,253,4,251,6,249]
-; X32-NEXT:    retl
+; X86-LABEL: test_zext_8i8_8i32:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} ymm0 = [0,255,2,253,4,251,6,249]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_8i8_8i32:
 ; X64:       # %bb.0:
@@ -357,10 +357,10 @@ define <8 x i32> @test_zext_8i8_8i32() {
 }
 
 define <8 x i16> @test_zext_8i8_8i16_undef() {
-; X32-LABEL: test_zext_8i8_8i16_undef:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,0,253,0,251,0,249]
-; X32-NEXT:    retl
+; X86-LABEL: test_zext_8i8_8i16_undef:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,0,253,0,251,0,249]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_8i8_8i16_undef:
 ; X64:       # %bb.0:
@@ -379,10 +379,10 @@ define <8 x i16> @test_zext_8i8_8i16_undef() {
 }
 
 define <8 x i32> @test_zext_8i8_8i32_undef() {
-; X32-LABEL: test_zext_8i8_8i32_undef:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,2,253,4,0,6,0]
-; X32-NEXT:    retl
+; X86-LABEL: test_zext_8i8_8i32_undef:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,2,253,4,0,6,0]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_8i8_8i32_undef:
 ; X64:       # %bb.0:


        


More information about the llvm-commits mailing list