[llvm] r348403 - [X86][SSE] Fix a copy+paste typo that was folding the sext/zext of partial vectors

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 5 11:32:19 PST 2018


Author: rksimon
Date: Wed Dec  5 11:32:19 2018
New Revision: 348403

URL: http://llvm.org/viewvc/llvm-project?rev=348403&view=rev
Log:
[X86][SSE] Fix a copy+paste typo that was folding the sext/zext of partial vectors
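
For context: each affected test builds all eight lanes of an <8 x i8> vector through a
chain of insertelement instructions (%1 through %8), but the sext/zext was mistakenly
applied to the intermediate value %4, in which only the first four lanes are defined.
The constant folder therefore emitted undef upper lanes instead of the full constant.
A minimal sketch of the typo and its effect, mirroring the first test in the diff below:

    ; before (typo): only four lanes of %4 are defined, so the fold
    ; produces <0,65535,2,65533,u,u,u,u>
    %9 = sext <8 x i8> %4 to <8 x i16>

    ; after (fixed): all eight lanes fold to the full constant
    ; [0,65535,2,65533,4,65531,6,65529]
    %9 = sext <8 x i8> %8 to <8 x i16>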

Modified:
    llvm/trunk/test/CodeGen/X86/fold-vector-sext-zext.ll

Modified: llvm/trunk/test/CodeGen/X86/fold-vector-sext-zext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-vector-sext-zext.ll?rev=348403&r1=348402&r2=348403&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-vector-sext-zext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-vector-sext-zext.ll Wed Dec  5 11:32:19 2018
@@ -119,12 +119,12 @@ define <4 x i64> @test_sext_4i8_4i64_und
 define <8 x i16> @test_sext_8i8_8i16() {
 ; X32-LABEL: test_sext_8i8_8i16:
 ; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = <0,65535,2,65533,u,u,u,u>
+; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,65535,2,65533,4,65531,6,65529]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_8i8_8i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} xmm0 = <0,65535,2,65533,u,u,u,u>
+; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [0,65535,2,65533,4,65531,6,65529]
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 0, i32 0
   %2 = insertelement <8 x i8> %1, i8 -1, i32 1
@@ -134,19 +134,19 @@ define <8 x i16> @test_sext_8i8_8i16() {
   %6 = insertelement <8 x i8> %5, i8 -5, i32 5
   %7 = insertelement <8 x i8> %6, i8 6, i32 6
   %8 = insertelement <8 x i8> %7, i8 -7, i32 7
-  %9 = sext <8 x i8> %4 to <8 x i16>
+  %9 = sext <8 x i8> %8 to <8 x i16>
   ret <8 x i16> %9
 }
 
 define <8 x i32> @test_sext_8i8_8i32() {
 ; X32-LABEL: test_sext_8i8_8i32:
 ; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = <0,4294967295,2,4294967293,u,u,u,u>
+; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,4294967295,2,4294967293,4,4294967291,6,4294967289]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_8i8_8i32:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} ymm0 = <0,4294967295,2,4294967293,u,u,u,u>
+; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [0,4294967295,2,4294967293,4,4294967291,6,4294967289]
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 0, i32 0
   %2 = insertelement <8 x i8> %1, i8 -1, i32 1
@@ -156,19 +156,19 @@ define <8 x i32> @test_sext_8i8_8i32() {
   %6 = insertelement <8 x i8> %5, i8 -5, i32 5
   %7 = insertelement <8 x i8> %6, i8 6, i32 6
   %8 = insertelement <8 x i8> %7, i8 -7, i32 7
-  %9 = sext <8 x i8> %4 to <8 x i32>
+  %9 = sext <8 x i8> %8 to <8 x i32>
   ret <8 x i32> %9
 }
 
 define <8 x i16> @test_sext_8i8_8i16_undef() {
 ; X32-LABEL: test_sext_8i8_8i16_undef:
 ; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} xmm0 = <u,65535,u,65533,u,u,u,u>
+; X32-NEXT:    vmovaps {{.*#+}} xmm0 = <u,65535,u,65533,u,65531,u,65529>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_8i8_8i16_undef:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} xmm0 = <u,65535,u,65533,u,u,u,u>
+; X64-NEXT:    vmovaps {{.*#+}} xmm0 = <u,65535,u,65533,u,65531,u,65529>
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 undef, i32 0
   %2 = insertelement <8 x i8> %1, i8 -1, i32 1
@@ -178,19 +178,19 @@ define <8 x i16> @test_sext_8i8_8i16_und
   %6 = insertelement <8 x i8> %5, i8 -5, i32 5
   %7 = insertelement <8 x i8> %6, i8 undef, i32 6
   %8 = insertelement <8 x i8> %7, i8 -7, i32 7
-  %9 = sext <8 x i8> %4 to <8 x i16>
+  %9 = sext <8 x i8> %8 to <8 x i16>
   ret <8 x i16> %9
 }
 
 define <8 x i32> @test_sext_8i8_8i32_undef() {
 ; X32-LABEL: test_sext_8i8_8i32_undef:
 ; X32:       # %bb.0:
-; X32-NEXT:    vmovaps {{.*#+}} ymm0 = <0,u,2,u,u,u,u,u>
+; X32-NEXT:    vmovaps {{.*#+}} ymm0 = <0,u,2,u,4,u,6,u>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_8i8_8i32_undef:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovaps {{.*#+}} ymm0 = <0,u,2,u,u,u,u,u>
+; X64-NEXT:    vmovaps {{.*#+}} ymm0 = <0,u,2,u,4,u,6,u>
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 0, i32 0
   %2 = insertelement <8 x i8> %1, i8 undef, i32 1
@@ -200,7 +200,7 @@ define <8 x i32> @test_sext_8i8_8i32_und
   %6 = insertelement <8 x i8> %5, i8 undef, i32 5
   %7 = insertelement <8 x i8> %6, i8 6, i32 6
   %8 = insertelement <8 x i8> %7, i8 undef, i32 7
-  %9 = sext <8 x i8> %4 to <8 x i32>
+  %9 = sext <8 x i8> %8 to <8 x i32>
   ret <8 x i32> %9
 }