[llvm] e62836e - [X86] Regenerate avx2-shift.ll check prefixes

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 27 09:36:50 PDT 2023


Author: Simon Pilgrim
Date: 2023-03-27T17:34:41+01:00
New Revision: e62836e31fb926ff41f91a7597d674a8c76276bc

URL: https://github.com/llvm/llvm-project/commit/e62836e31fb926ff41f91a7597d674a8c76276bc
DIFF: https://github.com/llvm/llvm-project/commit/e62836e31fb926ff41f91a7597d674a8c76276bc.diff

LOG: [X86] Regenerate avx2-shift.ll check prefixes

Add common CHECK prefix, and rename X32 -> X86 (we try to use X32 for gnux32 triples)

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/avx2-shift.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/avx2-shift.ll b/llvm/test/CodeGen/X86/avx2-shift.ll
index c7b258d7eaa0..7f163ef266c7 100644
--- a/llvm/test/CodeGen/X86/avx2-shift.ll
+++ b/llvm/test/CodeGen/X86/avx2-shift.ll
@@ -1,143 +1,93 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X64
 
 define <4 x i32> @variable_shl0(<4 x i32> %x, <4 x i32> %y) {
-; X32-LABEL: variable_shl0:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: variable_shl0:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: variable_shl0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %k = shl <4 x i32> %x, %y
   ret <4 x i32> %k
 }
 
 define <8 x i32> @variable_shl1(<8 x i32> %x, <8 x i32> %y) {
-; X32-LABEL: variable_shl1:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: variable_shl1:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: variable_shl1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %k = shl <8 x i32> %x, %y
   ret <8 x i32> %k
 }
 
 define <2 x i64> @variable_shl2(<2 x i64> %x, <2 x i64> %y) {
-; X32-LABEL: variable_shl2:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsllvq %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: variable_shl2:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsllvq %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: variable_shl2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllvq %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %k = shl <2 x i64> %x, %y
   ret <2 x i64> %k
 }
 
 define <4 x i64> @variable_shl3(<4 x i64> %x, <4 x i64> %y) {
-; X32-LABEL: variable_shl3:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsllvq %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: variable_shl3:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsllvq %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: variable_shl3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllvq %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %k = shl <4 x i64> %x, %y
   ret <4 x i64> %k
 }
 
 define <4 x i32> @variable_srl0(<4 x i32> %x, <4 x i32> %y) {
-; X32-LABEL: variable_srl0:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: variable_srl0:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: variable_srl0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %k = lshr <4 x i32> %x, %y
   ret <4 x i32> %k
 }
 
 define <8 x i32> @variable_srl1(<8 x i32> %x, <8 x i32> %y) {
-; X32-LABEL: variable_srl1:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: variable_srl1:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: variable_srl1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %k = lshr <8 x i32> %x, %y
   ret <8 x i32> %k
 }
 
 define <2 x i64> @variable_srl2(<2 x i64> %x, <2 x i64> %y) {
-; X32-LABEL: variable_srl2:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsrlvq %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: variable_srl2:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsrlvq %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: variable_srl2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrlvq %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %k = lshr <2 x i64> %x, %y
   ret <2 x i64> %k
 }
 
 define <4 x i64> @variable_srl3(<4 x i64> %x, <4 x i64> %y) {
-; X32-LABEL: variable_srl3:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: variable_srl3:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: variable_srl3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %k = lshr <4 x i64> %x, %y
   ret <4 x i64> %k
 }
 
 define <4 x i32> @variable_sra0(<4 x i32> %x, <4 x i32> %y) {
-; X32-LABEL: variable_sra0:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsravd %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: variable_sra0:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsravd %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: variable_sra0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsravd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %k = ashr <4 x i32> %x, %y
   ret <4 x i32> %k
 }
 
 define <8 x i32> @variable_sra1(<8 x i32> %x, <8 x i32> %y) {
-; X32-LABEL: variable_sra1:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsravd %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: variable_sra1:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsravd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: variable_sra1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsravd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %k = ashr <8 x i32> %x, %y
   ret <8 x i32> %k
 }
@@ -145,43 +95,28 @@ define <8 x i32> @variable_sra1(<8 x i32> %x, <8 x i32> %y) {
 ;;; Shift left
 
 define <8 x i32> @vshift00(<8 x i32> %a) nounwind readnone {
-; X32-LABEL: vshift00:
-; X32:       # %bb.0:
-; X32-NEXT:    vpslld $2, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: vshift00:
-; X64:       # %bb.0:
-; X64-NEXT:    vpslld $2, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: vshift00:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpslld $2, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %s = shl <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
   ret <8 x i32> %s
 }
 
 define <16 x i16> @vshift01(<16 x i16> %a) nounwind readnone {
-; X32-LABEL: vshift01:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsllw $2, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: vshift01:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsllw $2, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: vshift01:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllw $2, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %s = shl <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
   ret <16 x i16> %s
 }
 
 define <4 x i64> @vshift02(<4 x i64> %a) nounwind readnone {
-; X32-LABEL: vshift02:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsllq $2, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: vshift02:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsllq $2, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: vshift02:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllq $2, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %s = shl <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2>
   ret <4 x i64> %s
 }
@@ -189,43 +124,28 @@ define <4 x i64> @vshift02(<4 x i64> %a) nounwind readnone {
 ;;; Logical Shift right
 
 define <8 x i32> @vshift03(<8 x i32> %a) nounwind readnone {
-; X32-LABEL: vshift03:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsrld $2, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: vshift03:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsrld $2, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: vshift03:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrld $2, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %s = lshr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
   ret <8 x i32> %s
 }
 
 define <16 x i16> @vshift04(<16 x i16> %a) nounwind readnone {
-; X32-LABEL: vshift04:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsrlw $2, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: vshift04:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsrlw $2, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: vshift04:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrlw $2, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %s = lshr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
   ret <16 x i16> %s
 }
 
 define <4 x i64> @vshift05(<4 x i64> %a) nounwind readnone {
-; X32-LABEL: vshift05:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsrlq $2, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: vshift05:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsrlq $2, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: vshift05:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrlq $2, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %s = lshr <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2>
   ret <4 x i64> %s
 }
@@ -233,39 +153,29 @@ define <4 x i64> @vshift05(<4 x i64> %a) nounwind readnone {
 ;;; Arithmetic Shift right
 
 define <8 x i32> @vshift06(<8 x i32> %a) nounwind readnone {
-; X32-LABEL: vshift06:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsrad $2, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: vshift06:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsrad $2, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: vshift06:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrad $2, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %s = ashr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
   ret <8 x i32> %s
 }
 
 define <16 x i16> @vshift07(<16 x i16> %a) nounwind readnone {
-; X32-LABEL: vshift07:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsraw $2, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: vshift07:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsraw $2, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: vshift07:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsraw $2, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %s = ashr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
   ret <16 x i16> %s
 }
 
 define <4 x i32> @variable_sra0_load(<4 x i32> %x, ptr %y) {
-; X32-LABEL: variable_sra0_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpsravd (%eax), %xmm0, %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: variable_sra0_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpsravd (%eax), %xmm0, %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: variable_sra0_load:
 ; X64:       # %bb.0:
@@ -277,11 +187,11 @@ define <4 x i32> @variable_sra0_load(<4 x i32> %x, ptr %y) {
 }
 
 define <8 x i32> @variable_sra1_load(<8 x i32> %x, ptr %y) {
-; X32-LABEL: variable_sra1_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpsravd (%eax), %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: variable_sra1_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpsravd (%eax), %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: variable_sra1_load:
 ; X64:       # %bb.0:
@@ -293,11 +203,11 @@ define <8 x i32> @variable_sra1_load(<8 x i32> %x, ptr %y) {
 }
 
 define <4 x i32> @variable_shl0_load(<4 x i32> %x, ptr %y) {
-; X32-LABEL: variable_shl0_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpsllvd (%eax), %xmm0, %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: variable_shl0_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpsllvd (%eax), %xmm0, %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: variable_shl0_load:
 ; X64:       # %bb.0:
@@ -309,11 +219,11 @@ define <4 x i32> @variable_shl0_load(<4 x i32> %x, ptr %y) {
 }
 
 define <8 x i32> @variable_shl1_load(<8 x i32> %x, ptr %y) {
-; X32-LABEL: variable_shl1_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpsllvd (%eax), %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: variable_shl1_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpsllvd (%eax), %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: variable_shl1_load:
 ; X64:       # %bb.0:
@@ -325,11 +235,11 @@ define <8 x i32> @variable_shl1_load(<8 x i32> %x, ptr %y) {
 }
 
 define <2 x i64> @variable_shl2_load(<2 x i64> %x, ptr %y) {
-; X32-LABEL: variable_shl2_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpsllvq (%eax), %xmm0, %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: variable_shl2_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpsllvq (%eax), %xmm0, %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: variable_shl2_load:
 ; X64:       # %bb.0:
@@ -341,11 +251,11 @@ define <2 x i64> @variable_shl2_load(<2 x i64> %x, ptr %y) {
 }
 
 define <4 x i64> @variable_shl3_load(<4 x i64> %x, ptr %y) {
-; X32-LABEL: variable_shl3_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpsllvq (%eax), %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: variable_shl3_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpsllvq (%eax), %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: variable_shl3_load:
 ; X64:       # %bb.0:
@@ -357,11 +267,11 @@ define <4 x i64> @variable_shl3_load(<4 x i64> %x, ptr %y) {
 }
 
 define <4 x i32> @variable_srl0_load(<4 x i32> %x, ptr %y) {
-; X32-LABEL: variable_srl0_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpsrlvd (%eax), %xmm0, %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: variable_srl0_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpsrlvd (%eax), %xmm0, %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: variable_srl0_load:
 ; X64:       # %bb.0:
@@ -373,11 +283,11 @@ define <4 x i32> @variable_srl0_load(<4 x i32> %x, ptr %y) {
 }
 
 define <8 x i32> @variable_srl1_load(<8 x i32> %x, ptr %y) {
-; X32-LABEL: variable_srl1_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpsrlvd (%eax), %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: variable_srl1_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpsrlvd (%eax), %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: variable_srl1_load:
 ; X64:       # %bb.0:
@@ -389,11 +299,11 @@ define <8 x i32> @variable_srl1_load(<8 x i32> %x, ptr %y) {
 }
 
 define <2 x i64> @variable_srl2_load(<2 x i64> %x, ptr %y) {
-; X32-LABEL: variable_srl2_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpsrlvq (%eax), %xmm0, %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: variable_srl2_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpsrlvq (%eax), %xmm0, %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: variable_srl2_load:
 ; X64:       # %bb.0:
@@ -405,11 +315,11 @@ define <2 x i64> @variable_srl2_load(<2 x i64> %x, ptr %y) {
 }
 
 define <4 x i64> @variable_srl3_load(<4 x i64> %x, ptr %y) {
-; X32-LABEL: variable_srl3_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpsrlvq (%eax), %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: variable_srl3_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpsrlvq (%eax), %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: variable_srl3_load:
 ; X64:       # %bb.0:
@@ -421,11 +331,11 @@ define <4 x i64> @variable_srl3_load(<4 x i64> %x, ptr %y) {
 }
 
 define <32 x i8> @shl9(<32 x i8> %A) nounwind {
-; X32-LABEL: shl9:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsllw $3, %ymm0, %ymm0
-; X32-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: shl9:
+; X86:       # %bb.0:
+; X86-NEXT:    vpsllw $3, %ymm0, %ymm0
+; X86-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: shl9:
 ; X64:       # %bb.0:
@@ -437,11 +347,11 @@ define <32 x i8> @shl9(<32 x i8> %A) nounwind {
 }
 
 define <32 x i8> @shr9(<32 x i8> %A) nounwind {
-; X32-LABEL: shr9:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsrlw $3, %ymm0, %ymm0
-; X32-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: shr9:
+; X86:       # %bb.0:
+; X86-NEXT:    vpsrlw $3, %ymm0, %ymm0
+; X86-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: shr9:
 ; X64:       # %bb.0:
@@ -453,30 +363,24 @@ define <32 x i8> @shr9(<32 x i8> %A) nounwind {
 }
 
 define <32 x i8> @sra_v32i8_7(<32 x i8> %A) nounwind {
-; X32-LABEL: sra_v32i8_7:
-; X32:       # %bb.0:
-; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: sra_v32i8_7:
-; X64:       # %bb.0:
-; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: sra_v32i8_7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %B = ashr <32 x i8> %A, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
   ret <32 x i8> %B
 }
 
 define <32 x i8> @sra_v32i8(<32 x i8> %A) nounwind {
-; X32-LABEL: sra_v32i8:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsrlw $3, %ymm0, %ymm0
-; X32-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-NEXT:    vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; X32-NEXT:    vpxor %ymm1, %ymm0, %ymm0
-; X32-NEXT:    vpsubb %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: sra_v32i8:
+; X86:       # %bb.0:
+; X86-NEXT:    vpsrlw $3, %ymm0, %ymm0
+; X86-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-NEXT:    vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; X86-NEXT:    vpxor %ymm1, %ymm0, %ymm0
+; X86-NEXT:    vpsubb %ymm1, %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: sra_v32i8:
 ; X64:       # %bb.0:
@@ -491,109 +395,66 @@ define <32 x i8> @sra_v32i8(<32 x i8> %A) nounwind {
 }
 
 define <16 x i16> @sext_v16i16(<16 x i16> %a) nounwind {
-; X32-LABEL: sext_v16i16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsllw $8, %ymm0, %ymm0
-; X32-NEXT:    vpsraw $8, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: sext_v16i16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsllw $8, %ymm0, %ymm0
-; X64-NEXT:    vpsraw $8, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: sext_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllw $8, %ymm0, %ymm0
+; CHECK-NEXT:    vpsraw $8, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %b = trunc <16 x i16> %a to <16 x i8>
   %c = sext <16 x i8> %b to <16 x i16>
   ret <16 x i16> %c
 }
 
 define <8 x i32> @sext_v8i32(<8 x i32> %a) nounwind {
-; X32-LABEL: sext_v8i32:
-; X32:       # %bb.0:
-; X32-NEXT:    vpslld $16, %ymm0, %ymm0
-; X32-NEXT:    vpsrad $16, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: sext_v8i32:
-; X64:       # %bb.0:
-; X64-NEXT:    vpslld $16, %ymm0, %ymm0
-; X64-NEXT:    vpsrad $16, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: sext_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpslld $16, %ymm0, %ymm0
+; CHECK-NEXT:    vpsrad $16, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %b = trunc <8 x i32> %a to <8 x i16>
   %c = sext <8 x i16> %b to <8 x i32>
   ret <8 x i32> %c
 }
 
 define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8  x i16> %rhs) {
-; X32-LABEL: variable_shl16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; X32-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X32-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
-; X32-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
-; X32-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
-; X32-NEXT:    vzeroupper
-; X32-NEXT:    retl
-;
-; X64-LABEL: variable_shl16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; X64-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X64-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
-; X64-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
-; X64-NEXT:    vzeroupper
-; X64-NEXT:    retq
+; CHECK-LABEL: variable_shl16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; CHECK-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; CHECK-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
+; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = shl <8 x i16> %lhs, %rhs
   ret <8 x i16> %res
 }
 
 define <8 x i16> @variable_ashr16(<8 x i16> %lhs, <8  x i16> %rhs) {
-; X32-LABEL: variable_ashr16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; X32-NEXT:    vpmovsxwd %xmm0, %ymm0
-; X32-NEXT:    vpsravd %ymm1, %ymm0, %ymm0
-; X32-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; X32-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; X32-NEXT:    vzeroupper
-; X32-NEXT:    retl
-;
-; X64-LABEL: variable_ashr16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; X64-NEXT:    vpmovsxwd %xmm0, %ymm0
-; X64-NEXT:    vpsravd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; X64-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vzeroupper
-; X64-NEXT:    retq
+; CHECK-LABEL: variable_ashr16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; CHECK-NEXT:    vpmovsxwd %xmm0, %ymm0
+; CHECK-NEXT:    vpsravd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; CHECK-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = ashr <8 x i16> %lhs, %rhs
   ret <8 x i16> %res
 }
 
 define <8 x i16> @variable_lshr16(<8 x i16> %lhs, <8  x i16> %rhs) {
-; X32-LABEL: variable_lshr16:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; X32-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X32-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; X32-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; X32-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; X32-NEXT:    vzeroupper
-; X32-NEXT:    retl
-;
-; X64-LABEL: variable_lshr16:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; X64-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X64-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; X64-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vzeroupper
-; X64-NEXT:    retq
+; CHECK-LABEL: variable_lshr16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; CHECK-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; CHECK-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; CHECK-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = lshr <8 x i16> %lhs, %rhs
   ret <8 x i16> %res
 }


        


More information about the llvm-commits mailing list