[llvm] d6bb577 - [X86] Regenerate slow-pmulld.ll with common SSE check prefixes

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jun 8 10:23:36 PDT 2022


Author: Simon Pilgrim
Date: 2022-06-08T18:23:25+01:00
New Revision: d6bb577ffb677a9a3ea1b7ca319737e254eda873

URL: https://github.com/llvm/llvm-project/commit/d6bb577ffb677a9a3ea1b7ca319737e254eda873
DIFF: https://github.com/llvm/llvm-project/commit/d6bb577ffb677a9a3ea1b7ca319737e254eda873.diff

LOG: [X86] Regenerate slow-pmulld.ll with common SSE check prefixes

Add back some currently-unused common check prefixes to simplify the upcoming test regeneration in D127115

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/slow-pmulld.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/slow-pmulld.ll b/llvm/test/CodeGen/X86/slow-pmulld.ll
index 1f4990061a1b2..06a2f7840cd67 100644
--- a/llvm/test/CodeGen/X86/slow-pmulld.ll
+++ b/llvm/test/CodeGen/X86/slow-pmulld.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefixes=SLM,CHECK32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefixes=SLM,CHECK64
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4.2,+slow-pmulld | FileCheck %s --check-prefixes=SLOW,CHECK32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2,+slow-pmulld | FileCheck %s --check-prefixes=SLOW,CHECK64
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE4,SSE4-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE4,SSE4-64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefixes=SSE-32,SLM,SLM-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefixes=SSE-64,SLM,SLM-64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4.2,+slow-pmulld | FileCheck %s --check-prefixes=SSE-32,SLOW,SLOW-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2,+slow-pmulld | FileCheck %s --check-prefixes=SSE-64,SLOW,SLOW-64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE-32,SSE4,SSE4-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE-64,SSE4,SSE4-64
 ; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+avx2,+slow-pmulld | FileCheck %s --check-prefixes=AVX2,AVX2-SLOW,AVX2-SLOW32
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+slow-pmulld | FileCheck %s --check-prefixes=AVX2,AVX2-SLOW,AVX2-SLOW64
 ; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2,AVX-32,AVX2-32
@@ -20,29 +20,17 @@
 ; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=silvermont -mattr=-sse4.1
 
 define <4 x i32> @test_mul_v4i32_v4i8(<4 x i8> %A) {
-; CHECK32-LABEL: test_mul_v4i32_v4i8:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; CHECK32-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; CHECK32-NEXT:    retl
-;
-; CHECK64-LABEL: test_mul_v4i32_v4i8:
-; CHECK64:       # %bb.0:
-; CHECK64-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; CHECK64-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK64-NEXT:    retq
-;
-; SSE4-32-LABEL: test_mul_v4i32_v4i8:
-; SSE4-32:       # %bb.0:
-; SSE4-32-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE4-32-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; SSE4-32-NEXT:    retl
-;
-; SSE4-64-LABEL: test_mul_v4i32_v4i8:
-; SSE4-64:       # %bb.0:
-; SSE4-64-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE4-64-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE4-64-NEXT:    retq
+; SSE-32-LABEL: test_mul_v4i32_v4i8:
+; SSE-32:       # %bb.0:
+; SSE-32-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE-32-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE-32-NEXT:    retl
+;
+; SSE-64-LABEL: test_mul_v4i32_v4i8:
+; SSE-64:       # %bb.0:
+; SSE-64-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE-64-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-64-NEXT:    retq
 ;
 ; AVX2-SLOW32-LABEL: test_mul_v4i32_v4i8:
 ; AVX2-SLOW32:       # %bb.0:
@@ -556,29 +544,17 @@ define <16 x i32> @test_mul_v16i32_v16i16(<16 x i16> %A) {
 ;
 
 define <4 x i32> @test_mul_v4i32_v4i8_minsize(<4 x i8> %A) minsize {
-; CHECK32-LABEL: test_mul_v4i32_v4i8_minsize:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; CHECK32-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; CHECK32-NEXT:    retl
-;
-; CHECK64-LABEL: test_mul_v4i32_v4i8_minsize:
-; CHECK64:       # %bb.0:
-; CHECK64-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; CHECK64-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK64-NEXT:    retq
-;
-; SSE4-32-LABEL: test_mul_v4i32_v4i8_minsize:
-; SSE4-32:       # %bb.0:
-; SSE4-32-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE4-32-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; SSE4-32-NEXT:    retl
-;
-; SSE4-64-LABEL: test_mul_v4i32_v4i8_minsize:
-; SSE4-64:       # %bb.0:
-; SSE4-64-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE4-64-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE4-64-NEXT:    retq
+; SSE-32-LABEL: test_mul_v4i32_v4i8_minsize:
+; SSE-32:       # %bb.0:
+; SSE-32-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE-32-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE-32-NEXT:    retl
+;
+; SSE-64-LABEL: test_mul_v4i32_v4i8_minsize:
+; SSE-64:       # %bb.0:
+; SSE-64-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE-64-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-64-NEXT:    retq
 ;
 ; AVX2-SLOW32-LABEL: test_mul_v4i32_v4i8_minsize:
 ; AVX2-SLOW32:       # %bb.0:
@@ -863,29 +839,17 @@ define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
 }
 
 define <4 x i32> @test_mul_v4i32_v4i16_minsize(<4 x i16> %A) minsize {
-; CHECK32-LABEL: test_mul_v4i32_v4i16_minsize:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; CHECK32-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; CHECK32-NEXT:    retl
-;
-; CHECK64-LABEL: test_mul_v4i32_v4i16_minsize:
-; CHECK64:       # %bb.0:
-; CHECK64-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; CHECK64-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK64-NEXT:    retq
-;
-; SSE4-32-LABEL: test_mul_v4i32_v4i16_minsize:
-; SSE4-32:       # %bb.0:
-; SSE4-32-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE4-32-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; SSE4-32-NEXT:    retl
-;
-; SSE4-64-LABEL: test_mul_v4i32_v4i16_minsize:
-; SSE4-64:       # %bb.0:
-; SSE4-64-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE4-64-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE4-64-NEXT:    retq
+; SSE-32-LABEL: test_mul_v4i32_v4i16_minsize:
+; SSE-32:       # %bb.0:
+; SSE-32-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE-32-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE-32-NEXT:    retl
+;
+; SSE-64-LABEL: test_mul_v4i32_v4i16_minsize:
+; SSE-64:       # %bb.0:
+; SSE-64-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE-64-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-64-NEXT:    retq
 ;
 ; AVX2-LABEL: test_mul_v4i32_v4i16_minsize:
 ; AVX2:       # %bb.0:
@@ -1037,3 +1001,8 @@ define <16 x i32> @test_mul_v16i32_v16i16_minsize(<16 x i16> %A) minsize {
   %m = mul nuw nsw <16 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778>
   ret <16 x i32> %m
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; SLM-32: {{.*}}
+; SLM-64: {{.*}}
+; SLOW-32: {{.*}}
+; SLOW-64: {{.*}}